commit 9288ff53e7
Joseph Provino, 2016-01-07 21:10:28 +00:00
1316 changed files with 58581 additions and 14455 deletions


@@ -340,3 +340,5 @@ f242d4332f563648426a1b0fa02d8741beba19ef jdk9-b92
 5ac6287ec71aafe021cc839d8bc828108d23aaba jdk-9+95
 139f19d70350238e15e107945cea75082b6380b3 jdk-9+96
 4edcff1b9a8875eb6380a2165dfec599e8e3f7c0 jdk-9+97
+d00ad2d9049ac60815f70bff445e95df85648bd2 jdk-9+98
+f9bcdce2df26678c3fe468130b535c0342c69b89 jdk-9+99


@@ -340,3 +340,5 @@ cf1dc4c035fb84693d4ae5ad818785cb4d1465d1 jdk9-b90
 12a6fb4f070f8ca8fbca219ab9abf5da8908b317 jdk-9+95
 5582a79892596169ebddb3e2c2aa44939e4e3f40 jdk-9+96
 75c3897541ecb52ee16d001ea605b12971df7303 jdk-9+97
+48987460c7d49a29013963ee44d090194396bb61 jdk-9+98
+7c0577bea4c65d69c5bef67023a89d2efa4fb2f7 jdk-9+99

README

@@ -6,7 +6,7 @@ README:
 The root repository can be obtained with something like:
 hg clone http://hg.openjdk.java.net/jdk9/jdk9 openjdk9
 You can run the get_source.sh script located in the root repository to get
 the other needed repositories:
 cd openjdk9 && sh ./get_source.sh
@@ -17,7 +17,7 @@ README:
 See http://openjdk.java.net/ for more information about OpenJDK.
 Simple Build Instructions:
 0. Get the necessary system software/packages installed on your system, see
 http://hg.openjdk.java.net/jdk9/jdk9/raw-file/tip/README-builds.html
@@ -28,10 +28,10 @@ Simple Build Instructions:
 2. Configure the build:
 bash ./configure
 3. Build the OpenJDK:
 make all
-The resulting JDK image should be found in build/*/images/j2sdk-image
+The resulting JDK image should be found in build/*/images/jdk
 where make is GNU make 3.81 or newer, /usr/bin/make on Linux usually
 is 3.81 or newer. Note that on Solaris, GNU make is called "gmake".
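Taken together with the clone step earlier in this README, a minimal from-scratch build is sketched below; every command comes from the README text itself, only the ordering is added:

```sh
# Minimal end-to-end build, per the README steps above.
hg clone http://hg.openjdk.java.net/jdk9/jdk9 openjdk9
cd openjdk9 && sh ./get_source.sh   # fetch the nested repositories
bash ./configure                    # detect toolchain and boot JDK
make all                            # image lands in build/*/images/jdk
```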


@@ -250,9 +250,7 @@ Compilers</a>, <a href="#freetype">freetype</a>, <a href="#cups">cups</a>, and
 </ul></li>
 <li><p><strong>Mac OS X</strong></p>
-<p>Install <a href="https://developer.apple.com/xcode/">XCode 4.5.2</a> and also
-install the "Command line tools" found under the preferences pane
-"Downloads"</p></li>
+<p>Install <a href="https://developer.apple.com/xcode/">XCode 6.3</a></p></li>
 </ul>
 <p><a name="linux"></a></p>
@@ -279,39 +277,67 @@ OpenJDK.</p>
 <h5>Studio Compilers</h5>
 <p>At a minimum, the <a href="http://www.oracle.com/
-technetwork/server-storage/solarisstudio/downloads/index.htm">Studio 12 Update 1 Compilers</a> (containing
-version 5.10 of the C and C++ compilers) is required, including specific
+technetwork/server-storage/solarisstudio/downloads/index.htm">Studio 12 Update 4 Compilers</a> (containing
+version 5.13 of the C and C++ compilers) is required, including specific
 patches.</p>
-<p>The Solaris SPARC patch list is:</p>
-<ul>
-<li>118683-05: SunOS 5.10: Patch for profiling libraries and assembler</li>
-<li>119963-21: SunOS 5.10: Shared library patch for C++</li>
-<li>120753-08: SunOS 5.10: Microtasking libraries (libmtsk) patch</li>
-<li>128228-09: Sun Studio 12 Update 1: Patch for Sun C++ Compiler</li>
-<li>141860-03: Sun Studio 12 Update 1: Patch for Compiler Common patch for Sun C
-C++ F77 F95</li>
-<li>141861-05: Sun Studio 12 Update 1: Patch for Sun C Compiler</li>
-<li>142371-01: Sun Studio 12.1 Update 1: Patch for dbx</li>
-<li>143384-02: Sun Studio 12 Update 1: Patch for debuginfo handling</li>
-<li>143385-02: Sun Studio 12 Update 1: Patch for Compiler Common patch for Sun C
-C++ F77 F95</li>
-<li>142369-01: Sun Studio 12.1: Patch for Performance Analyzer Tools</li>
-</ul>
-<p>The Solaris X86 patch list is:</p>
-<ul>
-<li>119961-07: SunOS 5.10_x86, x64, Patch for profiling libraries and assembler</li>
-<li>119964-21: SunOS 5.10_x86: Shared library patch for C++_x86</li>
-<li>120754-08: SunOS 5.10_x86: Microtasking libraries (libmtsk) patch</li>
-<li>141858-06: Sun Studio 12 Update 1_x86: Sun Compiler Common patch for x86
-backend</li>
-<li>128229-09: Sun Studio 12 Update 1_x86: Patch for C++ Compiler</li>
-<li>142363-05: Sun Studio 12 Update 1_x86: Patch for C Compiler</li>
-<li>142368-01: Sun Studio 12.1_x86: Patch for Performance Analyzer Tools</li>
-</ul>
+<p>The Solaris Studio installation should contain at least these packages:</p>
+<blockquote>
+<p><table border="1">
+<thead>
+<tr>
+<td><strong>Package</strong></td>
+<td><strong>Version</strong></td>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>developer/solarisstudio-124/backend</td>
+<td>12.4-1.0.6.0</td>
+</tr>
+<tr>
+<td>developer/solarisstudio-124/c++</td>
+<td>12.4-1.0.10.0</td>
+</tr>
+<tr>
+<td>developer/solarisstudio-124/cc</td>
+<td>12.4-1.0.4.0</td>
+</tr>
+<tr>
+<td>developer/solarisstudio-124/library/c++-libs</td>
+<td>12.4-1.0.10.0</td>
+</tr>
+<tr>
+<td>developer/solarisstudio-124/library/math-libs</td>
+<td>12.4-1.0.0.1</td>
+</tr>
+<tr>
+<td>developer/solarisstudio-124/library/studio-gccrt</td>
+<td>12.4-1.0.0.1</td>
+</tr>
+<tr>
+<td>developer/solarisstudio-124/studio-common</td>
+<td>12.4-1.0.0.1</td>
+</tr>
+<tr>
+<td>developer/solarisstudio-124/studio-ja</td>
+<td>12.4-1.0.0.1</td>
+</tr>
+<tr>
+<td>developer/solarisstudio-124/studio-legal</td>
+<td>12.4-1.0.0.1</td>
+</tr>
+<tr>
+<td>developer/solarisstudio-124/studio-zhCN</td>
+<td>12.4-1.0.0.1</td>
+</tr>
+</tbody>
+</table></p>
+</blockquote>
+<p>In particular backend 12.4-1.0.6.0 contains a critical patch for the sparc
+version.</p>
 <p>Place the <code>bin</code> directory in <code>PATH</code>.</p>
@@ -1144,10 +1170,6 @@ where the resulting bits can be used.</p>
 <p>With Linux, it was just a matter of picking a stable distribution that is a
 good representative for Linux in general.</p>
-<p><strong>NOTE: We expect a change here from Fedora 9 to something else, but it has not
-been completely determined yet, possibly Ubuntu 12.04 X64, unbiased community
-feedback would be welcome on what a good choice would be here.</strong></p>
 <p>It is understood that most developers will NOT be using these specific
 versions, and in fact creating these specific versions may be difficult due to
 the age of some of this software. It is expected that developers are more often

@@ -1176,7 +1198,7 @@ so that they can be dealt with accordingly.</p>
 <tr>
 <td>Linux X86 (32-bit) and X64 (64-bit)</td>
 <td>Oracle Enterprise Linux 6.4</td>
-<td>gcc 4.8.2 </td>
+<td>gcc 4.9.2 </td>
 <td>JDK 8</td>
 <td>2 or more</td>
 <td>1 GB</td>

@@ -1184,8 +1206,8 @@ so that they can be dealt with accordingly.</p>
 </tr>
 <tr>
 <td>Solaris SPARCV9 (64-bit)</td>
-<td>Solaris 10 Update 10</td>
-<td>Studio 12 Update 3 + patches</td>
+<td>Solaris 11 Update 1</td>
+<td>Studio 12 Update 4 + patches</td>
 <td>JDK 8</td>
 <td>4 or more</td>
 <td>4 GB</td>

@@ -1193,8 +1215,8 @@ so that they can be dealt with accordingly.</p>
 </tr>
 <tr>
 <td>Solaris X64 (64-bit)</td>
-<td>Solaris 10 Update 10</td>
-<td>Studio 12 Update 3 + patches</td>
+<td>Solaris 11 Update 1</td>
+<td>Studio 12 Update 4 + patches</td>
 <td>JDK 8</td>
 <td>4 or more</td>
 <td>4 GB</td>

@@ -1221,7 +1243,7 @@ so that they can be dealt with accordingly.</p>
 <tr>
 <td>Mac OS X X64 (64-bit)</td>
 <td>Mac OS X 10.9 "Mavericks"</td>
-<td>XCode 5.1.1 or newer</td>
+<td>Xcode 6.3 or newer</td>
 <td>JDK 8</td>
 <td>2 or more</td>
 <td>4 GB</td>


@@ -215,9 +215,7 @@ And for specific systems:
 * **Mac OS X**
-Install [XCode 4.5.2](https://developer.apple.com/xcode/) and also
-install the "Command line tools" found under the preferences pane
-"Downloads"
+Install [XCode 6.3](https://developer.apple.com/xcode/)
 <a name="linux"></a>
 #### Linux
@@ -239,36 +237,66 @@ OpenJDK.
 <a name="studio"></a>
 ##### Studio Compilers
-At a minimum, the [Studio 12 Update 1 Compilers](http://www.oracle.com/
+At a minimum, the [Studio 12 Update 4 Compilers](http://www.oracle.com/
 technetwork/server-storage/solarisstudio/downloads/index.htm) (containing
-version 5.10 of the C and C++ compilers) is required, including specific
+version 5.13 of the C and C++ compilers) is required, including specific
 patches.
-The Solaris SPARC patch list is:
-* 118683-05: SunOS 5.10: Patch for profiling libraries and assembler
-* 119963-21: SunOS 5.10: Shared library patch for C++
-* 120753-08: SunOS 5.10: Microtasking libraries (libmtsk) patch
-* 128228-09: Sun Studio 12 Update 1: Patch for Sun C++ Compiler
-* 141860-03: Sun Studio 12 Update 1: Patch for Compiler Common patch for Sun C
-C++ F77 F95
-* 141861-05: Sun Studio 12 Update 1: Patch for Sun C Compiler
-* 142371-01: Sun Studio 12.1 Update 1: Patch for dbx
-* 143384-02: Sun Studio 12 Update 1: Patch for debuginfo handling
-* 143385-02: Sun Studio 12 Update 1: Patch for Compiler Common patch for Sun C
-C++ F77 F95
-* 142369-01: Sun Studio 12.1: Patch for Performance Analyzer Tools
-The Solaris X86 patch list is:
-* 119961-07: SunOS 5.10_x86, x64, Patch for profiling libraries and assembler
-* 119964-21: SunOS 5.10_x86: Shared library patch for C++\_x86
-* 120754-08: SunOS 5.10_x86: Microtasking libraries (libmtsk) patch
-* 141858-06: Sun Studio 12 Update 1_x86: Sun Compiler Common patch for x86
-backend
-* 128229-09: Sun Studio 12 Update 1_x86: Patch for C++ Compiler
-* 142363-05: Sun Studio 12 Update 1_x86: Patch for C Compiler
-* 142368-01: Sun Studio 12.1_x86: Patch for Performance Analyzer Tools
+The Solaris Studio installation should contain at least these packages:
+> <table border="1">
+<thead>
+<tr>
+<td>**Package**</td>
+<td>**Version**</td>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>developer/solarisstudio-124/backend</td>
+<td>12.4-1.0.6.0</td>
+</tr>
+<tr>
+<td>developer/solarisstudio-124/c++</td>
+<td>12.4-1.0.10.0</td>
+</tr>
+<tr>
+<td>developer/solarisstudio-124/cc</td>
+<td>12.4-1.0.4.0</td>
+</tr>
+<tr>
+<td>developer/solarisstudio-124/library/c++-libs</td>
+<td>12.4-1.0.10.0</td>
+</tr>
+<tr>
+<td>developer/solarisstudio-124/library/math-libs</td>
+<td>12.4-1.0.0.1</td>
+</tr>
+<tr>
+<td>developer/solarisstudio-124/library/studio-gccrt</td>
+<td>12.4-1.0.0.1</td>
+</tr>
+<tr>
+<td>developer/solarisstudio-124/studio-common</td>
+<td>12.4-1.0.0.1</td>
+</tr>
+<tr>
+<td>developer/solarisstudio-124/studio-ja</td>
+<td>12.4-1.0.0.1</td>
+</tr>
+<tr>
+<td>developer/solarisstudio-124/studio-legal</td>
+<td>12.4-1.0.0.1</td>
+</tr>
+<tr>
+<td>developer/solarisstudio-124/studio-zhCN</td>
+<td>12.4-1.0.0.1</td>
+</tr>
+</tbody>
+</table>
+In particular backend 12.4-1.0.6.0 contains a critical patch for the sparc
+version.
 Place the `bin` directory in `PATH`.
@@ -1044,10 +1072,6 @@ where the resulting bits can be used.
 With Linux, it was just a matter of picking a stable distribution that is a
 good representative for Linux in general.
-**NOTE: We expect a change here from Fedora 9 to something else, but it has not
-been completely determined yet, possibly Ubuntu 12.04 X64, unbiased community
-feedback would be welcome on what a good choice would be here.**
 It is understood that most developers will NOT be using these specific
 versions, and in fact creating these specific versions may be difficult due to
 the age of some of this software. It is expected that developers are more often

@@ -1075,7 +1099,7 @@ so that they can be dealt with accordingly.
 <tr>
 <td>Linux X86 (32-bit) and X64 (64-bit)</td>
 <td>Oracle Enterprise Linux 6.4</td>
-<td>gcc 4.8.2 </td>
+<td>gcc 4.9.2 </td>
 <td>JDK 8</td>
 <td>2 or more</td>
 <td>1 GB</td>

@@ -1083,8 +1107,8 @@ so that they can be dealt with accordingly.
 </tr>
 <tr>
 <td>Solaris SPARCV9 (64-bit)</td>
-<td>Solaris 10 Update 10</td>
-<td>Studio 12 Update 3 + patches</td>
+<td>Solaris 11 Update 1</td>
+<td>Studio 12 Update 4 + patches</td>
 <td>JDK 8</td>
 <td>4 or more</td>
 <td>4 GB</td>

@@ -1092,8 +1116,8 @@ so that they can be dealt with accordingly.
 </tr>
 <tr>
 <td>Solaris X64 (64-bit)</td>
-<td>Solaris 10 Update 10</td>
-<td>Studio 12 Update 3 + patches</td>
+<td>Solaris 11 Update 1</td>
+<td>Studio 12 Update 4 + patches</td>
 <td>JDK 8</td>
 <td>4 or more</td>
 <td>4 GB</td>

@@ -1120,7 +1144,7 @@ so that they can be dealt with accordingly.
 <tr>
 <td>Mac OS X X64 (64-bit)</td>
 <td>Mac OS X 10.9 "Mavericks"</td>
-<td>XCode 5.1.1 or newer</td>
+<td>Xcode 6.3 or newer</td>
 <td>JDK 8</td>
 <td>2 or more</td>
 <td>4 GB</td>


@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it

@@ -149,6 +149,19 @@ AC_DEFUN_ONCE([BPERF_SETUP_BUILD_JOBS],
 AC_SUBST(JOBS)
 ])
+AC_DEFUN_ONCE([BPERF_SETUP_TEST_JOBS],
+[
+# The number of test jobs will be chosen automatically if TEST_JOBS is 0
+AC_ARG_WITH(test-jobs, [AS_HELP_STRING([--with-test-jobs],
+[number of parallel tests jobs to run @<:@based on build jobs@:>@])])
+if test "x$with_test_jobs" = x; then
+TEST_JOBS=0
+else
+TEST_JOBS=$with_test_jobs
+fi
+AC_SUBST(TEST_JOBS)
+])
 AC_DEFUN([BPERF_SETUP_CCACHE],
 [
 AC_ARG_ENABLE([ccache],
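A possible invocation of the new option, for illustration only (the value 4 is arbitrary; leaving the option off keeps TEST_JOBS=0, which lets the build derive a value from the build jobs):

```sh
# Configure with an explicit test parallelism (illustrative value).
bash ./configure --with-test-jobs=4
```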


@@ -44,6 +44,7 @@ m4_include([boot-jdk.m4])
 m4_include([build-performance.m4])
 m4_include([flags.m4])
 m4_include([help.m4])
+m4_include([hotspot.m4])
 m4_include([jdk-options.m4])
 m4_include([jdk-version.m4])
 m4_include([libraries.m4])

@@ -94,9 +95,10 @@ JDKOPT_SETUP_OPEN_OR_CUSTOM
 # These are needed to be able to create a configuration name (and thus the output directory)
 JDKOPT_SETUP_JDK_VARIANT
-JDKOPT_SETUP_JVM_INTERPRETER
-JDKOPT_SETUP_JVM_VARIANTS
+HOTSPOT_SETUP_JVM_INTERPRETER
+HOTSPOT_SETUP_JVM_VARIANTS
 JDKOPT_SETUP_DEBUG_LEVEL
+HOTSPOT_SETUP_DEBUG_LEVEL
 # With basic setup done, call the custom early hook.
 CUSTOM_EARLY_HOOK

@@ -132,6 +134,7 @@ BASIC_SETUP_DEFAULT_MAKE_TARGET
 # We need build & target for this.
 JDKOPT_SETUP_JDK_OPTIONS
+HOTSPOT_SETUP_HOTSPOT_OPTIONS
 JDKVER_SETUP_JDK_VERSION_NUMBERS
 ###############################################################################

@@ -220,7 +223,7 @@ LIB_SETUP_LIBRARIES
 #
 ###############################################################################
-JDKOPT_SETUP_BUILD_TWEAKS
+HOTSPOT_SETUP_BUILD_TWEAKS
 JDKOPT_DETECT_INTREE_EC
 ###############################################################################

@@ -233,6 +236,7 @@ JDKOPT_DETECT_INTREE_EC
 BPERF_SETUP_BUILD_CORES
 BPERF_SETUP_BUILD_MEMORY
 BPERF_SETUP_BUILD_JOBS
+BPERF_SETUP_TEST_JOBS
 # Setup arguments for the boot jdk (after cores and memory have been setup)
 BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS


@@ -120,13 +120,17 @@ AC_DEFUN([FLAGS_SETUP_SYSROOT_FLAGS],
 AC_DEFUN_ONCE([FLAGS_SETUP_INIT_FLAGS],
 [
-# Option used to tell the compiler whether to create 32- or 64-bit executables
+# COMPILER_TARGET_BITS_FLAG : option for selecting 32- or 64-bit output
+# COMPILER_COMMAND_FILE_FLAG : option for passing a command file to the compiler
 if test "x$TOOLCHAIN_TYPE" = xxlc; then
 COMPILER_TARGET_BITS_FLAG="-q"
+COMPILER_COMMAND_FILE_FLAG="-f"
 else
 COMPILER_TARGET_BITS_FLAG="-m"
+COMPILER_COMMAND_FILE_FLAG="@"
 fi
 AC_SUBST(COMPILER_TARGET_BITS_FLAG)
+AC_SUBST(COMPILER_COMMAND_FILE_FLAG)
 # FIXME: figure out if we should select AR flags depending on OS or toolchain.
 if test "x$OPENJDK_TARGET_OS" = xmacosx; then

@@ -226,37 +230,38 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_LIBS],
 else
 SHARED_LIBRARY_FLAGS="-dynamiclib -compatibility_version 1.0.0 -current_version 1.0.0 $PICFLAG"
 fi
-SET_EXECUTABLE_ORIGIN='-Xlinker -rpath -Xlinker @loader_path/.'
+SET_EXECUTABLE_ORIGIN='-Wl,-rpath,@loader_path/.'
 SET_SHARED_LIBRARY_ORIGIN="$SET_EXECUTABLE_ORIGIN"
-SET_SHARED_LIBRARY_NAME='-Xlinker -install_name -Xlinker @rpath/[$]1'
+SET_SHARED_LIBRARY_NAME='-Wl,-install_name,@rpath/[$]1'
 SET_SHARED_LIBRARY_MAPFILE=''
 else
 # Default works for linux, might work on other platforms as well.
 SHARED_LIBRARY_FLAGS='-shared'
-SET_EXECUTABLE_ORIGIN='-Xlinker -rpath -Xlinker \$$$$ORIGIN[$]1'
-SET_SHARED_LIBRARY_ORIGIN="-Xlinker -z -Xlinker origin $SET_EXECUTABLE_ORIGIN"
-SET_SHARED_LIBRARY_NAME='-Xlinker -soname=[$]1'
-SET_SHARED_LIBRARY_MAPFILE='-Xlinker -version-script=[$]1'
+SET_EXECUTABLE_ORIGIN='-Wl,-rpath,\$$$$ORIGIN[$]1'
+SET_SHARED_LIBRARY_ORIGIN="-Wl,-z,origin $SET_EXECUTABLE_ORIGIN"
+SET_SHARED_LIBRARY_NAME='-Wl,-soname=[$]1'
+SET_SHARED_LIBRARY_MAPFILE='-Wl,-version-script=[$]1'
 fi
 elif test "x$TOOLCHAIN_TYPE" = xclang; then
-PICFLAG=''
 C_FLAG_REORDER=''
 CXX_FLAG_REORDER=''
 if test "x$OPENJDK_TARGET_OS" = xmacosx; then
 # Linking is different on MacOSX
+PICFLAG=''
 SHARED_LIBRARY_FLAGS="-dynamiclib -compatibility_version 1.0.0 -current_version 1.0.0 $PICFLAG"
-SET_EXECUTABLE_ORIGIN='-Xlinker -rpath -Xlinker @loader_path/.'
+SET_EXECUTABLE_ORIGIN='-Wl,-rpath,@loader_path/.'
 SET_SHARED_LIBRARY_ORIGIN="$SET_EXECUTABLE_ORIGIN"
-SET_SHARED_LIBRARY_NAME='-Xlinker -install_name -Xlinker @rpath/[$]1'
+SET_SHARED_LIBRARY_NAME='-Wl,-install_name,@rpath/[$]1'
 SET_SHARED_LIBRARY_MAPFILE=''
 else
 # Default works for linux, might work on other platforms as well.
+PICFLAG='-fPIC'
 SHARED_LIBRARY_FLAGS='-shared'
-SET_EXECUTABLE_ORIGIN='-Xlinker -rpath -Xlinker \$$$$ORIGIN[$]1'
-SET_SHARED_LIBRARY_ORIGIN="-Xlinker -z -Xlinker origin $SET_EXECUTABLE_ORIGIN"
-SET_SHARED_LIBRARY_NAME='-Xlinker -soname=[$]1'
-SET_SHARED_LIBRARY_MAPFILE='-Xlinker -version-script=[$]1'
+SET_EXECUTABLE_ORIGIN='-Wl,-rpath,\$$$$ORIGIN[$]1'
+SET_SHARED_LIBRARY_ORIGIN="-Wl,-z,origin $SET_EXECUTABLE_ORIGIN"
+SET_SHARED_LIBRARY_NAME='-Wl,-soname=[$]1'
+SET_SHARED_LIBRARY_MAPFILE='-Wl,-version-script=[$]1'
 fi
 elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then
 PICFLAG="-KPIC"

@@ -265,7 +270,7 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_LIBS],
 SHARED_LIBRARY_FLAGS="-G"
 SET_EXECUTABLE_ORIGIN='-R\$$$$ORIGIN[$]1'
 SET_SHARED_LIBRARY_ORIGIN="$SET_EXECUTABLE_ORIGIN"
-SET_SHARED_LIBRARY_NAME=''
+SET_SHARED_LIBRARY_NAME='-h [$]1'
 SET_SHARED_LIBRARY_MAPFILE='-M[$]1'
 elif test "x$TOOLCHAIN_TYPE" = xxlc; then
 PICFLAG="-qpic=large"

@@ -280,7 +285,7 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_LIBS],
 PICFLAG=""
 C_FLAG_REORDER=''
 CXX_FLAG_REORDER=''
-SHARED_LIBRARY_FLAGS="-LD"
+SHARED_LIBRARY_FLAGS="-dll"
 SET_EXECUTABLE_ORIGIN=''
 SET_SHARED_LIBRARY_ORIGIN=''
 SET_SHARED_LIBRARY_NAME=''

@@ -293,6 +298,7 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_LIBS],
 AC_SUBST(SET_SHARED_LIBRARY_ORIGIN)
 AC_SUBST(SET_SHARED_LIBRARY_NAME)
 AC_SUBST(SET_SHARED_LIBRARY_MAPFILE)
+AC_SUBST(SHARED_LIBRARY_FLAGS)
 if test "x$OPENJDK_TARGET_OS" = xsolaris; then
 CFLAGS_JDK="${CFLAGS_JDK} -D__solaris__"

@@ -573,6 +579,25 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK],
 CFLAGS_JDK="${CFLAGS_JDK} -fno-strict-aliasing"
 ;;
 esac
+elif test "x$TOOLCHAIN_TYPE" = xclang; then
+if test "x$OPENJDK_TARGET_OS" = xlinux; then
+if test "x$OPENJDK_TARGET_CPU" = xx86; then
+# Force compatibility with i586 on 32 bit intel platforms.
+COMMON_CCXXFLAGS="${COMMON_CCXXFLAGS} -march=i586"
+fi
+COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS $COMMON_CCXXFLAGS_JDK -Wall -Wextra -Wno-unused -Wno-unused-parameter -Wformat=2 \
+-pipe -D_GNU_SOURCE -D_REENTRANT -D_LARGEFILE64_SOURCE"
+case $OPENJDK_TARGET_CPU_ARCH in
+ppc )
+# on ppc we don't prevent gcc to omit frame pointer but do prevent strict aliasing
+CFLAGS_JDK="${CFLAGS_JDK} -fno-strict-aliasing"
+;;
+* )
+COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -fno-omit-frame-pointer"
+CFLAGS_JDK="${CFLAGS_JDK} -fno-strict-aliasing"
+;;
+esac
+fi
 elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then
 COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS $COMMON_CCXXFLAGS_JDK -DTRACING -DMACRO_MEMSYS_OPS -DBREAKPTS"
 if test "x$OPENJDK_TARGET_CPU_ARCH" = xx86; then

@@ -748,17 +773,17 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK],
 # If this is a --hash-style=gnu system, use --hash-style=both, why?
 # We have previously set HAS_GNU_HASH if this is the case
 if test -n "$HAS_GNU_HASH"; then
-LDFLAGS_JDK="${LDFLAGS_JDK} -Xlinker --hash-style=both"
+LDFLAGS_JDK="${LDFLAGS_JDK} -Wl,--hash-style=both"
 fi
 if test "x$OPENJDK_TARGET_OS" = xlinux; then
 # And since we now know that the linker is gnu, then add -z defs, to forbid
 # undefined symbols in object files.
-LDFLAGS_JDK="${LDFLAGS_JDK} -Xlinker -z -Xlinker defs"
+LDFLAGS_JDK="${LDFLAGS_JDK} -Wl,-z,defs"
 case $DEBUG_LEVEL in
 release )
 # tell linker to optimize libraries.
 # Should this be supplied to the OSS linker as well?
-LDFLAGS_JDK="${LDFLAGS_JDK} -Xlinker -O1"
+LDFLAGS_JDK="${LDFLAGS_JDK} -Wl,-O1"
 ;;
 slowdebug )
 if test "x$HAS_LINKER_NOW" = "xtrue"; then

@@ -785,7 +810,7 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK],
 esac
 fi
 elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then
-LDFLAGS_JDK="$LDFLAGS_JDK -z defs -xildoff -ztext"
+LDFLAGS_JDK="$LDFLAGS_JDK -Wl,-z,defs -xildoff -ztext"
 LDFLAGS_CXX_JDK="$LDFLAGS_CXX_JDK -norunpath -xnolib"
 elif test "x$TOOLCHAIN_TYPE" = xxlc; then
 LDFLAGS_JDK="${LDFLAGS_JDK} -brtl -bnolibpath -bexpall -bernotok"

@@ -803,17 +828,19 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK],
 fi
 LDFLAGS_JDKEXE="${LDFLAGS_JDKEXE} /STACK:$LDFLAGS_STACK_SIZE"
 elif test "x$OPENJDK_TARGET_OS" = xlinux; then
-LDFLAGS_JDKEXE="$LDFLAGS_JDKEXE -Xlinker --allow-shlib-undefined"
+LDFLAGS_JDKEXE="$LDFLAGS_JDKEXE -Wl,--allow-shlib-undefined"
 fi
 # Customize LDFLAGS for libs
 LDFLAGS_JDKLIB="${LDFLAGS_JDK}"
+LDFLAGS_JDKLIB="${LDFLAGS_JDKLIB} ${SHARED_LIBRARY_FLAGS}"
 if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
-LDFLAGS_JDKLIB="${LDFLAGS_JDKLIB} -dll -libpath:${OUTPUT_ROOT}/support/modules_libs/java.base"
+LDFLAGS_JDKLIB="${LDFLAGS_JDKLIB} \
+-libpath:${OUTPUT_ROOT}/support/modules_libs/java.base"
 JDKLIB_LIBS=""
 else
-LDFLAGS_JDKLIB="${LDFLAGS_JDKLIB} ${SHARED_LIBRARY_FLAGS} \
--L${OUTPUT_ROOT}/support/modules_libs/java.base${OPENJDK_TARGET_CPU_LIBDIR}"
+LDFLAGS_JDKLIB="${LDFLAGS_JDKLIB} \
+-L${OUTPUT_ROOT}/support/modules_libs/java.base${OPENJDK_TARGET_CPU_LIBDIR}"
 # On some platforms (mac) the linker warns about non existing -L dirs.
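Two notes on the flag spellings introduced above, with small illustrations that are not part of the patch. First, the move from `-Xlinker` to `-Wl` is equivalent syntax for forwarding options through a gcc-compatible driver; `-Wl,a,b` splits the argument at the commas:

```sh
# Both invocations pass "-rpath $ORIGIN" to the underlying linker.
gcc -shared -o libfoo.so foo.o -Xlinker -rpath -Xlinker '$ORIGIN'
gcc -shared -o libfoo.so foo.o -Wl,-rpath,'$ORIGIN'
```

Second, the new COMPILER_COMMAND_FILE_FLAG records how a toolchain reads extra options from a file; a hedged sketch of the two spellings configured above (opts.txt is a placeholder file name):

```sh
printf -- '-O2 -DNDEBUG\n' > opts.txt
gcc @opts.txt -c foo.c      # gcc/clang style: flag is "@"
# xlc -f opts.txt -c foo.c  # xlc style: flag is "-f"
```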

File diff suppressed because it is too large.


@@ -86,7 +86,11 @@ Then run configure with '--with-freetype-src=<freetype_src>'. This will
 automatically build the freetype library into '<freetype_src>/lib64' for 64-bit
 builds or into '<freetype_src>/lib32' for 32-bit builds.
 Afterwards you can always use '--with-freetype-include=<freetype_src>/include'
-and '--with-freetype-lib=<freetype_src>/lib[32|64]' for other builds."
+and '--with-freetype-lib=<freetype_src>/lib[32|64]' for other builds.
+Alternatively you can unpack the sources like this to use the default directory:
+tar --one-top-level=$HOME/freetype --strip-components=1 -xzf freetype-2.5.3.tar.gz"
 ;;
 esac
 }
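For reference, the two routes this help text describes for pointing configure at a local freetype, sketched with placeholder paths:

```sh
# Build freetype as part of configure, straight from an unpacked source tree:
bash ./configure --with-freetype-src=$HOME/freetype

# Or reuse a previously built copy (64-bit layout shown):
bash ./configure --with-freetype-include=$HOME/freetype/include \
    --with-freetype-lib=$HOME/freetype/lib64
```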

common/autoconf/hotspot.m4 (new file)

@@ -0,0 +1,268 @@
#
# Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
###############################################################################
# Check which interpreter of the JVM we want to build.
# Currently we have:
# template: Template interpreter (the default)
# cpp : C++ interpreter
AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_INTERPRETER],
[
AC_ARG_WITH([jvm-interpreter], [AS_HELP_STRING([--with-jvm-interpreter],
[JVM interpreter to build (template, cpp) @<:@template@:>@])])
AC_MSG_CHECKING([which interpreter of the JVM to build])
if test "x$with_jvm_interpreter" = x; then
JVM_INTERPRETER="template"
else
JVM_INTERPRETER="$with_jvm_interpreter"
fi
AC_MSG_RESULT([$JVM_INTERPRETER])
if test "x$JVM_INTERPRETER" != xtemplate && test "x$JVM_INTERPRETER" != xcpp; then
AC_MSG_ERROR([The available JVM interpreters are: template, cpp])
fi
AC_SUBST(JVM_INTERPRETER)
])
###############################################################################
# Check which variants of the JVM that we want to build.
# Currently we have:
# server: normal interpreter and a C2 or tiered C1/C2 compiler
# client: normal interpreter and C1 (no C2 compiler) (only 32-bit platforms)
# minimal1: reduced form of client with optional VM services and features stripped out
# zero: no machine code interpreter, no compiler
# zeroshark: zero interpreter and shark/llvm compiler backend
# core: interpreter only, no compiler (only works on some platforms)
AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_VARIANTS],
[
AC_MSG_CHECKING([which variants of the JVM to build])
AC_ARG_WITH([jvm-variants], [AS_HELP_STRING([--with-jvm-variants],
[JVM variants (separated by commas) to build (server, client, minimal1, zero, zeroshark, core) @<:@server@:>@])])
if test "x$with_jvm_variants" = x; then
with_jvm_variants="server"
fi
JVM_VARIANTS=",$with_jvm_variants,"
TEST_VARIANTS=`$ECHO "$JVM_VARIANTS" | $SED -e 's/server,//' -e 's/client,//' -e 's/minimal1,//' -e 's/zero,//' -e 's/zeroshark,//' -e 's/core,//'`
if test "x$TEST_VARIANTS" != "x,"; then
AC_MSG_ERROR([The available JVM variants are: server, client, minimal1, zero, zeroshark, core])
fi
AC_MSG_RESULT([$with_jvm_variants])
JVM_VARIANT_SERVER=`$ECHO "$JVM_VARIANTS" | $SED -e '/,server,/!s/.*/false/g' -e '/,server,/s/.*/true/g'`
JVM_VARIANT_CLIENT=`$ECHO "$JVM_VARIANTS" | $SED -e '/,client,/!s/.*/false/g' -e '/,client,/s/.*/true/g'`
JVM_VARIANT_MINIMAL1=`$ECHO "$JVM_VARIANTS" | $SED -e '/,minimal1,/!s/.*/false/g' -e '/,minimal1,/s/.*/true/g'`
JVM_VARIANT_ZERO=`$ECHO "$JVM_VARIANTS" | $SED -e '/,zero,/!s/.*/false/g' -e '/,zero,/s/.*/true/g'`
JVM_VARIANT_ZEROSHARK=`$ECHO "$JVM_VARIANTS" | $SED -e '/,zeroshark,/!s/.*/false/g' -e '/,zeroshark,/s/.*/true/g'`
JVM_VARIANT_CORE=`$ECHO "$JVM_VARIANTS" | $SED -e '/,core,/!s/.*/false/g' -e '/,core,/s/.*/true/g'`
if test "x$JVM_VARIANT_CLIENT" = xtrue; then
if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
AC_MSG_ERROR([You cannot build a client JVM for a 64-bit machine.])
fi
fi
if test "x$JVM_VARIANT_MINIMAL1" = xtrue; then
if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
AC_MSG_ERROR([You cannot build a minimal JVM for a 64-bit machine.])
fi
fi
# Replace the commas with AND for use in the build directory name.
ANDED_JVM_VARIANTS=`$ECHO "$JVM_VARIANTS" | $SED -e 's/^,//' -e 's/,$//' -e 's/,/AND/g'`
COUNT_VARIANTS=`$ECHO "$JVM_VARIANTS" | $SED -e 's/server,/1/' -e 's/client,/1/' -e 's/minimal1,/1/' -e 's/zero,/1/' -e 's/zeroshark,/1/' -e 's/core,/1/'`
if test "x$COUNT_VARIANTS" != "x,1"; then
BUILDING_MULTIPLE_JVM_VARIANTS=yes
else
BUILDING_MULTIPLE_JVM_VARIANTS=no
fi
if test "x$JVM_VARIANT_ZERO" = xtrue && test "x$BUILDING_MULTIPLE_JVM_VARIANTS" = xyes; then
AC_MSG_ERROR([You cannot build multiple variants with zero.])
fi
AC_SUBST(JVM_VARIANTS)
AC_SUBST(JVM_VARIANT_SERVER)
AC_SUBST(JVM_VARIANT_CLIENT)
AC_SUBST(JVM_VARIANT_MINIMAL1)
AC_SUBST(JVM_VARIANT_ZERO)
AC_SUBST(JVM_VARIANT_ZEROSHARK)
AC_SUBST(JVM_VARIANT_CORE)
INCLUDE_SA=true
if test "x$JVM_VARIANT_ZERO" = xtrue ; then
INCLUDE_SA=false
fi
if test "x$JVM_VARIANT_ZEROSHARK" = xtrue ; then
INCLUDE_SA=false
fi
if test "x$OPENJDK_TARGET_OS" = xaix ; then
INCLUDE_SA=false
fi
if test "x$OPENJDK_TARGET_CPU" = xaarch64; then
INCLUDE_SA=false
fi
AC_SUBST(INCLUDE_SA)
if test "x$OPENJDK_TARGET_OS" = "xmacosx"; then
MACOSX_UNIVERSAL="true"
fi
AC_SUBST(MACOSX_UNIVERSAL)
])
###############################################################################
# Setup legacy vars/targets and new vars to deal with different debug levels.
#
# release: no debug information, all optimizations, no asserts.
# optimized: no debug information, all optimizations, no asserts, HotSpot target is 'optimized'.
# fastdebug: debug information (-g), all optimizations, all asserts
# slowdebug: debug information (-g), no optimizations, all asserts
#
AC_DEFUN_ONCE([HOTSPOT_SETUP_DEBUG_LEVEL],
[
case $DEBUG_LEVEL in
release )
VARIANT="OPT"
FASTDEBUG="false"
DEBUG_CLASSFILES="false"
BUILD_VARIANT_RELEASE=""
HOTSPOT_DEBUG_LEVEL="product"
HOTSPOT_EXPORT="product"
;;
fastdebug )
VARIANT="DBG"
FASTDEBUG="true"
DEBUG_CLASSFILES="true"
BUILD_VARIANT_RELEASE="-fastdebug"
HOTSPOT_DEBUG_LEVEL="fastdebug"
HOTSPOT_EXPORT="fastdebug"
;;
slowdebug )
VARIANT="DBG"
FASTDEBUG="false"
DEBUG_CLASSFILES="true"
BUILD_VARIANT_RELEASE="-debug"
HOTSPOT_DEBUG_LEVEL="debug"
HOTSPOT_EXPORT="debug"
;;
optimized )
VARIANT="OPT"
FASTDEBUG="false"
DEBUG_CLASSFILES="false"
BUILD_VARIANT_RELEASE="-optimized"
HOTSPOT_DEBUG_LEVEL="optimized"
HOTSPOT_EXPORT="optimized"
;;
esac
# The debug level 'optimized' is a little special because it is currently only
# applicable to the HotSpot build where it means to build a completely
# optimized version of the VM without any debugging code (like for the
# 'release' debug level which is called 'product' in the HotSpot build) but
# with the exception that it can contain additional code which is otherwise
# protected by '#ifndef PRODUCT' macros. These 'optimized' builds are used to
# test new and/or experimental features which are not intended for customer
# shipment. Because these new features need to be tested and benchmarked in
# real world scenarios, we want to build the containing JDK at the 'release'
# debug level.
if test "x$DEBUG_LEVEL" = xoptimized; then
DEBUG_LEVEL="release"
fi
#####
# Generate the legacy makefile targets for hotspot.
# The hotspot api for selecting the build artifacts, really, needs to be improved.
# JDK-7195896 will fix this on the hotspot side by using the JVM_VARIANT_* variables to
# determine what needs to be built. All we will need to set here is all_product, all_fastdebug etc
# But until then ...
HOTSPOT_TARGET=""
if test "x$JVM_VARIANT_SERVER" = xtrue; then
HOTSPOT_TARGET="$HOTSPOT_TARGET${HOTSPOT_DEBUG_LEVEL} "
fi
if test "x$JVM_VARIANT_CLIENT" = xtrue; then
HOTSPOT_TARGET="$HOTSPOT_TARGET${HOTSPOT_DEBUG_LEVEL}1 "
fi
if test "x$JVM_VARIANT_MINIMAL1" = xtrue; then
HOTSPOT_TARGET="$HOTSPOT_TARGET${HOTSPOT_DEBUG_LEVEL}minimal1 "
fi
if test "x$JVM_VARIANT_ZERO" = xtrue; then
HOTSPOT_TARGET="$HOTSPOT_TARGET${HOTSPOT_DEBUG_LEVEL}zero "
fi
if test "x$JVM_VARIANT_ZEROSHARK" = xtrue; then
HOTSPOT_TARGET="$HOTSPOT_TARGET${HOTSPOT_DEBUG_LEVEL}shark "
fi
if test "x$JVM_VARIANT_CORE" = xtrue; then
HOTSPOT_TARGET="$HOTSPOT_TARGET${HOTSPOT_DEBUG_LEVEL}core "
fi
HOTSPOT_TARGET="$HOTSPOT_TARGET docs export_$HOTSPOT_EXPORT"
# On Macosx universal binaries are produced, but they only contain
# 64 bit intel. This invalidates control of which jvms are built
# from configure, but only server is valid anyway. Fix this
# when hotspot makefiles are rewritten.
if test "x$MACOSX_UNIVERSAL" = xtrue; then
HOTSPOT_TARGET=universal_${HOTSPOT_EXPORT}
fi
#####
AC_SUBST(DEBUG_LEVEL)
AC_SUBST(VARIANT)
AC_SUBST(FASTDEBUG)
AC_SUBST(DEBUG_CLASSFILES)
AC_SUBST(BUILD_VARIANT_RELEASE)
])
AC_DEFUN_ONCE([HOTSPOT_SETUP_HOTSPOT_OPTIONS],
[
# Control whether Hotspot runs Queens test after build.
AC_ARG_ENABLE([hotspot-test-in-build], [AS_HELP_STRING([--enable-hotspot-test-in-build],
[run the Queens test after Hotspot build @<:@disabled@:>@])],,
[enable_hotspot_test_in_build=no])
if test "x$enable_hotspot_test_in_build" = "xyes"; then
TEST_IN_BUILD=true
else
TEST_IN_BUILD=false
fi
AC_SUBST(TEST_IN_BUILD)
])
AC_DEFUN_ONCE([HOTSPOT_SETUP_BUILD_TWEAKS],
[
HOTSPOT_MAKE_ARGS="$HOTSPOT_TARGET"
AC_SUBST(HOTSPOT_MAKE_ARGS)
])
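As a usage sketch of the macros in this new file (flag values illustrative, not from the patch): on a 32-bit target, configuring two variants at the fastdebug level shows how HOTSPOT_SETUP_DEBUG_LEVEL composes the legacy hotspot make targets.

```sh
# Illustrative configure run; --enable-debug selects the fastdebug level.
bash ./configure --with-jvm-variants=server,client --enable-debug
# HOTSPOT_SETUP_DEBUG_LEVEL then composes:
#   HOTSPOT_TARGET="fastdebug fastdebug1 docs export_fastdebug"
# (client builds are rejected on 64-bit targets by the checks above)
```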


@@ -23,19 +23,16 @@
 # questions.
 #
+###############################################################################
+# Check which variant of the JDK that we want to build.
+# Currently we have:
+# normal: standard edition
+# but the custom make system may add other variants
+#
+# Effectively the JDK variant gives a name to a specific set of
+# modules to compile into the JDK.
 AC_DEFUN_ONCE([JDKOPT_SETUP_JDK_VARIANT],
 [
-###############################################################################
-#
-# Check which variant of the JDK that we want to build.
-# Currently we have:
-# normal: standard edition
-# but the custom make system may add other variants
-#
-# Effectively the JDK variant gives a name to a specific set of
-# modules to compile into the JDK. In the future, these modules
-# might even be Jigsaw modules.
-#
 AC_MSG_CHECKING([which variant of the JDK to build])
 AC_ARG_WITH([jdk-variant], [AS_HELP_STRING([--with-jdk-variant],
 [JDK variant to build (normal) @<:@normal@:>@])])
@@ -51,138 +48,14 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_JDK_VARIANT],
 AC_MSG_RESULT([$JDK_VARIANT])
 ])
-AC_DEFUN_ONCE([JDKOPT_SETUP_JVM_INTERPRETER],
-[
-###############################################################################
-#
-# Check which interpreter of the JVM we want to build.
-# Currently we have:
-# template: Template interpreter (the default)
-# cpp : C++ interpreter
-AC_MSG_CHECKING([which interpreter of the JVM to build])
-AC_ARG_WITH([jvm-interpreter], [AS_HELP_STRING([--with-jvm-interpreter],
-[JVM interpreter to build (template, cpp) @<:@template@:>@])])
-if test "x$with_jvm_interpreter" = x; then
-with_jvm_interpreter="template"
-fi
-JVM_INTERPRETER="$with_jvm_interpreter"
-if test "x$JVM_INTERPRETER" != xtemplate && test "x$JVM_INTERPRETER" != xcpp; then
-AC_MSG_ERROR([The available JVM interpreters are: template, cpp])
-fi
-AC_SUBST(JVM_INTERPRETER)
-AC_MSG_RESULT([$with_jvm_interpreter])
-])
-AC_DEFUN_ONCE([JDKOPT_SETUP_JVM_VARIANTS],
-[
-###############################################################################
-#
-# Check which variants of the JVM that we want to build.
-# Currently we have:
-# server: normal interpreter and a tiered C1/C2 compiler
-# client: normal interpreter and C1 (no C2 compiler) (only 32-bit platforms)
-# minimal1: reduced form of client with optional VM services and features stripped out
-# kernel: kernel footprint JVM that passes the TCK without major performance problems,
-# ie normal interpreter and C1, only the serial GC, kernel jvmti etc
-# zero: no machine code interpreter, no compiler
-# zeroshark: zero interpreter and shark/llvm compiler backend
-# core: interpreter only, no compiler (only works on some platforms)
-AC_MSG_CHECKING([which variants of the JVM to build])
-AC_ARG_WITH([jvm-variants], [AS_HELP_STRING([--with-jvm-variants],
-[JVM variants (separated by commas) to build (server, client, minimal1, kernel, zero, zeroshark, core) @<:@server@:>@])])
-if test "x$with_jvm_variants" = x; then
-with_jvm_variants="server"
-fi
-JVM_VARIANTS=",$with_jvm_variants,"
-TEST_VARIANTS=`$ECHO "$JVM_VARIANTS" | $SED -e 's/server,//' -e 's/client,//' -e 's/minimal1,//' -e 's/kernel,//' -e 's/zero,//' -e 's/zeroshark,//' -e 's/core,//'`
-if test "x$TEST_VARIANTS" != "x,"; then
-AC_MSG_ERROR([The available JVM variants are: server, client, minimal1, kernel, zero, zeroshark, core])
-fi
-AC_MSG_RESULT([$with_jvm_variants])
-JVM_VARIANT_SERVER=`$ECHO "$JVM_VARIANTS" | $SED -e '/,server,/!s/.*/false/g' -e '/,server,/s/.*/true/g'`
-JVM_VARIANT_CLIENT=`$ECHO "$JVM_VARIANTS" | $SED -e '/,client,/!s/.*/false/g' -e '/,client,/s/.*/true/g'`
-JVM_VARIANT_MINIMAL1=`$ECHO "$JVM_VARIANTS" | $SED -e '/,minimal1,/!s/.*/false/g' -e '/,minimal1,/s/.*/true/g'`
-JVM_VARIANT_KERNEL=`$ECHO "$JVM_VARIANTS" | $SED -e '/,kernel,/!s/.*/false/g' -e '/,kernel,/s/.*/true/g'`
-JVM_VARIANT_ZERO=`$ECHO "$JVM_VARIANTS" | $SED -e '/,zero,/!s/.*/false/g' -e '/,zero,/s/.*/true/g'`
-JVM_VARIANT_ZEROSHARK=`$ECHO "$JVM_VARIANTS" | $SED -e '/,zeroshark,/!s/.*/false/g' -e '/,zeroshark,/s/.*/true/g'`
-JVM_VARIANT_CORE=`$ECHO "$JVM_VARIANTS" | $SED -e '/,core,/!s/.*/false/g' -e '/,core,/s/.*/true/g'`
-if test "x$JVM_VARIANT_CLIENT" = xtrue; then
-if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
-AC_MSG_ERROR([You cannot build a client JVM for a 64-bit machine.])
-fi
-fi
-if test "x$JVM_VARIANT_KERNEL" = xtrue; then
-if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
-AC_MSG_ERROR([You cannot build a kernel JVM for a 64-bit machine.])
-fi
-fi
-if test "x$JVM_VARIANT_MINIMAL1" = xtrue; then
-if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
-AC_MSG_ERROR([You cannot build a minimal JVM for a 64-bit machine.])
-fi
-fi
-# Replace the commas with AND for use in the build directory name.
-ANDED_JVM_VARIANTS=`$ECHO "$JVM_VARIANTS" | $SED -e 's/^,//' -e 's/,$//' -e 's/,/AND/g'`
-COUNT_VARIANTS=`$ECHO "$JVM_VARIANTS" | $SED -e 's/server,/1/' -e 's/client,/1/' -e 's/minimal1,/1/' -e 's/kernel,/1/' -e 's/zero,/1/' -e 's/zeroshark,/1/' -e 's/core,/1/'`
-if test "x$COUNT_VARIANTS" != "x,1"; then
-BUILDING_MULTIPLE_JVM_VARIANTS=yes
-else
-BUILDING_MULTIPLE_JVM_VARIANTS=no
-fi
-AC_SUBST(JVM_VARIANTS)
-AC_SUBST(JVM_VARIANT_SERVER)
-AC_SUBST(JVM_VARIANT_CLIENT)
-AC_SUBST(JVM_VARIANT_MINIMAL1)
-AC_SUBST(JVM_VARIANT_KERNEL)
-AC_SUBST(JVM_VARIANT_ZERO)
-AC_SUBST(JVM_VARIANT_ZEROSHARK)
-AC_SUBST(JVM_VARIANT_CORE)
-INCLUDE_SA=true
-if test "x$JVM_VARIANT_ZERO" = xtrue ; then
-INCLUDE_SA=false
-fi
-if test "x$JVM_VARIANT_ZEROSHARK" = xtrue ; then
-INCLUDE_SA=false
-fi
-if test "x$OPENJDK_TARGET_OS" = xaix ; then
-INCLUDE_SA=false
-fi
-if test "x$OPENJDK_TARGET_CPU" = xaarch64; then
-INCLUDE_SA=false
-fi
-AC_SUBST(INCLUDE_SA)
-if test "x$OPENJDK_TARGET_OS" = "xmacosx"; then
-MACOSX_UNIVERSAL="true"
-fi
-AC_SUBST(MACOSX_UNIVERSAL)
-])
+###############################################################################
+# Set the debug level
+# release: no debug information, all optimizations, no asserts.
+# optimized: no debug information, all optimizations, no asserts, HotSpot target is 'optimized'.
+# fastdebug: debug information (-g), all optimizations, all asserts
+# slowdebug: debug information (-g), no optimizations, all asserts
 AC_DEFUN_ONCE([JDKOPT_SETUP_DEBUG_LEVEL],
 [
-###############################################################################
-#
-# Set the debug level
-# release: no debug information, all optimizations, no asserts.
-# optimized: no debug information, all optimizations, no asserts, HotSpot target is 'optimized'.
-# fastdebug: debug information (-g), all optimizations, all asserts
-# slowdebug: debug information (-g), no optimizations, all asserts
-#
 DEBUG_LEVEL="release"
 AC_MSG_CHECKING([which debug level to use])
 AC_ARG_ENABLE([debug], [AS_HELP_STRING([--enable-debug],
@@ -208,118 +81,8 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_DEBUG_LEVEL],
 test "x$DEBUG_LEVEL" != xslowdebug; then
 AC_MSG_ERROR([Allowed debug levels are: release, fastdebug and slowdebug])
 fi
-###############################################################################
-#
-# Setup legacy vars/targets and new vars to deal with different debug levels.
-#
-case $DEBUG_LEVEL in
-release )
-VARIANT="OPT"
-FASTDEBUG="false"
-DEBUG_CLASSFILES="false"
-BUILD_VARIANT_RELEASE=""
-HOTSPOT_DEBUG_LEVEL="product"
-HOTSPOT_EXPORT="product"
-;;
-fastdebug )
-VARIANT="DBG"
-FASTDEBUG="true"
-DEBUG_CLASSFILES="true"
-BUILD_VARIANT_RELEASE="-fastdebug"
-HOTSPOT_DEBUG_LEVEL="fastdebug"
-HOTSPOT_EXPORT="fastdebug"
-;;
-slowdebug )
-VARIANT="DBG"
-FASTDEBUG="false"
-DEBUG_CLASSFILES="true"
-BUILD_VARIANT_RELEASE="-debug"
-HOTSPOT_DEBUG_LEVEL="debug"
-HOTSPOT_EXPORT="debug"
-;;
-optimized )
-VARIANT="OPT"
-FASTDEBUG="false"
-DEBUG_CLASSFILES="false"
-BUILD_VARIANT_RELEASE="-optimized"
-HOTSPOT_DEBUG_LEVEL="optimized"
-HOTSPOT_EXPORT="optimized"
-;;
-esac
-# The debug level 'optimized' is a little special because it is currently only
-# applicable to the HotSpot build where it means to build a completely
-# optimized version of the VM without any debugging code (like for the
-# 'release' debug level which is called 'product' in the HotSpot build) but
-# with the exception that it can contain additional code which is otherwise
-# protected by '#ifndef PRODUCT' macros. These 'optimized' builds are used to
-# test new and/or experimental features which are not intended for customer
-# shipment. Because these new features need to be tested and benchmarked in
-# real world scenarios, we want to build the containing JDK at the 'release'
-# debug level.
-if test "x$DEBUG_LEVEL" = xoptimized; then
-DEBUG_LEVEL="release"
-fi
-#####
-# Generate the legacy makefile targets for hotspot.
-# The hotspot api for selecting the build artifacts, really, needs to be improved.
-# JDK-7195896 will fix this on the hotspot side by using the JVM_VARIANT_* variables to
-# determine what needs to be built. All we will need to set here is all_product, all_fastdebug etc
-# But until then ...
-HOTSPOT_TARGET=""
-if test "x$JVM_VARIANT_SERVER" = xtrue; then
-HOTSPOT_TARGET="$HOTSPOT_TARGET${HOTSPOT_DEBUG_LEVEL} "
-fi
-if test "x$JVM_VARIANT_CLIENT" = xtrue; then
-HOTSPOT_TARGET="$HOTSPOT_TARGET${HOTSPOT_DEBUG_LEVEL}1 "
-fi
-if test "x$JVM_VARIANT_MINIMAL1" = xtrue; then
-HOTSPOT_TARGET="$HOTSPOT_TARGET${HOTSPOT_DEBUG_LEVEL}minimal1 "
-fi
-if test "x$JVM_VARIANT_KERNEL" = xtrue; then
-HOTSPOT_TARGET="$HOTSPOT_TARGET${HOTSPOT_DEBUG_LEVEL}kernel "
-fi
-if test "x$JVM_VARIANT_ZERO" = xtrue; then
-HOTSPOT_TARGET="$HOTSPOT_TARGET${HOTSPOT_DEBUG_LEVEL}zero "
-fi
-if test "x$JVM_VARIANT_ZEROSHARK" = xtrue; then
-HOTSPOT_TARGET="$HOTSPOT_TARGET${HOTSPOT_DEBUG_LEVEL}shark "
-fi
-if test "x$JVM_VARIANT_CORE" = xtrue; then
-HOTSPOT_TARGET="$HOTSPOT_TARGET${HOTSPOT_DEBUG_LEVEL}core "
-fi
-HOTSPOT_TARGET="$HOTSPOT_TARGET docs export_$HOTSPOT_EXPORT"
-# On Macosx universal binaries are produced, but they only contain
-# 64 bit intel. This invalidates control of which jvms are built
-# from configure, but only server is valid anyway. Fix this
-# when hotspot makefiles are rewritten.
-if test "x$MACOSX_UNIVERSAL" = xtrue; then
-HOTSPOT_TARGET=universal_${HOTSPOT_EXPORT}
-fi
-#####
-AC_SUBST(DEBUG_LEVEL)
-AC_SUBST(VARIANT)
-AC_SUBST(FASTDEBUG)
-AC_SUBST(DEBUG_CLASSFILES)
-AC_SUBST(BUILD_VARIANT_RELEASE)
 ])
 ###############################################################################
 #
 # Should we build only OpenJDK even if closed sources are present?
@@ -367,12 +130,8 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_OPEN_OR_CUSTOM],
 AC_DEFUN_ONCE([JDKOPT_SETUP_JDK_OPTIONS],
 [
-###############################################################################
-#
 # Should we build a JDK/JVM with headful support (ie a graphical ui)?
 # We always build headless support.
-#
 AC_MSG_CHECKING([headful support])
 AC_ARG_ENABLE([headful], [AS_HELP_STRING([--disable-headful],
 [disable building headful support (graphical UI support) @<:@enabled@:>@])],
@@ -398,21 +157,7 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_JDK_OPTIONS],
 AC_SUBST(SUPPORT_HEADFUL)
 AC_SUBST(BUILD_HEADLESS)
-# Control wether Hotspot runs Queens test after build.
-AC_ARG_ENABLE([hotspot-test-in-build], [AS_HELP_STRING([--enable-hotspot-test-in-build],
-[run the Queens test after Hotspot build @<:@disabled@:>@])],,
-[enable_hotspot_test_in_build=no])
-if test "x$enable_hotspot_test_in_build" = "xyes"; then
-TEST_IN_BUILD=true
-else
-TEST_IN_BUILD=false
-fi
-AC_SUBST(TEST_IN_BUILD)
-###############################################################################
-#
 # Choose cacerts source file
-#
 AC_ARG_WITH(cacerts-file, [AS_HELP_STRING([--with-cacerts-file],
 [specify alternative cacerts file])])
 if test "x$with_cacerts_file" != x; then
@@ -420,10 +165,7 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_JDK_OPTIONS],
 fi
 AC_SUBST(CACERTS_FILE)
-###############################################################################
-#
 # Enable or disable unlimited crypto
-#
 AC_ARG_ENABLE(unlimited-crypto, [AS_HELP_STRING([--enable-unlimited-crypto],
 [Enable unlimited crypto policy @<:@disabled@:>@])],,
 [enable_unlimited_crypto=no])
@@ -434,10 +176,7 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_JDK_OPTIONS],
 fi
 AC_SUBST(UNLIMITED_CRYPTO)
-###############################################################################
-#
 # Compress jars
-#
 COMPRESS_JARS=false
 AC_SUBST(COMPRESS_JARS)
@@ -455,19 +194,6 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_JDK_OPTIONS],
 AC_SUBST(COPYRIGHT_YEAR)
 ])
-AC_DEFUN_ONCE([JDKOPT_SETUP_BUILD_TWEAKS],
-[
-HOTSPOT_MAKE_ARGS="$HOTSPOT_TARGET"
-AC_SUBST(HOTSPOT_MAKE_ARGS)
-# The name of the Service Agent jar.
-SALIB_NAME="${LIBRARY_PREFIX}saproc${SHARED_LIBRARY_SUFFIX}"
-if test "x$OPENJDK_TARGET_OS" = "xwindows"; then
-SALIB_NAME="${LIBRARY_PREFIX}sawindbg${SHARED_LIBRARY_SUFFIX}"
-fi
-AC_SUBST(SALIB_NAME)
-])
 ###############################################################################
 #
 # Enable or disable the elliptic curve crypto implementation

@@ -487,7 +213,6 @@ AC_DEFUN_ONCE([JDKOPT_DETECT_INTREE_EC],
 AC_SUBST(ENABLE_INTREE_EC)
 ])
 AC_DEFUN_ONCE([JDKOPT_SETUP_DEBUG_SYMBOLS],
 [
 #
@@ -498,8 +223,21 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_DEBUG_SYMBOLS],
 AC_ARG_WITH([native-debug-symbols],
 [AS_HELP_STRING([--with-native-debug-symbols],
 [set the native debug symbol configuration (none, internal, external, zipped) @<:@zipped@:>@])],
-[],
-[with_native_debug_symbols="zipped"])
+[
+if test "x$OPENJDK_TARGET_OS" = xaix; then
+if test "x$withval" = xexternal || test "x$withval" = xzipped; then
+AC_MSG_ERROR([AIX only supports the parameters 'none' and 'internal' for --with-native-debug-symbols])
+fi
+fi
+],
+[
+if test "x$OPENJDK_TARGET_OS" = xaix; then
+# AIX doesn't support 'zipped' so use 'internal' as default
+with_native_debug_symbols="internal"
+else
+with_native_debug_symbols="zipped"
+fi
+])
 NATIVE_DEBUG_SYMBOLS=$with_native_debug_symbols
 AC_MSG_RESULT([$NATIVE_DEBUG_SYMBOLS])
@@ -632,5 +370,3 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_STATIC_BUILD],
 AC_SUBST(STATIC_BUILD)
 ])
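The AIX special-casing above changes both the default and the accepted values; an illustrative summary (the invocations are examples, not from the patch):

```sh
# Default: 'zipped' everywhere except AIX, where it becomes 'internal'.
bash ./configure
# Explicit values: 'none' and 'internal' work on all platforms...
bash ./configure --with-native-debug-symbols=internal
# ...while 'external' and 'zipped' now fail with an error on AIX.
bash ./configure --with-native-debug-symbols=zipped
```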


@@ -321,6 +321,25 @@ AC_DEFUN_ONCE([LIB_SETUP_FREETYPE],
 BASIC_WINDOWS_REWRITE_AS_UNIX_PATH(FREETYPE_BASE_DIR)
 LIB_CHECK_POTENTIAL_FREETYPE([$FREETYPE_BASE_DIR/include], [$FREETYPE_BASE_DIR/lib], [well-known location])
 fi
+if test "x$FOUND_FREETYPE" != xyes; then
+FREETYPE_BASE_DIR="$HOME/freetype"
+BASIC_WINDOWS_REWRITE_AS_UNIX_PATH(FREETYPE_BASE_DIR)
+if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
+LIB_CHECK_POTENTIAL_FREETYPE([$FREETYPE_BASE_DIR/include], [$FREETYPE_BASE_DIR/lib64], [well-known location])
+else
+LIB_CHECK_POTENTIAL_FREETYPE([$FREETYPE_BASE_DIR/include], [$FREETYPE_BASE_DIR/lib32], [well-known location])
+fi
+if test "x$FOUND_FREETYPE" != xyes && test -d $FREETYPE_BASE_DIR \
+&& test -s "$FREETYPE_BASE_DIR/builds/windows/vc2010/freetype.vcxproj" && test "x$MSBUILD" != x; then
+# Source is available, as a last resort try to build freetype in default location
+LIB_BUILD_FREETYPE($FREETYPE_BASE_DIR)
+if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
+LIB_CHECK_POTENTIAL_FREETYPE([$FREETYPE_BASE_DIR/include], [$FREETYPE_BASE_DIR/lib64], [well-known location])
+else
+LIB_CHECK_POTENTIAL_FREETYPE([$FREETYPE_BASE_DIR/include], [$FREETYPE_BASE_DIR/lib32], [well-known location])
+fi
+fi
+fi
 else
 FREETYPE_BASE_DIR="$SYSROOT/usr"
 LIB_CHECK_POTENTIAL_FREETYPE([$FREETYPE_BASE_DIR/include], [$FREETYPE_BASE_DIR/lib], [well-known location])
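The new fallback probes a conventional layout under $HOME/freetype, matching the unpack hint added to help.m4 above; a sketch of the tree it expects (the listing is illustrative):

```sh
# Layout probed by the fallback (lib32 instead of lib64 on 32-bit targets):
#   $HOME/freetype/include/                                 headers
#   $HOME/freetype/lib64/                                   built libraries
#   $HOME/freetype/builds/windows/vc2010/freetype.vcxproj   enables the last-resort MSBuild step
ls "$HOME/freetype/include" "$HOME/freetype/lib64"
```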


@@ -204,13 +204,12 @@ SUPPORT_HEADLESS:=@SUPPORT_HEADLESS@
 # These are the libjvms that we want to build.
 # The java launcher uses the default.
-# The others can be selected by specifying -client -server -minimal1 -kernel -zero or -zeroshark
+# The others can be selected by specifying -client -server -minimal1 -zero or -zeroshark
 # on the java launcher command line.
 JVM_VARIANTS:=@JVM_VARIANTS@
 JVM_VARIANT_SERVER:=@JVM_VARIANT_SERVER@
 JVM_VARIANT_CLIENT:=@JVM_VARIANT_CLIENT@
 JVM_VARIANT_MINIMAL1:=@JVM_VARIANT_MINIMAL1@
-JVM_VARIANT_KERNEL:=@JVM_VARIANT_KERNEL@
 JVM_VARIANT_ZERO:=@JVM_VARIANT_ZERO@
 JVM_VARIANT_ZEROSHARK:=@JVM_VARIANT_ZEROSHARK@
 JVM_VARIANT_CORE:=@JVM_VARIANT_CORE@
@ -270,6 +269,7 @@ SJAVAC_SERVER_DIR=$(MAKESUPPORT_OUTPUTDIR)/javacservers
# Number of parallel jobs to use for compilation # Number of parallel jobs to use for compilation
JOBS?=@JOBS@ JOBS?=@JOBS@
TEST_JOBS?=@TEST_JOBS@
# Default make target # Default make target
DEFAULT_MAKE_TARGET:=@DEFAULT_MAKE_TARGET@ DEFAULT_MAKE_TARGET:=@DEFAULT_MAKE_TARGET@
@ -280,6 +280,8 @@ FREETYPE_BUNDLE_LIB_PATH=@FREETYPE_BUNDLE_LIB_PATH@
CUPS_CFLAGS:=@CUPS_CFLAGS@ CUPS_CFLAGS:=@CUPS_CFLAGS@
ALSA_LIBS:=@ALSA_LIBS@ ALSA_LIBS:=@ALSA_LIBS@
ALSA_CFLAGS:=@ALSA_CFLAGS@ ALSA_CFLAGS:=@ALSA_CFLAGS@
LIBFFI_LIBS:=@LIBFFI_LIBS@
LIBFFI_CFLAGS:=@LIBFFI_CFLAGS@
PACKAGE_PATH=@PACKAGE_PATH@ PACKAGE_PATH=@PACKAGE_PATH@
@ -300,11 +302,15 @@ MACOSX_VERSION_MIN=@MACOSX_VERSION_MIN@
# Toolchain type: gcc, clang, solstudio, lxc, microsoft... # Toolchain type: gcc, clang, solstudio, lxc, microsoft...
TOOLCHAIN_TYPE:=@TOOLCHAIN_TYPE@ TOOLCHAIN_TYPE:=@TOOLCHAIN_TYPE@
TOOLCHAIN_VERSION := @TOOLCHAIN_VERSION@
# Option used to tell the compiler whether to create 32- or 64-bit executables # Option used to tell the compiler whether to create 32- or 64-bit executables
COMPILER_TARGET_BITS_FLAG:=@COMPILER_TARGET_BITS_FLAG@ COMPILER_TARGET_BITS_FLAG:=@COMPILER_TARGET_BITS_FLAG@
COMPILER_SUPPORTS_TARGET_BITS_FLAG=@COMPILER_SUPPORTS_TARGET_BITS_FLAG@ COMPILER_SUPPORTS_TARGET_BITS_FLAG=@COMPILER_SUPPORTS_TARGET_BITS_FLAG@
# Option used to pass a command file to the compiler
COMPILER_COMMAND_FILE_FLAG:=@COMPILER_COMMAND_FILE_FLAG@
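A command file lets the build pass argument lists that would exceed OS command-line length limits; with gcc-style drivers the flag is typically '@', as in this illustrative sketch (file and source names hypothetical):

    printf '%s\n' -O2 -DNDEBUG -c foo.c > args.txt
    gcc @args.txt    # gcc reads the remaining options from args.txt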
CC_OUT_OPTION:=@CC_OUT_OPTION@ CC_OUT_OPTION:=@CC_OUT_OPTION@
EXE_OUT_OPTION:=@EXE_OUT_OPTION@ EXE_OUT_OPTION:=@EXE_OUT_OPTION@
LD_OUT_OPTION:=@LD_OUT_OPTION@ LD_OUT_OPTION:=@LD_OUT_OPTION@
@ -388,6 +394,7 @@ LDFLAGS_TESTEXE:=@LDFLAGS_TESTEXE@
BUILD_CC:=@FIXPATH@ @BUILD_ICECC@ @BUILD_CC@ BUILD_CC:=@FIXPATH@ @BUILD_ICECC@ @BUILD_CC@
BUILD_CXX:=@FIXPATH@ @BUILD_ICECC@ @BUILD_CXX@ BUILD_CXX:=@FIXPATH@ @BUILD_ICECC@ @BUILD_CXX@
BUILD_LD:=@FIXPATH@ @BUILD_LD@ BUILD_LD:=@FIXPATH@ @BUILD_LD@
BUILD_LDCXX:=@FIXPATH@ @BUILD_LDCXX@
BUILD_AS:=@FIXPATH@ @BUILD_AS@ BUILD_AS:=@FIXPATH@ @BUILD_AS@
BUILD_AR:=@FIXPATH@ @BUILD_AR@ BUILD_AR:=@FIXPATH@ @BUILD_AR@
BUILD_NM:=@FIXPATH@ @BUILD_NM@ BUILD_NM:=@FIXPATH@ @BUILD_NM@
@ -433,6 +440,8 @@ COMPRESS_JARS=@COMPRESS_JARS@
# (Note absence of := assignment, because we do not want to evaluate the macro body here) # (Note absence of := assignment, because we do not want to evaluate the macro body here)
SET_SHARED_LIBRARY_NAME=@SET_SHARED_LIBRARY_NAME@ SET_SHARED_LIBRARY_NAME=@SET_SHARED_LIBRARY_NAME@
SHARED_LIBRARY_FLAGS=@SHARED_LIBRARY_FLAGS@
# Set origin using the linker, ie use the relative path to the dependent library to find the dependees. # Set origin using the linker, ie use the relative path to the dependent library to find the dependees.
# (Note absence of := assignment, because we do not want to evaluate the macro body here) # (Note absence of := assignment, because we do not want to evaluate the macro body here)
SET_SHARED_LIBRARY_ORIGIN=@SET_SHARED_LIBRARY_ORIGIN@ SET_SHARED_LIBRARY_ORIGIN=@SET_SHARED_LIBRARY_ORIGIN@
@ -650,9 +659,6 @@ PNG_CFLAGS:=@PNG_CFLAGS@
# Misc # Misc
# #
# Name of Service Agent library
SALIB_NAME=@SALIB_NAME@
INCLUDE_SA=@INCLUDE_SA@ INCLUDE_SA=@INCLUDE_SA@
OS_VERSION_MAJOR:=@OS_VERSION_MAJOR@ OS_VERSION_MAJOR:=@OS_VERSION_MAJOR@

View file

@ -216,7 +216,11 @@ AC_DEFUN_ONCE([TOOLCHAIN_PRE_DETECTION],
# The microsoft toolchain also requires INCLUDE and LIB to be set. # The microsoft toolchain also requires INCLUDE and LIB to be set.
export INCLUDE="$VS_INCLUDE" export INCLUDE="$VS_INCLUDE"
export LIB="$VS_LIB" export LIB="$VS_LIB"
else
    # Currently we do not define this for other toolchains. This might change as the need arises.
TOOLCHAIN_VERSION=
fi fi
AC_SUBST(TOOLCHAIN_VERSION)
# For solaris we really need solaris tools, and not the GNU equivalent. # For solaris we really need solaris tools, and not the GNU equivalent.
# The build tools on Solaris reside in /usr/ccs (C Compilation System), # The build tools on Solaris reside in /usr/ccs (C Compilation System),
@ -731,6 +735,7 @@ AC_DEFUN_ONCE([TOOLCHAIN_SETUP_BUILD_COMPILERS],
BUILD_AS="$BUILD_CC -c" BUILD_AS="$BUILD_CC -c"
# Just like for the target compiler, use the compiler as linker # Just like for the target compiler, use the compiler as linker
BUILD_LD="$BUILD_CC" BUILD_LD="$BUILD_CC"
BUILD_LDCXX="$BUILD_CXX"
PATH="$OLDPATH" PATH="$OLDPATH"
else else
@ -739,6 +744,7 @@ AC_DEFUN_ONCE([TOOLCHAIN_SETUP_BUILD_COMPILERS],
BUILD_CC="$CC" BUILD_CC="$CC"
BUILD_CXX="$CXX" BUILD_CXX="$CXX"
BUILD_LD="$LD" BUILD_LD="$LD"
BUILD_LDCXX="$LDCXX"
BUILD_NM="$NM" BUILD_NM="$NM"
BUILD_AS="$AS" BUILD_AS="$AS"
BUILD_SYSROOT_CFLAGS="$SYSROOT_CFLAGS" BUILD_SYSROOT_CFLAGS="$SYSROOT_CFLAGS"
@ -749,6 +755,7 @@ AC_DEFUN_ONCE([TOOLCHAIN_SETUP_BUILD_COMPILERS],
AC_SUBST(BUILD_CC) AC_SUBST(BUILD_CC)
AC_SUBST(BUILD_CXX) AC_SUBST(BUILD_CXX)
AC_SUBST(BUILD_LD) AC_SUBST(BUILD_LD)
AC_SUBST(BUILD_LDCXX)
AC_SUBST(BUILD_NM) AC_SUBST(BUILD_NM)
AC_SUBST(BUILD_AS) AC_SUBST(BUILD_AS)
AC_SUBST(BUILD_SYSROOT_CFLAGS) AC_SUBST(BUILD_SYSROOT_CFLAGS)
@ -822,13 +829,13 @@ AC_DEFUN_ONCE([TOOLCHAIN_MISC_CHECKS],
[HAS_CFLAG_OPTIMIZE_DEBUG=false]) [HAS_CFLAG_OPTIMIZE_DEBUG=false])
# "-z relro" supported in GNU binutils 2.17 and later # "-z relro" supported in GNU binutils 2.17 and later
LINKER_RELRO_FLAG="-Xlinker -z -Xlinker relro" LINKER_RELRO_FLAG="-Wl,-z,relro"
FLAGS_LINKER_CHECK_ARGUMENTS([$LINKER_RELRO_FLAG], FLAGS_LINKER_CHECK_ARGUMENTS([$LINKER_RELRO_FLAG],
[HAS_LINKER_RELRO=true], [HAS_LINKER_RELRO=true],
[HAS_LINKER_RELRO=false]) [HAS_LINKER_RELRO=false])
# "-z now" supported in GNU binutils 2.11 and later # "-z now" supported in GNU binutils 2.11 and later
LINKER_NOW_FLAG="-Xlinker -z -Xlinker now" LINKER_NOW_FLAG="-Wl,-z,now"
FLAGS_LINKER_CHECK_ARGUMENTS([$LINKER_NOW_FLAG], FLAGS_LINKER_CHECK_ARGUMENTS([$LINKER_NOW_FLAG],
[HAS_LINKER_NOW=true], [HAS_LINKER_NOW=true],
[HAS_LINKER_NOW=false]) [HAS_LINKER_NOW=false])
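Both spellings hand options through the compiler driver to the linker; '-Wl,' splits on commas into separate linker arguments, so the two forms below are equivalent (a sketch with a hypothetical source file):

    cc -Wl,-z,relro main.c                  # driver passes '-z relro' to ld
    cc -Xlinker -z -Xlinker relro main.c    # same effect, one argument per -Xlinker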
@ -841,7 +848,7 @@ AC_DEFUN_ONCE([TOOLCHAIN_MISC_CHECKS],
AC_MSG_CHECKING([for broken SuSE 'ld' which only understands anonymous version tags in executables]) AC_MSG_CHECKING([for broken SuSE 'ld' which only understands anonymous version tags in executables])
$ECHO "SUNWprivate_1.1 { local: *; };" > version-script.map $ECHO "SUNWprivate_1.1 { local: *; };" > version-script.map
$ECHO "int main() { }" > main.c $ECHO "int main() { }" > main.c
if $CXX -Xlinker -version-script=version-script.map main.c 2>&AS_MESSAGE_LOG_FD >&AS_MESSAGE_LOG_FD; then if $CXX -Wl,-version-script=version-script.map main.c 2>&AS_MESSAGE_LOG_FD >&AS_MESSAGE_LOG_FD; then
AC_MSG_RESULT(no) AC_MSG_RESULT(no)
USING_BROKEN_SUSE_LD=no USING_BROKEN_SUSE_LD=no
else else

View file

@ -37,13 +37,18 @@ fi
if [ "$OPENJDK_TARGET_OS" = "macosx" ]; then if [ "$OPENJDK_TARGET_OS" = "macosx" ]; then
FULLDUMP_CMD="$OTOOL -v -V -h -X -d" FULLDUMP_CMD="$OTOOL -v -V -h -X -d"
LDD_CMD="$OTOOL -L" LDD_CMD="$OTOOL -L"
DIS_CMD="$OTOOL -v -t" DIS_CMD="$OTOOL -v -V -t"
STAT_PRINT_SIZE="-f %z" STAT_PRINT_SIZE="-f %z"
elif [ "$OPENJDK_TARGET_OS" = "windows" ]; then elif [ "$OPENJDK_TARGET_OS" = "windows" ]; then
FULLDUMP_CMD="$DUMPBIN -all" FULLDUMP_CMD="$DUMPBIN -all"
LDD_CMD="$DUMPBIN -dependants | $GREP .dll" LDD_CMD="$DUMPBIN -dependants | $GREP .dll"
DIS_CMD="$DUMPBIN -disasm:nobytes" DIS_CMD="$DUMPBIN -disasm:nobytes"
STAT_PRINT_SIZE="-c %s" STAT_PRINT_SIZE="-c %s"
elif [ "$OPENJDK_TARGET_OS" = "aix" ]; then
FULLDUMP_CMD="dump -h -r -t -n -X64"
LDD_CMD="$LDD"
DIS_CMD="$OBJDUMP -d"
STAT_PRINT_SIZE="-c %s"
else else
FULLDUMP_CMD="$READELF -a" FULLDUMP_CMD="$READELF -a"
LDD_CMD="$LDD" LDD_CMD="$LDD"
@ -730,6 +735,9 @@ compare_bin_file() {
# Some symbols get seemingly random 15 character prefixes. Filter them out. # Some symbols get seemingly random 15 character prefixes. Filter them out.
$NM -a $ORIG_OTHER_FILE 2> /dev/null | $GREP -v $NAME | $AWK '{print $2, $3, $4, $5}' | $SED 's/^\([a-zA-Z] [\.\$]\)[a-zA-Z0-9_\$]\{15,15\}\./\1./g' | $SYM_SORT_CMD > $WORK_FILE_BASE.symbols.other $NM -a $ORIG_OTHER_FILE 2> /dev/null | $GREP -v $NAME | $AWK '{print $2, $3, $4, $5}' | $SED 's/^\([a-zA-Z] [\.\$]\)[a-zA-Z0-9_\$]\{15,15\}\./\1./g' | $SYM_SORT_CMD > $WORK_FILE_BASE.symbols.other
$NM -a $ORIG_THIS_FILE 2> /dev/null | $GREP -v $NAME | $AWK '{print $2, $3, $4, $5}' | $SED 's/^\([a-zA-Z] [\.\$]\)[a-zA-Z0-9_\$]\{15,15\}\./\1./g' | $SYM_SORT_CMD > $WORK_FILE_BASE.symbols.this $NM -a $ORIG_THIS_FILE 2> /dev/null | $GREP -v $NAME | $AWK '{print $2, $3, $4, $5}' | $SED 's/^\([a-zA-Z] [\.\$]\)[a-zA-Z0-9_\$]\{15,15\}\./\1./g' | $SYM_SORT_CMD > $WORK_FILE_BASE.symbols.this
elif [ "$OPENJDK_TARGET_OS" = "aix" ]; then
$OBJDUMP -T $ORIG_OTHER_FILE 2> /dev/null | $GREP -v $NAME | $AWK '{print $2, $3, $4, $5}' | $SYM_SORT_CMD > $WORK_FILE_BASE.symbols.other
$OBJDUMP -T $ORIG_THIS_FILE 2> /dev/null | $GREP -v $NAME | $AWK '{print $2, $3, $4, $5}' | $SYM_SORT_CMD > $WORK_FILE_BASE.symbols.this
else else
$NM -a $ORIG_OTHER_FILE 2> /dev/null | $GREP -v $NAME | $AWK '{print $2, $3, $4, $5}' | $SYM_SORT_CMD > $WORK_FILE_BASE.symbols.other $NM -a $ORIG_OTHER_FILE 2> /dev/null | $GREP -v $NAME | $AWK '{print $2, $3, $4, $5}' | $SYM_SORT_CMD > $WORK_FILE_BASE.symbols.other
$NM -a $ORIG_THIS_FILE 2> /dev/null | $GREP -v $NAME | $AWK '{print $2, $3, $4, $5}' | $SYM_SORT_CMD > $WORK_FILE_BASE.symbols.this $NM -a $ORIG_THIS_FILE 2> /dev/null | $GREP -v $NAME | $AWK '{print $2, $3, $4, $5}' | $SYM_SORT_CMD > $WORK_FILE_BASE.symbols.this
@ -796,14 +804,21 @@ compare_bin_file() {
DEP_MSG=" - " DEP_MSG=" - "
fi fi
# Some linux compilers add a unique Build ID
if [ "$OPENJDK_TARGET_OS" = "linux" ]; then
BUILD_ID_FILTER="$SED -r 's/(Build ID:) [0-9a-f]{40}/\1/'"
else
BUILD_ID_FILTER="$CAT"
fi
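The 40 hex digits being stripped are the linker-generated build-id (a SHA-1 by default), which differs between otherwise identical builds; a quick illustration of the filter on a hypothetical readelf output line:

    $ echo "  Build ID: 0123456789abcdef0123456789abcdef01234567" \
        | sed -r 's/(Build ID:) [0-9a-f]{40}/\1/'
      Build ID: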
# Compare fulldump output # Compare fulldump output
if [ -n "$FULLDUMP_CMD" ] && [ -z "$SKIP_FULLDUMP_DIFF" ]; then if [ -n "$FULLDUMP_CMD" ] && [ -z "$SKIP_FULLDUMP_DIFF" ]; then
if [ -z "$FULLDUMP_DIFF_FILTER" ]; then if [ -z "$FULLDUMP_DIFF_FILTER" ]; then
FULLDUMP_DIFF_FILTER="$CAT" FULLDUMP_DIFF_FILTER="$CAT"
fi fi
$FULLDUMP_CMD $OTHER_FILE | eval "$FULLDUMP_DIFF_FILTER" \ $FULLDUMP_CMD $OTHER_FILE | eval "$BUILD_ID_FILTER" | eval "$FULLDUMP_DIFF_FILTER" \
> $WORK_FILE_BASE.fulldump.other 2>&1 > $WORK_FILE_BASE.fulldump.other 2>&1
$FULLDUMP_CMD $THIS_FILE | eval "$FULLDUMP_DIFF_FILTER" \ $FULLDUMP_CMD $THIS_FILE | eval "$BUILD_ID_FILTER" | eval "$FULLDUMP_DIFF_FILTER" \
> $WORK_FILE_BASE.fulldump.this 2>&1 > $WORK_FILE_BASE.fulldump.this 2>&1
LC_ALL=C $DIFF $WORK_FILE_BASE.fulldump.other $WORK_FILE_BASE.fulldump.this \ LC_ALL=C $DIFF $WORK_FILE_BASE.fulldump.other $WORK_FILE_BASE.fulldump.this \

View file

@ -57,14 +57,18 @@ ACCEPTED_BIN_DIFF="
./demo/jvmti/mtrace/lib/libmtrace.so ./demo/jvmti/mtrace/lib/libmtrace.so
./demo/jvmti/versionCheck/lib/libversionCheck.so ./demo/jvmti/versionCheck/lib/libversionCheck.so
./demo/jvmti/waiters/lib/libwaiters.so ./demo/jvmti/waiters/lib/libwaiters.so
./lib/i386/client/libjsig.so
./lib/i386/client/libjvm.so ./lib/i386/client/libjvm.so
./lib/i386/libattach.so ./lib/i386/libattach.so
./lib/i386/libdt_socket.so ./lib/i386/libdt_socket.so
./lib/i386/libinstrument.so ./lib/i386/libinstrument.so
./lib/i386/libjsdt.so ./lib/i386/libjsdt.so
./lib/i386/libjsig.so
./lib/i386/libmanagement.so ./lib/i386/libmanagement.so
./lib/i386/libnet.so
./lib/i386/libnpt.so ./lib/i386/libnpt.so
./lib/i386/libverify.so ./lib/i386/libverify.so
./lib/i386/server/libjsig.so
./lib/i386/server/libjvm.so ./lib/i386/server/libjvm.so
./bin/appletviewer ./bin/appletviewer
./bin/idlj ./bin/idlj
@ -105,6 +109,17 @@ ACCEPTED_BIN_DIFF="
./bin/xjc ./bin/xjc
" "
# An issue with __FILE__ usage in generated header files prevents a clean fulldump diff of
# the server jvm with the old hotspot build.
KNOWN_FULLDUMP_DIFF="
./lib/i386/server/libjvm.so
"
KNOWN_DIS_DIFF="
./lib/i386/server/libjvm.so
"
DIS_DIFF_FILTER="$SED \
-e 's/\(:\t\)\([0-9a-z]\{2,2\} \)\{1,7\}/\1<hex>/g' \
-e 's/0x[0-9a-z]\{2,9\}/<hex>/g'"
fi fi
if [ "$OPENJDK_TARGET_OS" = "linux" ] && [ "$OPENJDK_TARGET_CPU" = "x86_64" ]; then if [ "$OPENJDK_TARGET_OS" = "linux" ] && [ "$OPENJDK_TARGET_CPU" = "x86_64" ]; then
@ -135,6 +150,7 @@ ACCEPTED_BIN_DIFF="
./lib/amd64/libjsdt.so ./lib/amd64/libjsdt.so
./lib/amd64/libjsig.so ./lib/amd64/libjsig.so
./lib/amd64/libmanagement.so ./lib/amd64/libmanagement.so
./lib/amd64/libnet.so
./lib/amd64/libnpt.so ./lib/amd64/libnpt.so
./lib/amd64/libsaproc.so ./lib/amd64/libsaproc.so
./lib/amd64/libverify.so ./lib/amd64/libverify.so
@ -179,6 +195,12 @@ ACCEPTED_BIN_DIFF="
./bin/xjc ./bin/xjc
" "
# An issue with __FILE__ usage in generated header files prevents a clean fulldump diff of
# the server jvm with the old hotspot build.
KNOWN_FULLDUMP_DIFF="
./lib/amd64/server/libjvm.so
"
fi fi
if [ "$OPENJDK_TARGET_OS" = "solaris" ] && [ "$OPENJDK_TARGET_CPU" = "x86_64" ]; then if [ "$OPENJDK_TARGET_OS" = "solaris" ] && [ "$OPENJDK_TARGET_CPU" = "x86_64" ]; then
@ -299,14 +321,13 @@ SKIP_FULLDUMP_DIFF="true"
# Filter random C++ symbol strings. # Filter random C++ symbol strings.
# Some numbers differ randomly. # Some numbers differ randomly.
# Can't use space in these expressions as the shell will mess with them.
DIS_DIFF_FILTER="$SED \ DIS_DIFF_FILTER="$SED \
-e 's/\.[a-zA-Z0-9_\$]\{15,15\}/<SYM>/g' \ -e 's/\.[a-zA-Z0-9_\$]\{15\}/<SYM>/g' \
-e 's/\([0-9a-f][0-9a-f].\)\{2,8\}[0-9a-f][0-9a-f]/<NUMS>/g' \ -e 's/\(\# \)[0-9a-f]*\( <\)/\1<HEX>\2/g' \
-e 's/\(0x\)[0-9a-f]*\([,(>]\)/\1<HEX>\2/g' \ -e 's/0x[0-9a-f]*$/<HEX>/g' \
-e 's/\(0x\)[0-9a-f]*$/\1<HEX>/g' \ -e 's/0x[0-9a-f]*\([,(>]\)/<HEX>\1/g' \
-e 's/\(\#.\)[0-9a-f]*\(.<\)/\1<HEX>\2/g' \ -e 's/: [0-9a-f][0-9a-f]\( [0-9a-f][0-9a-f]\)\{2,10\}/: <NUMS>/g' \
-e 's/[\.A-Za-z0-9%]\{16,16\}$/<BIN>/g'" -e 's/ [\.A-Za-z0-9%@]\{16\}$/ <BIN>/g'"
fi fi
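Roughly, the rewritten filter canonicalizes byte listings and hex operands so that only layout-independent text is compared; applied to a hypothetical objdump line:

    $ echo '  4005d0: 48 83 ec 08   sub 0x8,%rsp' | sed \
        -e 's/: [0-9a-f][0-9a-f]\( [0-9a-f][0-9a-f]\)\{2,10\}/: <NUMS>/g' \
        -e 's/0x[0-9a-f]*\([,(>]\)/<HEX>\1/g'
      4005d0: <NUMS>   sub <HEX>,%rsp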
@ -425,18 +446,23 @@ ACCEPTED_SMALL_SIZE_DIFF="
./bin/xjc ./bin/xjc
" "
# Filter random C++ symbol strings.
# Some numbers differ randomly. # Some numbers differ randomly.
DIS_DIFF_FILTER="$SED \ DIS_DIFF_FILTER="$SED \
-e 's/\$[a-zA-Z0-9_\$]\{15,15\}/<SYM>/g' \ -e 's/\$[a-zA-Z0-9_\$]\{15\}/<SYM>/g' \
-e 's/[0-9a-f][0-9a-f].[0-9a-f][0-9a-f].[0-9a-f][0-9a-f].[0-9a-f][0-9a-f]/<NUMS>/g' \ -e 's/: [0-9a-f][0-9a-f]\( [0-9a-f][0-9a-f]\)\{2,10\}/: <NUMS>/g' \
-e 's/\(%g1,.0x\)[0-9a-f]*\(,.%g1\)/\1<HEX>\2/g' \ -e 's/, [0-9a-fx\-]\{1,8\}/, <CONST>/g' \
-e 's/\(!.\)[0-9a-f]*\(.<SUNWprivate_1.1+0x\)[0-9a-f]*/\1<NUM>\2<HEX>/g' \ -e 's/call [0-9a-f]\{7\}/call <ADDR>/g' \
-e 's/\!.[0-9a-f]\{1,4\} <_DYNAMIC+0x[0-9a-f]\{1,4\}>/<DYNAMIC>/g'" -e 's/0x[0-9a-f]\{1,8\}/<HEX>/g' \
-e 's/\! [0-9a-f]\{1,8\} /! <ADDR> /g'"
# Some xor instructions end up with different args in the lib but not in the object files. # libjvm.so
ACCEPTED_DIS_DIFF=" # __FILE__ macro usage in debug.hpp causes differences between old and new
./demo/jvmti/waiters/lib/libwaiters.so # hotspot builds in ad_sparc.o and ad_sparc_clone.o. The .o files compare
# equal when stripped, but at link time differences appear. Removing
# __FILE__ from ShouldNotCallThis() and ShouldNotReachHere() removes
# the differences.
KNOWN_DIS_DIFF="
./lib/sparcv9/server/libjvm.so
" "
SKIP_FULLDUMP_DIFF="true" SKIP_FULLDUMP_DIFF="true"
@ -634,11 +660,12 @@ ACCEPTED_BIN_DIFF="
SORT_SYMBOLS=" SORT_SYMBOLS="
./Contents/Home/lib/libsaproc.dylib ./Contents/Home/lib/libsaproc.dylib
./lib/libsaproc.dylib ./lib/libsaproc.dylib
./lib/libjsig.dylib
" "
ACCEPTED_SMALL_SIZE_DIFF="$ACCEPTED_BIN_DIFF" ACCEPTED_SMALL_SIZE_DIFF="$ACCEPTED_BIN_DIFF"
DIS_DIFF_FILTER="$SED \ DIS_DIFF_FILTER="LANG=C $SED \
-e 's/0x[0-9a-f]\{4,16\}/<HEXSTR>/g'" -e 's/0x[0-9a-f]\{3,16\}/<HEXSTR>/g' -e 's/^[0-9a-f]\{12,20\}/<ADDR>/'"
fi fi

View file

@ -32,7 +32,7 @@ installed_jib_script=${mydir}/../../.jib/jib
install_data=${mydir}/../../.jib/.data install_data=${mydir}/../../.jib/.data
setup_url() { setup_url() {
if [ -f "~/.config/jib/jib.conf" ]; then if [ -f ~/.config/jib/jib.conf ]; then
source ~/.config/jib/jib.conf source ~/.config/jib/jib.conf
fi fi
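The quoting change is load-bearing: tilde expansion does not happen inside double quotes, so the quoted test compared against a literal '~' path and effectively never matched. Compare:

    [ -f "~/.config/jib/jib.conf" ]   # tests the literal path ~/.config/... - almost never true
    [ -f ~/.config/jib/jib.conf ]     # ~ expands to $HOME - tests the intended file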
@ -50,6 +50,9 @@ setup_url() {
if [ -n "${JIB_SERVER}" ]; then if [ -n "${JIB_SERVER}" ]; then
jib_server="${JIB_SERVER}" jib_server="${JIB_SERVER}"
fi fi
if [ -n "${JIB_SERVER_MIRRORS}" ]; then
jib_server_mirrors="${JIB_SERVER_MIRRORS}"
fi
if [ -n "${JIB_REPOSITORY}" ]; then if [ -n "${JIB_REPOSITORY}" ]; then
jib_repository="${JIB_REPOSITORY}" jib_repository="${JIB_REPOSITORY}"
fi fi
@ -70,8 +73,9 @@ setup_url() {
jib_url="${JIB_URL}" jib_url="${JIB_URL}"
data_string="${jib_url}" data_string="${jib_url}"
else else
data_string="${jib_repository}/${jib_organization}/${jib_module}/${jib_revision}/${jib_module}-${jib_revision}.${jib_ext}" jib_path="${jib_repository}/${jib_organization}/${jib_module}/${jib_revision}/${jib_module}-${jib_revision}.${jib_ext}"
jib_url="${jib_server}/${data_string}" data_string="${jib_path}"
jib_url="${jib_server}/${jib_path}"
fi fi
} }
@ -104,7 +108,25 @@ install_jib() {
${getcmd} ${jib_url} > "${installed_jib_script}.gz" ${getcmd} ${jib_url} > "${installed_jib_script}.gz"
if [ ! -s "${installed_jib_script}.gz" ]; then if [ ! -s "${installed_jib_script}.gz" ]; then
echo "Failed to download ${jib_url}" echo "Failed to download ${jib_url}"
exit 1 if [ -n "${jib_path}" -a -n "${jib_server_mirrors}" ]; then
OLD_IFS="${IFS}"
IFS=" ,"
for mirror in ${jib_server_mirrors}; do
echo "Trying mirror ${mirror}"
jib_url="${mirror}/${jib_path}"
${getcmd} ${jib_url} > "${installed_jib_script}.gz"
if [ -s "${installed_jib_script}.gz" ]; then
echo "Download from mirror successful"
break
else
echo "Failed to download ${jib_url}"
fi
done
IFS="${OLD_IFS}"
fi
if [ ! -s "${installed_jib_script}.gz" ]; then
exit 1
fi
fi fi
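The retry loop splits the mirror list on the temporarily widened IFS, so entries may be separated by spaces, commas, or both; a minimal standalone sketch (mirror URLs hypothetical):

    JIB_SERVER_MIRRORS="https://mirror-a.example.com, https://mirror-b.example.com"
    OLD_IFS="${IFS}"
    IFS=" ,"
    for mirror in ${JIB_SERVER_MIRRORS}; do
      echo "Trying mirror ${mirror}"
    done
    IFS="${OLD_IFS}"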
echo "Extracting JIB bootstrap script" echo "Extracting JIB bootstrap script"
rm -f "${installed_jib_script}" rm -f "${installed_jib_script}"

View file

@ -357,8 +357,8 @@ var getJibProfilesDependencies = function (input, common) {
var devkit_platform_revisions = { var devkit_platform_revisions = {
linux_x64: "gcc4.9.2-OEL6.4+1.0", linux_x64: "gcc4.9.2-OEL6.4+1.0",
macosx_x64: "Xcode6.3-MacOSX10.9+1.0", macosx_x64: "Xcode6.3-MacOSX10.9+1.0",
solaris_x64: "SS12u3-Solaris10u10+1.0", solaris_x64: "SS12u4-Solaris11u1+1.0",
solaris_sparcv9: "SS12u3-Solaris10u10+1.0", solaris_sparcv9: "SS12u4-Solaris11u1+1.0",
windows_x64: "VS2013SP4+1.0" windows_x64: "VS2013SP4+1.0"
}; };

View file

@ -340,3 +340,5 @@ f7d70caad89ad0c43bb057bca0aad6f17ce05a6a jdk9-b92
fd038e8a16eec80d0d6b337d74a582790ed4b3ee jdk-9+95 fd038e8a16eec80d0d6b337d74a582790ed4b3ee jdk-9+95
feb1bd85d7990dcf5584ca9e53104269c01db006 jdk-9+96 feb1bd85d7990dcf5584ca9e53104269c01db006 jdk-9+96
10a482b863582376d4ca229090334b23b05159fc jdk-9+97 10a482b863582376d4ca229090334b23b05159fc jdk-9+97
ea285530245cf4e0edf0479121a41347d3030eba jdk-9+98
180212ee1d8710691ba9944593dfc1ff3e4f1532 jdk-9+99

View file

@ -500,3 +500,5 @@ a22b7c80529f5f05c847e932e017456e83c46233 jdk9-b94
0c79cf3cdf0904fc4a630b91b32904491e1ae430 jdk-9+95 0c79cf3cdf0904fc4a630b91b32904491e1ae430 jdk-9+95
a94bb7203596dd632486f1e3655fa5f70541dc08 jdk-9+96 a94bb7203596dd632486f1e3655fa5f70541dc08 jdk-9+96
de592ea5f7ba0f8a8c5afc03bd169f7690c72b6f jdk-9+97 de592ea5f7ba0f8a8c5afc03bd169f7690c72b6f jdk-9+97
e5b1a23be1e105417ba1c4c576ab373eb3fa2c2b jdk-9+98
f008e8cc10d5b3212fb22d58c96fa01d38654f19 jdk-9+99

View file

@ -677,12 +677,6 @@ class JVMCIArchiveParticipant:
assert service assert service
self.services.setdefault(service, []).append(provider) self.services.setdefault(service, []).append(provider)
return True return True
elif arcname.endswith('_OptionDescriptors.class'):
# Need to create service files for the providers of the
# jdk.vm.ci.options.Options service created by
# jdk.vm.ci.options.processor.OptionProcessor.
provider = arcname[:-len('.class'):].replace('/', '.')
self.services.setdefault('jdk.vm.ci.options.OptionDescriptors', []).append(provider)
return False return False
def __addsrc__(self, arcname, contents): def __addsrc__(self, arcname, contents):
@ -761,21 +755,6 @@ class JVMCI9JDKConfig(mx.JDKConfig):
if jacocoArgs: if jacocoArgs:
args = jacocoArgs + args args = jacocoArgs + args
# Support for -G: options
def translateGOption(arg):
if arg.startswith('-G:+'):
if '=' in arg:
mx.abort('Mixing + and = in -G: option specification: ' + arg)
arg = '-Djvmci.option.' + arg[len('-G:+'):] + '=true'
elif arg.startswith('-G:-'):
if '=' in arg:
mx.abort('Mixing - and = in -G: option specification: ' + arg)
arg = '-Djvmci.option.' + arg[len('-G:+'):] + '=false'
elif arg.startswith('-G:'):
arg = '-Djvmci.option.' + arg[len('-G:'):]
return arg
args = map(translateGOption, args)
args = ['-Xbootclasspath/p:' + dep.classpath_repr() for dep in _jvmci_bootclasspath_prepends] + args args = ['-Xbootclasspath/p:' + dep.classpath_repr() for dep in _jvmci_bootclasspath_prepends] + args
jvmciModeArgs = _jvmciModes[_vm.jvmciMode] jvmciModeArgs = _jvmciModes[_vm.jvmciMode]

View file

@ -109,7 +109,6 @@ suite = {
"jdk.vm.ci.code", "jdk.vm.ci.code",
], ],
"checkstyle" : "jdk.vm.ci.service", "checkstyle" : "jdk.vm.ci.service",
"annotationProcessors" : ["JVMCI_OPTIONS_PROCESSOR"],
"javaCompliance" : "1.8", "javaCompliance" : "1.8",
"workingSets" : "API,JVMCI", "workingSets" : "API,JVMCI",
}, },
@ -135,40 +134,17 @@ suite = {
"workingSets" : "JVMCI", "workingSets" : "JVMCI",
}, },
"jdk.vm.ci.options" : {
"subDir" : "src/jdk.vm.ci/share/classes",
"sourceDirs" : ["src"],
"checkstyle" : "jdk.vm.ci.service",
"dependencies" : ["jdk.vm.ci.inittimer"],
"javaCompliance" : "1.8",
"workingSets" : "JVMCI",
},
"jdk.vm.ci.options.processor" : {
"subDir" : "src/jdk.vm.ci/share/classes",
"sourceDirs" : ["src"],
"dependencies" : [
"jdk.vm.ci.options",
],
"checkstyle" : "jdk.vm.ci.service",
"javaCompliance" : "1.8",
"workingSets" : "JVMCI,Codegen",
},
"jdk.vm.ci.options.test" : {
"subDir" : "test/compiler/jvmci",
"sourceDirs" : ["src"],
"dependencies" : [
"jdk.vm.ci.options",
"mx:JUNIT",
],
"checkstyle" : "jdk.vm.ci.service",
"javaCompliance" : "1.8",
"workingSets" : "JVMCI",
},
# ------------- JVMCI:HotSpot ------------- # ------------- JVMCI:HotSpot -------------
"jdk.vm.ci.aarch64" : {
"subDir" : "src/jdk.vm.ci/share/classes",
"sourceDirs" : ["src"],
"dependencies" : ["jdk.vm.ci.code"],
"checkstyle" : "jdk.vm.ci.service",
"javaCompliance" : "1.8",
"workingSets" : "JVMCI,AArch64",
},
"jdk.vm.ci.amd64" : { "jdk.vm.ci.amd64" : {
"subDir" : "src/jdk.vm.ci/share/classes", "subDir" : "src/jdk.vm.ci/share/classes",
"sourceDirs" : ["src"], "sourceDirs" : ["src"],
@ -191,15 +167,12 @@ suite = {
"subDir" : "src/jdk.vm.ci/share/classes", "subDir" : "src/jdk.vm.ci/share/classes",
"sourceDirs" : ["src"], "sourceDirs" : ["src"],
"dependencies" : [ "dependencies" : [
"jdk.vm.ci.options",
"jdk.vm.ci.hotspotvmconfig", "jdk.vm.ci.hotspotvmconfig",
"jdk.vm.ci.common", "jdk.vm.ci.common",
"jdk.vm.ci.inittimer",
"jdk.vm.ci.runtime", "jdk.vm.ci.runtime",
"jdk.vm.ci.service", "jdk.vm.ci.service",
], ],
"annotationProcessors" : [
"JVMCI_OPTIONS_PROCESSOR",
],
"checkstyle" : "jdk.vm.ci.service", "checkstyle" : "jdk.vm.ci.service",
"javaCompliance" : "1.8", "javaCompliance" : "1.8",
"workingSets" : "JVMCI", "workingSets" : "JVMCI",
@ -213,6 +186,21 @@ suite = {
"workingSets" : "JVMCI,HotSpot", "workingSets" : "JVMCI,HotSpot",
}, },
"jdk.vm.ci.hotspot.aarch64" : {
"subDir" : "src/jdk.vm.ci/share/classes",
"sourceDirs" : ["src"],
"dependencies" : [
"jdk.vm.ci.aarch64",
"jdk.vm.ci.hotspot",
],
"checkstyle" : "jdk.vm.ci.service",
"annotationProcessors" : [
"JVMCI_SERVICE_PROCESSOR",
],
"javaCompliance" : "1.8",
"workingSets" : "JVMCI,HotSpot,AArch64",
},
"jdk.vm.ci.hotspot.amd64" : { "jdk.vm.ci.hotspot.amd64" : {
"subDir" : "src/jdk.vm.ci/share/classes", "subDir" : "src/jdk.vm.ci/share/classes",
"sourceDirs" : ["src"], "sourceDirs" : ["src"],
@ -258,22 +246,17 @@ suite = {
"dependencies" : ["jdk.vm.ci.service"], "dependencies" : ["jdk.vm.ci.service"],
}, },
"JVMCI_OPTIONS" : {
"subDir" : "src/jdk.vm.ci/share/classes",
"dependencies" : ["jdk.vm.ci.options"],
},
"JVMCI_API" : { "JVMCI_API" : {
"subDir" : "src/jdk.vm.ci/share/classes", "subDir" : "src/jdk.vm.ci/share/classes",
"dependencies" : [ "dependencies" : [
"jdk.vm.ci.inittimer", "jdk.vm.ci.inittimer",
"jdk.vm.ci.runtime", "jdk.vm.ci.runtime",
"jdk.vm.ci.common", "jdk.vm.ci.common",
"jdk.vm.ci.aarch64",
"jdk.vm.ci.amd64", "jdk.vm.ci.amd64",
"jdk.vm.ci.sparc", "jdk.vm.ci.sparc",
], ],
"distDependencies" : [ "distDependencies" : [
"JVMCI_OPTIONS",
"JVMCI_SERVICE", "JVMCI_SERVICE",
], ],
}, },
@ -288,6 +271,7 @@ suite = {
"JVMCI_HOTSPOT" : { "JVMCI_HOTSPOT" : {
"subDir" : "src/jdk.vm.ci/share/classes", "subDir" : "src/jdk.vm.ci/share/classes",
"dependencies" : [ "dependencies" : [
"jdk.vm.ci.hotspot.aarch64",
"jdk.vm.ci.hotspot.amd64", "jdk.vm.ci.hotspot.amd64",
"jdk.vm.ci.hotspot.sparc", "jdk.vm.ci.hotspot.sparc",
], ],
@ -301,7 +285,6 @@ suite = {
"JVMCI_TEST" : { "JVMCI_TEST" : {
"subDir" : "test/compiler/jvmci", "subDir" : "test/compiler/jvmci",
"dependencies" : [ "dependencies" : [
"jdk.vm.ci.options.test",
"jdk.vm.ci.runtime.test", "jdk.vm.ci.runtime.test",
], ],
"distDependencies" : [ "distDependencies" : [
@ -310,13 +293,6 @@ suite = {
"exclude" : ["mx:JUNIT"], "exclude" : ["mx:JUNIT"],
}, },
"JVMCI_OPTIONS_PROCESSOR" : {
"subDir" : "src/jdk.vm.ci/share/classes",
"dependencies" : ["jdk.vm.ci.options.processor"],
"distDependencies" : [
"JVMCI_OPTIONS",
],
},
"JVMCI_SERVICE_PROCESSOR" : { "JVMCI_SERVICE_PROCESSOR" : {
"subDir" : "src/jdk.vm.ci/share/classes", "subDir" : "src/jdk.vm.ci/share/classes",
@ -332,25 +308,23 @@ suite = {
"subDir" : "src/jdk.vm.ci/share/classes", "subDir" : "src/jdk.vm.ci/share/classes",
"overlaps" : [ "overlaps" : [
"JVMCI_API", "JVMCI_API",
"JVMCI_OPTIONS",
"JVMCI_SERVICE", "JVMCI_SERVICE",
"JVMCI_HOTSPOT", "JVMCI_HOTSPOT",
"JVMCI_HOTSPOTVMCONFIG", "JVMCI_HOTSPOTVMCONFIG",
"JVMCI_SERVICE_PROCESSOR", "JVMCI_SERVICE_PROCESSOR",
"JVMCI_OPTIONS_PROCESSOR"
], ],
"dependencies" : [ "dependencies" : [
"jdk.vm.ci.options",
"jdk.vm.ci.service", "jdk.vm.ci.service",
"jdk.vm.ci.inittimer", "jdk.vm.ci.inittimer",
"jdk.vm.ci.runtime", "jdk.vm.ci.runtime",
"jdk.vm.ci.common", "jdk.vm.ci.common",
"jdk.vm.ci.aarch64",
"jdk.vm.ci.amd64", "jdk.vm.ci.amd64",
"jdk.vm.ci.sparc", "jdk.vm.ci.sparc",
"jdk.vm.ci.hotspotvmconfig", "jdk.vm.ci.hotspotvmconfig",
"jdk.vm.ci.hotspot.aarch64",
"jdk.vm.ci.hotspot.amd64", "jdk.vm.ci.hotspot.amd64",
"jdk.vm.ci.hotspot.sparc", "jdk.vm.ci.hotspot.sparc",
"jdk.vm.ci.options.processor",
"jdk.vm.ci.service.processor" "jdk.vm.ci.service.processor"
], ],
}, },

View file

@ -1,6 +1,6 @@
# #
# Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. # Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
# Copyright 2012, 2013 SAP AG. All rights reserved. # Copyright 2012, 2015 SAP AG. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
# #
# This code is free software; you can redistribute it and/or modify it # This code is free software; you can redistribute it and/or modify it
@ -61,10 +61,6 @@ ifndef CC_INTERP
FORCE_TIERED=1 FORCE_TIERED=1
endif endif
endif endif
# C1 is not ported on ppc64(le), so we cannot build a tiered VM:
ifneq (,$(filter $(ARCH),ppc64 pp64le))
FORCE_TIERED=0
endif
ifdef LP64 ifdef LP64
ifeq ("$(filter $(LP64_ARCH),$(BUILDARCH))","") ifeq ("$(filter $(LP64_ARCH),$(BUILDARCH))","")

View file

@ -68,5 +68,5 @@ MAPFILE = $(GAMMADIR)/make/aix/makefiles/mapfile-vers-debug
LFLAGS_QIPA= LFLAGS_QIPA=
VERSION = optimized VERSION = optimized
SYSDEFS += -DASSERT -DFASTDEBUG SYSDEFS += -DASSERT
PICFLAGS = DEFAULT PICFLAGS = DEFAULT

View file

@ -0,0 +1,32 @@
#
# Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved.
# Copyright 2012, 2015 SAP AG. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# Sets make macros for making tiered version of VM
TYPE=TIERED
VM_SUBDIR = server
CFLAGS += -DCOMPILER2 -DCOMPILER1

View file

@ -107,8 +107,8 @@ ifeq ($(INCLUDE_NMT), false)
memTracker.cpp nmtDCmd.cpp mallocSiteTable.cpp memTracker.cpp nmtDCmd.cpp mallocSiteTable.cpp
endif endif
ifneq (,$(findstring $(Platform_arch_model), x86_64, sparc)) ifneq (,$(findstring $(Platform_arch_model), aarch64, arm_64, sparc, x86_64))
# JVMCI is supported only on x86_64 and SPARC. # JVMCI is supported
else else
INCLUDE_JVMCI := false INCLUDE_JVMCI := false
endif endif

View file

@ -36,15 +36,6 @@ SRC_DIR := $(HOTSPOT_TOPDIR)/src/jdk.vm.ci/share/classes
################################################################################ ################################################################################
# Compile the annotation processor # Compile the annotation processor
$(eval $(call SetupJavaCompilation, BUILD_JVMCI_OPTIONS, \
SETUP := GENERATE_OLDBYTECODE, \
SRC := $(SRC_DIR)/jdk.vm.ci.options/src \
$(SRC_DIR)/jdk.vm.ci.options.processor/src \
$(SRC_DIR)/jdk.vm.ci.inittimer/src, \
BIN := $(BUILDTOOLS_OUTPUTDIR)/jvmci_options, \
JAR := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.ci.options.jar, \
))
$(eval $(call SetupJavaCompilation, BUILD_JVMCI_SERVICE, \ $(eval $(call SetupJavaCompilation, BUILD_JVMCI_SERVICE, \
SETUP := GENERATE_OLDBYTECODE, \ SETUP := GENERATE_OLDBYTECODE, \
SRC := $(SRC_DIR)/jdk.vm.ci.service/src \ SRC := $(SRC_DIR)/jdk.vm.ci.service/src \
@ -57,6 +48,7 @@ $(eval $(call SetupJavaCompilation, BUILD_JVMCI_SERVICE, \
PROC_SRC_SUBDIRS := \ PROC_SRC_SUBDIRS := \
jdk.vm.ci.hotspot \ jdk.vm.ci.hotspot \
jdk.vm.ci.hotspot.aarch64 \
jdk.vm.ci.hotspot.amd64 \ jdk.vm.ci.hotspot.amd64 \
jdk.vm.ci.hotspot.sparc \ jdk.vm.ci.hotspot.sparc \
jdk.vm.ci.runtime \ jdk.vm.ci.runtime \
@ -69,11 +61,10 @@ PROC_SRCS := $(filter %.java, $(call CacheFind, $(PROC_SRC_DIRS)))
ALL_SRC_DIRS := $(wildcard $(SRC_DIR)/*/src) ALL_SRC_DIRS := $(wildcard $(SRC_DIR)/*/src)
SOURCEPATH := $(call PathList, $(ALL_SRC_DIRS)) SOURCEPATH := $(call PathList, $(ALL_SRC_DIRS))
PROCESSOR_PATH := $(call PathList, \ PROCESSOR_PATH := $(call PathList, \
$(BUILDTOOLS_OUTPUTDIR)/jdk.vm.ci.options.jar \
$(BUILDTOOLS_OUTPUTDIR)/jdk.vm.ci.service.jar) $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.ci.service.jar)
$(GENSRC_DIR)/_gensrc_proc_done: $(PROC_SRCS) \ $(GENSRC_DIR)/_gensrc_proc_done: $(PROC_SRCS) \
$(BUILD_JVMCI_OPTIONS) $(BUILD_JVMCI_SERVICE) $(BUILD_JVMCI_SERVICE)
$(MKDIR) -p $(@D) $(MKDIR) -p $(@D)
$(eval $(call ListPathsSafely,PROC_SRCS,$(@D)/_gensrc_proc_files)) $(eval $(call ListPathsSafely,PROC_SRCS,$(@D)/_gensrc_proc_files))
$(JAVA_SMALL) $(NEW_JAVAC) \ $(JAVA_SMALL) $(NEW_JAVAC) \
@ -91,15 +82,6 @@ TARGETS += $(GENSRC_DIR)/_gensrc_proc_done
################################################################################ ################################################################################
$(GENSRC_DIR)/META-INF/services/jdk.vm.ci.options.OptionDescriptors: \
$(GENSRC_DIR)/_gensrc_proc_done
$(MKDIR) -p $(@D)
$(FIND) $(GENSRC_DIR) -name '*_OptionDescriptors.java' | $(SED) 's:.*/jdk\.vm\.ci/\(.*\)\.java:\1:' | $(TR) '/' '.' > $@
TARGETS += $(GENSRC_DIR)/META-INF/services/jdk.vm.ci.options.OptionDescriptors
################################################################################
$(GENSRC_DIR)/_providers_converted: $(GENSRC_DIR)/_gensrc_proc_done $(GENSRC_DIR)/_providers_converted: $(GENSRC_DIR)/_gensrc_proc_done
$(MKDIR) -p $(GENSRC_DIR)/META-INF/services $(MKDIR) -p $(GENSRC_DIR)/META-INF/services
($(CD) $(GENSRC_DIR)/META-INF/jvmci.providers && \ ($(CD) $(GENSRC_DIR)/META-INF/jvmci.providers && \

View file

@ -57,14 +57,6 @@ ifndef CC_INTERP
FORCE_TIERED=1 FORCE_TIERED=1
endif endif
endif endif
# C1 is not ported on ppc64, so we cannot build a tiered VM:
# Notice: after 8046471 ARCH will be 'ppc' for top-level ppc64 builds but
# 'ppc64' for HotSpot-only ppc64 builds. Need to detect both variants here!
ifneq (,$(findstring $(ARCH), ppc ppc64))
ifeq ($(ARCH_DATA_MODEL), 64)
FORCE_TIERED=0
endif
endif
ifdef LP64 ifdef LP64
ifeq ("$(filter $(LP64_ARCH),$(BUILDARCH))","") ifeq ("$(filter $(LP64_ARCH),$(BUILDARCH))","")

View file

@ -46,6 +46,8 @@ BUILD_HOTSPOT_JTREG_NATIVE_SRC := \
$(HOTSPOT_TOPDIR)/test/runtime/jni/8033445 \ $(HOTSPOT_TOPDIR)/test/runtime/jni/8033445 \
$(HOTSPOT_TOPDIR)/test/runtime/jni/ToStringInInterfaceTest \ $(HOTSPOT_TOPDIR)/test/runtime/jni/ToStringInInterfaceTest \
$(HOTSPOT_TOPDIR)/test/runtime/SameObject \ $(HOTSPOT_TOPDIR)/test/runtime/SameObject \
$(HOTSPOT_TOPDIR)/test/compiler/floatingpoint/ \
$(HOTSPOT_TOPDIR)/test/compiler/calls \
# #
# Add conditional directories here when needed. # Add conditional directories here when needed.

View file

@ -3484,10 +3484,14 @@ int Matcher::regnum_to_fpu_offset(int regnum)
return 0; return 0;
} }
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) // Is this branch offset short enough that a short branch can be used?
{ //
Unimplemented(); // NOTE: If the platform does not provide any short branch variants, then
return false; // this method should return false for offset 0.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
// The passed offset is relative to address of the branch.
return (-32768 <= offset && offset < 32768);
} }
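The ±32768-byte window corresponds to AArch64's tbz/tbnz encoding: a signed 14-bit immediate scaled by the 4-byte instruction size, i.e. 2^13 words in either direction (quick check below):

    echo $(( (1 << 13) * 4 ))   # 32768, the reach of a 14-bit signed, word-scaled branch offset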
const bool Matcher::isSimpleConstant64(jlong value) { const bool Matcher::isSimpleConstant64(jlong value) {
@ -4667,17 +4671,12 @@ encode %{
if (!_method) { if (!_method) {
// A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap. // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf); call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
} else if (_optimized_virtual) {
call = __ trampoline_call(Address(addr, relocInfo::opt_virtual_call_type), &cbuf);
} else { } else {
call = __ trampoline_call(Address(addr, relocInfo::static_call_type), &cbuf); int method_index = resolved_method_index(cbuf);
} RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
if (call == NULL) { : static_call_Relocation::spec(method_index);
ciEnv::current()->record_failure("CodeCache is full"); call = __ trampoline_call(Address(addr, rspec), &cbuf);
return;
}
if (_method) {
// Emit stub for static call // Emit stub for static call
address stub = CompiledStaticCall::emit_to_interp_stub(cbuf); address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
if (stub == NULL) { if (stub == NULL) {
@ -4685,11 +4684,16 @@ encode %{
return; return;
} }
} }
if (call == NULL) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
%} %}
enc_class aarch64_enc_java_dynamic_call(method meth) %{ enc_class aarch64_enc_java_dynamic_call(method meth) %{
MacroAssembler _masm(&cbuf); MacroAssembler _masm(&cbuf);
address call = __ ic_call((address)$meth$$method); int method_index = resolved_method_index(cbuf);
address call = __ ic_call((address)$meth$$method, method_index);
if (call == NULL) { if (call == NULL) {
ciEnv::current()->record_failure("CodeCache is full"); ciEnv::current()->record_failure("CodeCache is full");
return; return;
@ -13845,7 +13849,8 @@ instruct cmpP_narrowOop_imm0_branch(cmpOp cmp, iRegN oop, immP0 zero, label labl
// Test bit and Branch // Test bit and Branch
instruct cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{ // Patterns for short (< 32KiB) variants
instruct cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl) %{
match(If cmp (CmpL op1 op2)); match(If cmp (CmpL op1 op2));
predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
|| n->in(1)->as_Bool()->_test._test == BoolTest::ge); || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
@ -13855,16 +13860,15 @@ instruct cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl, rFlagsReg
format %{ "cb$cmp $op1, $labl # long" %} format %{ "cb$cmp $op1, $labl # long" %}
ins_encode %{ ins_encode %{
Label* L = $labl$$label; Label* L = $labl$$label;
Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode; Assembler::Condition cond =
if (cond == Assembler::LT) ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
__ tbnz($op1$$Register, 63, *L); __ tbr(cond, $op1$$Register, 63, *L);
else
__ tbz($op1$$Register, 63, *L);
%} %}
ins_pipe(pipe_cmp_branch); ins_pipe(pipe_cmp_branch);
ins_short_branch(1);
%} %}
instruct cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{ instruct cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl) %{
match(If cmp (CmpI op1 op2)); match(If cmp (CmpI op1 op2));
predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
|| n->in(1)->as_Bool()->_test._test == BoolTest::ge); || n->in(1)->as_Bool()->_test._test == BoolTest::ge);
@ -13874,16 +13878,15 @@ instruct cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl, rFla
format %{ "cb$cmp $op1, $labl # int" %} format %{ "cb$cmp $op1, $labl # int" %}
ins_encode %{ ins_encode %{
Label* L = $labl$$label; Label* L = $labl$$label;
Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode; Assembler::Condition cond =
if (cond == Assembler::LT) ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
__ tbnz($op1$$Register, 31, *L); __ tbr(cond, $op1$$Register, 31, *L);
else
__ tbz($op1$$Register, 31, *L);
%} %}
ins_pipe(pipe_cmp_branch); ins_pipe(pipe_cmp_branch);
ins_short_branch(1);
%} %}
instruct cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl, rFlagsReg cr) %{ instruct cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
match(If cmp (CmpL (AndL op1 op2) op3)); match(If cmp (CmpL (AndL op1 op2) op3));
predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
|| n->in(1)->as_Bool()->_test._test == BoolTest::eq) || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
@ -13896,15 +13899,13 @@ instruct cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl,
Label* L = $labl$$label; Label* L = $labl$$label;
Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode; Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
int bit = exact_log2($op2$$constant); int bit = exact_log2($op2$$constant);
if (cond == Assembler::EQ) __ tbr(cond, $op1$$Register, bit, *L);
__ tbz($op1$$Register, bit, *L);
else
__ tbnz($op1$$Register, bit, *L);
%} %}
ins_pipe(pipe_cmp_branch); ins_pipe(pipe_cmp_branch);
ins_short_branch(1);
%} %}
instruct cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl, rFlagsReg cr) %{ instruct cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
match(If cmp (CmpI (AndI op1 op2) op3)); match(If cmp (CmpI (AndI op1 op2) op3));
predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
|| n->in(1)->as_Bool()->_test._test == BoolTest::eq) || n->in(1)->as_Bool()->_test._test == BoolTest::eq)
@ -13917,10 +13918,79 @@ instruct cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label l
Label* L = $labl$$label; Label* L = $labl$$label;
Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode; Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
int bit = exact_log2($op2$$constant); int bit = exact_log2($op2$$constant);
if (cond == Assembler::EQ) __ tbr(cond, $op1$$Register, bit, *L);
__ tbz($op1$$Register, bit, *L); %}
else ins_pipe(pipe_cmp_branch);
__ tbnz($op1$$Register, bit, *L); ins_short_branch(1);
%}
// And far variants
instruct far_cmpL_branch_sign(cmpOp cmp, iRegL op1, immL0 op2, label labl) %{
match(If cmp (CmpL op1 op2));
predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
|| n->in(1)->as_Bool()->_test._test == BoolTest::ge);
effect(USE labl);
ins_cost(BRANCH_COST);
format %{ "cb$cmp $op1, $labl # long" %}
ins_encode %{
Label* L = $labl$$label;
Assembler::Condition cond =
((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
__ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
%}
ins_pipe(pipe_cmp_branch);
%}
instruct far_cmpI_branch_sign(cmpOp cmp, iRegIorL2I op1, immI0 op2, label labl) %{
match(If cmp (CmpI op1 op2));
predicate(n->in(1)->as_Bool()->_test._test == BoolTest::lt
|| n->in(1)->as_Bool()->_test._test == BoolTest::ge);
effect(USE labl);
ins_cost(BRANCH_COST);
format %{ "cb$cmp $op1, $labl # int" %}
ins_encode %{
Label* L = $labl$$label;
Assembler::Condition cond =
((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
__ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
%}
ins_pipe(pipe_cmp_branch);
%}
instruct far_cmpL_branch_bit(cmpOp cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
match(If cmp (CmpL (AndL op1 op2) op3));
predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
|| n->in(1)->as_Bool()->_test._test == BoolTest::eq)
&& is_power_of_2(n->in(2)->in(1)->in(2)->get_long()));
effect(USE labl);
ins_cost(BRANCH_COST);
format %{ "tb$cmp $op1, $op2, $labl" %}
ins_encode %{
Label* L = $labl$$label;
Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
int bit = exact_log2($op2$$constant);
__ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
%}
ins_pipe(pipe_cmp_branch);
%}
instruct far_cmpI_branch_bit(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
match(If cmp (CmpI (AndI op1 op2) op3));
predicate((n->in(1)->as_Bool()->_test._test == BoolTest::ne
|| n->in(1)->as_Bool()->_test._test == BoolTest::eq)
&& is_power_of_2(n->in(2)->in(1)->in(2)->get_int()));
effect(USE labl);
ins_cost(BRANCH_COST);
format %{ "tb$cmp $op1, $op2, $labl" %}
ins_encode %{
Label* L = $labl$$label;
Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
int bit = exact_log2($op2$$constant);
__ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
%} %}
ins_pipe(pipe_cmp_branch); ins_pipe(pipe_cmp_branch);
%} %}
@ -15318,6 +15388,124 @@ instruct vmul2D(vecX dst, vecX src1, vecX src2)
ins_pipe(pipe_class_default); ins_pipe(pipe_class_default);
%} %}
// --------------------------------- MLA --------------------------------------
instruct vmla4S(vecD dst, vecD src1, vecD src2)
%{
predicate(n->as_Vector()->length() == 2 ||
n->as_Vector()->length() == 4);
match(Set dst (AddVS dst (MulVS src1 src2)));
ins_cost(INSN_COST);
format %{ "mlav $dst,$src1,$src2\t# vector (4H)" %}
ins_encode %{
__ mlav(as_FloatRegister($dst$$reg), __ T4H,
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(pipe_class_default);
%}
instruct vmla8S(vecX dst, vecX src1, vecX src2)
%{
predicate(n->as_Vector()->length() == 8);
match(Set dst (AddVS dst (MulVS src1 src2)));
ins_cost(INSN_COST);
format %{ "mlav $dst,$src1,$src2\t# vector (8H)" %}
ins_encode %{
__ mlav(as_FloatRegister($dst$$reg), __ T8H,
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(pipe_class_default);
%}
instruct vmla2I(vecD dst, vecD src1, vecD src2)
%{
predicate(n->as_Vector()->length() == 2);
match(Set dst (AddVI dst (MulVI src1 src2)));
ins_cost(INSN_COST);
format %{ "mlav $dst,$src1,$src2\t# vector (2S)" %}
ins_encode %{
__ mlav(as_FloatRegister($dst$$reg), __ T2S,
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(pipe_class_default);
%}
instruct vmla4I(vecX dst, vecX src1, vecX src2)
%{
predicate(n->as_Vector()->length() == 4);
match(Set dst (AddVI dst (MulVI src1 src2)));
ins_cost(INSN_COST);
format %{ "mlav $dst,$src1,$src2\t# vector (4S)" %}
ins_encode %{
__ mlav(as_FloatRegister($dst$$reg), __ T4S,
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(pipe_class_default);
%}
// --------------------------------- MLS --------------------------------------
instruct vmls4S(vecD dst, vecD src1, vecD src2)
%{
predicate(n->as_Vector()->length() == 2 ||
n->as_Vector()->length() == 4);
match(Set dst (SubVS dst (MulVS src1 src2)));
ins_cost(INSN_COST);
format %{ "mlsv $dst,$src1,$src2\t# vector (4H)" %}
ins_encode %{
__ mlsv(as_FloatRegister($dst$$reg), __ T4H,
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(pipe_class_default);
%}
instruct vmls8S(vecX dst, vecX src1, vecX src2)
%{
predicate(n->as_Vector()->length() == 8);
match(Set dst (SubVS dst (MulVS src1 src2)));
ins_cost(INSN_COST);
format %{ "mlsv $dst,$src1,$src2\t# vector (8H)" %}
ins_encode %{
__ mlsv(as_FloatRegister($dst$$reg), __ T8H,
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(pipe_class_default);
%}
instruct vmls2I(vecD dst, vecD src1, vecD src2)
%{
predicate(n->as_Vector()->length() == 2);
match(Set dst (SubVI dst (MulVI src1 src2)));
ins_cost(INSN_COST);
format %{ "mlsv $dst,$src1,$src2\t# vector (2S)" %}
ins_encode %{
__ mlsv(as_FloatRegister($dst$$reg), __ T2S,
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(pipe_class_default);
%}
instruct vmls4I(vecX dst, vecX src1, vecX src2)
%{
predicate(n->as_Vector()->length() == 4);
match(Set dst (SubVI dst (MulVI src1 src2)));
ins_cost(INSN_COST);
format %{ "mlsv $dst,$src1,$src2\t# vector (4S)" %}
ins_encode %{
__ mlsv(as_FloatRegister($dst$$reg), __ T4S,
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(pipe_class_default);
%}
// --------------------------------- DIV -------------------------------------- // --------------------------------- DIV --------------------------------------
instruct vdiv2F(vecD dst, vecD src1, vecD src2) instruct vdiv2F(vecD dst, vecD src1, vecD src2)

View file

@ -135,15 +135,10 @@ REGISTER_DECLARATION(Register, rlocals, r24);
// bytecode pointer // bytecode pointer
REGISTER_DECLARATION(Register, rbcp, r22); REGISTER_DECLARATION(Register, rbcp, r22);
// Dispatch table base // Dispatch table base
REGISTER_DECLARATION(Register, rdispatch, r21); REGISTER_DECLARATION(Register, rdispatch, r21);
// Java stack pointer // Java stack pointer
REGISTER_DECLARATION(Register, esp, r20); REGISTER_DECLARATION(Register, esp, r20);
// TODO : x86 uses rbp to save SP in method handle code
// we may need to do the same with fp
// JSR 292 fixed register usages:
//REGISTER_DECLARATION(Register, r_mh_SP_save, r29);
#define assert_cond(ARG1) assert(ARG1, #ARG1) #define assert_cond(ARG1) assert(ARG1, #ARG1)
namespace asm_util { namespace asm_util {
@ -551,6 +546,7 @@ class Address VALUE_OBJ_CLASS_SPEC {
size = 0; break; size = 0; break;
default: default:
ShouldNotReachHere(); ShouldNotReachHere();
size = 0; // unreachable
} }
} else { } else {
size = i->get(31, 31); size = i->get(31, 31);
@ -2041,6 +2037,8 @@ public:
INSN(addv, 0, 0b100001); INSN(addv, 0, 0b100001);
INSN(subv, 1, 0b100001); INSN(subv, 1, 0b100001);
INSN(mulv, 0, 0b100111); INSN(mulv, 0, 0b100111);
INSN(mlav, 0, 0b100101);
INSN(mlsv, 1, 0b100101);
INSN(sshl, 0, 0b010001); INSN(sshl, 0, 0b010001);
INSN(ushl, 1, 0b010001); INSN(ushl, 1, 0b010001);

View file

@ -173,6 +173,7 @@ static jlong as_long(LIR_Opr data) {
break; break;
default: default:
ShouldNotReachHere(); ShouldNotReachHere();
result = 0; // unreachable
} }
return result; return result;
} }
@ -720,6 +721,7 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
break; break;
default: default:
ShouldNotReachHere(); ShouldNotReachHere();
insn = &Assembler::str; // unreachable
} }
if (info) add_debug_info_for_null_check_here(info); if (info) add_debug_info_for_null_check_here(info);
@ -1110,6 +1112,7 @@ void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::HS : Assembler::GE); break; case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::HS : Assembler::GE); break;
case lir_cond_greater: acond = (is_unordered ? Assembler::HI : Assembler::GT); break; case lir_cond_greater: acond = (is_unordered ? Assembler::HI : Assembler::GT); break;
default: ShouldNotReachHere(); default: ShouldNotReachHere();
acond = Assembler::EQ; // unreachable
} }
} else { } else {
switch (op->cond()) { switch (op->cond()) {
@ -1121,7 +1124,8 @@ void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
case lir_cond_greater: acond = Assembler::GT; break; case lir_cond_greater: acond = Assembler::GT; break;
case lir_cond_belowEqual: acond = Assembler::LS; break; case lir_cond_belowEqual: acond = Assembler::LS; break;
case lir_cond_aboveEqual: acond = Assembler::HS; break; case lir_cond_aboveEqual: acond = Assembler::HS; break;
default: ShouldNotReachHere(); default: ShouldNotReachHere();
acond = Assembler::EQ; // unreachable
} }
} }
__ br(acond,*(op->label())); __ br(acond,*(op->label()));
@ -1313,7 +1317,9 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
ciMethodData* md; ciMethodData* md;
ciProfileData* data; ciProfileData* data;
if (op->should_profile()) { const bool should_profile = op->should_profile();
if (should_profile) {
ciMethod* method = op->profiled_method(); ciMethod* method = op->profiled_method();
assert(method != NULL, "Should have method"); assert(method != NULL, "Should have method");
int bci = op->profiled_bci(); int bci = op->profiled_bci();
@ -1324,8 +1330,8 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
} }
Label profile_cast_success, profile_cast_failure; Label profile_cast_success, profile_cast_failure;
Label *success_target = op->should_profile() ? &profile_cast_success : success; Label *success_target = should_profile ? &profile_cast_success : success;
Label *failure_target = op->should_profile() ? &profile_cast_failure : failure; Label *failure_target = should_profile ? &profile_cast_failure : failure;
if (obj == k_RInfo) { if (obj == k_RInfo) {
k_RInfo = dst; k_RInfo = dst;
@ -1341,7 +1347,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
assert_different_registers(obj, k_RInfo, klass_RInfo); assert_different_registers(obj, k_RInfo, klass_RInfo);
if (op->should_profile()) { if (should_profile) {
Label not_null; Label not_null;
__ cbnz(obj, not_null); __ cbnz(obj, not_null);
// Object is null; update MDO and exit // Object is null; update MDO and exit
@ -1413,7 +1419,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
// successful cast, fall through to profile or jump // successful cast, fall through to profile or jump
} }
} }
if (op->should_profile()) { if (should_profile) {
Register mdo = klass_RInfo, recv = k_RInfo; Register mdo = klass_RInfo, recv = k_RInfo;
__ bind(profile_cast_success); __ bind(profile_cast_success);
__ mov_metadata(mdo, md->constant_encoding()); __ mov_metadata(mdo, md->constant_encoding());
@ -1438,6 +1444,8 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
const bool should_profile = op->should_profile();
LIR_Code code = op->code(); LIR_Code code = op->code();
if (code == lir_store_check) { if (code == lir_store_check) {
Register value = op->object()->as_register(); Register value = op->object()->as_register();
@ -1452,7 +1460,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
ciMethodData* md; ciMethodData* md;
ciProfileData* data; ciProfileData* data;
if (op->should_profile()) { if (should_profile) {
ciMethod* method = op->profiled_method(); ciMethod* method = op->profiled_method();
assert(method != NULL, "Should have method"); assert(method != NULL, "Should have method");
int bci = op->profiled_bci(); int bci = op->profiled_bci();
@ -1463,10 +1471,10 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
} }
Label profile_cast_success, profile_cast_failure, done; Label profile_cast_success, profile_cast_failure, done;
Label *success_target = op->should_profile() ? &profile_cast_success : &done; Label *success_target = should_profile ? &profile_cast_success : &done;
Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry(); Label *failure_target = should_profile ? &profile_cast_failure : stub->entry();
if (op->should_profile()) { if (should_profile) {
Label not_null; Label not_null;
__ cbnz(value, not_null); __ cbnz(value, not_null);
// Object is null; update MDO and exit // Object is null; update MDO and exit
@ -1502,7 +1510,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
__ cbzw(k_RInfo, *failure_target); __ cbzw(k_RInfo, *failure_target);
// fall through to the success case // fall through to the success case
if (op->should_profile()) { if (should_profile) {
Register mdo = klass_RInfo, recv = k_RInfo; Register mdo = klass_RInfo, recv = k_RInfo;
__ bind(profile_cast_success); __ bind(profile_cast_success);
__ mov_metadata(mdo, md->constant_encoding()); __ mov_metadata(mdo, md->constant_encoding());
@ -1621,9 +1629,10 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
case lir_cond_lessEqual: acond = Assembler::LE; ncond = Assembler::GT; break; case lir_cond_lessEqual: acond = Assembler::LE; ncond = Assembler::GT; break;
case lir_cond_greaterEqual: acond = Assembler::GE; ncond = Assembler::LT; break; case lir_cond_greaterEqual: acond = Assembler::GE; ncond = Assembler::LT; break;
case lir_cond_greater: acond = Assembler::GT; ncond = Assembler::LE; break; case lir_cond_greater: acond = Assembler::GT; ncond = Assembler::LE; break;
case lir_cond_belowEqual: Unimplemented(); break; case lir_cond_belowEqual:
case lir_cond_aboveEqual: Unimplemented(); break; case lir_cond_aboveEqual:
default: ShouldNotReachHere(); default: ShouldNotReachHere();
acond = Assembler::EQ; ncond = Assembler::NE; // unreachable
} }
assert(result->is_single_cpu() || result->is_double_cpu(), assert(result->is_single_cpu() || result->is_double_cpu(),
@ -1724,6 +1733,7 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
break; break;
default: default:
ShouldNotReachHere(); ShouldNotReachHere();
c = 0; // unreachable
break; break;
} }
@ -1926,6 +1936,7 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
break; break;
default: default:
ShouldNotReachHere(); ShouldNotReachHere();
imm = 0; // unreachable
break; break;
} }
@ -3123,6 +3134,9 @@ void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr
break; break;
default: default:
ShouldNotReachHere(); ShouldNotReachHere();
lda = &MacroAssembler::ldaxr;
add = &MacroAssembler::add;
stl = &MacroAssembler::stlxr; // unreachable
} }
switch (code) { switch (code) {

View file

@ -238,6 +238,7 @@ LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
} }
} else { } else {
ShouldNotReachHere(); ShouldNotReachHere();
r = NULL; // unreachable
} }
return r; return r;
} }

View file

@ -27,6 +27,7 @@
#define CPU_AARCH64_VM_C1_MACROASSEMBLER_AARCH64_HPP #define CPU_AARCH64_VM_C1_MACROASSEMBLER_AARCH64_HPP
using MacroAssembler::build_frame; using MacroAssembler::build_frame;
using MacroAssembler::null_check;
// C1_MacroAssembler contains high-level macros for C1 // C1_MacroAssembler contains high-level macros for C1

View file

@ -433,11 +433,11 @@ frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
// This is the sp before any possible extension (adapter/locals). // This is the sp before any possible extension (adapter/locals).
intptr_t* unextended_sp = interpreter_frame_sender_sp(); intptr_t* unextended_sp = interpreter_frame_sender_sp();
#ifdef COMPILER2 #if defined(COMPILER2) || INCLUDE_JVMCI
if (map->update_map()) { if (map->update_map()) {
update_map_with_saved_link(map, (intptr_t**) addr_at(link_offset)); update_map_with_saved_link(map, (intptr_t**) addr_at(link_offset));
} }
#endif // COMPILER2 #endif // COMPILER2 || INCLUDE_JVMCI
return frame(sender_sp, unextended_sp, link(), sender_pc()); return frame(sender_sp, unextended_sp, link(), sender_pc());
} }

View file

@ -28,6 +28,10 @@
const int StackAlignmentInBytes = 16; const int StackAlignmentInBytes = 16;
// Indicates whether the C calling conventions require that
// 32-bit integer argument values are extended to 64 bits.
const bool CCallingConventionRequiresIntsAsLongs = false;
#define SUPPORTS_NATIVE_CX8 #define SUPPORTS_NATIVE_CX8
// The maximum B/BL offset range on AArch64 is 128MB. // The maximum B/BL offset range on AArch64 is 128MB.

View file

@ -40,14 +40,7 @@ define_pd_global(bool, ImplicitNullChecks, true); // Generate code for im
define_pd_global(bool, TrapBasedNullChecks, false); define_pd_global(bool, TrapBasedNullChecks, false);
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs passed to check cast define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs passed to check cast
// See 4827828 for this change. There is no globals_core_i486.hpp. I can't #if defined(COMPILER2) || INCLUDE_JVMCI
// assign a different value for C2 without touching a number of files. Use
// #ifdef to minimize the change as it's late in Mantis. -- FIXME.
// c1 doesn't have this problem because the fix to 4858033 assures us
// the the vep is aligned at CodeEntryAlignment whereas c2 only aligns
// the uep and the vep doesn't get real alignment but just slops on by
// only assured that the entry instruction meets the 5 byte size requirement.
#ifdef COMPILER2
define_pd_global(intx, CodeEntryAlignment, 64); define_pd_global(intx, CodeEntryAlignment, 64);
#else #else
define_pd_global(intx, CodeEntryAlignment, 16); define_pd_global(intx, CodeEntryAlignment, 16);

View file

@ -1054,13 +1054,39 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
bind(skip_receiver_profile); bind(skip_receiver_profile);
// The method data pointer needs to be updated to reflect the new target. // The method data pointer needs to be updated to reflect the new target.
#if INCLUDE_JVMCI
if (MethodProfileWidth == 0) {
update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
}
#else // INCLUDE_JVMCI
update_mdp_by_constant(mdp, update_mdp_by_constant(mdp,
in_bytes(VirtualCallData:: in_bytes(VirtualCallData::
virtual_call_data_size())); virtual_call_data_size()));
#endif // INCLUDE_JVMCI
bind(profile_continue); bind(profile_continue);
} }
} }
#if INCLUDE_JVMCI
void InterpreterMacroAssembler::profile_called_method(Register method, Register mdp, Register reg2) {
assert_different_registers(method, mdp, reg2);
if (ProfileInterpreter && MethodProfileWidth > 0) {
Label profile_continue;
// If no method data exists, go to profile_continue.
test_method_data_pointer(mdp, profile_continue);
Label done;
record_item_in_profile_helper(method, mdp, reg2, 0, done, MethodProfileWidth,
&VirtualCallData::method_offset, &VirtualCallData::method_count_offset, in_bytes(VirtualCallData::nonprofiled_receiver_count_offset()));
bind(done);
update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
bind(profile_continue);
}
}
#endif // INCLUDE_JVMCI
// This routine creates a state machine for updating the multi-row // This routine creates a state machine for updating the multi-row
// type profile at a virtual call site (or other type-sensitive bytecode). // type profile at a virtual call site (or other type-sensitive bytecode).
// The machine visits each row (of receiver/count) until the receiver type // The machine visits each row (of receiver/count) until the receiver type
@ -1080,14 +1106,36 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
if (is_virtual_call) { if (is_virtual_call) {
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset())); increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
} }
return; #if INCLUDE_JVMCI
} else if (EnableJVMCI) {
increment_mdp_data_at(mdp, in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset()));
}
#endif // INCLUDE_JVMCI
} else {
int non_profiled_offset = -1;
if (is_virtual_call) {
non_profiled_offset = in_bytes(CounterData::count_offset());
}
#if INCLUDE_JVMCI
else if (EnableJVMCI) {
non_profiled_offset = in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset());
}
#endif // INCLUDE_JVMCI
int last_row = VirtualCallData::row_limit() - 1; record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth,
&VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset, non_profiled_offset);
}
}
void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Register mdp,
Register reg2, int start_row, Label& done, int total_rows,
OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn,
int non_profiled_offset) {
int last_row = total_rows - 1;
assert(start_row <= last_row, "must be work left to do"); assert(start_row <= last_row, "must be work left to do");
// Test this row for both the receiver and for null. // Test this row for both the item and for null.
// Take any of three different outcomes: // Take any of three different outcomes:
// 1. found receiver => increment count and goto done // 1. found item => increment count and goto done
// 2. found null => keep looking for case 1, maybe allocate this cell // 2. found null => keep looking for case 1, maybe allocate this cell
// 3. found something else => keep looking for cases 1 and 2 // 3. found something else => keep looking for cases 1 and 2
// Case 3 is handled by a recursive call. // Case 3 is handled by a recursive call.
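As plain C++ the unrolled machine behaves like the loop below; rows, item and counter_at are illustrative stand-ins for the MDO cells the generated code actually touches:

int empty_row = -1;
for (int row = 0; row < total_rows; row++) {
  if (rows[row].item == item) { rows[row].count++; return; }    // case 1
  if (rows[row].item == NULL && empty_row < 0) empty_row = row; // case 2
}
if (empty_row >= 0) {                  // claim the first free row
  rows[empty_row].item  = item;
  rows[empty_row].count = DataLayout::counter_increment;
} else if (non_profiled_offset >= 0) { // no row left: count as polymorphic
  counter_at(non_profiled_offset)++;
}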
@ -1095,55 +1143,56 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
Label next_test; Label next_test;
bool test_for_null_also = (row == start_row); bool test_for_null_also = (row == start_row);
// See if the receiver is receiver[n]. // See if the item is item[n].
int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row)); int item_offset = in_bytes(item_offset_fn(row));
test_mdp_data_at(mdp, recvr_offset, receiver, test_mdp_data_at(mdp, item_offset, item,
(test_for_null_also ? reg2 : noreg), (test_for_null_also ? reg2 : noreg),
next_test); next_test);
// (Reg2 now contains the receiver from the CallData.) // (Reg2 now contains the item from the CallData.)
// The receiver is receiver[n]. Increment count[n]. // The item is item[n]. Increment count[n].
int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row)); int count_offset = in_bytes(item_count_offset_fn(row));
increment_mdp_data_at(mdp, count_offset); increment_mdp_data_at(mdp, count_offset);
b(done); b(done);
bind(next_test); bind(next_test);
if (test_for_null_also) { if (test_for_null_also) {
Label found_null; Label found_null;
// Failed the equality check on receiver[n]... Test for null. // Failed the equality check on item[n]... Test for null.
if (start_row == last_row) { if (start_row == last_row) {
// The only thing left to do is handle the null case. // The only thing left to do is handle the null case.
if (is_virtual_call) { if (non_profiled_offset >= 0) {
cbz(reg2, found_null); cbz(reg2, found_null);
// Receiver did not match any saved receiver and there is no empty row for it. // Item did not match any saved item and there is no empty row for it.
// Increment total counter to indicate polymorphic case. // Increment total counter to indicate polymorphic case.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset())); increment_mdp_data_at(mdp, non_profiled_offset);
b(done); b(done);
bind(found_null); bind(found_null);
} else { } else {
cbz(reg2, done); cbnz(reg2, done);
} }
break; break;
} }
// Since null is rare, make it be the branch-taken case. // Since null is rare, make it be the branch-taken case.
cbz(reg2,found_null); cbz(reg2, found_null);
// Put all the "Case 3" tests here. // Put all the "Case 3" tests here.
record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done, is_virtual_call); record_item_in_profile_helper(item, mdp, reg2, start_row + 1, done, total_rows,
item_offset_fn, item_count_offset_fn, non_profiled_offset);
// Found a null. Keep searching for a matching receiver, // Found a null. Keep searching for a matching item,
// but remember that this is an empty (unused) slot. // but remember that this is an empty (unused) slot.
bind(found_null); bind(found_null);
} }
} }
// In the fall-through case, we found no matching receiver, but we // In the fall-through case, we found no matching item, but we
// observed the receiver[start_row] is NULL. // observed the item[start_row] is NULL.
// Fill in the receiver field and increment the count. // Fill in the item field and increment the count.
int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row)); int item_offset = in_bytes(item_offset_fn(start_row));
set_mdp_data_at(mdp, recvr_offset, receiver); set_mdp_data_at(mdp, item_offset, item);
int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row)); int count_offset = in_bytes(item_count_offset_fn(start_row));
mov(reg2, DataLayout::counter_increment); mov(reg2, DataLayout::counter_increment);
set_mdp_data_at(mdp, count_offset, reg2); set_mdp_data_at(mdp, count_offset, reg2);
if (start_row > 0) { if (start_row > 0) {
@ -1347,9 +1396,8 @@ void InterpreterMacroAssembler::notify_method_entry() {
// the code to check if the event should be sent. // the code to check if the event should be sent.
if (JvmtiExport::can_post_interpreter_events()) { if (JvmtiExport::can_post_interpreter_events()) {
Label L; Label L;
ldr(r3, Address(rthread, JavaThread::interp_only_mode_offset())); ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
tst(r3, ~0); cbzw(r3, L);
br(Assembler::EQ, L);
call_VM(noreg, CAST_FROM_FN_PTR(address, call_VM(noreg, CAST_FROM_FN_PTR(address,
InterpreterRuntime::post_method_entry)); InterpreterRuntime::post_method_entry));
bind(L); bind(L);
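The rewritten check also fixes the load width: interp_only_mode is a 32-bit field, so it needs ldrw rather than ldr, and the three-instruction test collapses into a single compare-and-branch. A sketch of the before/after idiom:

// before: ldr(r3, ...); tst(r3, ~0); br(Assembler::EQ, L);
// after:
__ ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
__ cbzw(r3, L); // branch past the JVMTI notification when the flag is clear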

View file

@ -33,6 +33,7 @@
// This file specializes the assembler with interpreter-specific macros // This file specializes the assembler with interpreter-specific macros
typedef ByteSize (*OffsetFunction)(uint);
class InterpreterMacroAssembler: public MacroAssembler { class InterpreterMacroAssembler: public MacroAssembler {
protected: protected:
@ -234,6 +235,10 @@ class InterpreterMacroAssembler: public MacroAssembler {
void record_klass_in_profile_helper(Register receiver, Register mdp, void record_klass_in_profile_helper(Register receiver, Register mdp,
Register reg2, int start_row, Register reg2, int start_row,
Label& done, bool is_virtual_call); Label& done, bool is_virtual_call);
void record_item_in_profile_helper(Register item, Register mdp,
Register reg2, int start_row, Label& done, int total_rows,
OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn,
int non_profiled_offset);
void update_mdp_by_offset(Register mdp_in, int offset_of_offset); void update_mdp_by_offset(Register mdp_in, int offset_of_offset);
void update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp); void update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp);
@ -247,6 +252,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
void profile_virtual_call(Register receiver, Register mdp, void profile_virtual_call(Register receiver, Register mdp,
Register scratch2, Register scratch2,
bool receiver_can_be_null = false); bool receiver_can_be_null = false);
void profile_called_method(Register method, Register mdp, Register reg2) NOT_JVMCI_RETURN;
void profile_ret(Register return_bci, Register mdp); void profile_ret(Register return_bci, Register mdp);
void profile_null_seen(Register mdp); void profile_null_seen(Register mdp);
void profile_typecheck(Register mdp, Register klass, Register scratch); void profile_typecheck(Register mdp, Register klass, Register scratch);

View file

@ -229,6 +229,7 @@ void TemplateInterpreterGenerator::generate_transcendental_entry(AbstractInterpr
break; break;
default: default:
ShouldNotReachHere(); ShouldNotReachHere();
fn = NULL; // unreachable
} }
const int gpargs = 0, rtype = 3; const int gpargs = 0, rtype = 3;
__ mov(rscratch1, fn); __ mov(rscratch1, fn);

View file

@ -61,6 +61,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
case T_FLOAT: name = "jni_fast_GetFloatField"; break; case T_FLOAT: name = "jni_fast_GetFloatField"; break;
case T_DOUBLE: name = "jni_fast_GetDoubleField"; break; case T_DOUBLE: name = "jni_fast_GetDoubleField"; break;
default: ShouldNotReachHere(); default: ShouldNotReachHere();
name = NULL; // unreachable
} }
ResourceMark rm; ResourceMark rm;
BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE); BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE);
@ -125,6 +126,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
case T_FLOAT: slow_case_addr = jni_GetFloatField_addr(); break; case T_FLOAT: slow_case_addr = jni_GetFloatField_addr(); break;
case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr(); break; case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr(); break;
default: ShouldNotReachHere(); default: ShouldNotReachHere();
slow_case_addr = NULL; // unreachable
} }
{ {

View file

@ -678,7 +678,7 @@ address MacroAssembler::trampoline_call(Address entry, CodeBuffer *cbuf) {
if (cbuf) cbuf->set_insts_mark(); if (cbuf) cbuf->set_insts_mark();
relocate(entry.rspec()); relocate(entry.rspec());
if (Assembler::reachable_from_branch_at(pc(), entry.target())) { if (!far_branches()) {
bl(entry.target()); bl(entry.target());
} else { } else {
bl(pc()); bl(pc());
@ -733,8 +733,8 @@ address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
return stub; return stub;
} }
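The new guard in trampoline_call above assumes the shared far_branches() predicate; a sketch of what it presumably tests, given the +-128MB B/BL range noted in globalDefinitions_aarch64.hpp:

static bool far_branches() {
  // A direct BL can always reach its target only while the whole code
  // cache fits inside the branch range.
  return ReservedCodeCacheSize > branch_range; // branch_range: 128MB on AArch64
}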
address MacroAssembler::ic_call(address entry) { address MacroAssembler::ic_call(address entry, jint method_index) {
RelocationHolder rh = virtual_call_Relocation::spec(pc()); RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
// address const_ptr = long_constant((jlong)Universe::non_oop_word()); // address const_ptr = long_constant((jlong)Universe::non_oop_word());
// unsigned long offset; // unsigned long offset;
// ldr_constant(rscratch2, const_ptr); // ldr_constant(rscratch2, const_ptr);

View file

@ -410,7 +410,7 @@ class MacroAssembler: public Assembler {
#define WRAP(INSN) \ #define WRAP(INSN) \
void INSN(Register Rd, Register Rn, Register Rm, Register Ra) { \ void INSN(Register Rd, Register Rn, Register Rm, Register Ra) { \
if ((VM_Version::cpu_cpuFeatures() & VM_Version::CPU_A53MAC) && Ra != zr) \ if ((VM_Version::features() & VM_Version::CPU_A53MAC) && Ra != zr) \
nop(); \ nop(); \
Assembler::INSN(Rd, Rn, Rm, Ra); \ Assembler::INSN(Rd, Rn, Rm, Ra); \
} }
@ -480,6 +480,32 @@ public:
orr(Vd, T, Vn, Vn); orr(Vd, T, Vn, Vn);
} }
public:
// Generalized Test Bit And Branch, including a "far" variety which
// can reach targets more than 32KiB away.
void tbr(Condition cond, Register Rt, int bitpos, Label &dest, bool far = false) {
assert(cond == EQ || cond == NE, "must be");
if (far)
cond = ~cond;
void (Assembler::* branch)(Register Rt, int bitpos, Label &L);
if (cond == Assembler::EQ)
branch = &Assembler::tbz;
else
branch = &Assembler::tbnz;
if (far) {
Label L;
(this->*branch)(Rt, bitpos, L);
b(dest);
bind(L);
} else {
(this->*branch)(Rt, bitpos, dest);
}
}
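A usage sketch (register, bit and label chosen for illustration): with far == true the condition is inverted so that only the short-range instruction needs to reach a nearby target:

Label L_slow;
__ tbr(Assembler::NE, r0, 31, L_slow, /*far*/ true);
// emits approximately:
//   tbz r0, #31, skip   // inverted test, +-32KiB reach
//   b   L_slow          // unconditional, +-128MB reach
// skip: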
// macro instructions for accessing and updating floating point // macro instructions for accessing and updating floating point
// status register // status register
// //
@ -976,7 +1002,7 @@ public:
} }
// Emit the CompiledIC call idiom // Emit the CompiledIC call idiom
address ic_call(address entry); address ic_call(address entry, jint method_index = 0);
public: public:

View file

@ -62,7 +62,6 @@ class NativeInstruction VALUE_OBJ_CLASS_SPEC {
inline bool is_jump_or_nop(); inline bool is_jump_or_nop();
inline bool is_cond_jump(); inline bool is_cond_jump();
bool is_safepoint_poll(); bool is_safepoint_poll();
inline bool is_mov_literal64();
bool is_movz(); bool is_movz();
bool is_movk(); bool is_movk();
bool is_sigill_zombie_not_entrant(); bool is_sigill_zombie_not_entrant();
@ -98,6 +97,14 @@ class NativeInstruction VALUE_OBJ_CLASS_SPEC {
static bool is_ldr_literal_at(address instr); static bool is_ldr_literal_at(address instr);
static bool is_ldrw_to_zr(address instr); static bool is_ldrw_to_zr(address instr);
static bool is_call_at(address instr) {
const uint32_t insn = (*(uint32_t*)instr);
return (insn >> 26) == 0b100101;
}
bool is_call() {
return is_call_at(addr_at(0));
}
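This works because BL is the only AArch64 instruction whose top six bits are 100101; the remaining 26 bits hold a signed word offset. A standalone sketch:

#include <stdint.h>

static bool is_bl(uint32_t insn) {
  return (insn >> 26) == 0b100101; // == 0x25
}
// Example: "bl .+4" assembles to 0x94000001, and 0x94000001 >> 26 == 0x25.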
static bool maybe_cpool_ref(address instr) { static bool maybe_cpool_ref(address instr) {
return is_adrp_at(instr) || is_ldr_literal_at(instr); return is_adrp_at(instr) || is_ldr_literal_at(instr);
} }
@ -157,11 +164,6 @@ class NativeCall: public NativeInstruction {
inline friend NativeCall* nativeCall_at(address address); inline friend NativeCall* nativeCall_at(address address);
inline friend NativeCall* nativeCall_before(address return_address); inline friend NativeCall* nativeCall_before(address return_address);
static bool is_call_at(address instr) {
const uint32_t insn = (*(uint32_t*)instr);
return (insn >> 26) == 0b100101;
}
static bool is_call_before(address return_address) { static bool is_call_before(address return_address) {
return is_call_at(return_address - NativeCall::return_address_offset); return is_call_at(return_address - NativeCall::return_address_offset);
} }

View file

@ -59,14 +59,20 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
address Relocation::pd_call_destination(address orig_addr) { address Relocation::pd_call_destination(address orig_addr) {
assert(is_call(), "should be a call here"); assert(is_call(), "should be a call here");
if (is_call()) { if (NativeCall::is_call_at(addr())) {
address trampoline = nativeCall_at(addr())->get_trampoline(); address trampoline = nativeCall_at(addr())->get_trampoline();
if (trampoline) { if (trampoline) {
return nativeCallTrampolineStub_at(trampoline)->destination(); return nativeCallTrampolineStub_at(trampoline)->destination();
} }
} }
if (orig_addr != NULL) { if (orig_addr != NULL) {
return MacroAssembler::pd_call_destination(orig_addr); address new_addr = MacroAssembler::pd_call_destination(orig_addr);
// If call is branch to self, don't try to relocate it, just leave it
// as branch to self. This happens during code generation if the code
// buffer expands. It will be relocated to the trampoline above once
// code generation is complete.
new_addr = (new_addr == orig_addr) ? addr() : new_addr;
return new_addr;
} }
return MacroAssembler::pd_call_destination(addr()); return MacroAssembler::pd_call_destination(addr());
} }

View file

@ -39,10 +39,13 @@
#ifdef COMPILER1 #ifdef COMPILER1
#include "c1/c1_Runtime1.hpp" #include "c1/c1_Runtime1.hpp"
#endif #endif
#ifdef COMPILER2 #if defined(COMPILER2) || INCLUDE_JVMCI
#include "adfiles/ad_aarch64.hpp" #include "adfiles/ad_aarch64.hpp"
#include "opto/runtime.hpp" #include "opto/runtime.hpp"
#endif #endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif
#ifdef BUILTIN_SIM #ifdef BUILTIN_SIM
#include "../../../../../../simulator/simulator.hpp" #include "../../../../../../simulator/simulator.hpp"
@ -109,14 +112,14 @@ class RegisterSaver {
}; };
OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) { OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
#ifdef COMPILER2 #if defined(COMPILER2) || INCLUDE_JVMCI
if (save_vectors) { if (save_vectors) {
// Save upper half of vector registers // Save upper half of vector registers
int vect_words = 32 * 8 / wordSize; int vect_words = 32 * 8 / wordSize;
additional_frame_words += vect_words; additional_frame_words += vect_words;
} }
#else #else
assert(!save_vectors, "vectors are generated only by C2"); assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
#endif #endif
int frame_size_in_bytes = round_to(additional_frame_words*wordSize + int frame_size_in_bytes = round_to(additional_frame_words*wordSize +
@ -166,7 +169,7 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) { void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
#ifndef COMPILER2 #ifndef COMPILER2
assert(!restore_vectors, "vectors are generated only by C2"); assert(!restore_vectors, "vectors are generated only by C2 and JVMCI");
#endif #endif
__ pop_CPU_state(restore_vectors); __ pop_CPU_state(restore_vectors);
__ leave(); __ leave();
@ -547,6 +550,18 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
// Pre-load the register-jump target early, to schedule it better. // Pre-load the register-jump target early, to schedule it better.
__ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset()))); __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset())));
#if INCLUDE_JVMCI
if (EnableJVMCI) {
// check if this call should be routed towards a specific entry point
__ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
Label no_alternative_target;
__ cbz(rscratch2, no_alternative_target);
__ mov(rscratch1, rscratch2);
__ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
__ bind(no_alternative_target);
}
#endif // INCLUDE_JVMCI
// Now generate the shuffle code. // Now generate the shuffle code.
for (int i = 0; i < total_args_passed; i++) { for (int i = 0; i < total_args_passed; i++) {
if (sig_bt[i] == T_VOID) { if (sig_bt[i] == T_VOID) {
@ -2237,7 +2252,13 @@ void SharedRuntime::generate_deopt_blob() {
// Allocate space for the code // Allocate space for the code
ResourceMark rm; ResourceMark rm;
// Setup code generation tools // Setup code generation tools
CodeBuffer buffer("deopt_blob", 2048, 1024); int pad = 0;
#if INCLUDE_JVMCI
if (EnableJVMCI) {
pad += 512; // Increase the buffer size when compiling for JVMCI
}
#endif
CodeBuffer buffer("deopt_blob", 2048+pad, 1024);
MacroAssembler* masm = new MacroAssembler(&buffer); MacroAssembler* masm = new MacroAssembler(&buffer);
int frame_size_in_words; int frame_size_in_words;
OopMap* map = NULL; OopMap* map = NULL;
@ -2294,6 +2315,12 @@ void SharedRuntime::generate_deopt_blob() {
__ b(cont); __ b(cont);
int reexecute_offset = __ pc() - start; int reexecute_offset = __ pc() - start;
#if defined(INCLUDE_JVMCI) && !defined(COMPILER1)
if (EnableJVMCI && UseJVMCICompiler) {
// JVMCI does not use this kind of deoptimization
__ should_not_reach_here();
}
#endif
// Reexecute case // Reexecute case
// return address is the pc describes what bci to do re-execute at // return address is the pc describes what bci to do re-execute at
@ -2304,6 +2331,44 @@ void SharedRuntime::generate_deopt_blob() {
__ movw(rcpool, Deoptimization::Unpack_reexecute); // callee-saved __ movw(rcpool, Deoptimization::Unpack_reexecute); // callee-saved
__ b(cont); __ b(cont);
#if INCLUDE_JVMCI
Label after_fetch_unroll_info_call;
int implicit_exception_uncommon_trap_offset = 0;
int uncommon_trap_offset = 0;
if (EnableJVMCI) {
implicit_exception_uncommon_trap_offset = __ pc() - start;
__ ldr(lr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
__ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
uncommon_trap_offset = __ pc() - start;
// Save everything in sight.
RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
// fetch_unroll_info needs to call last_java_frame()
Label retaddr;
__ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
__ ldrw(c_rarg1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
__ movw(rscratch1, -1);
__ strw(rscratch1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
__ movw(rcpool, (int32_t)Deoptimization::Unpack_reexecute);
__ mov(c_rarg0, rthread);
__ lea(rscratch1,
RuntimeAddress(CAST_FROM_FN_PTR(address,
Deoptimization::uncommon_trap)));
__ blrt(rscratch1, 2, 0, MacroAssembler::ret_type_integral);
__ bind(retaddr);
oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
__ reset_last_Java_frame(false, false);
__ b(after_fetch_unroll_info_call);
} // EnableJVMCI
#endif // INCLUDE_JVMCI
int exception_offset = __ pc() - start; int exception_offset = __ pc() - start;
// Prolog for exception case // Prolog for exception case
@ -2395,7 +2460,13 @@ void SharedRuntime::generate_deopt_blob() {
__ reset_last_Java_frame(false, true); __ reset_last_Java_frame(false, true);
// Load UnrollBlock* into rdi #if INCLUDE_JVMCI
if (EnableJVMCI) {
__ bind(after_fetch_unroll_info_call);
}
#endif
// Load UnrollBlock* into r5
__ mov(r5, r0); __ mov(r5, r0);
__ ldrw(rcpool, Address(r5, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes())); __ ldrw(rcpool, Address(r5, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
@ -2547,7 +2618,12 @@ void SharedRuntime::generate_deopt_blob() {
_deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words); _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
_deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset); _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
#if INCLUDE_JVMCI
if (EnableJVMCI) {
_deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
_deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
}
#endif
#ifdef BUILTIN_SIM #ifdef BUILTIN_SIM
if (NotifySimulator) { if (NotifySimulator) {
unsigned char *base = _deopt_blob->code_begin(); unsigned char *base = _deopt_blob->code_begin();
@ -2560,7 +2636,7 @@ uint SharedRuntime::out_preserve_stack_slots() {
return 0; return 0;
} }
#ifdef COMPILER2 #if defined(COMPILER2) || INCLUDE_JVMCI
//------------------------------generate_uncommon_trap_blob-------------------- //------------------------------generate_uncommon_trap_blob--------------------
void SharedRuntime::generate_uncommon_trap_blob() { void SharedRuntime::generate_uncommon_trap_blob() {
// Allocate space for the code // Allocate space for the code
@ -2943,7 +3019,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
} }
#ifdef COMPILER2 #if defined(COMPILER2) || INCLUDE_JVMCI
// This is here instead of runtime_x86_64.cpp because it uses SimpleRuntimeFrame // This is here instead of runtime_x86_64.cpp because it uses SimpleRuntimeFrame
// //
//------------------------------generate_exception_blob--------------------------- //------------------------------generate_exception_blob---------------------------

View file

@ -958,8 +958,8 @@ class StubGenerator: public StubCodeGenerator {
const Register t0 = r3, t1 = r4; const Register t0 = r3, t1 = r4;
if (is_backwards) { if (is_backwards) {
__ lea(s, Address(s, count, Address::uxtw(exact_log2(-step)))); __ lea(s, Address(s, count, Address::lsl(exact_log2(-step))));
__ lea(d, Address(d, count, Address::uxtw(exact_log2(-step)))); __ lea(d, Address(d, count, Address::lsl(exact_log2(-step))));
} }
Label done, tail; Label done, tail;
@ -1051,10 +1051,10 @@ class StubGenerator: public StubCodeGenerator {
__ cmp(rscratch2, count); __ cmp(rscratch2, count);
__ br(Assembler::HS, end); __ br(Assembler::HS, end);
if (size == (size_t)wordSize) { if (size == (size_t)wordSize) {
__ ldr(temp, Address(a, rscratch2, Address::uxtw(exact_log2(size)))); __ ldr(temp, Address(a, rscratch2, Address::lsl(exact_log2(size))));
__ verify_oop(temp); __ verify_oop(temp);
} else { } else {
__ ldrw(r16, Address(a, rscratch2, Address::uxtw(exact_log2(size)))); __ ldrw(r16, Address(a, rscratch2, Address::lsl(exact_log2(size))));
__ decode_heap_oop(temp); // calls verify_oop __ decode_heap_oop(temp); // calls verify_oop
} }
__ add(rscratch2, rscratch2, size); __ add(rscratch2, rscratch2, size);
@ -1087,12 +1087,14 @@ class StubGenerator: public StubCodeGenerator {
__ align(CodeEntryAlignment); __ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name); StubCodeMark mark(this, "StubRoutines", name);
address start = __ pc(); address start = __ pc();
__ enter();
if (entry != NULL) { if (entry != NULL) {
*entry = __ pc(); *entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory) // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:"); BLOCK_COMMENT("Entry:");
} }
__ enter();
if (is_oop) { if (is_oop) {
__ push(RegSet::of(d, count), sp); __ push(RegSet::of(d, count), sp);
// no registers are destroyed by this call // no registers are destroyed by this call
@ -1104,10 +1106,11 @@ class StubGenerator: public StubCodeGenerator {
if (VerifyOops) if (VerifyOops)
verify_oop_array(size, d, count, r16); verify_oop_array(size, d, count, r16);
__ sub(count, count, 1); // make an inclusive end pointer __ sub(count, count, 1); // make an inclusive end pointer
__ lea(count, Address(d, count, Address::uxtw(exact_log2(size)))); __ lea(count, Address(d, count, Address::lsl(exact_log2(size))));
gen_write_ref_array_post_barrier(d, count, rscratch1); gen_write_ref_array_post_barrier(d, count, rscratch1);
} }
__ leave(); __ leave();
__ mov(r0, zr); // return 0
__ ret(lr); __ ret(lr);
#ifdef BUILTIN_SIM #ifdef BUILTIN_SIM
{ {
@ -1140,11 +1143,16 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", name); StubCodeMark mark(this, "StubRoutines", name);
address start = __ pc(); address start = __ pc();
__ enter();
if (entry != NULL) {
*entry = __ pc();
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
BLOCK_COMMENT("Entry:");
}
__ cmp(d, s); __ cmp(d, s);
__ br(Assembler::LS, nooverlap_target); __ br(Assembler::LS, nooverlap_target);
__ enter();
if (is_oop) { if (is_oop) {
__ push(RegSet::of(d, count), sp); __ push(RegSet::of(d, count), sp);
// no registers are destroyed by this call // no registers are destroyed by this call
@ -1160,6 +1168,7 @@ class StubGenerator: public StubCodeGenerator {
gen_write_ref_array_post_barrier(d, count, rscratch1); gen_write_ref_array_post_barrier(d, count, rscratch1);
} }
__ leave(); __ leave();
__ mov(r0, zr); // return 0
__ ret(lr); __ ret(lr);
#ifdef BUILTIN_SIM #ifdef BUILTIN_SIM
{ {
@ -1559,7 +1568,29 @@ class StubGenerator: public StubCodeGenerator {
Register dst_pos, // destination position (c_rarg3) Register dst_pos, // destination position (c_rarg3)
Register length, Register length,
Register temp, Register temp,
Label& L_failed) { Unimplemented(); } Label& L_failed) {
BLOCK_COMMENT("arraycopy_range_checks:");
assert_different_registers(rscratch1, temp);
// if (src_pos + length > arrayOop(src)->length()) FAIL;
__ ldrw(rscratch1, Address(src, arrayOopDesc::length_offset_in_bytes()));
__ addw(temp, length, src_pos);
__ cmpw(temp, rscratch1);
__ br(Assembler::HI, L_failed);
// if (dst_pos + length > arrayOop(dst)->length()) FAIL;
__ ldrw(rscratch1, Address(dst, arrayOopDesc::length_offset_in_bytes()));
__ addw(temp, length, dst_pos);
__ cmpw(temp, rscratch1);
__ br(Assembler::HI, L_failed);
// Have to clean up high 32 bits of 'src_pos' and 'dst_pos'.
__ movw(src_pos, src_pos);
__ movw(dst_pos, dst_pos);
BLOCK_COMMENT("arraycopy_range_checks done");
}
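Each check is an unsigned 32-bit compare; since src_pos, dst_pos and length were validated non-negative before this helper runs, the addw cannot wrap. An equivalent C++ sketch of one check:

static bool range_check_fails(int32_t pos, int32_t len, int32_t array_len) {
  // addw temp, len, pos; cmpw temp, array_len; br(HI, L_failed)
  return (uint32_t)(pos + len) > (uint32_t)array_len;
}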
// These stubs get called from some dumb test routine. // These stubs get called from some dumb test routine.
// I'll write them properly when they're called from // I'll write them properly when they're called from
@ -1569,6 +1600,309 @@ class StubGenerator: public StubCodeGenerator {
} }
//
// Generate 'unsafe' array copy stub
// Though just as safe as the other stubs, it takes an unscaled
// size_t argument instead of an element count.
//
// Input:
// c_rarg0 - source array address
// c_rarg1 - destination array address
// c_rarg2 - byte count, treated as ssize_t, can be zero
//
// Examines the alignment of the operands and dispatches
// to a long, int, short, or byte copy loop.
//
address generate_unsafe_copy(const char *name,
address byte_copy_entry) {
#ifdef PRODUCT
return StubRoutines::_jbyte_arraycopy;
#else
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
address start = __ pc();
__ enter(); // required for proper stackwalking of RuntimeStub frame
// bump this on entry, not on exit:
__ lea(rscratch2, ExternalAddress((address)&SharedRuntime::_unsafe_array_copy_ctr));
__ incrementw(Address(rscratch2));
__ b(RuntimeAddress(byte_copy_entry));
return start;
#endif
}
//
// Generate generic array copy stubs
//
// Input:
// c_rarg0 - src oop
// c_rarg1 - src_pos (32-bits)
// c_rarg2 - dst oop
// c_rarg3 - dst_pos (32-bits)
// c_rarg4 - element count (32-bits)
//
// Output:
// r0 == 0 - success
// r0 == -1^K - failure, where K is partial transfer count
//
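A caller-side sketch of this convention (names illustrative): -1^K equals ~K, so a failing copy still reports how many elements were transferred before the failure:

int res = generic_arraycopy(src, src_pos, dst, dst_pos, length);
if (res != 0) {
  int copied = ~res; // == -1 ^ res
  // resume with an element-wise checked copy after 'copied' elements
}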
address generate_generic_copy(const char *name,
address byte_copy_entry, address short_copy_entry,
address int_copy_entry, address oop_copy_entry,
address long_copy_entry, address checkcast_copy_entry) {
Label L_failed, L_failed_0, L_objArray;
Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs;
// Input registers
const Register src = c_rarg0; // source array oop
const Register src_pos = c_rarg1; // source position
const Register dst = c_rarg2; // destination array oop
const Register dst_pos = c_rarg3; // destination position
const Register length = c_rarg4;
StubCodeMark mark(this, "StubRoutines", name);
__ align(CodeEntryAlignment);
address start = __ pc();
__ enter(); // required for proper stackwalking of RuntimeStub frame
// bump this on entry, not on exit:
inc_counter_np(SharedRuntime::_generic_array_copy_ctr);
//-----------------------------------------------------------------------
// Assembler stub will be used for this call to arraycopy
// if the following conditions are met:
//
// (1) src and dst must not be null.
// (2) src_pos must not be negative.
// (3) dst_pos must not be negative.
// (4) length must not be negative.
// (5) src klass and dst klass should be the same and not NULL.
// (6) src and dst should be arrays.
// (7) src_pos + length must not exceed length of src.
// (8) dst_pos + length must not exceed length of dst.
//
// if (src == NULL) return -1;
__ cbz(src, L_failed);
// if (src_pos < 0) return -1;
__ tbnz(src_pos, 31, L_failed); // i.e. sign bit set
// if (dst == NULL) return -1;
__ cbz(dst, L_failed);
// if (dst_pos < 0) return -1;
__ tbnz(dst_pos, 31, L_failed); // i.e. sign bit set
// registers used as temp
const Register scratch_length = r16; // elements count to copy
const Register scratch_src_klass = r17; // array klass
const Register lh = r18; // layout helper
// if (length < 0) return -1;
__ movw(scratch_length, length); // length (elements count, 32-bits value)
__ tbnz(scratch_length, 31, L_failed); // i.e. sign bit set
__ load_klass(scratch_src_klass, src);
#ifdef ASSERT
// assert(src->klass() != NULL);
{
BLOCK_COMMENT("assert klasses not null {");
Label L1, L2;
__ cbnz(scratch_src_klass, L2); // it is broken if klass is NULL
__ bind(L1);
__ stop("broken null klass");
__ bind(L2);
__ load_klass(rscratch1, dst);
__ cbz(rscratch1, L1); // this would be broken also
BLOCK_COMMENT("} assert klasses not null done");
}
#endif
// Load layout helper (32-bits)
//
// |array_tag| | header_size | element_type | |log2_element_size|
// 32 30 24 16 8 2 0
//
// array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
//
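A C++ sketch of decoding the fields in this diagram, using the shift/mask constants from Klass (the usual layout: tag in the top two bits, then byte-wide header size, element type and log2 element size):

static int lh_element_size_in_bytes(jint lh) {
  int log2size = (lh >> Klass::_lh_log2_element_size_shift)
                 & Klass::_lh_log2_element_size_mask;
  return 1 << log2size; // 1, 2, 4 or 8 for primitive arrays
}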
const int lh_offset = in_bytes(Klass::layout_helper_offset());
// Handle objArrays completely differently...
const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
__ ldrw(lh, Address(scratch_src_klass, lh_offset));
__ movw(rscratch1, objArray_lh);
__ eorw(rscratch2, lh, rscratch1);
__ cbzw(rscratch2, L_objArray);
// if (src->klass() != dst->klass()) return -1;
__ load_klass(rscratch2, dst);
__ eor(rscratch2, rscratch2, scratch_src_klass);
__ cbnz(rscratch2, L_failed);
// if (!src->is_Array()) return -1;
__ tbz(lh, 31, L_failed); // i.e. (lh >= 0)
// At this point, it is known to be a typeArray (array_tag 0x3).
#ifdef ASSERT
{
BLOCK_COMMENT("assert primitive array {");
Label L;
__ movw(rscratch2, Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
__ cmpw(lh, rscratch2);
__ br(Assembler::GE, L);
__ stop("must be a primitive array");
__ bind(L);
BLOCK_COMMENT("} assert primitive array done");
}
#endif
arraycopy_range_checks(src, src_pos, dst, dst_pos, scratch_length,
rscratch2, L_failed);
// TypeArrayKlass
//
// src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
// dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
//
const Register rscratch1_offset = rscratch1; // array offset
const Register r18_elsize = lh; // element size
__ ubfx(rscratch1_offset, lh, Klass::_lh_header_size_shift,
exact_log2(Klass::_lh_header_size_mask+1)); // array_offset
__ add(src, src, rscratch1_offset); // src array offset
__ add(dst, dst, rscratch1_offset); // dst array offset
BLOCK_COMMENT("choose copy loop based on element size");
// next registers should be set before the jump to corresponding stub
const Register from = c_rarg0; // source array address
const Register to = c_rarg1; // destination array address
const Register count = c_rarg2; // elements count
// 'from', 'to', 'count' registers should be set in such order
// since they are the same as 'src', 'src_pos', 'dst'.
assert(Klass::_lh_log2_element_size_shift == 0, "fix this code");
// The possible values of elsize are 0-3, i.e. exact_log2(element
// size in bytes). We do a simple bitwise binary search.
__ BIND(L_copy_bytes);
__ tbnz(r18_elsize, 1, L_copy_ints);
__ tbnz(r18_elsize, 0, L_copy_shorts);
__ lea(from, Address(src, src_pos));// src_addr
__ lea(to, Address(dst, dst_pos));// dst_addr
__ movw(count, scratch_length); // length
__ b(RuntimeAddress(byte_copy_entry));
__ BIND(L_copy_shorts);
__ lea(from, Address(src, src_pos, Address::lsl(1)));// src_addr
__ lea(to, Address(dst, dst_pos, Address::lsl(1)));// dst_addr
__ movw(count, scratch_length); // length
__ b(RuntimeAddress(short_copy_entry));
__ BIND(L_copy_ints);
__ tbnz(r18_elsize, 0, L_copy_longs);
__ lea(from, Address(src, src_pos, Address::lsl(2)));// src_addr
__ lea(to, Address(dst, dst_pos, Address::lsl(2)));// dst_addr
__ movw(count, scratch_length); // length
__ b(RuntimeAddress(int_copy_entry));
__ BIND(L_copy_longs);
#ifdef ASSERT
{
BLOCK_COMMENT("assert long copy {");
Label L;
__ andw(lh, lh, Klass::_lh_log2_element_size_mask); // lh -> r18_elsize
__ cmpw(r18_elsize, LogBytesPerLong);
__ br(Assembler::EQ, L);
__ stop("must be long copy, but elsize is wrong");
__ bind(L);
BLOCK_COMMENT("} assert long copy done");
}
#endif
__ lea(from, Address(src, src_pos, Address::lsl(3)));// src_addr
__ lea(to, Address(dst, dst_pos, Address::lsl(3)));// dst_addr
__ movw(count, scratch_length); // length
__ b(RuntimeAddress(long_copy_entry));
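With copy_* standing in for the stub entry points, the two tbnz tests above read as this C++ dispatch over the low bits of the log2 element size:

if (elsize & 2) {                 // bit 1 set: 4- or 8-byte elements
  if (elsize & 1) copy_longs();
  else            copy_ints();
} else {                          // bit 1 clear: 1- or 2-byte elements
  if (elsize & 1) copy_shorts();
  else            copy_bytes();
}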
// ObjArrayKlass
__ BIND(L_objArray);
// live at this point: scratch_src_klass, scratch_length, src[_pos], dst[_pos]
Label L_plain_copy, L_checkcast_copy;
// test array classes for subtyping
__ load_klass(r18, dst);
__ cmp(scratch_src_klass, r18); // usual case is exact equality
__ br(Assembler::NE, L_checkcast_copy);
// Identically typed arrays can be copied without element-wise checks.
arraycopy_range_checks(src, src_pos, dst, dst_pos, scratch_length,
rscratch2, L_failed);
__ lea(from, Address(src, src_pos, Address::lsl(3)));
__ add(from, from, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
__ lea(to, Address(dst, dst_pos, Address::lsl(3)));
__ add(to, to, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
__ movw(count, scratch_length); // length
__ BIND(L_plain_copy);
__ b(RuntimeAddress(oop_copy_entry));
__ BIND(L_checkcast_copy);
// live at this point: scratch_src_klass, scratch_length, r18 (dst_klass)
{
// Before looking at dst.length, make sure dst is also an objArray.
__ ldrw(rscratch1, Address(r18, lh_offset));
__ movw(rscratch2, objArray_lh);
__ eorw(rscratch1, rscratch1, rscratch2);
__ cbnzw(rscratch1, L_failed);
// It is safe to examine both src.length and dst.length.
arraycopy_range_checks(src, src_pos, dst, dst_pos, scratch_length,
r18, L_failed);
const Register rscratch2_dst_klass = rscratch2;
__ load_klass(rscratch2_dst_klass, dst); // reload
// Marshal the base address arguments now, freeing registers.
__ lea(from, Address(src, src_pos, Address::lsl(3)));
__ add(from, from, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
__ lea(to, Address(dst, dst_pos, Address::lsl(3)));
__ add(to, to, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
__ movw(count, length); // length (reloaded)
Register sco_temp = c_rarg3; // this register is free now
assert_different_registers(from, to, count, sco_temp,
rscratch2_dst_klass, scratch_src_klass);
// assert_clean_int(count, sco_temp);
// Generate the type check.
const int sco_offset = in_bytes(Klass::super_check_offset_offset());
__ ldrw(sco_temp, Address(rscratch2_dst_klass, sco_offset));
// assert_clean_int(sco_temp, r18);
generate_type_check(scratch_src_klass, sco_temp, rscratch2_dst_klass, L_plain_copy);
// Fetch destination element klass from the ObjArrayKlass header.
int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
__ ldr(rscratch2_dst_klass, Address(rscratch2_dst_klass, ek_offset));
__ ldrw(sco_temp, Address(rscratch2_dst_klass, sco_offset));
// the checkcast_copy loop needs two extra arguments:
assert(c_rarg3 == sco_temp, "#3 already in place");
// Set up arguments for checkcast_copy_entry.
__ mov(c_rarg4, rscratch2_dst_klass); // dst.klass.element_klass
__ b(RuntimeAddress(checkcast_copy_entry));
}
__ BIND(L_failed);
__ mov(r0, -1);
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(lr);
return start;
}
void generate_arraycopy_stubs() { void generate_arraycopy_stubs() {
address entry; address entry;
address entry_jbyte_arraycopy; address entry_jbyte_arraycopy;
@ -1655,6 +1989,18 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL,
/*dest_uninitialized*/true); /*dest_uninitialized*/true);
StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy",
entry_jbyte_arraycopy);
StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy",
entry_jbyte_arraycopy,
entry_jshort_arraycopy,
entry_jint_arraycopy,
entry_oop_arraycopy,
entry_jlong_arraycopy,
entry_checkcast_arraycopy);
} }
void generate_math_stubs() { Unimplemented(); } void generate_math_stubs() { Unimplemented(); }
@ -1973,7 +2319,7 @@ class StubGenerator: public StubCodeGenerator {
// c_rarg4 - input length // c_rarg4 - input length
// //
// Output: // Output:
// x0 - input length // r0 - input length
// //
address generate_cipherBlockChaining_decryptAESCrypt() { address generate_cipherBlockChaining_decryptAESCrypt() {
assert(UseAES, "need AES instructions and misaligned SSE support"); assert(UseAES, "need AES instructions and misaligned SSE support");

View file

@ -223,6 +223,19 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
__ restore_constant_pool_cache(); __ restore_constant_pool_cache();
__ get_method(rmethod); __ get_method(rmethod);
#if INCLUDE_JVMCI
// Check if we need to take lock at entry of synchronized method.
if (UseJVMCICompiler) {
Label L;
__ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
__ cbz(rscratch1, L);
// Clear flag.
__ strb(zr, Address(rthread, JavaThread::pending_monitorenter_offset()));
// Take lock.
lock_method();
__ bind(L);
}
#endif
// handle exceptions // handle exceptions
{ {
Label L; Label L;
@ -367,7 +380,7 @@ void TemplateInterpreterGenerator::generate_counter_incr(
__ br(Assembler::LT, *profile_method_continue); __ br(Assembler::LT, *profile_method_continue);
// if no method data exists, go to profile_method // if no method data exists, go to profile_method
__ test_method_data_pointer(r0, *profile_method); __ test_method_data_pointer(rscratch2, *profile_method);
} }
{ {

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
@ -30,16 +30,8 @@
// constants required by the Serviceability Agent. This file is // constants required by the Serviceability Agent. This file is
// referenced by vmStructs.cpp. // referenced by vmStructs.cpp.
#define VM_STRUCTS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \ #define VM_STRUCTS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \
\ volatile_nonstatic_field(JavaFrameAnchor, _last_Java_fp, intptr_t*)
/******************************/ \
/* JavaCallWrapper */ \
/******************************/ \
/******************************/ \
/* JavaFrameAnchor */ \
/******************************/ \
volatile_nonstatic_field(JavaFrameAnchor, _last_Java_fp, intptr_t*)
#define VM_TYPES_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type) #define VM_TYPES_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type)

View file

@ -67,8 +67,6 @@ int VM_Version::_model2;
int VM_Version::_variant; int VM_Version::_variant;
int VM_Version::_revision; int VM_Version::_revision;
int VM_Version::_stepping; int VM_Version::_stepping;
int VM_Version::_cpuFeatures;
const char* VM_Version::_features_str = "";
static BufferBlob* stub_blob; static BufferBlob* stub_blob;
static const int stub_size = 550; static const int stub_size = 550;
@ -129,7 +127,7 @@ void VM_Version::get_processor_features() {
char buf[512]; char buf[512];
_cpuFeatures = auxv; _features = auxv;
int cpu_lines = 0; int cpu_lines = 0;
if (FILE *f = fopen("/proc/cpuinfo", "r")) { if (FILE *f = fopen("/proc/cpuinfo", "r")) {
@ -154,12 +152,12 @@ void VM_Version::get_processor_features() {
} }
// Enable vendor specific features // Enable vendor specific features
if (_cpu == CPU_CAVIUM && _variant == 0) _cpuFeatures |= CPU_DMB_ATOMICS; if (_cpu == CPU_CAVIUM && _variant == 0) _features |= CPU_DMB_ATOMICS;
if (_cpu == CPU_ARM && (_model == 0xd03 || _model2 == 0xd03)) _cpuFeatures |= CPU_A53MAC; if (_cpu == CPU_ARM && (_model == 0xd03 || _model2 == 0xd03)) _features |= CPU_A53MAC;
// If an old-style /proc/cpuinfo (cpu_lines == 1) then if _model is an A57 (0xd07) // If an old-style /proc/cpuinfo (cpu_lines == 1) then if _model is an A57 (0xd07)
// we assume the worst and assume we could be on a big.LITTLE system and have // we assume the worst and assume we could be on a big.LITTLE system and have
// undisclosed A53 cores which we could be swapped to at any stage // undisclosed A53 cores which we could be swapped to at any stage
if (_cpu == CPU_ARM && cpu_lines == 1 && _model == 0xd07) _cpuFeatures |= CPU_A53MAC; if (_cpu == CPU_ARM && cpu_lines == 1 && _model == 0xd07) _features |= CPU_A53MAC;
sprintf(buf, "0x%02x:0x%x:0x%03x:%d", _cpu, _variant, _model, _revision); sprintf(buf, "0x%02x:0x%x:0x%03x:%d", _cpu, _variant, _model, _revision);
if (_model2) sprintf(buf+strlen(buf), "(0x%03x)", _model2); if (_model2) sprintf(buf+strlen(buf), "(0x%03x)", _model2);
@ -169,7 +167,7 @@ void VM_Version::get_processor_features() {
if (auxv & HWCAP_SHA1) strcat(buf, ", sha1"); if (auxv & HWCAP_SHA1) strcat(buf, ", sha1");
if (auxv & HWCAP_SHA2) strcat(buf, ", sha256"); if (auxv & HWCAP_SHA2) strcat(buf, ", sha256");
_features_str = os::strdup(buf); _features_string = os::strdup(buf);
if (FLAG_IS_DEFAULT(UseCRC32)) { if (FLAG_IS_DEFAULT(UseCRC32)) {
UseCRC32 = (auxv & HWCAP_CRC32) != 0; UseCRC32 = (auxv & HWCAP_CRC32) != 0;
@ -182,6 +180,11 @@ void VM_Version::get_processor_features() {
FLAG_SET_DEFAULT(UseAdler32Intrinsics, true); FLAG_SET_DEFAULT(UseAdler32Intrinsics, true);
} }
if (UseVectorizedMismatchIntrinsic) {
warning("UseVectorizedMismatchIntrinsic specified, but not available on this CPU.");
FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
}
if (auxv & HWCAP_AES) { if (auxv & HWCAP_AES) {
UseAES = UseAES || FLAG_IS_DEFAULT(UseAES); UseAES = UseAES || FLAG_IS_DEFAULT(UseAES);
UseAESIntrinsics = UseAESIntrinsics =
@ -199,6 +202,11 @@ void VM_Version::get_processor_features() {
} }
} }
if (UseAESCTRIntrinsics) {
warning("AES/CTR intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
}
if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) { if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
UseCRC32Intrinsics = true; UseCRC32Intrinsics = true;
} }
@ -267,7 +275,7 @@ void VM_Version::get_processor_features() {
} }
if (FLAG_IS_DEFAULT(UseBarriersForVolatile)) { if (FLAG_IS_DEFAULT(UseBarriersForVolatile)) {
UseBarriersForVolatile = (_cpuFeatures & CPU_DMB_ATOMICS) != 0; UseBarriersForVolatile = (_features & CPU_DMB_ATOMICS) != 0;
} }
if (FLAG_IS_DEFAULT(UsePopCountInstruction)) { if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {

View file

@ -30,7 +30,8 @@
#include "runtime/vm_version.hpp" #include "runtime/vm_version.hpp"
class VM_Version : public Abstract_VM_Version { class VM_Version : public Abstract_VM_Version {
public: friend class JVMCIVMStructs;
protected: protected:
static int _cpu; static int _cpu;
static int _model; static int _model;
@ -38,9 +39,6 @@ protected:
static int _variant; static int _variant;
static int _revision; static int _revision;
static int _stepping; static int _stepping;
static int _cpuFeatures; // features returned by the "cpuid" instruction
// 0 if this instruction is not available
static const char* _features_str;
static void get_processor_features(); static void get_processor_features();
@ -52,7 +50,7 @@ public:
static void assert_is_initialized() { static void assert_is_initialized() {
} }
enum { enum Family {
CPU_ARM = 'A', CPU_ARM = 'A',
CPU_BROADCOM = 'B', CPU_BROADCOM = 'B',
CPU_CAVIUM = 'C', CPU_CAVIUM = 'C',
@ -64,9 +62,9 @@ public:
CPU_QUALCOM = 'Q', CPU_QUALCOM = 'Q',
CPU_MARVELL = 'V', CPU_MARVELL = 'V',
CPU_INTEL = 'i', CPU_INTEL = 'i',
} cpuFamily; };
enum { enum Feature_Flag {
CPU_FP = (1<<0), CPU_FP = (1<<0),
CPU_ASIMD = (1<<1), CPU_ASIMD = (1<<1),
CPU_EVTSTRM = (1<<2), CPU_EVTSTRM = (1<<2),
@ -77,16 +75,13 @@ public:
CPU_CRC32 = (1<<7), CPU_CRC32 = (1<<7),
CPU_A53MAC = (1 << 30), CPU_A53MAC = (1 << 30),
CPU_DMB_ATOMICS = (1 << 31), CPU_DMB_ATOMICS = (1 << 31),
} cpuFeatureFlags; };
static const char* cpu_features() { return _features_str; }
static int cpu_family() { return _cpu; } static int cpu_family() { return _cpu; }
static int cpu_model() { return _model; } static int cpu_model() { return _model; }
static int cpu_model2() { return _model2; } static int cpu_model2() { return _model2; }
static int cpu_variant() { return _variant; } static int cpu_variant() { return _variant; }
static int cpu_revision() { return _revision; } static int cpu_revision() { return _revision; }
static int cpu_cpuFeatures() { return _cpuFeatures; }
}; };
#endif // CPU_AARCH64_VM_VM_VERSION_AARCH64_HPP #endif // CPU_AARCH64_VM_VM_VERSION_AARCH64_HPP

View file

@ -53,9 +53,6 @@ int AbstractAssembler::code_fill_byte() {
return 0x00; // illegal instruction 0x00000000 return 0x00; // illegal instruction 0x00000000
} }
void Assembler::print_instruction(int inst) {
Unimplemented();
}
// Patch instruction `inst' at offset `inst_pos' to refer to // Patch instruction `inst' at offset `inst_pos' to refer to
// `dest_pos' and return the resulting instruction. We should have // `dest_pos' and return the resulting instruction. We should have
@ -484,7 +481,7 @@ int Assembler::add_const_optimized(Register d, Register s, long x, Register tmp,
if (d != s) { mr(d, s); } if (d != s) { mr(d, s); }
return 0; return 0;
} }
if (return_simm16_rest) { if (return_simm16_rest && (d == s)) {
return xd; return xd;
} }
addi(d, s, xd); addi(d, s, xd);

View file

@ -31,10 +31,37 @@
// Address is an abstraction used to represent a memory location // Address is an abstraction used to represent a memory location
// as used in assembler instructions. // as used in assembler instructions.
// PPC instructions grok either baseReg + indexReg or baseReg + disp. // PPC instructions grok either baseReg + indexReg or baseReg + disp.
// So far we do not use this as simplification by this class is low
// on PPC with its simple addressing mode. Use RegisterOrConstant to
// represent an offset.
class Address VALUE_OBJ_CLASS_SPEC { class Address VALUE_OBJ_CLASS_SPEC {
private:
Register _base; // Base register.
Register _index; // Index register.
intptr_t _disp; // Displacement.
public:
Address(Register b, Register i, address d = 0)
: _base(b), _index(i), _disp((intptr_t)d) {
assert(i == noreg || d == 0, "can't have both");
}
Address(Register b, address d = 0)
: _base(b), _index(noreg), _disp((intptr_t)d) {}
Address(Register b, intptr_t d)
: _base(b), _index(noreg), _disp(d) {}
Address(Register b, RegisterOrConstant roc)
: _base(b), _index(noreg), _disp(0) {
if (roc.is_constant()) _disp = roc.as_constant(); else _index = roc.as_register();
}
Address()
: _base(noreg), _index(noreg), _disp(0) {}
// accessors
Register base() const { return _base; }
Register index() const { return _index; }
int disp() const { return (int)_disp; }
bool is_const() const { return _base == noreg && _index == noreg; }
}; };
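A usage sketch of the new constructors (registers and offsets chosen for illustration), covering the two addressing modes PPC instructions accept:

Address a1(R3_ARG1, 8);                    // baseReg + disp
Address a2(R3_ARG1, R4_ARG2);              // baseReg + indexReg
Address a3(R3_ARG1, RegisterOrConstant((intptr_t)16)); // folds into baseReg + disp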
class AddressLiteral VALUE_OBJ_CLASS_SPEC { class AddressLiteral VALUE_OBJ_CLASS_SPEC {
@ -164,10 +191,14 @@ struct FunctionDescriptor VALUE_OBJ_CLASS_SPEC {
}; };
#endif #endif
// The PPC Assembler: Pure assembler doing NO optimizations on the
// instruction level; i.e., what you write is what you get. The
// Assembler is generating code into a CodeBuffer.
class Assembler : public AbstractAssembler { class Assembler : public AbstractAssembler {
protected: protected:
// Displacement routines // Displacement routines
static void print_instruction(int inst);
static int patched_branch(int dest_pos, int inst, int inst_pos); static int patched_branch(int dest_pos, int inst, int inst_pos);
static int branch_destination(int inst, int pos); static int branch_destination(int inst, int pos);
@ -839,41 +870,38 @@ class Assembler : public AbstractAssembler {
enum Predict { pt = 1, pn = 0 }; // pt = predict taken enum Predict { pt = 1, pn = 0 }; // pt = predict taken
// instruction must start at passed address // Instruction must start at passed address.
static int instr_len(unsigned char *instr) { return BytesPerInstWord; } static int instr_len(unsigned char *instr) { return BytesPerInstWord; }
// instruction must be left-justified in argument
static int instr_len(unsigned long instr) { return BytesPerInstWord; }
// longest instructions
static int instr_maxlen() { return BytesPerInstWord; }
// Test if x is within signed immediate range for nbits.
static bool is_simm(int x, unsigned int nbits) {
assert(0 < nbits && nbits < 32, "out of bounds");
const int min = -(((int)1) << nbits-1);
const int maxplus1 = (((int)1) << nbits-1);
return min <= x && x < maxplus1;
}
static bool is_simm(jlong x, unsigned int nbits) {
assert(0 < nbits && nbits < 64, "out of bounds");
const jlong min = -(((jlong)1) << nbits-1);
const jlong maxplus1 = (((jlong)1) << nbits-1);
return min <= x && x < maxplus1;
}
// Test if x is within unsigned immediate range for nbits.
static bool is_uimm(int x, unsigned int nbits) {
assert(0 < nbits && nbits < 32, "out of bounds");
const unsigned int maxplus1 = (((unsigned int)1) << nbits);
return (unsigned int)x < maxplus1;
}
static bool is_uimm(jlong x, unsigned int nbits) {
assert(0 < nbits && nbits < 64, "out of bounds");
const julong maxplus1 = (((julong)1) << nbits);
return (julong)x < maxplus1;
}
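// Worked example (illustrative): for nbits = 16,
//   is_simm(32767, 16) -> true,   is_simm(32768, 16) -> false
//   is_uimm(65535, 16) -> true,   is_uimm(-1, 16)    -> false
// i.e. the signed range is [-2^15, 2^15-1], the unsigned range [0, 2^16-1].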
protected:
@@ -1196,6 +1224,8 @@ class Assembler : public AbstractAssembler {
inline void mullw_( Register d, Register a, Register b);
inline void mulhw( Register d, Register a, Register b);
inline void mulhw_( Register d, Register a, Register b);
inline void mulhwu( Register d, Register a, Register b);
inline void mulhwu_(Register d, Register a, Register b);
inline void mulhd( Register d, Register a, Register b);
inline void mulhd_( Register d, Register a, Register b);
inline void mulhdu( Register d, Register a, Register b);
@@ -1376,8 +1406,11 @@ class Assembler : public AbstractAssembler {
inline void orc( Register a, Register s, Register b);
inline void orc_( Register a, Register s, Register b);
inline void extsb( Register a, Register s);
inline void extsb_( Register a, Register s);
inline void extsh( Register a, Register s);
inline void extsh_( Register a, Register s);
inline void extsw( Register a, Register s);
inline void extsw_( Register a, Register s);
// extended mnemonics
inline void nop();
@@ -1767,6 +1800,8 @@ class Assembler : public AbstractAssembler {
inline void smt_yield();
inline void smt_mdoio();
inline void smt_mdoom();
// >= Power8
inline void smt_miso();
// trap instructions
inline void twi_0(Register a); // for load with acquire semantics use load+twi_0+isync (trap can't occur)
@@ -2168,6 +2203,7 @@ class Assembler : public AbstractAssembler {
inline void load_const(Register d, void* a, Register tmp = noreg);
inline void load_const(Register d, Label& L, Register tmp = noreg);
inline void load_const(Register d, AddressLiteral& a, Register tmp = noreg);
inline void load_const32(Register d, int i); // load signed int (patchable)
// Load a 64 bit constant, optimized, not identifiable.
// Tmp can be used to increase ILP. Set return_simm16_rest = true to get a

View file

@@ -117,6 +117,8 @@ inline void Assembler::mullw( Register d, Register a, Register b) { emit_int32(
inline void Assembler::mullw_( Register d, Register a, Register b) { emit_int32(MULLW_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
inline void Assembler::mulhw( Register d, Register a, Register b) { emit_int32(MULHW_OPCODE | rt(d) | ra(a) | rb(b) | rc(0)); }
inline void Assembler::mulhw_( Register d, Register a, Register b) { emit_int32(MULHW_OPCODE | rt(d) | ra(a) | rb(b) | rc(1)); }
inline void Assembler::mulhwu( Register d, Register a, Register b) { emit_int32(MULHWU_OPCODE | rt(d) | ra(a) | rb(b) | rc(0)); }
inline void Assembler::mulhwu_(Register d, Register a, Register b) { emit_int32(MULHWU_OPCODE | rt(d) | ra(a) | rb(b) | rc(1)); }
inline void Assembler::mulhd( Register d, Register a, Register b) { emit_int32(MULHD_OPCODE | rt(d) | ra(a) | rb(b) | rc(0)); }
inline void Assembler::mulhd_( Register d, Register a, Register b) { emit_int32(MULHD_OPCODE | rt(d) | ra(a) | rb(b) | rc(1)); }
inline void Assembler::mulhdu( Register d, Register a, Register b) { emit_int32(MULHDU_OPCODE | rt(d) | ra(a) | rb(b) | rc(0)); }
@@ -206,8 +208,11 @@ inline void Assembler::andc_( Register a, Register s, Register b) { emit_in
inline void Assembler::orc( Register a, Register s, Register b) { emit_int32(ORC_OPCODE | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::orc_( Register a, Register s, Register b) { emit_int32(ORC_OPCODE | rta(a) | rs(s) | rb(b) | rc(1)); }
inline void Assembler::extsb( Register a, Register s) { emit_int32(EXTSB_OPCODE | rta(a) | rs(s) | rc(0)); }
inline void Assembler::extsb_( Register a, Register s) { emit_int32(EXTSB_OPCODE | rta(a) | rs(s) | rc(1)); }
inline void Assembler::extsh( Register a, Register s) { emit_int32(EXTSH_OPCODE | rta(a) | rs(s) | rc(0)); }
inline void Assembler::extsh_( Register a, Register s) { emit_int32(EXTSH_OPCODE | rta(a) | rs(s) | rc(1)); }
inline void Assembler::extsw( Register a, Register s) { emit_int32(EXTSW_OPCODE | rta(a) | rs(s) | rc(0)); }
inline void Assembler::extsw_( Register a, Register s) { emit_int32(EXTSW_OPCODE | rta(a) | rs(s) | rc(1)); }
// extended mnemonics
inline void Assembler::nop() { Assembler::ori(R0, R0, 0); }
@@ -609,6 +614,8 @@ inline void Assembler::smt_prio_high() { Assembler::or_unchecked(R3, R3,
inline void Assembler::smt_yield() { Assembler::or_unchecked(R27, R27, R27); }
inline void Assembler::smt_mdoio() { Assembler::or_unchecked(R29, R29, R29); }
inline void Assembler::smt_mdoom() { Assembler::or_unchecked(R30, R30, R30); }
// >= Power8
inline void Assembler::smt_miso() { Assembler::or_unchecked(R26, R26, R26); }
inline void Assembler::twi_0(Register a) { twi_unchecked(0, a, 0);}
@@ -967,12 +974,15 @@ inline void Assembler::load_const(Register d, Label& L, Register tmp) {
// Load a 64 bit constant encoded by an AddressLiteral. Patchable.
inline void Assembler::load_const(Register d, AddressLiteral& a, Register tmp) {
assert(d != R0, "R0 not allowed");
// First relocate (we don't change the offset in the RelocationHolder,
// just pass a.rspec()), then delegate to load_const(Register, long).
relocate(a.rspec());
load_const(d, (long)a.value(), tmp);
}
inline void Assembler::load_const32(Register d, int i) {
lis(d, i >> 16);
ori(d, d, i & 0xFFFF);
}
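// Example (illustrative): load_const32(R3, 0x12345678) emits
//   lis(R3, 0x1234);      // R3 = 0x12340000
//   ori(R3, R3, 0x5678);  // R3 |= 0x5678
// Always two instructions with 16-bit immediate fields, hence patchable.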
#endif // CPU_PPC_VM_ASSEMBLER_PPC_INLINE_HPP

View file

@@ -0,0 +1,527 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_ppc.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_ppc.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif // INCLUDE_ALL_GCS
#define __ ce->masm()->
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
bool throw_index_out_of_bounds_exception)
: _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
, _index(index) {
assert(info != NULL, "must have info");
_info = new CodeEmitInfo(info);
}
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
if (_info->deoptimize_on_exception()) {
address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
// May be used by optimizations like LoopInvariantCodeMotion or RangeCheckEliminator.
DEBUG_ONLY( __ untested("RangeCheckStub: predicate_failed_trap_id"); )
//__ load_const_optimized(R0, a);
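// The entry point is materialized TOC-relatively (R0 = R29_TOC + offset)
// instead of via the commented-out full 64-bit constant load; both variants
// feed the indirect call through mtctr/bctrl below.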
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
__ mtctr(R0);
__ bctrl();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ illtrap());
return;
}
address stub = _throw_index_out_of_bounds_exception ? Runtime1::entry_for(Runtime1::throw_index_exception_id)
: Runtime1::entry_for(Runtime1::throw_range_check_failed_id);
//__ load_const_optimized(R0, stub);
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
__ mtctr(R0);
Register index = R0; // pass in R0
if (_index->is_register()) {
__ extsw(index, _index->as_register());
} else {
__ load_const_optimized(index, _index->as_jint());
}
__ bctrl();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ illtrap());
}
PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
_info = new CodeEmitInfo(info);
}
void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
//__ load_const_optimized(R0, a);
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
__ mtctr(R0);
__ bctrl();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ illtrap());
}
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
// Parameter 1: bci
__ load_const_optimized(R0, _bci);
__ std(R0, -16, R1_SP);
// Parameter 2: Method*
Metadata *m = _method->as_constant_ptr()->as_metadata();
AddressLiteral md = __ constant_metadata_address(m); // Notify OOP recorder (don't need the relocation).
__ load_const_optimized(R0, md.value());
__ std(R0, -8, R1_SP);
address a = Runtime1::entry_for(Runtime1::counter_overflow_id);
//__ load_const_optimized(R0, a);
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
__ mtctr(R0);
__ bctrl();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
__ b(_continuation);
}
void DivByZeroStub::emit_code(LIR_Assembler* ce) {
if (_offset != -1) {
ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
}
__ bind(_entry);
address stub = Runtime1::entry_for(Runtime1::throw_div0_exception_id);
//__ load_const_optimized(R0, stub);
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
__ mtctr(R0);
__ bctrl();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ illtrap());
}
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
address a;
if (_info->deoptimize_on_exception()) {
// Deoptimize, do not throw the exception, because it is probably wrong to do it here.
a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
} else {
a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
}
if (ImplicitNullChecks || TrapBasedNullChecks) {
ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
}
__ bind(_entry);
//__ load_const_optimized(R0, a);
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
__ mtctr(R0);
__ bctrl();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ illtrap());
}
// Implementation of SimpleExceptionStub
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
address stub = Runtime1::entry_for(_stub);
//__ load_const_optimized(R0, stub);
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
if (_obj->is_valid()) { __ mr_if_needed(/*tmp1 in do_CheckCast*/ R4_ARG2, _obj->as_register()); }
__ mtctr(R0);
__ bctrl();
ce->add_call_info_here(_info);
debug_only( __ illtrap(); )
}
// Implementation of NewInstanceStub
NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
_result = result;
_klass = klass;
_klass_reg = klass_reg;
_info = new CodeEmitInfo(info);
assert(stub_id == Runtime1::new_instance_id ||
stub_id == Runtime1::fast_new_instance_id ||
stub_id == Runtime1::fast_new_instance_init_check_id,
"need new_instance id");
_stub_id = stub_id;
}
void NewInstanceStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
address entry = Runtime1::entry_for(_stub_id);
//__ load_const_optimized(R0, entry);
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
__ mtctr(R0);
__ bctrl();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
__ b(_continuation);
}
// Implementation of NewTypeArrayStub
NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
_klass_reg = klass_reg;
_length = length;
_result = result;
_info = new CodeEmitInfo(info);
}
void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
address entry = Runtime1::entry_for(Runtime1::new_type_array_id);
//__ load_const_optimized(R0, entry);
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
__ mr_if_needed(/*op->tmp1()->as_register()*/ R5_ARG3, _length->as_register()); // already sign-extended
__ mtctr(R0);
__ bctrl();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
__ b(_continuation);
}
// Implementation of NewObjectArrayStub
NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
_klass_reg = klass_reg;
_length = length;
_result = result;
_info = new CodeEmitInfo(info);
}
void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
address entry = Runtime1::entry_for(Runtime1::new_object_array_id);
//__ load_const_optimized(R0, entry);
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
__ mr_if_needed(/*op->tmp1()->as_register()*/ R5_ARG3, _length->as_register()); // already sign-extended
__ mtctr(R0);
__ bctrl();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
__ b(_continuation);
}
// Implementation of MonitorAccessStubs
MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg, lock_reg) {
_info = new CodeEmitInfo(info);
}
void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? Runtime1::monitorenter_id : Runtime1::monitorenter_nofpu_id);
//__ load_const_optimized(R0, stub);
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
__ mr_if_needed(/*scratch_opr()->as_register()*/ R4_ARG2, _obj_reg->as_register());
assert(_lock_reg->as_register() == R5_ARG3, "");
__ mtctr(R0);
__ bctrl();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
__ b(_continuation);
}
void MonitorExitStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
if (_compute_lock) {
ce->monitor_address(_monitor_ix, _lock_reg);
}
address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? Runtime1::monitorexit_id : Runtime1::monitorexit_nofpu_id);
//__ load_const_optimized(R0, stub);
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
assert(_lock_reg->as_register() == R4_ARG2, "");
__ mtctr(R0);
__ bctrl();
__ b(_continuation);
}
// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes).
// - Replace original code with a call to the stub.
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction
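// The patch-info offset below is -(5 * BytesPerInstWord) because the call
// sequence emitted at the patch site in emit_code() is load_const32
// (lis + ori), add, mtctr, bctrl -- five instructions.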
int PatchingStub::_patch_info_offset = -(5 * BytesPerInstWord);
void PatchingStub::align_patch_site(MacroAssembler* ) {
// Patch sites on ppc are always properly aligned.
}
#ifdef ASSERT
inline void compare_with_patch_site(address template_start, address pc_start, int bytes_to_copy) {
address start = template_start;
for (int i = 0; i < bytes_to_copy; i++) {
address ptr = (address)(pc_start + i);
int a_byte = (*ptr) & 0xFF;
assert(a_byte == *start++, "should be the same code");
}
}
#endif
void PatchingStub::emit_code(LIR_Assembler* ce) {
// copy original code here
assert(NativeGeneralJump::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
"not enough room for call");
assert((_bytes_to_copy & 0x3) == 0, "must copy a multiple of four bytes");
Label call_patch;
int being_initialized_entry = __ offset();
if (_id == load_klass_id) {
// Produce a copy of the load klass instruction for use by the being initialized case.
AddressLiteral addrlit((address)NULL, metadata_Relocation::spec(_index));
__ load_const(_obj, addrlit, R0);
DEBUG_ONLY( compare_with_patch_site(__ code_section()->start() + being_initialized_entry, _pc_start, _bytes_to_copy); )
} else if (_id == load_mirror_id || _id == load_appendix_id) {
// Produce a copy of the load mirror instruction for use by the being initialized case.
AddressLiteral addrlit((address)NULL, oop_Relocation::spec(_index));
__ load_const(_obj, addrlit, R0);
DEBUG_ONLY( compare_with_patch_site(__ code_section()->start() + being_initialized_entry, _pc_start, _bytes_to_copy); )
} else {
// Make a copy of the code which is going to be patched.
for (int i = 0; i < _bytes_to_copy; i++) {
address ptr = (address)(_pc_start + i);
int a_byte = (*ptr) & 0xFF;
__ emit_int8 (a_byte);
}
}
address end_of_patch = __ pc();
int bytes_to_skip = 0;
if (_id == load_mirror_id) {
int offset = __ offset();
__ block_comment(" being_initialized check");
// Static field accesses have special semantics while the class
// initializer is being run so we emit a test which can be used to
// check that this code is being executed by the initializing
// thread.
assert(_obj != noreg, "must be a valid register");
assert(_index >= 0, "must have oop index");
__ mr(R0, _obj); // spill
__ ld(_obj, java_lang_Class::klass_offset_in_bytes(), _obj);
__ ld(_obj, in_bytes(InstanceKlass::init_thread_offset()), _obj);
__ cmpd(CCR0, _obj, R16_thread);
__ mr(_obj, R0); // restore
__ bne(CCR0, call_patch);
// Load_klass patches may execute the patched code before it's
// copied back into place so we need to jump back into the main
// code of the nmethod to continue execution.
__ b(_patch_site_continuation);
// Make sure this extra code gets skipped.
bytes_to_skip += __ offset() - offset;
}
// Now emit the patch record telling the runtime how to find the
// pieces of the patch. We only need 3 bytes but it has to be
// aligned as an instruction so emit 4 bytes.
int sizeof_patch_record = 4;
bytes_to_skip += sizeof_patch_record;
// Emit the offsets needed to find the code to patch.
int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;
// Emit the patch record. We need to emit a full word, so emit an extra empty byte.
__ emit_int8(0);
__ emit_int8(being_initialized_entry_offset);
__ emit_int8(bytes_to_skip);
__ emit_int8(_bytes_to_copy);
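// Record layout as emitted above (one instruction word):
//   byte 0: 0 (padding), byte 1: being_initialized_entry_offset,
//   byte 2: bytes_to_skip, byte 3: _bytes_to_copy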
address patch_info_pc = __ pc();
assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");
address entry = __ pc();
NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
address target = NULL;
relocInfo::relocType reloc_type = relocInfo::none;
switch (_id) {
case access_field_id: target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
case load_klass_id: target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
reloc_type = relocInfo::metadata_type; break;
case load_mirror_id: target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
reloc_type = relocInfo::oop_type; break;
case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
reloc_type = relocInfo::oop_type; break;
default: ShouldNotReachHere();
}
__ bind(call_patch);
__ block_comment("patch entry point");
//__ load_const(R0, target); + mtctr + bctrl must have size -_patch_info_offset
__ load_const32(R0, MacroAssembler::offset_to_global_toc(target));
__ add(R0, R29_TOC, R0);
__ mtctr(R0);
__ bctrl();
assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
ce->add_call_info_here(_info);
__ b(_patch_site_entry);
if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
CodeSection* cs = __ code_section();
address pc = (address)_pc_start;
RelocIterator iter(cs, pc, pc + 1);
relocInfo::change_reloc_info_for_address(&iter, (address) pc, reloc_type, relocInfo::none);
}
}
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
address stub = Runtime1::entry_for(Runtime1::deoptimize_id);
//__ load_const_optimized(R0, stub);
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
__ mtctr(R0);
__ load_const_optimized(R0, _trap_request); // Pass trap request in R0.
__ bctrl();
ce->add_call_info_here(_info);
debug_only(__ illtrap());
}
void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
//---------------slow case: call to native-----------------
__ bind(_entry);
__ mr(R3_ARG1, src()->as_register());
__ extsw(R4_ARG2, src_pos()->as_register());
__ mr(R5_ARG3, dst()->as_register());
__ extsw(R6_ARG4, dst_pos()->as_register());
__ extsw(R7_ARG5, length()->as_register());
ce->emit_static_call_stub();
bool success = ce->emit_trampoline_stub_for_call(SharedRuntime::get_resolve_static_call_stub());
if (!success) { return; }
__ relocate(relocInfo::static_call_type);
// Note: At this point we do not have the address of the trampoline
// stub, and the entry point might be too far away for bl, so __ pc()
// serves as dummy and the bl will be patched later.
__ code()->set_insts_mark();
__ bl(__ pc());
ce->add_call_info_here(info());
ce->verify_oop_map(info());
#ifndef PRODUCT
const address counter = (address)&Runtime1::_arraycopy_slowcase_cnt;
const Register tmp = R3, tmp2 = R4;
int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
__ lwz(tmp2, simm16_offs, tmp);
__ addi(tmp2, tmp2, 1);
__ stw(tmp2, simm16_offs, tmp);
#endif
__ b(_continuation);
}
///////////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
// At this point we know that marking is in progress.
// If do_load() is true then we have to emit the
// load of the previous value; otherwise it has already
// been loaded into _pre_val.
__ bind(_entry);
assert(pre_val()->is_register(), "Precondition.");
Register pre_val_reg = pre_val()->as_register();
if (do_load()) {
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
}
__ cmpdi(CCR0, pre_val_reg, 0);
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), _continuation);
address stub = Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id);
//__ load_const_optimized(R0, stub);
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
__ std(pre_val_reg, -8, R1_SP); // Pass pre_val on stack.
__ mtctr(R0);
__ bctrl();
__ b(_continuation);
}
void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
assert(addr()->is_register(), "Precondition.");
assert(new_val()->is_register(), "Precondition.");
Register addr_reg = addr()->as_pointer_register();
Register new_val_reg = new_val()->as_register();
__ cmpdi(CCR0, new_val_reg, 0);
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), _continuation);
address stub = Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id);
//__ load_const_optimized(R0, stub);
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
__ mtctr(R0);
__ mr(R0, addr_reg); // Pass addr in R0.
__ bctrl();
__ b(_continuation);
}
#endif // INCLUDE_ALL_GCS
///////////////////////////////////////////////////////////////////////////////////
#undef __

View file

@@ -0,0 +1,76 @@
/*
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_C1_DEFS_PPC_HPP
#define CPU_PPC_VM_C1_DEFS_PPC_HPP
// Native word offsets from memory address.
enum {
#if defined(VM_LITTLE_ENDIAN)
pd_lo_word_offset_in_bytes = 0,
pd_hi_word_offset_in_bytes = BytesPerInt
#else
pd_lo_word_offset_in_bytes = BytesPerInt,
pd_hi_word_offset_in_bytes = 0
#endif
};
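// Illustrative: for the jlong 0x0000000100000002, the hi word is 0x00000001
// and the lo word 0x00000002; big-endian stores the hi word first (lo word
// at offset BytesPerInt), little-endian the reverse.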
// Explicit rounding operations are not required to implement the strictFP mode.
enum {
pd_strict_fp_requires_explicit_rounding = false
};
// registers
enum {
pd_nof_cpu_regs_frame_map = 32, // Number of registers used during code emission.
pd_nof_caller_save_cpu_regs_frame_map = 27, // Number of cpu registers killed by calls. (At least R3_ARG1 ... R10_ARG8, but using all like C2.)
pd_nof_cpu_regs_reg_alloc = 27, // Number of registers that are visible to register allocator.
pd_nof_cpu_regs_linearscan = 32, // Number of registers visible to linear scan.
pd_first_callee_saved_reg = pd_nof_caller_save_cpu_regs_frame_map,
pd_last_callee_saved_reg = pd_nof_cpu_regs_reg_alloc - 1,
pd_first_cpu_reg = 0,
pd_last_cpu_reg = pd_nof_cpu_regs_reg_alloc - 1,
pd_nof_fpu_regs_frame_map = 32, // Number of registers used during code emission.
pd_nof_caller_save_fpu_regs_frame_map = 32, // Number of fpu registers killed by calls.
pd_nof_fpu_regs_reg_alloc = 32, // Number of registers that are visible to register allocator.
pd_nof_fpu_regs_linearscan = 32, // Number of registers visible to linear scan.
pd_first_fpu_reg = pd_nof_cpu_regs_frame_map,
pd_last_fpu_reg = pd_nof_cpu_regs_frame_map + pd_nof_fpu_regs_reg_alloc - 1,
pd_nof_xmm_regs_linearscan = 0,
pd_nof_caller_save_xmm_regs = 0,
pd_first_xmm_reg = -1,
pd_last_xmm_reg = -1
};
// For debug info: a float value in a register is saved in single precision by runtime stubs.
enum {
pd_float_saved_as_double = true
};
#endif // CPU_PPC_VM_C1_DEFS_PPC_HPP

View file

@@ -0,0 +1,32 @@
/*
* Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_C1_FPUSTACKSIM_PPC_HPP
#define CPU_PPC_VM_C1_FPUSTACKSIM_PPC_HPP
// No FPU stack on PPC.
class FpuStackSim;
#endif // CPU_PPC_VM_C1_FPUSTACKSIM_PPC_HPP

View file

@@ -0,0 +1,394 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIR.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_ppc.inline.hpp"
const int FrameMap::pd_c_runtime_reserved_arg_size = 7;
LIR_Opr FrameMap::map_to_opr(BasicType type, VMRegPair* reg, bool outgoing) {
LIR_Opr opr = LIR_OprFact::illegalOpr;
VMReg r_1 = reg->first();
VMReg r_2 = reg->second();
if (r_1->is_stack()) {
// Convert stack slot to an SP offset.
// The calling convention does not count the SharedRuntime::out_preserve_stack_slots() value
// so we must add it in here.
int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
opr = LIR_OprFact::address(new LIR_Address(SP_opr, st_off + STACK_BIAS, type));
} else if (r_1->is_Register()) {
Register reg = r_1->as_Register();
//if (outgoing) {
// assert(!reg->is_in(), "should be using I regs");
//} else {
// assert(!reg->is_out(), "should be using O regs");
//}
if (r_2->is_Register() && (type == T_LONG || type == T_DOUBLE)) {
opr = as_long_opr(reg);
} else if (type == T_OBJECT || type == T_ARRAY) {
opr = as_oop_opr(reg);
} else {
opr = as_opr(reg);
}
} else if (r_1->is_FloatRegister()) {
assert(type == T_DOUBLE || type == T_FLOAT, "wrong type");
FloatRegister f = r_1->as_FloatRegister();
if (type == T_DOUBLE) {
opr = as_double_opr(f);
} else {
opr = as_float_opr(f);
}
}
return opr;
}
// FrameMap
//--------------------------------------------------------
FloatRegister FrameMap::_fpu_regs [FrameMap::nof_fpu_regs];
LIR_Opr FrameMap::R0_opr;
LIR_Opr FrameMap::R1_opr;
LIR_Opr FrameMap::R2_opr;
LIR_Opr FrameMap::R3_opr;
LIR_Opr FrameMap::R4_opr;
LIR_Opr FrameMap::R5_opr;
LIR_Opr FrameMap::R6_opr;
LIR_Opr FrameMap::R7_opr;
LIR_Opr FrameMap::R8_opr;
LIR_Opr FrameMap::R9_opr;
LIR_Opr FrameMap::R10_opr;
LIR_Opr FrameMap::R11_opr;
LIR_Opr FrameMap::R12_opr;
LIR_Opr FrameMap::R13_opr;
LIR_Opr FrameMap::R14_opr;
LIR_Opr FrameMap::R15_opr;
LIR_Opr FrameMap::R16_opr;
LIR_Opr FrameMap::R17_opr;
LIR_Opr FrameMap::R18_opr;
LIR_Opr FrameMap::R19_opr;
LIR_Opr FrameMap::R20_opr;
LIR_Opr FrameMap::R21_opr;
LIR_Opr FrameMap::R22_opr;
LIR_Opr FrameMap::R23_opr;
LIR_Opr FrameMap::R24_opr;
LIR_Opr FrameMap::R25_opr;
LIR_Opr FrameMap::R26_opr;
LIR_Opr FrameMap::R27_opr;
LIR_Opr FrameMap::R28_opr;
LIR_Opr FrameMap::R29_opr;
LIR_Opr FrameMap::R30_opr;
LIR_Opr FrameMap::R31_opr;
LIR_Opr FrameMap::R0_oop_opr;
//LIR_Opr FrameMap::R1_oop_opr;
LIR_Opr FrameMap::R2_oop_opr;
LIR_Opr FrameMap::R3_oop_opr;
LIR_Opr FrameMap::R4_oop_opr;
LIR_Opr FrameMap::R5_oop_opr;
LIR_Opr FrameMap::R6_oop_opr;
LIR_Opr FrameMap::R7_oop_opr;
LIR_Opr FrameMap::R8_oop_opr;
LIR_Opr FrameMap::R9_oop_opr;
LIR_Opr FrameMap::R10_oop_opr;
LIR_Opr FrameMap::R11_oop_opr;
LIR_Opr FrameMap::R12_oop_opr;
//LIR_Opr FrameMap::R13_oop_opr;
LIR_Opr FrameMap::R14_oop_opr;
LIR_Opr FrameMap::R15_oop_opr;
//LIR_Opr FrameMap::R16_oop_opr;
LIR_Opr FrameMap::R17_oop_opr;
LIR_Opr FrameMap::R18_oop_opr;
LIR_Opr FrameMap::R19_oop_opr;
LIR_Opr FrameMap::R20_oop_opr;
LIR_Opr FrameMap::R21_oop_opr;
LIR_Opr FrameMap::R22_oop_opr;
LIR_Opr FrameMap::R23_oop_opr;
LIR_Opr FrameMap::R24_oop_opr;
LIR_Opr FrameMap::R25_oop_opr;
LIR_Opr FrameMap::R26_oop_opr;
LIR_Opr FrameMap::R27_oop_opr;
LIR_Opr FrameMap::R28_oop_opr;
//LIR_Opr FrameMap::R29_oop_opr;
LIR_Opr FrameMap::R30_oop_opr;
LIR_Opr FrameMap::R31_oop_opr;
LIR_Opr FrameMap::R0_metadata_opr;
//LIR_Opr FrameMap::R1_metadata_opr;
LIR_Opr FrameMap::R2_metadata_opr;
LIR_Opr FrameMap::R3_metadata_opr;
LIR_Opr FrameMap::R4_metadata_opr;
LIR_Opr FrameMap::R5_metadata_opr;
LIR_Opr FrameMap::R6_metadata_opr;
LIR_Opr FrameMap::R7_metadata_opr;
LIR_Opr FrameMap::R8_metadata_opr;
LIR_Opr FrameMap::R9_metadata_opr;
LIR_Opr FrameMap::R10_metadata_opr;
LIR_Opr FrameMap::R11_metadata_opr;
LIR_Opr FrameMap::R12_metadata_opr;
//LIR_Opr FrameMap::R13_metadata_opr;
LIR_Opr FrameMap::R14_metadata_opr;
LIR_Opr FrameMap::R15_metadata_opr;
//LIR_Opr FrameMap::R16_metadata_opr;
LIR_Opr FrameMap::R17_metadata_opr;
LIR_Opr FrameMap::R18_metadata_opr;
LIR_Opr FrameMap::R19_metadata_opr;
LIR_Opr FrameMap::R20_metadata_opr;
LIR_Opr FrameMap::R21_metadata_opr;
LIR_Opr FrameMap::R22_metadata_opr;
LIR_Opr FrameMap::R23_metadata_opr;
LIR_Opr FrameMap::R24_metadata_opr;
LIR_Opr FrameMap::R25_metadata_opr;
LIR_Opr FrameMap::R26_metadata_opr;
LIR_Opr FrameMap::R27_metadata_opr;
LIR_Opr FrameMap::R28_metadata_opr;
//LIR_Opr FrameMap::R29_metadata_opr;
LIR_Opr FrameMap::R30_metadata_opr;
LIR_Opr FrameMap::R31_metadata_opr;
LIR_Opr FrameMap::SP_opr;
LIR_Opr FrameMap::R0_long_opr;
LIR_Opr FrameMap::R3_long_opr;
LIR_Opr FrameMap::F1_opr;
LIR_Opr FrameMap::F1_double_opr;
LIR_Opr FrameMap::_caller_save_cpu_regs[] = { 0, };
LIR_Opr FrameMap::_caller_save_fpu_regs[] = { 0, };
FloatRegister FrameMap::nr2floatreg (int rnr) {
assert(_init_done, "tables not initialized");
debug_only(fpu_range_check(rnr);)
return _fpu_regs[rnr];
}
// Returns true if reg could be smashed by a callee.
bool FrameMap::is_caller_save_register (LIR_Opr reg) {
if (reg->is_single_fpu() || reg->is_double_fpu()) { return true; }
if (reg->is_double_cpu()) {
return is_caller_save_register(reg->as_register_lo()) ||
is_caller_save_register(reg->as_register_hi());
}
return is_caller_save_register(reg->as_register());
}
bool FrameMap::is_caller_save_register (Register r) {
// not visible to allocator: R0: scratch, R1: SP
// r->encoding() < 2 + nof_caller_save_cpu_regs();
return true; // Currently all regs are caller save.
}
void FrameMap::initialize() {
assert(!_init_done, "once");
int i = 0;
// Put generally available registers at the beginning (allocated, saved for GC).
for (int j = 0; j < nof_cpu_regs; ++j) {
Register rj = as_Register(j);
if (reg_needs_save(rj)) {
map_register(i++, rj);
}
}
assert(i == nof_cpu_regs_reg_alloc, "number of allocated registers");
// The following registers are not normally available.
for (int j = 0; j < nof_cpu_regs; ++j) {
Register rj = as_Register(j);
if (!reg_needs_save(rj)) {
map_register(i++, rj);
}
}
assert(i == nof_cpu_regs, "number of CPU registers");
for (i = 0; i < nof_fpu_regs; i++) {
_fpu_regs[i] = as_FloatRegister(i);
}
_init_done = true;
R0_opr = as_opr(R0);
R1_opr = as_opr(R1);
R2_opr = as_opr(R2);
R3_opr = as_opr(R3);
R4_opr = as_opr(R4);
R5_opr = as_opr(R5);
R6_opr = as_opr(R6);
R7_opr = as_opr(R7);
R8_opr = as_opr(R8);
R9_opr = as_opr(R9);
R10_opr = as_opr(R10);
R11_opr = as_opr(R11);
R12_opr = as_opr(R12);
R13_opr = as_opr(R13);
R14_opr = as_opr(R14);
R15_opr = as_opr(R15);
R16_opr = as_opr(R16);
R17_opr = as_opr(R17);
R18_opr = as_opr(R18);
R19_opr = as_opr(R19);
R20_opr = as_opr(R20);
R21_opr = as_opr(R21);
R22_opr = as_opr(R22);
R23_opr = as_opr(R23);
R24_opr = as_opr(R24);
R25_opr = as_opr(R25);
R26_opr = as_opr(R26);
R27_opr = as_opr(R27);
R28_opr = as_opr(R28);
R29_opr = as_opr(R29);
R30_opr = as_opr(R30);
R31_opr = as_opr(R31);
R0_oop_opr = as_oop_opr(R0);
//R1_oop_opr = as_oop_opr(R1);
R2_oop_opr = as_oop_opr(R2);
R3_oop_opr = as_oop_opr(R3);
R4_oop_opr = as_oop_opr(R4);
R5_oop_opr = as_oop_opr(R5);
R6_oop_opr = as_oop_opr(R6);
R7_oop_opr = as_oop_opr(R7);
R8_oop_opr = as_oop_opr(R8);
R9_oop_opr = as_oop_opr(R9);
R10_oop_opr = as_oop_opr(R10);
R11_oop_opr = as_oop_opr(R11);
R12_oop_opr = as_oop_opr(R12);
//R13_oop_opr = as_oop_opr(R13);
R14_oop_opr = as_oop_opr(R14);
R15_oop_opr = as_oop_opr(R15);
//R16_oop_opr = as_oop_opr(R16);
R17_oop_opr = as_oop_opr(R17);
R18_oop_opr = as_oop_opr(R18);
R19_oop_opr = as_oop_opr(R19);
R20_oop_opr = as_oop_opr(R20);
R21_oop_opr = as_oop_opr(R21);
R22_oop_opr = as_oop_opr(R22);
R23_oop_opr = as_oop_opr(R23);
R24_oop_opr = as_oop_opr(R24);
R25_oop_opr = as_oop_opr(R25);
R26_oop_opr = as_oop_opr(R26);
R27_oop_opr = as_oop_opr(R27);
R28_oop_opr = as_oop_opr(R28);
//R29_oop_opr = as_oop_opr(R29);
R30_oop_opr = as_oop_opr(R30);
R31_oop_opr = as_oop_opr(R31);
R0_metadata_opr = as_metadata_opr(R0);
//R1_metadata_opr = as_metadata_opr(R1);
R2_metadata_opr = as_metadata_opr(R2);
R3_metadata_opr = as_metadata_opr(R3);
R4_metadata_opr = as_metadata_opr(R4);
R5_metadata_opr = as_metadata_opr(R5);
R6_metadata_opr = as_metadata_opr(R6);
R7_metadata_opr = as_metadata_opr(R7);
R8_metadata_opr = as_metadata_opr(R8);
R9_metadata_opr = as_metadata_opr(R9);
R10_metadata_opr = as_metadata_opr(R10);
R11_metadata_opr = as_metadata_opr(R11);
R12_metadata_opr = as_metadata_opr(R12);
//R13_metadata_opr = as_metadata_opr(R13);
R14_metadata_opr = as_metadata_opr(R14);
R15_metadata_opr = as_metadata_opr(R15);
//R16_metadata_opr = as_metadata_opr(R16);
R17_metadata_opr = as_metadata_opr(R17);
R18_metadata_opr = as_metadata_opr(R18);
R19_metadata_opr = as_metadata_opr(R19);
R20_metadata_opr = as_metadata_opr(R20);
R21_metadata_opr = as_metadata_opr(R21);
R22_metadata_opr = as_metadata_opr(R22);
R23_metadata_opr = as_metadata_opr(R23);
R24_metadata_opr = as_metadata_opr(R24);
R25_metadata_opr = as_metadata_opr(R25);
R26_metadata_opr = as_metadata_opr(R26);
R27_metadata_opr = as_metadata_opr(R27);
R28_metadata_opr = as_metadata_opr(R28);
//R29_metadata_opr = as_metadata_opr(R29);
R30_metadata_opr = as_metadata_opr(R30);
R31_metadata_opr = as_metadata_opr(R31);
SP_opr = as_pointer_opr(R1_SP);
R0_long_opr = LIR_OprFact::double_cpu(cpu_reg2rnr(R0), cpu_reg2rnr(R0));
R3_long_opr = LIR_OprFact::double_cpu(cpu_reg2rnr(R3), cpu_reg2rnr(R3));
F1_opr = as_float_opr(F1);
F1_double_opr = as_double_opr(F1);
// All the allocated cpu regs are caller saved.
for (int i = 0; i < max_nof_caller_save_cpu_regs; i++) {
_caller_save_cpu_regs[i] = LIR_OprFact::single_cpu(i);
}
// All the fpu regs are caller saved.
for (int i = 0; i < nof_caller_save_fpu_regs; i++) {
_caller_save_fpu_regs[i] = LIR_OprFact::single_fpu(i);
}
}
Address FrameMap::make_new_address(ByteSize sp_offset) const {
return Address(R1_SP, STACK_BIAS + in_bytes(sp_offset));
}
VMReg FrameMap::fpu_regname (int n) {
return as_FloatRegister(n)->as_VMReg();
}
LIR_Opr FrameMap::stack_pointer() {
return SP_opr;
}
// JSR 292
// On PPC64, there is no need to save the SP, because neither
// method handle intrinsics nor compiled lambda forms modify it.
LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
return LIR_OprFact::illegalOpr;
}
bool FrameMap::validate_frame() {
int max_offset = in_bytes(framesize_in_bytes());
int java_index = 0;
for (int i = 0; i < _incoming_arguments->length(); i++) {
LIR_Opr opr = _incoming_arguments->at(i);
if (opr->is_stack()) {
max_offset = MAX2(_argument_locations->at(java_index), max_offset);
}
java_index += type2size[opr->type()];
}
return Assembler::is_simm16(max_offset + STACK_BIAS);
}

View file

@@ -0,0 +1,202 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_C1_FRAMEMAP_PPC_HPP
#define CPU_PPC_VM_C1_FRAMEMAP_PPC_HPP
public:
enum {
nof_reg_args = 8, // Registers R3-R10 are available for parameter passing.
first_available_sp_in_frame = frame::jit_out_preserve_size,
frame_pad_in_bytes = 0
};
static const int pd_c_runtime_reserved_arg_size;
static LIR_Opr R0_opr;
static LIR_Opr R1_opr;
static LIR_Opr R2_opr;
static LIR_Opr R3_opr;
static LIR_Opr R4_opr;
static LIR_Opr R5_opr;
static LIR_Opr R6_opr;
static LIR_Opr R7_opr;
static LIR_Opr R8_opr;
static LIR_Opr R9_opr;
static LIR_Opr R10_opr;
static LIR_Opr R11_opr;
static LIR_Opr R12_opr;
static LIR_Opr R13_opr;
static LIR_Opr R14_opr;
static LIR_Opr R15_opr;
static LIR_Opr R16_opr;
static LIR_Opr R17_opr;
static LIR_Opr R18_opr;
static LIR_Opr R19_opr;
static LIR_Opr R20_opr;
static LIR_Opr R21_opr;
static LIR_Opr R22_opr;
static LIR_Opr R23_opr;
static LIR_Opr R24_opr;
static LIR_Opr R25_opr;
static LIR_Opr R26_opr;
static LIR_Opr R27_opr;
static LIR_Opr R28_opr;
static LIR_Opr R29_opr;
static LIR_Opr R30_opr;
static LIR_Opr R31_opr;
static LIR_Opr R0_oop_opr;
//R1: Stack pointer. Not an oop.
static LIR_Opr R2_oop_opr;
static LIR_Opr R3_oop_opr;
static LIR_Opr R4_oop_opr;
static LIR_Opr R5_oop_opr;
static LIR_Opr R6_oop_opr;
static LIR_Opr R7_oop_opr;
static LIR_Opr R8_oop_opr;
static LIR_Opr R9_oop_opr;
static LIR_Opr R10_oop_opr;
static LIR_Opr R11_oop_opr;
static LIR_Opr R12_oop_opr;
//R13: System thread register. Not usable.
static LIR_Opr R14_oop_opr;
static LIR_Opr R15_oop_opr;
//R16: Java thread register. Not an oop.
static LIR_Opr R17_oop_opr;
static LIR_Opr R18_oop_opr;
static LIR_Opr R19_oop_opr;
static LIR_Opr R20_oop_opr;
static LIR_Opr R21_oop_opr;
static LIR_Opr R22_oop_opr;
static LIR_Opr R23_oop_opr;
static LIR_Opr R24_oop_opr;
static LIR_Opr R25_oop_opr;
static LIR_Opr R26_oop_opr;
static LIR_Opr R27_oop_opr;
static LIR_Opr R28_oop_opr;
static LIR_Opr R29_oop_opr;
//R29: TOC register. Not an oop.
static LIR_Opr R30_oop_opr;
static LIR_Opr R31_oop_opr;
static LIR_Opr R0_metadata_opr;
//R1: Stack pointer. Not metadata.
static LIR_Opr R2_metadata_opr;
static LIR_Opr R3_metadata_opr;
static LIR_Opr R4_metadata_opr;
static LIR_Opr R5_metadata_opr;
static LIR_Opr R6_metadata_opr;
static LIR_Opr R7_metadata_opr;
static LIR_Opr R8_metadata_opr;
static LIR_Opr R9_metadata_opr;
static LIR_Opr R10_metadata_opr;
static LIR_Opr R11_metadata_opr;
static LIR_Opr R12_metadata_opr;
//R13: System thread register. Not usable.
static LIR_Opr R14_metadata_opr;
static LIR_Opr R15_metadata_opr;
//R16: Java thread register. Not metadata.
static LIR_Opr R17_metadata_opr;
static LIR_Opr R18_metadata_opr;
static LIR_Opr R19_metadata_opr;
static LIR_Opr R20_metadata_opr;
static LIR_Opr R21_metadata_opr;
static LIR_Opr R22_metadata_opr;
static LIR_Opr R23_metadata_opr;
static LIR_Opr R24_metadata_opr;
static LIR_Opr R25_metadata_opr;
static LIR_Opr R26_metadata_opr;
static LIR_Opr R27_metadata_opr;
static LIR_Opr R28_metadata_opr;
//R29: TOC register. Not metadata.
static LIR_Opr R30_metadata_opr;
static LIR_Opr R31_metadata_opr;
static LIR_Opr SP_opr;
static LIR_Opr R0_long_opr;
static LIR_Opr R3_long_opr;
static LIR_Opr F1_opr;
static LIR_Opr F1_double_opr;
private:
static FloatRegister _fpu_regs [nof_fpu_regs];
static LIR_Opr as_long_single_opr(Register r) {
return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
}
static LIR_Opr as_long_pair_opr(Register r) {
return LIR_OprFact::double_cpu(cpu_reg2rnr(r->successor()), cpu_reg2rnr(r));
}
public:
#ifdef _LP64
static LIR_Opr as_long_opr(Register r) {
return as_long_single_opr(r);
}
static LIR_Opr as_pointer_opr(Register r) {
return as_long_single_opr(r);
}
#else
static LIR_Opr as_long_opr(Register r) {
Unimplemented(); return 0;
// return as_long_pair_opr(r);
}
static LIR_Opr as_pointer_opr(Register r) {
Unimplemented(); return 0;
// return as_opr(r);
}
#endif
static LIR_Opr as_float_opr(FloatRegister r) {
return LIR_OprFact::single_fpu(r->encoding());
}
static LIR_Opr as_double_opr(FloatRegister r) {
return LIR_OprFact::double_fpu(r->encoding());
}
static FloatRegister nr2floatreg (int rnr);
static VMReg fpu_regname (int n);
static bool is_caller_save_register(LIR_Opr reg);
static bool is_caller_save_register(Register r);
static int nof_caller_save_cpu_regs() { return pd_nof_caller_save_cpu_regs_frame_map; }
static int last_cpu_reg() { return pd_last_cpu_reg; }
// Registers which need to be saved in the frames (e.g. for GC).
// Register usage:
// R0: scratch
// R1: sp
// R13: system thread id
// R16: java thread
// R29: global TOC
static bool reg_needs_save(Register r) { return r != R0 && r != R1 && r != R13 && r != R16 && r != R29; }
#endif // CPU_PPC_VM_C1_FRAMEMAP_PPC_HPP

File diff suppressed because it is too large

View file

@@ -0,0 +1,69 @@
/*
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_C1_LIRASSEMBLER_PPC_HPP
#define CPU_PPC_VM_C1_LIRASSEMBLER_PPC_HPP
private:
//////////////////////////////////////////////////////////////////////////////
// PPC64 load/store emission
//
// The PPC ld/st instructions cannot accommodate displacements larger than 16 bits.
// The following "pseudo" instructions (load/store) make it easier to
// use the indexed addressing mode by allowing 32 bit displacements:
//
void explicit_null_check(Register addr, CodeEmitInfo* info);
int store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned);
int store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide);
int load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned);
int load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide);
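// A sketch of the idea (illustrative, not the actual implementation): a
// 32-bit displacement d can be split hi/lo with a carry adjustment, e.g.
// for a load:
//   addis(tmp, base, (d >> 16) + ((d & 0x8000) ? 1 : 0)); // high half
//   ld(dst, (short)(d & 0xFFFF), tmp);                    // low half, simm16
// The +1 compensates for the sign extension of the low 16-bit half.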
int shift_amount(BasicType t);
// Record the type of the receiver in ReceiverTypeData.
void type_profile_helper(Register mdo, int mdo_offset_bias,
ciMethodData *md, ciProfileData *data,
Register recv, Register tmp1, Label* update_done);
// Setup pointers to MDO, MDO slot, also compute offset bias to access the slot.
void setup_md_access(ciMethod* method, int bci,
ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias);
public:
static const ConditionRegister BOOL_RESULT;
// Emit trampoline stub for call. Call bailout() if failed. Return true on success.
bool emit_trampoline_stub_for_call(address target, Register Rtoc = noreg);
enum {
max_static_call_stub_size = 4 * BytesPerInstWord + MacroAssembler::b64_patchable_size,
call_stub_size = max_static_call_stub_size + MacroAssembler::trampoline_stub_size, // or smaller
exception_handler_size = MacroAssembler::b64_patchable_size, // or smaller
deopt_handler_size = MacroAssembler::bl64_patchable_size
};
#endif // CPU_PPC_VM_C1_LIRASSEMBLER_PPC_HPP

File diff suppressed because it is too large

View file

@@ -0,0 +1,34 @@
/*
* Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LinearScan.hpp"
#include "utilities/bitMap.inline.hpp"
void LinearScan::allocate_fpu_stack() {
Unimplemented();
// No FPU stack on PPC
}

View file

@@ -0,0 +1,73 @@
/*
* Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_C1_LINEARSCAN_PPC_HPP
#define CPU_PPC_VM_C1_LINEARSCAN_PPC_HPP
inline bool LinearScan::is_processed_reg_num(int reg_num) {
assert(FrameMap::R0_opr->cpu_regnr() == FrameMap::last_cpu_reg() + 1, "wrong assumption below");
assert(FrameMap::R1_opr->cpu_regnr() == FrameMap::last_cpu_reg() + 2, "wrong assumption below");
assert(FrameMap::R13_opr->cpu_regnr() == FrameMap::last_cpu_reg() + 3, "wrong assumption below");
assert(FrameMap::R16_opr->cpu_regnr() == FrameMap::last_cpu_reg() + 4, "wrong assumption below");
assert(FrameMap::R29_opr->cpu_regnr() == FrameMap::last_cpu_reg() + 5, "wrong assumption below");
return reg_num <= FrameMap::last_cpu_reg() || reg_num >= pd_nof_cpu_regs_frame_map;
}
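// In other words: numbers 0..last_cpu_reg() are the allocatable CPU
// registers, the next five are the non-allocatable ones excluded above
// (R0, R1, R13, R16, R29), and numbers >= pd_nof_cpu_regs_frame_map cover
// FPU and virtual registers, which linear scan does process.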
inline int LinearScan::num_physical_regs(BasicType type) {
return 1;
}
inline bool LinearScan::requires_adjacent_regs(BasicType type) {
return false;
}
inline bool LinearScan::is_caller_save(int assigned_reg) {
return true; // assigned_reg < pd_first_callee_saved_reg;
}
inline void LinearScan::pd_add_temps(LIR_Op* op) {
// No special case behaviours yet
}
inline bool LinearScanWalker::pd_init_regs_for_alloc(Interval* cur) {
if (allocator()->gen()->is_vreg_flag_set(cur->reg_num(), LIRGenerator::callee_saved)) {
assert(cur->type() != T_FLOAT && cur->type() != T_DOUBLE, "cpu regs only");
_first_reg = pd_first_callee_saved_reg;
_last_reg = pd_last_callee_saved_reg;
ShouldNotReachHere(); // Currently no callee saved regs.
return true;
} else if (cur->type() == T_INT || cur->type() == T_LONG || cur->type() == T_OBJECT ||
cur->type() == T_ADDRESS || cur->type() == T_METADATA) {
_first_reg = pd_first_cpu_reg;
_last_reg = pd_last_cpu_reg;
return true;
}
return false;
}
#endif // CPU_PPC_VM_C1_LINEARSCAN_PPC_HPP

View file

@@ -0,0 +1,486 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markOop.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/os.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/sharedRuntime.hpp"
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
const Register temp_reg = R12_scratch2;
verify_oop(receiver);
load_klass(temp_reg, receiver);
if (TrapBasedICMissChecks) {
trap_ic_miss_check(temp_reg, iCache);
} else {
Label L;
cmpd(CCR0, temp_reg, iCache);
beq(CCR0, L);
//load_const_optimized(temp_reg, SharedRuntime::get_ic_miss_stub(), R0);
calculate_address_from_global_toc(temp_reg, SharedRuntime::get_ic_miss_stub(), true, true, false);
mtctr(temp_reg);
bctr();
align(32, 12);
bind(L);
}
}
void C1_MacroAssembler::explicit_null_check(Register base) {
Unimplemented();
}
void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
// Make sure there is enough stack space for this method's activation.
generate_stack_overflow_check(bang_size_in_bytes);
// Create the frame.
const Register return_pc = R0;
mflr(return_pc);
// Get callers sp.
std(return_pc, _abi(lr), R1_SP); // SP->lr = return_pc
push_frame(frame_size_in_bytes, R0); // SP -= frame_size_in_bytes
}
void C1_MacroAssembler::unverified_entry(Register receiver, Register ic_klass) {
Unimplemented(); // Currently unused.
//if (C1Breakpoint) illtrap();
//inline_cache_check(receiver, ic_klass);
}
void C1_MacroAssembler::verified_entry() {
if (C1Breakpoint) illtrap();
// build frame
}
void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox, Register Rscratch, Label& slow_case) {
assert_different_registers(Rmark, Roop, Rbox, Rscratch);
Label done, cas_failed, slow_int;
// The following move must be the first instruction emitted, since debug
// information may be generated for it.
// Load object header.
ld(Rmark, oopDesc::mark_offset_in_bytes(), Roop);
verify_oop(Roop);
// Save object being locked into the BasicObjectLock...
std(Roop, BasicObjectLock::obj_offset_in_bytes(), Rbox);
if (UseBiasedLocking) {
biased_locking_enter(CCR0, Roop, Rmark, Rscratch, R0, done, &slow_int);
}
// ... and mark it unlocked.
ori(Rmark, Rmark, markOopDesc::unlocked_value);
// Save unlocked object header into the displaced header location on the stack.
std(Rmark, BasicLock::displaced_header_offset_in_bytes(), Rbox);
// Compare object markOop with Rmark and if equal exchange Rscratch with object markOop.
assert(oopDesc::mark_offset_in_bytes() == 0, "cas must take a zero displacement");
cmpxchgd(/*flag=*/CCR0,
/*current_value=*/Rscratch,
/*compare_value=*/Rmark,
/*exchange_value=*/Rbox,
/*where=*/Roop/*+0==mark_offset_in_bytes*/,
MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
MacroAssembler::cmpxchgx_hint_acquire_lock(),
noreg,
&cas_failed,
/*check without membar and ldarx first*/true);
// If compare/exchange succeeded we found an unlocked object and we now have locked it
// hence we are done.
b(done);
bind(slow_int);
b(slow_case); // far
bind(cas_failed);
// We did not find an unlocked object so see if this is a recursive case.
sub(Rscratch, Rscratch, R1_SP);
load_const_optimized(R0, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
and_(R0/*==0?*/, Rscratch, R0);
std(R0/*==0, perhaps*/, BasicLock::displaced_header_offset_in_bytes(), Rbox);
bne(CCR0, slow_int);
bind(done);
}
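lock_object above emits HotSpot's light-weight stack locking: it saves the unlocked mark word ("displaced header") into the on-stack BasicLock, then compare-and-exchanges the BasicLock's address into the object header; a failed CAS falls back to the recursive-lock check. A minimal C++ sketch of that protocol, with an illustrative mark encoding rather than the real markOop layout:

    #include <atomic>
    #include <cstdint>

    // Illustrative mark-word encoding; the real layout lives in markOop.hpp.
    constexpr uintptr_t unlocked_value = 1;  // low bit set = unlocked

    struct BasicLock { uintptr_t displaced_header; };
    struct Object    { std::atomic<uintptr_t> mark; };

    // Fast-path stack locking: save the unlocked header in the on-stack
    // BasicLock, then CAS the BasicLock's address into the mark word.
    bool try_fast_lock(Object* obj, BasicLock* box) {
      uintptr_t displaced = obj->mark.load(std::memory_order_relaxed) | unlocked_value;
      box->displaced_header = displaced;  // assume it is unlocked
      uintptr_t expected = displaced;
      if (obj->mark.compare_exchange_strong(expected, reinterpret_cast<uintptr_t>(box),
                                            std::memory_order_acq_rel))
        return true;  // found it unlocked and locked it
      // CAS failed: the assembly then checks whether the current mark still
      // points into this thread's own stack (a recursive lock) before taking
      // the slow path; that is the page-mask test against R1_SP above.
      return false;
    }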
void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rbox, Label& slow_case) {
assert_different_registers(Rmark, Roop, Rbox);
Label slow_int, done;
Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
if (UseBiasedLocking) {
// Load the object out of the BasicObjectLock.
ld(Roop, BasicObjectLock::obj_offset_in_bytes(), Rbox);
verify_oop(Roop);
biased_locking_exit(CCR0, Roop, R0, done);
}
// Test first if it is a fast recursive unlock.
ld(Rmark, BasicLock::displaced_header_offset_in_bytes(), Rbox);
cmpdi(CCR0, Rmark, 0);
beq(CCR0, done);
if (!UseBiasedLocking) {
// Load object.
ld(Roop, BasicObjectLock::obj_offset_in_bytes(), Rbox);
verify_oop(Roop);
}
// Check if it is still a lightweight lock; this is true if we see
// the stack address of the basicLock in the markOop of the object.
cmpxchgd(/*flag=*/CCR0,
/*current_value=*/R0,
/*compare_value=*/Rbox,
/*exchange_value=*/Rmark,
/*where=*/Roop,
MacroAssembler::MemBarRel,
MacroAssembler::cmpxchgx_hint_release_lock(),
noreg,
&slow_int);
b(done);
bind(slow_int);
b(slow_case); // far
// Done
bind(done);
}
void C1_MacroAssembler::try_allocate(
Register obj, // result: pointer to object after successful allocation
Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
int con_size_in_bytes, // object size in bytes if known at compile time
Register t1, // temp register, must be global register for incr_allocated_bytes
Register t2, // temp register
Label& slow_case // continuation point if fast allocation fails
) {
if (UseTLAB) {
tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
} else {
eden_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
RegisterOrConstant size_in_bytes = var_size_in_bytes->is_valid()
? RegisterOrConstant(var_size_in_bytes)
: RegisterOrConstant(con_size_in_bytes);
incr_allocated_bytes(size_in_bytes, t1, t2);
}
}
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
assert_different_registers(obj, klass, len, t1, t2);
if (UseBiasedLocking && !len->is_valid()) {
ld(t1, in_bytes(Klass::prototype_header_offset()), klass);
} else {
load_const_optimized(t1, (intx)markOopDesc::prototype());
}
std(t1, oopDesc::mark_offset_in_bytes(), obj);
store_klass(obj, klass);
if (len->is_valid()) {
stw(len, arrayOopDesc::length_offset_in_bytes(), obj);
} else if (UseCompressedClassPointers) {
// Otherwise length is in the class gap.
store_klass_gap(obj);
}
}
void C1_MacroAssembler::initialize_body(Register base, Register index) {
assert_different_registers(base, index);
srdi(index, index, LogBytesPerWord);
clear_memory_doubleword(base, index);
}
void C1_MacroAssembler::initialize_body(Register obj, Register tmp1, Register tmp2,
int obj_size_in_bytes, int hdr_size_in_bytes) {
const int index = (obj_size_in_bytes - hdr_size_in_bytes) / HeapWordSize;
const int cl_size = VM_Version::L1_data_cache_line_size(),
cl_dwords = cl_size>>3,
cl_dw_addr_bits = exact_log2(cl_dwords);
const Register tmp = R0,
base_ptr = tmp1,
cnt_dwords = tmp2;
if (index <= 6) {
// Use explicit NULL stores.
if (index > 0) { li(tmp, 0); }
for (int i = 0; i < index; ++i) { std(tmp, hdr_size_in_bytes + i * HeapWordSize, obj); }
} else if (index < (2<<cl_dw_addr_bits)-1) {
// simple loop
Label loop;
li(cnt_dwords, index);
addi(base_ptr, obj, hdr_size_in_bytes); // Compute address of first element.
li(tmp, 0);
mtctr(cnt_dwords); // Load counter.
bind(loop);
std(tmp, 0, base_ptr); // Clear 8byte aligned block.
addi(base_ptr, base_ptr, 8);
bdnz(loop);
} else {
// like clear_memory_doubleword
Label startloop, fast, fastloop, restloop, done;
addi(base_ptr, obj, hdr_size_in_bytes); // Compute address of first element.
load_const_optimized(cnt_dwords, index);
rldicl_(tmp, base_ptr, 64-3, 64-cl_dw_addr_bits); // Extract dword offset within first cache line.
beq(CCR0, fast); // Already 128byte aligned.
subfic(tmp, tmp, cl_dwords);
mtctr(tmp); // Set ctr to hit 128byte boundary (0<ctr<cl_dwords).
subf(cnt_dwords, tmp, cnt_dwords); // rest.
li(tmp, 0);
bind(startloop); // Clear at the beginning to reach 128byte boundary.
std(tmp, 0, base_ptr); // Clear 8byte aligned block.
addi(base_ptr, base_ptr, 8);
bdnz(startloop);
bind(fast); // Clear 128byte blocks.
srdi(tmp, cnt_dwords, cl_dw_addr_bits); // Loop count for 128byte loop (>0).
andi(cnt_dwords, cnt_dwords, cl_dwords-1); // Rest in dwords.
mtctr(tmp); // Load counter.
bind(fastloop);
dcbz(base_ptr); // Clear 128byte aligned block.
addi(base_ptr, base_ptr, cl_size);
bdnz(fastloop);
cmpdi(CCR0, cnt_dwords, 0); // size 0?
beq(CCR0, done); // rest == 0
li(tmp, 0);
mtctr(cnt_dwords); // Load counter.
bind(restloop); // Clear rest.
std(tmp, 0, base_ptr); // Clear 8byte aligned block.
addi(base_ptr, base_ptr, 8);
bdnz(restloop);
bind(done);
}
}
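initialize_body above picks one of three clearing strategies by size: a handful of explicit null stores, a simple doubleword store loop, or a cache-line loop that aligns to a 128-byte boundary and then clears whole lines with dcbz. The same decision restated as a plain C++ sketch; dcbz has no C++ equivalent, so memset stands in for one cache-line clear:

    #include <cstdint>
    #include <cstring>

    void clear_body(uint64_t* base, int dwords) {
      const int cl_dwords = 128 / 8;                 // dwords per 128-byte cache line
      if (dwords <= 6) {                             // a few explicit null stores
        for (int i = 0; i < dwords; i++) base[i] = 0;
      } else if (dwords < 2 * cl_dwords - 1) {       // simple store loop
        for (int i = 0; i < dwords; i++) base[i] = 0;
      } else {                                       // dcbz-style cache-line loop
        while (((uintptr_t)base & 127) != 0) { *base++ = 0; dwords--; } // align up
        while (dwords >= cl_dwords) {                // clear whole lines
          memset(base, 0, 128);                      // stands in for dcbz
          base += cl_dwords; dwords -= cl_dwords;
        }
        while (dwords-- > 0) *base++ = 0;            // clear the rest
      }
    }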
void C1_MacroAssembler::allocate_object(
Register obj, // result: pointer to object after successful allocation
Register t1, // temp register
Register t2, // temp register
Register t3, // temp register
int hdr_size, // object header size in words
int obj_size, // object size in words
Register klass, // object klass
Label& slow_case // continuation point if fast allocation fails
) {
assert_different_registers(obj, t1, t2, t3, klass);
// allocate space & initialize header
if (!is_simm16(obj_size * wordSize)) {
// Would need to use extra register to load
// object size => go the slow case for now.
b(slow_case);
return;
}
try_allocate(obj, noreg, obj_size * wordSize, t2, t3, slow_case);
initialize_object(obj, klass, noreg, obj_size * HeapWordSize, t1, t2);
}
void C1_MacroAssembler::initialize_object(
Register obj, // result: pointer to object after successful allocation
Register klass, // object klass
Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
int con_size_in_bytes, // object size in bytes if known at compile time
Register t1, // temp register
Register t2 // temp register
) {
const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;
initialize_header(obj, klass, noreg, t1, t2);
#ifdef ASSERT
{
lwz(t1, in_bytes(Klass::layout_helper_offset()), klass);
if (var_size_in_bytes != noreg) {
cmpw(CCR0, t1, var_size_in_bytes);
} else {
cmpwi(CCR0, t1, con_size_in_bytes);
}
asm_assert_eq("bad size in initialize_object", 0x753);
}
#endif
// Initialize body.
if (var_size_in_bytes != noreg) {
// Use a loop.
addi(t1, obj, hdr_size_in_bytes); // Compute address of first element.
addi(t2, var_size_in_bytes, -hdr_size_in_bytes); // Compute size of body.
initialize_body(t1, t2);
} else if (con_size_in_bytes > hdr_size_in_bytes) {
// Use a loop.
initialize_body(obj, t1, t2, con_size_in_bytes, hdr_size_in_bytes);
}
if (CURRENT_ENV->dtrace_alloc_probes()) {
Unimplemented();
// assert(obj == O0, "must be");
// call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)),
// relocInfo::runtime_call_type);
}
verify_oop(obj);
}
void C1_MacroAssembler::allocate_array(
Register obj, // result: pointer to array after successful allocation
Register len, // array length
Register t1, // temp register
Register t2, // temp register
Register t3, // temp register
int hdr_size, // object header size in words
int elt_size, // element size in bytes
Register klass, // object klass
Label& slow_case // continuation point if fast allocation fails
) {
assert_different_registers(obj, len, t1, t2, t3, klass);
// Determine alignment mask.
assert(!(BytesPerWord & 1), "must be a multiple of 2 for masking code to work");
int log2_elt_size = exact_log2(elt_size);
// Check for negative or excessive length.
size_t max_length = max_array_allocation_length >> log2_elt_size;
if (UseTLAB) {
size_t max_tlab = align_size_up(ThreadLocalAllocBuffer::max_size() >> log2_elt_size, 64*K);
if (max_tlab < max_length) { max_length = max_tlab; }
}
load_const_optimized(t1, max_length);
cmpld(CCR0, len, t1);
bc_far_optimized(Assembler::bcondCRbiIs1, bi0(CCR0, Assembler::greater), slow_case);
// compute array size
// note: If 0 <= len <= max_length, len*elt_size + header + alignment is
// smaller or equal to the largest integer; also, since top is always
// aligned, we can do the alignment here instead of at the end address
// computation.
const Register arr_size = t1;
Register arr_len_in_bytes = len;
if (elt_size != 1) {
sldi(t1, len, log2_elt_size);
arr_len_in_bytes = t1;
}
addi(arr_size, arr_len_in_bytes, hdr_size * wordSize + MinObjAlignmentInBytesMask); // Add space for header & alignment.
clrrdi(arr_size, arr_size, LogMinObjAlignmentInBytes); // Align array size.
// Allocate space & initialize header.
if (UseTLAB) {
tlab_allocate(obj, arr_size, 0, t2, slow_case);
} else {
eden_allocate(obj, arr_size, 0, t2, t3, slow_case);
}
initialize_header(obj, klass, len, t2, t3);
// Initialize body.
const Register base = t2;
const Register index = t3;
addi(base, obj, hdr_size * wordSize); // compute address of first element
addi(index, arr_size, -(hdr_size * wordSize)); // compute index = number of bytes to clear
initialize_body(base, index);
if (CURRENT_ENV->dtrace_alloc_probes()) {
Unimplemented();
//assert(obj == O0, "must be");
//call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)),
// relocInfo::runtime_call_type);
}
verify_oop(obj);
}
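The array-size arithmetic above (shift the length by the element size, add the header, round up to the object alignment) can be checked in isolation. A small sketch using the usual 64-bit HotSpot constants, which are assumptions of this example:

    #include <cassert>
    #include <cstddef>

    const int wordSize = 8;
    const int MinObjAlignmentInBytes = 8;
    const int MinObjAlignmentInBytesMask = MinObjAlignmentInBytes - 1;

    size_t array_size_in_bytes(size_t len, int log2_elt_size, int hdr_size_words) {
      size_t bytes = (len << log2_elt_size)               // payload
                   + hdr_size_words * wordSize            // header
                   + MinObjAlignmentInBytesMask;          // room for rounding
      return bytes & ~(size_t)MinObjAlignmentInBytesMask; // clrrdi = round down
    }

    int main() {
      // 10 ints (4 bytes each) with a 2-word header: 40 + 16 = 56, already aligned.
      assert(array_size_in_bytes(10, 2, 2) == 56);
      // 3 bytes with a 2-word header: 19 bytes rounds up to 24.
      assert(array_size_in_bytes(3, 0, 2) == 24);
    }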
#ifndef PRODUCT
void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
verify_oop_addr((RegisterOrConstant)(stack_offset + STACK_BIAS), R1_SP, "broken oop in stack slot");
}
void C1_MacroAssembler::verify_not_null_oop(Register r) {
Label not_null;
cmpdi(CCR0, r, 0);
bne(CCR0, not_null);
stop("non-null oop required");
bind(not_null);
if (!VerifyOops) return;
verify_oop(r);
}
#endif // PRODUCT
void C1_MacroAssembler::null_check(Register r, Label* Lnull) {
if (TrapBasedNullChecks) { // SIGTRAP based
trap_null_check(r);
} else { // explicit
//const address exception_entry = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
assert(Lnull != NULL, "must have Label for explicit check");
cmpdi(CCR0, r, 0);
bc_far_optimized(Assembler::bcondCRbiIs1, bi0(CCR0, Assembler::equal), *Lnull);
}
}
address C1_MacroAssembler::call_c_with_frame_resize(address dest, int frame_resize) {
if (frame_resize) { resize_frame(-frame_resize, R0); }
#if defined(ABI_ELFv2)
address return_pc = call_c(dest, relocInfo::runtime_call_type);
#else
address return_pc = call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, dest), relocInfo::runtime_call_type);
#endif
if (frame_resize) { resize_frame(frame_resize, R0); }
return return_pc;
}
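call_c_with_frame_resize branches on ABI_ELFv2 because, on the older ELFv1 ABI, a C function pointer addresses a descriptor rather than the first instruction, which is why the non-ELFv2 path casts dest to a FunctionDescriptor. A sketch of that convention; the field names are illustrative:

    // ELFv1 function descriptor: the pointer-to-function actually points here.
    struct FunctionDescriptor {
      void* entry;  // address of the first instruction
      void* toc;    // TOC base the callee expects in R2
      void* env;    // environment pointer (R11), unused by plain C
    };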

View file

@@ -0,0 +1,93 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_C1_MACROASSEMBLER_PPC_HPP
#define CPU_PPC_VM_C1_MACROASSEMBLER_PPC_HPP
void pd_init() { /* nothing to do */ }
public:
void try_allocate(
Register obj, // result: pointer to object after successful allocation
Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
int con_size_in_bytes, // object size in bytes if known at compile time
Register t1, // temp register
Register t2, // temp register
Label& slow_case // continuation point if fast allocation fails
);
void initialize_header(Register obj, Register klass, Register len, Register t1, Register t2);
void initialize_body(Register base, Register index);
void initialize_body(Register obj, Register tmp1, Register tmp2, int obj_size_in_bytes, int hdr_size_in_bytes);
// locking/unlocking
void lock_object (Register Rmark, Register Roop, Register Rbox, Register Rscratch, Label& slow_case);
void unlock_object(Register Rmark, Register Roop, Register Rbox, Label& slow_case);
void initialize_object(
Register obj, // result: pointer to object after successful allocation
Register klass, // object klass
Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
int con_size_in_bytes, // object size in bytes if known at compile time
Register t1, // temp register
Register t2 // temp register
);
// Allocation of fixed-size objects
// (Can also be used to allocate fixed-size arrays, by setting
// hdr_size correctly and storing the array length afterwards.)
void allocate_object(
Register obj, // result: pointer to object after successful allocation
Register t1, // temp register
Register t2, // temp register
Register t3, // temp register
int hdr_size, // object header size in words
int obj_size, // object size in words
Register klass, // object klass
Label& slow_case // continuation point if fast allocation fails
);
enum {
max_array_allocation_length = 0x40000000 // ppc friendly value, requires lis only
};
// Allocation of arrays
void allocate_array(
Register obj, // result: pointer to array after successful allocation
Register len, // array length
Register t1, // temp register
Register t2, // temp register
Register t3, // temp register
int hdr_size, // object header size in words
int elt_size, // element size in bytes
Register klass, // object klass
Label& slow_case // continuation point if fast allocation fails
);
void null_check(Register r, Label *Lnull = NULL);
address call_c_with_frame_resize(address dest, int frame_resize);
#endif // CPU_PPC_VM_C1_MACROASSEMBLER_PPC_HPP

File diff suppressed because it is too large

View file

@@ -0,0 +1,68 @@
/*
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_C1_GLOBALS_PPC_HPP
#define CPU_PPC_VM_C1_GLOBALS_PPC_HPP
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
// Sets the default values for platform dependent flags used by the client compiler.
// (see c1_globals.hpp)
#ifndef TIERED
define_pd_global(bool, BackgroundCompilation, true );
define_pd_global(bool, CICompileOSR, true );
define_pd_global(bool, InlineIntrinsics, true );
define_pd_global(bool, PreferInterpreterNativeStubs, false);
define_pd_global(bool, ProfileTraps, false);
define_pd_global(bool, UseOnStackReplacement, true );
define_pd_global(bool, TieredCompilation, false);
define_pd_global(intx, CompileThreshold, 1000 );
define_pd_global(intx, OnStackReplacePercentage, 1400 );
define_pd_global(bool, UseTLAB, true );
define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(intx, FreqInlineSize, 325 );
define_pd_global(bool, ResizeTLAB, true );
define_pd_global(intx, ReservedCodeCacheSize, 32*M );
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
define_pd_global(uintx,CodeCacheMinBlockLength, 1);
define_pd_global(uintx,MetaspaceSize, 12*M );
define_pd_global(bool, NeverActAsServerClassMachine, true );
define_pd_global(intx, NewSizeThreadIncrease, 16*K );
define_pd_global(uint64_t,MaxRAM, 1ULL*G);
define_pd_global(intx, InitialCodeCacheSize, 160*K);
#endif // !TIERED
define_pd_global(bool, UseTypeProfile, false);
define_pd_global(bool, RoundFPResults, false);
define_pd_global(bool, LIRFillDelaySlots, false);
define_pd_global(bool, OptimizeSinglePrecision, false);
define_pd_global(bool, CSEArrayLength, true );
define_pd_global(bool, TwoOperandLIRForm, false);
#endif // CPU_PPC_VM_C1_GLOBALS_PPC_HPP
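This header supplies the platform-dependent defaults consumed by the shared C1 flag machinery. As a rough illustration of the mechanism (the real define_pd_global macro in the shared globals code is more involved, so treat this as an assumption about its shape):

    // Hypothetical simplification: each platform header pins a pd_-prefixed
    // constant that the shared c1_globals.hpp then uses as the flag default.
    #define define_pd_global(type, name, value) const type pd_##name = value;

    define_pd_global(bool, UseTLAB, true)  // expands to: const bool pd_UseTLAB = true;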

View file

@@ -39,7 +39,7 @@ define_pd_global(bool, PreferInterpreterNativeStubs, false);
 define_pd_global(bool, ProfileTraps,                 true);
 define_pd_global(bool, UseOnStackReplacement,        true);
 define_pd_global(bool, ProfileInterpreter,           true);
-define_pd_global(bool, TieredCompilation,            false);
+define_pd_global(bool, TieredCompilation,            true);
 define_pd_global(intx, CompileThreshold,             10000);
 define_pd_global(intx, OnStackReplacePercentage,     140);

View file

@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,4 +45,14 @@ void Compile::pd_compiler2_init() {
       FLAG_SET_ERGO(bool, InsertEndGroupPPC64, true);
     }
   }
+  if (!VM_Version::has_isel() && FLAG_IS_DEFAULT(ConditionalMoveLimit)) {
+    FLAG_SET_ERGO(intx, ConditionalMoveLimit, 0);
+  }
+  if (OptimizeFill) {
+    warning("OptimizeFill is not supported on this CPU.");
+    FLAG_SET_DEFAULT(OptimizeFill, false);
+  }
 }
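The added hunk follows the usual HotSpot ergonomics pattern: only override a flag when the user has not set it explicitly. A miniature of that pattern, with illustrative names:

    // Ergonomic defaults defer to explicit command-line settings.
    struct Flag { long value; bool set_on_command_line; };

    void ergo_init(Flag& conditional_move_limit, bool has_isel) {
      if (!has_isel && !conditional_move_limit.set_on_command_line) {
        conditional_move_limit.value = 0;  // cmove is unprofitable without isel
      }
    }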

View file

@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -129,13 +130,20 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/*
   // - call
   __ calculate_address_from_global_toc(reg_scratch, __ method_toc());
   AddressLiteral ic = __ allocate_metadata_address((Metadata *)NULL);
-  __ load_const_from_method_toc(as_Register(Matcher::inline_cache_reg_encode()), ic, reg_scratch);
+  bool success = __ load_const_from_method_toc(as_Register(Matcher::inline_cache_reg_encode()),
+                                               ic, reg_scratch, /*fixed_size*/ true);
+  if (!success) {
+    return NULL; // CodeCache is full
+  }
   if (ReoptimizeCallSequences) {
     __ b64_patchable((address)-1, relocInfo::none);
   } else {
     AddressLiteral a((address)-1);
-    __ load_const_from_method_toc(reg_scratch, a, reg_scratch);
+    success = __ load_const_from_method_toc(reg_scratch, a, reg_scratch, /*fixed_size*/ true);
+    if (!success) {
+      return NULL; // CodeCache is full
+    }
     __ mtctr(reg_scratch);
     __ bctr();
   }
@@ -153,6 +161,7 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/*
   return stub;
 #else
   ShouldNotReachHere();
+  return NULL;
 #endif
 }
 #undef __

View file

@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2014 SAP AG. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -236,39 +236,6 @@ void frame::describe_pd(FrameValues& values, int frame_no) {
 }
 #endif
-void frame::adjust_unextended_sp() {
-  // If we are returning to a compiled MethodHandle call site, the
-  // saved_fp will in fact be a saved value of the unextended SP. The
-  // simplest way to tell whether we are returning to such a call site
-  // is as follows:
-  if (is_compiled_frame() && false /*is_at_mh_callsite()*/) { // TODO PPC port
-    // If the sender PC is a deoptimization point, get the original
-    // PC. For MethodHandle call site the unextended_sp is stored in
-    // saved_fp.
-    _unextended_sp = _fp - _cb->frame_size();
-#ifdef ASSERT
-    nmethod *sender_nm = _cb->as_nmethod_or_null();
-    assert(sender_nm && *_sp == *_unextended_sp, "backlink changed");
-    intptr_t* sp = _unextended_sp; // check if stack can be walked from here
-    for (int x = 0; x < 5; ++x) { // check up to a couple of backlinks
-      intptr_t* prev_sp = *(intptr_t**)sp;
-      if (prev_sp == 0) break; // end of stack
-      assert(prev_sp>sp, "broken stack");
-      sp = prev_sp;
-    }
-    if (sender_nm->is_deopt_mh_entry(_pc)) { // checks for deoptimization
-      address original_pc = sender_nm->get_original_pc(this);
-      assert(sender_nm->insts_contains(original_pc), "original PC must be in nmethod");
-      assert(sender_nm->is_method_handle_return(original_pc), "must be");
-    }
-#endif
-  }
-}
 intptr_t *frame::initial_deoptimization_info() {
   // unused... but returns fp() to minimize changes introduced by 7087445
   return fp();

View file

@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2014 SAP AG. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -373,7 +373,6 @@
   // The frame's stack pointer before it has been extended by a c2i adapter;
   // needed by deoptimization
   intptr_t* _unextended_sp;
-  void adjust_unextended_sp();
  public:

View file

@@ -39,9 +39,6 @@ inline void frame::find_codeblob_and_set_pc_and_deopt_state(address pc) {
   _pc = pc; // Must be set for get_deopt_original_pc()
   _fp = (intptr_t*)own_abi()->callers_sp;
-  // Use _fp - frame_size, needs to be done between _cb and _pc initialization
-  // and get_deopt_original_pc.
-  adjust_unextended_sp();
   address original_pc = nmethod::get_deopt_original_pc(this);
   if (original_pc != NULL) {

View file

@@ -35,11 +35,18 @@ const int BytesPerInstWord = 4;
 const int StackAlignmentInBytes = 16;
+// Indicates whether the C calling conventions require that
+// 32-bit integer argument values are extended to 64 bits.
+const bool CCallingConventionRequiresIntsAsLongs = true;
 #define SUPPORTS_NATIVE_CX8
 // The PPC CPUs are NOT multiple-copy-atomic.
 #define CPU_NOT_MULTIPLE_COPY_ATOMIC
+// The expected size in bytes of a cache line, used to pad data structures.
+#define DEFAULT_CACHE_LINE_SIZE 128
 #if defined(COMPILER2) && defined(AIX)
 // Include Transactional Memory lock eliding optimization
 #define INCLUDE_RTM_OPT 1
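The new DEFAULT_CACHE_LINE_SIZE of 128 bytes is what padding-sensitive data structures use to keep hot fields from sharing a line. A minimal sketch of that use, not HotSpot's actual Padded helper classes:

    #include <cstddef>

    #define DEFAULT_CACHE_LINE_SIZE 128

    // One counter per 128-byte cache line, to avoid false sharing.
    struct alignas(DEFAULT_CACHE_LINE_SIZE) PaddedCounter {
      volatile long value;
      char pad[DEFAULT_CACHE_LINE_SIZE - sizeof(long)];
    };

    static_assert(sizeof(PaddedCounter) == DEFAULT_CACHE_LINE_SIZE,
                  "one counter per cache line");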

View file

@@ -87,9 +87,9 @@ void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
 // own dispatch. The dispatch address in R24_dispatch_addr is used for the
 // dispatch.
 void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) {
+  if (bcp_incr) { addi(R14_bcp, R14_bcp, bcp_incr); }
   mtctr(R24_dispatch_addr);
-  addi(R14_bcp, R14_bcp, bcp_incr);
-  bctr();
+  bcctr(bcondAlways, 0, bhintbhBCCTRisNotPredictable);
 }
 void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg) {
@@ -207,9 +207,6 @@ void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, Register byt
     unimplemented("dispatch_Lbyte_code: verify"); // See Sparc Implementation to implement this
   }
-#ifdef FAST_DISPATCH
-  unimplemented("dispatch_Lbyte_code FAST_DISPATCH");
-#else
   assert_different_registers(bytecode, R11_scratch1);
   // Calc dispatch table address.
@@ -220,8 +217,7 @@ void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, Register byt
   // Jump off!
   mtctr(R11_scratch1);
-  bctr();
-#endif
+  bcctr(bcondAlways, 0, bhintbhBCCTRisNotPredictable);
 }
 void InterpreterMacroAssembler::load_receiver(Register Rparam_count, Register Rrecv_dst) {
@@ -544,8 +540,8 @@ void InterpreterMacroAssembler::index_check_without_pop(Register Rarray, Registe
   sldi(RsxtIndex, RsxtIndex, index_shift);
   blt(CCR0, LnotOOR);
   // Index should be in R17_tos, array should be in R4_ARG2.
-  mr(R17_tos, Rindex);
-  mr(R4_ARG2, Rarray);
+  mr_if_needed(R17_tos, Rindex);
+  mr_if_needed(R4_ARG2, Rarray);
   load_dispatch_table(Rtmp, (address*)Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
   mtctr(Rtmp);
   bctr();
@@ -842,7 +838,6 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
   // Must fence, otherwise, preceding store(s) may float below cmpxchg.
   // CmpxchgX sets CCR0 to cmpX(current, displaced).
-  fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
   cmpxchgd(/*flag=*/CCR0,
            /*current_value=*/current_header,
            /*compare_value=*/displaced_header, /*exchange_value=*/monitor,
@@ -850,7 +845,8 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
            MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
            MacroAssembler::cmpxchgx_hint_acquire_lock(),
            noreg,
-           &cas_failed);
+           &cas_failed,
+           /*check without membar and ldarx first*/true);
   // If the compare-and-exchange succeeded, then we found an unlocked
   // object and we have now locked it.
@@ -868,9 +864,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
   sub(current_header, current_header, R1_SP);
   assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
-  load_const_optimized(tmp,
-                       (address) (~(os::vm_page_size()-1) |
-                                  markOopDesc::lock_mask_in_place));
+  load_const_optimized(tmp, ~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place);
   and_(R0/*==0?*/, current_header, tmp);
   // If condition is true we are done and hence we can store 0 in the displaced
@@ -1106,6 +1100,7 @@ void InterpreterMacroAssembler::verify_method_data_pointer() {
 }
 void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
+                                                                Register method_counters,
                                                                 Register Rscratch,
                                                                 Label &profile_continue) {
   assert(ProfileInterpreter, "must be profiling interpreter");
@@ -1114,12 +1109,11 @@
   Label done;
   // If no method data exists, and the counter is high enough, make one.
-  int ipl_offs = load_const_optimized(Rscratch, &InvocationCounter::InterpreterProfileLimit, R0, true);
-  lwz(Rscratch, ipl_offs, Rscratch);
+  lwz(Rscratch, in_bytes(MethodCounters::interpreter_profile_limit_offset()), method_counters);
   cmpdi(CCR0, R28_mdx, 0);
   // Test to see if we should create a method data oop.
-  cmpd(CCR1, Rscratch /* InterpreterProfileLimit */, invocation_count);
+  cmpd(CCR1, Rscratch, invocation_count);
   bne(CCR0, done);
   bge(CCR1, profile_continue);
@@ -1132,15 +1126,15 @@
   bind(done);
 }
-void InterpreterMacroAssembler::test_backedge_count_for_osr(Register backedge_count, Register branch_bcp, Register Rtmp) {
-  assert_different_registers(backedge_count, Rtmp, branch_bcp);
+void InterpreterMacroAssembler::test_backedge_count_for_osr(Register backedge_count, Register method_counters,
+                                                            Register target_bcp, Register disp, Register Rtmp) {
+  assert_different_registers(backedge_count, target_bcp, disp, Rtmp, R4_ARG2);
   assert(UseOnStackReplacement,"Must UseOnStackReplacement to test_backedge_count_for_osr");
   Label did_not_overflow;
   Label overflow_with_error;
-  int ibbl_offs = load_const_optimized(Rtmp, &InvocationCounter::InterpreterBackwardBranchLimit, R0, true);
-  lwz(Rtmp, ibbl_offs, Rtmp);
+  lwz(Rtmp, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()), method_counters);
   cmpw(CCR0, backedge_count, Rtmp);
   blt(CCR0, did_not_overflow);
@@ -1152,17 +1146,15 @@
   // the overflow function is called only once every overflow_frequency.
   if (ProfileInterpreter) {
     const int overflow_frequency = 1024;
-    li(Rtmp, overflow_frequency-1);
-    andr(Rtmp, Rtmp, backedge_count);
-    cmpwi(CCR0, Rtmp, 0);
+    andi_(Rtmp, backedge_count, overflow_frequency-1);
     bne(CCR0, did_not_overflow);
   }
   // Overflow in loop, pass branch bytecode.
-  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, true);
+  subf(R4_ARG2, disp, target_bcp); // Compute branch bytecode (previous bcp).
+  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);
   // Was an OSR adapter generated?
-  // O0 = osr nmethod
   cmpdi(CCR0, R3_RET, 0);
   beq(CCR0, overflow_with_error);
@@ -1323,7 +1315,7 @@
   assert_different_registers(Rdst, Rtmp1);
   const Register invocation_counter = Rtmp1;
   const Register counter = Rdst;
-  // TODO ppc port assert(4 == InvocationCounter::sz_counter(), "unexpected field size.");
+  // TODO: PPC port: assert(4 == InvocationCounter::sz_counter(), "unexpected field size.");
   // Load backedge counter.
   lwz(counter, in_bytes(MethodCounters::backedge_counter_offset()) +
@@ -1336,8 +1328,7 @@
   addi(counter, counter, InvocationCounter::count_increment);
   // Mask the invocation counter.
-  li(Rscratch, InvocationCounter::count_mask_value);
-  andr(invocation_counter, invocation_counter, Rscratch);
+  andi(invocation_counter, invocation_counter, InvocationCounter::count_mask_value);
   // Store new counter value.
   stw(counter, in_bytes(MethodCounters::backedge_counter_offset()) +
@@ -1822,15 +1813,13 @@
   test_method_data_pointer(profile_continue);
   if (MethodData::profile_return_jsr292_only()) {
-    assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");
     // If we don't profile all invoke bytecodes we must make sure
     // it's a bytecode we indeed profile. We can't go back to the
     // beginning of the ProfileData we intend to update to check its
     // type because we're right after it and we don't know its
     // length.
     lbz(tmp1, 0, R14_bcp);
-    lhz(tmp2, Method::intrinsic_id_offset_in_bytes(), R19_method);
+    lbz(tmp2, Method::intrinsic_id_offset_in_bytes(), R19_method);
     cmpwi(CCR0, tmp1, Bytecodes::_invokedynamic);
     cmpwi(CCR1, tmp1, Bytecodes::_invokehandle);
     cror(CCR0, Assembler::equal, CCR1, Assembler::equal);
@@ -2224,9 +2213,7 @@
   // Load the backedge counter.
   lwz(backedge_count, be_counter_offset, Rcounters); // is unsigned int
   // Mask the backedge counter.
-  Register tmp = invocation_count;
-  li(tmp, InvocationCounter::count_mask_value);
-  andr(backedge_count, tmp, backedge_count); // Cannot use andi, need sign extension of count_mask_value.
+  andi(backedge_count, backedge_count, InvocationCounter::count_mask_value);
   // Load the invocation counter.
   lwz(invocation_count, inv_counter_offset, Rcounters); // is unsigned int
@@ -2282,7 +2269,7 @@
   bne(CCR0, test);
   address fd = CAST_FROM_FN_PTR(address, verify_return_address);
-  const int nbytes_save = 11*8; // volatile gprs except R0
+  const int nbytes_save = MacroAssembler::num_volatile_regs * 8;
   save_volatile_gprs(R1_SP, -nbytes_save); // except R0
   save_LR_CR(Rtmp); // Save in old frame.
   push_frame_reg_args(nbytes_save, Rtmp);
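One detail worth checking in the profiling throttle above: replacing the li/andr/cmpwi sequence with a single andi_ works because overflow_frequency is a power of two, so "count is a multiple of the frequency" reduces to testing the low bits. A quick standalone verification:

    #include <cassert>

    int main() {
      const int overflow_frequency = 1024;  // power of two, as in the hunk above
      for (int count = 0; count < 4096; count++) {
        bool masked = (count & (overflow_frequency - 1)) == 0;
        assert(masked == (count % overflow_frequency == 0));
      }
    }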

View file

@@ -195,7 +195,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
   void restore_interpreter_state(Register scratch, bool bcp_and_mdx_only = false);
   void increment_backedge_counter(const Register Rcounters, Register Rtmp, Register Rtmp2, Register Rscratch);
-  void test_backedge_count_for_osr(Register backedge_count, Register branch_bcp, Register Rtmp);
+  void test_backedge_count_for_osr(Register backedge_count, Register method_counters, Register target_bcp, Register disp, Register Rtmp);
   void record_static_call_in_profile(Register Rentry, Register Rtmp);
   void record_receiver_call_in_profile(Register Rklass, Register Rentry, Register Rtmp);
@@ -211,7 +211,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
   void set_method_data_pointer_for_bcp();
   void test_method_data_pointer(Label& zero_continue);
   void verify_method_data_pointer();
-  void test_invocation_counter_for_mdp(Register invocation_count, Register Rscratch, Label &profile_continue);
+  void test_invocation_counter_for_mdp(Register invocation_count, Register method_counters, Register Rscratch, Label &profile_continue);
   void set_mdp_data_at(int constant, Register value);

View file

@@ -30,6 +30,7 @@
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "interpreter/interpreter.hpp"
 #include "memory/resourceArea.hpp"
+#include "nativeInst_ppc.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/icache.hpp"
@@ -114,7 +115,7 @@ void MacroAssembler::calculate_address_from_global_toc(Register dst, address add
   }
   if (hi16) {
-    addis(dst, R29, MacroAssembler::largeoffset_si16_si16_hi(offset));
+    addis(dst, R29_TOC, MacroAssembler::largeoffset_si16_si16_hi(offset));
   }
   if (lo16) {
     if (add_relocation) {
@@ -256,7 +257,9 @@ narrowOop MacroAssembler::get_narrow_oop(address a, address bound) {
 }
 #endif // _LP64
-void MacroAssembler::load_const_from_method_toc(Register dst, AddressLiteral& a, Register toc) {
+// Returns true if successful.
+bool MacroAssembler::load_const_from_method_toc(Register dst, AddressLiteral& a,
+                                                Register toc, bool fixed_size) {
   int toc_offset = 0;
   // Use RelocationHolder::none for the constant pool entry, otherwise
   // we will end up with a failing NativeCall::verify(x) where x is
@@ -264,11 +267,13 @@
   // FIXME: We should insert relocation information for oops at the constant
   // pool entries instead of inserting it at the loads; patching of a constant
   // pool entry should be less expensive.
-  address oop_address = address_constant((address)a.value(), RelocationHolder::none);
+  address const_address = address_constant((address)a.value(), RelocationHolder::none);
+  if (const_address == NULL) { return false; } // allocation failure
   // Relocate at the pc of the load.
   relocate(a.rspec());
-  toc_offset = (int)(oop_address - code()->consts()->start());
-  ld_largeoffset_unchecked(dst, toc_offset, toc, true);
+  toc_offset = (int)(const_address - code()->consts()->start());
+  ld_largeoffset_unchecked(dst, toc_offset, toc, fixed_size);
+  return true;
 }
 bool MacroAssembler::is_load_const_from_method_toc_at(address a) {
@@ -446,6 +451,15 @@ void MacroAssembler::bc_far(int boint, int biint, Label& dest, int optimize) {
   assert(dest.is_bound() || target_pc == b_pc, "postcondition");
 }
+// 1 or 2 instructions
+void MacroAssembler::bc_far_optimized(int boint, int biint, Label& dest) {
+  if (dest.is_bound() && is_within_range_of_bcxx(target(dest), pc())) {
+    bc(boint, biint, dest);
+  } else {
+    bc_far(boint, biint, dest, MacroAssembler::bc_far_optimize_on_relocate);
+  }
+}
 bool MacroAssembler::is_bc_far_at(address instruction_addr) {
   return is_bc_far_variant1_at(instruction_addr) ||
          is_bc_far_variant2_at(instruction_addr) ||
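The new bc_far_optimized chooses the single-instruction form only when the target is close enough. The underlying constraint, restated in C++: a PPC conditional branch (bc) encodes a signed 16-bit, word-aligned displacement, so only targets within roughly +/-32 KB of the branch qualify. A sketch of such a range check (not the real is_within_range_of_bcxx):

    #include <cstdint>

    bool fits_in_bcxx(intptr_t target, intptr_t pc) {
      intptr_t disp = target - pc;
      return disp >= -(1 << 15) && disp < (1 << 15)  // signed 16-bit displacement
          && (disp & 3) == 0;                        // word aligned
    }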
@@ -496,7 +510,7 @@ void MacroAssembler::set_dest_of_bc_far_at(address instruction_addr, address des
     // variant 1, the 1st instruction contains the destination address:
     //
     //    bcxx  DEST
-    //    endgroup
+    //    nop
     //
     const int instruction_1 = *(int*)(instruction_addr);
     boint = inv_bo_field(instruction_1);
@@ -523,10 +537,10 @@ void MacroAssembler::set_dest_of_bc_far_at(address instruction_addr, address des
     // variant 1:
     //
     //    bcxx  DEST
-    //    endgroup
+    //    nop
     //
     masm.bc(boint, biint, dest);
-    masm.endgroup();
+    masm.nop();
   } else {
     // variant 2:
     //
@@ -810,7 +824,22 @@ void MacroAssembler::save_volatile_gprs(Register dst, int offset) {
   std(R9,  offset, dst);  offset += 8;
   std(R10, offset, dst);  offset += 8;
   std(R11, offset, dst);  offset += 8;
-  std(R12, offset, dst);
+  std(R12, offset, dst);  offset += 8;
+  stfd(F0, offset, dst);  offset += 8;
+  stfd(F1, offset, dst);  offset += 8;
+  stfd(F2, offset, dst);  offset += 8;
+  stfd(F3, offset, dst);  offset += 8;
+  stfd(F4, offset, dst);  offset += 8;
+  stfd(F5, offset, dst);  offset += 8;
+  stfd(F6, offset, dst);  offset += 8;
+  stfd(F7, offset, dst);  offset += 8;
+  stfd(F8, offset, dst);  offset += 8;
+  stfd(F9, offset, dst);  offset += 8;
+  stfd(F10, offset, dst); offset += 8;
+  stfd(F11, offset, dst); offset += 8;
+  stfd(F12, offset, dst); offset += 8;
+  stfd(F13, offset, dst);
 }
 // For verify_oops.
@@ -825,7 +854,22 @@ void MacroAssembler::restore_volatile_gprs(Register src, int offset) {
   ld(R9,  offset, src);  offset += 8;
   ld(R10, offset, src);  offset += 8;
   ld(R11, offset, src);  offset += 8;
-  ld(R12, offset, src);
+  ld(R12, offset, src);  offset += 8;
+  lfd(F0, offset, src);  offset += 8;
+  lfd(F1, offset, src);  offset += 8;
+  lfd(F2, offset, src);  offset += 8;
+  lfd(F3, offset, src);  offset += 8;
+  lfd(F4, offset, src);  offset += 8;
+  lfd(F5, offset, src);  offset += 8;
+  lfd(F6, offset, src);  offset += 8;
+  lfd(F7, offset, src);  offset += 8;
+  lfd(F8, offset, src);  offset += 8;
+  lfd(F9, offset, src);  offset += 8;
+  lfd(F10, offset, src); offset += 8;
+  lfd(F11, offset, src); offset += 8;
+  lfd(F12, offset, src); offset += 8;
+  lfd(F13, offset, src);
 }
 void MacroAssembler::save_LR_CR(Register tmp) {
@@ -908,7 +952,7 @@ void MacroAssembler::push_frame(unsigned int bytes, Register tmp) {
   if (is_simm(-offset, 16)) {
     stdu(R1_SP, -offset, R1_SP);
   } else {
-    load_const(tmp, -offset);
+    load_const_optimized(tmp, -offset);
     stdux(R1_SP, R1_SP, tmp);
   }
 }
@@ -1090,20 +1134,21 @@ address MacroAssembler::call_c_using_toc(const FunctionDescriptor* fd,
     assert(fd->entry() != NULL, "function must be linked");
     AddressLiteral fd_entry(fd->entry());
-    load_const_from_method_toc(R11, fd_entry, toc);
+    bool success = load_const_from_method_toc(R11, fd_entry, toc, /*fixed_size*/ true);
     mtctr(R11);
     if (fd->env() == NULL) {
       li(R11, 0);
       nop();
     } else {
       AddressLiteral fd_env(fd->env());
-      load_const_from_method_toc(R11, fd_env, toc);
+      success = success && load_const_from_method_toc(R11, fd_env, toc, /*fixed_size*/ true);
     }
     AddressLiteral fd_toc(fd->toc());
-    load_toc_from_toc(R2_TOC, fd_toc, toc);
-    // R2_TOC is killed.
+    // Set R2_TOC (load from toc)
+    success = success && load_const_from_method_toc(R2_TOC, fd_toc, toc, /*fixed_size*/ true);
     bctrl();
     _last_calls_return_pc = pc();
+    if (!success) { return NULL; }
   } else {
     // It's a friend function, load the entry point and don't care about
     // toc and env. Use an optimizable call instruction, but ensure the
@@ -1367,11 +1412,6 @@ void MacroAssembler::cmpxchgw(ConditionRegister flag, Register dest_current_valu
   bool preset_result_reg = (int_flag_success != dest_current_value && int_flag_success != compare_value &&
                             int_flag_success != exchange_value && int_flag_success != addr_base);
-  // release/fence semantics
-  if (semantics & MemBarRel) {
-    release();
-  }
   if (use_result_reg && preset_result_reg) {
     li(int_flag_success, 0); // preset (assume cas failed)
   }
@@ -1383,6 +1423,11 @@
     bne(flag, failed);
   }
+  // release/fence semantics
+  if (semantics & MemBarRel) {
+    release();
+  }
   // atomic emulation loop
   bind(retry);
@@ -1462,11 +1507,6 @@ void MacroAssembler::cmpxchgd(ConditionRegister flag,
                             int_flag_success!=exchange_value && int_flag_success!=addr_base);
   assert(int_flag_success == noreg || failed_ext == NULL, "cannot have both");
-  // release/fence semantics
-  if (semantics & MemBarRel) {
-    release();
-  }
   if (use_result_reg && preset_result_reg) {
     li(int_flag_success, 0); // preset (assume cas failed)
   }
@@ -1478,6 +1518,11 @@
     bne(flag, failed);
   }
+  // release/fence semantics
+  if (semantics & MemBarRel) {
+    release();
+  }
   // atomic emulation loop
   bind(retry);
@@ -1501,8 +1546,6 @@
     li(int_flag_success, 1);
   }
-  // POWER6 doesn't need isync in CAS.
-  // Always emit isync to be on the safe side.
   if (semantics & MemBarFenceAfter) {
     fence();
   } else if (semantics & MemBarAcq) {
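The hunks above move the release barrier from before the optional early compare to after it. The idea, restated in miniature: a release barrier only has to order prior stores against the store-conditional itself, so emitting it after the cheap preliminary compare avoids paying for the barrier when that compare already fails. A hedged C++ analogy using standard atomics:

    #include <atomic>

    bool cas_with_release(std::atomic<long>& word, long expected, long desired) {
      // Early exit on an obvious mismatch: no barrier needed on this path.
      if (word.load(std::memory_order_relaxed) != expected) return false;
      // Only now order prior stores before the exchange attempt.
      std::atomic_thread_fence(std::memory_order_release);
      return word.compare_exchange_strong(expected, desired,
                                          std::memory_order_relaxed);
    }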
@@ -1627,13 +1670,14 @@
 }
 /////////////////////////////////////////// subtype checking ////////////////////////////////////////////
 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                    Register super_klass,
                                                    Register temp1_reg,
                                                    Register temp2_reg,
-                                                   Label& L_success,
-                                                   Label& L_failure) {
+                                                   Label* L_success,
+                                                   Label* L_failure,
+                                                   Label* L_slow_path,
+                                                   RegisterOrConstant super_check_offset) {
   const Register check_cache_offset = temp1_reg;
   const Register cached_super = temp2_reg;
@@ -1643,6 +1687,18 @@
   int sco_offset = in_bytes(Klass::super_check_offset_offset());
   int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
+  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
+  bool need_slow_path = (must_load_sco || super_check_offset.constant_or_zero() == sco_offset);
+  Label L_fallthrough;
+  int label_nulls = 0;
+  if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
+  if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
+  if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
+  assert(label_nulls <= 1 ||
+         (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path),
+         "at most one NULL in the batch, usually");
   // If the pointers are equal, we are done (e.g., String[] elements).
   // This self-check enables sharing of secondary supertype arrays among
   // non-primary types such as array-of-interface. Otherwise, each such
@@ -1651,15 +1707,20 @@
   // type checks are in fact trivially successful in this manner,
   // so we get a nicely predicted branch right at the start of the check.
   cmpd(CCR0, sub_klass, super_klass);
-  beq(CCR0, L_success);
+  beq(CCR0, *L_success);
   // Check the supertype display:
+  if (must_load_sco) {
+    // The super check offset is always positive...
     lwz(check_cache_offset, sco_offset, super_klass);
+    super_check_offset = RegisterOrConstant(check_cache_offset);
+    // super_check_offset is register.
+    assert_different_registers(sub_klass, super_klass, cached_super, super_check_offset.as_register());
+  }
   // The loaded value is the offset from KlassOopDesc.
-  ldx(cached_super, check_cache_offset, sub_klass);
+  ld(cached_super, super_check_offset, sub_klass);
   cmpd(CCR0, cached_super, super_klass);
-  beq(CCR0, L_success);
   // This check has worked decisively for primary supers.
   // Secondary supers are sought in the super_cache ('super_cache_addr').
@ -1672,9 +1733,39 @@ void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
// So if it was a primary super, we can just fail immediately. // So if it was a primary super, we can just fail immediately.
// Otherwise, it's the slow path for us (no success at this point). // Otherwise, it's the slow path for us (no success at this point).
cmpwi(CCR0, check_cache_offset, sc_offset); #define FINAL_JUMP(label) if (&(label) != &L_fallthrough) { b(label); }
bne(CCR0, L_failure);
// bind(slow_path); // fallthru if (super_check_offset.is_register()) {
beq(CCR0, *L_success);
cmpwi(CCR0, super_check_offset.as_register(), sc_offset);
if (L_failure == &L_fallthrough) {
beq(CCR0, *L_slow_path);
} else {
bne(CCR0, *L_failure);
FINAL_JUMP(*L_slow_path);
}
} else {
if (super_check_offset.as_constant() == sc_offset) {
// Need a slow path; fast failure is impossible.
if (L_slow_path == &L_fallthrough) {
beq(CCR0, *L_success);
} else {
bne(CCR0, *L_slow_path);
FINAL_JUMP(*L_success);
}
} else {
// No slow path; it's a fast decision.
if (L_failure == &L_fallthrough) {
beq(CCR0, *L_success);
} else {
bne(CCR0, *L_failure);
FINAL_JUMP(*L_success);
}
}
}
bind(L_fallthrough);
#undef FINAL_JUMP
} }
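With label pointers, a caller may pass NULL for any outcome to make it fall through; the assert above allows at most one NULL (two only when no slow path is needed). The canonical pairing, as in the check_klass_subtype wrapper further down (a sketch):

  Label L_success, L_failure;
  // The fast path decides success/failure; L_slow_path defaults to NULL and
  // falls through into the slow path, which must directly follow:
  check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg, temp2_reg,
                                &L_success, &L_failure);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp1_reg, temp2_reg,
                                &L_success);
  bind(L_failure); // not a subtype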
void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
@ -1698,7 +1789,7 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
ld(array_ptr, source_offset, sub_klass); ld(array_ptr, source_offset, sub_klass);
//assert(4 == arrayOopDesc::length_length_in_bytes(), "precondition violated."); // TODO: PPC port: assert(4 == arrayOopDesc::length_length_in_bytes(), "precondition violated.");
lwz(temp, length_offset, array_ptr); lwz(temp, length_offset, array_ptr);
cmpwi(CCR0, temp, 0); cmpwi(CCR0, temp, 0);
beq(CCR0, result_reg!=noreg ? failure : fallthru); // length 0 beq(CCR0, result_reg!=noreg ? failure : fallthru); // length 0
@ -1719,8 +1810,9 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
bind(hit); bind(hit);
std(super_klass, target_offset, sub_klass); // save result to cache std(super_klass, target_offset, sub_klass); // save result to cache
if (result_reg != noreg) li(result_reg, 0); // load zero result (indicates a hit) if (result_reg != noreg) { li(result_reg, 0); } // load zero result (indicates a hit)
if (L_success != NULL) b(*L_success); if (L_success != NULL) { b(*L_success); }
else if (result_reg == noreg) { blr(); } // return with CR0.eq if neither label nor result reg provided
bind(fallthru); bind(fallthru);
} }
@ -1732,7 +1824,7 @@ void MacroAssembler::check_klass_subtype(Register sub_klass,
Register temp2_reg, Register temp2_reg,
Label& L_success) { Label& L_success) {
Label L_failure; Label L_failure;
check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg, temp2_reg, L_success, L_failure); check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg, temp2_reg, &L_success, &L_failure);
check_klass_subtype_slow_path(sub_klass, super_klass, temp1_reg, temp2_reg, &L_success); check_klass_subtype_slow_path(sub_klass, super_klass, temp1_reg, temp2_reg, &L_success);
bind(L_failure); // Fallthru if not successful. bind(L_failure); // Fallthru if not successful.
} }
@ -1765,6 +1857,7 @@ RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
} }
} }
// Supports temp2_reg = R0.
void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj_reg, void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj_reg,
Register mark_reg, Register temp_reg, Register mark_reg, Register temp_reg,
Register temp2_reg, Label& done, Label* slow_case) { Register temp2_reg, Label& done, Label* slow_case) {
@ -1788,10 +1881,10 @@ void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj
"biased locking makes assumptions about bit layout"); "biased locking makes assumptions about bit layout");
if (PrintBiasedLockingStatistics) { if (PrintBiasedLockingStatistics) {
load_const(temp_reg, (address) BiasedLocking::total_entry_count_addr(), temp2_reg); load_const(temp2_reg, (address) BiasedLocking::total_entry_count_addr(), temp_reg);
lwz(temp2_reg, 0, temp_reg); lwzx(temp_reg, temp2_reg);
addi(temp2_reg, temp2_reg, 1); addi(temp_reg, temp_reg, 1);
stw(temp2_reg, 0, temp_reg); stwx(temp_reg, temp2_reg);
} }
andi(temp_reg, mark_reg, markOopDesc::biased_lock_mask_in_place); andi(temp_reg, mark_reg, markOopDesc::biased_lock_mask_in_place);
@ -1809,10 +1902,10 @@ void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj
if (PrintBiasedLockingStatistics) { if (PrintBiasedLockingStatistics) {
Label l; Label l;
bne(cr_reg, l); bne(cr_reg, l);
load_const(mark_reg, (address) BiasedLocking::biased_lock_entry_count_addr()); load_const(temp2_reg, (address) BiasedLocking::biased_lock_entry_count_addr());
lwz(temp2_reg, 0, mark_reg); lwzx(mark_reg, temp2_reg);
addi(temp2_reg, temp2_reg, 1); addi(mark_reg, mark_reg, 1);
stw(temp2_reg, 0, mark_reg); stwx(mark_reg, temp2_reg);
// restore mark_reg // restore mark_reg
ld(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg); ld(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
bind(l); bind(l);
@ -1878,10 +1971,10 @@ void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj
// need to revoke that bias. The revocation will occur in the // need to revoke that bias. The revocation will occur in the
// interpreter runtime in the slow case. // interpreter runtime in the slow case.
if (PrintBiasedLockingStatistics) { if (PrintBiasedLockingStatistics) {
load_const(temp_reg, (address) BiasedLocking::anonymously_biased_lock_entry_count_addr(), temp2_reg); load_const(temp2_reg, (address) BiasedLocking::anonymously_biased_lock_entry_count_addr(), temp_reg);
lwz(temp2_reg, 0, temp_reg); lwzx(temp_reg, temp2_reg);
addi(temp2_reg, temp2_reg, 1); addi(temp_reg, temp_reg, 1);
stw(temp2_reg, 0, temp_reg); stwx(temp_reg, temp2_reg);
} }
b(done); b(done);
@ -1892,15 +1985,14 @@ void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj
// value as the comparison value when doing the cas to acquire the // value as the comparison value when doing the cas to acquire the
// bias in the current epoch. In other words, we allow transfer of // bias in the current epoch. In other words, we allow transfer of
// the bias from one thread to another directly in this situation. // the bias from one thread to another directly in this situation.
andi(temp_reg, mark_reg, markOopDesc::age_mask_in_place); load_klass(temp_reg, obj_reg);
orr(temp_reg, R16_thread, temp_reg); andi(temp2_reg, mark_reg, markOopDesc::age_mask_in_place);
load_klass(temp2_reg, obj_reg); orr(temp2_reg, R16_thread, temp2_reg);
ld(temp2_reg, in_bytes(Klass::prototype_header_offset()), temp2_reg); ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
orr(temp_reg, temp_reg, temp2_reg); orr(temp_reg, temp2_reg, temp_reg);
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0"); assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
// CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg, cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
/*compare_value=*/mark_reg, /*exchange_value=*/temp_reg, /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
/*where=*/obj_reg, /*where=*/obj_reg,
@ -1913,10 +2005,10 @@ void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj
// need to revoke that bias. The revocation will occur in the // need to revoke that bias. The revocation will occur in the
// interpreter runtime in the slow case. // interpreter runtime in the slow case.
if (PrintBiasedLockingStatistics) { if (PrintBiasedLockingStatistics) {
load_const(temp_reg, (address) BiasedLocking::rebiased_lock_entry_count_addr(), temp2_reg); load_const(temp2_reg, (address) BiasedLocking::rebiased_lock_entry_count_addr(), temp_reg);
lwz(temp2_reg, 0, temp_reg); lwzx(temp_reg, temp2_reg);
addi(temp2_reg, temp2_reg, 1); addi(temp_reg, temp_reg, 1);
stw(temp2_reg, 0, temp_reg); stwx(temp_reg, temp2_reg);
} }
b(done); b(done);
@ -1952,10 +2044,10 @@ void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj
if (PrintBiasedLockingStatistics) { if (PrintBiasedLockingStatistics) {
Label l; Label l;
bne(cr_reg, l); bne(cr_reg, l);
load_const(temp_reg, (address) BiasedLocking::revoked_lock_entry_count_addr(), temp2_reg); load_const(temp2_reg, (address) BiasedLocking::revoked_lock_entry_count_addr(), temp_reg);
lwz(temp2_reg, 0, temp_reg); lwzx(temp_reg, temp2_reg);
addi(temp2_reg, temp2_reg, 1); addi(temp_reg, temp_reg, 1);
stw(temp2_reg, 0, temp_reg); stwx(temp_reg, temp2_reg);
bind(l); bind(l);
} }
@ -1977,6 +2069,109 @@ void MacroAssembler::biased_locking_exit (ConditionRegister cr_reg, Register mar
beq(cr_reg, done); beq(cr_reg, done);
} }
// allocation (for C1)
void MacroAssembler::eden_allocate(
Register obj, // result: pointer to object after successful allocation
Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
int con_size_in_bytes, // object size in bytes if known at compile time
Register t1, // temp register
Register t2, // temp register
Label& slow_case // continuation point if fast allocation fails
) {
b(slow_case);
}
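Note that eden_allocate emits no fast path at all yet: the unconditional branch sends every C1 eden allocation request straight to slow_case.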
void MacroAssembler::tlab_allocate(
Register obj, // result: pointer to object after successful allocation
Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
int con_size_in_bytes, // object size in bytes if known at compile time
Register t1, // temp register
Label& slow_case // continuation point if fast allocation fails
) {
// make sure arguments make sense
assert_different_registers(obj, var_size_in_bytes, t1);
assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size");
assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");
const Register new_top = t1;
//verify_tlab(); not implemented
ld(obj, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
ld(R0, in_bytes(JavaThread::tlab_end_offset()), R16_thread);
if (var_size_in_bytes == noreg) {
addi(new_top, obj, con_size_in_bytes);
} else {
add(new_top, obj, var_size_in_bytes);
}
cmpld(CCR0, new_top, R0);
bc_far_optimized(Assembler::bcondCRbiIs1, bi0(CCR0, Assembler::greater), slow_case);
#ifdef ASSERT
// make sure new free pointer is properly aligned
{
Label L;
andi_(R0, new_top, MinObjAlignmentInBytesMask);
beq(CCR0, L);
stop("updated TLAB free is not properly aligned", 0x934);
bind(L);
}
#endif // ASSERT
// update the tlab top pointer
std(new_top, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
//verify_tlab(); not implemented
}
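tlab_allocate is the standard bump-pointer fast path: read top and end from the thread, bump, and bail to slow_case on overflow. A minimal C++ model of the same logic (hypothetical names; the real code reads the fields off R16_thread):

  #include <cstddef>

  static inline char* tlab_allocate_model(char** tlab_top, char* tlab_end,
                                          size_t size_in_bytes) {
    char* obj     = *tlab_top;               // current allocation top
    char* new_top = obj + size_in_bytes;     // con_size_in_bytes or var_size_in_bytes
    if (new_top > tlab_end) return nullptr;  // caller takes slow_case
    *tlab_top = new_top;                     // bump; TLAB is thread-local, no sync needed
    return obj;                              // object occupies [obj, new_top)
  }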
void MacroAssembler::tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case) {
unimplemented("tlab_refill");
}
void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes, Register t1, Register t2) {
unimplemented("incr_allocated_bytes");
}
address MacroAssembler::emit_trampoline_stub(int destination_toc_offset,
int insts_call_instruction_offset, Register Rtoc) {
// Start the stub.
address stub = start_a_stub(64);
if (stub == NULL) { return NULL; } // CodeCache full: bail out
// Create a trampoline stub relocation which relates this trampoline stub
// with the call instruction at insts_call_instruction_offset in the
// instructions code-section.
relocate(trampoline_stub_Relocation::spec(code()->insts()->start() + insts_call_instruction_offset));
const int stub_start_offset = offset();
// For java_to_interp stubs we use R11_scratch1 as scratch register
// and in call trampoline stubs we use R12_scratch2. This way we
// can distinguish them (see is_NativeCallTrampolineStub_at()).
Register reg_scratch = R12_scratch2;
// Now, create the trampoline stub's code:
// - load the TOC
// - load the call target from the constant pool
// - call
if (Rtoc == noreg) {
calculate_address_from_global_toc(reg_scratch, method_toc());
Rtoc = reg_scratch;
}
ld_largeoffset_unchecked(reg_scratch, destination_toc_offset, Rtoc, false);
mtctr(reg_scratch);
bctr();
const address stub_start_addr = addr_at(stub_start_offset);
// Assert that the encoded destination_toc_offset can be identified and that it is correct.
assert(destination_toc_offset == NativeCallTrampolineStub_at(stub_start_addr)->destination_toc_offset(),
"encoded offset into the constant pool must match");
// Trampoline_stub_size should be good.
assert((uint)(offset() - stub_start_offset) <= trampoline_stub_size, "should be good size");
assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");
// End the stub.
end_a_stub();
return stub;
}
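The stub fits within trampoline_stub_size = 6 instructions (see the header change below) and takes one of two shapes, which is_NativeCallTrampolineStub_at() later matches. A rough sketch of the emitted sequences (operands illustrative, not from this patch):

  // Long form (Rtoc == noreg):            // Short form (TOC register passed in):
  //   addis R12, ..., hi(toc)             //   ld    R12, toc_offset(Rtoc)
  //   addi  R12, R12, lo(toc)             //   mtctr R12
  //   ld    R12, toc_offset(R12)          //   bctr
  //   mtctr R12
  //   bctr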
// TM on PPC64. // TM on PPC64.
void MacroAssembler::atomic_inc_ptr(Register addr, Register result, int simm16) { void MacroAssembler::atomic_inc_ptr(Register addr, Register result, int simm16) {
Label retry; Label retry;
@ -2387,17 +2582,16 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
// Must fence, otherwise, preceding store(s) may float below cmpxchg. // Must fence, otherwise, preceding store(s) may float below cmpxchg.
// Compare object markOop with mark and if equal exchange scratch1 with object markOop. // Compare object markOop with mark and if equal exchange scratch1 with object markOop.
// CmpxchgX sets cr_reg to cmpX(current, displaced).
membar(Assembler::StoreStore);
cmpxchgd(/*flag=*/flag, cmpxchgd(/*flag=*/flag,
/*current_value=*/current_header, /*current_value=*/current_header,
/*compare_value=*/displaced_header, /*compare_value=*/displaced_header,
/*exchange_value=*/box, /*exchange_value=*/box,
/*where=*/oop, /*where=*/oop,
MacroAssembler::MemBarAcq, MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
MacroAssembler::cmpxchgx_hint_acquire_lock(), MacroAssembler::cmpxchgx_hint_acquire_lock(),
noreg, noreg,
&cas_failed); &cas_failed,
/*check without membar and ldarx first*/true);
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0"); assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
// If the compare-and-exchange succeeded, then we found an unlocked // If the compare-and-exchange succeeded, then we found an unlocked
@ -2410,8 +2604,7 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
// Check if the owner is self by comparing the value in the markOop of object // Check if the owner is self by comparing the value in the markOop of object
// (current_header) with the stack pointer. // (current_header) with the stack pointer.
sub(current_header, current_header, R1_SP); sub(current_header, current_header, R1_SP);
load_const_optimized(temp, (address) (~(os::vm_page_size()-1) | load_const_optimized(temp, ~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place);
markOopDesc::lock_mask_in_place));
and_(R0/*==0?*/, current_header, temp); and_(R0/*==0?*/, current_header, temp);
// If condition is true we are cont and hence we can store 0 as the // If condition is true we are cont and hence we can store 0 as the
@ -2437,8 +2630,6 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
// Try to CAS m->owner from NULL to current thread. // Try to CAS m->owner from NULL to current thread.
addi(temp, displaced_header, ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value); addi(temp, displaced_header, ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value);
li(displaced_header, 0);
// CmpxchgX sets flag to cmpX(current, displaced).
cmpxchgd(/*flag=*/flag, cmpxchgd(/*flag=*/flag,
/*current_value=*/current_header, /*current_value=*/current_header,
/*compare_value=*/(intptr_t)0, /*compare_value=*/(intptr_t)0,
@ -2924,31 +3115,12 @@ void MacroAssembler::load_klass(Register dst, Register src) {
} }
} }
void MacroAssembler::load_klass_with_trap_null_check(Register dst, Register src) {
if (!os::zero_page_read_protected()) {
if (TrapBasedNullChecks) {
trap_null_check(src);
}
}
load_klass(dst, src);
}
void MacroAssembler::reinit_heapbase(Register d, Register tmp) {
if (Universe::heap() != NULL) {
load_const_optimized(R30, Universe::narrow_ptrs_base(), tmp);
} else {
// Heap not yet allocated. Load indirectly.
int simm16_offset = load_const_optimized(R30, Universe::narrow_ptrs_base_addr(), tmp, true);
ld(R30, simm16_offset, R30);
}
}
// Clear Array // Clear Array
// Kills both input registers. tmp == R0 is allowed. // Kills both input registers. tmp == R0 is allowed.
void MacroAssembler::clear_memory_doubleword(Register base_ptr, Register cnt_dwords, Register tmp) { void MacroAssembler::clear_memory_doubleword(Register base_ptr, Register cnt_dwords, Register tmp) {
// Procedure for large arrays (uses data cache block zero instruction). // Procedure for large arrays (uses data cache block zero instruction).
Label startloop, fast, fastloop, small_rest, restloop, done; Label startloop, fast, fastloop, small_rest, restloop, done;
const int cl_size = VM_Version::get_cache_line_size(), const int cl_size = VM_Version::L1_data_cache_line_size(),
cl_dwords = cl_size>>3, cl_dwords = cl_size>>3,
cl_dw_addr_bits = exact_log2(cl_dwords), cl_dw_addr_bits = exact_log2(cl_dwords),
dcbz_min = 1; // Min count of dcbz executions, needs to be >0. dcbz_min = 1; // Min count of dcbz executions, needs to be >0.
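Worked arithmetic for these constants, assuming a 128-byte L1 data cache line:

  //   cl_size         = 128
  //   cl_dwords       = 128 >> 3       = 16   doublewords per line
  //   cl_dw_addr_bits = exact_log2(16) =  4   low address bits within a line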
@ -4021,7 +4193,7 @@ void MacroAssembler::multiply_128_x_128_loop(Register x_xstart,
bind(L_check_1); bind(L_check_1);
addi(idx, idx, 0x2); addi(idx, idx, 0x2);
andi_(idx, idx, 0x1) ; andi_(idx, idx, 0x1);
addic_(idx, idx, -1); addic_(idx, idx, -1);
blt(CCR0, L_post_third_loop_done); blt(CCR0, L_post_third_loop_done);
@ -4251,17 +4423,42 @@ void MacroAssembler::verify_oop(Register oop, const char* msg) {
address/* FunctionDescriptor** */fd = StubRoutines::verify_oop_subroutine_entry_address(); address/* FunctionDescriptor** */fd = StubRoutines::verify_oop_subroutine_entry_address();
const Register tmp = R11; // Will be preserved. const Register tmp = R11; // Will be preserved.
const int nbytes_save = 11*8; // Volatile gprs except R0. const int nbytes_save = MacroAssembler::num_volatile_regs * 8;
save_volatile_gprs(R1_SP, -nbytes_save); // except R0 save_volatile_gprs(R1_SP, -nbytes_save); // except R0
if (oop == tmp) mr(R4_ARG2, oop); mr_if_needed(R4_ARG2, oop);
save_LR_CR(tmp); // save in old frame
push_frame_reg_args(nbytes_save, tmp);
// load FunctionDescriptor** / entry_address *
load_const_optimized(tmp, fd, R0);
// load FunctionDescriptor* / entry_address
ld(tmp, 0, tmp);
load_const_optimized(R3_ARG1, (address)msg, R0);
// Call destination for its side effect.
call_c(tmp);
pop_frame();
restore_LR_CR(tmp);
restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
}
void MacroAssembler::verify_oop_addr(RegisterOrConstant offs, Register base, const char* msg) {
if (!VerifyOops) {
return;
}
address/* FunctionDescriptor** */fd = StubRoutines::verify_oop_subroutine_entry_address();
const Register tmp = R11; // Will be preserved.
const int nbytes_save = MacroAssembler::num_volatile_regs * 8;
save_volatile_gprs(R1_SP, -nbytes_save); // except R0
ld(R4_ARG2, offs, base);
save_LR_CR(tmp); // save in old frame save_LR_CR(tmp); // save in old frame
push_frame_reg_args(nbytes_save, tmp); push_frame_reg_args(nbytes_save, tmp);
// load FunctionDescriptor** / entry_address * // load FunctionDescriptor** / entry_address *
load_const_optimized(tmp, fd, R0); load_const_optimized(tmp, fd, R0);
// load FunctionDescriptor* / entry_address // load FunctionDescriptor* / entry_address
ld(tmp, 0, tmp); ld(tmp, 0, tmp);
if (oop != tmp) mr_if_needed(R4_ARG2, oop);
load_const_optimized(R3_ARG1, (address)msg, R0); load_const_optimized(R3_ARG1, (address)msg, R0);
// Call destination for its side effect. // Call destination for its side effect.
call_c(tmp); call_c(tmp);

View file

@ -119,11 +119,8 @@ class MacroAssembler: public Assembler {
// Emits an oop const to the constant pool, loads the constant, and // Emits an oop const to the constant pool, loads the constant, and
// sets a relocation info with address current_pc. // sets a relocation info with address current_pc.
void load_const_from_method_toc(Register dst, AddressLiteral& a, Register toc); // Returns true if successful.
void load_toc_from_toc(Register dst, AddressLiteral& a, Register toc) { bool load_const_from_method_toc(Register dst, AddressLiteral& a, Register toc, bool fixed_size = false);
assert(dst == R2_TOC, "base register must be TOC");
load_const_from_method_toc(dst, a, toc);
}
static bool is_load_const_from_method_toc_at(address a); static bool is_load_const_from_method_toc_at(address a);
static int get_offset_of_load_const_from_method_toc_at(address a); static int get_offset_of_load_const_from_method_toc_at(address a);
@ -174,6 +171,7 @@ class MacroAssembler: public Assembler {
// optimize: flag for telling the conditional far branch to optimize // optimize: flag for telling the conditional far branch to optimize
// itself when relocated. // itself when relocated.
void bc_far(int boint, int biint, Label& dest, int optimize); void bc_far(int boint, int biint, Label& dest, int optimize);
void bc_far_optimized(int boint, int biint, Label& dest); // 1 or 2 instructions
// Relocation of conditional far branches. // Relocation of conditional far branches.
static bool is_bc_far_at(address instruction_addr); static bool is_bc_far_at(address instruction_addr);
static address get_dest_of_bc_far_at(address instruction_addr); static address get_dest_of_bc_far_at(address instruction_addr);
@ -262,6 +260,7 @@ class MacroAssembler: public Assembler {
// some ABI-related functions // some ABI-related functions
void save_nonvolatile_gprs( Register dst_base, int offset); void save_nonvolatile_gprs( Register dst_base, int offset);
void restore_nonvolatile_gprs(Register src_base, int offset); void restore_nonvolatile_gprs(Register src_base, int offset);
enum { num_volatile_regs = 11 + 14 }; // GPR + FPR
void save_volatile_gprs( Register dst_base, int offset); void save_volatile_gprs( Register dst_base, int offset);
void restore_volatile_gprs(Register src_base, int offset); void restore_volatile_gprs(Register src_base, int offset);
void save_LR_CR( Register tmp); // tmp contains LR on return. void save_LR_CR( Register tmp); // tmp contains LR on return.
@ -461,8 +460,10 @@ class MacroAssembler: public Assembler {
Register super_klass, Register super_klass,
Register temp1_reg, Register temp1_reg,
Register temp2_reg, Register temp2_reg,
Label& L_success, Label* L_success,
Label& L_failure); Label* L_failure,
Label* L_slow_path = NULL, // default fall through
RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
// The rest of the type check; must be wired to a corresponding fast path. // The rest of the type check; must be wired to a corresponding fast path.
// It does not repeat the fast path logic, so don't use it standalone. // It does not repeat the fast path logic, so don't use it standalone.
@ -507,6 +508,28 @@ class MacroAssembler: public Assembler {
// biased locking exit case failed. // biased locking exit case failed.
void biased_locking_exit(ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done); void biased_locking_exit(ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done);
// allocation (for C1)
void eden_allocate(
Register obj, // result: pointer to object after successful allocation
Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
int con_size_in_bytes, // object size in bytes if known at compile time
Register t1, // temp register
Register t2, // temp register
Label& slow_case // continuation point if fast allocation fails
);
void tlab_allocate(
Register obj, // result: pointer to object after successful allocation
Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
int con_size_in_bytes, // object size in bytes if known at compile time
Register t1, // temp register
Label& slow_case // continuation point if fast allocation fails
);
void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case);
void incr_allocated_bytes(RegisterOrConstant size_in_bytes, Register t1, Register t2);
enum { trampoline_stub_size = 6 * 4 };
address emit_trampoline_stub(int destination_toc_offset, int insts_call_instruction_offset, Register Rtoc = noreg);
void atomic_inc_ptr(Register addr, Register result, int simm16 = 1); void atomic_inc_ptr(Register addr, Register result, int simm16 = 1);
void atomic_ori_int(Register addr, Register result, int uimm16); void atomic_ori_int(Register addr, Register result, int uimm16);
@ -597,9 +620,7 @@ class MacroAssembler: public Assembler {
// Implicit or explicit null check, jumps to static address exception_entry. // Implicit or explicit null check, jumps to static address exception_entry.
inline void null_check_throw(Register a, int offset, Register temp_reg, address exception_entry); inline void null_check_throw(Register a, int offset, Register temp_reg, address exception_entry);
inline void null_check(Register a, int offset, Label *Lis_null); // implicit only if Lis_null not provided
// Check accessed object for null. Use SIGTRAP-based null checks on AIX.
inline void load_with_trap_null_check(Register d, int si16, Register s1);
// Load heap oop and decompress. Loaded oop may not be null. // Load heap oop and decompress. Loaded oop may not be null.
// Specify tmp to save one cycle. // Specify tmp to save one cycle.
@ -619,20 +640,17 @@ class MacroAssembler: public Assembler {
inline Register decode_heap_oop_not_null(Register d, Register src = noreg); inline Register decode_heap_oop_not_null(Register d, Register src = noreg);
// Null allowed. // Null allowed.
inline Register encode_heap_oop(Register d, Register src); // Prefer null check in GC barrier!
inline void decode_heap_oop(Register d); inline void decode_heap_oop(Register d);
// Load/Store klass oop from klass field. Compress. // Load/Store klass oop from klass field. Compress.
void load_klass(Register dst, Register src); void load_klass(Register dst, Register src);
void load_klass_with_trap_null_check(Register dst, Register src);
void store_klass(Register dst_oop, Register klass, Register tmp = R0); void store_klass(Register dst_oop, Register klass, Register tmp = R0);
void store_klass_gap(Register dst_oop, Register val = noreg); // Will store 0 if val not specified. void store_klass_gap(Register dst_oop, Register val = noreg); // Will store 0 if val not specified.
static int instr_size_for_decode_klass_not_null(); static int instr_size_for_decode_klass_not_null();
void decode_klass_not_null(Register dst, Register src = noreg); void decode_klass_not_null(Register dst, Register src = noreg);
Register encode_klass_not_null(Register dst, Register src = noreg); Register encode_klass_not_null(Register dst, Register src = noreg);
// Load common heap base into register.
void reinit_heapbase(Register d, Register tmp = noreg);
// SIGTRAP-based range checks for arrays. // SIGTRAP-based range checks for arrays.
inline void trap_range_check_l(Register a, Register b); inline void trap_range_check_l(Register a, Register b);
inline void trap_range_check_l(Register a, int si16); inline void trap_range_check_l(Register a, int si16);
@ -750,6 +768,7 @@ class MacroAssembler: public Assembler {
// Emit code to verify that reg contains a valid oop if +VerifyOops is set. // Emit code to verify that reg contains a valid oop if +VerifyOops is set.
void verify_oop(Register reg, const char* s = "broken oop"); void verify_oop(Register reg, const char* s = "broken oop");
void verify_oop_addr(RegisterOrConstant offs, Register base, const char* s = "contains broken oop");
// TODO: verify method and klass metadata (compare against vptr?) // TODO: verify method and klass metadata (compare against vptr?)
void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {} void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}

View file

@ -70,9 +70,11 @@ inline void MacroAssembler::endgroup_if_needed(bool needed) {
} }
inline void MacroAssembler::membar(int bits) { inline void MacroAssembler::membar(int bits) {
// TODO: use elemental_membar(bits) for Power 8 and disable optimization of acquire-release // Comment: Usage of elemental_membar(bits) is not recommended for Power 8.
// (Matcher::post_membar_release where we use PPC64_ONLY(xop == Op_MemBarRelease ||)) // If elemental_membar(bits) is used, disable optimization of acquire-release
if (bits & StoreLoad) sync(); else lwsync(); // (Matcher::post_membar_release where we use PPC64_ONLY(xop == Op_MemBarRelease ||))!
if (bits & StoreLoad) { sync(); }
else if (bits) { lwsync(); }
} }
inline void MacroAssembler::release() { membar(LoadStore | StoreStore); } inline void MacroAssembler::release() { membar(LoadStore | StoreStore); }
inline void MacroAssembler::acquire() { membar(LoadLoad | LoadStore); } inline void MacroAssembler::acquire() { membar(LoadLoad | LoadStore); }
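The mapping above collapses every barrier combination onto at most one instruction; only StoreLoad requires the full sync:

  //   bits & StoreLoad    -> sync     (full barrier)
  //   other nonzero bits  -> lwsync   (orders LoadLoad, LoadStore, StoreStore)
  //   bits == 0           -> nothing  (the new "else if" case)
  // Hence release() and acquire() above each cost a single lwsync.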
@ -86,7 +88,7 @@ inline address MacroAssembler::global_toc() {
// Offset of given address to the global TOC. // Offset of given address to the global TOC.
inline int MacroAssembler::offset_to_global_toc(const address addr) { inline int MacroAssembler::offset_to_global_toc(const address addr) {
intptr_t offset = (intptr_t)addr - (intptr_t)MacroAssembler::global_toc(); intptr_t offset = (intptr_t)addr - (intptr_t)MacroAssembler::global_toc();
assert(Assembler::is_simm((long)offset, 31) && offset >= 0, "must be in range"); assert(Assembler::is_uimm((long)offset, 31), "must be in range");
return (int)offset; return (int)offset;
} }
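Both TOC-offset asserts now use one unsigned check: is_uimm(offset, 31) accepts exactly the non-negative offsets below 2^31, replacing the former two-part is_simm-plus-non-negative test.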
@ -98,7 +100,7 @@ inline address MacroAssembler::method_toc() {
// Offset of given address to current method's TOC. // Offset of given address to current method's TOC.
inline int MacroAssembler::offset_to_method_toc(address addr) { inline int MacroAssembler::offset_to_method_toc(address addr) {
intptr_t offset = (intptr_t)addr - (intptr_t)method_toc(); intptr_t offset = (intptr_t)addr - (intptr_t)method_toc();
assert(is_simm((long)offset, 31) && offset >= 0, "must be in range"); assert(Assembler::is_uimm((long)offset, 31), "must be in range");
return (int)offset; return (int)offset;
} }
@ -190,13 +192,13 @@ inline bool MacroAssembler::is_bc_far_variant1_at(address instruction_addr) {
// Variant 1, the 1st instruction contains the destination address: // Variant 1, the 1st instruction contains the destination address:
// //
// bcxx DEST // bcxx DEST
// endgroup // nop
// //
const int instruction_1 = *(int*)(instruction_addr); const int instruction_1 = *(int*)(instruction_addr);
const int instruction_2 = *(int*)(instruction_addr + 4); const int instruction_2 = *(int*)(instruction_addr + 4);
return is_bcxx(instruction_1) && return is_bcxx(instruction_1) &&
(inv_bd_field(instruction_1, (intptr_t)instruction_addr) != (intptr_t)(instruction_addr + 2*4)) && (inv_bd_field(instruction_1, (intptr_t)instruction_addr) != (intptr_t)(instruction_addr + 2*4)) &&
is_endgroup(instruction_2); is_nop(instruction_2);
} }
// Relocation of conditional far branches. // Relocation of conditional far branches.
@ -302,13 +304,17 @@ inline void MacroAssembler::null_check_throw(Register a, int offset, Register te
} }
} }
inline void MacroAssembler::load_with_trap_null_check(Register d, int si16, Register s1) { inline void MacroAssembler::null_check(Register a, int offset, Label *Lis_null) {
if (!os::zero_page_read_protected()) { if (!ImplicitNullChecks || needs_explicit_null_check(offset) || !os::zero_page_read_protected()) {
if (TrapBasedNullChecks) { if (TrapBasedNullChecks) {
trap_null_check(s1); assert(UseSIGTRAP, "sanity");
trap_null_check(a);
} else if (Lis_null){
Label ok;
cmpdi(CCR0, a, 0);
beq(CCR0, *Lis_null);
} }
} }
ld(d, si16, s1);
} }
inline void MacroAssembler::load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1, Register tmp) { inline void MacroAssembler::load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1, Register tmp) {
@ -365,6 +371,26 @@ inline Register MacroAssembler::encode_heap_oop_not_null(Register d, Register sr
return current; // Encoded oop is in this register. return current; // Encoded oop is in this register.
} }
inline Register MacroAssembler::encode_heap_oop(Register d, Register src) {
if (Universe::narrow_oop_base() != NULL) {
if (VM_Version::has_isel()) {
cmpdi(CCR0, src, 0);
Register co = encode_heap_oop_not_null(d, src);
assert(co == d, "sanity");
isel_0(d, CCR0, Assembler::equal);
} else {
Label isNull;
or_(d, src, src); // move and compare 0
beq(CCR0, isNull);
encode_heap_oop_not_null(d, src);
bind(isNull);
}
return d;
} else {
return encode_heap_oop_not_null(d, src);
}
}
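encode_heap_oop only adds null handling around encode_heap_oop_not_null (isel_0 zeroes the result when src compared equal to 0). The underlying arithmetic is the usual compressed-oops form; a minimal model, assuming a non-null narrow base and shift:

  #include <cstdint>

  static inline uint32_t encode_oop_model(uintptr_t oop,
                                          uintptr_t narrow_base, int shift) {
    if (oop == 0) return 0;                          // null encodes to 0 (isel_0 path)
    return (uint32_t)((oop - narrow_base) >> shift); // offset from heap base, scaled
  }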
inline Register MacroAssembler::decode_heap_oop_not_null(Register d, Register src) { inline Register MacroAssembler::decode_heap_oop_not_null(Register d, Register src) {
if (Universe::narrow_oop_base_disjoint() && src != noreg && src != d && if (Universe::narrow_oop_base_disjoint() && src != noreg && src != d &&
Universe::narrow_oop_shift() != 0) { Universe::narrow_oop_shift() != 0) {

View file

@ -1,6 +1,6 @@
/* /*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved. * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -502,8 +502,7 @@ void trace_method_handle_stub(const char* adaptername,
frame cur_frame = os::current_frame(); frame cur_frame = os::current_frame();
// Robust search of trace_calling_frame (independent of inlining). // Robust search of trace_calling_frame (independent of inlining).
// Assumes saved_regs comes from a pusha in the trace_calling_frame. assert(cur_frame.sp() <= saved_regs, "registers not saved on stack ?");
assert(cur_frame.sp() < saved_regs, "registers not saved on stack ?");
frame trace_calling_frame = os::get_sender_for_C_frame(&cur_frame); frame trace_calling_frame = os::get_sender_for_C_frame(&cur_frame);
while (trace_calling_frame.fp() < saved_regs) { while (trace_calling_frame.fp() < saved_regs) {
trace_calling_frame = os::get_sender_for_C_frame(&trace_calling_frame); trace_calling_frame = os::get_sender_for_C_frame(&trace_calling_frame);
@ -537,7 +536,7 @@ void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adapt
BLOCK_COMMENT("trace_method_handle {"); BLOCK_COMMENT("trace_method_handle {");
const Register tmp = R11; // Will be preserved. const Register tmp = R11; // Will be preserved.
const int nbytes_save = 11*8; // volatile gprs except R0 const int nbytes_save = MacroAssembler::num_volatile_regs * 8;
__ save_volatile_gprs(R1_SP, -nbytes_save); // except R0 __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
__ save_LR_CR(tmp); // save in old frame __ save_LR_CR(tmp); // save in old frame

View file

@ -1,6 +1,6 @@
/* /*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved. * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -65,13 +65,17 @@ address NativeCall::destination() const {
address destination = Assembler::bxx_destination(addr); address destination = Assembler::bxx_destination(addr);
// Do we use a trampoline stub for this call? // Do we use a trampoline stub for this call?
CodeBlob* cb = CodeCache::find_blob_unsafe(addr); // Else we get assertion if nmethod is zombie. // Trampoline stubs are located behind the main code.
assert(cb && cb->is_nmethod(), "sanity"); if (destination > addr) {
nmethod *nm = (nmethod *)cb; // Filter out recursive method invocation (call to verified/unverified entry point).
if (nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) { CodeBlob* cb = CodeCache::find_blob_unsafe(addr); // Else we get assertion if nmethod is zombie.
// Yes we do, so get the destination from the trampoline stub. assert(cb && cb->is_nmethod(), "sanity");
const address trampoline_stub_addr = destination; nmethod *nm = (nmethod *)cb;
destination = NativeCallTrampolineStub_at(trampoline_stub_addr)->destination(nm); if (nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) {
// Yes we do, so get the destination from the trampoline stub.
const address trampoline_stub_addr = destination;
destination = NativeCallTrampolineStub_at(trampoline_stub_addr)->destination(nm);
}
} }
return destination; return destination;
@ -267,7 +271,7 @@ void NativeMovConstReg::set_data(intptr_t data) {
oop_addr = r->oop_addr(); oop_addr = r->oop_addr();
*oop_addr = cast_to_oop(data); *oop_addr = cast_to_oop(data);
} else { } else {
assert(oop_addr == r->oop_addr(), "must be only one set-oop here") ; assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
} }
} }
if (iter.type() == relocInfo::metadata_type) { if (iter.type() == relocInfo::metadata_type) {
@ -351,6 +355,27 @@ void NativeJump::verify() {
} }
#endif // ASSERT #endif // ASSERT
void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
CodeBuffer cb(code_pos, BytesPerInstWord + 1);
MacroAssembler* a = new MacroAssembler(&cb);
a->b(entry);
ICache::ppc64_flush_icache_bytes(code_pos, NativeGeneralJump::instruction_size);
}
// MT-safe patching of a jmp instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
// Bytes beyond offset NativeGeneralJump::instruction_size are copied by caller.
// Finally patch out the jump.
volatile juint *jump_addr = (volatile juint*)instr_addr;
// Release not needed because caller uses invalidate_range after copying the remaining bytes.
//OrderAccess::release_store(jump_addr, *((juint*)code_buffer));
*jump_addr = *((juint*)code_buffer); // atomically store code over branch instruction
ICache::ppc64_flush_icache_bytes(instr_addr, NativeGeneralJump::instruction_size);
}
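The patch above is MT-safe because the branch occupies a single naturally aligned 32-bit instruction word: a concurrent instruction fetch observes either the old or the new word, never a blend, and the subsequent icache flush publishes the change.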
//------------------------------------------------------------------- //-------------------------------------------------------------------
// Call trampoline stubs. // Call trampoline stubs.
@ -364,10 +389,12 @@ void NativeJump::verify() {
// //
address NativeCallTrampolineStub::encoded_destination_addr() const { address NativeCallTrampolineStub::encoded_destination_addr() const {
address instruction_addr = addr_at(2 * BytesPerInstWord); address instruction_addr = addr_at(0 * BytesPerInstWord);
assert(MacroAssembler::is_ld_largeoffset(instruction_addr), if (!MacroAssembler::is_ld_largeoffset(instruction_addr)) {
"must be a ld with large offset (from the constant pool)"); instruction_addr = addr_at(2 * BytesPerInstWord);
assert(MacroAssembler::is_ld_largeoffset(instruction_addr),
"must be a ld with large offset (from the constant pool)");
}
return instruction_addr; return instruction_addr;
} }

View file

@ -1,6 +1,6 @@
/* /*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved. * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -50,6 +50,8 @@ class NativeInstruction VALUE_OBJ_CLASS_SPEC {
friend class Relocation; friend class Relocation;
public: public:
bool is_jump() { return Assembler::is_b(long_at(0)); } // See NativeGeneralJump.
bool is_sigtrap_ic_miss_check() { bool is_sigtrap_ic_miss_check() {
assert(UseSIGTRAP, "precondition"); assert(UseSIGTRAP, "precondition");
return MacroAssembler::is_trap_ic_miss_check(long_at(0)); return MacroAssembler::is_trap_ic_miss_check(long_at(0));
@ -235,8 +237,8 @@ inline NativeFarCall* nativeFarCall_at(address instr) {
return call; return call;
} }
// An interface for accessing/manipulating native set_oop imm, reg instructions. // An interface for accessing/manipulating native set_oop imm, reg instructions
// (used to manipulate inlined data references, etc.) // (used to manipulate inlined data references, etc.).
class NativeMovConstReg: public NativeInstruction { class NativeMovConstReg: public NativeInstruction {
public: public:
@ -384,10 +386,21 @@ class NativeCallTrampolineStub : public NativeInstruction {
void set_destination(address new_destination); void set_destination(address new_destination);
}; };
// Note: Other stubs must not begin with this pattern.
inline bool is_NativeCallTrampolineStub_at(address address) { inline bool is_NativeCallTrampolineStub_at(address address) {
int first_instr = *(int*)address; int first_instr = *(int*)address;
return Assembler::is_addis(first_instr) && // calculate_address_from_global_toc and long form of ld_largeoffset_unchecked begin with addis with target R12
(Register)(intptr_t)Assembler::inv_rt_field(first_instr) == R12_scratch2; if (Assembler::is_addis(first_instr) &&
(Register)(intptr_t)Assembler::inv_rt_field(first_instr) == R12_scratch2) return true;
// short form of ld_largeoffset_unchecked is ld which is followed by mtctr
int second_instr = *((int*)address + 1);
if (Assembler::is_ld(first_instr) &&
(Register)(intptr_t)Assembler::inv_rt_field(first_instr) == R12_scratch2 &&
Assembler::is_mtctr(second_instr) &&
(Register)(intptr_t)Assembler::inv_rs_field(second_instr) == R12_scratch2) return true;
return false;
} }
inline NativeCallTrampolineStub* NativeCallTrampolineStub_at(address address) { inline NativeCallTrampolineStub* NativeCallTrampolineStub_at(address address) {
@ -395,4 +408,102 @@ inline NativeCallTrampolineStub* NativeCallTrampolineStub_at(address address) {
return (NativeCallTrampolineStub*)address; return (NativeCallTrampolineStub*)address;
} }
///////////////////////////////////////////////////////////////////////////////////////////////////
//-------------------------------------
// N a t i v e G e n e r a l J u m p
//-------------------------------------
// Despite the name, handles only simple branches.
class NativeGeneralJump;
inline NativeGeneralJump* nativeGeneralJump_at(address address);
// Currently only implemented as single unconditional branch.
class NativeGeneralJump: public NativeInstruction {
public:
enum PPC64_specific_constants {
instruction_size = 4
};
address instruction_address() const { return addr_at(0); }
// Creation.
friend inline NativeGeneralJump* nativeGeneralJump_at(address addr) {
NativeGeneralJump* jump = (NativeGeneralJump*)(addr);
DEBUG_ONLY( jump->verify(); )
return jump;
}
// Insertion of native general jump instruction.
static void insert_unconditional(address code_pos, address entry);
address jump_destination() const {
DEBUG_ONLY( verify(); )
return addr_at(0) + Assembler::inv_li_field(long_at(0));
}
void set_jump_destination(address dest) {
DEBUG_ONLY( verify(); )
insert_unconditional(addr_at(0), dest);
}
static void replace_mt_safe(address instr_addr, address code_buffer);
void verify() const { guarantee(Assembler::is_b(long_at(0)), "invalid NativeGeneralJump"); }
};
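jump_destination() decodes the I-form branch: the LI field holds a signed, word-aligned displacement of up to 26 bits, so this single-instruction jump reaches roughly +-32 MB from its own address.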
// An interface for accessing/manipulating native load int (load_const32).
class NativeMovRegMem;
inline NativeMovRegMem* nativeMovRegMem_at(address address);
class NativeMovRegMem: public NativeInstruction {
public:
enum PPC64_specific_constants {
instruction_size = 8
};
address instruction_address() const { return addr_at(0); }
intptr_t offset() const {
#ifdef VM_LITTLE_ENDIAN
short *hi_ptr = (short*)(addr_at(0));
short *lo_ptr = (short*)(addr_at(4));
#else
short *hi_ptr = (short*)(addr_at(0) + 2);
short *lo_ptr = (short*)(addr_at(4) + 2);
#endif
return ((*hi_ptr) << 16) | ((*lo_ptr) & 0xFFFF);
}
void set_offset(intptr_t x) {
#ifdef VM_LITTLE_ENDIAN
short *hi_ptr = (short*)(addr_at(0));
short *lo_ptr = (short*)(addr_at(4));
#else
short *hi_ptr = (short*)(addr_at(0) + 2);
short *lo_ptr = (short*)(addr_at(4) + 2);
#endif
*hi_ptr = x >> 16;
*lo_ptr = x & 0xFFFF;
ICache::ppc64_flush_icache_bytes(addr_at(0), NativeMovRegMem::instruction_size);
}
void add_offset_in_bytes(intptr_t radd_offset) {
set_offset(offset() + radd_offset);
}
void verify() const {
guarantee(Assembler::is_lis(long_at(0)), "load_const32 1st instr");
guarantee(Assembler::is_ori(long_at(4)), "load_const32 2nd instr");
}
private:
friend inline NativeMovRegMem* nativeMovRegMem_at(address address) {
NativeMovRegMem* test = (NativeMovRegMem*)address;
DEBUG_ONLY( test->verify(); )
return test;
}
};
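The accessors above split a 32-bit immediate across the lis/ori pair; on big-endian the 16-bit immediate sits in the second halfword of each 4-byte instruction, hence the +2 in the pointer math. A standalone model of the packing (names hypothetical):

  #include <cstdint>

  // What set_offset() stores and offset() recomputes:
  static inline int32_t pack_hi_lo(int16_t hi, uint16_t lo) {
    return ((int32_t)hi << 16) | lo;  // pack_hi_lo(0x1234, 0x5678) == 0x12345678
  }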
#endif // CPU_PPC_VM_NATIVEINST_PPC_HPP #endif // CPU_PPC_VM_NATIVEINST_PPC_HPP

View file

@ -698,7 +698,7 @@ reg_class ctr_reg(SR_CTR);
// ---------------------------- // ----------------------------
reg_class flt_reg( reg_class flt_reg(
/*F0*/ // scratch F0,
F1, F1,
F2, F2,
F3, F3,
@ -735,7 +735,7 @@ reg_class flt_reg(
// Double precision float registers have virtual `high halves' that // Double precision float registers have virtual `high halves' that
// are needed by the allocator. // are needed by the allocator.
reg_class dbl_reg( reg_class dbl_reg(
/*F0, F0_H*/ // scratch F0, F0_H,
F1, F1_H, F1, F1_H,
F2, F2_H, F2, F2_H,
F3, F3_H, F3, F3_H,
@ -1040,8 +1040,6 @@ source_hpp %{ // Header information of the source block.
//---< Used for optimization in Compile::Shorten_branches >--- //---< Used for optimization in Compile::Shorten_branches >---
//-------------------------------------------------------------- //--------------------------------------------------------------
const uint trampoline_stub_size = 6 * BytesPerInstWord;
class CallStubImpl { class CallStubImpl {
public: public:
@ -1053,7 +1051,7 @@ class CallStubImpl {
// This doesn't need to be accurate to the byte, but it // This doesn't need to be accurate to the byte, but it
// must be larger than or equal to the real size of the stub. // must be larger than or equal to the real size of the stub.
static uint size_call_trampoline() { static uint size_call_trampoline() {
return trampoline_stub_size; return MacroAssembler::trampoline_stub_size;
} }
// number of relocations needed by a call trampoline stub // number of relocations needed by a call trampoline stub
@ -1079,46 +1077,10 @@ source %{
// branch via CTR (LR/link still points to the call-site above) // branch via CTR (LR/link still points to the call-site above)
void CallStubImpl::emit_trampoline_stub(MacroAssembler &_masm, int destination_toc_offset, int insts_call_instruction_offset) { void CallStubImpl::emit_trampoline_stub(MacroAssembler &_masm, int destination_toc_offset, int insts_call_instruction_offset) {
// Start the stub. address stub = __ emit_trampoline_stub(destination_toc_offset, insts_call_instruction_offset);
address stub = __ start_a_stub(Compile::MAX_stubs_size/2);
if (stub == NULL) { if (stub == NULL) {
ciEnv::current()->record_failure("CodeCache is full"); ciEnv::current()->record_out_of_memory_failure();
return;
} }
// For java_to_interp stubs we use R11_scratch1 as scratch register
// and in call trampoline stubs we use R12_scratch2. This way we
// can distinguish them (see is_NativeCallTrampolineStub_at()).
Register reg_scratch = R12_scratch2;
// Create a trampoline stub relocation which relates this trampoline stub
// with the call instruction at insts_call_instruction_offset in the
// instructions code-section.
__ relocate(trampoline_stub_Relocation::spec(__ code()->insts()->start() + insts_call_instruction_offset));
const int stub_start_offset = __ offset();
// Now, create the trampoline stub's code:
// - load the TOC
// - load the call target from the constant pool
// - call
__ calculate_address_from_global_toc(reg_scratch, __ method_toc());
__ ld_largeoffset_unchecked(reg_scratch, destination_toc_offset, reg_scratch, false);
__ mtctr(reg_scratch);
__ bctr();
const address stub_start_addr = __ addr_at(stub_start_offset);
// FIXME: Assert that the trampoline stub can be identified and patched.
// Assert that the encoded destination_toc_offset can be identified and that it is correct.
assert(destination_toc_offset == NativeCallTrampolineStub_at(stub_start_addr)->destination_toc_offset(),
"encoded offset into the constant pool must match");
// Trampoline_stub_size should be good.
assert((uint)(__ offset() - stub_start_offset) <= trampoline_stub_size, "should be good size");
assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");
// End the stub.
__ end_a_stub();
} }
//============================================================================= //=============================================================================
@ -1156,6 +1118,10 @@ EmitCallOffsets emit_call_with_trampoline_stub(MacroAssembler &_masm, address en
if (!Compile::current()->in_scratch_emit_size()) { if (!Compile::current()->in_scratch_emit_size()) {
// Put the entry point as a constant into the constant pool. // Put the entry point as a constant into the constant pool.
const address entry_point_toc_addr = __ address_constant(entry_point, RelocationHolder::none); const address entry_point_toc_addr = __ address_constant(entry_point, RelocationHolder::none);
if (entry_point_toc_addr == NULL) {
ciEnv::current()->record_out_of_memory_failure();
return offsets;
}
const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr); const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
// Emit the trampoline stub which will be related to the branch-and-link below. // Emit the trampoline stub which will be related to the branch-and-link below.
@ -2474,6 +2440,10 @@ encode %{
// Create a non-oop constant, no relocation needed. // Create a non-oop constant, no relocation needed.
// If it is an IC, it has a virtual_call_Relocation. // If it is an IC, it has a virtual_call_Relocation.
const_toc_addr = __ long_constant((jlong)$src$$constant); const_toc_addr = __ long_constant((jlong)$src$$constant);
if (const_toc_addr == NULL) {
ciEnv::current()->record_out_of_memory_failure();
return;
}
// Get the constant's TOC offset. // Get the constant's TOC offset.
toc_offset = __ offset_to_method_toc(const_toc_addr); toc_offset = __ offset_to_method_toc(const_toc_addr);
@ -2495,6 +2465,10 @@ encode %{
// Create a non-oop constant, no relocation needed. // Create a non-oop constant, no relocation needed.
// If it is an IC, it has a virtual_call_Relocation. // If it is an IC, it has a virtual_call_Relocation.
const_toc_addr = __ long_constant((jlong)$src$$constant); const_toc_addr = __ long_constant((jlong)$src$$constant);
if (const_toc_addr == NULL) {
ciEnv::current()->record_out_of_memory_failure();
return;
}
// Get the constant's TOC offset. // Get the constant's TOC offset.
const int toc_offset = __ offset_to_method_toc(const_toc_addr); const int toc_offset = __ offset_to_method_toc(const_toc_addr);
@ -2631,6 +2605,10 @@ encode %{
const_toc_addr = __ long_constant((jlong)$src$$constant); const_toc_addr = __ long_constant((jlong)$src$$constant);
} }
if (const_toc_addr == NULL) {
ciEnv::current()->record_out_of_memory_failure();
return;
}
// Get the constant's TOC offset. // Get the constant's TOC offset.
toc_offset = __ offset_to_method_toc(const_toc_addr); toc_offset = __ offset_to_method_toc(const_toc_addr);
} }
@ -2660,6 +2638,10 @@ encode %{
const_toc_addr = __ long_constant((jlong)$src$$constant); const_toc_addr = __ long_constant((jlong)$src$$constant);
} }
if (const_toc_addr == NULL) {
ciEnv::current()->record_out_of_memory_failure();
return;
}
// Get the constant's TOC offset. // Get the constant's TOC offset.
const int toc_offset = __ offset_to_method_toc(const_toc_addr); const int toc_offset = __ offset_to_method_toc(const_toc_addr);
// Store the toc offset of the constant. // Store the toc offset of the constant.
@ -3408,13 +3390,19 @@ encode %{
// Put the entry point as a constant into the constant pool. // Put the entry point as a constant into the constant pool.
const address entry_point_toc_addr = __ address_constant(entry_point, RelocationHolder::none); const address entry_point_toc_addr = __ address_constant(entry_point, RelocationHolder::none);
if (entry_point_toc_addr == NULL) {
ciEnv::current()->record_out_of_memory_failure();
return;
}
const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr); const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
// Emit the trampoline stub which will be related to the branch-and-link below. // Emit the trampoline stub which will be related to the branch-and-link below.
CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset); CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
if (ciEnv::current()->failing()) { return; } // Code cache may be full. if (ciEnv::current()->failing()) { return; } // Code cache may be full.
__ relocate(_optimized_virtual ? int method_index = resolved_method_index(cbuf);
relocInfo::opt_virtual_call_type : relocInfo::static_call_type); __ relocate(_optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
: static_call_Relocation::spec(method_index));
} }
// The real call. // The real call.
@ -3433,76 +3421,6 @@ encode %{
} }
%} %}
// Emit a method handle call.
//
// Method handle calls from compiled to compiled are going thru a
// c2i -> i2c adapter, extending the frame for their arguments. The
// caller however, returns directly to the compiled callee, that has
// to cope with the extended frame. We restore the original frame by
// loading the callers sp and adding the calculated framesize.
enc_class enc_java_handle_call(method meth) %{
// TODO: PPC port $archOpcode(ppc64Opcode_compound);
MacroAssembler _masm(&cbuf);
address entry_point = (address)$meth$$method;
// Remember the offset not the address.
const int start_offset = __ offset();
// The trampoline stub.
if (!ra_->C->in_scratch_emit_size()) {
// No entry point given, use the current pc.
// Make sure branch fits into
if (entry_point == 0) entry_point = __ pc();
// Put the entry point as a constant into the constant pool.
const address entry_point_toc_addr = __ address_constant(entry_point, RelocationHolder::none);
const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
// Emit the trampoline stub which will be related to the branch-and-link below.
CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
if (ra_->C->env()->failing()) { return; } // Code cache may be full.
assert(_optimized_virtual, "methodHandle call should be a virtual call");
__ relocate(relocInfo::opt_virtual_call_type);
}
// The real call.
// Note: At this point we do not have the address of the trampoline
// stub, and the entry point might be too far away for bl, so __ pc()
// serves as dummy and the bl will be patched later.
cbuf.set_insts_mark();
__ bl(__ pc()); // Emits a relocation.
assert(_method, "execute next statement conditionally");
// The stub for call to interpreter.
address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
if (stub == NULL) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
// Restore original sp.
__ ld(R11_scratch1, 0, R1_SP); // Load caller sp.
const long framesize = ra_->C->frame_slots() << LogBytesPerInt;
unsigned int bytes = (unsigned int)framesize;
long offset = Assembler::align_addr(bytes, frame::alignment_in_bytes);
if (Assembler::is_simm(-offset, 16)) {
__ addi(R1_SP, R11_scratch1, -offset);
} else {
__ load_const_optimized(R12_scratch2, -offset);
__ add(R1_SP, R11_scratch1, R12_scratch2);
}
#ifdef ASSERT
__ ld(R12_scratch2, 0, R1_SP); // Load from unextended_sp.
__ cmpd(CCR0, R11_scratch1, R12_scratch2);
__ asm_assert_eq("backlink changed", 0x8000);
#endif
// If this fails, the backlink should be stored before unextending the frame.
if (ra_->C->env()->failing()) {
return;
}
%}
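// For reference, the SP restore in the removed encoding above, in arithmetic
// form (an illustrative reading):
//   SP = caller_sp - align(frame_slots * BytesPerInt, frame::alignment_in_bytes)
// A single addi suffices when -offset fits a signed 16-bit immediate;
// otherwise the offset is materialized via load_const_optimized and added.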
// Second node of expanded dynamic call - the call. // Second node of expanded dynamic call - the call.
enc_class enc_java_dynamic_call_sched(method meth) %{ enc_class enc_java_dynamic_call_sched(method meth) %{
// TODO: PPC port $archOpcode(ppc64Opcode_bl); // TODO: PPC port $archOpcode(ppc64Opcode_bl);
@ -3513,6 +3431,10 @@ encode %{
// Create a call trampoline stub for the given method. // Create a call trampoline stub for the given method.
const address entry_point = !($meth$$method) ? 0 : (address)$meth$$method; const address entry_point = !($meth$$method) ? 0 : (address)$meth$$method;
const address entry_point_const = __ address_constant(entry_point, RelocationHolder::none); const address entry_point_const = __ address_constant(entry_point, RelocationHolder::none);
if (entry_point_const == NULL) {
ciEnv::current()->record_out_of_memory_failure();
return;
}
const int entry_point_const_toc_offset = __ offset_to_method_toc(entry_point_const); const int entry_point_const_toc_offset = __ offset_to_method_toc(entry_point_const);
CallStubImpl::emit_trampoline_stub(_masm, entry_point_const_toc_offset, __ offset()); CallStubImpl::emit_trampoline_stub(_masm, entry_point_const_toc_offset, __ offset());
if (ra_->C->env()->failing()) { return; } // Code cache may be full. if (ra_->C->env()->failing()) { return; } // Code cache may be full.
@ -3530,8 +3452,8 @@ encode %{
const address virtual_call_oop_addr = __ addr_at(virtual_call_oop_addr_offset); const address virtual_call_oop_addr = __ addr_at(virtual_call_oop_addr_offset);
assert(MacroAssembler::is_load_const_from_method_toc_at(virtual_call_oop_addr), assert(MacroAssembler::is_load_const_from_method_toc_at(virtual_call_oop_addr),
"should be load from TOC"); "should be load from TOC");
int method_index = resolved_method_index(cbuf);
__ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr)); __ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr, method_index));
} }
// At this point I do not have the address of the trampoline stub, // At this point I do not have the address of the trampoline stub,
@ -3564,6 +3486,7 @@ encode %{
call->_jvmadj = _jvmadj; call->_jvmadj = _jvmadj;
call->_in_rms = _in_rms; call->_in_rms = _in_rms;
call->_nesting = _nesting; call->_nesting = _nesting;
call->_override_symbolic_info = _override_symbolic_info;
// New call needs all inputs of old call. // New call needs all inputs of old call.
// Req... // Req...
@ -3620,7 +3543,11 @@ encode %{
address virtual_call_meta_addr = __ pc(); address virtual_call_meta_addr = __ pc();
// Load a clear inline cache. // Load a clear inline cache.
AddressLiteral empty_ic((address) Universe::non_oop_word()); AddressLiteral empty_ic((address) Universe::non_oop_word());
__ load_const_from_method_toc(ic_reg, empty_ic, Rtoc); bool success = __ load_const_from_method_toc(ic_reg, empty_ic, Rtoc, /*fixed_size*/ true);
if (!success) {
ciEnv::current()->record_out_of_memory_failure();
return;
}
// CALL to fixup routine. Fixup routine uses ScopeDesc info // CALL to fixup routine. Fixup routine uses ScopeDesc info
// to determine who we intended to call. // to determine who we intended to call.
__ relocate(virtual_call_Relocation::spec(virtual_call_meta_addr)); __ relocate(virtual_call_Relocation::spec(virtual_call_meta_addr));
@ -3676,7 +3603,11 @@ encode %{
__ calculate_address_from_global_toc(Rtoc, __ method_toc()); __ calculate_address_from_global_toc(Rtoc, __ method_toc());
// Put entry, env, toc into the constant pool, this needs up to 3 constant // Put entry, env, toc into the constant pool, this needs up to 3 constant
// pool entries; call_c_using_toc will optimize the call. // pool entries; call_c_using_toc will optimize the call.
__ call_c_using_toc(fd, relocInfo::runtime_call_type, Rtoc); bool success = __ call_c_using_toc(fd, relocInfo::runtime_call_type, Rtoc);
if (!success) {
ciEnv::current()->record_out_of_memory_failure();
return;
}
#endif #endif
// Check the ret_addr_offset. // Check the ret_addr_offset.
@ -6263,6 +6194,10 @@ instruct loadConF(regF dst, immF src, iRegLdst toc) %{
ins_encode %{ ins_encode %{
// TODO: PPC port $archOpcode(ppc64Opcode_lfs); // TODO: PPC port $archOpcode(ppc64Opcode_lfs);
address float_address = __ float_constant($src$$constant); address float_address = __ float_constant($src$$constant);
if (float_address == NULL) {
ciEnv::current()->record_out_of_memory_failure();
return;
}
__ lfs($dst$$FloatRegister, __ offset_to_method_toc(float_address), $toc$$Register); __ lfs($dst$$FloatRegister, __ offset_to_method_toc(float_address), $toc$$Register);
%} %}
ins_pipe(pipe_class_memory); ins_pipe(pipe_class_memory);
@ -6284,6 +6219,10 @@ instruct loadConFComp(regF dst, immF src, iRegLdst toc) %{
FloatRegister Rdst = $dst$$FloatRegister; FloatRegister Rdst = $dst$$FloatRegister;
Register Rtoc = $toc$$Register; Register Rtoc = $toc$$Register;
address float_address = __ float_constant($src$$constant); address float_address = __ float_constant($src$$constant);
if (float_address == NULL) {
ciEnv::current()->record_out_of_memory_failure();
return;
}
int offset = __ offset_to_method_toc(float_address); int offset = __ offset_to_method_toc(float_address);
int hi = (offset + (1<<15))>>16; int hi = (offset + (1<<15))>>16;
int lo = offset - hi * (1<<16); int lo = offset - hi * (1<<16);
@ -6318,7 +6257,12 @@ instruct loadConD(regD dst, immD src, iRegLdst toc) %{
size(4); size(4);
ins_encode %{ ins_encode %{
// TODO: PPC port $archOpcode(ppc64Opcode_lfd); // TODO: PPC port $archOpcode(ppc64Opcode_lfd);
int offset = __ offset_to_method_toc(__ double_constant($src$$constant)); address float_address = __ double_constant($src$$constant);
if (float_address == NULL) {
ciEnv::current()->record_out_of_memory_failure();
return;
}
int offset = __ offset_to_method_toc(float_address);
__ lfd($dst$$FloatRegister, offset, $toc$$Register); __ lfd($dst$$FloatRegister, offset, $toc$$Register);
%} %}
ins_pipe(pipe_class_memory); ins_pipe(pipe_class_memory);
@ -6340,7 +6284,11 @@ instruct loadConDComp(regD dst, immD src, iRegLdst toc) %{
FloatRegister Rdst = $dst$$FloatRegister; FloatRegister Rdst = $dst$$FloatRegister;
Register Rtoc = $toc$$Register; Register Rtoc = $toc$$Register;
address float_address = __ double_constant($src$$constant); address float_address = __ double_constant($src$$constant);
int offset = __ offset_to_method_toc(float_address); if (float_address == NULL) {
ciEnv::current()->record_out_of_memory_failure();
return;
}
int offset = __ offset_to_method_toc(float_address);
int hi = (offset + (1<<15))>>16; int hi = (offset + (1<<15))>>16;
int lo = offset - hi * (1<<16); int lo = offset - hi * (1<<16);
@ -10949,16 +10897,16 @@ instruct partialSubtypeCheck(iRegPdst result, iRegP_N2P subklass, iRegP_N2P supe
// inlined locking and unlocking // inlined locking and unlocking
instruct cmpFastLock(flagsReg crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3) %{ instruct cmpFastLock(flagsReg crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2) %{
match(Set crx (FastLock oop box)); match(Set crx (FastLock oop box));
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3); effect(TEMP tmp1, TEMP tmp2);
predicate(!Compile::current()->use_rtm()); predicate(!Compile::current()->use_rtm());
format %{ "FASTLOCK $oop, $box, $tmp1, $tmp2, $tmp3" %} format %{ "FASTLOCK $oop, $box, $tmp1, $tmp2" %}
ins_encode %{ ins_encode %{
// TODO: PPC port $archOpcode(ppc64Opcode_compound); // TODO: PPC port $archOpcode(ppc64Opcode_compound);
__ compiler_fast_lock_object($crx$$CondRegister, $oop$$Register, $box$$Register, __ compiler_fast_lock_object($crx$$CondRegister, $oop$$Register, $box$$Register,
$tmp3$$Register, $tmp1$$Register, $tmp2$$Register, $tmp1$$Register, $tmp2$$Register, /*tmp3*/ R0,
UseBiasedLocking && !UseOptoBiasInlining); UseBiasedLocking && !UseOptoBiasInlining);
// If locking was successful, crx should indicate 'EQ'. // If locking was successful, crx should indicate 'EQ'.
// The compiler generates a branch to the runtime call to // The compiler generates a branch to the runtime call to
@ -10977,7 +10925,7 @@ instruct cmpFastLock_tm(flagsReg crx, iRegPdst oop, rarg2RegP box, iRegPdst tmp1
ins_encode %{ ins_encode %{
// TODO: PPC port $archOpcode(ppc64Opcode_compound); // TODO: PPC port $archOpcode(ppc64Opcode_compound);
__ compiler_fast_lock_object($crx$$CondRegister, $oop$$Register, $box$$Register, __ compiler_fast_lock_object($crx$$CondRegister, $oop$$Register, $box$$Register,
$tmp3$$Register, $tmp1$$Register, $tmp2$$Register, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
/*Biased Locking*/ false, /*Biased Locking*/ false,
_rtm_counters, _stack_rtm_counters, _rtm_counters, _stack_rtm_counters,
((Method*)(ra_->C->method()->constant_encoding()))->method_data(), ((Method*)(ra_->C->method()->constant_encoding()))->method_data(),
@ -10998,7 +10946,7 @@ instruct cmpFastUnlock(flagsReg crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1,
ins_encode %{ ins_encode %{
// TODO: PPC port $archOpcode(ppc64Opcode_compound); // TODO: PPC port $archOpcode(ppc64Opcode_compound);
__ compiler_fast_unlock_object($crx$$CondRegister, $oop$$Register, $box$$Register, __ compiler_fast_unlock_object($crx$$CondRegister, $oop$$Register, $box$$Register,
$tmp3$$Register, $tmp1$$Register, $tmp2$$Register, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
UseBiasedLocking && !UseOptoBiasInlining, UseBiasedLocking && !UseOptoBiasInlining,
false); false);
// If unlocking was successful, crx should indicate 'EQ'. // If unlocking was successful, crx should indicate 'EQ'.
@ -11017,7 +10965,7 @@ instruct cmpFastUnlock_tm(flagsReg crx, iRegPdst oop, iRegPdst box, iRegPdst tmp
ins_encode %{ ins_encode %{
// TODO: PPC port $archOpcode(ppc64Opcode_compound); // TODO: PPC port $archOpcode(ppc64Opcode_compound);
__ compiler_fast_unlock_object($crx$$CondRegister, $oop$$Register, $box$$Register, __ compiler_fast_unlock_object($crx$$CondRegister, $oop$$Register, $box$$Register,
$tmp3$$Register, $tmp1$$Register, $tmp2$$Register, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
/*Biased Locking*/ false, /*TM*/ true); /*Biased Locking*/ false, /*TM*/ true);
// If unlocking was successful, crx should indicate 'EQ'. // If unlocking was successful, crx should indicate 'EQ'.
// The compiler generates a branch to the runtime call to // The compiler generates a branch to the runtime call to
@ -11790,7 +11738,6 @@ instruct safePoint_poll_conPollAddr(rscratch2RegP poll) %{
instruct CallStaticJavaDirect(method meth) %{ instruct CallStaticJavaDirect(method meth) %{
match(CallStaticJava); match(CallStaticJava);
effect(USE meth); effect(USE meth);
predicate(!((CallStaticJavaNode*)n)->is_method_handle_invoke());
ins_cost(CALL_COST); ins_cost(CALL_COST);
ins_num_consts(3 /* up to 3 patchable constants: inline cache, 2 call targets. */); ins_num_consts(3 /* up to 3 patchable constants: inline cache, 2 call targets. */);
@ -11801,20 +11748,6 @@ instruct CallStaticJavaDirect(method meth) %{
ins_pipe(pipe_class_call); ins_pipe(pipe_class_call);
%} %}
// Schedulable version of call static node.
instruct CallStaticJavaDirectHandle(method meth) %{
match(CallStaticJava);
effect(USE meth);
predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
ins_cost(CALL_COST);
ins_num_consts(3 /* up to 3 patchable constants: inline cache, 2 call targets. */);
format %{ "CALL,static $meth \t// ==> " %}
ins_encode( enc_java_handle_call(meth) );
ins_pipe(pipe_class_call);
%}
// Call Java Dynamic Instruction // Call Java Dynamic Instruction
// Used by postalloc expand of CallDynamicJavaDirectSchedEx (actual call). // Used by postalloc expand of CallDynamicJavaDirectSchedEx (actual call).

View file

@ -609,11 +609,16 @@ REGISTER_DECLARATION(Register, R26_tmp6, R26);
REGISTER_DECLARATION(Register, R27_tmp7, R27); REGISTER_DECLARATION(Register, R27_tmp7, R27);
REGISTER_DECLARATION(Register, R28_tmp8, R28); REGISTER_DECLARATION(Register, R28_tmp8, R28);
REGISTER_DECLARATION(Register, R29_tmp9, R29); REGISTER_DECLARATION(Register, R29_tmp9, R29);
#ifndef CC_INTERP
REGISTER_DECLARATION(Register, R24_dispatch_addr, R24); REGISTER_DECLARATION(Register, R24_dispatch_addr, R24);
REGISTER_DECLARATION(Register, R25_templateTableBase, R25); REGISTER_DECLARATION(Register, R25_templateTableBase, R25);
REGISTER_DECLARATION(Register, R26_monitor, R26); REGISTER_DECLARATION(Register, R26_monitor, R26);
REGISTER_DECLARATION(Register, R27_constPoolCache, R27); REGISTER_DECLARATION(Register, R27_constPoolCache, R27);
REGISTER_DECLARATION(Register, R28_mdx, R28); REGISTER_DECLARATION(Register, R28_mdx, R28);
#endif // CC_INTERP
REGISTER_DECLARATION(Register, R19_inline_cache_reg, R19);
REGISTER_DECLARATION(Register, R29_TOC, R29);
#ifndef DONT_USE_REGISTER_DEFINES #ifndef DONT_USE_REGISTER_DEFINES
#define R21_tmp1 AS_REGISTER(Register, R21) #define R21_tmp1 AS_REGISTER(Register, R21)
@ -635,7 +640,11 @@ REGISTER_DECLARATION(Register, R28_mdx, R28);
#define R28_mdx AS_REGISTER(Register, R28) #define R28_mdx AS_REGISTER(Register, R28)
#endif #endif
#define R19_inline_cache_reg AS_REGISTER(Register, R19)
#define R29_TOC AS_REGISTER(Register, R29)
#define CCR4_is_synced AS_REGISTER(ConditionRegister, CCR4) #define CCR4_is_synced AS_REGISTER(ConditionRegister, CCR4)
#endif
// Scratch registers are volatile. // Scratch registers are volatile.
REGISTER_DECLARATION(Register, R11_scratch1, R11); REGISTER_DECLARATION(Register, R11_scratch1, R11);

View file

@ -84,13 +84,11 @@ address Relocation::pd_call_destination(address orig_addr) {
NativeConditionalFarBranch* branch = NativeConditionalFarBranch_at(inst_loc); NativeConditionalFarBranch* branch = NativeConditionalFarBranch_at(inst_loc);
return branch->branch_destination(); return branch->branch_destination();
} else { } else {
// There are two instructions at the beginning of a stub, therefore we
// load at orig_addr + 8.
orig_addr = nativeCall_at(inst_loc)->get_trampoline(); orig_addr = nativeCall_at(inst_loc)->get_trampoline();
if (orig_addr == NULL) { if (orig_addr == NULL) {
return (address) -1; return (address) -1;
} else { } else {
return (address) nativeMovConstReg_at(orig_addr + 8)->data(); return ((NativeCallTrampolineStub*)orig_addr)->destination();
} }
} }
} }

View file

@ -1,6 +1,6 @@
/* /*
* Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved. * Copyright 2012, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -45,16 +45,6 @@
#ifdef COMPILER2 #ifdef COMPILER2
// SP adjustment (must use unextended SP) for method handle call sites
// during exception handling.
static intptr_t adjust_SP_for_methodhandle_callsite(JavaThread *thread) {
RegisterMap map(thread, false);
// The frame constructor will do the correction for us (see frame::adjust_unextended_SP).
frame mh_caller_frame = thread->last_frame().sender(&map);
assert(mh_caller_frame.is_compiled_frame(), "Only may reach here for compiled MH call sites");
return (intptr_t) mh_caller_frame.unextended_sp();
}
//------------------------------generate_exception_blob--------------------------- //------------------------------generate_exception_blob---------------------------
// Creates exception blob at the end. // Creates exception blob at the end.
// Using exception blob, this code is jumped from a compiled method. // Using exception blob, this code is jumped from a compiled method.
@ -129,17 +119,10 @@ void OptoRuntime::generate_exception_blob() {
OopMapSet* oop_maps = new OopMapSet(); OopMapSet* oop_maps = new OopMapSet();
oop_maps->add_gc_map(calls_return_pc - start, map); oop_maps->add_gc_map(calls_return_pc - start, map);
// Get unextended_sp for method handle call sites.
Label mh_callsite, mh_done; // Use a 2nd C call if it's a method handle call site.
__ lwa(R4_ARG2, in_bytes(JavaThread::is_method_handle_return_offset()), R16_thread);
__ cmpwi(CCR0, R4_ARG2, 0);
__ bne(CCR0, mh_callsite);
__ mtctr(R3_RET); // Move address of exception handler to SR_CTR. __ mtctr(R3_RET); // Move address of exception handler to SR_CTR.
__ reset_last_Java_frame(); __ reset_last_Java_frame();
__ pop_frame(); __ pop_frame();
__ bind(mh_done);
// We have a handler in register SR_CTR (could be deopt blob). // We have a handler in register SR_CTR (could be deopt blob).
// Get the exception oop. // Get the exception oop.
@ -161,25 +144,6 @@ void OptoRuntime::generate_exception_blob() {
__ mtlr(R4_ARG2); __ mtlr(R4_ARG2);
__ bctr(); __ bctr();
// Same as above, but also set sp to unextended_sp.
__ bind(mh_callsite);
__ mr(R31, R3_RET); // Save branch address.
__ mr(R3_ARG1, R16_thread);
#if defined(ABI_ELFv2)
__ call_c((address) adjust_SP_for_methodhandle_callsite, relocInfo::none);
#else
__ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, adjust_SP_for_methodhandle_callsite), relocInfo::none);
#endif
// Returns unextended_sp in R3_RET.
__ mtctr(R31); // Move address of exception handler to SR_CTR.
__ reset_last_Java_frame();
__ mr(R1_SP, R3_RET); // Set sp to unextended_sp.
__ b(mh_done);
// Make sure all code is generated. // Make sure all code is generated.
masm->flush(); masm->flush();

View file

@ -44,6 +44,8 @@
#include "opto/runtime.hpp" #include "opto/runtime.hpp"
#endif #endif
#include <alloca.h>
#define __ masm-> #define __ masm->
#ifdef PRODUCT #ifdef PRODUCT
@ -62,7 +64,7 @@ class RegisterSaver {
// Support different return pc locations. // Support different return pc locations.
enum ReturnPCLocation { enum ReturnPCLocation {
return_pc_is_lr, return_pc_is_lr,
return_pc_is_r4, return_pc_is_pre_saved,
return_pc_is_thread_saved_exception_pc return_pc_is_thread_saved_exception_pc
}; };
@ -241,16 +243,17 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble
__ mfcr(R31); __ mfcr(R31);
__ std(R31, _abi(cr), R1_SP); __ std(R31, _abi(cr), R1_SP);
switch (return_pc_location) { switch (return_pc_location) {
case return_pc_is_lr: __ mflr(R31); break; case return_pc_is_lr: __ mflr(R31); break;
case return_pc_is_r4: __ mr(R31, R4); break; case return_pc_is_pre_saved: assert(return_pc_adjustment == 0, "unsupported"); break;
case return_pc_is_thread_saved_exception_pc: case return_pc_is_thread_saved_exception_pc: __ ld(R31, thread_(saved_exception_pc)); break;
__ ld(R31, thread_(saved_exception_pc)); break;
default: ShouldNotReachHere(); default: ShouldNotReachHere();
} }
if (return_pc_adjustment != 0) { if (return_pc_location != return_pc_is_pre_saved) {
__ addi(R31, R31, return_pc_adjustment); if (return_pc_adjustment != 0) {
__ addi(R31, R31, return_pc_adjustment);
}
__ std(R31, _abi(lr), R1_SP);
} }
__ std(R31, _abi(lr), R1_SP);
// push a new frame // push a new frame
__ push_frame(frame_size_in_bytes, R31); __ push_frame(frame_size_in_bytes, R31);
@ -646,7 +649,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
return round_to(stk, 2); return round_to(stk, 2);
} }
#ifdef COMPILER2 #if defined(COMPILER1) || defined(COMPILER2)
// Calling convention for calling C code. // Calling convention for calling C code.
int SharedRuntime::c_calling_convention(const BasicType *sig_bt, int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
VMRegPair *regs, VMRegPair *regs,
@ -2566,7 +2569,7 @@ uint SharedRuntime::out_preserve_stack_slots() {
#endif #endif
} }
#ifdef COMPILER2 #if defined(COMPILER1) || defined(COMPILER2)
// Frame generation for deopt and uncommon trap blobs. // Frame generation for deopt and uncommon trap blobs.
static void push_skeleton_frame(MacroAssembler* masm, bool deopt, static void push_skeleton_frame(MacroAssembler* masm, bool deopt,
/* Read */ /* Read */
@ -2713,7 +2716,7 @@ void SharedRuntime::generate_deopt_blob() {
const address start = __ pc(); const address start = __ pc();
#ifdef COMPILER2 #if defined(COMPILER1) || defined(COMPILER2)
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// Prolog for non exception case! // Prolog for non exception case!
@ -2762,28 +2765,43 @@ void SharedRuntime::generate_deopt_blob() {
BLOCK_COMMENT("Prolog for exception case"); BLOCK_COMMENT("Prolog for exception case");
// The RegisterSaver doesn't need to adjust the return pc for this situation.
const int return_pc_adjustment_exception = 0;
// Push the "unpack frame".
// Save everything in sight.
assert(R4 == R4_ARG2, "exception pc must be in r4");
RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&first_frame_size_in_bytes,
/*generate_oop_map=*/ false,
return_pc_adjustment_exception,
RegisterSaver::return_pc_is_r4);
// Deopt during an exception. Save exec mode for unpack_frames.
__ li(exec_mode_reg, Deoptimization::Unpack_exception);
// Store exception oop and pc in thread (location known to GC). // Store exception oop and pc in thread (location known to GC).
// This is needed since the call to "fetch_unroll_info()" may safepoint. // This is needed since the call to "fetch_unroll_info()" may safepoint.
__ std(R3_ARG1, in_bytes(JavaThread::exception_oop_offset()), R16_thread); __ std(R3_ARG1, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
__ std(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread); __ std(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
__ std(R4_ARG2, _abi(lr), R1_SP);
// Vanilla deoptimization with an exception pending in exception_oop.
int exception_in_tls_offset = __ pc() - start;
// Push the "unpack frame".
// Save everything in sight.
RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&first_frame_size_in_bytes,
/*generate_oop_map=*/ false,
/*return_pc_adjustment_exception=*/ 0,
RegisterSaver::return_pc_is_pre_saved);
// Deopt during an exception. Save exec mode for unpack_frames.
__ li(exec_mode_reg, Deoptimization::Unpack_exception);
// fall through // fall through
int reexecute_offset = 0;
#ifdef COMPILER1
__ b(exec_mode_initialized);
// Reexecute entry, similar to c2 uncommon trap
reexecute_offset = __ pc() - start;
RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&first_frame_size_in_bytes,
/*generate_oop_map=*/ false,
/*return_pc_adjustment_reexecute=*/ 0,
RegisterSaver::return_pc_is_pre_saved);
__ li(exec_mode_reg, Deoptimization::Unpack_reexecute);
#endif
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
__ BIND(exec_mode_initialized); __ BIND(exec_mode_initialized);
@ -2889,7 +2907,9 @@ void SharedRuntime::generate_deopt_blob() {
int exception_offset = __ pc() - start; int exception_offset = __ pc() - start;
#endif // COMPILER2 #endif // COMPILER2
_deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, 0, first_frame_size_in_bytes / wordSize); _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset,
reexecute_offset, first_frame_size_in_bytes / wordSize);
_deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
} }
#ifdef COMPILER2 #ifdef COMPILER2
@ -3196,3 +3216,245 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_bytes/wordSize, return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_bytes/wordSize,
oop_maps, true); oop_maps, true);
} }
//------------------------------Montgomery multiplication------------------------
//
// Subtract 0:b from carry:a. Return carry.
static unsigned long
sub(unsigned long a[], unsigned long b[], unsigned long carry, long len) {
long i = 0;
unsigned long tmp, tmp2;
__asm__ __volatile__ (
"subfc %[tmp], %[tmp], %[tmp] \n" // pre-set CA
"mtctr %[len] \n"
"0: \n"
"ldx %[tmp], %[i], %[a] \n"
"ldx %[tmp2], %[i], %[b] \n"
"subfe %[tmp], %[tmp2], %[tmp] \n" // subtract extended
"stdx %[tmp], %[i], %[a] \n"
"addi %[i], %[i], 8 \n"
"bdnz 0b \n"
"addme %[tmp], %[carry] \n" // carry + CA - 1
: [i]"+b"(i), [tmp]"=&r"(tmp), [tmp2]"=&r"(tmp2)
: [a]"r"(a), [b]"r"(b), [carry]"r"(carry), [len]"r"(len)
: "ctr", "xer", "memory"
);
return tmp;
}
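// A portable sketch of sub() above, assuming a compiler with unsigned
// __int128 (names illustrative): subtract b from carry:a in place and
// return the resulting carry word, propagating the borrow word by word.
static unsigned long sub_portable(unsigned long a[], unsigned long b[],
                                  unsigned long carry, long len) {
  unsigned long borrow = 0;
  for (long i = 0; i < len; i++) {
    unsigned __int128 d = (unsigned __int128)a[i] - b[i] - borrow;
    a[i] = (unsigned long)d;
    borrow = (unsigned long)(d >> 64) & 1; // 1 iff the subtraction wrapped
  }
  return carry - borrow;
}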
// Multiply (unsigned) Long A by Long B, accumulating the double-
// length result into the accumulator formed of T0, T1, and T2.
inline void MACC(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) {
unsigned long hi, lo;
__asm__ __volatile__ (
"mulld %[lo], %[A], %[B] \n"
"mulhdu %[hi], %[A], %[B] \n"
"addc %[T0], %[T0], %[lo] \n"
"adde %[T1], %[T1], %[hi] \n"
"addze %[T2], %[T2] \n"
: [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2)
: [A]"r"(A), [B]"r"(B)
: "xer"
);
}
// As above, but add twice the double-length result into the
// accumulator.
inline void MACC2(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) {
unsigned long hi, lo;
__asm__ __volatile__ (
"mulld %[lo], %[A], %[B] \n"
"mulhdu %[hi], %[A], %[B] \n"
"addc %[T0], %[T0], %[lo] \n"
"adde %[T1], %[T1], %[hi] \n"
"addze %[T2], %[T2] \n"
"addc %[T0], %[T0], %[lo] \n"
"adde %[T1], %[T1], %[hi] \n"
"addze %[T2], %[T2] \n"
: [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2)
: [A]"r"(A), [B]"r"(B)
: "xer"
);
}
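// Portable equivalent of MACC above (again assuming unsigned __int128):
// accumulate the 128-bit product A*B into the triple-word accumulator
// T0:T1:T2, letting the wide type carry between words instead of
// addc/adde/addze.
inline void MACC_portable(unsigned long A, unsigned long B,
                          unsigned long &T0, unsigned long &T1, unsigned long &T2) {
  unsigned __int128 p = (unsigned __int128)A * B;
  unsigned __int128 s = (unsigned __int128)T0 + (unsigned long)p; // low word
  T0 = (unsigned long)s;
  s = (s >> 64) + T1 + (unsigned long)(p >> 64);                  // middle word
  T1 = (unsigned long)s;
  T2 += (unsigned long)(s >> 64);                                 // carry out
}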
// Fast Montgomery multiplication. The derivation of the algorithm is
// in "A Cryptographic Library for the Motorola DSP56000,
// Dusse and Kaliski, Proc. EUROCRYPT 90, pp. 230-237".
static void
montgomery_multiply(unsigned long a[], unsigned long b[], unsigned long n[],
unsigned long m[], unsigned long inv, int len) {
unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
int i;
assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");
for (i = 0; i < len; i++) {
int j;
for (j = 0; j < i; j++) {
MACC(a[j], b[i-j], t0, t1, t2);
MACC(m[j], n[i-j], t0, t1, t2);
}
MACC(a[i], b[0], t0, t1, t2);
m[i] = t0 * inv;
MACC(m[i], n[0], t0, t1, t2);
assert(t0 == 0, "broken Montgomery multiply");
t0 = t1; t1 = t2; t2 = 0;
}
for (i = len; i < 2*len; i++) {
int j;
for (j = i-len+1; j < len; j++) {
MACC(a[j], b[i-j], t0, t1, t2);
MACC(m[j], n[i-j], t0, t1, t2);
}
m[i-len] = t0;
t0 = t1; t1 = t2; t2 = 0;
}
while (t0) {
t0 = sub(m, n, t0, len);
}
}
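// The single-word case of the routine above, written out with __int128 as an
// illustration: computes a * b * 2^-64 mod n for a, b < n, given
// inv == -n^-1 mod 2^64 as checked by the assert. The "q = t * inv" step
// picks the multiple of n that zeroes the low word, which is why t0 must be
// 0 after each reduction step.
static unsigned long montmul1(unsigned long a, unsigned long b,
                              unsigned long n, unsigned long inv) {
  unsigned __int128 t = (unsigned __int128)a * b;
  unsigned long     q = (unsigned long)t * inv;  // low word of t + q*n is 0
  unsigned __int128 u = (unsigned __int128)q * n;
  unsigned __int128 r = (t >> 64) + (u >> 64) + ((unsigned long)t != 0);
  return (unsigned long)(r >= n ? r - n : r);    // r < 2n, one subtract suffices
}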
// Fast Montgomery squaring. This uses asymptotically 25% fewer
// multiplies so it should be up to 25% faster than Montgomery
// multiplication. However, its loop control is more complex and it
// may actually run slower on some machines.
static void
montgomery_square(unsigned long a[], unsigned long n[],
unsigned long m[], unsigned long inv, int len) {
unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
int i;
assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");
for (i = 0; i < len; i++) {
int j;
int end = (i+1)/2;
for (j = 0; j < end; j++) {
MACC2(a[j], a[i-j], t0, t1, t2);
MACC(m[j], n[i-j], t0, t1, t2);
}
if ((i & 1) == 0) {
MACC(a[j], a[j], t0, t1, t2);
}
for (; j < i; j++) {
MACC(m[j], n[i-j], t0, t1, t2);
}
m[i] = t0 * inv;
MACC(m[i], n[0], t0, t1, t2);
assert(t0 == 0, "broken Montgomery square");
t0 = t1; t1 = t2; t2 = 0;
}
for (i = len; i < 2*len; i++) {
int start = i-len+1;
int end = start + (len - start)/2;
int j;
for (j = start; j < end; j++) {
MACC2(a[j], a[i-j], t0, t1, t2);
MACC(m[j], n[i-j], t0, t1, t2);
}
if ((i & 1) == 0) {
MACC(a[j], a[j], t0, t1, t2);
}
for (; j < len; j++) {
MACC(m[j], n[i-j], t0, t1, t2);
}
m[i-len] = t0;
t0 = t1; t1 = t2; t2 = 0;
}
while (t0) {
t0 = sub(m, n, t0, len);
}
}
// The threshold at which squaring becomes advantageous was determined
// experimentally on an i7-3930K (Sandy Bridge-E) CPU @ 3.5GHz.
// It has not been re-tuned for POWER8, so we use the same value.
#define MONTGOMERY_SQUARING_THRESHOLD 64
// Copy len longwords from s to d, word-swapping as we go. The
// destination array is reversed.
static void reverse_words(unsigned long *s, unsigned long *d, int len) {
d += len;
while(len-- > 0) {
d--;
unsigned long s_val = *s;
// Swap words in a longword on little endian machines.
#ifdef VM_LITTLE_ENDIAN
s_val = (s_val << 32) | (s_val >> 32);
#endif
*d = s_val;
s++;
}
}
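// Worked example of reverse_words with len == 2 on little endian:
//   s = { 0x1111111122222222, 0x3333333344444444 }
//   d = { 0x4444444433333333, 0x2222222211111111 }
// The longword order is reversed and, under VM_LITTLE_ENDIAN, the 32-bit
// halves of each longword are swapped, so index 0 of the result holds the
// least significant longword, as the multiply loops above expect.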
void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints,
jint len, jlong inv,
jint *m_ints) {
assert(len % 2 == 0, "array length in montgomery_multiply must be even");
int longwords = len/2;
assert(longwords > 0, "unsupported");
// Make very sure we don't use so much space that the stack might
// overflow. 512 jints corresponds to a 16384-bit integer and
// will use a total of 8K bytes of stack space here.
int total_allocation = longwords * sizeof (unsigned long) * 4;
guarantee(total_allocation <= 8192, "must be");
unsigned long *scratch = (unsigned long *)alloca(total_allocation);
// Local scratch arrays
unsigned long
*a = scratch + 0 * longwords,
*b = scratch + 1 * longwords,
*n = scratch + 2 * longwords,
*m = scratch + 3 * longwords;
reverse_words((unsigned long *)a_ints, a, longwords);
reverse_words((unsigned long *)b_ints, b, longwords);
reverse_words((unsigned long *)n_ints, n, longwords);
::montgomery_multiply(a, b, n, m, (unsigned long)inv, longwords);
reverse_words(m, (unsigned long *)m_ints, longwords);
}
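// Sizing example for the guarantee above: at len == 512 jints (the largest
// the guarantee admits), longwords == 256 and the four scratch arrays take
// 256 * 8 * 4 == 8192 bytes, exactly the 8K limit checked before the alloca.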
void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
jint len, jlong inv,
jint *m_ints) {
assert(len % 2 == 0, "array length in montgomery_square must be even");
int longwords = len/2;
assert(longwords > 0, "unsupported");
// Make very sure we don't use so much space that the stack might
// overflow. 512 jints corresponds to a 16384-bit integer and
// will use a total of 6K bytes of stack space here.
int total_allocation = longwords * sizeof (unsigned long) * 3;
guarantee(total_allocation <= 8192, "must be");
unsigned long *scratch = (unsigned long *)alloca(total_allocation);
// Local scratch arrays
unsigned long
*a = scratch + 0 * longwords,
*n = scratch + 1 * longwords,
*m = scratch + 2 * longwords;
reverse_words((unsigned long *)a_ints, a, longwords);
reverse_words((unsigned long *)n_ints, n, longwords);
if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
::montgomery_square(a, n, m, (unsigned long)inv, longwords);
} else {
::montgomery_multiply(a, a, n, m, (unsigned long)inv, longwords);
}
reverse_words(m, (unsigned long *)m_ints, longwords);
}

View file

@ -48,6 +48,12 @@
#define BLOCK_COMMENT(str) __ block_comment(str) #define BLOCK_COMMENT(str) __ block_comment(str)
#endif #endif
#if defined(ABI_ELFv2)
#define STUB_ENTRY(name) StubRoutines::name()
#else
#define STUB_ENTRY(name) ((FunctionDescriptor*)StubRoutines::name())->entry()
#endif
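// The macro folds the ELFv1/ELFv2 difference at its call sites, e.g.
//   address nooverlap_target = STUB_ENTRY(jbyte_disjoint_arraycopy);
// yields the function descriptor's entry() under ELFv1 and the plain stub
// address under ELFv2, as used by the arraycopy stubs below.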
class StubGenerator: public StubCodeGenerator { class StubGenerator: public StubCodeGenerator {
private: private:
@ -252,8 +258,7 @@ class StubGenerator: public StubCodeGenerator {
// //
// global toc register // global toc register
__ load_const(R29, MacroAssembler::global_toc(), R11_scratch1); __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R11_scratch1);
// Remember the senderSP so we interpreter can pop c2i arguments off of the stack // Remember the senderSP so we interpreter can pop c2i arguments off of the stack
// when called via a c2i. // when called via a c2i.
@ -612,14 +617,17 @@ class StubGenerator: public StubCodeGenerator {
// Kills: // Kills:
// nothing // nothing
// //
void gen_write_ref_array_pre_barrier(Register from, Register to, Register count, bool dest_uninitialized, Register Rtmp1) { void gen_write_ref_array_pre_barrier(Register from, Register to, Register count, bool dest_uninitialized, Register Rtmp1,
Register preserve1 = noreg, Register preserve2 = noreg) {
BarrierSet* const bs = Universe::heap()->barrier_set(); BarrierSet* const bs = Universe::heap()->barrier_set();
switch (bs->kind()) { switch (bs->kind()) {
case BarrierSet::G1SATBCTLogging: case BarrierSet::G1SATBCTLogging:
// With G1, don't generate the call if we statically know that the target is uninitialized // With G1, don't generate the call if we statically know that the target is uninitialized
if (!dest_uninitialized) { if (!dest_uninitialized) {
const int spill_slots = 4 * wordSize; int spill_slots = 3;
const int frame_size = frame::abi_reg_args_size + spill_slots; if (preserve1 != noreg) { spill_slots++; }
if (preserve2 != noreg) { spill_slots++; }
const int frame_size = align_size_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
Label filtered; Label filtered;
// Is marking active? // Is marking active?
@ -633,17 +641,23 @@ class StubGenerator: public StubCodeGenerator {
__ beq(CCR0, filtered); __ beq(CCR0, filtered);
__ save_LR_CR(R0); __ save_LR_CR(R0);
__ push_frame_reg_args(spill_slots, R0); __ push_frame(frame_size, R0);
__ std(from, frame_size - 1 * wordSize, R1_SP); int slot_nr = 0;
__ std(to, frame_size - 2 * wordSize, R1_SP); __ std(from, frame_size - (++slot_nr) * wordSize, R1_SP);
__ std(count, frame_size - 3 * wordSize, R1_SP); __ std(to, frame_size - (++slot_nr) * wordSize, R1_SP);
__ std(count, frame_size - (++slot_nr) * wordSize, R1_SP);
if (preserve1 != noreg) { __ std(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); }
if (preserve2 != noreg) { __ std(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), to, count); __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), to, count);
__ ld(from, frame_size - 1 * wordSize, R1_SP); slot_nr = 0;
__ ld(to, frame_size - 2 * wordSize, R1_SP); __ ld(from, frame_size - (++slot_nr) * wordSize, R1_SP);
__ ld(count, frame_size - 3 * wordSize, R1_SP); __ ld(to, frame_size - (++slot_nr) * wordSize, R1_SP);
__ pop_frame(); __ ld(count, frame_size - (++slot_nr) * wordSize, R1_SP);
if (preserve1 != noreg) { __ ld(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); }
if (preserve2 != noreg) { __ ld(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }
__ addi(R1_SP, R1_SP, frame_size); // pop_frame()
__ restore_LR_CR(R0); __ restore_LR_CR(R0);
__ bind(filtered); __ bind(filtered);
@ -667,27 +681,22 @@ class StubGenerator: public StubCodeGenerator {
// //
// The input registers and R0 are overwritten. // The input registers and R0 are overwritten.
// //
void gen_write_ref_array_post_barrier(Register addr, Register count, Register tmp, bool branchToEnd) { void gen_write_ref_array_post_barrier(Register addr, Register count, Register tmp, Register preserve = noreg) {
BarrierSet* const bs = Universe::heap()->barrier_set(); BarrierSet* const bs = Universe::heap()->barrier_set();
switch (bs->kind()) { switch (bs->kind()) {
case BarrierSet::G1SATBCTLogging: case BarrierSet::G1SATBCTLogging:
{ {
if (branchToEnd) { int spill_slots = (preserve != noreg) ? 1 : 0;
__ save_LR_CR(R0); const int frame_size = align_size_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
// We need this frame only to spill LR.
__ push_frame_reg_args(0, R0); __ save_LR_CR(R0);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count); __ push_frame(frame_size, R0);
__ pop_frame(); if (preserve != noreg) { __ std(preserve, frame_size - 1 * wordSize, R1_SP); }
__ restore_LR_CR(R0); __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count);
} else { if (preserve != noreg) { __ ld(preserve, frame_size - 1 * wordSize, R1_SP); }
// Tail call: fake call from stub caller by branching without linking. __ addi(R1_SP, R1_SP, frame_size); // pop_frame();
address entry_point = (address)CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post); __ restore_LR_CR(R0);
__ mr_if_needed(R3_ARG1, addr);
__ mr_if_needed(R4_ARG2, count);
__ load_const(R11, entry_point, R0);
__ call_c_and_return_to_caller(R11);
}
} }
break; break;
case BarrierSet::CardTableForRS: case BarrierSet::CardTableForRS:
@ -722,12 +731,9 @@ class StubGenerator: public StubCodeGenerator {
__ addi(addr, addr, 1); __ addi(addr, addr, 1);
__ bdnz(Lstore_loop); __ bdnz(Lstore_loop);
__ bind(Lskip_loop); __ bind(Lskip_loop);
if (!branchToEnd) __ blr();
} }
break; break;
case BarrierSet::ModRef: case BarrierSet::ModRef:
if (!branchToEnd) __ blr();
break; break;
default: default:
ShouldNotReachHere(); ShouldNotReachHere();
@ -756,8 +762,10 @@ class StubGenerator: public StubCodeGenerator {
// Procedure for large arrays (uses data cache block zero instruction). // Procedure for large arrays (uses data cache block zero instruction).
Label dwloop, fast, fastloop, restloop, lastdword, done; Label dwloop, fast, fastloop, restloop, lastdword, done;
int cl_size=VM_Version::get_cache_line_size(), cl_dwords=cl_size>>3, cl_dwordaddr_bits=exact_log2(cl_dwords); int cl_size = VM_Version::L1_data_cache_line_size();
int min_dcbz=2; // Needs to be positive, apply dcbz only to at least min_dcbz cache lines. int cl_dwords = cl_size >> 3;
int cl_dwordaddr_bits = exact_log2(cl_dwords);
int min_dcbz = 2; // Needs to be positive, apply dcbz only to at least min_dcbz cache lines.
// Clear up to 128byte boundary if long enough, dword_cnt=(16-(base>>3))%16. // Clear up to 128byte boundary if long enough, dword_cnt=(16-(base>>3))%16.
__ dcbtst(base_ptr_reg); // Indicate write access to first cache line ... __ dcbtst(base_ptr_reg); // Indicate write access to first cache line ...
@ -1074,7 +1082,6 @@ class StubGenerator: public StubCodeGenerator {
Register tmp1 = R6_ARG4; Register tmp1 = R6_ARG4;
Register tmp2 = R7_ARG5; Register tmp2 = R7_ARG5;
Label l_overlap;
#ifdef ASSERT #ifdef ASSERT
__ srdi_(tmp2, R5_ARG3, 31); __ srdi_(tmp2, R5_ARG3, 31);
__ asm_assert_eq("missing zero extend", 0xAFFE); __ asm_assert_eq("missing zero extend", 0xAFFE);
@ -1084,19 +1091,11 @@ class StubGenerator: public StubCodeGenerator {
__ sldi(tmp2, R5_ARG3, log2_elem_size); // size in bytes __ sldi(tmp2, R5_ARG3, log2_elem_size); // size in bytes
__ cmpld(CCR0, R3_ARG1, R4_ARG2); // Use unsigned comparison! __ cmpld(CCR0, R3_ARG1, R4_ARG2); // Use unsigned comparison!
__ cmpld(CCR1, tmp1, tmp2); __ cmpld(CCR1, tmp1, tmp2);
__ crand(CCR0, Assembler::less, CCR1, Assembler::less); __ crnand(CCR0, Assembler::less, CCR1, Assembler::less);
__ blt(CCR0, l_overlap); // Src before dst and distance smaller than size. // Overlaps if src is before dst and the distance is smaller than the size.
// Branch to forward copy routine otherwise (within range of 32kB).
__ bc(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::less), no_overlap_target);
// need to copy forwards
if (__ is_within_range_of_b(no_overlap_target, __ pc())) {
__ b(no_overlap_target);
} else {
__ load_const(tmp1, no_overlap_target, tmp2);
__ mtctr(tmp1);
__ bctr();
}
__ bind(l_overlap);
// need to copy backwards // need to copy backwards
} }
@ -1241,6 +1240,7 @@ class StubGenerator: public StubCodeGenerator {
} }
__ bind(l_4); __ bind(l_4);
__ li(R3_RET, 0); // return 0
__ blr(); __ blr();
return start; return start;
@ -1262,15 +1262,9 @@ class StubGenerator: public StubCodeGenerator {
Register tmp2 = R7_ARG5; Register tmp2 = R7_ARG5;
Register tmp3 = R8_ARG6; Register tmp3 = R8_ARG6;
#if defined(ABI_ELFv2)
address nooverlap_target = aligned ? address nooverlap_target = aligned ?
StubRoutines::arrayof_jbyte_disjoint_arraycopy() : STUB_ENTRY(arrayof_jbyte_disjoint_arraycopy) :
StubRoutines::jbyte_disjoint_arraycopy(); STUB_ENTRY(jbyte_disjoint_arraycopy);
#else
address nooverlap_target = aligned ?
((FunctionDescriptor*)StubRoutines::arrayof_jbyte_disjoint_arraycopy())->entry() :
((FunctionDescriptor*)StubRoutines::jbyte_disjoint_arraycopy())->entry();
#endif
array_overlap_test(nooverlap_target, 0); array_overlap_test(nooverlap_target, 0);
// Do reverse copy. We assume the case of actual overlap is rare enough // Do reverse copy. We assume the case of actual overlap is rare enough
@ -1285,6 +1279,7 @@ class StubGenerator: public StubCodeGenerator {
__ lbzx(tmp1, R3_ARG1, R5_ARG3); __ lbzx(tmp1, R3_ARG1, R5_ARG3);
__ bge(CCR0, l_1); __ bge(CCR0, l_1);
__ li(R3_RET, 0); // return 0
__ blr(); __ blr();
return start; return start;
@ -1467,6 +1462,7 @@ class StubGenerator: public StubCodeGenerator {
__ bdnz(l_5); __ bdnz(l_5);
} }
__ bind(l_4); __ bind(l_4);
__ li(R3_RET, 0); // return 0
__ blr(); __ blr();
return start; return start;
@ -1488,15 +1484,9 @@ class StubGenerator: public StubCodeGenerator {
Register tmp2 = R7_ARG5; Register tmp2 = R7_ARG5;
Register tmp3 = R8_ARG6; Register tmp3 = R8_ARG6;
#if defined(ABI_ELFv2)
address nooverlap_target = aligned ? address nooverlap_target = aligned ?
StubRoutines::arrayof_jshort_disjoint_arraycopy() : STUB_ENTRY(arrayof_jshort_disjoint_arraycopy) :
StubRoutines::jshort_disjoint_arraycopy(); STUB_ENTRY(jshort_disjoint_arraycopy);
#else
address nooverlap_target = aligned ?
((FunctionDescriptor*)StubRoutines::arrayof_jshort_disjoint_arraycopy())->entry() :
((FunctionDescriptor*)StubRoutines::jshort_disjoint_arraycopy())->entry();
#endif
array_overlap_test(nooverlap_target, 1); array_overlap_test(nooverlap_target, 1);
@ -1510,6 +1500,7 @@ class StubGenerator: public StubCodeGenerator {
__ lhzx(tmp2, R3_ARG1, tmp1); __ lhzx(tmp2, R3_ARG1, tmp1);
__ bge(CCR0, l_1); __ bge(CCR0, l_1);
__ li(R3_RET, 0); // return 0
__ blr(); __ blr();
return start; return start;
@ -1613,6 +1604,7 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", name); StubCodeMark mark(this, "StubRoutines", name);
address start = __ function_entry(); address start = __ function_entry();
generate_disjoint_int_copy_core(aligned); generate_disjoint_int_copy_core(aligned);
__ li(R3_RET, 0); // return 0
__ blr(); __ blr();
return start; return start;
} }
@ -1697,20 +1689,15 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", name); StubCodeMark mark(this, "StubRoutines", name);
address start = __ function_entry(); address start = __ function_entry();
#if defined(ABI_ELFv2)
address nooverlap_target = aligned ? address nooverlap_target = aligned ?
StubRoutines::arrayof_jint_disjoint_arraycopy() : STUB_ENTRY(arrayof_jint_disjoint_arraycopy) :
StubRoutines::jint_disjoint_arraycopy(); STUB_ENTRY(jint_disjoint_arraycopy);
#else
address nooverlap_target = aligned ?
((FunctionDescriptor*)StubRoutines::arrayof_jint_disjoint_arraycopy())->entry() :
((FunctionDescriptor*)StubRoutines::jint_disjoint_arraycopy())->entry();
#endif
array_overlap_test(nooverlap_target, 2); array_overlap_test(nooverlap_target, 2);
generate_conjoint_int_copy_core(aligned); generate_conjoint_int_copy_core(aligned);
__ li(R3_RET, 0); // return 0
__ blr(); __ blr();
return start; return start;
@ -1789,6 +1776,7 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", name); StubCodeMark mark(this, "StubRoutines", name);
address start = __ function_entry(); address start = __ function_entry();
generate_disjoint_long_copy_core(aligned); generate_disjoint_long_copy_core(aligned);
__ li(R3_RET, 0); // return 0
__ blr(); __ blr();
return start; return start;
@ -1871,19 +1859,14 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", name); StubCodeMark mark(this, "StubRoutines", name);
address start = __ function_entry(); address start = __ function_entry();
#if defined(ABI_ELFv2)
address nooverlap_target = aligned ? address nooverlap_target = aligned ?
StubRoutines::arrayof_jlong_disjoint_arraycopy() : STUB_ENTRY(arrayof_jlong_disjoint_arraycopy) :
StubRoutines::jlong_disjoint_arraycopy(); STUB_ENTRY(jlong_disjoint_arraycopy);
#else
address nooverlap_target = aligned ?
((FunctionDescriptor*)StubRoutines::arrayof_jlong_disjoint_arraycopy())->entry() :
((FunctionDescriptor*)StubRoutines::jlong_disjoint_arraycopy())->entry();
#endif
array_overlap_test(nooverlap_target, 3); array_overlap_test(nooverlap_target, 3);
generate_conjoint_long_copy_core(aligned); generate_conjoint_long_copy_core(aligned);
__ li(R3_RET, 0); // return 0
__ blr(); __ blr();
return start; return start;
@ -1903,15 +1886,9 @@ class StubGenerator: public StubCodeGenerator {
address start = __ function_entry(); address start = __ function_entry();
#if defined(ABI_ELFv2)
address nooverlap_target = aligned ? address nooverlap_target = aligned ?
StubRoutines::arrayof_oop_disjoint_arraycopy() : STUB_ENTRY(arrayof_oop_disjoint_arraycopy) :
StubRoutines::oop_disjoint_arraycopy(); STUB_ENTRY(oop_disjoint_arraycopy);
#else
address nooverlap_target = aligned ?
((FunctionDescriptor*)StubRoutines::arrayof_oop_disjoint_arraycopy())->entry() :
((FunctionDescriptor*)StubRoutines::oop_disjoint_arraycopy())->entry();
#endif
gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7); gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
@ -1927,7 +1904,9 @@ class StubGenerator: public StubCodeGenerator {
generate_conjoint_long_copy_core(aligned); generate_conjoint_long_copy_core(aligned);
} }
gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1, /*branchToEnd*/ false); gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1);
__ li(R3_RET, 0); // return 0
__ blr();
return start; return start;
} }
@ -1957,11 +1936,460 @@ class StubGenerator: public StubCodeGenerator {
generate_disjoint_long_copy_core(aligned); generate_disjoint_long_copy_core(aligned);
} }
gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1, /*branchToEnd*/ false); gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1);
__ li(R3_RET, 0); // return 0
__ blr();
return start; return start;
} }
// Helper for generating a dynamic type check.
// Smashes only the given temp registers.
void generate_type_check(Register sub_klass,
Register super_check_offset,
Register super_klass,
Register temp,
Label& L_success) {
assert_different_registers(sub_klass, super_check_offset, super_klass);
BLOCK_COMMENT("type_check:");
Label L_miss;
__ check_klass_subtype_fast_path(sub_klass, super_klass, temp, R0, &L_success, &L_miss, NULL,
super_check_offset);
__ check_klass_subtype_slow_path(sub_klass, super_klass, temp, R0, &L_success, NULL);
// Fall through on failure!
__ bind(L_miss);
}
// Generate stub for checked oop copy.
//
// Arguments for generated stub:
// from: R3
// to: R4
// count: R5 treated as signed
// ckoff: R6 (super_check_offset)
// ckval: R7 (super_klass)
// ret: R3 zero for success; (-1^K) where K is partial transfer count
//
address generate_checkcast_copy(const char *name, bool dest_uninitialized) {
const Register R3_from = R3_ARG1; // source array address
const Register R4_to = R4_ARG2; // destination array address
const Register R5_count = R5_ARG3; // elements count
const Register R6_ckoff = R6_ARG4; // super_check_offset
const Register R7_ckval = R7_ARG5; // super_klass
const Register R8_offset = R8_ARG6; // loop var, with stride wordSize
const Register R9_remain = R9_ARG7; // loop var, with stride -1
const Register R10_oop = R10_ARG8; // actual oop copied
const Register R11_klass = R11_scratch1; // oop._klass
const Register R12_tmp = R12_scratch2;
const Register R2_minus1 = R2;
//__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
address start = __ function_entry();
// TODO: Assert that int is 64 bit sign extended and arrays are not conjoint.
gen_write_ref_array_pre_barrier(R3_from, R4_to, R5_count, dest_uninitialized, R12_tmp, /* preserve: */ R6_ckoff, R7_ckval);
//inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, R12_tmp, R3_RET);
Label load_element, store_element, store_null, success, do_card_marks;
__ or_(R9_remain, R5_count, R5_count); // Initialize loop index, and test it.
__ li(R8_offset, 0); // Offset from start of arrays.
__ li(R2_minus1, -1);
__ bne(CCR0, load_element);
// Empty array: Nothing to do.
__ li(R3_RET, 0); // Return 0 on (trivial) success.
__ blr();
// ======== begin loop ========
// (Entry is load_element.)
__ align(OptoLoopAlignment);
__ bind(store_element);
if (UseCompressedOops) {
__ encode_heap_oop_not_null(R10_oop);
__ bind(store_null);
__ stw(R10_oop, R8_offset, R4_to);
} else {
__ bind(store_null);
__ std(R10_oop, R8_offset, R4_to);
}
__ addi(R8_offset, R8_offset, heapOopSize); // Step to next offset.
__ add_(R9_remain, R2_minus1, R9_remain); // Decrement the count.
__ beq(CCR0, success);
// ======== loop entry is here ========
__ bind(load_element);
__ load_heap_oop(R10_oop, R8_offset, R3_from, &store_null); // Load the oop.
__ load_klass(R11_klass, R10_oop); // Query the object klass.
generate_type_check(R11_klass, R6_ckoff, R7_ckval, R12_tmp,
// Branch to this on success:
store_element);
// ======== end loop ========
// It was a real error; we must depend on the caller to finish the job.
// Register R9_remain has number of *remaining* oops, R5_count number of *total* oops.
// Emit GC store barriers for the oops we have copied (R5_count minus R9_remain),
// and report their number to the caller.
__ subf_(R5_count, R9_remain, R5_count);
__ nand(R3_RET, R5_count, R5_count); // report (-1^K) to caller
__ bne(CCR0, do_card_marks);
__ blr();
__ bind(success);
__ li(R3_RET, 0);
__ bind(do_card_marks);
// Store check on R4_to[0..R5_count-1].
gen_write_ref_array_post_barrier(R4_to, R5_count, R12_tmp, /* preserve: */ R3_RET);
__ blr();
return start;
}
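// Reference shape of the copy loop above (illustrative names; the predicate
// stands in for the inlined subtype check against ckoff/ckval):
template <typename T, typename Pred>
static int checkcast_copy_ref(const T* from, T* to, int count, Pred type_ok) {
  for (int k = 0; k < count; k++) {
    T elem = from[k];
    if (elem != T() && !type_ok(elem)) {
      return ~k;    // k elements copied: report (-1 ^ K), as in R3_RET above
    }
    to[k] = elem;   // nulls are stored unchecked, via the store_null path
  }
  return 0;         // full success returns 0
}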
// Generate 'unsafe' array copy stub.
// Though just as safe as the other stubs, it takes an unscaled
// size_t argument instead of an element count.
//
// Arguments for generated stub:
// from: R3
// to: R4
// count: R5 byte count, treated as ssize_t, can be zero
//
// Examines the alignment of the operands and dispatches
// to a long, int, short, or byte copy loop.
//
address generate_unsafe_copy(const char* name,
address byte_copy_entry,
address short_copy_entry,
address int_copy_entry,
address long_copy_entry) {
const Register R3_from = R3_ARG1; // source array address
const Register R4_to = R4_ARG2; // destination array address
const Register R5_count = R5_ARG3; // elements count (as long on PPC64)
const Register R6_bits = R6_ARG4; // test copy of low bits
const Register R7_tmp = R7_ARG5;
//__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
address start = __ function_entry();
// Bump this on entry, not on exit:
//inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr, R6_bits, R7_tmp);
Label short_copy, int_copy, long_copy;
__ orr(R6_bits, R3_from, R4_to);
__ orr(R6_bits, R6_bits, R5_count);
__ andi_(R0, R6_bits, (BytesPerLong-1));
__ beq(CCR0, long_copy);
__ andi_(R0, R6_bits, (BytesPerInt-1));
__ beq(CCR0, int_copy);
__ andi_(R0, R6_bits, (BytesPerShort-1));
__ beq(CCR0, short_copy);
// byte_copy:
__ b(byte_copy_entry);
__ bind(short_copy);
__ srwi(R5_count, R5_count, LogBytesPerShort);
__ b(short_copy_entry);
__ bind(int_copy);
__ srwi(R5_count, R5_count, LogBytesPerInt);
__ b(int_copy_entry);
__ bind(long_copy);
__ srwi(R5_count, R5_count, LogBytesPerLong);
__ b(long_copy_entry);
return start;
}
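// The dispatch above in C terms (the copy_* routines are illustrative
// stand-ins for the four stub entry points): OR both addresses and the byte
// count, then pick the widest element size whose alignment mask comes out clean.
static void copy_bytes (const char* f, char* t, size_t n);  // byte_copy_entry
static void copy_shorts(const char* f, char* t, size_t n);  // short_copy_entry
static void copy_ints  (const char* f, char* t, size_t n);  // int_copy_entry
static void copy_longs (const char* f, char* t, size_t n);  // long_copy_entry

static void unsafe_copy_ref(const char* from, char* to, size_t count) {
  size_t bits = (size_t)from | (size_t)to | count;
  if      ((bits & (BytesPerLong  - 1)) == 0) copy_longs (from, to, count >> LogBytesPerLong);
  else if ((bits & (BytesPerInt   - 1)) == 0) copy_ints  (from, to, count >> LogBytesPerInt);
  else if ((bits & (BytesPerShort - 1)) == 0) copy_shorts(from, to, count >> LogBytesPerShort);
  else                                        copy_bytes (from, to, count);
}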
// Perform range checks on the proposed arraycopy.
// Kills the two temps, but nothing else.
// Also, clean the sign bits of src_pos and dst_pos.
void arraycopy_range_checks(Register src, // source array oop
Register src_pos, // source position
Register dst, // destination array oop
Register dst_pos, // destination position
Register length, // length of copy
Register temp1, Register temp2,
Label& L_failed) {
BLOCK_COMMENT("arraycopy_range_checks:");
const Register array_length = temp1; // scratch
const Register end_pos = temp2; // scratch
// if (src_pos + length > arrayOop(src)->length() ) FAIL;
__ lwa(array_length, arrayOopDesc::length_offset_in_bytes(), src);
__ add(end_pos, src_pos, length); // src_pos + length
__ cmpd(CCR0, end_pos, array_length);
__ bgt(CCR0, L_failed);
// if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
__ lwa(array_length, arrayOopDesc::length_offset_in_bytes(), dst);
__ add(end_pos, dst_pos, length); // dst_pos + length
__ cmpd(CCR0, end_pos, array_length);
__ bgt(CCR0, L_failed);
BLOCK_COMMENT("arraycopy_range_checks done");
}
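// The same bound in plain C (illustrative): the caller sign-extends the
// 32-bit positions first, so pos + length is computed in 64 bits and cannot
// wrap before the comparison.
static bool range_ok(jlong pos, jlong length, jint array_length) {
  return pos + length <= (jlong)array_length;
}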
//
// Generate generic array copy stubs
//
// Input:
// R3 - src oop
// R4 - src_pos
// R5 - dst oop
// R6 - dst_pos
// R7 - element count
//
// Output:
// R3 == 0 - success
// R3 == -1 - need to call System.arraycopy
//
address generate_generic_copy(const char *name,
address entry_jbyte_arraycopy,
address entry_jshort_arraycopy,
address entry_jint_arraycopy,
address entry_oop_arraycopy,
address entry_disjoint_oop_arraycopy,
address entry_jlong_arraycopy,
address entry_checkcast_arraycopy) {
Label L_failed, L_objArray;
// Input registers
const Register src = R3_ARG1; // source array oop
const Register src_pos = R4_ARG2; // source position
const Register dst = R5_ARG3; // destination array oop
const Register dst_pos = R6_ARG4; // destination position
const Register length = R7_ARG5; // elements count
// registers used as temp
const Register src_klass = R8_ARG6; // source array klass
const Register dst_klass = R9_ARG7; // destination array klass
const Register lh = R10_ARG8; // layout helper
const Register temp = R2;
//__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
address start = __ function_entry();
// Bump this on entry, not on exit:
//inc_counter_np(SharedRuntime::_generic_array_copy_ctr, lh, temp);
// In principle, the int arguments could be dirty.
//-----------------------------------------------------------------------
// Assembler stubs will be used for this call to arraycopy
// if the following conditions are met:
//
// (1) src and dst must not be null.
// (2) src_pos must not be negative.
// (3) dst_pos must not be negative.
// (4) length must not be negative.
// (5) src klass and dst klass should be the same and not NULL.
// (6) src and dst should be arrays.
// (7) src_pos + length must not exceed length of src.
// (8) dst_pos + length must not exceed length of dst.
BLOCK_COMMENT("arraycopy initial argument checks");
__ cmpdi(CCR1, src, 0); // if (src == NULL) return -1;
__ extsw_(src_pos, src_pos); // if (src_pos < 0) return -1;
__ cmpdi(CCR5, dst, 0); // if (dst == NULL) return -1;
__ cror(CCR1, Assembler::equal, CCR0, Assembler::less);
__ extsw_(dst_pos, dst_pos); // if (dst_pos < 0) return -1;
__ cror(CCR5, Assembler::equal, CCR0, Assembler::less);
__ extsw_(length, length); // if (length < 0) return -1;
__ cror(CCR1, Assembler::equal, CCR5, Assembler::equal);
__ cror(CCR1, Assembler::equal, CCR0, Assembler::less);
__ beq(CCR1, L_failed);
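// Net effect of the condition-register combining above, in plain C
// (illustrative; any failed condition routes to L_failed, which returns -1):
static bool arraycopy_args_ok(const void* src, jint src_pos,
                              const void* dst, jint dst_pos, jint length) {
  return src != NULL && dst != NULL &&
         src_pos >= 0 && dst_pos >= 0 && length >= 0;
}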
BLOCK_COMMENT("arraycopy argument klass checks");
__ load_klass(src_klass, src);
__ load_klass(dst_klass, dst);
// Load layout helper
//
// |array_tag| | header_size | element_type | |log2_element_size|
// 32 30 24 16 8 2 0
//
// array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
//
int lh_offset = in_bytes(Klass::layout_helper_offset());
// Load the 32-bit signed layout helper value.
__ lwz(lh, lh_offset, src_klass);
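// Decoding sketch for the layout helper fields used below (positions per the
// diagram above; plain shifts instead of the later rldicl/andi):
static void decode_layout_helper(jint lh, int& tag, int& hsize, int& l2es) {
  tag   = ((unsigned int)lh >> 30) & 0x3;  // array tag: 0x3 typeArray, 0x2 objArray
  hsize = (lh >> 16) & 0xff;               // array header size in bytes
  l2es  = lh & 0x3f;                       // log2 element size (_lh_log2_element_size_mask)
}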
// Handle objArrays completely differently...
jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
__ load_const_optimized(temp, objArray_lh, R0);
__ cmpw(CCR0, lh, temp);
__ beq(CCR0, L_objArray);
__ cmpd(CCR5, src_klass, dst_klass); // if (src->klass() != dst->klass()) return -1;
__ cmpwi(CCR6, lh, Klass::_lh_neutral_value); // if (!src->is_Array()) return -1;
__ crnand(CCR5, Assembler::equal, CCR6, Assembler::less);
__ beq(CCR5, L_failed);
// At this point, it is known to be a typeArray (array_tag 0x3).
#ifdef ASSERT
{ Label L;
jint lh_prim_tag_in_place = (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
__ load_const_optimized(temp, lh_prim_tag_in_place, R0);
__ cmpw(CCR0, lh, temp);
__ bge(CCR0, L);
__ stop("must be a primitive array");
__ bind(L);
}
#endif
arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
temp, dst_klass, L_failed);
// TypeArrayKlass
//
// src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
// dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
//
const Register offset = dst_klass; // array offset
const Register elsize = src_klass; // log2 element size
__ rldicl(offset, lh, 64 - Klass::_lh_header_size_shift, 64 - exact_log2(Klass::_lh_header_size_mask + 1));
__ andi(elsize, lh, Klass::_lh_log2_element_size_mask);
__ add(src, offset, src); // src array offset
__ add(dst, offset, dst); // dst array offset
// The following registers must be set before the jump to the corresponding stub.
const Register from = R3_ARG1; // source array address
const Register to = R4_ARG2; // destination array address
const Register count = R5_ARG3; // elements count
// 'from', 'to', 'count' registers should be set in this order
// since they are the same as 'src', 'src_pos', 'dst'.
BLOCK_COMMENT("scale indexes to element size");
__ sld(src_pos, src_pos, elsize);
__ sld(dst_pos, dst_pos, elsize);
__ add(from, src_pos, src); // src_addr
__ add(to, dst_pos, dst); // dst_addr
__ mr(count, length); // length
BLOCK_COMMENT("choose copy loop based on element size");
// Use conditional branches, which have a 32 kB displacement range.
const int bo = Assembler::bcondCRbiIs1, bi = Assembler::bi0(CCR0, Assembler::equal);
__ cmpwi(CCR0, elsize, 0);
__ bc(bo, bi, entry_jbyte_arraycopy);
__ cmpwi(CCR0, elsize, LogBytesPerShort);
__ bc(bo, bi, entry_jshort_arraycopy);
__ cmpwi(CCR0, elsize, LogBytesPerInt);
__ bc(bo, bi, entry_jint_arraycopy);
#ifdef ASSERT
{ Label L;
__ cmpwi(CCR0, elsize, LogBytesPerLong);
__ beq(CCR0, L);
__ stop("must be long copy, but elsize is wrong");
__ bind(L);
}
#endif
__ b(entry_jlong_arraycopy);
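// The three bc() dispatches plus the fall-through above amount to a
// switch on the log2 element size (sketch; 'copy_entry' is a
// hypothetical local standing in for the chosen stub address):
#if 0
switch (elsize) {
  case 0:                copy_entry = entry_jbyte_arraycopy;  break;
  case LogBytesPerShort: copy_entry = entry_jshort_arraycopy; break;
  case LogBytesPerInt:   copy_entry = entry_jint_arraycopy;   break;
  default:               copy_entry = entry_jlong_arraycopy;  break; // asserted LogBytesPerLong
}
#endif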
// ObjArrayKlass
__ bind(L_objArray);
// live at this point: src_klass, dst_klass, src[_pos], dst[_pos], length
Label L_disjoint_plain_copy, L_checkcast_copy;
// test array classes for subtyping
__ cmpd(CCR0, src_klass, dst_klass); // usual case is exact equality
__ bne(CCR0, L_checkcast_copy);
// Identically typed arrays can be copied without element-wise checks.
arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
temp, lh, L_failed);
__ addi(src, src, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //src offset
__ addi(dst, dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //dst offset
__ sldi(src_pos, src_pos, LogBytesPerHeapOop);
__ sldi(dst_pos, dst_pos, LogBytesPerHeapOop);
__ add(from, src_pos, src); // src_addr
__ add(to, dst_pos, dst); // dst_addr
__ mr(count, length); // length
__ b(entry_oop_arraycopy);
__ bind(L_checkcast_copy);
// live at this point: src_klass, dst_klass
{
// Before looking at dst.length, make sure dst is also an objArray.
__ lwz(temp, lh_offset, dst_klass);
__ cmpw(CCR0, lh, temp);
__ bne(CCR0, L_failed);
// It is safe to examine both src.length and dst.length.
arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
temp, lh, L_failed);
// Marshal the base address arguments now, freeing registers.
__ addi(src, src, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //src offset
__ addi(dst, dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //dst offset
__ sldi(src_pos, src_pos, LogBytesPerHeapOop);
__ sldi(dst_pos, dst_pos, LogBytesPerHeapOop);
__ add(from, src_pos, src); // src_addr
__ add(to, dst_pos, dst); // dst_addr
__ mr(count, length); // length
Register sco_temp = R6_ARG4; // This register is free now.
assert_different_registers(from, to, count, sco_temp,
dst_klass, src_klass);
// Generate the type check.
int sco_offset = in_bytes(Klass::super_check_offset_offset());
__ lwz(sco_temp, sco_offset, dst_klass);
generate_type_check(src_klass, sco_temp, dst_klass,
temp, L_disjoint_plain_copy);
// Fetch destination element klass from the ObjArrayKlass header.
int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
// The checkcast_copy loop needs two extra arguments:
__ ld(R7_ARG5, ek_offset, dst_klass); // dest elem klass
__ lwz(R6_ARG4, sco_offset, R7_ARG5); // sco of elem klass
__ b(entry_checkcast_arraycopy);
}
__ bind(L_disjoint_plain_copy);
__ b(entry_disjoint_oop_arraycopy);
__ bind(L_failed);
__ li(R3_RET, -1); // return -1
__ blr();
return start;
}
void generate_arraycopy_stubs() { void generate_arraycopy_stubs() {
// Note: the disjoint stubs must be generated first, some of // Note: the disjoint stubs must be generated first, some of
// the conjoint stubs use them. // the conjoint stubs use them.
@@ -1998,6 +2426,24 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy", false); StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy", false);
StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy", true); StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy", true);
// special/generic versions
StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", false);
StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", true);
StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy",
STUB_ENTRY(jbyte_arraycopy),
STUB_ENTRY(jshort_arraycopy),
STUB_ENTRY(jint_arraycopy),
STUB_ENTRY(jlong_arraycopy));
StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy",
STUB_ENTRY(jbyte_arraycopy),
STUB_ENTRY(jshort_arraycopy),
STUB_ENTRY(jint_arraycopy),
STUB_ENTRY(oop_arraycopy),
STUB_ENTRY(oop_disjoint_arraycopy),
STUB_ENTRY(jlong_arraycopy),
STUB_ENTRY(checkcast_arraycopy));
// fill routines // fill routines
StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
@@ -2228,6 +2674,15 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_multiplyToLen = generate_multiplyToLen(); StubRoutines::_multiplyToLen = generate_multiplyToLen();
} }
#endif #endif
if (UseMontgomeryMultiplyIntrinsic) {
StubRoutines::_montgomeryMultiply
= CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
}
if (UseMontgomerySquareIntrinsic) {
StubRoutines::_montgomerySquare
= CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
}
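// What the Montgomery intrinsic computes, sketched for a single 64-bit
// word (illustrative only; the SharedRuntime entries operate on
// multi-word BigInteger magnitudes). Given an odd modulus n and
// n_prime = -n^-1 mod 2^64, this returns a*b*2^-64 mod n:
#if 0
static julong montgomery_multiply_word(julong a, julong b,
                                       julong n, julong n_prime) {
  unsigned __int128 t = (unsigned __int128)a * b;
  julong m = (julong)t * n_prime;                    // t*n' mod 2^64
  unsigned __int128 u = (t + (unsigned __int128)m * n) >> 64;
  return u >= n ? (julong)(u - n) : (julong)u;       // one conditional subtract
}
#endif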
} }
public: public:

View file

@@ -34,7 +34,7 @@
// CRC32 Intrinsics. // CRC32 Intrinsics.
void StubRoutines::ppc64::generate_load_crc_table_addr(MacroAssembler* masm, Register table) { void StubRoutines::ppc64::generate_load_crc_table_addr(MacroAssembler* masm, Register table) {
__ load_const(table, StubRoutines::_crc_table_adr); __ load_const_optimized(table, StubRoutines::_crc_table_adr, R0);
} }
// CRC32 Intrinsics. // CRC32 Intrinsics.

View file

@@ -254,34 +254,33 @@ void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label*
if (TieredCompilation) { if (TieredCompilation) {
const int increment = InvocationCounter::count_increment; const int increment = InvocationCounter::count_increment;
const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
Label no_mdo; Label no_mdo;
if (ProfileInterpreter) { if (ProfileInterpreter) {
const Register Rmdo = Rscratch1; const Register Rmdo = R3_counters;
// If no method data exists, go to profile_continue. // If no method data exists, go to profile_continue.
__ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method); __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
__ cmpdi(CCR0, Rmdo, 0); __ cmpdi(CCR0, Rmdo, 0);
__ beq(CCR0, no_mdo); __ beq(CCR0, no_mdo);
// Increment backedge counter in the MDO. // Increment invocation counter in the MDO.
const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset()); const int mdo_ic_offs = in_bytes(MethodData::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
__ lwz(Rscratch2, mdo_bc_offs, Rmdo); __ lwz(Rscratch2, mdo_ic_offs, Rmdo);
__ lwz(Rscratch1, in_bytes(MethodData::invoke_mask_offset()), Rmdo);
__ addi(Rscratch2, Rscratch2, increment); __ addi(Rscratch2, Rscratch2, increment);
__ stw(Rscratch2, mdo_bc_offs, Rmdo); __ stw(Rscratch2, mdo_ic_offs, Rmdo);
__ load_const_optimized(Rscratch1, mask, R0);
__ and_(Rscratch1, Rscratch2, Rscratch1); __ and_(Rscratch1, Rscratch2, Rscratch1);
__ bne(CCR0, done); __ bne(CCR0, done);
__ b(*overflow); __ b(*overflow);
} }
// Increment counter in MethodCounters*. // Increment counter in MethodCounters*.
const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset()); const int mo_ic_offs = in_bytes(MethodCounters::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
__ bind(no_mdo); __ bind(no_mdo);
__ get_method_counters(R19_method, R3_counters, done); __ get_method_counters(R19_method, R3_counters, done);
__ lwz(Rscratch2, mo_bc_offs, R3_counters); __ lwz(Rscratch2, mo_ic_offs, R3_counters);
__ lwz(Rscratch1, in_bytes(MethodCounters::invoke_mask_offset()), R3_counters);
__ addi(Rscratch2, Rscratch2, increment); __ addi(Rscratch2, Rscratch2, increment);
__ stw(Rscratch2, mo_bc_offs, R3_counters); __ stw(Rscratch2, mo_ic_offs, R3_counters);
__ load_const_optimized(Rscratch1, mask, R0);
__ and_(Rscratch1, Rscratch2, Rscratch1); __ and_(Rscratch1, Rscratch2, Rscratch1);
__ beq(CCR0, *overflow); __ beq(CCR0, *overflow);
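// Semantics of the bump-and-mask sequence above (sketch): the mask now
// comes from the MethodCounters/MethodData object instead of a
// compile-time constant, but the test is unchanged; the and_ result is
// zero exactly when the counter crosses the next notification
// threshold. 'notify_overflow' is a hypothetical stand-in for the
// branch to *overflow.
#if 0
counter += InvocationCounter::count_increment;
if ((counter & invoke_mask) == 0) notify_overflow();
#endif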
@@ -302,8 +301,7 @@ void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label*
// Check if we must create a method data obj. // Check if we must create a method data obj.
if (ProfileInterpreter && profile_method != NULL) { if (ProfileInterpreter && profile_method != NULL) {
const Register profile_limit = Rscratch1; const Register profile_limit = Rscratch1;
int pl_offs = __ load_const_optimized(profile_limit, &InvocationCounter::InterpreterProfileLimit, R0, true); __ lwz(profile_limit, in_bytes(MethodCounters::interpreter_profile_limit_offset()), R3_counters);
__ lwz(profile_limit, pl_offs, profile_limit);
// Test to see if we should create a method data oop. // Test to see if we should create a method data oop.
__ cmpw(CCR0, Rsum_ivc_bec, profile_limit); __ cmpw(CCR0, Rsum_ivc_bec, profile_limit);
__ blt(CCR0, *profile_method_continue); __ blt(CCR0, *profile_method_continue);
@@ -313,9 +311,7 @@ void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label*
// Finally check for counter overflow. // Finally check for counter overflow.
if (overflow) { if (overflow) {
const Register invocation_limit = Rscratch1; const Register invocation_limit = Rscratch1;
int il_offs = __ load_const_optimized(invocation_limit, &InvocationCounter::InterpreterInvocationLimit, R0, true); __ lwz(invocation_limit, in_bytes(MethodCounters::interpreter_invocation_limit_offset()), R3_counters);
__ lwz(invocation_limit, il_offs, invocation_limit);
assert(4 == sizeof(InvocationCounter::InterpreterInvocationLimit), "unexpected field size");
__ cmpw(CCR0, Rsum_ivc_bec, invocation_limit); __ cmpw(CCR0, Rsum_ivc_bec, invocation_limit);
__ bge(CCR0, *overflow); __ bge(CCR0, *overflow);
} }

View file

@@ -139,9 +139,9 @@ void AbstractInterpreter::layout_activation(Method* method,
intptr_t* locals_base = (caller->is_interpreted_frame()) ? intptr_t* locals_base = (caller->is_interpreted_frame()) ?
caller->interpreter_frame_esp() + caller_actual_parameters : caller->interpreter_frame_esp() + caller_actual_parameters :
caller->sp() + method->max_locals() - 1 + (frame::abi_minframe_size / Interpreter::stackElementSize) ; caller->sp() + method->max_locals() - 1 + (frame::abi_minframe_size / Interpreter::stackElementSize);
intptr_t* monitor_base = caller->sp() - frame::ijava_state_size / Interpreter::stackElementSize ; intptr_t* monitor_base = caller->sp() - frame::ijava_state_size / Interpreter::stackElementSize;
intptr_t* monitor = monitor_base - (moncount * frame::interpreter_frame_monitor_size()); intptr_t* monitor = monitor_base - (moncount * frame::interpreter_frame_monitor_size());
intptr_t* esp_base = monitor - 1; intptr_t* esp_base = monitor - 1;
intptr_t* esp = esp_base - tempcount - popframe_extra_args; intptr_t* esp = esp_base - tempcount - popframe_extra_args;

View file

@@ -0,0 +1,42 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2015 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_TEMPLATEINTERPRETER_PPC_HPP
#define CPU_PPC_VM_TEMPLATEINTERPRETER_PPC_HPP
protected:
// Size of interpreter code. Increase if too small. Interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
const static int InterpreterCodeSize = 230*K;
public:
// Support abs and sqrt like in compiler.
// For others we can use a normal (native) entry.
static bool math_entry_available(AbstractInterpreter::MethodKind kind);
#endif // CPU_PPC_VM_TEMPLATEINTERPRETER_PPC_HPP

View file

@@ -1624,12 +1624,13 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// Normal (non-jsr) branch handling // Normal (non-jsr) branch handling
// Bump bytecode pointer by displacement (take the branch).
__ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.
const bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter; const bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
if (increment_invocation_counter_for_backward_branches) { if (increment_invocation_counter_for_backward_branches) {
//__ unimplemented("branch invocation counter");
Label Lforward; Label Lforward;
__ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr. __ dispatch_prolog(vtos);
// Check branch direction. // Check branch direction.
__ cmpdi(CCR0, Rdisp, 0); __ cmpdi(CCR0, Rdisp, 0);
@@ -1640,7 +1641,6 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
if (TieredCompilation) { if (TieredCompilation) {
Label Lno_mdo, Loverflow; Label Lno_mdo, Loverflow;
const int increment = InvocationCounter::count_increment; const int increment = InvocationCounter::count_increment;
const int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
if (ProfileInterpreter) { if (ProfileInterpreter) {
Register Rmdo = Rscratch1; Register Rmdo = Rscratch1;
@@ -1652,7 +1652,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// Increment backedge counter in the MDO. // Increment backedge counter in the MDO.
const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset()); const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
__ lwz(Rscratch2, mdo_bc_offs, Rmdo); __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
__ load_const_optimized(Rscratch3, mask, R0); __ lwz(Rscratch3, in_bytes(MethodData::backedge_mask_offset()), Rmdo);
__ addi(Rscratch2, Rscratch2, increment); __ addi(Rscratch2, Rscratch2, increment);
__ stw(Rscratch2, mdo_bc_offs, Rmdo); __ stw(Rscratch2, mdo_bc_offs, Rmdo);
__ and_(Rscratch3, Rscratch2, Rscratch3); __ and_(Rscratch3, Rscratch2, Rscratch3);
@@ -1664,19 +1664,19 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset()); const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
__ bind(Lno_mdo); __ bind(Lno_mdo);
__ lwz(Rscratch2, mo_bc_offs, R4_counters); __ lwz(Rscratch2, mo_bc_offs, R4_counters);
__ load_const_optimized(Rscratch3, mask, R0); __ lwz(Rscratch3, in_bytes(MethodCounters::backedge_mask_offset()), R4_counters);
__ addi(Rscratch2, Rscratch2, increment); __ addi(Rscratch2, Rscratch2, increment);
__ stw(Rscratch2, mo_bc_offs, R19_method); __ stw(Rscratch2, mo_bc_offs, R4_counters);
__ and_(Rscratch3, Rscratch2, Rscratch3); __ and_(Rscratch3, Rscratch2, Rscratch3);
__ bne(CCR0, Lforward); __ bne(CCR0, Lforward);
__ bind(Loverflow); __ bind(Loverflow);
// Notify point for loop, pass branch bytecode. // Notify point for loop, pass branch bytecode.
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R14_bcp, true); __ subf(R4_ARG2, Rdisp, R14_bcp); // Compute branch bytecode (previous bcp).
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);
// Was an OSR adapter generated? // Was an OSR adapter generated?
// O0 = osr nmethod
__ cmpdi(CCR0, R3_RET, 0); __ cmpdi(CCR0, R3_RET, 0);
__ beq(CCR0, Lforward); __ beq(CCR0, Lforward);
@@ -1712,27 +1712,23 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
__ increment_backedge_counter(R4_counters, invoke_ctr, Rscratch2, Rscratch3); __ increment_backedge_counter(R4_counters, invoke_ctr, Rscratch2, Rscratch3);
if (ProfileInterpreter) { if (ProfileInterpreter) {
__ test_invocation_counter_for_mdp(invoke_ctr, Rscratch2, Lforward); __ test_invocation_counter_for_mdp(invoke_ctr, R4_counters, Rscratch2, Lforward);
if (UseOnStackReplacement) { if (UseOnStackReplacement) {
__ test_backedge_count_for_osr(bumped_count, R14_bcp, Rscratch2); __ test_backedge_count_for_osr(bumped_count, R4_counters, R14_bcp, Rdisp, Rscratch2);
} }
} else { } else {
if (UseOnStackReplacement) { if (UseOnStackReplacement) {
__ test_backedge_count_for_osr(invoke_ctr, R14_bcp, Rscratch2); __ test_backedge_count_for_osr(invoke_ctr, R4_counters, R14_bcp, Rdisp, Rscratch2);
} }
} }
} }
__ bind(Lforward); __ bind(Lforward);
__ dispatch_epilog(vtos);
} else { } else {
// Bump bytecode pointer by displacement (take the branch). __ dispatch_next(vtos);
__ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.
} }
// Continue with bytecode @ target.
// %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
// %%%%% and changing dispatch_next to dispatch_only.
__ dispatch_next(vtos);
} }
// Helper function for if_cmp* methods below. // Helper function for if_cmp* methods below.

View file

@@ -37,9 +37,6 @@
# include <sys/sysinfo.h> # include <sys/sysinfo.h>
int VM_Version::_features = VM_Version::unknown_m;
int VM_Version::_measured_cache_line_size = 32; // pessimistic init value
const char* VM_Version::_features_str = "";
bool VM_Version::_is_determine_features_test_running = false; bool VM_Version::_is_determine_features_test_running = false;
@@ -56,7 +53,7 @@ void VM_Version::initialize() {
// If PowerArchitecturePPC64 hasn't been specified explicitly determine from features. // If PowerArchitecturePPC64 hasn't been specified explicitly determine from features.
if (FLAG_IS_DEFAULT(PowerArchitecturePPC64)) { if (FLAG_IS_DEFAULT(PowerArchitecturePPC64)) {
if (VM_Version::has_lqarx()) { if (VM_Version::has_tcheck() && VM_Version::has_lqarx()) {
FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 8); FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 8);
} else if (VM_Version::has_popcntw()) { } else if (VM_Version::has_popcntw()) {
FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 7); FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 7);
@@ -68,10 +65,19 @@ void VM_Version::initialize() {
FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 0); FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 0);
} }
} }
guarantee(PowerArchitecturePPC64 == 0 || PowerArchitecturePPC64 == 5 ||
          PowerArchitecturePPC64 == 6 || PowerArchitecturePPC64 == 7 ||
          PowerArchitecturePPC64 == 8,
          "PowerArchitecturePPC64 should be 0, 5, 6, 7, or 8");
bool PowerArchitecturePPC64_ok = false;
switch (PowerArchitecturePPC64) {
case 8: if (!VM_Version::has_tcheck() ) break;
        if (!VM_Version::has_lqarx()  ) break;
case 7: if (!VM_Version::has_popcntw()) break;
case 6: if (!VM_Version::has_cmpb()   ) break;
case 5: if (!VM_Version::has_popcntb()) break;
case 0: PowerArchitecturePPC64_ok = true; break;
default: break;
}
guarantee(PowerArchitecturePPC64_ok, "PowerArchitecturePPC64 cannot be set to "
          UINTX_FORMAT " on this machine", PowerArchitecturePPC64);
// Power 8: Configure Data Stream Control Register. // Power 8: Configure Data Stream Control Register.
if (PowerArchitecturePPC64 >= 8) { if (PowerArchitecturePPC64 >= 8) {
@@ -122,7 +128,7 @@ void VM_Version::initialize() {
(has_tcheck() ? " tcheck" : "") (has_tcheck() ? " tcheck" : "")
// Make sure number of %s matches num_features! // Make sure number of %s matches num_features!
); );
_features_str = os::strdup(buf); _features_string = os::strdup(buf);
if (Verbose) { if (Verbose) {
print_features(); print_features();
} }
@@ -132,9 +138,15 @@ void VM_Version::initialize() {
// and 'atomic long memory ops' (see Unsafe_GetLongVolatile). // and 'atomic long memory ops' (see Unsafe_GetLongVolatile).
_supports_cx8 = true; _supports_cx8 = true;
// Used by C1.
_supports_atomic_getset4 = true;
_supports_atomic_getadd4 = true;
_supports_atomic_getset8 = true;
_supports_atomic_getadd8 = true;
UseSSE = 0; // Only on x86 and x64 UseSSE = 0; // Only on x86 and x64
intx cache_line_size = _measured_cache_line_size; intx cache_line_size = L1_data_cache_line_size();
if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) AllocatePrefetchStyle = 1; if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) AllocatePrefetchStyle = 1;
@@ -184,6 +196,11 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseAESIntrinsics, false); FLAG_SET_DEFAULT(UseAESIntrinsics, false);
} }
if (UseAESCTRIntrinsics) {
warning("AES/CTR intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
}
if (UseGHASHIntrinsics) { if (UseGHASHIntrinsics) {
warning("GHASH intrinsics are not available on this CPU"); warning("GHASH intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseGHASHIntrinsics, false); FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
@@ -208,6 +225,18 @@ void VM_Version::initialize() {
if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) { if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
UseMultiplyToLenIntrinsic = true; UseMultiplyToLenIntrinsic = true;
} }
if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
UseMontgomeryMultiplyIntrinsic = true;
}
if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
UseMontgomerySquareIntrinsic = true;
}
if (UseVectorizedMismatchIntrinsic) {
warning("UseVectorizedMismatchIntrinsic specified, but not available on this CPU.");
FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
}
// Adjust RTM (Restricted Transactional Memory) flags. // Adjust RTM (Restricted Transactional Memory) flags.
if (UseRTMLocking) { if (UseRTMLocking) {
@@ -276,11 +305,9 @@ void VM_Version::initialize() {
} }
} }
// This machine does not allow unaligned memory accesses // This machine allows unaligned memory accesses
if (UseUnalignedAccesses) { if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
if (!FLAG_IS_DEFAULT(UseUnalignedAccesses)) FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
warning("Unaligned memory access is not available on this CPU");
FLAG_SET_DEFAULT(UseUnalignedAccesses, false);
} }
} }
@@ -306,7 +333,7 @@ bool VM_Version::use_biased_locking() {
} }
void VM_Version::print_features() { void VM_Version::print_features() {
tty->print_cr("Version: %s cache_line_size = %d", cpu_features(), (int) get_cache_line_size()); tty->print_cr("Version: %s L1_data_cache_line_size=%d", features_string(), L1_data_cache_line_size());
} }
#ifdef COMPILER2 #ifdef COMPILER2
@@ -607,7 +634,7 @@ void VM_Version::determine_features() {
int count = 0; // count zeroed bytes int count = 0; // count zeroed bytes
for (int i = 0; i < BUFFER_SIZE; i++) if (test_area[i] == 0) count++; for (int i = 0; i < BUFFER_SIZE; i++) if (test_area[i] == 0) count++;
guarantee(is_power_of_2(count), "cache line size needs to be a power of 2"); guarantee(is_power_of_2(count), "cache line size needs to be a power of 2");
_measured_cache_line_size = count; _L1_data_cache_line_size = count;
// Execute code. Illegal instructions will be replaced by 0 in the signal handler. // Execute code. Illegal instructions will be replaced by 0 in the signal handler.
VM_Version::_is_determine_features_test_running = true; VM_Version::_is_determine_features_test_running = true;
@@ -705,7 +732,7 @@ void VM_Version::config_dscr() {
} }
} }
static int saved_features = 0; static uint64_t saved_features = 0;
void VM_Version::allow_all() { void VM_Version::allow_all() {
saved_features = _features; saved_features = _features;

View file

@@ -62,11 +62,9 @@ protected:
vcipher_m = (1 << vcipher), vcipher_m = (1 << vcipher),
vpmsumb_m = (1 << vpmsumb), vpmsumb_m = (1 << vpmsumb),
tcheck_m = (1 << tcheck ), tcheck_m = (1 << tcheck ),
all_features_m = -1 all_features_m = (unsigned long)-1
}; };
static int _features;
static int _measured_cache_line_size;
static const char* _features_str;
static bool _is_determine_features_test_running; static bool _is_determine_features_test_running;
static void print_features(); static void print_features();
@@ -97,10 +95,6 @@ public:
static bool has_vpmsumb() { return (_features & vpmsumb_m) != 0; } static bool has_vpmsumb() { return (_features & vpmsumb_m) != 0; }
static bool has_tcheck() { return (_features & tcheck_m) != 0; } static bool has_tcheck() { return (_features & tcheck_m) != 0; }
static const char* cpu_features() { return _features_str; }
static int get_cache_line_size() { return _measured_cache_line_size; }
// Assembler testing // Assembler testing
static void allow_all(); static void allow_all();
static void revert(); static void revert();

View file

@@ -76,7 +76,8 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
// We might take an implicit NULL fault here. // We might take an implicit NULL fault here.
address npe_addr = __ pc(); // npe = null pointer exception address npe_addr = __ pc(); // npe = null pointer exception
__ load_klass_with_trap_null_check(rcvr_klass, R3); __ null_check(R3, oopDesc::klass_offset_in_bytes(), /*implicit only*/NULL);
__ load_klass(rcvr_klass, R3);
// Set method (in case of interpreted method), and destination address. // Set method (in case of interpreted method), and destination address.
int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size(); int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
@@ -111,8 +112,8 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
// If the vtable entry is null, the method is abstract. // If the vtable entry is null, the method is abstract.
address ame_addr = __ pc(); // ame = abstract method error address ame_addr = __ pc(); // ame = abstract method error
__ null_check(R19_method, in_bytes(Method::from_compiled_offset()), /*implicit only*/NULL);
__ load_with_trap_null_check(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method); __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
__ mtctr(R12_scratch2); __ mtctr(R12_scratch2);
__ bctr(); __ bctr();
masm->flush(); masm->flush();
@@ -158,7 +159,8 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
// We might take an implicit NULL fault here. // We might take an implicit NULL fault here.
address npe_addr = __ pc(); // npe = null pointer exception address npe_addr = __ pc(); // npe = null pointer exception
__ load_klass_with_trap_null_check(rcvr_klass, R3_ARG1); __ null_check(R3_ARG1, oopDesc::klass_offset_in_bytes(), /*implicit only*/NULL);
__ load_klass(rcvr_klass, R3_ARG1);
BLOCK_COMMENT("Load start of itable entries into itable_entry."); BLOCK_COMMENT("Load start of itable entries into itable_entry.");
__ lwz(vtable_len, InstanceKlass::vtable_length_offset() * wordSize, rcvr_klass); __ lwz(vtable_len, InstanceKlass::vtable_length_offset() * wordSize, rcvr_klass);
@@ -217,15 +219,7 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
address ame_addr = __ pc(); // ame = abstract method error address ame_addr = __ pc(); // ame = abstract method error
// Must do an explicit check if implicit checks are disabled. // Must do an explicit check if implicit checks are disabled.
assert(!MacroAssembler::needs_explicit_null_check(in_bytes(Method::from_compiled_offset())), "sanity"); __ null_check(R19_method, in_bytes(Method::from_compiled_offset()), &throw_icce);
if (!ImplicitNullChecks || !os::zero_page_read_protected()) {
if (TrapBasedNullChecks) {
__ trap_null_check(R19_method);
} else {
__ cmpdi(CCR0, R19_method, 0);
__ beq(CCR0, throw_icce);
}
}
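// The policy folded into the new null_check (sketch, simplified from
// MacroAssembler::needs_explicit_null_check): a trap-based implicit
// check is only safe when a load at 'offset' off a NULL base still
// faults inside the protected zero page.
#if 0
bool can_use_implicit = ImplicitNullChecks &&
                        os::zero_page_read_protected() &&
                        offset >= 0 && offset < os::vm_page_size();
#endif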
__ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method); __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
__ mtctr(R12_scratch2); __ mtctr(R12_scratch2);
__ bctr(); __ bctr();

View file

@@ -677,11 +677,8 @@ class Assembler : public AbstractAssembler {
protected: protected:
// Insert a nop if the previous is cbcond // Insert a nop if the previous is cbcond
void insert_nop_after_cbcond() { inline void insert_nop_after_cbcond();
if (UseCBCond && cbcond_before()) {
nop();
}
}
// Delay slot helpers // Delay slot helpers
// cti is called when emitting control-transfer instruction, // cti is called when emitting control-transfer instruction,
// BEFORE doing the emitting. // BEFORE doing the emitting.
@@ -739,7 +736,7 @@ public:
} }
inline void emit_int32(int); // shadows AbstractAssembler::emit_int32 inline void emit_int32(int); // shadows AbstractAssembler::emit_int32
inline void emit_data(int x) { emit_int32(x); } inline void emit_data(int x);
inline void emit_data(int, RelocationHolder const&); inline void emit_data(int, RelocationHolder const&);
inline void emit_data(int, relocInfo::relocType rtype); inline void emit_data(int, relocInfo::relocType rtype);
// helper for above fcns // helper for above fcns
@@ -754,31 +751,31 @@ public:
inline void add(Register s1, Register s2, Register d ); inline void add(Register s1, Register s2, Register d );
inline void add(Register s1, int simm13a, Register d ); inline void add(Register s1, int simm13a, Register d );
void addcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(add_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); } inline void addcc( Register s1, Register s2, Register d );
void addcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(add_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } inline void addcc( Register s1, int simm13a, Register d );
void addc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(addc_op3 ) | rs1(s1) | rs2(s2) ); } inline void addc( Register s1, Register s2, Register d );
void addc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(addc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } inline void addc( Register s1, int simm13a, Register d );
void addccc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(addc_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); } inline void addccc( Register s1, Register s2, Register d );
void addccc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(addc_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } inline void addccc( Register s1, int simm13a, Register d );
// 4-operand AES instructions // 4-operand AES instructions
void aes_eround01( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround01_op5) | fs2(s2, FloatRegisterImpl::D) ); } inline void aes_eround01( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d );
void aes_eround23( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround23_op5) | fs2(s2, FloatRegisterImpl::D) ); } inline void aes_eround23( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d );
void aes_dround01( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround01_op5) | fs2(s2, FloatRegisterImpl::D) ); } inline void aes_dround01( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d );
void aes_dround23( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround23_op5) | fs2(s2, FloatRegisterImpl::D) ); } inline void aes_dround23( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d );
void aes_eround01_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround01_l_op5) | fs2(s2, FloatRegisterImpl::D) ); } inline void aes_eround01_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d );
void aes_eround23_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround23_l_op5) | fs2(s2, FloatRegisterImpl::D) ); } inline void aes_eround23_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d );
void aes_dround01_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround01_l_op5) | fs2(s2, FloatRegisterImpl::D) ); } inline void aes_dround01_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d );
void aes_dround23_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround23_l_op5) | fs2(s2, FloatRegisterImpl::D) ); } inline void aes_dround23_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d );
void aes_kexpand1( FloatRegister s1, FloatRegister s2, int imm5a, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | u_field(imm5a, 13, 9) | op5(aes_kexpand1_op5) | fs2(s2, FloatRegisterImpl::D) ); } inline void aes_kexpand1( FloatRegister s1, FloatRegister s2, int imm5a, FloatRegister d );
// 3-operand AES instructions // 3-operand AES instructions
void aes_kexpand0( FloatRegister s1, FloatRegister s2, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes3_op3) | fs1(s1, FloatRegisterImpl::D) | opf(aes_kexpand0_opf) | fs2(s2, FloatRegisterImpl::D) ); } inline void aes_kexpand0( FloatRegister s1, FloatRegister s2, FloatRegister d );
void aes_kexpand2( FloatRegister s1, FloatRegister s2, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes3_op3) | fs1(s1, FloatRegisterImpl::D) | opf(aes_kexpand2_opf) | fs2(s2, FloatRegisterImpl::D) ); } inline void aes_kexpand2( FloatRegister s1, FloatRegister s2, FloatRegister d );
// pp 136 // pp 136
@@ -816,6 +813,8 @@ public:
inline void call( address d, relocInfo::relocType rt = relocInfo::runtime_call_type ); inline void call( address d, relocInfo::relocType rt = relocInfo::runtime_call_type );
inline void call( Label& L, relocInfo::relocType rt = relocInfo::runtime_call_type ); inline void call( Label& L, relocInfo::relocType rt = relocInfo::runtime_call_type );
inline void call( address d, RelocationHolder const& rspec );
public: public:
// pp 150 // pp 150
@@ -825,70 +824,70 @@ public:
// at address s1 is swapped with the data in d. If the values are not equal, // at address s1 is swapped with the data in d. If the values are not equal,
// then the contents of memory at s1 are loaded into d, without the swap. // then the contents of memory at s1 are loaded into d, without the swap.
void casa( Register s1, Register s2, Register d, int ia = -1 ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(casa_op3 ) | rs1(s1) | (ia == -1 ? immed(true) : imm_asi(ia)) | rs2(s2)); } inline void casa( Register s1, Register s2, Register d, int ia = -1 );
void casxa( Register s1, Register s2, Register d, int ia = -1 ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(casxa_op3) | rs1(s1) | (ia == -1 ? immed(true) : imm_asi(ia)) | rs2(s2)); } inline void casxa( Register s1, Register s2, Register d, int ia = -1 );
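// casa/casxa semantics restated in C++ (illustrative sketch only):
#if 0
julong cas_xword(julong* addr, julong cmp, julong d) {
  julong old = *addr;           // value at [s1]
  if (old == cmp) *addr = d;    // equal: memory and d are swapped
  return old;                   // d always receives the old memory value
}
#endif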
// pp 152 // pp 152
void udiv( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(udiv_op3 ) | rs1(s1) | rs2(s2)); } inline void udiv( Register s1, Register s2, Register d );
void udiv( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(udiv_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } inline void udiv( Register s1, int simm13a, Register d );
void sdiv( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sdiv_op3 ) | rs1(s1) | rs2(s2)); } inline void sdiv( Register s1, Register s2, Register d );
void sdiv( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sdiv_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } inline void sdiv( Register s1, int simm13a, Register d );
void udivcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(udiv_op3 | cc_bit_op3) | rs1(s1) | rs2(s2)); } inline void udivcc( Register s1, Register s2, Register d );
void udivcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(udiv_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } inline void udivcc( Register s1, int simm13a, Register d );
void sdivcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sdiv_op3 | cc_bit_op3) | rs1(s1) | rs2(s2)); } inline void sdivcc( Register s1, Register s2, Register d );
void sdivcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sdiv_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } inline void sdivcc( Register s1, int simm13a, Register d );
// pp 155 // pp 155
void done() { v9_only(); cti(); emit_int32( op(arith_op) | fcn(0) | op3(done_op3) ); } inline void done();
void retry() { v9_only(); cti(); emit_int32( op(arith_op) | fcn(1) | op3(retry_op3) ); } inline void retry();
// pp 156 // pp 156
void fadd( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x40 + w) | fs2(s2, w)); } inline void fadd( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d );
void fsub( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x44 + w) | fs2(s2, w)); } inline void fsub( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d );
// pp 157 // pp 157
void fcmp( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x50 + w) | fs2(s2, w)); } inline void fcmp( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2);
void fcmpe( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x54 + w) | fs2(s2, w)); } inline void fcmpe( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2);
// pp 159 // pp 159
void ftox( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v9_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(fpop1_op3) | opf(0x80 + w) | fs2(s, w)); } inline void ftox( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d );
void ftoi( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::S) | op3(fpop1_op3) | opf(0xd0 + w) | fs2(s, w)); } inline void ftoi( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d );
// pp 160 // pp 160
void ftof( FloatRegisterImpl::Width sw, FloatRegisterImpl::Width dw, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, dw) | op3(fpop1_op3) | opf(0xc0 + sw + dw*4) | fs2(s, sw)); } inline void ftof( FloatRegisterImpl::Width sw, FloatRegisterImpl::Width dw, FloatRegister s, FloatRegister d );
// pp 161 // pp 161
void fxtof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v9_only(); emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x80 + w*4) | fs2(s, FloatRegisterImpl::D)); } inline void fxtof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d );
void fitof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0xc0 + w*4) | fs2(s, FloatRegisterImpl::S)); } inline void fitof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d );
// pp 162 // pp 162
void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x00 + w) | fs2(s, w)); } inline void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d );
void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(s, w)); } inline void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d );
void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(s, w)); } inline void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d );
// pp 163 // pp 163
void fmul( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x48 + w) | fs2(s2, w)); } inline void fmul( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d );
void fmul( FloatRegisterImpl::Width sw, FloatRegisterImpl::Width dw, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, dw) | op3(fpop1_op3) | fs1(s1, sw) | opf(0x60 + sw + dw*4) | fs2(s2, sw)); } inline void fmul( FloatRegisterImpl::Width sw, FloatRegisterImpl::Width dw, FloatRegister s1, FloatRegister s2, FloatRegister d );
void fdiv( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x4c + w) | fs2(s2, w)); } inline void fdiv( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d );
// FXORs/FXORd instructions // FXORs/FXORd instructions
void fxor( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, w) | op3(flog3_op3) | fs1(s1, w) | opf(0x6E - w) | fs2(s2, w)); } inline void fxor( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d );
// pp 164 // pp 164
void fsqrt( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x28 + w) | fs2(s, w)); } inline void fsqrt( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d );
// pp 165 // pp 165
@@ -897,17 +896,17 @@ public:
// pp 167 // pp 167
void flushw() { v9_only(); emit_int32( op(arith_op) | op3(flushw_op3) ); } void flushw();
// pp 168 // pp 168
void illtrap( int const22a) { if (const22a != 0) v9_only(); emit_int32( op(branch_op) | u_field(const22a, 21, 0) ); } void illtrap( int const22a);
// v8 unimp == illtrap(0) // v8 unimp == illtrap(0)
// pp 169 // pp 169
void impdep1( int id1, int const19a ) { v9_only(); emit_int32( op(arith_op) | fcn(id1) | op3(impdep1_op3) | u_field(const19a, 18, 0)); } void impdep1( int id1, int const19a );
void impdep2( int id1, int const19a ) { v9_only(); emit_int32( op(arith_op) | fcn(id1) | op3(impdep2_op3) | u_field(const19a, 18, 0)); } void impdep2( int id1, int const19a );
// pp 170 // pp 170
@@ -927,8 +926,8 @@ public:
// 173 // 173
void ldfa( FloatRegisterImpl::Width w, Register s1, Register s2, int ia, FloatRegister d ) { v9_only(); emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3 | alt_bit_op3, w) | rs1(s1) | imm_asi(ia) | rs2(s2) ); } inline void ldfa( FloatRegisterImpl::Width w, Register s1, Register s2, int ia, FloatRegister d );
void ldfa( FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d ) { v9_only(); emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3 | alt_bit_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } inline void ldfa( FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d );
// pp 175, lduw is ld on v8 // pp 175, lduw is ld on v8
@@ -951,119 +950,119 @@ public:
// pp 177 // pp 177
void ldsba( Register s1, Register s2, int ia, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldsb_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); } inline void ldsba( Register s1, Register s2, int ia, Register d );
void ldsba( Register s1, int simm13a, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldsb_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } inline void ldsba( Register s1, int simm13a, Register d );
void ldsha( Register s1, Register s2, int ia, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldsh_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); } inline void ldsha( Register s1, Register s2, int ia, Register d );
void ldsha( Register s1, int simm13a, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldsh_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } inline void ldsha( Register s1, int simm13a, Register d );
void ldswa( Register s1, Register s2, int ia, Register d ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(ldsw_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); } inline void ldswa( Register s1, Register s2, int ia, Register d );
void ldswa( Register s1, int simm13a, Register d ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(ldsw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } inline void ldswa( Register s1, int simm13a, Register d );
void lduba( Register s1, Register s2, int ia, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldub_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); } inline void lduba( Register s1, Register s2, int ia, Register d );
void lduba( Register s1, int simm13a, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldub_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } inline void lduba( Register s1, int simm13a, Register d );
void lduha( Register s1, Register s2, int ia, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(lduh_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); } inline void lduha( Register s1, Register s2, int ia, Register d );
void lduha( Register s1, int simm13a, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(lduh_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } inline void lduha( Register s1, int simm13a, Register d );
void lduwa( Register s1, Register s2, int ia, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(lduw_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); } inline void lduwa( Register s1, Register s2, int ia, Register d );
void lduwa( Register s1, int simm13a, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(lduw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } inline void lduwa( Register s1, int simm13a, Register d );
void ldxa( Register s1, Register s2, int ia, Register d ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(ldx_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); } inline void ldxa( Register s1, Register s2, int ia, Register d );
void ldxa( Register s1, int simm13a, Register d ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(ldx_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } inline void ldxa( Register s1, int simm13a, Register d );
// pp 181 // pp 181
void and3( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(and_op3 ) | rs1(s1) | rs2(s2) ); } inline void and3( Register s1, Register s2, Register d );
void and3( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(and_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } inline void and3( Register s1, int simm13a, Register d );
void andcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(and_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); } inline void andcc( Register s1, Register s2, Register d );
void andcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(and_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } inline void andcc( Register s1, int simm13a, Register d );
void andn( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(andn_op3 ) | rs1(s1) | rs2(s2) ); } inline void andn( Register s1, Register s2, Register d );
void andn( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(andn_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } inline void andn( Register s1, int simm13a, Register d );
void andncc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); } inline void andncc( Register s1, Register s2, Register d );
void andncc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } inline void andncc( Register s1, int simm13a, Register d );
void or3( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(or_op3 ) | rs1(s1) | rs2(s2) ); } inline void or3( Register s1, Register s2, Register d );
void or3( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(or_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } inline void or3( Register s1, int simm13a, Register d );
void orcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(or_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); } inline void orcc( Register s1, Register s2, Register d );
void orcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(or_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } inline void orcc( Register s1, int simm13a, Register d );
void orn( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | rs2(s2) ); } inline void orn( Register s1, Register s2, Register d );
void orn( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } inline void orn( Register s1, int simm13a, Register d );
void orncc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(orn_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); } inline void orncc( Register s1, Register s2, Register d );
void orncc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(orn_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } inline void orncc( Register s1, int simm13a, Register d );
void xor3( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(xor_op3 ) | rs1(s1) | rs2(s2) ); } inline void xor3( Register s1, Register s2, Register d );
void xor3( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(xor_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } inline void xor3( Register s1, int simm13a, Register d );
void xorcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(xor_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); } inline void xorcc( Register s1, Register s2, Register d );
void xorcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(xor_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } inline void xorcc( Register s1, int simm13a, Register d );
-void xnor( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(xnor_op3 ) | rs1(s1) | rs2(s2) ); }
-void xnor( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(xnor_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-void xnorcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(xnor_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
-void xnorcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(xnor_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void xnor( Register s1, Register s2, Register d );
+inline void xnor( Register s1, int simm13a, Register d );
+inline void xnorcc( Register s1, Register s2, Register d );
+inline void xnorcc( Register s1, int simm13a, Register d );
 // pp 183
-void membar( Membar_mask_bits const7a ) { v9_only(); emit_int32( op(arith_op) | op3(membar_op3) | rs1(O7) | immed(true) | u_field( int(const7a), 6, 0)); }
+inline void membar( Membar_mask_bits const7a );
 // pp 185
-void fmov( FloatRegisterImpl::Width w, Condition c, bool floatCC, CC cca, FloatRegister s2, FloatRegister d ) { v9_only(); emit_int32( op(arith_op) | fd(d, w) | op3(fpop2_op3) | cond_mov(c) | opf_cc(cca, floatCC) | opf_low6(w) | fs2(s2, w)); }
+inline void fmov( FloatRegisterImpl::Width w, Condition c, bool floatCC, CC cca, FloatRegister s2, FloatRegister d );
 // pp 189
-void fmov( FloatRegisterImpl::Width w, RCondition c, Register s1, FloatRegister s2, FloatRegister d ) { v9_only(); emit_int32( op(arith_op) | fd(d, w) | op3(fpop2_op3) | rs1(s1) | rcond(c) | opf_low5(4 + w) | fs2(s2, w)); }
+inline void fmov( FloatRegisterImpl::Width w, RCondition c, Register s1, FloatRegister s2, FloatRegister d );
 // pp 191
-void movcc( Condition c, bool floatCC, CC cca, Register s2, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(movcc_op3) | mov_cc(cca, floatCC) | cond_mov(c) | rs2(s2) ); }
-void movcc( Condition c, bool floatCC, CC cca, int simm11a, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(movcc_op3) | mov_cc(cca, floatCC) | cond_mov(c) | immed(true) | simm(simm11a, 11) ); }
+inline void movcc( Condition c, bool floatCC, CC cca, Register s2, Register d );
+inline void movcc( Condition c, bool floatCC, CC cca, int simm11a, Register d );
 // pp 195
-void movr( RCondition c, Register s1, Register s2, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(movr_op3) | rs1(s1) | rcond(c) | rs2(s2) ); }
-void movr( RCondition c, Register s1, int simm10a, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(movr_op3) | rs1(s1) | rcond(c) | immed(true) | simm(simm10a, 10) ); }
+inline void movr( RCondition c, Register s1, Register s2, Register d );
+inline void movr( RCondition c, Register s1, int simm10a, Register d );
 // pp 196
-void mulx( Register s1, Register s2, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(mulx_op3 ) | rs1(s1) | rs2(s2) ); }
-void mulx( Register s1, int simm13a, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(mulx_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-void sdivx( Register s1, Register s2, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(sdivx_op3) | rs1(s1) | rs2(s2) ); }
-void sdivx( Register s1, int simm13a, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(sdivx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-void udivx( Register s1, Register s2, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(udivx_op3) | rs1(s1) | rs2(s2) ); }
-void udivx( Register s1, int simm13a, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(udivx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void mulx( Register s1, Register s2, Register d );
+inline void mulx( Register s1, int simm13a, Register d );
+inline void sdivx( Register s1, Register s2, Register d );
+inline void sdivx( Register s1, int simm13a, Register d );
+inline void udivx( Register s1, Register s2, Register d );
+inline void udivx( Register s1, int simm13a, Register d );
 // pp 197
-void umul( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(umul_op3 ) | rs1(s1) | rs2(s2) ); }
-void umul( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(umul_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-void smul( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 ) | rs1(s1) | rs2(s2) ); }
-void smul( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-void umulcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(umul_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
-void umulcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(umul_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-void smulcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
-void smulcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void umul( Register s1, Register s2, Register d );
+inline void umul( Register s1, int simm13a, Register d );
+inline void smul( Register s1, Register s2, Register d );
+inline void smul( Register s1, int simm13a, Register d );
+inline void umulcc( Register s1, Register s2, Register d );
+inline void umulcc( Register s1, int simm13a, Register d );
+inline void smulcc( Register s1, Register s2, Register d );
+inline void smulcc( Register s1, int simm13a, Register d );
 // pp 201
-void nop() { emit_int32( op(branch_op) | op2(sethi_op2) ); }
-void sw_count() { emit_int32( op(branch_op) | op2(sethi_op2) | 0x3f0 ); }
+inline void nop();
+inline void sw_count();
 // pp 202
-void popc( Register s, Register d) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(popc_op3) | rs2(s)); }
-void popc( int simm13a, Register d) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(popc_op3) | immed(true) | simm(simm13a, 13)); }
+inline void popc( Register s, Register d);
+inline void popc( int simm13a, Register d);
 // pp 203
-void prefetch( Register s1, Register s2, PrefetchFcn f) { v9_only(); emit_int32( op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | rs2(s2) ); }
-void prefetch( Register s1, int simm13a, PrefetchFcn f) { v9_only(); emit_data( op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-void prefetcha( Register s1, Register s2, int ia, PrefetchFcn f ) { v9_only(); emit_int32( op(ldst_op) | fcn(f) | op3(prefetch_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-void prefetcha( Register s1, int simm13a, PrefetchFcn f ) { v9_only(); emit_int32( op(ldst_op) | fcn(f) | op3(prefetch_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void prefetch( Register s1, Register s2, PrefetchFcn f);
+inline void prefetch( Register s1, int simm13a, PrefetchFcn f);
+inline void prefetcha( Register s1, Register s2, int ia, PrefetchFcn f );
+inline void prefetcha( Register s1, int simm13a, PrefetchFcn f );
 // pp 208
 // not implementing read privileged register
-inline void rdy( Register d) { v9_dep(); emit_int32( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(0, 18, 14)); }
-inline void rdccr( Register d) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(2, 18, 14)); }
-inline void rdasi( Register d) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(3, 18, 14)); }
-inline void rdtick( Register d) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(4, 18, 14)); } // Spoon!
-inline void rdpc( Register d) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(5, 18, 14)); }
-inline void rdfprs( Register d) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(6, 18, 14)); }
+inline void rdy( Register d);
+inline void rdccr( Register d);
+inline void rdasi( Register d);
+inline void rdtick( Register d);
+inline void rdpc( Register d);
+inline void rdfprs( Register d);
 // pp 213
@@ -1072,47 +1071,43 @@ public:
 // pp 214
-void save( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(save_op3) | rs1(s1) | rs2(s2) ); }
-void save( Register s1, int simm13a, Register d ) {
-  // make sure frame is at least large enough for the register save area
-  assert(-simm13a >= 16 * wordSize, "frame too small");
-  emit_int32( op(arith_op) | rd(d) | op3(save_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) );
-}
+inline void save( Register s1, Register s2, Register d );
+inline void save( Register s1, int simm13a, Register d );
-void restore( Register s1 = G0, Register s2 = G0, Register d = G0 ) { emit_int32( op(arith_op) | rd(d) | op3(restore_op3) | rs1(s1) | rs2(s2) ); }
-void restore( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(restore_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void restore( Register s1 = G0, Register s2 = G0, Register d = G0 );
+inline void restore( Register s1, int simm13a, Register d );
 // pp 216
-void saved() { v9_only(); emit_int32( op(arith_op) | fcn(0) | op3(saved_op3)); }
-void restored() { v9_only(); emit_int32( op(arith_op) | fcn(1) | op3(saved_op3)); }
+inline void saved();
+inline void restored();
 // pp 217
 inline void sethi( int imm22a, Register d, RelocationHolder const& rspec = RelocationHolder() );
 // pp 218
-void sll( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(0) | rs2(s2) ); }
-void sll( Register s1, int imm5a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(0) | immed(true) | u_field(imm5a, 4, 0) ); }
-void srl( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(0) | rs2(s2) ); }
-void srl( Register s1, int imm5a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(0) | immed(true) | u_field(imm5a, 4, 0) ); }
-void sra( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(0) | rs2(s2) ); }
-void sra( Register s1, int imm5a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(0) | immed(true) | u_field(imm5a, 4, 0) ); }
-void sllx( Register s1, Register s2, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(1) | rs2(s2) ); }
-void sllx( Register s1, int imm6a, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(1) | immed(true) | u_field(imm6a, 5, 0) ); }
-void srlx( Register s1, Register s2, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(1) | rs2(s2) ); }
-void srlx( Register s1, int imm6a, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(1) | immed(true) | u_field(imm6a, 5, 0) ); }
-void srax( Register s1, Register s2, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(1) | rs2(s2) ); }
-void srax( Register s1, int imm6a, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(1) | immed(true) | u_field(imm6a, 5, 0) ); }
+inline void sll( Register s1, Register s2, Register d );
+inline void sll( Register s1, int imm5a, Register d );
+inline void srl( Register s1, Register s2, Register d );
+inline void srl( Register s1, int imm5a, Register d );
+inline void sra( Register s1, Register s2, Register d );
+inline void sra( Register s1, int imm5a, Register d );
+inline void sllx( Register s1, Register s2, Register d );
+inline void sllx( Register s1, int imm6a, Register d );
+inline void srlx( Register s1, Register s2, Register d );
+inline void srlx( Register s1, int imm6a, Register d );
+inline void srax( Register s1, Register s2, Register d );
+inline void srax( Register s1, int imm6a, Register d );
 // pp 220
-void sir( int simm13a ) { emit_int32( op(arith_op) | fcn(15) | op3(sir_op3) | immed(true) | simm(simm13a, 13)); }
+inline void sir( int simm13a );
 // pp 221
-void stbar() { emit_int32( op(arith_op) | op3(membar_op3) | u_field(15, 18, 14)); }
+inline void stbar();
 // pp 222
@@ -1126,8 +1121,8 @@ public:
 // pp 224
-void stfa( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2, int ia ) { v9_only(); emit_int32( op(ldst_op) | fd(d, w) | alt_op3(stf_op3 | alt_bit_op3, w) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-void stfa( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a ) { v9_only(); emit_int32( op(ldst_op) | fd(d, w) | alt_op3(stf_op3 | alt_bit_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void stfa( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2, int ia );
+inline void stfa( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a );
 // p 226
@@ -1144,28 +1139,28 @@ public:
 // pp 177
-void stba( Register d, Register s1, Register s2, int ia ) { emit_int32( op(ldst_op) | rd(d) | op3(stb_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-void stba( Register d, Register s1, int simm13a ) { emit_int32( op(ldst_op) | rd(d) | op3(stb_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-void stha( Register d, Register s1, Register s2, int ia ) { emit_int32( op(ldst_op) | rd(d) | op3(sth_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-void stha( Register d, Register s1, int simm13a ) { emit_int32( op(ldst_op) | rd(d) | op3(sth_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-void stwa( Register d, Register s1, Register s2, int ia ) { emit_int32( op(ldst_op) | rd(d) | op3(stw_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-void stwa( Register d, Register s1, int simm13a ) { emit_int32( op(ldst_op) | rd(d) | op3(stw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-void stxa( Register d, Register s1, Register s2, int ia ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(stx_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-void stxa( Register d, Register s1, int simm13a ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(stx_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-void stda( Register d, Register s1, Register s2, int ia ) { emit_int32( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-void stda( Register d, Register s1, int simm13a ) { emit_int32( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void stba( Register d, Register s1, Register s2, int ia );
+inline void stba( Register d, Register s1, int simm13a );
+inline void stha( Register d, Register s1, Register s2, int ia );
+inline void stha( Register d, Register s1, int simm13a );
+inline void stwa( Register d, Register s1, Register s2, int ia );
+inline void stwa( Register d, Register s1, int simm13a );
+inline void stxa( Register d, Register s1, Register s2, int ia );
+inline void stxa( Register d, Register s1, int simm13a );
+inline void stda( Register d, Register s1, Register s2, int ia );
+inline void stda( Register d, Register s1, int simm13a );
 // pp 230
-void sub( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sub_op3 ) | rs1(s1) | rs2(s2) ); }
-void sub( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sub_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-void subcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sub_op3 | cc_bit_op3 ) | rs1(s1) | rs2(s2) ); }
-void subcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sub_op3 | cc_bit_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-void subc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(subc_op3 ) | rs1(s1) | rs2(s2) ); }
-void subc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(subc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-void subccc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(subc_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
-void subccc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(subc_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void sub( Register s1, Register s2, Register d );
+inline void sub( Register s1, int simm13a, Register d );
+inline void subcc( Register s1, Register s2, Register d );
+inline void subcc( Register s1, int simm13a, Register d );
+inline void subc( Register s1, Register s2, Register d );
+inline void subc( Register s1, int simm13a, Register d );
+inline void subccc( Register s1, Register s2, Register d );
+inline void subccc( Register s1, int simm13a, Register d );
 // pp 231
@@ -1174,86 +1169,80 @@ public:
 // pp 232
-void swapa( Register s1, Register s2, int ia, Register d ) { v9_dep(); emit_int32( op(ldst_op) | rd(d) | op3(swap_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-void swapa( Register s1, int simm13a, Register d ) { v9_dep(); emit_int32( op(ldst_op) | rd(d) | op3(swap_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void swapa( Register s1, Register s2, int ia, Register d );
+inline void swapa( Register s1, int simm13a, Register d );
 // pp 234, note op in book is wrong, see pp 268
-void taddcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(taddcc_op3 ) | rs1(s1) | rs2(s2) ); }
-void taddcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(taddcc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void taddcc( Register s1, Register s2, Register d );
+inline void taddcc( Register s1, int simm13a, Register d );
 // pp 235
-void tsubcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcc_op3 ) | rs1(s1) | rs2(s2) ); }
-void tsubcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void tsubcc( Register s1, Register s2, Register d );
+inline void tsubcc( Register s1, int simm13a, Register d );
 // pp 237
-void trap( Condition c, CC cc, Register s1, Register s2 ) { emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | rs2(s2)); }
-void trap( Condition c, CC cc, Register s1, int trapa ) { emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | immed(true) | u_field(trapa, 6, 0)); }
+inline void trap( Condition c, CC cc, Register s1, Register s2 );
+inline void trap( Condition c, CC cc, Register s1, int trapa );
 // simple uncond. trap
-void trap( int trapa ) { trap( always, icc, G0, trapa ); }
+inline void trap( int trapa );
 // pp 239 omit write priv register for now
-inline void wry( Register d) { v9_dep(); emit_int32( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(0, 29, 25)); }
-inline void wrccr(Register s) { v9_only(); emit_int32( op(arith_op) | rs1(s) | op3(wrreg_op3) | u_field(2, 29, 25)); }
-inline void wrccr(Register s, int simm13a) { v9_only(); emit_int32( op(arith_op) |
-                                                                    rs1(s) |
-                                                                    op3(wrreg_op3) |
-                                                                    u_field(2, 29, 25) |
-                                                                    immed(true) |
-                                                                    simm(simm13a, 13)); }
-inline void wrasi(Register d) { v9_only(); emit_int32( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(3, 29, 25)); }
+inline void wry( Register d);
+inline void wrccr(Register s);
+inline void wrccr(Register s, int simm13a);
+inline void wrasi(Register d);
 // wrasi(d, imm) stores (d xor imm) to asi
-inline void wrasi(Register d, int simm13a) { v9_only(); emit_int32( op(arith_op) | rs1(d) | op3(wrreg_op3) |
-                                                                    u_field(3, 29, 25) | immed(true) | simm(simm13a, 13)); }
-inline void wrfprs( Register d) { v9_only(); emit_int32( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(6, 29, 25)); }
+inline void wrasi(Register d, int simm13a);
+inline void wrfprs( Register d);
 // VIS1 instructions
-void alignaddr( Register s1, Register s2, Register d ) { vis1_only(); emit_int32( op(arith_op) | rd(d) | op3(alignaddr_op3) | rs1(s1) | opf(alignaddr_opf) | rs2(s2)); }
-void faligndata( FloatRegister s1, FloatRegister s2, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(faligndata_op3) | fs1(s1, FloatRegisterImpl::D) | opf(faligndata_opf) | fs2(s2, FloatRegisterImpl::D)); }
-void fzero( FloatRegisterImpl::Width w, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, w) | op3(fzero_op3) | opf(0x62 - w)); }
-void fsrc2( FloatRegisterImpl::Width w, FloatRegister s2, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, w) | op3(fsrc_op3) | opf(0x7A - w) | fs2(s2, w)); }
-void fnot1( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, w) | op3(fnot_op3) | fs1(s1, w) | opf(0x6C - w)); }
-void fpmerge( FloatRegister s1, FloatRegister s2, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(0x36) | fs1(s1, FloatRegisterImpl::S) | opf(0x4b) | fs2(s2, FloatRegisterImpl::S)); }
-void stpartialf( Register s1, Register s2, FloatRegister d, int ia = -1 ) { vis1_only(); emit_int32( op(ldst_op) | fd(d, FloatRegisterImpl::D) | op3(stpartialf_op3) | rs1(s1) | imm_asi(ia) | rs2(s2)); }
+inline void alignaddr( Register s1, Register s2, Register d );
+inline void faligndata( FloatRegister s1, FloatRegister s2, FloatRegister d );
+inline void fzero( FloatRegisterImpl::Width w, FloatRegister d );
+inline void fsrc2( FloatRegisterImpl::Width w, FloatRegister s2, FloatRegister d );
+inline void fnot1( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister d );
+inline void fpmerge( FloatRegister s1, FloatRegister s2, FloatRegister d );
+inline void stpartialf( Register s1, Register s2, FloatRegister d, int ia = -1 );
 // VIS2 instructions
-void edge8n( Register s1, Register s2, Register d ) { vis2_only(); emit_int32( op(arith_op) | rd(d) | op3(edge_op3) | rs1(s1) | opf(edge8n_opf) | rs2(s2)); }
-void bmask( Register s1, Register s2, Register d ) { vis2_only(); emit_int32( op(arith_op) | rd(d) | op3(bmask_op3) | rs1(s1) | opf(bmask_opf) | rs2(s2)); }
-void bshuffle( FloatRegister s1, FloatRegister s2, FloatRegister d ) { vis2_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(bshuffle_op3) | fs1(s1, FloatRegisterImpl::D) | opf(bshuffle_opf) | fs2(s2, FloatRegisterImpl::D)); }
+inline void edge8n( Register s1, Register s2, Register d );
+inline void bmask( Register s1, Register s2, Register d );
+inline void bshuffle( FloatRegister s1, FloatRegister s2, FloatRegister d );
 // VIS3 instructions
-void movstosw( FloatRegister s, Register d ) { vis3_only(); emit_int32( op(arith_op) | rd(d) | op3(mftoi_op3) | opf(mstosw_opf) | fs2(s, FloatRegisterImpl::S)); }
-void movstouw( FloatRegister s, Register d ) { vis3_only(); emit_int32( op(arith_op) | rd(d) | op3(mftoi_op3) | opf(mstouw_opf) | fs2(s, FloatRegisterImpl::S)); }
-void movdtox( FloatRegister s, Register d ) { vis3_only(); emit_int32( op(arith_op) | rd(d) | op3(mftoi_op3) | opf(mdtox_opf) | fs2(s, FloatRegisterImpl::D)); }
-void movwtos( Register s, FloatRegister d ) { vis3_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::S) | op3(mftoi_op3) | opf(mwtos_opf) | rs2(s)); }
-void movxtod( Register s, FloatRegister d ) { vis3_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(mftoi_op3) | opf(mxtod_opf) | rs2(s)); }
-void xmulx(Register s1, Register s2, Register d) { vis3_only(); emit_int32( op(arith_op) | rd(d) | op3(xmulx_op3) | rs1(s1) | opf(xmulx_opf) | rs2(s2)); }
-void xmulxhi(Register s1, Register s2, Register d) { vis3_only(); emit_int32( op(arith_op) | rd(d) | op3(xmulx_op3) | rs1(s1) | opf(xmulxhi_opf) | rs2(s2)); }
+inline void movstosw( FloatRegister s, Register d );
+inline void movstouw( FloatRegister s, Register d );
+inline void movdtox( FloatRegister s, Register d );
+inline void movwtos( Register s, FloatRegister d );
+inline void movxtod( Register s, FloatRegister d );
+inline void xmulx(Register s1, Register s2, Register d);
+inline void xmulxhi(Register s1, Register s2, Register d);
 // Crypto SHA instructions
-void sha1() { sha1_only(); emit_int32( op(arith_op) | op3(sha_op3) | opf(sha1_opf)); }
-void sha256() { sha256_only(); emit_int32( op(arith_op) | op3(sha_op3) | opf(sha256_opf)); }
-void sha512() { sha512_only(); emit_int32( op(arith_op) | op3(sha_op3) | opf(sha512_opf)); }
+inline void sha1();
+inline void sha256();
+inline void sha512();
 // CRC32C instruction
-void crc32c( FloatRegister s1, FloatRegister s2, FloatRegister d ) { crc32c_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(crc32c_op3) | fs1(s1, FloatRegisterImpl::D) | opf(crc32c_opf) | fs2(s2, FloatRegisterImpl::D)); }
+inline void crc32c( FloatRegister s1, FloatRegister s2, FloatRegister d );
 // Creation
 Assembler(CodeBuffer* code) : AbstractAssembler(code) {
View file

@@ -28,6 +28,12 @@
#include "asm/assembler.hpp" #include "asm/assembler.hpp"
inline void Assembler::insert_nop_after_cbcond() {
if (UseCBCond && cbcond_before()) {
nop();
}
}
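// On SPARC a cbcond should not be immediately followed by another
// control-transfer instruction, so the CTI emitters below call this helper
// first; cbcond_before() presumably reports whether the instruction just
// emitted was a cbcond, in which case a nop is padded in between.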
 inline void Assembler::check_delay() {
 # ifdef CHECK_DELAY
   guarantee( delay_state != at_delay_slot, "must say delayed() when filling delay slot");
@@ -40,6 +46,10 @@ inline void Assembler::emit_int32(int x) {
   AbstractAssembler::emit_int32(x);
 }
+inline void Assembler::emit_data(int x) {
+  emit_int32(x);
+}
 inline void Assembler::emit_data(int x, relocInfo::relocType rtype) {
   relocate(rtype);
   emit_int32(x);
@@ -54,6 +64,29 @@ inline void Assembler::emit_data(int x, RelocationHolder const& rspec) {
 inline void Assembler::add(Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::add(Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::addcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(add_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::addcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(add_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::addc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(addc_op3 ) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::addc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(addc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::addccc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(addc_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::addccc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(addc_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::aes_eround01( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround01_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+inline void Assembler::aes_eround23( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround23_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+inline void Assembler::aes_dround01( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround01_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+inline void Assembler::aes_dround23( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround23_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+inline void Assembler::aes_eround01_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround01_l_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+inline void Assembler::aes_eround23_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround23_l_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+inline void Assembler::aes_dround01_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround01_l_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+inline void Assembler::aes_dround23_l( FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround23_l_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+inline void Assembler::aes_kexpand1( FloatRegister s1, FloatRegister s2, int imm5a, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | u_field(imm5a, 13, 9) | op5(aes_kexpand1_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+// 3-operand AES instructions
+inline void Assembler::aes_kexpand0( FloatRegister s1, FloatRegister s2, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes3_op3) | fs1(s1, FloatRegisterImpl::D) | opf(aes_kexpand0_opf) | fs2(s2, FloatRegisterImpl::D) ); }
+inline void Assembler::aes_kexpand2( FloatRegister s1, FloatRegister s2, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes3_op3) | fs1(s1, FloatRegisterImpl::D) | opf(aes_kexpand2_opf) | fs2(s2, FloatRegisterImpl::D) ); }
 inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt ) { v9_only(); insert_nop_after_cbcond(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bpr_op2) | wdisp16(intptr_t(d), intptr_t(pc())) | predict(p) | rs1(s1), rt); has_delay_slot(); }
 inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, Label& L) { insert_nop_after_cbcond(); bpr( c, a, p, s1, target(L)); }
@@ -76,9 +109,58 @@ inline void Assembler::cbcond(Condition c, CC cc, Register s1, int simm5, Label&
 inline void Assembler::call( address d, relocInfo::relocType rt ) { insert_nop_after_cbcond(); cti(); emit_data( op(call_op) | wdisp(intptr_t(d), intptr_t(pc()), 30), rt); has_delay_slot(); assert(rt != relocInfo::virtual_call_type, "must use virtual_call_Relocation::spec"); }
 inline void Assembler::call( Label& L, relocInfo::relocType rt ) { insert_nop_after_cbcond(); call( target(L), rt); }
+inline void Assembler::call( address d, RelocationHolder const& rspec ) { insert_nop_after_cbcond(); cti(); emit_data( op(call_op) | wdisp(intptr_t(d), intptr_t(pc()), 30), rspec); has_delay_slot(); assert(rspec.type() != relocInfo::virtual_call_type, "must use virtual_call_Relocation::spec"); }
+inline void Assembler::casa( Register s1, Register s2, Register d, int ia ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(casa_op3 ) | rs1(s1) | (ia == -1 ? immed(true) : imm_asi(ia)) | rs2(s2)); }
+inline void Assembler::casxa( Register s1, Register s2, Register d, int ia ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(casxa_op3) | rs1(s1) | (ia == -1 ? immed(true) : imm_asi(ia)) | rs2(s2)); }
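// In casa/casxa above, passing ia == -1 appears to select the i=1 encoding
// (immed(true)), which takes the address space identifier from the %asi
// register rather than from an immediate ASI field.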
+inline void Assembler::udiv( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(udiv_op3 ) | rs1(s1) | rs2(s2)); }
+inline void Assembler::udiv( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(udiv_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::sdiv( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sdiv_op3 ) | rs1(s1) | rs2(s2)); }
+inline void Assembler::sdiv( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sdiv_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::udivcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(udiv_op3 | cc_bit_op3) | rs1(s1) | rs2(s2)); }
+inline void Assembler::udivcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(udiv_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::sdivcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sdiv_op3 | cc_bit_op3) | rs1(s1) | rs2(s2)); }
+inline void Assembler::sdivcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sdiv_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::done() { v9_only(); cti(); emit_int32( op(arith_op) | fcn(0) | op3(done_op3) ); }
+inline void Assembler::retry() { v9_only(); cti(); emit_int32( op(arith_op) | fcn(1) | op3(retry_op3) ); }
+inline void Assembler::fadd( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x40 + w) | fs2(s2, w)); }
+inline void Assembler::fsub( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x44 + w) | fs2(s2, w)); }
+inline void Assembler::fcmp( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x50 + w) | fs2(s2, w)); }
+inline void Assembler::fcmpe( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x54 + w) | fs2(s2, w)); }
+inline void Assembler::ftox( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v9_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(fpop1_op3) | opf(0x80 + w) | fs2(s, w)); }
+inline void Assembler::ftoi( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::S) | op3(fpop1_op3) | opf(0xd0 + w) | fs2(s, w)); }
+inline void Assembler::ftof( FloatRegisterImpl::Width sw, FloatRegisterImpl::Width dw, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, dw) | op3(fpop1_op3) | opf(0xc0 + sw + dw*4) | fs2(s, sw)); }
+inline void Assembler::fxtof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v9_only(); emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x80 + w*4) | fs2(s, FloatRegisterImpl::D)); }
+inline void Assembler::fitof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0xc0 + w*4) | fs2(s, FloatRegisterImpl::S)); }
+inline void Assembler::fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x00 + w) | fs2(s, w)); }
+inline void Assembler::fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(s, w)); }
+inline void Assembler::fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(s, w)); }
+inline void Assembler::fmul( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x48 + w) | fs2(s2, w)); }
+inline void Assembler::fmul( FloatRegisterImpl::Width sw, FloatRegisterImpl::Width dw, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, dw) | op3(fpop1_op3) | fs1(s1, sw) | opf(0x60 + sw + dw*4) | fs2(s2, sw)); }
+inline void Assembler::fdiv( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x4c + w) | fs2(s2, w)); }
+inline void Assembler::fxor( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, w) | op3(flog3_op3) | fs1(s1, w) | opf(0x6E - w) | fs2(s2, w)); }
+inline void Assembler::fsqrt( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x28 + w) | fs2(s, w)); }
 inline void Assembler::flush( Register s1, Register s2) { emit_int32( op(arith_op) | op3(flush_op3) | rs1(s1) | rs2(s2)); }
 inline void Assembler::flush( Register s1, int simm13a) { emit_data( op(arith_op) | op3(flush_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
+inline void Assembler::flushw() { v9_only(); emit_int32( op(arith_op) | op3(flushw_op3) ); }
+inline void Assembler::illtrap( int const22a) { if (const22a != 0) v9_only(); emit_int32( op(branch_op) | u_field(const22a, 21, 0) ); }
+inline void Assembler::impdep1( int id1, int const19a ) { v9_only(); emit_int32( op(arith_op) | fcn(id1) | op3(impdep1_op3) | u_field(const19a, 18, 0)); }
+inline void Assembler::impdep2( int id1, int const19a ) { v9_only(); emit_int32( op(arith_op) | fcn(id1) | op3(impdep2_op3) | u_field(const19a, 18, 0)); }
 inline void Assembler::jmpl( Register s1, Register s2, Register d ) { insert_nop_after_cbcond(); cti(); emit_int32( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
 inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { insert_nop_after_cbcond(); cti(); emit_data( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); has_delay_slot(); }
@@ -88,6 +170,9 @@ inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a,
 inline void Assembler::ldxfsr( Register s1, Register s2) { v9_only(); emit_int32( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldxfsr( Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
+inline void Assembler::ldfa( FloatRegisterImpl::Width w, Register s1, Register s2, int ia, FloatRegister d ) { v9_only(); emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3 | alt_bit_op3, w) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
+inline void Assembler::ldfa( FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d ) { v9_only(); emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3 | alt_bit_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
 inline void Assembler::ldsb( Register s1, Register s2, Register d) { emit_int32( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldsb( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
@@ -107,11 +192,134 @@ inline void Assembler::ldx( Register s1, int simm13a, Register d) { v9_only();
 inline void Assembler::ldd( Register s1, Register s2, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldd( Register s1, int simm13a, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
+inline void Assembler::ldsba( Register s1, Register s2, int ia, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldsb_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
+inline void Assembler::ldsba( Register s1, int simm13a, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldsb_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::ldsha( Register s1, Register s2, int ia, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldsh_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
+inline void Assembler::ldsha( Register s1, int simm13a, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldsh_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::ldswa( Register s1, Register s2, int ia, Register d ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(ldsw_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
+inline void Assembler::ldswa( Register s1, int simm13a, Register d ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(ldsw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::lduba( Register s1, Register s2, int ia, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldub_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
+inline void Assembler::lduba( Register s1, int simm13a, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldub_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::lduha( Register s1, Register s2, int ia, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(lduh_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
+inline void Assembler::lduha( Register s1, int simm13a, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(lduh_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::lduwa( Register s1, Register s2, int ia, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(lduw_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
+inline void Assembler::lduwa( Register s1, int simm13a, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(lduw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::ldxa( Register s1, Register s2, int ia, Register d ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(ldx_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
+inline void Assembler::ldxa( Register s1, int simm13a, Register d ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(ldx_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::and3( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(and_op3 ) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::and3( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(and_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::andcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(and_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::andcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(and_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::andn( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(andn_op3 ) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::andn( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(andn_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::andncc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::andncc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::or3( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(or_op3 ) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::or3( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(or_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::orcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(or_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::orcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(or_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::orn( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::orn( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::orncc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(orn_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::orncc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(orn_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::xor3( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(xor_op3 ) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::xor3( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(xor_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::xorcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(xor_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::xorcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(xor_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::xnor( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(xnor_op3 ) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::xnor( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(xnor_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::xnorcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(xnor_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::xnorcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(xnor_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::membar( Membar_mask_bits const7a ) { v9_only(); emit_int32( op(arith_op) | op3(membar_op3) | rs1(O7) | immed(true) | u_field( int(const7a), 6, 0)); }
+inline void Assembler::fmov( FloatRegisterImpl::Width w, Condition c, bool floatCC, CC cca, FloatRegister s2, FloatRegister d ) { v9_only(); emit_int32( op(arith_op) | fd(d, w) | op3(fpop2_op3) | cond_mov(c) | opf_cc(cca, floatCC) | opf_low6(w) | fs2(s2, w)); }
+inline void Assembler::fmov( FloatRegisterImpl::Width w, RCondition c, Register s1, FloatRegister s2, FloatRegister d ) { v9_only(); emit_int32( op(arith_op) | fd(d, w) | op3(fpop2_op3) | rs1(s1) | rcond(c) | opf_low5(4 + w) | fs2(s2, w)); }
+inline void Assembler::movcc( Condition c, bool floatCC, CC cca, Register s2, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(movcc_op3) | mov_cc(cca, floatCC) | cond_mov(c) | rs2(s2) ); }
+inline void Assembler::movcc( Condition c, bool floatCC, CC cca, int simm11a, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(movcc_op3) | mov_cc(cca, floatCC) | cond_mov(c) | immed(true) | simm(simm11a, 11) ); }
+inline void Assembler::movr( RCondition c, Register s1, Register s2, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(movr_op3) | rs1(s1) | rcond(c) | rs2(s2) ); }
+inline void Assembler::movr( RCondition c, Register s1, int simm10a, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(movr_op3) | rs1(s1) | rcond(c) | immed(true) | simm(simm10a, 10) ); }
+inline void Assembler::mulx( Register s1, Register s2, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(mulx_op3 ) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::mulx( Register s1, int simm13a, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(mulx_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::sdivx( Register s1, Register s2, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(sdivx_op3) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::sdivx( Register s1, int simm13a, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(sdivx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::udivx( Register s1, Register s2, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(udivx_op3) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::udivx( Register s1, int simm13a, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(udivx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::umul( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(umul_op3 ) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::umul( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(umul_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::smul( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 ) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::smul( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::umulcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(umul_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::umulcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(umul_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::smulcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::smulcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::nop() { emit_int32( op(branch_op) | op2(sethi_op2) ); }
+inline void Assembler::sw_count() { emit_int32( op(branch_op) | op2(sethi_op2) | 0x3f0 ); }
+inline void Assembler::popc( Register s, Register d) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(popc_op3) | rs2(s)); }
+inline void Assembler::popc( int simm13a, Register d) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(popc_op3) | immed(true) | simm(simm13a, 13)); }
+inline void Assembler::prefetch( Register s1, Register s2, PrefetchFcn f) { v9_only(); emit_int32( op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::prefetch( Register s1, int simm13a, PrefetchFcn f) { v9_only(); emit_data( op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
+inline void Assembler::prefetcha( Register s1, Register s2, int ia, PrefetchFcn f ) { v9_only(); emit_int32( op(ldst_op) | fcn(f) | op3(prefetch_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
+inline void Assembler::prefetcha( Register s1, int simm13a, PrefetchFcn f ) { v9_only(); emit_int32( op(ldst_op) | fcn(f) | op3(prefetch_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+inline void Assembler::rdy( Register d) { v9_dep(); emit_int32( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(0, 18, 14)); }
+inline void Assembler::rdccr( Register d) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(2, 18, 14)); }
+inline void Assembler::rdasi( Register d) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(3, 18, 14)); }
+inline void Assembler::rdtick( Register d) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(4, 18, 14)); } // Spoon!
+inline void Assembler::rdpc( Register d) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(5, 18, 14)); }
+inline void Assembler::rdfprs( Register d) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(6, 18, 14)); }
 inline void Assembler::rett( Register s1, Register s2 ) { cti(); emit_int32( op(arith_op) | op3(rett_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
 inline void Assembler::rett( Register s1, int simm13a, relocInfo::relocType rt) { cti(); emit_data( op(arith_op) | op3(rett_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rt); has_delay_slot(); }
+inline void Assembler::save( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(save_op3) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::save( Register s1, int simm13a, Register d ) {
+  // make sure frame is at least large enough for the register save area
+  assert(-simm13a >= 16 * wordSize, "frame too small");
+  emit_int32( op(arith_op) | rd(d) | op3(save_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) );
+}
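// The 16-word minimum above is the register window save area: on a window
// spill trap the frame's 8 local and 8 in registers are stored at the stack
// pointer, so every frame must reserve at least that much space.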
+inline void Assembler::restore( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(restore_op3) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::restore( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(restore_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+// pp 216
+inline void Assembler::saved() { v9_only(); emit_int32( op(arith_op) | fcn(0) | op3(saved_op3)); }
+inline void Assembler::restored() { v9_only(); emit_int32( op(arith_op) | fcn(1) | op3(saved_op3)); }
 inline void Assembler::sethi( int imm22a, Register d, RelocationHolder const& rspec ) { emit_data( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(imm22a), rspec); }
+inline void Assembler::sll( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(0) | rs2(s2) ); }
+inline void Assembler::sll( Register s1, int imm5a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(0) | immed(true) | u_field(imm5a, 4, 0) ); }
+inline void Assembler::srl( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(0) | rs2(s2) ); }
+inline void Assembler::srl( Register s1, int imm5a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(0) | immed(true) | u_field(imm5a, 4, 0) ); }
+inline void Assembler::sra( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(0) | rs2(s2) ); }
+inline void Assembler::sra( Register s1, int imm5a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(0) | immed(true) | u_field(imm5a, 4, 0) ); }
+inline void Assembler::sllx( Register s1, Register s2, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(1) | rs2(s2) ); }
+inline void Assembler::sllx( Register s1, int imm6a, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(1) | immed(true) | u_field(imm6a, 5, 0) ); }
+inline void Assembler::srlx( Register s1, Register s2, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(1) | rs2(s2) ); }
+inline void Assembler::srlx( Register s1, int imm6a, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(1) | immed(true) | u_field(imm6a, 5, 0) ); }
+inline void Assembler::srax( Register s1, Register s2, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(1) | rs2(s2) ); }
+inline void Assembler::srax( Register s1, int imm6a, Register d ) { v9_only(); emit_int32( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(1) | immed(true) | u_field(imm6a, 5, 0) ); }
+inline void Assembler::sir( int simm13a ) { emit_int32( op(arith_op) | fcn(15) | op3(sir_op3) | immed(true) | simm(simm13a, 13)); }
+// pp 221
+inline void Assembler::stbar() { emit_int32( op(arith_op) | op3(membar_op3) | u_field(15, 18, 14)); }
 // pp 222
 inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2) { emit_int32( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | rs2(s2) ); }
@@ -120,6 +328,9 @@ inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Regi
inline void Assembler::stxfsr( Register s1, Register s2) { v9_only(); emit_int32( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stxfsr( Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stfa( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2, int ia ) { v9_only(); emit_int32( op(ldst_op) | fd(d, w) | alt_op3(stf_op3 | alt_bit_op3, w) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
inline void Assembler::stfa( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a ) { v9_only(); emit_int32( op(ldst_op) | fd(d, w) | alt_op3(stf_op3 | alt_bit_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
// p 226
inline void Assembler::stb( Register d, Register s1, Register s2) { emit_int32( op(ldst_op) | rd(d) | op3(stb_op3) | rs1(s1) | rs2(s2) ); }
@@ -135,9 +346,103 @@ inline void Assembler::stx( Register d, Register s1, int simm13a) { v9_only();
inline void Assembler::std( Register d, Register s1, Register s2) { v9_dep(); assert(d->is_even(), "not even"); emit_int32( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::std( Register d, Register s1, int simm13a) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stba( Register d, Register s1, Register s2, int ia ) { emit_int32( op(ldst_op) | rd(d) | op3(stb_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
inline void Assembler::stba( Register d, Register s1, int simm13a ) { emit_int32( op(ldst_op) | rd(d) | op3(stb_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
inline void Assembler::stha( Register d, Register s1, Register s2, int ia ) { emit_int32( op(ldst_op) | rd(d) | op3(sth_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
inline void Assembler::stha( Register d, Register s1, int simm13a ) { emit_int32( op(ldst_op) | rd(d) | op3(sth_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
inline void Assembler::stwa( Register d, Register s1, Register s2, int ia ) { emit_int32( op(ldst_op) | rd(d) | op3(stw_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
inline void Assembler::stwa( Register d, Register s1, int simm13a ) { emit_int32( op(ldst_op) | rd(d) | op3(stw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
inline void Assembler::stxa( Register d, Register s1, Register s2, int ia ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(stx_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
inline void Assembler::stxa( Register d, Register s1, int simm13a ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(stx_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
inline void Assembler::stda( Register d, Register s1, Register s2, int ia ) { emit_int32( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
inline void Assembler::stda( Register d, Register s1, int simm13a ) { emit_int32( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
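// Illustrative sketch (ASI value from the SPARC V9 spec, not from this
// file): the *a store forms write to an alternate address space named by the
// immediate ASI; e.g. an explicit 64-bit store to the primary space:
//
//   stxa %g1, [%o0] 0x80   ! 0x80 == ASI_PRIMARY on SPARC V9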
// pp 230
inline void Assembler::sub( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sub_op3 ) | rs1(s1) | rs2(s2) ); }
inline void Assembler::sub( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sub_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
inline void Assembler::subcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sub_op3 | cc_bit_op3 ) | rs1(s1) | rs2(s2) ); }
inline void Assembler::subcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sub_op3 | cc_bit_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
inline void Assembler::subc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(subc_op3 ) | rs1(s1) | rs2(s2) ); }
inline void Assembler::subc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(subc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
inline void Assembler::subccc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(subc_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::subccc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(subc_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
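// Usage note (standard SPARC idiom, not from this file): subcc with %g0 as
// the destination is how compares are expressed -- the result is discarded
// and only the icc/xcc condition codes (cc_bit_op3) are kept:
//
//   subcc %o0, %o1, %g0   ! synthetic "cmp %o0, %o1"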
// pp 231
inline void Assembler::swap( Register s1, Register s2, Register d) { v9_dep(); emit_int32( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::swap( Register s1, int simm13a, Register d) { v9_dep(); emit_data( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::swapa( Register s1, Register s2, int ia, Register d ) { v9_dep(); emit_int32( op(ldst_op) | rd(d) | op3(swap_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
inline void Assembler::swapa( Register s1, int simm13a, Register d ) { v9_dep(); emit_int32( op(ldst_op) | rd(d) | op3(swap_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
// pp 234, note op in book is wrong, see pp 268
inline void Assembler::taddcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(taddcc_op3 ) | rs1(s1) | rs2(s2) ); }
inline void Assembler::taddcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(taddcc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
// pp 235
inline void Assembler::tsubcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcc_op3 ) | rs1(s1) | rs2(s2) ); }
inline void Assembler::tsubcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
// pp 237
inline void Assembler::trap( Condition c, CC cc, Register s1, Register s2 ) { emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | rs2(s2)); }
inline void Assembler::trap( Condition c, CC cc, Register s1, int trapa ) { emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | immed(true) | u_field(trapa, 6, 0)); }
// simple uncond. trap
inline void Assembler::trap( int trapa ) { trap( always, icc, G0, trapa ); }
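// Illustrative sketch (standard V9 trap numbering, not from this file): the
// one-argument form emits an unconditional "trap always" on icc; a software
// trap number n vectors to trap type 0x100 + n:
//
//   ta %icc, 0x10   ! what trap(0x10) assembles to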
inline void Assembler::wry(Register d) { v9_dep(); emit_int32(op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(0, 29, 25)); }
inline void Assembler::wrccr(Register s) { v9_only(); emit_int32(op(arith_op) | rs1(s) | op3(wrreg_op3) | u_field(2, 29, 25)); }
inline void Assembler::wrccr(Register s, int simm13a) { v9_only(); emit_int32(op(arith_op) | rs1(s) | op3(wrreg_op3) | u_field(2, 29, 25) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::wrasi(Register d) { v9_only(); emit_int32(op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(3, 29, 25)); }
// wrasi(d, imm) stores (d xor imm) to asi
inline void Assembler::wrasi(Register d, int simm13a) { v9_only(); emit_int32(op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(3, 29, 25) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::wrfprs(Register d) { v9_only(); emit_int32(op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(6, 29, 25)); }
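// Usage note (follows from the xor semantics noted above): the write-state-
// register forms compute rs1 xor operand2, so passing G0 (always zero)
// writes the immediate unchanged; e.g., assuming the V9 ASI_PRIMARY value:
//
//   wrasi(G0, 0x80);   // %asi = 0 xor 0x80 = 0x80 (ASI_PRIMARY)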
inline void Assembler::alignaddr( Register s1, Register s2, Register d ) { vis1_only(); emit_int32( op(arith_op) | rd(d) | op3(alignaddr_op3) | rs1(s1) | opf(alignaddr_opf) | rs2(s2)); }
inline void Assembler::faligndata( FloatRegister s1, FloatRegister s2, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(faligndata_op3) | fs1(s1, FloatRegisterImpl::D) | opf(faligndata_opf) | fs2(s2, FloatRegisterImpl::D)); }
inline void Assembler::fzero( FloatRegisterImpl::Width w, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, w) | op3(fzero_op3) | opf(0x62 - w)); }
inline void Assembler::fsrc2( FloatRegisterImpl::Width w, FloatRegister s2, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, w) | op3(fsrc_op3) | opf(0x7A - w) | fs2(s2, w)); }
inline void Assembler::fnot1( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, w) | op3(fnot_op3) | fs1(s1, w) | opf(0x6C - w)); }
inline void Assembler::fpmerge( FloatRegister s1, FloatRegister s2, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(0x36) | fs1(s1, FloatRegisterImpl::S) | opf(0x4b) | fs2(s2, FloatRegisterImpl::S)); }
inline void Assembler::stpartialf( Register s1, Register s2, FloatRegister d, int ia ) { vis1_only(); emit_int32( op(ldst_op) | fd(d, FloatRegisterImpl::D) | op3(stpartialf_op3) | rs1(s1) | imm_asi(ia) | rs2(s2)); }
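// Illustrative sketch (the classic VIS1 unaligned-load idiom, not from this
// file): alignaddr rounds the address down to an 8-byte boundary and latches
// the low three bits in GSR.align, which faligndata then uses to extract the
// unaligned bytes from two adjacent doubleword loads:
//
//   alignaddr  %o0, %g0, %o1   ! %o1 = %o0 & ~7, GSR.align = %o0 & 7
//   ldd        [%o1], %f0
//   ldd        [%o1 + 8], %f2
//   faligndata %f0, %f2, %f4   ! %f4 = 8 bytes starting at original %o0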
// VIS2 instructions
inline void Assembler::edge8n( Register s1, Register s2, Register d ) { vis2_only(); emit_int32( op(arith_op) | rd(d) | op3(edge_op3) | rs1(s1) | opf(edge8n_opf) | rs2(s2)); }
inline void Assembler::bmask( Register s1, Register s2, Register d ) { vis2_only(); emit_int32( op(arith_op) | rd(d) | op3(bmask_op3) | rs1(s1) | opf(bmask_opf) | rs2(s2)); }
inline void Assembler::bshuffle( FloatRegister s1, FloatRegister s2, FloatRegister d ) { vis2_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(bshuffle_op3) | fs1(s1, FloatRegisterImpl::D) | opf(bshuffle_opf) | fs2(s2, FloatRegisterImpl::D)); }
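// Encoding note (standard VIS2 semantics, assumed): bmask adds its operands,
// returns the sum in rd and deposits it into GSR.mask; bshuffle then permutes
// bytes of the concatenated source pair according to that mask:
//
//   bmask    %o0, %g0, %o1   ! GSR.mask = %o0, %o1 = %o0
//   bshuffle %f0, %f2, %f4   ! bytes of {%f0,%f2} selected per GSR.mask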
// VIS3 instructions
inline void Assembler::movstosw( FloatRegister s, Register d ) { vis3_only(); emit_int32( op(arith_op) | rd(d) | op3(mftoi_op3) | opf(mstosw_opf) | fs2(s, FloatRegisterImpl::S)); }
inline void Assembler::movstouw( FloatRegister s, Register d ) { vis3_only(); emit_int32( op(arith_op) | rd(d) | op3(mftoi_op3) | opf(mstouw_opf) | fs2(s, FloatRegisterImpl::S)); }
inline void Assembler::movdtox( FloatRegister s, Register d ) { vis3_only(); emit_int32( op(arith_op) | rd(d) | op3(mftoi_op3) | opf(mdtox_opf) | fs2(s, FloatRegisterImpl::D)); }
inline void Assembler::movwtos( Register s, FloatRegister d ) { vis3_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::S) | op3(mftoi_op3) | opf(mwtos_opf) | rs2(s)); }
inline void Assembler::movxtod( Register s, FloatRegister d ) { vis3_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(mftoi_op3) | opf(mxtod_opf) | rs2(s)); }
inline void Assembler::xmulx(Register s1, Register s2, Register d) { vis3_only(); emit_int32( op(arith_op) | rd(d) | op3(xmulx_op3) | rs1(s1) | opf(xmulx_opf) | rs2(s2)); }
inline void Assembler::xmulxhi(Register s1, Register s2, Register d) { vis3_only(); emit_int32( op(arith_op) | rd(d) | op3(xmulx_op3) | rs1(s1) | opf(xmulxhi_opf) | rs2(s2)); }
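// Usage note (standard VIS3 semantics, assumed): xmulx/xmulxhi form a
// carry-less (XOR) multiply, the primitive behind CRC and GF(2) arithmetic;
// together they yield the low and high halves of the 128-bit product:
//
//   xmulx   %o0, %o1, %o2   ! low 64 bits of the carry-less product
//   xmulxhi %o0, %o1, %o3   ! high 64 bits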
// Crypto SHA instructions
inline void Assembler::sha1() { sha1_only(); emit_int32( op(arith_op) | op3(sha_op3) | opf(sha1_opf)); }
inline void Assembler::sha256() { sha256_only(); emit_int32( op(arith_op) | op3(sha_op3) | opf(sha256_opf)); }
inline void Assembler::sha512() { sha512_only(); emit_int32( op(arith_op) | op3(sha_op3) | opf(sha512_opf)); }
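// Note (SPARC crypto extension behavior, assumed): the SHA opcodes carry no
// explicit operands; they advance a hash state held implicitly in the
// floating-point registers, so callers must stage the state and message
// block in the registers the hardware expects before issuing them.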
// CRC32C instruction
inline void Assembler::crc32c( FloatRegister s1, FloatRegister s2, FloatRegister d ) { crc32c_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(crc32c_op3) | fs1(s1, FloatRegisterImpl::D) | opf(crc32c_opf) | fs2(s2, FloatRegisterImpl::D)); }
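// Note (assumed from the operand encoding above): crc32c consumes its two
// double-precision FP operands and writes one CRC32C reduction step to fd;
// stub code typically keeps the running crc and the data in FP registers and
// iterates this instruction over the buffer.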
#endif // CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP