Mirror of https://github.com/openjdk/jdk.git, synced 2025-08-28 15:24:43 +02:00

Commit 4e4e5a196b: Merge
1404 changed files with 86167 additions and 18708 deletions
@@ -5,3 +5,5 @@ nbproject/private/
 ^.hgtip
 ^.bridge2
 .DS_Store
+.metadata/
+.recommenders/

@@ -330,3 +330,4 @@ ce5c14d97d95084504c32b9320cb33cce4235588 jdk9-b83
 1f345217c9bab05f192d00cf1665b3286c49ccdb jdk9-b85
 2aa1daf98d3e2ee37f20f6858c53cc37020f6937 jdk9-b86
 fd4f4f7561074dc0dbc1772c8489c7b902b6b8a9 jdk9-b87
+0bb87e05d83e1cf41cfb7ddeb2c8eaec539fd907 jdk9-b88
README-builds.html (3406 lines changed; diff suppressed because it is too large)
README-builds.md (new file, 1263 lines; diff suppressed because it is too large)
@@ -410,6 +410,7 @@ AC_DEFUN_ONCE([BASIC_SETUP_FUNDAMENTAL_TOOLS],
   BASIC_REQUIRE_PROGS(NAWK, [nawk gawk awk])
   BASIC_REQUIRE_PROGS(PRINTF, printf)
   BASIC_REQUIRE_PROGS(RM, rm)
+  BASIC_REQUIRE_PROGS(RMDIR, rmdir)
   BASIC_REQUIRE_PROGS(SH, sh)
   BASIC_REQUIRE_PROGS(SORT, sort)
   BASIC_REQUIRE_PROGS(TAIL, tail)
@@ -305,6 +305,16 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK],
   BOOT_JDK_SOURCETARGET="-source 8 -target 8"
   AC_SUBST(BOOT_JDK_SOURCETARGET)
   AC_SUBST(JAVAC_FLAGS)
+
+  # Check if the boot jdk is 32 or 64 bit
+  if "$JAVA" -d64 -version > /dev/null 2>&1; then
+    BOOT_JDK_BITS="64"
+  else
+    BOOT_JDK_BITS="32"
+  fi
+  AC_MSG_CHECKING([if Boot JDK is 32 or 64 bits])
+  AC_MSG_RESULT([$BOOT_JDK_BITS])
+  AC_SUBST(BOOT_JDK_BITS)
 ])

 AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS],
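The BOOT_JDK_BITS value added above is obtained simply by asking the boot JDK's launcher whether it accepts the -d64 flag. A minimal standalone sketch of that probe, assuming only that a java launcher is reachable (the BOOT_JAVA variable is an illustrative placeholder, not part of the patch):

#!/bin/sh
# Probe whether a JDK launcher runs a 64-bit VM by testing the -d64 flag,
# mirroring the check added to BOOTJDK_SETUP_BOOT_JDK above.
BOOT_JAVA="${BOOT_JAVA:-java}"
if "$BOOT_JAVA" -d64 -version > /dev/null 2>&1; then
  BOOT_JDK_BITS="64"
else
  BOOT_JDK_BITS="32"
fi
echo "Boot JDK is $BOOT_JDK_BITS bits"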
@@ -341,7 +351,7 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS],
   # Maximum amount of heap memory.
   # Maximum stack size.
   JVM_MAX_HEAP=`expr $MEMORY_SIZE / 2`
-  if test "x$BUILD_NUM_BITS" = x32; then
+  if test "x$BOOT_JDK_BITS" = "x32"; then
     if test "$JVM_MAX_HEAP" -gt "1100"; then
       JVM_MAX_HEAP=1100
     elif test "$JVM_MAX_HEAP" -lt "512"; then

@@ -349,10 +359,7 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS],
     fi
     STACK_SIZE=768
   else
-    # Running Javac on a JVM on a 64-bit machine, takes more space since 64-bit
-    # pointers are used. Apparently, we need to increase the heap and stack
-    # space for the jvm. More specifically, when running javac to build huge
-    # jdk batch
+    # Running a 64 bit JVM allows for and requires a bigger heap
     if test "$JVM_MAX_HEAP" -gt "1600"; then
       JVM_MAX_HEAP=1600
     elif test "$JVM_MAX_HEAP" -lt "512"; then
@@ -328,12 +328,23 @@ AC_DEFUN_ONCE([BPERF_SETUP_SMART_JAVAC],

   AC_ARG_ENABLE([sjavac], [AS_HELP_STRING([--enable-sjavac],
       [use sjavac to do fast incremental compiles @<:@disabled@:>@])],
-      [ENABLE_SJAVAC="${enableval}"], [ENABLE_SJAVAC='no'])
+      [ENABLE_SJAVAC="${enableval}"], [ENABLE_SJAVAC="no"])
   if test "x$JVM_ARG_OK" = "xfalse"; then
     AC_MSG_WARN([Could not set -Xms${MS_VALUE}M -Xmx${MX_VALUE}M, disabling sjavac])
-    ENABLE_SJAVAC=no;
+    ENABLE_SJAVAC="no"
   fi
   AC_MSG_CHECKING([whether to use sjavac])
   AC_MSG_RESULT([$ENABLE_SJAVAC])
   AC_SUBST(ENABLE_SJAVAC)

+  AC_ARG_ENABLE([javac-server], [AS_HELP_STRING([--enable-javac-server],
+      [use only the server part of sjavac for faster javac compiles @<:@disabled@:>@])],
+      [ENABLE_JAVAC_SERVER="${enableval}"], [ENABLE_JAVAC_SERVER="no"])
+  if test "x$JVM_ARG_OK" = "xfalse"; then
+    AC_MSG_WARN([Could not set -Xms${MS_VALUE}M -Xmx${MX_VALUE}M, disabling javac server])
+    ENABLE_JAVAC_SERVER="no"
+  fi
+  AC_MSG_CHECKING([whether to use javac server])
+  AC_MSG_RESULT([$ENABLE_JAVAC_SERVER])
+  AC_SUBST(ENABLE_JAVAC_SERVER)
 ])
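The new --enable-javac-server option defaults to "no" and is disabled again if the requested -Xms/-Xmx arguments cannot be set, exactly like the existing --enable-sjavac option. A hedged sketch of passing the flags to configure (the surrounding invocation is assumed; only the flags themselves come from this change):

# Run from a separate build directory; the relative path to configure is an assumption.
bash ../configure --enable-sjavac
bash ../configure --enable-javac-server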
@@ -66,7 +66,7 @@ export STRIP="@STRIP@ @STRIPFLAGS@"
 export TEE="@TEE@"
 export UNIQ="@UNIQ@"
 export UNPACK200="@FIXPATH@ @BOOT_JDK@/bin/unpack200"
-export UNZIP="@UNZIP@"
+export UNARCHIVE="@UNZIP@ -q"

 export SRC_ROOT="@TOPDIR@"
 export OUTPUT_ROOT="@OUTPUT_ROOT@"
@@ -632,6 +632,7 @@ LIBOBJS
 CFLAGS_CCACHE
 CCACHE
 USE_PRECOMPILED_HEADER
+ENABLE_JAVAC_SERVER
 ENABLE_SJAVAC
 SJAVAC_SERVER_JAVA_FLAGS
 SJAVAC_SERVER_JAVA

@@ -815,6 +816,7 @@ JAXWS_TOPDIR
 JAXP_TOPDIR
 CORBA_TOPDIR
 LANGTOOLS_TOPDIR
+BOOT_JDK_BITS
 JAVAC_FLAGS
 BOOT_JDK_SOURCETARGET
 JARSIGNER

@@ -968,6 +970,7 @@ TAR
 TAIL
 SORT
 SH
+RMDIR
 RM
 PRINTF
 NAWK

@@ -1115,6 +1118,7 @@ with_jobs
 with_boot_jdk_jvmargs
 with_sjavac_server_java
 enable_sjavac
+enable_javac_server
 enable_precompiled_headers
 enable_ccache
 with_ccache_dir

@@ -1146,6 +1150,7 @@ MV
 NAWK
 PRINTF
 RM
+RMDIR
 SH
 SORT
 TAIL

@@ -1864,6 +1869,8 @@ Optional Features:
                           --with-freetype, disabled otherwise]
   --enable-sjavac         use sjavac to do fast incremental compiles
                           [disabled]
+  --enable-javac-server   use only the server part of sjavac for faster javac
+                          compiles [disabled]
   --disable-precompiled-headers
                           disable using precompiled headers when compiling C++
                           [enabled]

@@ -2025,6 +2032,7 @@ Some influential environment variables:
   NAWK        Override default value for NAWK
   PRINTF      Override default value for PRINTF
   RM          Override default value for RM
+  RMDIR       Override default value for RMDIR
   SH          Override default value for SH
   SORT        Override default value for SORT
   TAIL        Override default value for TAIL

@@ -4587,7 +4595,7 @@ VS_SDK_PLATFORM_NAME_2013=
 #CUSTOM_AUTOCONF_INCLUDE

 # Do not change or remove the following line, it is needed for consistency checks:
-DATE_WHEN_GENERATED=1444643341
+DATE_WHEN_GENERATED=1445964676

 ###############################################################################
 #
@@ -9511,6 +9519,209 @@ $as_echo "$tool_specified" >&6; }


+# Publish this variable in the help.
+
+
+if [ -z "${RMDIR+x}" ]; then
+  # The variable is not set by user, try to locate tool using the code snippet
+  for ac_prog in rmdir
+  do
+    # Extract the first word of "$ac_prog", so it can be a program name with args.
+    set dummy $ac_prog; ac_word=$2
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+    $as_echo_n "checking for $ac_word... " >&6; }
+    if ${ac_cv_path_RMDIR+:} false; then :
+      $as_echo_n "(cached) " >&6
+    else
+      case $RMDIR in
+        [\\/]* | ?:[\\/]*)
+          ac_cv_path_RMDIR="$RMDIR" # Let the user override the test with a path.
+          ;;
+        *)
+          as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+          for as_dir in $PATH
+          do
+            IFS=$as_save_IFS
+            test -z "$as_dir" && as_dir=.
+            for ac_exec_ext in '' $ac_executable_extensions; do
+              if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+                ac_cv_path_RMDIR="$as_dir/$ac_word$ac_exec_ext"
+                $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+                break 2
+              fi
+            done
+          done
+          IFS=$as_save_IFS
+
+          ;;
+      esac
+    fi
+    RMDIR=$ac_cv_path_RMDIR
+    if test -n "$RMDIR"; then
+      { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RMDIR" >&5
+      $as_echo "$RMDIR" >&6; }
+    else
+      { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+      $as_echo "no" >&6; }
+    fi
+
+
+    test -n "$RMDIR" && break
+  done
+
+else
+  # The variable is set, but is it from the command line or the environment?
+
+  # Try to remove the string !RMDIR! from our list.
+  try_remove_var=${CONFIGURE_OVERRIDDEN_VARIABLES//!RMDIR!/}
+  if test "x$try_remove_var" = "x$CONFIGURE_OVERRIDDEN_VARIABLES"; then
+    # If it failed, the variable was not from the command line. Ignore it,
+    # but warn the user (except for BASH, which is always set by the calling BASH).
+    if test "xRMDIR" != xBASH; then
+      { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Ignoring value of RMDIR from the environment. Use command line variables instead." >&5
+      $as_echo "$as_me: WARNING: Ignoring value of RMDIR from the environment. Use command line variables instead." >&2;}
+    fi
+    # Try to locate tool using the code snippet
+    for ac_prog in rmdir
+    do
+      # Extract the first word of "$ac_prog", so it can be a program name with args.
+      set dummy $ac_prog; ac_word=$2
+      { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+      $as_echo_n "checking for $ac_word... " >&6; }
+      if ${ac_cv_path_RMDIR+:} false; then :
+        $as_echo_n "(cached) " >&6
+      else
+        case $RMDIR in
+          [\\/]* | ?:[\\/]*)
+            ac_cv_path_RMDIR="$RMDIR" # Let the user override the test with a path.
+            ;;
+          *)
+            as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+            for as_dir in $PATH
+            do
+              IFS=$as_save_IFS
+              test -z "$as_dir" && as_dir=.
+              for ac_exec_ext in '' $ac_executable_extensions; do
+                if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+                  ac_cv_path_RMDIR="$as_dir/$ac_word$ac_exec_ext"
+                  $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+                  break 2
+                fi
+              done
+            done
+            IFS=$as_save_IFS
+
+            ;;
+        esac
+      fi
+      RMDIR=$ac_cv_path_RMDIR
+      if test -n "$RMDIR"; then
+        { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RMDIR" >&5
+        $as_echo "$RMDIR" >&6; }
+      else
+        { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+        $as_echo "no" >&6; }
+      fi
+
+
+      test -n "$RMDIR" && break
+    done
+
+  else
+    # If it succeeded, then it was overridden by the user. We will use it
+    # for the tool.
+
+    # First remove it from the list of overridden variables, so we can test
+    # for unknown variables in the end.
+    CONFIGURE_OVERRIDDEN_VARIABLES="$try_remove_var"
+
+    # Check if we try to supply an empty value
+    if test "x$RMDIR" = x; then
+      { $as_echo "$as_me:${as_lineno-$LINENO}: Setting user supplied tool RMDIR= (no value)" >&5
+      $as_echo "$as_me: Setting user supplied tool RMDIR= (no value)" >&6;}
+      { $as_echo "$as_me:${as_lineno-$LINENO}: checking for RMDIR" >&5
+      $as_echo_n "checking for RMDIR... " >&6; }
+      { $as_echo "$as_me:${as_lineno-$LINENO}: result: disabled" >&5
+      $as_echo "disabled" >&6; }
+    else
+      # Check if the provided tool contains a complete path.
+      tool_specified="$RMDIR"
+      tool_basename="${tool_specified##*/}"
+      if test "x$tool_basename" = "x$tool_specified"; then
+        # A command without a complete path is provided, search $PATH.
+        { $as_echo "$as_me:${as_lineno-$LINENO}: Will search for user supplied tool RMDIR=$tool_basename" >&5
+        $as_echo "$as_me: Will search for user supplied tool RMDIR=$tool_basename" >&6;}
+        # Extract the first word of "$tool_basename", so it can be a program name with args.
+        set dummy $tool_basename; ac_word=$2
+        { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+        $as_echo_n "checking for $ac_word... " >&6; }
+        if ${ac_cv_path_RMDIR+:} false; then :
+          $as_echo_n "(cached) " >&6
+        else
+          case $RMDIR in
+            [\\/]* | ?:[\\/]*)
+              ac_cv_path_RMDIR="$RMDIR" # Let the user override the test with a path.
+              ;;
+            *)
+              as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+              for as_dir in $PATH
+              do
+                IFS=$as_save_IFS
+                test -z "$as_dir" && as_dir=.
+                for ac_exec_ext in '' $ac_executable_extensions; do
+                  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+                    ac_cv_path_RMDIR="$as_dir/$ac_word$ac_exec_ext"
+                    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+                    break 2
+                  fi
+                done
+              done
+              IFS=$as_save_IFS
+
+              ;;
+          esac
+        fi
+        RMDIR=$ac_cv_path_RMDIR
+        if test -n "$RMDIR"; then
+          { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RMDIR" >&5
+          $as_echo "$RMDIR" >&6; }
+        else
+          { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+          $as_echo "no" >&6; }
+        fi
+
+
+        if test "x$RMDIR" = x; then
+          as_fn_error $? "User supplied tool $tool_basename could not be found" "$LINENO" 5
+        fi
+      else
+        # Otherwise we believe it is a complete path. Use it as it is.
+        { $as_echo "$as_me:${as_lineno-$LINENO}: Will use user supplied tool RMDIR=$tool_specified" >&5
+        $as_echo "$as_me: Will use user supplied tool RMDIR=$tool_specified" >&6;}
+        { $as_echo "$as_me:${as_lineno-$LINENO}: checking for RMDIR" >&5
+        $as_echo_n "checking for RMDIR... " >&6; }
+        if test ! -x "$tool_specified"; then
+          { $as_echo "$as_me:${as_lineno-$LINENO}: result: not found" >&5
+          $as_echo "not found" >&6; }
+          as_fn_error $? "User supplied tool RMDIR=$tool_specified does not exist or is not executable" "$LINENO" 5
+        fi
+        { $as_echo "$as_me:${as_lineno-$LINENO}: result: $tool_specified" >&5
+        $as_echo "$tool_specified" >&6; }
+      fi
+    fi
+  fi
+fi
+
+
+
+if test "x$RMDIR" = x; then
+  as_fn_error $? "Could not find required tool for RMDIR" "$LINENO" 5
+fi
+
+
+
 # Publish this variable in the help.

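The generated block above is the standard tool-resolution boilerplate emitted for every required tool: honor a value supplied on the configure command line, otherwise search $PATH, and abort configure if nothing usable is found. A heavily condensed sketch of that control flow, with illustrative names only (this is not the generated code):

#!/bin/sh
# resolve_tool VALUE PROG: keep a user-supplied VALUE, else look PROG up in PATH, else fail.
resolve_tool() {
  tool="$1"
  if [ -z "$tool" ]; then
    tool=$(command -v "$2" 2> /dev/null)
  fi
  if [ -z "$tool" ]; then
    echo "configure: error: Could not find required tool for $2" 1>&2
    exit 1
  fi
  echo "$tool"
}
RMDIR=$(resolve_tool "$RMDIR" rmdir)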
@@ -26920,6 +27131,18 @@ $as_echo "$tool_specified" >&6; }


+  # Check if the boot jdk is 32 or 64 bit
+  if "$JAVA" -d64 -version > /dev/null 2>&1; then
+    BOOT_JDK_BITS="64"
+  else
+    BOOT_JDK_BITS="32"
+  fi
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking if Boot JDK is 32 or 64 bits" >&5
+$as_echo_n "checking if Boot JDK is 32 or 64 bits... " >&6; }
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $BOOT_JDK_BITS" >&5
+$as_echo "$BOOT_JDK_BITS" >&6; }
+
+
 ###############################################################################
 #

@@ -53099,7 +53322,7 @@ $as_echo_n "checking flags for boot jdk java command for big workloads... " >&6;
   # Maximum amount of heap memory.
   # Maximum stack size.
   JVM_MAX_HEAP=`expr $MEMORY_SIZE / 2`
-  if test "x$BUILD_NUM_BITS" = x32; then
+  if test "x$BOOT_JDK_BITS" = "x32"; then
     if test "$JVM_MAX_HEAP" -gt "1100"; then
       JVM_MAX_HEAP=1100
     elif test "$JVM_MAX_HEAP" -lt "512"; then

@@ -53107,10 +53330,7 @@ $as_echo_n "checking flags for boot jdk java command for big workloads... " >&6;
     fi
     STACK_SIZE=768
   else
-    # Running Javac on a JVM on a 64-bit machine, takes more space since 64-bit
-    # pointers are used. Apparently, we need to increase the heap and stack
-    # space for the jvm. More specifically, when running javac to build huge
-    # jdk batch
+    # Running a 64 bit JVM allows for and requires a bigger heap
     if test "$JVM_MAX_HEAP" -gt "1600"; then
       JVM_MAX_HEAP=1600
     elif test "$JVM_MAX_HEAP" -lt "512"; then

@@ -53299,13 +53519,13 @@ fi
 if test "${enable_sjavac+set}" = set; then :
   enableval=$enable_sjavac; ENABLE_SJAVAC="${enableval}"
 else
-  ENABLE_SJAVAC='no'
+  ENABLE_SJAVAC="no"
 fi

 if test "x$JVM_ARG_OK" = "xfalse"; then
   { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Could not set -Xms${MS_VALUE}M -Xmx${MX_VALUE}M, disabling sjavac" >&5
 $as_echo "$as_me: WARNING: Could not set -Xms${MS_VALUE}M -Xmx${MX_VALUE}M, disabling sjavac" >&2;}
-  ENABLE_SJAVAC=no;
+  ENABLE_SJAVAC="no"
 fi
 { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use sjavac" >&5
 $as_echo_n "checking whether to use sjavac... " >&6; }

@@ -53313,6 +53533,24 @@ $as_echo_n "checking whether to use sjavac... " >&6; }
 $as_echo "$ENABLE_SJAVAC" >&6; }


+# Check whether --enable-javac-server was given.
+if test "${enable_javac_server+set}" = set; then :
+  enableval=$enable_javac_server; ENABLE_JAVAC_SERVER="${enableval}"
+else
+  ENABLE_JAVAC_SERVER="no"
+fi
+
+if test "x$JVM_ARG_OK" = "xfalse"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Could not set -Xms${MS_VALUE}M -Xmx${MX_VALUE}M, disabling javac server" >&5
+$as_echo "$as_me: WARNING: Could not set -Xms${MS_VALUE}M -Xmx${MX_VALUE}M, disabling javac server" >&2;}
+  ENABLE_JAVAC_SERVER="no"
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use javac server" >&5
+$as_echo_n "checking whether to use javac server... " >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ENABLE_JAVAC_SERVER" >&5
+$as_echo "$ENABLE_JAVAC_SERVER" >&6; }
+

 # Can the C/C++ compiler use precompiled headers?
@@ -245,6 +245,7 @@ MEMORY_SIZE:=@MEMORY_SIZE@
 # Enable sjavac support = use a javac server,
 # multi core javac compilation and dependency tracking.
 ENABLE_SJAVAC:=@ENABLE_SJAVAC@
+ENABLE_JAVAC_SERVER:=@ENABLE_JAVAC_SERVER@
 # Store sjavac server synchronization files here, and
 # the sjavac server log files.
 SJAVAC_SERVER_DIR=$(MAKESUPPORT_OUTPUTDIR)/javacservers

@@ -504,6 +505,7 @@ PATCH:=@PATCH@
 PRINTF:=@PRINTF@
 PWD:=@THEPWDCMD@
 RM:=@RM@
+RMDIR:=@RMDIR@
 SED:=@SED@
 SH:=@SH@
 SORT:=@SORT@
@@ -51,8 +51,6 @@ else
   STAT_PRINT_SIZE="-c %s"
 fi

-UNARCHIVE="$UNZIP -q"
-
 COMPARE_EXCEPTIONS_INCLUDE="$SRC_ROOT/common/bin/compare_exceptions.sh.incl"
 if [ ! -e "$COMPARE_EXCEPTIONS_INCLUDE" ]; then
   echo "Error: Cannot locate the exceptions file, it should have been here: $COMPARE_EXCEPTIONS_INCLUDE"
common/bin/update-build-readme.sh (new file, 62 lines)
@@ -0,0 +1,62 @@
+#!/bin/bash
+
+# Get an absolute path to this script, since that determines the top-level
+# directory.
+this_script_dir=`dirname $0`
+TOPDIR=`cd $this_script_dir/../.. > /dev/null && pwd`
+
+GREP=grep
+MD_FILE=$TOPDIR/README-builds.md
+HTML_FILE=$TOPDIR/README-builds.html
+
+# Locate the markdown processor tool and check that it is the correct version.
+locate_markdown_processor() {
+  if [ -z "$MARKDOWN" ]; then
+    MARKDOWN=`which markdown 2> /dev/null`
+    if [ -z "$MARKDOWN" ]; then
+      echo "Error: Cannot locate markdown processor" 1>&2
+      exit 1
+    fi
+  fi
+
+  # Test version
+  MARKDOWN_VERSION=`$MARKDOWN -version | $GREP version`
+  if [ "x$MARKDOWN_VERSION" != "xThis is Markdown, version 1.0.1." ]; then
+    echo "Error: Expected markdown version 1.0.1." 1>&2
+    echo "Actual version found: $MARKDOWN_VERSION" 1>&2
+    echo "Download markdown here: https://daringfireball.net/projects/markdown/" 1>&2
+    exit 1
+  fi
+
+}
+
+# Verify that the source markdown file looks sound.
+verify_source_code() {
+  TOO_LONG_LINES=`$GREP -E -e '^.{80}.+$' $MD_FILE`
+  if [ "x$TOO_LONG_LINES" != x ]; then
+    echo "Warning: The following lines are longer than 80 characters:"
+    $GREP -E -e '^.{80}.+$' $MD_FILE
+  fi
+}
+
+# Convert the markdown file to html format.
+process_source() {
+  echo "Generating html file from markdown"
+  cat > $HTML_FILE << END
+<html>
+<head>
+<title>OpenJDK Build README</title>
+</head>
+<body>
+END
+  markdown $MD_FILE >> $HTML_FILE
+  cat >> $HTML_FILE <<END
+</body>
+</html>
+END
+  echo "Done"
+}
+
+locate_markdown_processor
+verify_source_code
+process_source
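A hedged usage sketch for the new script, assuming the Markdown 1.0.1 processor it checks for is installed (the binary path shown is a placeholder):

# Regenerate README-builds.html from README-builds.md; paths are resolved relative to the script.
bash common/bin/update-build-readme.sh
# Point the script at a specific markdown binary via the MARKDOWN variable it honors:
MARKDOWN=/usr/local/bin/markdown bash common/bin/update-build-readme.sh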
@@ -330,3 +330,4 @@ df70bb200356fec686681f0295c50cc3ed43c3b3 jdk9-b84
 3ec06af1368924469f7ce60a00324bac55eaeecc jdk9-b85
 0a3f0d25c201b40575a7c3920fce4d6f4d3ae310 jdk9-b86
 a5c40ac9b916ff44d512ee764fa919ed2097e149 jdk9-b87
+00f48ecbc09915f793d9e5ad74ab0b25f2549bf5 jdk9-b88
@@ -567,6 +567,11 @@ public class IIOPInputStream
             // XXX I18N, logging needed.
             throw new NotActiveException("defaultReadObjectDelegate");

+        if (!currentClassDesc.forClass().isAssignableFrom(
+                currentObject.getClass())) {
+            throw new IOException("Object Type mismatch");
+        }
+
         // The array will be null unless fields were retrieved
         // remotely because of a serializable version difference.
         // Bug fix for 4365188. See the definition of

@@ -1063,6 +1068,9 @@ public class IIOPInputStream

         int spBase = spClass; // current top of stack

+        if (currentClass.getName().equals("java.lang.String")) {
+            return this.readUTF();
+        }
         /* The object's classes should be processed from supertype to subtype
          * Push all the clases of the current object onto a stack.
          * Note that only the serializable classes are represented
@@ -2257,6 +2265,27 @@ public class IIOPInputStream

                 try {
                     Class fieldCl = fields[i].getClazz();
+                    if ((objectValue != null)
+                            && (!fieldCl.isAssignableFrom(
+                                    objectValue.getClass()))) {
+                        throw new IllegalArgumentException("Field mismatch");
+                    }
+                    Field classField = null;
+                    try {
+                        classField = cl.getDeclaredField(fields[i].getName());
+                    } catch (NoSuchFieldException nsfEx) {
+                        throw new IllegalArgumentException(nsfEx);
+                    } catch (SecurityException secEx) {
+                        throw new IllegalArgumentException(secEx.getCause());
+                    }
+                    Class<?> declaredFieldClass = classField.getType();
+
+                    // check input field type is a declared field type
+                    // input field is a subclass of the declared field
+                    if (!declaredFieldClass.isAssignableFrom(fieldCl)) {
+                        throw new IllegalArgumentException(
+                                "Field Type mismatch");
+                    }
                     if (objectValue != null && !fieldCl.isInstance(objectValue)) {
                         throw new IllegalArgumentException();
                     }
@@ -559,6 +559,10 @@ public class IIOPOutputStream
          * Push all the clases of the current object onto a stack.
          * Remember the stack pointer where this set of classes is being pushed.
          */
+        if (currentClassDesc.forClass().getName().equals("java.lang.String")) {
+            this.writeUTF((String)obj);
+            return;
+        }
         int stackMark = classDescStack.size();
         try {
             ObjectStreamClass next;
@@ -446,6 +446,9 @@ public class StubGenerator extends sun.rmi.rmic.iiop.Generator {
         if (emitPermissionCheck) {

            // produce the following generated code for example
+           //
+           // private transient boolean _instantiated = false;
+           //
            // private static Void checkPermission() {
            // SecurityManager sm = System.getSecurityManager();
            // if (sm != null) {

@@ -460,11 +463,21 @@ public class StubGenerator extends sun.rmi.rmic.iiop.Generator {
            //
            // public _XXXXX_Stub() {
            // this(checkPermission());
+           // _instantiated = true;
+           // }
+           //
+           // private void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException {
+           // checkPermission();
+           // s.defaultReadObject();
+           // _instantiated = true;
            // }
            //
            // where XXXXX is the name of the remote interface

            p.pln();
+           p.plnI("private transient boolean _instantiated = false;");
+           p.pln();
+           p.pO();
            p.plnI("private static Void checkPermission() {");
            p.plnI("SecurityManager sm = System.getSecurityManager();");
            p.pln("if (sm != null) {");

@@ -481,13 +494,23 @@ public class StubGenerator extends sun.rmi.rmic.iiop.Generator {
            p.pO();

            p.pI();
-           p.pln("private " + currentClass + "(Void ignore) { }");
+           p.plnI("private " + currentClass + "(Void ignore) { }");
            p.pln();
+           p.pO();
+
            p.plnI("public " + currentClass + "() { ");
            p.pln("this(checkPermission());");
+           p.pln("_instantiated = true;");
            p.pOln("}");
            p.pln();
+           p.plnI("private void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException {");
+           p.plnI("checkPermission();");
+           p.pO();
+           p.pln("s.defaultReadObject();");
+           p.pln("_instantiated = true;");
+           p.pOln("}");
+           p.pln();
+           //p.pO();
         }

         if (!emitPermissionCheck) {

@@ -894,6 +917,7 @@ public class StubGenerator extends sun.rmi.rmic.iiop.Generator {
         String paramNames[] = method.getArgumentNames();
         Type returnType = method.getReturnType();
         ValueType[] exceptions = getStubExceptions(method,false);
+        boolean hasIOException = false;

         addNamesInUse(method);
         addNameInUse("_type_ids");

@@ -921,6 +945,13 @@ public class StubGenerator extends sun.rmi.rmic.iiop.Generator {
        p.plnI(" {");

        // Now create the method body...
+       if (emitPermissionCheck) {
+           p.pln("if ((System.getSecurityManager() != null) && (!_instantiated)) {");
+           p.plnI(" throw new java.io.IOError(new java.io.IOException(\"InvalidObject \"));");
+           p.pOln("}");
+           p.pln();
+       }
+
        if (localStubs) {
            writeLocalStubMethodBody(p,method,theType);
@@ -490,3 +490,4 @@ e9e63d93bbfe2c6c23447e2c1f5cc71c98671cba jdk9-b79
 03845376ea9dbf9690b6a9cfb4ed63f8cc0541c0 jdk9-b85
 1ae4191359d811a51512f17dca80ffe79837a5ff jdk9-b86
 d7ffd16382fe7071181b967932b47cff6d1312e1 jdk9-b87
+bc48b669bc6610fac97e16593050c0f559cf6945 jdk9-b88
@@ -67,9 +67,6 @@ public class ImmutableOopMapSet extends VMObject {
       }
     }

-    public void visitValueLocation(Address valueAddr) {
-    }
-
     public void visitNarrowOopLocation(Address narrowOopAddr) {
       addressVisitor.visitCompOopAddress(narrowOopAddr);
     }

@@ -216,9 +213,9 @@ public class ImmutableOopMapSet extends VMObject {
      }
    }

-   // We want narow oop, value and oop oop_types
-   OopMapValue.OopTypes[] values = new OopMapValue.OopTypes[]{
-       OopMapValue.OopTypes.OOP_VALUE, OopMapValue.OopTypes.VALUE_VALUE, OopMapValue.OopTypes.NARROWOOP_VALUE
+   // We want narow oop and oop oop_types
+   OopMapValue.OopTypes[] values = new OopMapValue.OopTypes[] {
+       OopMapValue.OopTypes.OOP_VALUE, OopMapValue.OopTypes.NARROWOOP_VALUE
    };

    {

@@ -231,8 +228,6 @@ public class ImmutableOopMapSet extends VMObject {
        // to detect in the debugging system
        // assert(Universe::is_heap_or_null(*loc), "found non oop pointer");
        visitor.visitOopLocation(loc);
-     } else if (omv.getType() == OopMapValue.OopTypes.VALUE_VALUE) {
-       visitor.visitValueLocation(loc);
      } else if (omv.getType() == OopMapValue.OopTypes.NARROWOOP_VALUE) {
        visitor.visitNarrowOopLocation(loc);
      }
@@ -49,7 +49,6 @@ public class OopMapValue {
   // Types of OopValues
   static int UNUSED_VALUE;
   static int OOP_VALUE;
-  static int VALUE_VALUE;
   static int NARROWOOP_VALUE;
   static int CALLEE_SAVED_VALUE;
   static int DERIVED_OOP_VALUE;

@@ -73,7 +72,6 @@ public class OopMapValue {
     REGISTER_MASK_IN_PLACE = db.lookupIntConstant("OopMapValue::register_mask_in_place").intValue();
     UNUSED_VALUE = db.lookupIntConstant("OopMapValue::unused_value").intValue();
     OOP_VALUE = db.lookupIntConstant("OopMapValue::oop_value").intValue();
-    VALUE_VALUE = db.lookupIntConstant("OopMapValue::value_value").intValue();
     NARROWOOP_VALUE = db.lookupIntConstant("OopMapValue::narrowoop_value").intValue();
     CALLEE_SAVED_VALUE = db.lookupIntConstant("OopMapValue::callee_saved_value").intValue();
     DERIVED_OOP_VALUE = db.lookupIntConstant("OopMapValue::derived_oop_value").intValue();

@@ -82,7 +80,6 @@ public class OopMapValue {
   public static abstract class OopTypes {
     public static final OopTypes UNUSED_VALUE = new OopTypes() { int getValue() { return OopMapValue.UNUSED_VALUE; }};
     public static final OopTypes OOP_VALUE = new OopTypes() { int getValue() { return OopMapValue.OOP_VALUE; }};
-    public static final OopTypes VALUE_VALUE = new OopTypes() { int getValue() { return OopMapValue.VALUE_VALUE; }};
     public static final OopTypes NARROWOOP_VALUE = new OopTypes() { int getValue() { return OopMapValue.NARROWOOP_VALUE; }};
     public static final OopTypes CALLEE_SAVED_VALUE = new OopTypes() { int getValue() { return OopMapValue.CALLEE_SAVED_VALUE; }};
     public static final OopTypes DERIVED_OOP_VALUE = new OopTypes() { int getValue() { return OopMapValue.DERIVED_OOP_VALUE; }};

@@ -105,7 +102,6 @@ public class OopMapValue {

   // Querying
   public boolean isOop() { return (getValue() & TYPE_MASK_IN_PLACE) == OOP_VALUE; }
-  public boolean isValue() { return (getValue() & TYPE_MASK_IN_PLACE) == VALUE_VALUE; }
   public boolean isNarrowOop() { return (getValue() & TYPE_MASK_IN_PLACE) == NARROWOOP_VALUE; }
   public boolean isCalleeSaved() { return (getValue() & TYPE_MASK_IN_PLACE) == CALLEE_SAVED_VALUE; }
   public boolean isDerivedOop() { return (getValue() & TYPE_MASK_IN_PLACE) == DERIVED_OOP_VALUE; }

@@ -117,7 +113,6 @@ public class OopMapValue {
     int which = (getValue() & TYPE_MASK_IN_PLACE);
     if (which == UNUSED_VALUE) return OopTypes.UNUSED_VALUE;
     else if (which == OOP_VALUE) return OopTypes.OOP_VALUE;
-    else if (which == VALUE_VALUE) return OopTypes.VALUE_VALUE;
     else if (which == NARROWOOP_VALUE) return OopTypes.NARROWOOP_VALUE;
     else if (which == CALLEE_SAVED_VALUE) return OopTypes.CALLEE_SAVED_VALUE;
     else if (which == DERIVED_OOP_VALUE) return OopTypes.DERIVED_OOP_VALUE;
@@ -31,6 +31,5 @@ import sun.jvm.hotspot.debugger.*;
 public interface OopMapVisitor {
   public void visitOopLocation(Address oopAddr);
   public void visitDerivedOopLocation(Address baseOopAddr, Address derivedOopAddr);
-  public void visitValueLocation(Address valueAddr);
   public void visitNarrowOopLocation(Address narrowOopAddr);
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -52,21 +52,19 @@ public class Method extends Metadata {
   }

   private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
-    Type type = db.lookupType("Method");
+    type = db.lookupType("Method");
     constMethod = type.getAddressField("_constMethod");
     methodData = type.getAddressField("_method_data");
     methodCounters = type.getAddressField("_method_counters");
-    methodSize = new CIntField(type.getCIntegerField("_method_size"), 0);
     accessFlags = new CIntField(type.getCIntegerField("_access_flags"), 0);
     code = type.getAddressField("_code");
     vtableIndex = new CIntField(type.getCIntegerField("_vtable_index"), 0);
-    bytecodeOffset = type.getSize();

     /*
-    interpreterEntry = type.getAddressField("_interpreter_entry");
     fromCompiledCodeEntryPoint = type.getAddressField("_from_compiled_code_entry_point");
+    interpreterEntry = type.getAddressField("_from_interpreted_entry");
     */

     objectInitializerName = null;
     classInitializerName = null;
   }

@@ -77,16 +75,22 @@ public class Method extends Metadata {

   public boolean isMethod() { return true; }

+  // Not a Method field, used to keep type.
+  private static Type type;
+
   // Fields
   private static AddressField constMethod;
   private static AddressField methodData;
   private static AddressField methodCounters;
-  private static CIntField methodSize;
   private static CIntField accessFlags;
   private static CIntField vtableIndex;
-  private static long bytecodeOffset;

   private static AddressField code;
+  /*
+  private static AddressCField fromCompiledCodeEntryPoint;
+  private static AddressField interpreterEntry;
+  */


   // constant method names - <init>, <clinit>
   // Initialized lazily to avoid initialization ordering dependencies between Method and SymbolTable

@@ -106,11 +110,6 @@ public class Method extends Metadata {
   }


-  /*
-  private static AddressCField interpreterEntry;
-  private static AddressCField fromCompiledCodeEntryPoint;
-  */
-
   // Accessors for declared fields
   public ConstMethod getConstMethod() {
     Address addr = constMethod.getValue(getAddress());

@@ -128,7 +127,6 @@ public class Method extends Metadata {
     return (MethodCounters) VMObjectFactory.newObject(MethodCounters.class, addr);
   }
   /** WARNING: this is in words, not useful in this system; use getObjectSize() instead */
-  public long getMethodSize() { return methodSize.getValue(this); }
   public long getMaxStack() { return getConstMethod().getMaxStack(); }
   public long getMaxLocals() { return getConstMethod().getMaxLocals(); }
   public long getSizeOfParameters() { return getConstMethod().getSizeOfParameters(); }

@@ -265,7 +263,7 @@ public class Method extends Metadata {
   }

   public long getSize() {
-    return getMethodSize();
+    return type.getSize() + (isNative() ? 2: 0);
   }

   public void printValueOn(PrintStream tty) {

@@ -273,7 +271,6 @@ public class Method extends Metadata {
   }

   public void iterateFields(MetadataVisitor visitor) {
-    visitor.doCInt(methodSize, true);
     visitor.doCInt(accessFlags, true);
   }

@@ -536,9 +536,6 @@ public abstract class Frame implements Cloneable {
       }
     }

-    public void visitValueLocation(Address valueAddr) {
-    }
-
     public void visitNarrowOopLocation(Address compOopAddr) {
       addressVisitor.visitCompOopAddress(compOopAddr);
     }
@@ -1220,9 +1220,6 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
          oms = new OopMapStream(map, OopMapValue.OopTypes.NARROWOOP_VALUE);
          buf.append(omvIterator.iterate(oms, "NarrowOops:", false));

-         oms = new OopMapStream(map, OopMapValue.OopTypes.VALUE_VALUE);
-         buf.append(omvIterator.iterate(oms, "Values:", false));
-
          oms = new OopMapStream(map, OopMapValue.OopTypes.CALLEE_SAVED_VALUE);
          buf.append(omvIterator.iterate(oms, "Callee saved:", true));

@@ -28,4 +28,7 @@ TYPE=COMPILER1

 VM_SUBDIR = client

+# We don't support the JVMCI in a client VM.
+INCLUDE_JVMCI := false
+
 CFLAGS += -DCOMPILER1

@@ -149,6 +149,7 @@ ifeq ($(USE_CLANG), true)
  PCH_FLAG/sharedRuntimeTrig.o = $(PCH_FLAG/NO_PCH)
  PCH_FLAG/sharedRuntimeTrans.o = $(PCH_FLAG/NO_PCH)
  PCH_FLAG/unsafe.o = $(PCH_FLAG/NO_PCH)
+ PCH_FLAG/jvmciCompilerToVM.o = $(PCH_FLAG/NO_PCH)

 endif
 else # ($(USE_CLANG), true)
@@ -313,10 +314,11 @@ endif

 # Work around some compiler bugs.
 ifeq ($(USE_CLANG), true)
-  # Clang <= 6.1
+  # Clang < 6 | <= 6.1 | <= 7.0
   ifeq ($(shell expr \
       $(CC_VER_MAJOR) \< 6 \| \
-      \( $(CC_VER_MAJOR) = 6 \& $(CC_VER_MINOR) \<= 1 \) \
+      \( $(CC_VER_MAJOR) = 6 \& $(CC_VER_MINOR) \<= 1 \) \| \
+      \( $(CC_VER_MAJOR) = 7 \& $(CC_VER_MINOR) \<= 0 \) \
     ), 1)
     OPT_CFLAGS/loopTransform.o += $(OPT_CFLAGS/NOOPT)
     OPT_CFLAGS/unsafe.o += -O1
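The workaround above is gated on the clang version with shell expr arithmetic evaluated by make. A minimal sketch of the same three-way version test outside make, with version variables assumed to be filled in from the compiler's reported version:

#!/bin/sh
# CC_VER_MAJOR / CC_VER_MINOR are assumed inputs (e.g. parsed from the compiler's --version output).
CC_VER_MAJOR=7
CC_VER_MINOR=0
if [ "$(expr \( $CC_VER_MAJOR \< 6 \) \| \
         \( $CC_VER_MAJOR = 6 \& $CC_VER_MINOR \<= 1 \) \| \
         \( $CC_VER_MAJOR = 7 \& $CC_VER_MINOR \<= 0 \) )" = "1" ]; then
  echo "Applying clang optimization workaround"
fi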
@@ -62,7 +62,7 @@ endif
 $(LIBJSIG): $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE)
	@echo $(LOG_INFO) Making signal interposition lib...
	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
-		$(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) -o $@ $<
+		$(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) $(EXTRA_CFLAGS) -o $@ $<
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
   ifeq ($(OS_VENDOR), Darwin)
	$(DSYMUTIL) $@

@@ -38,6 +38,7 @@ INCLUDE_ALL_GCS := false
 INCLUDE_NMT := false
 INCLUDE_TRACE := false
 INCLUDE_CDS := false
+INCLUDE_JVMCI := false

 CXXFLAGS += -DMINIMAL_JVM -DCOMPILER1 -DVMTYPE=\"Minimal\"
 CFLAGS += -DMINIMAL_JVM -DCOMPILER1 -DVMTYPE=\"Minimal\"

@@ -106,6 +106,25 @@ ifeq ($(INCLUDE_NMT), false)
      memTracker.cpp nmtDCmd.cpp mallocSiteTable.cpp
 endif

+ifneq (,$(findstring $(Platform_arch_model), x86_64, sparc))
+  # JVMCI is supported only on x86_64 and SPARC.
+else
+  INCLUDE_JVMCI := false
+endif
+
+ifeq ($(INCLUDE_JVMCI), false)
+  CXXFLAGS += -DINCLUDE_JVMCI=0
+  CFLAGS += -DINCLUDE_JVMCI=0
+
+  jvmci_dir := $(HS_COMMON_SRC)/share/vm/jvmci
+  jvmci_dir_alt := $(HS_ALT_SRC)/share/vm/jvmci
+  jvmci_exclude := $(notdir $(wildcard $(jvmci_dir)/*.cpp)) \
+      $(notdir $(wildcard $(jvmci_dir_alt)/*.cpp))
+  Src_Files_EXCLUDE += $(jvmci_exclude) \
+      jvmciCodeInstaller_aarch64.cpp jvmciCodeInstaller_ppc.cpp jvmciCodeInstaller_sparc.cpp \
+      jvmciCodeInstaller_x86.cpp
+endif
+
 -include $(HS_ALT_MAKE)/excludeSrc.make

 .PHONY: $(HS_ALT_MAKE)/excludeSrc.make
hotspot/make/gensrc/Gensrc-jdk.vm.ci.gmk (new file, 122 lines)
@@ -0,0 +1,122 @@
+#
+# Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation. Oracle designates this
+# particular file as subject to the "Classpath" exception as provided
+# by Oracle in the LICENSE file that accompanied this code.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+default: all
+
+include $(SPEC)
+include MakeBase.gmk
+include JavaCompilation.gmk
+include SetupJavaCompilers.gmk
+
+GENSRC_DIR := $(SUPPORT_OUTPUTDIR)/gensrc/jdk.vm.ci
+SRC_DIR := $(HOTSPOT_TOPDIR)/src/jdk.vm.ci/share/classes
+
+################################################################################
+# Compile the annotation processor
+
+$(eval $(call SetupJavaCompilation, BUILD_JVMCI_OPTIONS, \
+    SETUP := GENERATE_OLDBYTECODE, \
+    SRC := $(SRC_DIR)/jdk.vm.ci.options/src \
+        $(SRC_DIR)/jdk.vm.ci.options.processor/src \
+        $(SRC_DIR)/jdk.vm.ci.inittimer/src, \
+    BIN := $(BUILDTOOLS_OUTPUTDIR)/jvmci_options, \
+    JAR := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.ci.options.jar, \
+))
+
+$(eval $(call SetupJavaCompilation, BUILD_JVMCI_SERVICE, \
+    SETUP := GENERATE_OLDBYTECODE, \
+    SRC := $(SRC_DIR)/jdk.vm.ci.service/src \
+        $(SRC_DIR)/jdk.vm.ci.service.processor/src, \
+    BIN := $(BUILDTOOLS_OUTPUTDIR)/jvmci_service, \
+    JAR := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.ci.service.jar, \
+))
+
+################################################################################
+
+PROC_SRC_SUBDIRS := \
+    jdk.vm.ci.compiler \
+    jdk.vm.ci.hotspot \
+    jdk.vm.ci.hotspot.amd64 \
+    jdk.vm.ci.hotspot.sparc \
+    #
+
+PROC_SRC_DIRS := $(patsubst %, $(SRC_DIR)/%/src, $(PROC_SRC_SUBDIRS))
+
+PROC_SRCS := $(filter %.java, $(call CacheFind, $(PROC_SRC_DIRS)))
+
+ALL_SRC_DIRS := $(wildcard $(SRC_DIR)/*/src)
+SOURCEPATH := $(call PathList, $(ALL_SRC_DIRS))
+PROCESSOR_PATH := $(call PathList, \
+    $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.ci.options.jar \
+    $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.ci.service.jar)
+
+$(GENSRC_DIR)/_gensrc_proc_done: $(PROC_SRCS) \
+    $(BUILD_JVMCI_OPTIONS) $(BUILD_JVMCI_SERVICE)
+	$(MKDIR) -p $(@D)
+	$(eval $(call ListPathsSafely,PROC_SRCS,$(@D)/_gensrc_proc_files))
+	$(JAVA_SMALL) $(NEW_JAVAC) \
+	    -XDignore.symbol.file \
+	    -sourcepath $(SOURCEPATH) \
+	    -implicit:none \
+	    -proc:only \
+	    -processorpath $(PROCESSOR_PATH) \
+	    -d $(GENSRC_DIR) \
+	    -s $(GENSRC_DIR) \
+	    @$(@D)/_gensrc_proc_files
+	$(TOUCH) $@
+
+TARGETS += $(GENSRC_DIR)/_gensrc_proc_done
+
+################################################################################
+
+$(GENSRC_DIR)/META-INF/services/jdk.vm.ci.options.OptionDescriptors: \
+    $(GENSRC_DIR)/_gensrc_proc_done
+	$(MKDIR) -p $(@D)
+	($(CD) $(GENSRC_DIR)/META-INF/jvmci.options && \
+	    $(RM) -f $@; \
+	    for i in $$(ls); do \
+	        echo $${i}_OptionDescriptors >> $@; \
+	    done)
+
+TARGETS += $(GENSRC_DIR)/META-INF/services/jdk.vm.ci.options.OptionDescriptors
+
+################################################################################
+
+$(GENSRC_DIR)/_providers_converted: $(GENSRC_DIR)/_gensrc_proc_done
+	$(MKDIR) -p $(GENSRC_DIR)/META-INF/services
+	($(CD) $(GENSRC_DIR)/META-INF/jvmci.providers && \
+	    for i in $$($(LS)); do \
+	        c=$$($(CAT) $$i | $(TR) -d '\n\r'); \
+	        $(ECHO) $$i >> $(GENSRC_DIR)/META-INF/services/$$c; \
+	    done)
+	$(TOUCH) $@
+
+TARGETS += $(GENSRC_DIR)/_providers_converted
+
+################################################################################
+
+all: $(TARGETS)
+
+.PHONY: default all
@@ -28,4 +28,7 @@ TYPE=COMPILER1

 VM_SUBDIR = client

+# We don't support the JVMCI in a client VM.
+INCLUDE_JVMCI := false
+
 CFLAGS += -DCOMPILER1
@@ -213,12 +213,16 @@ ifeq ($(USE_CLANG),)
  # Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
  # conversions which might affect the values. Only enable it in earlier versions.
  ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
+    # GCC < 4.3
     WARNING_FLAGS += -Wconversion
  endif
  ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1"
+    # GCC >= 4.8
     # This flag is only known since GCC 4.3. Gcc 4.8 contains a fix so that with templates no
     # warnings are issued: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=11856
     WARNING_FLAGS += -Wtype-limits
+    # GCC < 4.8 don't accept this flag for C++.
+    WARNING_FLAGS += -Wno-format-zero-length
  endif
 endif
@@ -38,6 +38,7 @@ INCLUDE_ALL_GCS := false
 INCLUDE_NMT := false
 INCLUDE_TRACE := false
 INCLUDE_CDS := false
+INCLUDE_JVMCI := false

 CXXFLAGS += -DMINIMAL_JVM -DCOMPILER1 -DVMTYPE=\"Minimal\"
 CFLAGS += -DMINIMAL_JVM -DCOMPILER1 -DVMTYPE=\"Minimal\"
@@ -28,4 +28,7 @@ TYPE=COMPILER1

 VM_SUBDIR = client

+# We don't support the JVMCI in a client VM.
+INCLUDE_JVMCI := false
+
 CFLAGS += -DCOMPILER1
@@ -52,6 +52,7 @@ UNIQ="$MKS_HOME/uniq.exe"
 CAT="$MKS_HOME/cat.exe"
 RM="$MKS_HOME/rm.exe"
 DUMPBIN="link.exe /dump"
+export VS_UNICODE_OUTPUT=

 if [ "$1" = "-nosa" ]; then
   echo EXPORTS > vm.def
@@ -111,6 +111,7 @@ esac

 COMPILER2_SPECIFIC_FILES="opto libadt bcEscapeAnalyzer.cpp c2_* runtime_*"
 COMPILER1_SPECIFIC_FILES="c1_*"
+JVMCI_SPECIFIC_FILES="*jvmci* *JVMCI*"
 SHARK_SPECIFIC_FILES="shark"
 ZERO_SPECIFIC_FILES="zero"

@@ -119,11 +120,11 @@ Src_Files_EXCLUDE="jsig.c jvmtiEnvRecommended.cpp jvmtiEnvStub.cpp"

 # Exclude per type.
 case "${TYPE}" in
-  "compiler1") Src_Files_EXCLUDE="${Src_Files_EXCLUDE} ${COMPILER2_SPECIFIC_FILES} ${ZERO_SPECIFIC_FILES} ${SHARK_SPECIFIC_FILES} ciTypeFlow.cpp" ;;
+  "compiler1") Src_Files_EXCLUDE="${Src_Files_EXCLUDE} ${COMPILER2_SPECIFIC_FILES} ${JVMCI_SPECIFIC_FILES} ${ZERO_SPECIFIC_FILES} ${SHARK_SPECIFIC_FILES} ciTypeFlow.cpp" ;;
   "compiler2") Src_Files_EXCLUDE="${Src_Files_EXCLUDE} ${COMPILER1_SPECIFIC_FILES} ${ZERO_SPECIFIC_FILES} ${SHARK_SPECIFIC_FILES}" ;;
   "tiered") Src_Files_EXCLUDE="${Src_Files_EXCLUDE} ${ZERO_SPECIFIC_FILES} ${SHARK_SPECIFIC_FILES}" ;;
-  "zero") Src_Files_EXCLUDE="${Src_Files_EXCLUDE} ${COMPILER1_SPECIFIC_FILES} ${COMPILER2_SPECIFIC_FILES} ${SHARK_SPECIFIC_FILES} ciTypeFlow.cpp" ;;
+  "zero") Src_Files_EXCLUDE="${Src_Files_EXCLUDE} ${COMPILER1_SPECIFIC_FILES} ${COMPILER2_SPECIFIC_FILES} ${JVMCI_SPECIFIC_FILES} ${SHARK_SPECIFIC_FILES} ciTypeFlow.cpp" ;;
-  "shark") Src_Files_EXCLUDE="${Src_Files_EXCLUDE} ${COMPILER1_SPECIFIC_FILES} ${COMPILER2_SPECIFIC_FILES} ${ZERO_SPECIFIC_FILES}" ;;
+  "shark") Src_Files_EXCLUDE="${Src_Files_EXCLUDE} ${COMPILER1_SPECIFIC_FILES} ${COMPILER2_SPECIFIC_FILES} ${JVMCI_SPECIFIC_FILES} ${ZERO_SPECIFIC_FILES}" ;;
 esac

 # Special handling of arch model.
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@ CXX=cl.exe
 # /nologo   Supress copyright message at every cl.exe startup
 # /W3       Warning level 3
 # /Zi       Include debugging information
+# /d2Zi+    Extended debugging symbols for optimized code (/Zo in VS2013 Update 3 and later)
 # /WX       Treat any warning error as a fatal error
 # /MD       Use dynamic multi-threaded runtime (msvcrt.dll or msvc*NN.dll)
 # /MTd      Use static multi-threaded runtime debug versions

@@ -57,7 +58,7 @@ CXX_FLAGS=$(EXTRA_CFLAGS) /nologo /W3 /WX

 # Let's add debug information when Full Debug Symbols is enabled
 !if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1"
-CXX_FLAGS=$(CXX_FLAGS) /Zi
+CXX_FLAGS=$(CXX_FLAGS) /Zi /d2Zi+
 !endif

 # Based on BUILDARCH we add some flags and select the default compiler name
@@ -145,6 +145,10 @@ ProjectCreatorIDEOptionsIgnoreCompiler1=\
 -ignorePath_TARGET tiered \
 -ignorePath_TARGET c1_

+ProjectCreatorIDEOptionsIgnoreJVMCI=\
+ -ignorePath_TARGET src/share/vm/jvmci \
+ -ignorePath_TARGET vm/jvmci
+
 ProjectCreatorIDEOptionsIgnoreCompiler2=\
 -ignorePath_TARGET compiler2 \
 -ignorePath_TARGET tiered \

@@ -165,6 +169,8 @@ ProjectCreatorIDEOptionsIgnoreCompiler2=\
 ##################################################
 ProjectCreatorIDEOptions=$(ProjectCreatorIDEOptions) \
 -define_compiler1 COMPILER1 \
+ -define_compiler1 INCLUDE_JVMCI=0 \
+ $(ProjectCreatorIDEOptionsIgnoreJVMCI:TARGET=compiler1) \
 $(ProjectCreatorIDEOptionsIgnoreCompiler2:TARGET=compiler1)

 ##################################################
@@ -40,7 +40,7 @@ CXX_FLAGS=$(CXX_FLAGS) /homeparams
 !endif

 !if "$(Variant)" == "compiler1"
-CXX_FLAGS=$(CXX_FLAGS) /D "COMPILER1"
+CXX_FLAGS=$(CXX_FLAGS) /D "COMPILER1" /D INCLUDE_JVMCI=0
 !endif

 !if "$(Variant)" == "compiler2"
@@ -152,6 +152,7 @@ VM_PATH=$(VM_PATH);../generated/adfiles
 VM_PATH=$(VM_PATH);../generated/jvmtifiles
 VM_PATH=$(VM_PATH);../generated/tracefiles
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/c1
+VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/jvmci
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/compiler
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/code
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/interpreter

@@ -163,6 +164,7 @@ VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc/serial
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc/cms
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc/g1
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/asm
+VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/logging
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/memory
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/oops
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/prims

@@ -232,6 +234,9 @@ bytecodeInterpreterWithChecks.obj: ..\generated\jvmtifiles\bytecodeInterpreterWi
 {$(COMMONSRC)\share\vm\classfile}.cpp.obj::
	$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<

+{$(COMMONSRC)\share\vm\jvmci}.cpp.obj::
+	$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
+
 {$(COMMONSRC)\share\vm\gc\parallel}.cpp.obj::
	$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<

@@ -250,6 +255,9 @@ bytecodeInterpreterWithChecks.obj: ..\generated\jvmtifiles\bytecodeInterpreterWi
 {$(COMMONSRC)\share\vm\asm}.cpp.obj::
	$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<

+{$(COMMONSRC)\share\vm\logging}.cpp.obj::
+	$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
+
 {$(COMMONSRC)\share\vm\memory}.cpp.obj::
	$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<

@@ -330,6 +338,9 @@ bytecodeInterpreterWithChecks.obj: ..\generated\jvmtifiles\bytecodeInterpreterWi
 {$(ALTSRC)\share\vm\asm}.cpp.obj::
	$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<

+{$(ALTSRC)\share\vm\logging}.cpp.obj::
+	$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
+
 {$(ALTSRC)\share\vm\memory}.cpp.obj::
	$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
(File diff suppressed because it is too large.)
@@ -2311,6 +2311,12 @@ public:

 #define MSG "invalid arrangement"

+#define ASSERTION (T == T2S || T == T4S || T == T2D)
+  INSN(fsqrt, 1, 0b11111);
+  INSN(fabs,  0, 0b01111);
+  INSN(fneg,  1, 0b01111);
+#undef ASSERTION
+
 #define ASSERTION (T == T8B || T == T16B || T == T4H || T == T8H || T == T2S || T == T4S)
   INSN(rev64, 0, 0b00000);
 #undef ASSERTION
@@ -68,10 +68,11 @@ define_pd_global(intx, RegisterCostAreaRatio, 16000);

 // Peephole and CISC spilling both break the graph, and so makes the
 // scheduler sick.
-define_pd_global(bool, OptoPeephole, true);
+define_pd_global(bool, OptoPeephole, false);
 define_pd_global(bool, UseCISCSpill, true);
 define_pd_global(bool, OptoScheduling, false);
 define_pd_global(bool, OptoBundling, false);
+define_pd_global(bool, OptoRegScheduling, false);

 define_pd_global(intx, ReservedCodeCacheSize, 48*M);
 define_pd_global(intx, NonProfiledCodeHeapSize, 21*M);
@@ -51,13 +51,15 @@ bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
 // ----------------------------------------------------------------------------

 #define __ _masm.
-address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
+address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
   // Stub is fixed up when the corresponding call is converted from
   // calling compiled code to calling interpreted code.
   // mov rmethod, 0
   // jmp -4 # to self

-  address mark = cbuf.insts_mark();  // Get mark within main instrs section.
+  if (mark == NULL) {
+    mark = cbuf.insts_mark();  // Get mark within main instrs section.
+  }

   // Note that the code buffer's insts_mark is always relative to insts.
   // That's why we must use the macroassembler to generate a stub.
@@ -30,5 +30,6 @@

   void generate_more_monitors();
   void generate_deopt_handling();
+  void lock_method(void);

 #endif // CPU_AARCH64_VM_CPPINTERPRETERGENERATOR_AARCH64_HPP
@@ -55,10 +55,17 @@ define_pd_global(intx, CodeEntryAlignment, 16);
 define_pd_global(intx, OptoLoopAlignment, 16);
 define_pd_global(intx, InlineFrequencyCount, 100);

-define_pd_global(intx, StackYellowPages, 2);
-define_pd_global(intx, StackRedPages, 1);
-define_pd_global(intx, StackShadowPages, 4 DEBUG_ONLY(+5));
+#define DEFAULT_STACK_YELLOW_PAGES (2)
+#define DEFAULT_STACK_RED_PAGES (1)
+#define DEFAULT_STACK_SHADOW_PAGES (4 DEBUG_ONLY(+5))
+
+#define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
+#define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
+#define MIN_STACK_SHADOW_PAGES DEFAULT_STACK_SHADOW_PAGES
+
+define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES);
+define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES);
+define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES);

 define_pd_global(bool, RewriteBytecodes, true);
 define_pd_global(bool, RewriteFrequentPairs, true);
@@ -42,6 +42,11 @@

 // Implementation of InterpreterMacroAssembler

+void InterpreterMacroAssembler::jump_to_entry(address entry) {
+  assert(entry, "Entry must have been generated by now");
+  b(entry);
+}
+
 #ifndef CC_INTERP

 void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
@@ -1542,14 +1547,14 @@ void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register ca
   if (MethodData::profile_arguments()) {
     Label done;
     int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
-    add(mdp, mdp, off_to_args);

     for (int i = 0; i < TypeProfileArgsLimit; i++) {
       if (i > 0 || MethodData::profile_return()) {
         // If return value type is profiled we may have no argument to profile
-        ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
+        ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
         sub(tmp, tmp, i*TypeStackSlotEntries::per_arg_count());
         cmp(tmp, TypeStackSlotEntries::per_arg_count());
+        add(rscratch1, mdp, off_to_args);
         br(Assembler::LT, done);
       }
       ldr(tmp, Address(callee, Method::const_offset()));

@@ -1557,26 +1562,27 @@ void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register ca
       // stack offset o (zero based) from the start of the argument
       // list, for n arguments translates into offset n - o - 1 from
       // the end of the argument list
-      ldr(rscratch1, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args));
+      ldr(rscratch1, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))));
       sub(tmp, tmp, rscratch1);
       sub(tmp, tmp, 1);
       Address arg_addr = argument_address(tmp);
       ldr(tmp, arg_addr);

-      Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
+      Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i)));
       profile_obj_type(tmp, mdo_arg_addr);

       int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
-      add(mdp, mdp, to_add);
       off_to_args += to_add;
     }

     if (MethodData::profile_return()) {
-      ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
+      ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
       sub(tmp, tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
     }

+    add(rscratch1, mdp, off_to_args);
     bind(done);
+    mov(mdp, rscratch1);

     if (MethodData::profile_return()) {
       // We're right after the type profile for the last
@@ -66,6 +66,8 @@ class InterpreterMacroAssembler: public MacroAssembler {

   void load_earlyret_value(TosState state);

+  void jump_to_entry(address entry);
+
 #ifdef CC_INTERP
   void save_bcp() { /* not needed in c++ interpreter and harmless */ }
   void restore_bcp() { /* not needed in c++ interpreter and harmless */ }
@@ -41,14 +41,13 @@ private:
   address generate_native_entry(bool synchronized);
   address generate_abstract_entry(void);
   address generate_math_entry(AbstractInterpreter::MethodKind kind);
-  address generate_jump_to_normal_entry(void);
-  address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
-  address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
+  address generate_accessor_entry(void) { return NULL; }
+  address generate_empty_entry(void) { return NULL; }
   void generate_transcendental_entry(AbstractInterpreter::MethodKind kind, int fpargs);
   address generate_Reference_get_entry();
   address generate_CRC32_update_entry();
   address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind);
-  void lock_method(void);
+  address generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
   void generate_stack_overflow_check(void);

   void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
@@ -236,17 +236,6 @@ void InterpreterGenerator::generate_transcendental_entry(AbstractInterpreter::Me
   __ blrt(rscratch1, gpargs, fpargs, rtype);
 }

-// Jump into normal path for accessor and empty entry to jump to normal entry
-// The "fast" optimization don't update compilation count therefore can disable inlining
-// for these functions that should be inlined.
-address InterpreterGenerator::generate_jump_to_normal_entry(void) {
-  address entry_point = __ pc();
-
-  assert(Interpreter::entry_for_kind(Interpreter::zerolocals) != NULL, "should already be generated");
-  __ b(Interpreter::entry_for_kind(Interpreter::zerolocals));
-  return entry_point;
-}
-
 // Abstract method entry
 // Attempt to execute abstract method. Throw exception
 address InterpreterGenerator::generate_abstract_entry(void) {
hotspot/src/cpu/aarch64/vm/jvmciCodeInstaller_aarch64.cpp (new file, 68 lines)
@@ -0,0 +1,68 @@
/*
 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "jvmci/jvmciCodeInstaller.hpp"
#include "jvmci/jvmciRuntime.hpp"
#include "jvmci/jvmciCompilerToVM.hpp"
#include "jvmci/jvmciJavaClasses.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"

jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, oop method) {
  Unimplemented();
  return 0;
}

void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle& constant) {
  Unimplemented();
}

void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset) {
  Unimplemented();
}

void CodeInstaller::pd_relocate_CodeBlob(CodeBlob* cb, NativeInstruction* inst) {
  Unimplemented();
}

void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination) {
  Unimplemented();
}

void CodeInstaller::pd_relocate_JavaMethod(oop hotspot_method, jint pc_offset) {
  Unimplemented();
}

void CodeInstaller::pd_relocate_poll(address pc, jint mark) {
  Unimplemented();
}

// convert JVMCI register indices (as used in oop maps) to HotSpot registers
VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg) {
  return NULL;
}

bool CodeInstaller::is_general_purpose_reg(VMReg hotspotRegister) {
  return false;
}
@@ -1709,6 +1709,20 @@ int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb,
   return idivq_offset;
 }

+void MacroAssembler::membar(Membar_mask_bits order_constraint) {
+  address prev = pc() - NativeMembar::instruction_size;
+  if (prev == code()->last_membar()) {
+    NativeMembar *bar = NativeMembar_at(prev);
+    // We are merging two memory barrier instructions.  On AArch64 we
+    // can do this simply by ORing them together.
+    bar->set_kind(bar->get_kind() | order_constraint);
+    BLOCK_COMMENT("merged membar");
+  } else {
+    code()->set_last_membar(pc());
+    dmb(Assembler::barrier(order_constraint));
+  }
+}
+
 // MacroAssembler routines found actually to be needed

 void MacroAssembler::push(Register src)
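The merge in MacroAssembler::membar works because the DMB barrier options behave as a bitmask, so two adjacent barriers can be collapsed into a single instruction carrying the union of their constraints. A minimal standalone sketch of that bookkeeping follows; the flag values and the last-barrier record are illustrative assumptions, not HotSpot's actual Membar_mask_bits or CodeBuffer API.

    #include <cassert>
    #include <cstdint>

    // Simplified barrier option bits (assumed values for illustration).
    enum BarrierKind : uint32_t {
      LoadLoad   = 1 << 0,
      LoadStore  = 1 << 1,
      StoreLoad  = 1 << 2,
      StoreStore = 1 << 3
    };

    struct LastBarrier {
      bool     valid = false;  // was the previously emitted instruction a barrier?
      uint32_t kind  = 0;      // accumulated option bits of that barrier
    };

    // Emit a barrier, merging with the immediately preceding one when possible.
    void emit_membar(LastBarrier& last, uint32_t order_constraint) {
      if (last.valid) {
        last.kind |= order_constraint;   // merge: one barrier, union of constraints
      } else {
        last.valid = true;               // otherwise record a freshly emitted barrier
        last.kind  = order_constraint;
      }
    }

    int main() {
      LastBarrier last;
      emit_membar(last, StoreStore);
      emit_membar(last, StoreLoad);      // folded into the previous barrier
      assert(last.kind == (StoreStore | StoreLoad));
      return 0;
    }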
@@ -2238,7 +2252,7 @@ void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[])
       ttyLocker ttyl;
       ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
                       msg);
-      assert(false, err_msg("DEBUG MESSAGE: %s", msg));
+      assert(false, "DEBUG MESSAGE: %s", msg);
     }
   }
@@ -2286,18 +2300,30 @@ void MacroAssembler::c_stub_prolog(int gp_arg_count, int fp_arg_count, int ret_t
 }
 #endif

-void MacroAssembler::push_CPU_state() {
+void MacroAssembler::push_CPU_state(bool save_vectors) {
   push(0x3fffffff, sp);         // integer registers except lr & sp

+  if (!save_vectors) {
     for (int i = 30; i >= 0; i -= 2)
       stpd(as_FloatRegister(i), as_FloatRegister(i+1),
            Address(pre(sp, -2 * wordSize)));
+  } else {
+    for (int i = 30; i >= 0; i -= 2)
+      stpq(as_FloatRegister(i), as_FloatRegister(i+1),
+           Address(pre(sp, -4 * wordSize)));
+  }
 }

-void MacroAssembler::pop_CPU_state() {
+void MacroAssembler::pop_CPU_state(bool restore_vectors) {
+  if (!restore_vectors) {
     for (int i = 0; i < 32; i += 2)
       ldpd(as_FloatRegister(i), as_FloatRegister(i+1),
            Address(post(sp, 2 * wordSize)));
+  } else {
+    for (int i = 0; i < 32; i += 2)
+      ldpq(as_FloatRegister(i), as_FloatRegister(i+1),
+           Address(post(sp, 4 * wordSize)));
+  }
+
   pop(0x3fffffff, sp); // integer registers except lr & sp
 }
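The save_vectors paths switch from stpd/ldpd, which move pairs of 64-bit D registers (2 * wordSize bytes per pair), to stpq/ldpq, which move pairs of 128-bit Q registers (4 * wordSize bytes per pair). A small sketch of the resulting save-area arithmetic, assuming wordSize = 8 and the 32 floating-point registers used above:

    #include <cstdio>

    int main() {
      const int wordSize = 8;    // bytes per 64-bit stack word
      const int fp_regs  = 32;   // v0..v31

      // stpd stores a pair of 64-bit D registers: 2 * wordSize bytes per pair.
      int d_bytes = (fp_regs / 2) * 2 * wordSize;
      // stpq stores a pair of 128-bit Q registers: 4 * wordSize bytes per pair.
      int q_bytes = (fp_regs / 2) * 4 * wordSize;

      std::printf("scalar FP save area: %d bytes\n", d_bytes);   // 256
      std::printf("vector FP save area: %d bytes\n", q_bytes);   // 512
      return 0;
    }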
@@ -3027,6 +3053,24 @@ SkipIfEqual::~SkipIfEqual() {
   _masm->bind(_label);
 }

+void MacroAssembler::addptr(const Address &dst, int32_t src) {
+  Address adr;
+  switch(dst.getMode()) {
+  case Address::base_plus_offset:
+    // This is the expected mode, although we allow all the other
+    // forms below.
+    adr = form_address(rscratch2, dst.base(), dst.offset(), LogBytesPerWord);
+    break;
+  default:
+    lea(rscratch2, dst);
+    adr = Address(rscratch2);
+    break;
+  }
+  ldr(rscratch1, adr);
+  add(rscratch1, rscratch1, src);
+  str(rscratch1, adr);
+}
+
 void MacroAssembler::cmpptr(Register src1, Address src2) {
   unsigned long offset;
   adrp(rscratch1, src2, offset);
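The new addptr resolves the destination address once and then performs a load, add and store through the scratch registers. A simplified C++ model of that read-modify-write sequence, with plain pointers standing in for the Address forms and scratch registers (illustration only, not the assembler API):

    #include <cassert>
    #include <cstdint>

    // Add a 32-bit value to a 64-bit word in memory, mirroring the
    // form_address/lea, ldr, add, str steps in the assembler code above.
    void addptr(int64_t* base, int64_t offset_in_words, int32_t src) {
      int64_t* adr = base + offset_in_words;  // resolve the address once
      int64_t tmp = *adr;                     // ldr(rscratch1, adr)
      tmp += src;                             // add(rscratch1, rscratch1, src)
      *adr = tmp;                             // str(rscratch1, adr)
    }

    int main() {
      int64_t counters[4] = {0, 0, 41, 0};
      addptr(counters, 2, 1);
      assert(counters[2] == 42);
      return 0;
    }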
@@ -3063,11 +3107,15 @@ void MacroAssembler::store_check(Register obj) {

   if (UseCondCardMark) {
     Label L_already_dirty;
+    membar(StoreLoad);
     ldrb(rscratch2, Address(obj, rscratch1));
     cbz(rscratch2, L_already_dirty);
     strb(zr, Address(obj, rscratch1));
     bind(L_already_dirty);
   } else {
+    if (UseConcMarkSweepGC && CMSPrecleaningEnabled) {
+      membar(StoreStore);
+    }
     strb(zr, Address(obj, rscratch1));
   }
 }
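With UseCondCardMark the card byte is only written when it is not already dirty, and the added membar(StoreLoad) keeps the preceding reference store ordered before the card load. A rough, simplified model of that fast path is sketched below; the card values and layout are assumptions for illustration, the real barrier set and card table live elsewhere in HotSpot.

    #include <cstdint>

    // Conditional card marking: dirty the card only if it is not already
    // dirty, to cut write traffic on hot cards.  In the generated code a
    // StoreLoad barrier precedes the card load.
    static const uint8_t dirty_card = 0;     // strb(zr, ...) stores zero

    void store_check(volatile uint8_t* card) {
      // membar(StoreLoad) is emitted here in the assembly above.
      if (*card != dirty_card) {             // ldrb + cbz(L_already_dirty)
        *card = dirty_card;                  // strb(zr, Address(obj, rscratch1))
      }
    }

    int main() {
      volatile uint8_t card = 0xff;          // any non-zero value means "clean" in this model
      store_check(&card);
      return (card == dirty_card) ? 0 : 1;
    }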
@@ -152,6 +152,13 @@ class MacroAssembler: public Assembler {
     strw(scratch, a);
   }

+  void bind(Label& L) {
+    Assembler::bind(L);
+    code()->clear_last_membar();
+  }
+
+  void membar(Membar_mask_bits order_constraint);
+
   // Frame creation and destruction shared between JITs.
   void build_frame(int framesize);
   void remove_frame(int framesize);

@@ -777,8 +784,8 @@ public:

   DEBUG_ONLY(void verify_heapbase(const char* msg);)

-  void push_CPU_state();
-  void pop_CPU_state() ;
+  void push_CPU_state(bool save_vectors = false);
+  void pop_CPU_state(bool restore_vectors = false) ;

   // Round up to a power of two
   void round_to(Register reg, int modulus);

@@ -908,13 +915,7 @@ public:

   // Arithmetics

-  void addptr(Address dst, int32_t src) {
-    lea(rscratch2, dst);
-    ldr(rscratch1, Address(rscratch2));
-    add(rscratch1, rscratch1, src);
-    str(rscratch1, Address(rscratch2));
-  }
+  void addptr(const Address &dst, int32_t src);

   void cmpptr(Register src1, Address src2);

   // Various forms of CAS
@@ -50,7 +50,7 @@ void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_

 #ifdef ASSERT
 static int check_nonzero(const char* xname, int x) {
-  assert(x != 0, err_msg("%s should be nonzero", xname));
+  assert(x != 0, "%s should be nonzero", xname);
   return x;
 }
 #define NONZERO(x) check_nonzero(#x, x)

@@ -407,7 +407,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
     }

   default:
-    fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
+    fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
     break;
   }
@@ -101,6 +101,12 @@ class NativeInstruction VALUE_OBJ_CLASS_SPEC {
   static bool maybe_cpool_ref(address instr) {
     return is_adrp_at(instr) || is_ldr_literal_at(instr);
   }
+
+  bool is_Membar() {
+    unsigned int insn = uint_at(0);
+    return Instruction_aarch64::extract(insn, 31, 12) == 0b11010101000000110011 &&
+      Instruction_aarch64::extract(insn, 7, 0) == 0b10111111;
+  }
 };

 inline NativeInstruction* nativeInstruction_at(address address) {

@@ -487,4 +493,15 @@ inline NativeCallTrampolineStub* nativeCallTrampolineStub_at(address addr) {
   return (NativeCallTrampolineStub*)addr;
 }

+class NativeMembar : public NativeInstruction {
+public:
+  unsigned int get_kind() { return Instruction_aarch64::extract(uint_at(0), 11, 8); }
+  void set_kind(int order_kind) { Instruction_aarch64::patch(addr_at(0), 11, 8, order_kind); }
+};
+
+inline NativeMembar *NativeMembar_at(address addr) {
+  assert(nativeInstruction_at(addr)->is_Membar(), "no membar found");
+  return (NativeMembar*)addr;
+}
+
 #endif // CPU_AARCH64_VM_NATIVEINST_AARCH64_HPP
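is_Membar(), get_kind() and set_kind() all operate on the fixed 32-bit DMB encoding, whose option field sits in bits 11..8. A self-contained sketch of the same extract/patch arithmetic on the `dmb ish` encoding; the helper names here are illustrative stand-ins, not HotSpot's Instruction_aarch64 API:

    #include <cassert>
    #include <cstdint>

    // Extract bits [hi:lo] of an instruction word.
    static uint32_t extract(uint32_t insn, int hi, int lo) {
      return (insn >> lo) & ((1u << (hi - lo + 1)) - 1);
    }

    // Overwrite bits [hi:lo] of an instruction word with a new value.
    static uint32_t patch(uint32_t insn, int hi, int lo, uint32_t val) {
      uint32_t mask = ((1u << (hi - lo + 1)) - 1) << lo;
      return (insn & ~mask) | ((val << lo) & mask);
    }

    int main() {
      uint32_t dmb_ish = 0xd5033bbf;                 // "dmb ish" encoding
      assert(extract(dmb_ish, 31, 12) == 0xd5033);   // matches the is_Membar() check
      assert(extract(dmb_ish, 7, 0) == 0xbf);
      uint32_t kind = extract(dmb_ish, 11, 8);       // barrier option field
      assert(kind == 0xb);                           // ISH
      // Merging another constraint into the same instruction, as membar() does:
      uint32_t merged = patch(dmb_ish, 11, 8, kind | 0x4);
      assert(extract(merged, 11, 8) == 0xf);         // option 0b1111 is DMB SY
      return 0;
    }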
@@ -102,12 +102,5 @@ void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffe
   }
 }

-void poll_return_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
-  if (NativeInstruction::maybe_cpool_ref(addr())) {
-    address old_addr = old_addr_for(addr(), src, dest);
-    MacroAssembler::pd_patch_instruction(addr(), MacroAssembler::target_addr_for_insn(old_addr));
-  }
-}
-
 void metadata_Relocation::pd_fix_value(address x) {
 }
@@ -75,8 +75,8 @@ class SimpleRuntimeFrame {
 // FIXME -- this is used by C1
 class RegisterSaver {
 public:
-  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
-  static void restore_live_registers(MacroAssembler* masm);
+  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors = false);
+  static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);

   // Offsets into the register save area
   // Used by deoptimization when it is managing result register
@@ -108,7 +108,17 @@ class RegisterSaver {

 };

-OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
+OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
+#ifdef COMPILER2
+  if (save_vectors) {
+    // Save upper half of vector registers
+    int vect_words = 32 * 8 / wordSize;
+    additional_frame_words += vect_words;
+  }
+#else
+  assert(!save_vectors, "vectors are generated only by C2");
+#endif

   int frame_size_in_bytes = round_to(additional_frame_words*wordSize +
                                      reg_save_size*BytesPerInt, 16);
   // OopMap frame size is in compiler stack slots (jint's) not bytes or words
@@ -122,7 +132,7 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
   // Save registers, fpu state, and flags.

   __ enter();
-  __ push_CPU_state();
+  __ push_CPU_state(save_vectors);

   // Set an oopmap for the call site.  This oopmap will map all
   // oop-registers and debug-info registers as callee-saved.  This
@@ -139,14 +149,14 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
                                     // register slots are 8 bytes
                                     // wide, 32 floating-point
                                     // registers
-      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
+      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset + additional_frame_slots),
                                 r->as_VMReg());
     }
   }

   for (int i = 0; i < FloatRegisterImpl::number_of_registers; i++) {
     FloatRegister r = as_FloatRegister(i);
-    int sp_offset = 2 * i;
+    int sp_offset = save_vectors ? (4 * i) : (2 * i);
     oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                               r->as_VMReg());
   }
@@ -154,8 +164,11 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
   return oop_map;
 }

-void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
-  __ pop_CPU_state();
+void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
+#ifndef COMPILER2
+  assert(!restore_vectors, "vectors are generated only by C2");
+#endif
+  __ pop_CPU_state(restore_vectors);
   __ leave();
 }
@@ -177,9 +190,9 @@ void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
 }

 // Is vector's size (in bytes) bigger than a size saved by default?
-// 16 bytes XMM registers are saved by default using fxsave/fxrstor instructions.
+// 8 bytes vector registers are saved by default on AArch64.
 bool SharedRuntime::is_wide_vector(int size) {
-  return size > 16;
+  return size > 8;
 }
 // The java_calling_convention describes stack locations as ideal slots on
 // a frame with no abi restrictions. Since we must observe abi restrictions
@@ -460,7 +473,7 @@ static void gen_c2i_adapter(MacroAssembler *masm,
 }


-static void gen_i2c_adapter(MacroAssembler *masm,
+void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                             int total_args_passed,
                             int comp_args_on_stack,
                             const BasicType *sig_bt,
@@ -1146,7 +1159,7 @@ static void rt_call(MacroAssembler* masm, address dest, int gpargs, int fpargs,
   assert((unsigned)gpargs < 256, "eek!");
   assert((unsigned)fpargs < 32, "eek!");
   __ lea(rscratch1, RuntimeAddress(dest));
-  __ mov(rscratch2, (gpargs << 6) | (fpargs << 2) | type);
+  if (UseBuiltinSim)   __ mov(rscratch2, (gpargs << 6) | (fpargs << 2) | type);
   __ blrt(rscratch1, rscratch2);
   __ maybe_isb();
 }
@@ -1194,7 +1207,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
   } else if (iid == vmIntrinsics::_invokeBasic) {
     has_receiver = true;
   } else {
-    fatal(err_msg_res("unexpected intrinsic id %d", iid));
+    fatal("unexpected intrinsic id %d", iid);
   }

   if (member_reg != noreg) {
@@ -1521,14 +1534,13 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

   int vep_offset = ((intptr_t)__ pc()) - start;

-  // Generate stack overflow check
-
   // If we have to make this method not-entrant we'll overwrite its
   // first instruction with a jump.  For this action to be legal we
   // must ensure that this first instruction is a B, BL, NOP, BKPT,
   // SVC, HVC, or SMC.  Make it a NOP.
   __ nop();

+  // Generate stack overflow check
   if (UseStackBanging) {
     __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
   } else {
@@ -1709,23 +1721,20 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   // need to spill before we call out
   int c_arg = total_c_args - total_in_args;

-  // Pre-load a static method's oop into r20.  Used both by locking code and
-  // the normal JNI call code.
+  // Pre-load a static method's oop into c_rarg1.
   if (method->is_static() && !is_critical_native) {

     // load oop into a register
-    __ movoop(oop_handle_reg,
+    __ movoop(c_rarg1,
               JNIHandles::make_local(method->method_holder()->java_mirror()),
               /*immediate*/true);

     // Now handlize the static class mirror it's known not-null.
-    __ str(oop_handle_reg, Address(sp, klass_offset));
+    __ str(c_rarg1, Address(sp, klass_offset));
     map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));

     // Now get the handle
-    __ lea(oop_handle_reg, Address(sp, klass_offset));
-    // store the klass handle as second argument
-    __ mov(c_rarg1, oop_handle_reg);
+    __ lea(c_rarg1, Address(sp, klass_offset));
     // and protect the arg if we must spill
     c_arg--;
   }
@@ -1740,19 +1749,13 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

   __ set_last_Java_frame(sp, noreg, (address)the_pc, rscratch1);

-  // We have all of the arguments setup at this point. We must not touch any register
-  // argument registers at this point (what if we save/restore them there are no oop?
-
+  Label dtrace_method_entry, dtrace_method_entry_done;
   {
-    SkipIfEqual skip(masm, &DTraceMethodProbes, false);
-    // protect the args we've loaded
-    save_args(masm, total_c_args, c_arg, out_regs);
-    __ mov_metadata(c_rarg1, method());
-    __ call_VM_leaf(
-      CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
-      rthread, c_rarg1);
-    restore_args(masm, total_c_args, c_arg, out_regs);
+    unsigned long offset;
+    __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
+    __ ldrb(rscratch1, Address(rscratch1, offset));
+    __ cbnzw(rscratch1, dtrace_method_entry);
+    __ bind(dtrace_method_entry_done);
   }

   // RedefineClasses() tracing support for obsolete method entry
@@ -1782,7 +1785,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   if (method->is_synchronized()) {
     assert(!is_critical_native, "unhandled");

-
     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();

     // Get the handle (the 2nd argument)
@@ -1838,7 +1840,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

   // Finally just about ready to make the JNI call

-
   // get JNIEnv* which is first argument to native
   if (!is_critical_native) {
     __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
@@ -1904,14 +1905,17 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   //     Thread A is resumed to finish this native method, but doesn't block here since it
   //     didn't see any synchronization is progress, and escapes.
   __ mov(rscratch1, _thread_in_native_trans);
-  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
-  __ stlrw(rscratch1, rscratch2);

   if(os::is_MP()) {
     if (UseMembar) {
+      __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
+
       // Force this write out before the read below
       __ dmb(Assembler::SY);
     } else {
+      __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
+      __ stlrw(rscratch1, rscratch2);
+
       // Write serialization page so VM thread can do a pseudo remote membar.
       // We use the current thread pointer to calculate a thread specific
       // offset to write to within the page. This minimizes bus traffic
@ -1920,54 +1924,23 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Label after_transition;
|
|
||||||
|
|
||||||
// check for safepoint operation in progress and/or pending suspend requests
|
// check for safepoint operation in progress and/or pending suspend requests
|
||||||
|
Label safepoint_in_progress, safepoint_in_progress_done;
|
||||||
{
|
{
|
||||||
Label Continue;
|
assert(SafepointSynchronize::_not_synchronized == 0, "fix this code");
|
||||||
|
unsigned long offset;
|
||||||
{ unsigned long offset;
|
|
||||||
__ adrp(rscratch1,
|
__ adrp(rscratch1,
|
||||||
ExternalAddress((address)SafepointSynchronize::address_of_state()),
|
ExternalAddress((address)SafepointSynchronize::address_of_state()),
|
||||||
offset);
|
offset);
|
||||||
__ ldrw(rscratch1, Address(rscratch1, offset));
|
__ ldrw(rscratch1, Address(rscratch1, offset));
|
||||||
}
|
__ cbnzw(rscratch1, safepoint_in_progress);
|
||||||
__ cmpw(rscratch1, SafepointSynchronize::_not_synchronized);
|
|
||||||
|
|
||||||
Label L;
|
|
||||||
__ br(Assembler::NE, L);
|
|
||||||
__ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
|
__ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
|
||||||
__ cbz(rscratch1, Continue);
|
__ cbnzw(rscratch1, safepoint_in_progress);
|
||||||
__ bind(L);
|
__ bind(safepoint_in_progress_done);
|
||||||
|
|
||||||
// Don't use call_VM as it will see a possible pending exception and forward it
|
|
||||||
// and never return here preventing us from clearing _last_native_pc down below.
|
|
||||||
//
|
|
||||||
save_native_result(masm, ret_type, stack_slots);
|
|
||||||
__ mov(c_rarg0, rthread);
|
|
||||||
#ifndef PRODUCT
|
|
||||||
assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
|
|
||||||
#endif
|
|
||||||
if (!is_critical_native) {
|
|
||||||
__ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
|
|
||||||
} else {
|
|
||||||
__ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
|
|
||||||
}
|
|
||||||
__ blrt(rscratch1, 1, 0, 1);
|
|
||||||
__ maybe_isb();
|
|
||||||
// Restore any method result value
|
|
||||||
restore_native_result(masm, ret_type, stack_slots);
|
|
||||||
|
|
||||||
if (is_critical_native) {
|
|
||||||
// The call above performed the transition to thread_in_Java so
|
|
||||||
// skip the transition logic below.
|
|
||||||
__ b(after_transition);
|
|
||||||
}
|
|
||||||
|
|
||||||
__ bind(Continue);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// change thread state
|
// change thread state
|
||||||
|
Label after_transition;
|
||||||
__ mov(rscratch1, _thread_in_Java);
|
__ mov(rscratch1, _thread_in_Java);
|
||||||
__ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
|
__ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
|
||||||
__ stlrw(rscratch1, rscratch2);
|
__ stlrw(rscratch1, rscratch2);
|
||||||
@@ -2024,16 +1997,15 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
}

__ bind(done);

}

+ Label dtrace_method_exit, dtrace_method_exit_done;
{
- SkipIfEqual skip(masm, &DTraceMethodProbes, false);
+ unsigned long offset;
- save_native_result(masm, ret_type, stack_slots);
+ __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
- __ mov_metadata(c_rarg1, method());
+ __ ldrb(rscratch1, Address(rscratch1, offset));
- __ call_VM_leaf(
+ __ cbnzw(rscratch1, dtrace_method_exit);
- CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
+ __ bind(dtrace_method_exit_done);
- rthread, c_rarg1);
- restore_native_result(masm, ret_type, stack_slots);
}

__ reset_last_Java_frame(false, true);

@@ -2082,7 +2054,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Slow path locking & unlocking
if (method->is_synchronized()) {

- // BEGIN Slow path lock
+ __ block_comment("Slow path lock {");
__ bind(slow_path_lock);

// has last_Java_frame setup. No exceptions so do vanilla call not call_VM

@@ -2109,9 +2081,9 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
#endif
__ b(lock_done);

- // END Slow path lock
+ __ block_comment("} Slow path lock");

- // BEGIN Slow path unlock
+ __ block_comment("Slow path unlock {");
__ bind(slow_path_unlock);

// If we haven't already saved the native result we must save it now as xmm registers

@@ -2149,7 +2121,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
}
__ b(unlock_done);

- // END Slow path unlock
+ __ block_comment("} Slow path unlock");

} // synchronized

@@ -2162,6 +2134,69 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// and continue
__ b(reguard_done);

+ // SLOW PATH safepoint
+ {
+ __ block_comment("safepoint {");
+ __ bind(safepoint_in_progress);
+
+ // Don't use call_VM as it will see a possible pending exception and forward it
+ // and never return here preventing us from clearing _last_native_pc down below.
+ //
+ save_native_result(masm, ret_type, stack_slots);
+ __ mov(c_rarg0, rthread);
+ #ifndef PRODUCT
+ assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
+ #endif
+ if (!is_critical_native) {
+ __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
+ } else {
+ __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
+ }
+ __ blrt(rscratch1, 1, 0, 1);
+ __ maybe_isb();
+ // Restore any method result value
+ restore_native_result(masm, ret_type, stack_slots);
+
+ if (is_critical_native) {
+ // The call above performed the transition to thread_in_Java so
+ // skip the transition logic above.
+ __ b(after_transition);
+ }
+
+ __ b(safepoint_in_progress_done);
+ __ block_comment("} safepoint");
+ }
+
+ // SLOW PATH dtrace support
+ {
+ __ block_comment("dtrace entry {");
+ __ bind(dtrace_method_entry);
+
+ // We have all of the arguments setup at this point. We must not touch any register
+ // argument registers at this point (what if we save/restore them there are no oop?
+
+ save_args(masm, total_c_args, c_arg, out_regs);
+ __ mov_metadata(c_rarg1, method());
+ __ call_VM_leaf(
+ CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
+ rthread, c_rarg1);
+ restore_args(masm, total_c_args, c_arg, out_regs);
+ __ b(dtrace_method_entry_done);
+ __ block_comment("} dtrace entry");
+ }
+
+ {
+ __ block_comment("dtrace exit {");
+ __ bind(dtrace_method_exit);
+ save_native_result(masm, ret_type, stack_slots);
+ __ mov_metadata(c_rarg1, method());
+ __ call_VM_leaf(
+ CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
+ rthread, c_rarg1);
+ restore_native_result(masm, ret_type, stack_slots);
+ __ b(dtrace_method_exit_done);
+ __ block_comment("} dtrace exit");
+ }
+
__ flush();
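The safepoint slow path added above reflects the general rule for every native-to-Java transition: if a safepoint is pending or the thread has suspend flags set, the generated code calls back into the VM instead of transitioning directly. A rough C++ sketch of that decision (a simplified model for illustration; the names safepoint_pending and suspend_flags are stand-ins, not HotSpot's actual fields):

// Simplified model of the transition check emitted by the native wrapper.
enum ThreadState { _thread_in_native, _thread_in_native_trans, _thread_in_Java };

struct Thread {
  ThreadState state;
  int suspend_flags;                       // stand-in for JavaThread's suspend flags
};

extern bool safepoint_pending;             // stand-in for the SafepointSynchronize state word
void check_special_condition_for_native_trans(Thread* t);  // the VM callback used on the slow path

void transition_from_native(Thread* t) {
  t->state = _thread_in_native_trans;      // the real code uses a release store plus a membar
  if (safepoint_pending || t->suspend_flags != 0) {
    // Slow path: let the VM block this thread or honor the suspend request.
    check_special_condition_for_native_trans(t);
  }
  t->state = _thread_in_Java;              // fast path: nothing pending, go straight to Java
}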
@@ -2742,7 +2777,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);

// Save registers, fpu state, and flags
- map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
+ map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, save_vectors);

// The following is basically a call_VM. However, we need the precise
// address of the call in order to generate an oopmap. Hence, we do all the

@@ -2793,7 +2828,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
__ bind(noException);

// Normal exit, restore registers and exit.
- RegisterSaver::restore_live_registers(masm);
+ RegisterSaver::restore_live_registers(masm, save_vectors);

__ ret(lr);
@@ -746,6 +746,9 @@ class StubGenerator: public StubCodeGenerator {
const Register count = end; // 'end' register contains bytes count now
__ mov(scratch, (address)ct->byte_map_base);
__ add(start, start, scratch);
+ if (UseConcMarkSweepGC) {
+ __ membar(__ StoreStore);
+ }
__ BIND(L_loop);
__ strb(zr, Address(start, count));
__ subs(count, count, 1);
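The loop above is the bulk card-marking done after an array copy: every card spanned by the destination range is dirtied, and for CMS a StoreStore barrier orders the reference stores ahead of the card writes. A schematic version of the same post-barrier in plain C++, under the usual HotSpot assumptions of 512-byte cards and 0 meaning "dirty" (the table base variable is a stand-in):

#include <cstdint>
#include <cstddef>
#include <atomic>

static const int kCardShift = 9;        // 512-byte cards, the customary HotSpot value
static uint8_t*  card_table_base;       // assumed: byte map covering the heap

// Dirty every card covered by [start, start + len) after storing references there.
void post_barrier_range(uintptr_t start, size_t len, bool needs_store_store) {
  if (needs_store_store) {
    // Make the reference stores visible before the card marks (CMS relies on this ordering).
    std::atomic_thread_fence(std::memory_order_release);
  }
  uintptr_t first = start >> kCardShift;
  uintptr_t last  = (start + len - 1) >> kCardShift;
  for (uintptr_t c = first; c <= last; c++) {
    card_table_base[c] = 0;              // 0 == dirty card in this encoding
  }
}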
@@ -2395,6 +2398,274 @@ class StubGenerator: public StubCodeGenerator {
return start;
}

+ /***
+ * Arguments:
+ *
+ * Inputs:
+ *   c_rarg0 - int adler
+ *   c_rarg1 - byte* buff
+ *   c_rarg2 - int len
+ *
+ * Output:
+ *   c_rarg0 - int adler result
+ */
+ address generate_updateBytesAdler32() {
+ __ align(CodeEntryAlignment);
+ StubCodeMark mark(this, "StubRoutines", "updateBytesAdler32");
+ address start = __ pc();
+
+ Label L_simple_by1_loop, L_nmax, L_nmax_loop, L_by16, L_by16_loop, L_by1_loop, L_do_mod, L_combine, L_by1;
+
+ // Aliases
+ Register adler = c_rarg0;
+ Register s1 = c_rarg0;
+ Register s2 = c_rarg3;
+ Register buff = c_rarg1;
+ Register len = c_rarg2;
+ Register nmax = r4;
+ Register base = r5;
+ Register count = r6;
+ Register temp0 = rscratch1;
+ Register temp1 = rscratch2;
+ Register temp2 = r7;
+
+ // Max number of bytes we can process before having to take the mod
+ // 0x15B0 is 5552 in decimal, the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1
+ unsigned long BASE = 0xfff1;
+ unsigned long NMAX = 0x15B0;
+
+ __ mov(base, BASE);
+ __ mov(nmax, NMAX);
+
+ // s1 is initialized to the lower 16 bits of adler
+ // s2 is initialized to the upper 16 bits of adler
+ __ ubfx(s2, adler, 16, 16); // s2 = ((adler >> 16) & 0xffff)
+ __ uxth(s1, adler); // s1 = (adler & 0xffff)
+
+ // The pipelined loop needs at least 16 elements for 1 iteration
+ // It does check this, but it is more effective to skip to the cleanup loop
+ __ cmp(len, 16);
+ __ br(Assembler::HS, L_nmax);
+ __ cbz(len, L_combine);
+
+ __ bind(L_simple_by1_loop);
+ __ ldrb(temp0, Address(__ post(buff, 1)));
+ __ add(s1, s1, temp0);
+ __ add(s2, s2, s1);
+ __ subs(len, len, 1);
+ __ br(Assembler::HI, L_simple_by1_loop);
+
+ // s1 = s1 % BASE
+ __ subs(temp0, s1, base);
+ __ csel(s1, temp0, s1, Assembler::HS);
+
+ // s2 = s2 % BASE
+ __ lsr(temp0, s2, 16);
+ __ lsl(temp1, temp0, 4);
+ __ sub(temp1, temp1, temp0);
+ __ add(s2, temp1, s2, ext::uxth);
+
+ __ subs(temp0, s2, base);
+ __ csel(s2, temp0, s2, Assembler::HS);
+
+ __ b(L_combine);
+
+ __ bind(L_nmax);
+ __ subs(len, len, nmax);
+ __ sub(count, nmax, 16);
+ __ br(Assembler::LO, L_by16);
+
+ __ bind(L_nmax_loop);
+
+ __ ldp(temp0, temp1, Address(__ post(buff, 16)));
+
+ __ add(s1, s1, temp0, ext::uxtb);
+ __ ubfx(temp2, temp0, 8, 8);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp2);
+ __ ubfx(temp2, temp0, 16, 8);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp2);
+ __ ubfx(temp2, temp0, 24, 8);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp2);
+ __ ubfx(temp2, temp0, 32, 8);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp2);
+ __ ubfx(temp2, temp0, 40, 8);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp2);
+ __ ubfx(temp2, temp0, 48, 8);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp2);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp0, Assembler::LSR, 56);
+ __ add(s2, s2, s1);
+
+ __ add(s1, s1, temp1, ext::uxtb);
+ __ ubfx(temp2, temp1, 8, 8);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp2);
+ __ ubfx(temp2, temp1, 16, 8);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp2);
+ __ ubfx(temp2, temp1, 24, 8);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp2);
+ __ ubfx(temp2, temp1, 32, 8);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp2);
+ __ ubfx(temp2, temp1, 40, 8);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp2);
+ __ ubfx(temp2, temp1, 48, 8);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp2);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp1, Assembler::LSR, 56);
+ __ add(s2, s2, s1);
+
+ __ subs(count, count, 16);
+ __ br(Assembler::HS, L_nmax_loop);
+
+ // s1 = s1 % BASE
+ __ lsr(temp0, s1, 16);
+ __ lsl(temp1, temp0, 4);
+ __ sub(temp1, temp1, temp0);
+ __ add(temp1, temp1, s1, ext::uxth);
+
+ __ lsr(temp0, temp1, 16);
+ __ lsl(s1, temp0, 4);
+ __ sub(s1, s1, temp0);
+ __ add(s1, s1, temp1, ext::uxth);
+
+ __ subs(temp0, s1, base);
+ __ csel(s1, temp0, s1, Assembler::HS);
+
+ // s2 = s2 % BASE
+ __ lsr(temp0, s2, 16);
+ __ lsl(temp1, temp0, 4);
+ __ sub(temp1, temp1, temp0);
+ __ add(temp1, temp1, s2, ext::uxth);
+
+ __ lsr(temp0, temp1, 16);
+ __ lsl(s2, temp0, 4);
+ __ sub(s2, s2, temp0);
+ __ add(s2, s2, temp1, ext::uxth);
+
+ __ subs(temp0, s2, base);
+ __ csel(s2, temp0, s2, Assembler::HS);
+
+ __ subs(len, len, nmax);
+ __ sub(count, nmax, 16);
+ __ br(Assembler::HS, L_nmax_loop);
+
+ __ bind(L_by16);
+ __ adds(len, len, count);
+ __ br(Assembler::LO, L_by1);
+
+ __ bind(L_by16_loop);
+
+ __ ldp(temp0, temp1, Address(__ post(buff, 16)));
+
+ __ add(s1, s1, temp0, ext::uxtb);
+ __ ubfx(temp2, temp0, 8, 8);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp2);
+ __ ubfx(temp2, temp0, 16, 8);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp2);
+ __ ubfx(temp2, temp0, 24, 8);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp2);
+ __ ubfx(temp2, temp0, 32, 8);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp2);
+ __ ubfx(temp2, temp0, 40, 8);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp2);
+ __ ubfx(temp2, temp0, 48, 8);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp2);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp0, Assembler::LSR, 56);
+ __ add(s2, s2, s1);
+
+ __ add(s1, s1, temp1, ext::uxtb);
+ __ ubfx(temp2, temp1, 8, 8);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp2);
+ __ ubfx(temp2, temp1, 16, 8);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp2);
+ __ ubfx(temp2, temp1, 24, 8);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp2);
+ __ ubfx(temp2, temp1, 32, 8);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp2);
+ __ ubfx(temp2, temp1, 40, 8);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp2);
+ __ ubfx(temp2, temp1, 48, 8);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp2);
+ __ add(s2, s2, s1);
+ __ add(s1, s1, temp1, Assembler::LSR, 56);
+ __ add(s2, s2, s1);
+
+ __ subs(len, len, 16);
+ __ br(Assembler::HS, L_by16_loop);
+
+ __ bind(L_by1);
+ __ adds(len, len, 15);
+ __ br(Assembler::LO, L_do_mod);
+
+ __ bind(L_by1_loop);
+ __ ldrb(temp0, Address(__ post(buff, 1)));
+ __ add(s1, temp0, s1);
+ __ add(s2, s2, s1);
+ __ subs(len, len, 1);
+ __ br(Assembler::HS, L_by1_loop);
+
+ __ bind(L_do_mod);
+ // s1 = s1 % BASE
+ __ lsr(temp0, s1, 16);
+ __ lsl(temp1, temp0, 4);
+ __ sub(temp1, temp1, temp0);
+ __ add(temp1, temp1, s1, ext::uxth);
+
+ __ lsr(temp0, temp1, 16);
+ __ lsl(s1, temp0, 4);
+ __ sub(s1, s1, temp0);
+ __ add(s1, s1, temp1, ext::uxth);
+
+ __ subs(temp0, s1, base);
+ __ csel(s1, temp0, s1, Assembler::HS);
+
+ // s2 = s2 % BASE
+ __ lsr(temp0, s2, 16);
+ __ lsl(temp1, temp0, 4);
+ __ sub(temp1, temp1, temp0);
+ __ add(temp1, temp1, s2, ext::uxth);
+
+ __ lsr(temp0, temp1, 16);
+ __ lsl(s2, temp0, 4);
+ __ sub(s2, s2, temp0);
+ __ add(s2, s2, temp1, ext::uxth);
+
+ __ subs(temp0, s2, base);
+ __ csel(s2, temp0, s2, Assembler::HS);
+
+ // Combine lower bits and higher bits
+ __ bind(L_combine);
+ __ orr(s1, s1, s2, Assembler::LSL, 16); // adler = s1 | (s2 << 16)
+
+ __ ret(lr);
+
+ return start;
+ }
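For reference, the stub above is an unrolled, register-scheduled version of the scalar Adler-32 update. A straightforward C++ sketch of the same algorithm, including the NMAX blocking that bounds how far the sums may grow before a reduction modulo 65521 is required (this is a plain reference model, not the intrinsic itself):

#include <cstdint>
#include <cstddef>

// Reference Adler-32 update: 'adler' is the running checksum, buff/len the new data.
uint32_t update_bytes_adler32(uint32_t adler, const uint8_t* buff, size_t len) {
  const uint32_t BASE = 65521;   // 0xfff1, the largest prime below 2^16
  const size_t   NMAX = 5552;    // max bytes before s2 could overflow 32 bits

  uint32_t s1 = adler & 0xffff;
  uint32_t s2 = (adler >> 16) & 0xffff;

  while (len > 0) {
    size_t block = len < NMAX ? len : NMAX;
    len -= block;
    for (size_t i = 0; i < block; i++) {
      s1 += *buff++;   // sum of bytes
      s2 += s1;        // sum of running prefix sums
    }
    s1 %= BASE;
    s2 %= BASE;
  }
  return (s2 << 16) | s1;
}

The stub avoids an actual division by using 2^16 mod 65521 == 15: a value x can be folded toward x % 65521 as 15 * (x >> 16) + (x & 0xffff), which is exactly what the lsr/lsl/sub/add sequences above compute before the final conditional subtraction of BASE.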
/**
* Arguments:
*

@@ -3613,6 +3884,11 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
}

+ // generate Adler32 intrinsics code
+ if (UseAdler32Intrinsics) {
+ StubRoutines::_updateBytesAdler32 = generate_updateBytesAdler32();
+ }

// Safefetch stubs.
generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
&StubRoutines::_safefetch32_fault_pc,

@@ -535,7 +535,7 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
// r0
// c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
// rscratch1, rscratch2 (scratch regs)
- void InterpreterGenerator::lock_method(void) {
+ void TemplateInterpreterGenerator::lock_method() {
// synchronize method
const Address access_flags(rmethod, Method::access_flags_offset());
const Address monitor_block_top(

@@ -721,8 +721,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

// generate a vanilla interpreter entry as the slow path
__ bind(slow_path);
- (void) generate_normal_entry(false);
+ __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));

return entry;
}
#endif // INCLUDE_ALL_GCS

@@ -779,12 +778,10 @@ address InterpreterGenerator::generate_CRC32_update_entry() {

// generate a vanilla native entry as the slow path
__ bind(slow_path);
+ __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
- (void) generate_native_entry(false);

return entry;
}
- return generate_native_entry(false);
+ return NULL;
}

/**

@@ -841,12 +838,10 @@ address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpret

// generate a vanilla native entry as the slow path
__ bind(slow_path);
+ __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
- (void) generate_native_entry(false);

return entry;
}
- return generate_native_entry(false);
+ return NULL;
}

void InterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
@@ -178,9 +178,8 @@ void VM_Version::get_processor_features() {
warning("UseCRC32 specified, but not supported on this CPU");
}

- if (UseAdler32Intrinsics) {
+ if (FLAG_IS_DEFAULT(UseAdler32Intrinsics)) {
- warning("Adler32Intrinsics not available on this CPU.");
+ FLAG_SET_DEFAULT(UseAdler32Intrinsics, true);
- FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
}

if (auxv & HWCAP_AES) {
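This hunk flips the default: the AArch64 port now enables UseAdler32Intrinsics on its own, but only when the user has not set the flag explicitly, which is the usual HotSpot ergonomics pattern built on FLAG_IS_DEFAULT / FLAG_SET_DEFAULT. A hedged sketch of the same idea with a made-up feature flag (the names UseMyFeature and cpu_supports_my_feature are hypothetical, not HotSpot APIs):

// Illustration of "default the flag only if the user did not touch it".
bool UseMyFeature = false;            // build-time default
bool UseMyFeature_is_default = true;  // would be tracked by the flags framework

bool cpu_supports_my_feature();       // assumed CPU probe

void ergo_my_feature() {
  if (cpu_supports_my_feature()) {
    if (UseMyFeature_is_default) {
      UseMyFeature = true;            // turn it on by default on capable hardware
    }
  } else if (UseMyFeature) {
    UseMyFeature = false;             // user asked for something the CPU cannot do
  }
}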
@@ -60,6 +60,7 @@ define_pd_global(intx, LoopUnrollLimit, 60);
define_pd_global(bool, OptoPeephole, false);
define_pd_global(bool, UseCISCSpill, false);
define_pd_global(bool, OptoBundling, false);
+ define_pd_global(bool, OptoRegScheduling, false);
// GL:
// Detected a problem with unscaled compressed oops and
// narrow_oop_use_complex_address() == false.

@@ -94,10 +94,12 @@ bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {

const int IC_pos_in_java_to_interp_stub = 8;
#define __ _masm.
- address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
+ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = NULL*/) {
#ifdef COMPILER2
+ if (mark == NULL) {
// Get the mark within main instrs section which is set to the address of the call.
- address call_addr = cbuf.insts_mark();
+ mark = cbuf.insts_mark();
+ }

// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a stub.

@@ -117,7 +119,7 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
// Create a static stub relocation which relates this stub
// with the call instruction at insts_call_instruction_offset in the
// instructions code-section.
- __ relocate(static_stub_Relocation::spec(call_addr));
+ __ relocate(static_stub_Relocation::spec(mark));
const int stub_start_offset = __ offset();

// Now, create the stub's code:

@@ -41,6 +41,18 @@ define_pd_global(bool, ImplicitNullChecks, true); // Generate code for impli
define_pd_global(bool, TrapBasedNullChecks, true);
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs passed to check cast.

+ #define DEFAULT_STACK_YELLOW_PAGES (6)
+ #define DEFAULT_STACK_RED_PAGES (1)
+ #define DEFAULT_STACK_SHADOW_PAGES (6 DEBUG_ONLY(+2))
+
+ #define MIN_STACK_YELLOW_PAGES (1)
+ #define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
+ #define MIN_STACK_SHADOW_PAGES (1)
+
+ define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES);
+ define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES);
+ define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES);

// Use large code-entry alignment.
define_pd_global(intx, CodeEntryAlignment, 128);
define_pd_global(intx, OptoLoopAlignment, 16);
@@ -46,7 +46,7 @@ void InterpreterMacroAssembler::null_check_throw(Register a, int offset, Registe
MacroAssembler::null_check_throw(a, offset, temp_reg, exception_entry);
}

- void InterpreterMacroAssembler::branch_to_entry(address entry, Register Rscratch) {
+ void InterpreterMacroAssembler::jump_to_entry(address entry, Register Rscratch) {
assert(entry, "Entry must have been generated by now");
if (is_within_range_of_b(entry, pc())) {
b(entry);

@@ -39,7 +39,7 @@ class InterpreterMacroAssembler: public MacroAssembler {

void null_check_throw(Register a, int offset, Register temp_reg);

- void branch_to_entry(address entry, Register Rscratch);
+ void jump_to_entry(address entry, Register Rscratch);

// Handy address generation macros.
#define thread_(field_name) in_bytes(JavaThread::field_name ## _offset()), R16_thread

@@ -31,12 +31,12 @@
private:

address generate_abstract_entry(void);
- address generate_jump_to_normal_entry(void);
- address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
- address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
+ address generate_accessor_entry(void) { return NULL; }
+ address generate_empty_entry(void) { return NULL; }
address generate_Reference_get_entry(void);

address generate_CRC32_update_entry();
address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind);
+ address generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }

#endif // CPU_PPC_VM_INTERPRETERGENERATOR_PPC_HPP

@@ -427,18 +427,6 @@ address AbstractInterpreterGenerator::generate_result_handler_for(BasicType type
return entry;
}

- // Call an accessor method (assuming it is resolved, otherwise drop into
- // vanilla (slow path) entry.
- address InterpreterGenerator::generate_jump_to_normal_entry(void) {
- address entry = __ pc();
- address normal_entry = Interpreter::entry_for_kind(Interpreter::zerolocals);
- assert(normal_entry != NULL, "should already be generated.");
- __ branch_to_entry(normal_entry, R11_scratch1);
- __ flush();
-
- return entry;
- }
-
// Abstract method entry.
//
address InterpreterGenerator::generate_abstract_entry(void) {

@@ -529,12 +517,12 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
// regular method entry code to generate the NPE.
//

+ if (UseG1GC) {
address entry = __ pc();

const int referent_offset = java_lang_ref_Reference::referent_offset;
guarantee(referent_offset > 0, "referent offset not initialized");

- if (UseG1GC) {
Label slow_path;

// Debugging not possible, so can't use __ skip_if_jvmti_mode(slow_path, GR31_SCRATCH);

@@ -577,13 +565,11 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

// Generate regular method entry.
__ bind(slow_path);
- __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
+ __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
- __ flush();

return entry;
- } else {
- return generate_jump_to_normal_entry();
}

+ return NULL;
}

void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
hotspot/src/cpu/ppc/vm/jvmciCodeInstaller_ppc.cpp (new file, 68 lines)
@@ -0,0 +1,68 @@
+ /*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+ #include "jvmci/jvmciCodeInstaller.hpp"
+ #include "jvmci/jvmciRuntime.hpp"
+ #include "jvmci/jvmciCompilerToVM.hpp"
+ #include "jvmci/jvmciJavaClasses.hpp"
+ #include "oops/oop.inline.hpp"
+ #include "runtime/sharedRuntime.hpp"
+ #include "vmreg_ppc.inline.hpp"
+
+ jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, oop method) {
+   Unimplemented();
+   return 0;
+ }
+
+ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle& constant) {
+   Unimplemented();
+ }
+
+ void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset) {
+   Unimplemented();
+ }
+
+ void CodeInstaller::pd_relocate_CodeBlob(CodeBlob* cb, NativeInstruction* inst) {
+   Unimplemented();
+ }
+
+ void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination) {
+   Unimplemented();
+ }
+
+ void CodeInstaller::pd_relocate_JavaMethod(oop hotspot_method, jint pc_offset) {
+   Unimplemented();
+ }
+
+ void CodeInstaller::pd_relocate_poll(address pc, jint mark) {
+   Unimplemented();
+ }
+
+ // convert JVMCI register indices (as used in oop maps) to HotSpot registers
+ VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg) {
+   return NULL;
+ }
+
+ bool CodeInstaller::is_general_purpose_reg(VMReg hotspotRegister) {
+   return false;
+ }
@@ -594,13 +594,6 @@ void MacroAssembler::bxx64_patchable(address dest, relocInfo::relocType rt, bool
"can't identify emitted call");
} else {
// variant 1:
- #if defined(ABI_ELFv2)
- nop();
- calculate_address_from_global_toc(R12, dest, true, true, false);
- mtctr(R12);
- nop();
- nop();
- #else
mr(R0, R11); // spill R11 -> R0.

// Load the destination address into CTR,

@@ -610,7 +603,6 @@ void MacroAssembler::bxx64_patchable(address dest, relocInfo::relocType rt, bool
mtctr(R11);
mr(R11, R0); // spill R11 <- R0.
nop();
- #endif

// do the call/jump
if (link) {

@@ -4292,7 +4284,7 @@ const char* stop_types[] = {

static void stop_on_request(int tp, const char* msg) {
tty->print("PPC assembly code requires stop: (%s) %s\n", stop_types[tp%/*stop_end*/4], msg);
- guarantee(false, err_msg("PPC assembly code requires stop: %s", msg));
+ guarantee(false, "PPC assembly code requires stop: %s", msg);
}

// Call a C-function that prints output.

@@ -60,7 +60,7 @@ void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_

#ifdef ASSERT
static int check_nonzero(const char* xname, int x) {
- assert(x != 0, err_msg("%s should be nonzero", xname));
+ assert(x != 0, "%s should be nonzero", xname);
return x;
}
#define NONZERO(x) check_nonzero(#x, x)

@@ -434,7 +434,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
}

default:
- fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
+ fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
break;
}
@@ -149,7 +149,7 @@ void NativeCall::verify() {
if (!NativeCall::is_call_at(addr)) {
tty->print_cr("not a NativeCall at " PTR_FORMAT, p2i(addr));
// TODO: PPC port: Disassembler::decode(addr - 20, addr + 20, tty);
- fatal(err_msg("not a NativeCall at " PTR_FORMAT, p2i(addr)));
+ fatal("not a NativeCall at " PTR_FORMAT, p2i(addr));
}
}
#endif // ASSERT

@@ -162,7 +162,7 @@ void NativeFarCall::verify() {
if (!NativeFarCall::is_far_call_at(addr)) {
tty->print_cr("not a NativeFarCall at " PTR_FORMAT, p2i(addr));
// TODO: PPC port: Disassembler::decode(addr, 20, 20, tty);
- fatal(err_msg("not a NativeFarCall at " PTR_FORMAT, p2i(addr)));
+ fatal("not a NativeFarCall at " PTR_FORMAT, p2i(addr));
}
}
#endif // ASSERT

@@ -308,7 +308,7 @@ void NativeMovConstReg::verify() {
! MacroAssembler::is_bl(*((int*) addr))) {
tty->print_cr("not a NativeMovConstReg at " PTR_FORMAT, p2i(addr));
// TODO: PPC port: Disassembler::decode(addr, 20, 20, tty);
- fatal(err_msg("not a NativeMovConstReg at " PTR_FORMAT, p2i(addr)));
+ fatal("not a NativeMovConstReg at " PTR_FORMAT, p2i(addr));
}
}
}

@@ -346,7 +346,7 @@ void NativeJump::verify() {
if (!NativeJump::is_jump_at(addr)) {
tty->print_cr("not a NativeJump at " PTR_FORMAT, p2i(addr));
// TODO: PPC port: Disassembler::decode(addr, 20, 20, tty);
- fatal(err_msg("not a NativeJump at " PTR_FORMAT, p2i(addr)));
+ fatal("not a NativeJump at " PTR_FORMAT, p2i(addr));
}
}
#endif // ASSERT

@@ -2064,6 +2064,10 @@ const bool Matcher::match_rule_supported(int opcode) {
return true; // Per default match rules are supported.
}

+ const int Matcher::float_pressure(int default_pressure_threshold) {
+   return default_pressure_threshold;
+ }
+
int Matcher::regnum_to_fpu_offset(int regnum) {
// No user for this method?
Unimplemented();

@@ -125,8 +125,5 @@ address Relocation::pd_get_address_from_code() {
void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
}

- void poll_return_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
- }
-
void metadata_Relocation::pd_fix_value(address x) {
}
@@ -475,9 +475,8 @@ void RegisterSaver::restore_result_registers(MacroAssembler* masm, int frame_siz

// Is vector's size (in bytes) bigger than a size saved by default?
bool SharedRuntime::is_wide_vector(int size) {
- ResourceMark rm;
// Note, MaxVectorSize == 8 on PPC64.
- assert(size <= 8, err_msg_res("%d bytes vectors are not supported", size));
+ assert(size <= 8, "%d bytes vectors are not supported", size);
return size > 8;
}
#ifdef COMPILER2

@@ -957,7 +956,7 @@ static address gen_c2i_adapter(MacroAssembler *masm,
return c2i_entrypoint;
}

- static void gen_i2c_adapter(MacroAssembler *masm,
+ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
int total_args_passed,
int comp_args_on_stack,
const BasicType *sig_bt,

@@ -1631,7 +1630,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
} else if (iid == vmIntrinsics::_invokeBasic) {
has_receiver = true;
} else {
- fatal(err_msg_res("unexpected intrinsic id %d", iid));
+ fatal("unexpected intrinsic id %d", iid);
}

if (member_reg != noreg) {

@@ -841,7 +841,7 @@ class StubGenerator: public StubCodeGenerator {
// Only called by MacroAssembler::verify_oop
static void verify_oop_helper(const char* message, oop o) {
if (!o->is_oop_or_null()) {
- fatal(message);
+ fatal("%s", message);
}
++ StubRoutines::_verify_oop_count;
}
@@ -620,7 +620,7 @@ inline bool math_entry_available(AbstractInterpreter::MethodKind kind) {
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
if (!math_entry_available(kind)) {
NOT_PRODUCT(__ should_not_reach_here();)
- return Interpreter::entry_for_kind(Interpreter::zerolocals);
+ return NULL;
}

address entry = __ pc();

@@ -1126,14 +1126,6 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {

generate_fixed_frame(false, Rsize_of_parameters, Rsize_of_locals);

- #ifdef FAST_DISPATCH
- __ unimplemented("Fast dispatch in generate_normal_entry");
- #if 0
- __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
- // Set bytecode dispatch table base.
- #endif
- #endif
-
// --------------------------------------------------------------------------
// Zero out non-parameter locals.
// Note: *Always* zero out non-parameter locals as Sparc does. It's not

@@ -1266,9 +1258,8 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
* int java.util.zip.CRC32.update(int crc, int b)
*/
address InterpreterGenerator::generate_CRC32_update_entry() {
- address start = __ pc();  // Remember stub start address (is rtn value).

if (UseCRC32Intrinsics) {
+ address start = __ pc();  // Remember stub start address (is rtn value).
Label slow_path;

// Safepoint check

@@ -1313,11 +1304,11 @@ address InterpreterGenerator::generate_CRC32_update_entry() {
// Generate a vanilla native entry as the slow path.
BLOCK_COMMENT("} CRC32_update");
BIND(slow_path);
+ __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
+ return start;
}

- (void) generate_native_entry(false);
+ return NULL;

- return start;
}

// CRC32 Intrinsics.

@@ -1327,9 +1318,8 @@ address InterpreterGenerator::generate_CRC32_update_entry() {
* int java.util.zip.CRC32.updateByteBuffer(int crc, long* buf, int off, int len)
*/
address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
- address start = __ pc();  // Remember stub start address (is rtn value).

if (UseCRC32Intrinsics) {
+ address start = __ pc();  // Remember stub start address (is rtn value).
Label slow_path;

// Safepoint check

@@ -1406,11 +1396,11 @@ address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpret
// Generate a vanilla native entry as the slow path.
BLOCK_COMMENT("} CRC32_updateBytes(Buffer)");
BIND(slow_path);
+ __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
+ return start;
}

- (void) generate_native_entry(false);
+ return NULL;

- return start;
}

// These should never be compiled since the interpreter will prefer
@@ -389,7 +389,7 @@ class Assembler : public AbstractAssembler {

static void assert_signed_range(intptr_t x, int nbits) {
assert(nbits == 32 || (-(1 << nbits-1) <= x && x < ( 1 << nbits-1)),
- err_msg("value out of range: x=" INTPTR_FORMAT ", nbits=%d", x, nbits));
+ "value out of range: x=" INTPTR_FORMAT ", nbits=%d", x, nbits);
}

static void assert_signed_word_disp_range(intptr_t x, int nbits) {

@@ -64,6 +64,7 @@ define_pd_global(bool, OptoPeephole, false);
define_pd_global(bool, UseCISCSpill, false);
define_pd_global(bool, OptoBundling, false);
define_pd_global(bool, OptoScheduling, true);
+ define_pd_global(bool, OptoRegScheduling, false);

#ifdef _LP64
// We need to make sure that all generated code is within

@@ -53,14 +53,15 @@ bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
// ----------------------------------------------------------------------------

#define __ _masm.
- address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
+ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
- #ifdef COMPILER2
// Stub is fixed up when the corresponding call is converted from calling
// compiled code to calling interpreted code.
// set (empty), G5
// jmp -1

- address mark = cbuf.insts_mark();  // Get mark within main instrs section.
+ if (mark == NULL) {
+ mark = cbuf.insts_mark();  // Get mark within main instrs section.
+ }

MacroAssembler _masm(&cbuf);

@@ -80,12 +81,11 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {

__ delayed()->nop();

+ assert(__ pc() - base <= to_interp_stub_size(), "wrong stub size");

// Update current stubs pointer and restore code_end.
__ end_a_stub();
return base;
- #else
- ShouldNotReachHere();
- #endif
}
#undef __
@@ -31,6 +31,7 @@

void generate_more_monitors();
void generate_deopt_handling();
+ void lock_method(void);
void adjust_callers_stack(Register args);
void generate_compute_interpreter_state(const Register state,
const Register prev_state,

@@ -468,7 +468,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
- return generate_jump_to_normal_entry();
+ return NULL;
}

//

@@ -1164,7 +1164,7 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
}
// Find preallocated monitor and lock method (C++ interpreter)
//
- void InterpreterGenerator::lock_method(void) {
+ void CppInterpreterGenerator::lock_method() {
// Lock the current method.
// Destroys registers L2_scratch, L3_scratch, O0
//
@@ -52,19 +52,27 @@ define_pd_global(intx, OptoLoopAlignment, 16);  // = 4*wordSize
define_pd_global(intx, InlineFrequencyCount, 50);  // we can use more inlining on the SPARC
define_pd_global(intx, InlineSmallCode, 1500);

+ #define DEFAULT_STACK_YELLOW_PAGES (2)
+ #define DEFAULT_STACK_RED_PAGES (1)
+
#ifdef _LP64
// Stack slots are 2X larger in LP64 than in the 32 bit VM.
define_pd_global(intx, ThreadStackSize, 1024);
define_pd_global(intx, VMThreadStackSize, 1024);
- define_pd_global(intx, StackShadowPages, 10 DEBUG_ONLY(+1));
+ #define DEFAULT_STACK_SHADOW_PAGES (10 DEBUG_ONLY(+1))
#else
define_pd_global(intx, ThreadStackSize, 512);
define_pd_global(intx, VMThreadStackSize, 512);
- define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+1));
+ #define DEFAULT_STACK_SHADOW_PAGES (3 DEBUG_ONLY(+1))
- #endif
+ #endif // _LP64

- define_pd_global(intx, StackYellowPages, 2);
+ #define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
- define_pd_global(intx, StackRedPages, 1);
+ #define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
+ #define MIN_STACK_SHADOW_PAGES DEFAULT_STACK_SHADOW_PAGES
+
+ define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES);
+ define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES);
+ define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES);

define_pd_global(bool, RewriteBytecodes, true);
define_pd_global(bool, RewriteFrequentPairs, true);

@@ -82,6 +90,7 @@ define_pd_global(uintx, TypeProfileLevel, 111);
\
product(intx, UseVIS, 99, \
"Highest supported VIS instructions set on Sparc") \
+ range(0, 99) \
\
product(bool, UseCBCond, false, \
"Use compare and branch instruction on SPARC") \

@@ -91,12 +100,14 @@ define_pd_global(uintx, TypeProfileLevel, 111);
\
product(intx, BlockZeroingLowLimit, 2048, \
"Minimum size in bytes when block zeroing will be used") \
+ range(1, max_jint) \
\
product(bool, UseBlockCopy, false, \
"Use special cpu instructions for block copy") \
\
product(intx, BlockCopyLowLimit, 2048, \
"Minimum size in bytes when block copy will be used") \
+ range(1, max_jint) \
\
develop(bool, UseV8InstrsOnly, false, \
"Use SPARC-V8 Compliant instruction subset") \

@@ -108,9 +119,11 @@ define_pd_global(uintx, TypeProfileLevel, 111);
"Do not use swap instructions, but only CAS (in a loop) on SPARC")\
\
product(uintx, ArraycopySrcPrefetchDistance, 0, \
- "Distance to prefetch source array in arracopy") \
+ "Distance to prefetch source array in arraycopy") \
+ constraint(ArraycopySrcPrefetchDistanceConstraintFunc, AfterErgo) \
\
product(uintx, ArraycopyDstPrefetchDistance, 0, \
- "Distance to prefetch destination array in arracopy") \
+ "Distance to prefetch destination array in arraycopy") \
+ constraint(ArraycopyDstPrefetchDistanceConstraintFunc, AfterErgo) \

#endif // CPU_SPARC_VM_GLOBALS_SPARC_HPP
@@ -59,6 +59,13 @@ const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_s

#endif // CC_INTERP

+ void InterpreterMacroAssembler::jump_to_entry(address entry) {
+   assert(entry, "Entry must have been generated by now");
+   AddressLiteral al(entry);
+   jump_to(al, G3_scratch);
+   delayed()->nop();
+ }
+
void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta) {
// Note: this algorithm is also used by C1's OSR entry sequence.
// Any changes should also be applied to CodeEmitter::emit_osr_entry().

@@ -1643,26 +1650,73 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
bind(skip_receiver_profile);

// The method data pointer needs to be updated to reflect the new target.
+ #if INCLUDE_JVMCI
+ if (MethodProfileWidth == 0) {
update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
- bind (profile_continue);
+ }
+ #else
+ update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
+ #endif
+ bind(profile_continue);
}
}

- void InterpreterMacroAssembler::record_klass_in_profile_helper(
- Register receiver, Register scratch,
- int start_row, Label& done, bool is_virtual_call) {
+ #if INCLUDE_JVMCI
+ void InterpreterMacroAssembler::profile_called_method(Register method, Register scratch) {
+   assert_different_registers(method, scratch);
+   if (ProfileInterpreter && MethodProfileWidth > 0) {
+     Label profile_continue;
+
+     // If no method data exists, go to profile_continue.
+     test_method_data_pointer(profile_continue);
+
+     Label done;
+     record_item_in_profile_helper(method, scratch, 0, done, MethodProfileWidth,
+       &VirtualCallData::method_offset, &VirtualCallData::method_count_offset, in_bytes(VirtualCallData::nonprofiled_receiver_count_offset()));
+     bind(done);
+
+     update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
+     bind(profile_continue);
+   }
+ }
+ #endif // INCLUDE_JVMCI
+
+ void InterpreterMacroAssembler::record_klass_in_profile_helper(Register receiver, Register scratch,
+                                                                Label& done, bool is_virtual_call) {
if (TypeProfileWidth == 0) {
if (is_virtual_call) {
increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
}
- return;
+ #if INCLUDE_JVMCI
+ else if (EnableJVMCI) {
+   increment_mdp_data_at(in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset()), scratch);
+ }
}
+ #endif
+ } else {
+   int non_profiled_offset = -1;
+   if (is_virtual_call) {
+     non_profiled_offset = in_bytes(CounterData::count_offset());
+   }
+ #if INCLUDE_JVMCI
+   else if (EnableJVMCI) {
+     non_profiled_offset = in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset());
+   }
+ #endif
+
- int last_row = VirtualCallData::row_limit() - 1;
+   record_item_in_profile_helper(receiver, scratch, 0, done, TypeProfileWidth,
+     &VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset, non_profiled_offset);
+ }
+ }
+
+ void InterpreterMacroAssembler::record_item_in_profile_helper(Register item,
+   Register scratch, int start_row, Label& done, int total_rows,
+   OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn,
+   int non_profiled_offset) {
+   int last_row = total_rows - 1;
assert(start_row <= last_row, "must be work left to do");
- // Test this row for both the receiver and for null.
+ // Test this row for both the item and for null.
// Take any of three different outcomes:
- //   1. found receiver => increment count and goto done
+ //   1. found item => increment count and goto done
//   2. found null => keep looking for case 1, maybe allocate this cell
||||||
// 3. found something else => keep looking for cases 1 and 2
|
// 3. found something else => keep looking for cases 1 and 2
|
||||||
// Case 3 is handled by a recursive call.
|
// Case 3 is handled by a recursive call.
|
||||||
|
@ -1670,28 +1724,28 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
|
||||||
Label next_test;
|
Label next_test;
|
||||||
bool test_for_null_also = (row == start_row);
|
bool test_for_null_also = (row == start_row);
|
||||||
|
|
||||||
// See if the receiver is receiver[n].
|
// See if the item is item[n].
|
||||||
int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
|
int item_offset = in_bytes(item_offset_fn(row));
|
||||||
test_mdp_data_at(recvr_offset, receiver, next_test, scratch);
|
test_mdp_data_at(item_offset, item, next_test, scratch);
|
||||||
// delayed()->tst(scratch);
|
// delayed()->tst(scratch);
|
||||||
|
|
||||||
// The receiver is receiver[n]. Increment count[n].
|
// The receiver is item[n]. Increment count[n].
|
||||||
int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
|
int count_offset = in_bytes(item_count_offset_fn(row));
|
||||||
increment_mdp_data_at(count_offset, scratch);
|
increment_mdp_data_at(count_offset, scratch);
|
||||||
ba_short(done);
|
ba_short(done);
|
||||||
bind(next_test);
|
bind(next_test);
|
||||||
|
|
||||||
if (test_for_null_also) {
|
if (test_for_null_also) {
|
||||||
Label found_null;
|
Label found_null;
|
||||||
// Failed the equality check on receiver[n]... Test for null.
|
// Failed the equality check on item[n]... Test for null.
|
||||||
if (start_row == last_row) {
|
if (start_row == last_row) {
|
||||||
// The only thing left to do is handle the null case.
|
// The only thing left to do is handle the null case.
|
||||||
if (is_virtual_call) {
|
if (non_profiled_offset >= 0) {
|
||||||
brx(Assembler::zero, false, Assembler::pn, found_null);
|
brx(Assembler::zero, false, Assembler::pn, found_null);
|
||||||
delayed()->nop();
|
delayed()->nop();
|
||||||
// Receiver did not match any saved receiver and there is no empty row for it.
|
// Item did not match any saved item and there is no empty row for it.
|
||||||
// Increment total counter to indicate polymorphic case.
|
// Increment total counter to indicate polymorphic case.
|
||||||
increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
|
increment_mdp_data_at(non_profiled_offset, scratch);
|
||||||
ba_short(done);
|
ba_short(done);
|
||||||
bind(found_null);
|
bind(found_null);
|
||||||
} else {
|
} else {
|
||||||
|
@ -1705,21 +1759,22 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
|
||||||
delayed()->nop();
|
delayed()->nop();
|
||||||
|
|
||||||
// Put all the "Case 3" tests here.
|
// Put all the "Case 3" tests here.
|
||||||
record_klass_in_profile_helper(receiver, scratch, start_row + 1, done, is_virtual_call);
|
record_item_in_profile_helper(item, scratch, start_row + 1, done, total_rows,
|
||||||
|
item_offset_fn, item_count_offset_fn, non_profiled_offset);
|
||||||
|
|
||||||
// Found a null. Keep searching for a matching receiver,
|
// Found a null. Keep searching for a matching item,
|
||||||
// but remember that this is an empty (unused) slot.
|
// but remember that this is an empty (unused) slot.
|
||||||
bind(found_null);
|
bind(found_null);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// In the fall-through case, we found no matching receiver, but we
|
// In the fall-through case, we found no matching item, but we
|
||||||
// observed the receiver[start_row] is NULL.
|
// observed the item[start_row] is NULL.
|
||||||
|
|
||||||
// Fill in the receiver field and increment the count.
|
// Fill in the item field and increment the count.
|
||||||
int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
|
int item_offset = in_bytes(item_offset_fn(start_row));
|
||||||
set_mdp_data_at(recvr_offset, receiver);
|
set_mdp_data_at(item_offset, item);
|
||||||
int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
|
int count_offset = in_bytes(item_count_offset_fn(start_row));
|
||||||
mov(DataLayout::counter_increment, scratch);
|
mov(DataLayout::counter_increment, scratch);
|
||||||
set_mdp_data_at(count_offset, scratch);
|
set_mdp_data_at(count_offset, scratch);
|
||||||
if (start_row > 0) {
|
if (start_row > 0) {
|
||||||
|
@@ -1732,7 +1787,7 @@ void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
   assert(ProfileInterpreter, "must be profiling");
   Label done;

-  record_klass_in_profile_helper(receiver, scratch, 0, done, is_virtual_call);
+  record_klass_in_profile_helper(receiver, scratch, done, is_virtual_call);

   bind (done);
 }

@@ -1788,7 +1843,7 @@ void InterpreterMacroAssembler::profile_null_seen(Register scratch) {
     // The method data pointer needs to be updated.
     int mdp_delta = in_bytes(BitData::bit_data_size());
     if (TypeProfileCasts) {
-      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
+      mdp_delta = in_bytes(ReceiverTypeData::receiver_type_data_size());
     }
     update_mdp_by_constant(mdp_delta);

@@ -1806,7 +1861,7 @@ void InterpreterMacroAssembler::profile_typecheck(Register klass,

     int mdp_delta = in_bytes(BitData::bit_data_size());
     if (TypeProfileCasts) {
-      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
+      mdp_delta = in_bytes(ReceiverTypeData::receiver_type_data_size());

       // Record the object type.
       record_klass_in_profile(klass, scratch, false);

@@ -1828,7 +1883,7 @@ void InterpreterMacroAssembler::profile_typecheck_failed(Register scratch) {

     int count_offset = in_bytes(CounterData::count_offset());
     // Back up the address, since we have already bumped the mdp.
-    count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());
+    count_offset -= in_bytes(ReceiverTypeData::receiver_type_data_size());

     // *Decrement* the counter.  We expect to see zero or small negatives.
     increment_mdp_data_at(count_offset, scratch, true);
@@ -30,6 +30,8 @@

 // This file specializes the assember with interpreter-specific macros

+typedef ByteSize (*OffsetFunction)(uint);
+
 REGISTER_DECLARATION( Register, Otos_i , O0); // tos for ints, etc
 REGISTER_DECLARATION( Register, Otos_l , O0); // for longs
 REGISTER_DECLARATION( Register, Otos_l1, O0); // for 1st part of longs

@@ -80,6 +82,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
   InterpreterMacroAssembler(CodeBuffer* c)
     : MacroAssembler(c) {}

+  void jump_to_entry(address entry);
+
 #ifndef CC_INTERP
   virtual void load_earlyret_value(TosState state);

@@ -299,7 +303,11 @@ class InterpreterMacroAssembler: public MacroAssembler {

   void record_klass_in_profile(Register receiver, Register scratch, bool is_virtual_call);
   void record_klass_in_profile_helper(Register receiver, Register scratch,
-                                      int start_row, Label& done, bool is_virtual_call);
+                                      Label& done, bool is_virtual_call);
+  void record_item_in_profile_helper(Register item,
+                                     Register scratch, int start_row, Label& done, int total_rows,
+                                     OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn,
+                                     int non_profiled_offset);

   void update_mdp_by_offset(int offset_of_disp, Register scratch);
   void update_mdp_by_offset(Register reg, int offset_of_disp,

@@ -312,6 +320,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
   void profile_call(Register scratch);
   void profile_final_call(Register scratch);
   void profile_virtual_call(Register receiver, Register scratch, bool receiver_can_be_null = false);
+  void profile_called_method(Register method, Register scratch) NOT_JVMCI_RETURN;
   void profile_ret(TosState state, Register return_bci, Register scratch);
   void profile_null_seen(Register scratch);
   void profile_typecheck(Register klass, Register scratch);
@@ -34,11 +34,9 @@
   address generate_abstract_entry(void);
   // there are no math intrinsics on sparc
   address generate_math_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
-  address generate_jump_to_normal_entry(void);
-  address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
-  address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
+  address generate_accessor_entry(void) { return NULL; }
+  address generate_empty_entry(void) { return NULL; }
   address generate_Reference_get_entry(void);
-  void lock_method(void);
   void save_native_result(void);
   void restore_native_result(void);

@@ -48,4 +46,5 @@
   // Not supported
   address generate_CRC32_update_entry() { return NULL; }
   address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
+  address generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
 #endif // CPU_SPARC_VM_INTERPRETERGENERATOR_SPARC_HPP
@@ -241,15 +241,6 @@ void InterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {

 // Various method entries

-address InterpreterGenerator::generate_jump_to_normal_entry(void) {
-  address entry = __ pc();
-  assert(Interpreter::entry_for_kind(Interpreter::zerolocals) != NULL, "should already be generated");
-  AddressLiteral al(Interpreter::entry_for_kind(Interpreter::zerolocals));
-  __ jump_to(al, G3_scratch);
-  __ delayed()->nop();
-  return entry;
-}
-
 // Abstract method entry
 // Attempt to execute abstract method. Throw exception
 //
hotspot/src/cpu/sparc/vm/jvmciCodeInstaller_sparc.cpp (new file, 186 lines)
@@ -0,0 +1,186 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||||
|
* version 2 for more details (a copy is included in the LICENSE file that
|
||||||
|
* accompanied this code).
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License version
|
||||||
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||||
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
*
|
||||||
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||||
|
* or visit www.oracle.com if you need additional information or have any
|
||||||
|
* questions.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "jvmci/jvmciCodeInstaller.hpp"
|
||||||
|
#include "jvmci/jvmciRuntime.hpp"
|
||||||
|
#include "jvmci/jvmciCompilerToVM.hpp"
|
||||||
|
#include "jvmci/jvmciJavaClasses.hpp"
|
||||||
|
#include "oops/oop.inline.hpp"
|
||||||
|
#include "runtime/sharedRuntime.hpp"
|
||||||
|
#include "vmreg_sparc.inline.hpp"
|
||||||
|
|
||||||
|
jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, oop method) {
|
||||||
|
if (inst->is_call() || inst->is_jump()) {
|
||||||
|
return pc_offset + NativeCall::instruction_size;
|
||||||
|
} else if (inst->is_call_reg()) {
|
||||||
|
return pc_offset + NativeCallReg::instruction_size;
|
||||||
|
} else if (inst->is_sethi()) {
|
||||||
|
return pc_offset + NativeFarCall::instruction_size;
|
||||||
|
} else {
|
||||||
|
fatal("unsupported type of instruction for call site");
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle& constant) {
|
||||||
|
address pc = _instructions->start() + pc_offset;
|
||||||
|
Handle obj = HotSpotObjectConstantImpl::object(constant);
|
||||||
|
jobject value = JNIHandles::make_local(obj());
|
||||||
|
if (HotSpotObjectConstantImpl::compressed(constant)) {
|
||||||
|
#ifdef _LP64
|
||||||
|
int oop_index = _oop_recorder->find_index(value);
|
||||||
|
RelocationHolder rspec = oop_Relocation::spec(oop_index);
|
||||||
|
_instructions->relocate(pc, rspec, 1);
|
||||||
|
#else
|
||||||
|
fatal("compressed oop on 32bit");
|
||||||
|
#endif
|
||||||
|
} else {
|
||||||
|
NativeMovConstReg* move = nativeMovConstReg_at(pc);
|
||||||
|
move->set_data((intptr_t) value);
|
||||||
|
|
||||||
|
// We need two relocations: one on the sethi and one on the add.
|
||||||
|
int oop_index = _oop_recorder->find_index(value);
|
||||||
|
RelocationHolder rspec = oop_Relocation::spec(oop_index);
|
||||||
|
_instructions->relocate(pc + NativeMovConstReg::sethi_offset, rspec);
|
||||||
|
_instructions->relocate(pc + NativeMovConstReg::add_offset, rspec);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset) {
|
||||||
|
address pc = _instructions->start() + pc_offset;
|
||||||
|
NativeInstruction* inst = nativeInstruction_at(pc);
|
||||||
|
NativeInstruction* inst1 = nativeInstruction_at(pc + 4);
|
||||||
|
if(inst->is_sethi() && inst1->is_nop()) {
|
||||||
|
address const_start = _constants->start();
|
||||||
|
address dest = _constants->start() + data_offset;
|
||||||
|
if(_constants_size > 0) {
|
||||||
|
_instructions->relocate(pc + NativeMovConstReg::sethi_offset, internal_word_Relocation::spec((address) dest));
|
||||||
|
_instructions->relocate(pc + NativeMovConstReg::add_offset, internal_word_Relocation::spec((address) dest));
|
||||||
|
}
|
||||||
|
TRACE_jvmci_3("relocating at " PTR_FORMAT " (+%d) with destination at %d", p2i(pc), pc_offset, data_offset);
|
||||||
|
}else {
|
||||||
|
int const_size = align_size_up(_constants->end()-_constants->start(), CodeEntryAlignment);
|
||||||
|
NativeMovRegMem* load = nativeMovRegMem_at(pc);
|
||||||
|
// This offset must match with SPARCLoadConstantTableBaseOp.emitCode
|
||||||
|
load->set_offset(- (const_size - data_offset + Assembler::min_simm13()));
|
||||||
|
TRACE_jvmci_3("relocating ld at " PTR_FORMAT " (+%d) with destination at %d", p2i(pc), pc_offset, data_offset);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void CodeInstaller::pd_relocate_CodeBlob(CodeBlob* cb, NativeInstruction* inst) {
|
||||||
|
fatal("CodeInstaller::pd_relocate_CodeBlob - sparc unimp");
|
||||||
|
}
|
||||||
|
|
||||||
|
void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination) {
|
||||||
|
address pc = (address) inst;
|
||||||
|
if (inst->is_call()) {
|
||||||
|
NativeCall* call = nativeCall_at(pc);
|
||||||
|
call->set_destination((address) foreign_call_destination);
|
||||||
|
_instructions->relocate(call->instruction_address(), runtime_call_Relocation::spec());
|
||||||
|
} else if (inst->is_sethi()) {
|
||||||
|
NativeJump* jump = nativeJump_at(pc);
|
||||||
|
jump->set_jump_destination((address) foreign_call_destination);
|
||||||
|
_instructions->relocate(jump->instruction_address(), runtime_call_Relocation::spec());
|
||||||
|
} else {
|
||||||
|
fatal(err_msg("unknown call or jump instruction at " PTR_FORMAT, p2i(pc)));
|
||||||
|
}
|
||||||
|
TRACE_jvmci_3("relocating (foreign call) at " PTR_FORMAT, p2i(inst));
|
||||||
|
}
|
||||||
|
|
||||||
|
void CodeInstaller::pd_relocate_JavaMethod(oop hotspot_method, jint pc_offset) {
|
||||||
|
#ifdef ASSERT
|
||||||
|
Method* method = NULL;
|
||||||
|
// we need to check, this might also be an unresolved method
|
||||||
|
if (hotspot_method->is_a(HotSpotResolvedJavaMethodImpl::klass())) {
|
||||||
|
method = getMethodFromHotSpotMethod(hotspot_method);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
switch (_next_call_type) {
|
||||||
|
case INLINE_INVOKE:
|
||||||
|
break;
|
||||||
|
case INVOKEVIRTUAL:
|
||||||
|
case INVOKEINTERFACE: {
|
||||||
|
assert(method == NULL || !method->is_static(), "cannot call static method with invokeinterface");
|
||||||
|
NativeCall* call = nativeCall_at(_instructions->start() + pc_offset);
|
||||||
|
call->set_destination(SharedRuntime::get_resolve_virtual_call_stub());
|
||||||
|
_instructions->relocate(call->instruction_address(), virtual_call_Relocation::spec(_invoke_mark_pc));
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
case INVOKESTATIC: {
|
||||||
|
assert(method == NULL || method->is_static(), "cannot call non-static method with invokestatic");
|
||||||
|
NativeCall* call = nativeCall_at(_instructions->start() + pc_offset);
|
||||||
|
call->set_destination(SharedRuntime::get_resolve_static_call_stub());
|
||||||
|
_instructions->relocate(call->instruction_address(), relocInfo::static_call_type);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
case INVOKESPECIAL: {
|
||||||
|
assert(method == NULL || !method->is_static(), "cannot call static method with invokespecial");
|
||||||
|
NativeCall* call = nativeCall_at(_instructions->start() + pc_offset);
|
||||||
|
call->set_destination(SharedRuntime::get_resolve_opt_virtual_call_stub());
|
||||||
|
_instructions->relocate(call->instruction_address(), relocInfo::opt_virtual_call_type);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
fatal("invalid _next_call_type value");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void CodeInstaller::pd_relocate_poll(address pc, jint mark) {
|
||||||
|
switch (mark) {
|
||||||
|
case POLL_NEAR:
|
||||||
|
fatal("unimplemented");
|
||||||
|
break;
|
||||||
|
case POLL_FAR:
|
||||||
|
_instructions->relocate(pc, relocInfo::poll_type);
|
||||||
|
break;
|
||||||
|
case POLL_RETURN_NEAR:
|
||||||
|
fatal("unimplemented");
|
||||||
|
break;
|
||||||
|
case POLL_RETURN_FAR:
|
||||||
|
_instructions->relocate(pc, relocInfo::poll_return_type);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
fatal("invalid mark value");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// convert JVMCI register indices (as used in oop maps) to HotSpot registers
|
||||||
|
VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg) {
|
||||||
|
if (jvmci_reg < RegisterImpl::number_of_registers) {
|
||||||
|
return as_Register(jvmci_reg)->as_VMReg();
|
||||||
|
} else {
|
||||||
|
jint floatRegisterNumber = jvmci_reg - RegisterImpl::number_of_registers;
|
||||||
|
floatRegisterNumber += MAX2(0, floatRegisterNumber-32); // Beginning with f32, only every second register is going to be addressed
|
||||||
|
if (floatRegisterNumber < FloatRegisterImpl::number_of_registers) {
|
||||||
|
return as_FloatRegister(floatRegisterNumber)->as_VMReg();
|
||||||
|
}
|
||||||
|
ShouldNotReachHere();
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
bool CodeInstaller::is_general_purpose_reg(VMReg hotspotRegister) {
|
||||||
|
return !hotspotRegister->is_FloatRegister();
|
||||||
|
}
|
|
@@ -1596,7 +1596,7 @@ void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) {
   else {
      ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
   }
-  assert(false, err_msg("DEBUG MESSAGE: %s", msg));
+  assert(false, "DEBUG MESSAGE: %s", msg);
 }
@@ -61,8 +61,8 @@ inline void fill_subword(void* start, void* end, int value) {
    "   sub %[offset], %[end], %[offset]\n\t" // offset := start - end
    "   sllx %[offset], 2, %[offset]\n\t" // scale offset for instruction size of 4
    "   add %[offset], 40, %[offset]\n\t" // offset += 10 * instruction size
-   "   rd %pc, %[pc]\n\t"                // dispatch on scaled offset
-   "   jmpl %[pc]+%[offset], %g0\n\t"
+   "   rd %%pc, %[pc]\n\t"               // dispatch on scaled offset
+   "   jmpl %[pc]+%[offset], %%g0\n\t"
    "    nop\n\t"
    // DISPATCH: no direct reference, but without it the store block may be elided.
    "1:\n\t"

@@ -108,7 +108,7 @@ void memset_with_concurrent_readers(void* to, int value, size_t size) {
      // Unroll loop x8.
      " sub %[aend], %[ato], %[temp]\n\t"
      " cmp %[temp], 56\n\t"           // cc := (aligned_end - aligned_to) > 7 words
-     " ba %xcc, 2f\n\t"               // goto TEST always
+     " ba %%xcc, 2f\n\t"              // goto TEST always
      " sub %[aend], 56, %[temp]\n\t"  // limit := aligned_end - 7 words
      // LOOP:
      "1:\n\t"                         // unrolled x8 store loop top

@@ -123,7 +123,7 @@ void memset_with_concurrent_readers(void* to, int value, size_t size) {
      " stx %[xvalue], [%[ato]-8]\n\t"
      // TEST:
      "2:\n\t"
-     " bgu,a %xcc, 1b\n\t"            // goto LOOP if more than 7 words remaining
+     " bgu,a %%xcc, 1b\n\t"           // goto LOOP if more than 7 words remaining
      " add %[ato], 64, %[ato]\n\t"    // aligned_to += 8, for next iteration
      // Fill remaining < 8 full words.
      // Dispatch on (aligned_end - aligned_to).

@@ -132,8 +132,8 @@ void memset_with_concurrent_readers(void* to, int value, size_t size) {
      " sub %[ato], %[aend], %[ato]\n\t" // offset := aligned_to - aligned_end
      " srax %[ato], 1, %[ato]\n\t"      // scale offset for instruction size of 4
      " add %[ato], 40, %[ato]\n\t"      // offset += 10 * instruction size
-     " rd %pc, %[temp]\n\t"             // dispatch on scaled offset
-     " jmpl %[temp]+%[ato], %g0\n\t"
+     " rd %%pc, %[temp]\n\t"            // dispatch on scaled offset
+     " jmpl %[temp]+%[ato], %%g0\n\t"
      " nop\n\t"
      // DISPATCH: no direct reference, but without it the store block may be elided.
      "3:\n\t"
@@ -56,7 +56,7 @@ void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_

 #ifdef ASSERT
 static int check_nonzero(const char* xname, int x) {
-  assert(x != 0, err_msg("%s should be nonzero", xname));
+  assert(x != 0, "%s should be nonzero", xname);
   return x;
 }
 #define NONZERO(x) check_nonzero(#x, x)

@@ -453,7 +453,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
     }

   default:
-    fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
+    fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
     break;
   }
@@ -53,6 +53,7 @@ class NativeInstruction VALUE_OBJ_CLASS_SPEC {

   bool is_nop()      { return long_at(0) == nop_instruction(); }
   bool is_call()     { return is_op(long_at(0), Assembler::call_op); }
+  bool is_call_reg() { return is_op(long_at(0), Assembler::arith_op); }
   bool is_sethi()    { return (is_op2(long_at(0), Assembler::sethi_op2)
                                && inv_rd(long_at(0)) != G0); }

@@ -415,6 +416,19 @@ inline NativeCall* nativeCall_at(address instr) {
   return call;
 }

+class NativeCallReg: public NativeInstruction {
+ public:
+  enum Sparc_specific_constants {
+    instruction_size      = 8,
+    return_address_offset = 8,
+    instruction_offset    = 0
+  };
+
+  address next_instruction_address() const {
+    return addr_at(instruction_size);
+  }
+};
+
 // The NativeFarCall is an abstraction for accessing/manipulating native call-anywhere
 // instructions in the sparcv9 vm. Used to call native methods which may be loaded
 // anywhere in the address space, possibly out of reach of a call instruction.
@@ -197,8 +197,5 @@ address Relocation::pd_get_address_from_code() {
 void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
 }

-void poll_return_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
-}
-
 void metadata_Relocation::pd_fix_value(address x) {
 }
@@ -43,6 +43,9 @@
 #include "compiler/compileBroker.hpp"
 #include "shark/sharkCompiler.hpp"
 #endif
+#if INCLUDE_JVMCI
+#include "jvmci/jvmciJavaClasses.hpp"
+#endif

 #define __ masm->

@@ -316,7 +319,7 @@ void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
 // 8 bytes FP registers are saved by default on SPARC.
 bool SharedRuntime::is_wide_vector(int size) {
   // Note, MaxVectorSize == 8 on SPARC.
-  assert(size <= 8, err_msg_res("%d bytes vectors are not supported", size));
+  assert(size <= 8, "%d bytes vectors are not supported", size);
   return size > 8;
 }

@@ -464,7 +467,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       break;

     default:
-      fatal(err_msg_res("unknown basic type %d", sig_bt[i]));
+      fatal("unknown basic type %d", sig_bt[i]);
       break;
     }
   }
@ -760,13 +763,11 @@ static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg
|
||||||
__ bind(L_fail);
|
__ bind(L_fail);
|
||||||
}
|
}
|
||||||
|
|
||||||
void AdapterGenerator::gen_i2c_adapter(
|
void AdapterGenerator::gen_i2c_adapter(int total_args_passed,
|
||||||
int total_args_passed,
|
|
||||||
// VMReg max_arg,
|
// VMReg max_arg,
|
||||||
int comp_args_on_stack, // VMRegStackSlots
|
int comp_args_on_stack, // VMRegStackSlots
|
||||||
const BasicType *sig_bt,
|
const BasicType *sig_bt,
|
||||||
const VMRegPair *regs) {
|
const VMRegPair *regs) {
|
||||||
|
|
||||||
// Generate an I2C adapter: adjust the I-frame to make space for the C-frame
|
// Generate an I2C adapter: adjust the I-frame to make space for the C-frame
|
||||||
// layout. Lesp was saved by the calling I-frame and will be restored on
|
// layout. Lesp was saved by the calling I-frame and will be restored on
|
||||||
// return. Meanwhile, outgoing arg space is all owned by the callee
|
// return. Meanwhile, outgoing arg space is all owned by the callee
|
||||||
|
@ -990,6 +991,21 @@ void AdapterGenerator::gen_i2c_adapter(
|
||||||
|
|
||||||
// Jump to the compiled code just as if compiled code was doing it.
|
// Jump to the compiled code just as if compiled code was doing it.
|
||||||
__ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3);
|
__ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3);
|
||||||
|
#if INCLUDE_JVMCI
|
||||||
|
if (EnableJVMCI) {
|
||||||
|
// check if this call should be routed towards a specific entry point
|
||||||
|
__ ld(Address(G2_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), G1);
|
||||||
|
__ cmp(G0, G1);
|
||||||
|
Label no_alternative_target;
|
||||||
|
__ br(Assembler::equal, false, Assembler::pn, no_alternative_target);
|
||||||
|
__ delayed()->nop();
|
||||||
|
|
||||||
|
__ ld_ptr(G2_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset()), G3);
|
||||||
|
__ st_ptr(G0, Address(G2_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
|
||||||
|
|
||||||
|
__ bind(no_alternative_target);
|
||||||
|
}
|
||||||
|
#endif // INCLUDE_JVMCI
|
||||||
|
|
||||||
// 6243940 We might end up in handle_wrong_method if
|
// 6243940 We might end up in handle_wrong_method if
|
||||||
// the callee is deoptimized as we race thru here. If that
|
// the callee is deoptimized as we race thru here. If that
|
||||||
|
@ -1006,6 +1022,15 @@ void AdapterGenerator::gen_i2c_adapter(
|
||||||
__ delayed()->nop();
|
__ delayed()->nop();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
|
||||||
|
int total_args_passed,
|
||||||
|
int comp_args_on_stack,
|
||||||
|
const BasicType *sig_bt,
|
||||||
|
const VMRegPair *regs) {
|
||||||
|
AdapterGenerator agen(masm);
|
||||||
|
agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);
|
||||||
|
}
|
||||||
|
|
||||||
// ---------------------------------------------------------------
|
// ---------------------------------------------------------------
|
||||||
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
|
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
|
||||||
int total_args_passed,
|
int total_args_passed,
|
||||||
|
@ -1016,9 +1041,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
|
||||||
AdapterFingerPrint* fingerprint) {
|
AdapterFingerPrint* fingerprint) {
|
||||||
address i2c_entry = __ pc();
|
address i2c_entry = __ pc();
|
||||||
|
|
||||||
AdapterGenerator agen(masm);
|
gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
|
||||||
|
|
||||||
agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);
|
|
||||||
|
|
||||||
|
|
||||||
// -------------------------------------------------------------------------
|
// -------------------------------------------------------------------------
|
||||||
|
@ -1063,7 +1086,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
|
||||||
}
|
}
|
||||||
|
|
||||||
address c2i_entry = __ pc();
|
address c2i_entry = __ pc();
|
||||||
|
AdapterGenerator agen(masm);
|
||||||
agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, L_skip_fixup);
|
agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, L_skip_fixup);
|
||||||
|
|
||||||
__ flush();
|
__ flush();
|
||||||
|
@ -1859,7 +1882,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
|
||||||
} else if (iid == vmIntrinsics::_invokeBasic) {
|
} else if (iid == vmIntrinsics::_invokeBasic) {
|
||||||
has_receiver = true;
|
has_receiver = true;
|
||||||
} else {
|
} else {
|
||||||
fatal(err_msg_res("unexpected intrinsic id %d", iid));
|
fatal("unexpected intrinsic id %d", iid);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (member_reg != noreg) {
|
if (member_reg != noreg) {
|
||||||
|
@ -2916,6 +2939,11 @@ void SharedRuntime::generate_deopt_blob() {
|
||||||
pad += StackShadowPages*16 + 32;
|
pad += StackShadowPages*16 + 32;
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
#if INCLUDE_JVMCI
|
||||||
|
if (EnableJVMCI) {
|
||||||
|
pad += 1000; // Increase the buffer size when compiling for JVMCI
|
||||||
|
}
|
||||||
|
#endif
|
||||||
#ifdef _LP64
|
#ifdef _LP64
|
||||||
CodeBuffer buffer("deopt_blob", 2100+pad, 512);
|
CodeBuffer buffer("deopt_blob", 2100+pad, 512);
|
||||||
#else
|
#else
|
||||||
|
@ -2982,6 +3010,45 @@ void SharedRuntime::generate_deopt_blob() {
|
||||||
__ ba(cont);
|
__ ba(cont);
|
||||||
__ delayed()->mov(Deoptimization::Unpack_deopt, L0deopt_mode);
|
__ delayed()->mov(Deoptimization::Unpack_deopt, L0deopt_mode);
|
||||||
|
|
||||||
|
|
||||||
|
#if INCLUDE_JVMCI
|
||||||
|
Label after_fetch_unroll_info_call;
|
||||||
|
int implicit_exception_uncommon_trap_offset = 0;
|
||||||
|
int uncommon_trap_offset = 0;
|
||||||
|
|
||||||
|
if (EnableJVMCI) {
|
||||||
|
masm->block_comment("BEGIN implicit_exception_uncommon_trap");
|
||||||
|
implicit_exception_uncommon_trap_offset = __ offset() - start;
|
||||||
|
|
||||||
|
__ ld_ptr(G2_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset()), O7);
|
||||||
|
__ st_ptr(G0, Address(G2_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
|
||||||
|
__ add(O7, -8, O7);
|
||||||
|
|
||||||
|
uncommon_trap_offset = __ offset() - start;
|
||||||
|
|
||||||
|
// Save everything in sight.
|
||||||
|
(void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
|
||||||
|
__ set_last_Java_frame(SP, NULL);
|
||||||
|
|
||||||
|
__ ld(G2_thread, in_bytes(JavaThread::pending_deoptimization_offset()), O1);
|
||||||
|
__ sub(G0, 1, L1);
|
||||||
|
__ st(L1, G2_thread, in_bytes(JavaThread::pending_deoptimization_offset()));
|
||||||
|
|
||||||
|
__ mov((int32_t)Deoptimization::Unpack_reexecute, L0deopt_mode);
|
||||||
|
__ mov(G2_thread, O0);
|
||||||
|
__ call(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap));
|
||||||
|
__ delayed()->nop();
|
||||||
|
oop_maps->add_gc_map( __ offset()-start, map->deep_copy());
|
||||||
|
__ get_thread();
|
||||||
|
__ add(O7, 8, O7);
|
||||||
|
__ reset_last_Java_frame();
|
||||||
|
|
||||||
|
__ ba(after_fetch_unroll_info_call);
|
||||||
|
__ delayed()->nop(); // Delay slot
|
||||||
|
masm->block_comment("END implicit_exception_uncommon_trap");
|
||||||
|
} // EnableJVMCI
|
||||||
|
#endif // INCLUDE_JVMCI
|
||||||
|
|
||||||
int exception_offset = __ offset() - start;
|
int exception_offset = __ offset() - start;
|
||||||
|
|
||||||
// restore G2, the trampoline destroyed it
|
// restore G2, the trampoline destroyed it
|
||||||
|
@ -3004,6 +3071,7 @@ void SharedRuntime::generate_deopt_blob() {
|
||||||
int exception_in_tls_offset = __ offset() - start;
|
int exception_in_tls_offset = __ offset() - start;
|
||||||
|
|
||||||
// No need to update oop_map as each call to save_live_registers will produce identical oopmap
|
// No need to update oop_map as each call to save_live_registers will produce identical oopmap
|
||||||
|
// Opens a new stack frame
|
||||||
(void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
|
(void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
|
||||||
|
|
||||||
// Restore G2_thread
|
// Restore G2_thread
|
||||||
|
@ -3035,7 +3103,12 @@ void SharedRuntime::generate_deopt_blob() {
|
||||||
// Reexecute entry, similar to c2 uncommon trap
|
// Reexecute entry, similar to c2 uncommon trap
|
||||||
//
|
//
|
||||||
int reexecute_offset = __ offset() - start;
|
int reexecute_offset = __ offset() - start;
|
||||||
|
#if INCLUDE_JVMCI && !defined(COMPILER1)
|
||||||
|
if (EnableJVMCI && UseJVMCICompiler) {
|
||||||
|
// JVMCI does not use this kind of deoptimization
|
||||||
|
__ should_not_reach_here();
|
||||||
|
}
|
||||||
|
#endif
|
||||||
// No need to update oop_map as each call to save_live_registers will produce identical oopmap
|
// No need to update oop_map as each call to save_live_registers will produce identical oopmap
|
||||||
(void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
|
(void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
|
||||||
|
|
||||||
|
@ -3059,6 +3132,11 @@ void SharedRuntime::generate_deopt_blob() {
|
||||||
|
|
||||||
__ reset_last_Java_frame();
|
__ reset_last_Java_frame();
|
||||||
|
|
||||||
|
#if INCLUDE_JVMCI
|
||||||
|
if (EnableJVMCI) {
|
||||||
|
__ bind(after_fetch_unroll_info_call);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
// NOTE: we know that only O0/O1 will be reloaded by restore_result_registers
|
// NOTE: we know that only O0/O1 will be reloaded by restore_result_registers
|
||||||
// so this move will survive
|
// so this move will survive
|
||||||
|
|
||||||
|
@ -3124,6 +3202,12 @@ void SharedRuntime::generate_deopt_blob() {
|
||||||
masm->flush();
|
masm->flush();
|
||||||
_deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
|
_deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
|
||||||
_deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
|
_deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
|
||||||
|
#if INCLUDE_JVMCI
|
||||||
|
if (EnableJVMCI) {
|
||||||
|
_deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
|
||||||
|
_deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef COMPILER2
|
#ifdef COMPILER2
|
||||||
|
|
|
@@ -1098,7 +1098,7 @@ void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
   Register r = as_Register(ra_->get_encode(this));
   CodeSection* consts_section = __ code()->consts();
   int consts_size = consts_section->align_at_start(consts_section->size());
-  assert(constant_table.size() == consts_size, err_msg("must be: %d == %d", constant_table.size(), consts_size));
+  assert(constant_table.size() == consts_size, "must be: %d == %d", constant_table.size(), consts_size);

   if (UseRDPCForConstantTableBase) {
     // For the following RDPC logic to work correctly the consts

@@ -1860,6 +1860,10 @@ const bool Matcher::match_rule_supported(int opcode) {
   return true;  // Per default match rules are supported.
 }

+const int Matcher::float_pressure(int default_pressure_threshold) {
+  return default_pressure_threshold;
+}
+
 int Matcher::regnum_to_fpu_offset(int regnum) {
   return regnum - 32;  // The FP registers are in the second chunk
 }
@ -204,6 +204,20 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
|
||||||
address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
|
address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
|
||||||
address entry = __ pc();
|
address entry = __ pc();
|
||||||
__ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
|
__ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
|
||||||
|
#if INCLUDE_JVMCI
|
||||||
|
// Check if we need to take lock at entry of synchronized method.
|
||||||
|
if (UseJVMCICompiler) {
|
||||||
|
Label L;
|
||||||
|
Address pending_monitor_enter_addr(G2_thread, JavaThread::pending_monitorenter_offset());
|
||||||
|
__ ldbool(pending_monitor_enter_addr, Gtemp); // Load if pending monitor enter
|
||||||
|
__ cmp_and_br_short(Gtemp, G0, Assembler::equal, Assembler::pn, L);
|
||||||
|
// Clear flag.
|
||||||
|
__ stbool(G0, pending_monitor_enter_addr);
|
||||||
|
// Take lock.
|
||||||
|
lock_method();
|
||||||
|
__ bind(L);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
{ Label L;
|
{ Label L;
|
||||||
Address exception_addr(G2_thread, Thread::pending_exception_offset());
|
Address exception_addr(G2_thread, Thread::pending_exception_offset());
|
||||||
__ ld_ptr(exception_addr, Gtemp); // Load pending exception.
|
__ ld_ptr(exception_addr, Gtemp); // Load pending exception.
|
||||||
|
@ -349,7 +363,7 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
|
||||||
// Allocate monitor and lock method (asm interpreter)
|
// Allocate monitor and lock method (asm interpreter)
|
||||||
// ebx - Method*
|
// ebx - Method*
|
||||||
//
|
//
|
||||||
void InterpreterGenerator::lock_method(void) {
|
void TemplateInterpreterGenerator::lock_method() {
|
||||||
__ ld(Lmethod, in_bytes(Method::access_flags_offset()), O0); // Load access flags.
|
__ ld(Lmethod, in_bytes(Method::access_flags_offset()), O0); // Load access flags.
|
||||||
|
|
||||||
#ifdef ASSERT
|
#ifdef ASSERT
|
||||||
|
@ -779,14 +793,14 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
|
||||||
|
|
||||||
// Generate regular method entry
|
// Generate regular method entry
|
||||||
__ bind(slow_path);
|
__ bind(slow_path);
|
||||||
(void) generate_normal_entry(false);
|
__ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
|
||||||
return entry;
|
return entry;
|
||||||
}
|
}
|
||||||
#endif // INCLUDE_ALL_GCS
|
#endif // INCLUDE_ALL_GCS
|
||||||
|
|
||||||
// If G1 is not enabled then attempt to go through the accessor entry point
|
// If G1 is not enabled then attempt to go through the accessor entry point
|
||||||
// Reference.get is an accessor
|
// Reference.get is an accessor
|
||||||
return generate_jump_to_normal_entry();
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
//
|
//
|
||||||
|
|
|
@@ -37,9 +37,9 @@
 #ifdef _LP64
   // The sethi() instruction generates lots more instructions when shell
   // stack limit is unlimited, so that's why this is much bigger.
-  const static int InterpreterCodeSize = 210 * K;
+  const static int InterpreterCodeSize = 260 * K;
 #else
-  const static int InterpreterCodeSize = 180 * K;
+  const static int InterpreterCodeSize = 230 * K;
 #endif

 #endif // CPU_SPARC_VM_TEMPLATEINTERPRETER_SPARC_HPP
@ -2949,12 +2949,14 @@ void TemplateTable::prepare_invoke(int byte_no,
|
||||||
|
|
||||||
|
|
||||||
void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) {
|
void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) {
|
||||||
|
Register Rtemp = G4_scratch;
|
||||||
Register Rcall = Rindex;
|
Register Rcall = Rindex;
|
||||||
assert_different_registers(Rcall, G5_method, Gargs, Rret);
|
assert_different_registers(Rcall, G5_method, Gargs, Rret);
|
||||||
|
|
||||||
// get target Method* & entry point
|
// get target Method* & entry point
|
||||||
__ lookup_virtual_method(Rrecv, Rindex, G5_method);
|
__ lookup_virtual_method(Rrecv, Rindex, G5_method);
|
||||||
__ profile_arguments_type(G5_method, Rcall, Gargs, true);
|
__ profile_arguments_type(G5_method, Rcall, Gargs, true);
|
||||||
|
__ profile_called_method(G5_method, Rtemp);
|
||||||
__ call_from_interpreter(Rcall, Gargs, Rret);
|
__ call_from_interpreter(Rcall, Gargs, Rret);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -3211,6 +3213,7 @@ void TemplateTable::invokeinterface(int byte_no) {
|
||||||
assert_different_registers(Rcall, G5_method, Gargs, Rret);
|
assert_different_registers(Rcall, G5_method, Gargs, Rret);
|
||||||
|
|
||||||
__ profile_arguments_type(G5_method, Rcall, Gargs, true);
|
__ profile_arguments_type(G5_method, Rcall, Gargs, true);
|
||||||
|
__ profile_called_method(G5_method, Rscratch);
|
||||||
__ call_from_interpreter(Rcall, Gargs, Rret);
|
__ call_from_interpreter(Rcall, Gargs, Rret);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -3486,7 +3489,8 @@ void TemplateTable::checkcast() {
|
||||||
Register RspecifiedKlass = O4;
|
Register RspecifiedKlass = O4;
|
||||||
|
|
||||||
// Check for casting a NULL
|
// Check for casting a NULL
|
||||||
__ br_null_short(Otos_i, Assembler::pn, is_null);
|
__ br_null(Otos_i, false, Assembler::pn, is_null);
|
||||||
|
__ delayed()->nop();
|
||||||
|
|
||||||
// Get value klass in RobjKlass
|
// Get value klass in RobjKlass
|
||||||
__ load_klass(Otos_i, RobjKlass); // get value klass
|
__ load_klass(Otos_i, RobjKlass); // get value klass
|
||||||
|
@ -3542,7 +3546,8 @@ void TemplateTable::instanceof() {
|
||||||
Register RspecifiedKlass = O4;
|
Register RspecifiedKlass = O4;
|
||||||
|
|
||||||
// Check for casting a NULL
|
// Check for casting a NULL
|
||||||
__ br_null_short(Otos_i, Assembler::pt, is_null);
|
__ br_null(Otos_i, false, Assembler::pt, is_null);
|
||||||
|
__ delayed()->nop();
|
||||||
|
|
||||||
// Get value klass in RobjKlass
|
// Get value klass in RobjKlass
|
||||||
__ load_klass(Otos_i, RobjKlass); // get value klass
|
__ load_klass(Otos_i, RobjKlass); // get value klass
|
||||||
|
|
|
@@ -37,10 +37,11 @@
   /******************************/                        \
   /* JavaFrameAnchor */                                    \
   /******************************/                         \
-  volatile_nonstatic_field(JavaFrameAnchor, _flags, int)
+  volatile_nonstatic_field(JavaFrameAnchor, _flags, int)   \
+  static_field(VM_Version, _features, int)

-#define VM_TYPES_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type)
+#define VM_TYPES_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type) \
+  declare_toplevel_type(VM_Version)

 #define VM_INT_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) \
   /******************************/                         \

@@ -78,7 +79,11 @@
   declare_c2_constant(R_G4_num)                        \
   declare_c2_constant(R_G5_num)                        \
   declare_c2_constant(R_G6_num)                        \
-  declare_c2_constant(R_G7_num)
+  declare_c2_constant(R_G7_num)                        \
+  declare_constant(VM_Version::vis1_instructions_m)    \
+  declare_constant(VM_Version::vis2_instructions_m)    \
+  declare_constant(VM_Version::vis3_instructions_m)    \
+  declare_constant(VM_Version::cbcond_instructions_m)

 #define VM_LONG_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
@@ -40,10 +40,6 @@ void VM_Version::initialize() {
   PrefetchScanIntervalInBytes = prefetch_scan_interval_in_bytes();
   PrefetchFieldsAhead         = prefetch_fields_ahead();

-  assert(0 <= AllocatePrefetchInstr && AllocatePrefetchInstr <= 1, "invalid value");
-  if( AllocatePrefetchInstr < 0 ) AllocatePrefetchInstr = 0;
-  if( AllocatePrefetchInstr > 1 ) AllocatePrefetchInstr = 0;
-
   // Allocation prefetch settings
   intx cache_line_size = prefetch_data_size();
   if( cache_line_size > AllocatePrefetchStepSize )

@@ -59,13 +55,6 @@ void VM_Version::initialize() {
   AllocatePrefetchDistance = allocate_prefetch_distance();
   AllocatePrefetchStyle    = allocate_prefetch_style();

-  assert((AllocatePrefetchDistance % AllocatePrefetchStepSize) == 0 &&
-         (AllocatePrefetchDistance > 0), "invalid value");
-  if ((AllocatePrefetchDistance % AllocatePrefetchStepSize) != 0 ||
-      (AllocatePrefetchDistance <= 0)) {
-    AllocatePrefetchDistance = AllocatePrefetchStepSize;
-  }
-
   if (AllocatePrefetchStyle == 3 && !has_blk_init()) {
     warning("BIS instructions are not available on this CPU");
     FLAG_SET_DEFAULT(AllocatePrefetchStyle, 1);

@@ -73,13 +62,6 @@ void VM_Version::initialize() {

   guarantee(VM_Version::has_v9(), "only SPARC v9 is supported");

-  assert(ArraycopySrcPrefetchDistance < 4096, "invalid value");
-  if (ArraycopySrcPrefetchDistance >= 4096)
-    ArraycopySrcPrefetchDistance = 4064;
-  assert(ArraycopyDstPrefetchDistance < 4096, "invalid value");
-  if (ArraycopyDstPrefetchDistance >= 4096)
-    ArraycopyDstPrefetchDistance = 4064;
-
   UseSSE = 0; // Only on x86 and x64

   _supports_cx8 = has_v9();
@@ -29,6 +29,7 @@
 #include "runtime/vm_version.hpp"

 class VM_Version: public Abstract_VM_Version {
+  friend class VMStructs;
 protected:
   enum Feature_Flag {
     v8_instructions = 0,
@@ -733,11 +733,11 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
       // these asserts are somewhat nonsensical
 #ifndef _LP64
       assert(which == imm_operand || which == disp32_operand,
-             err_msg("which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip)));
+             "which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip));
 #else
       assert((which == call32_operand || which == imm_operand) && is_64bit ||
              which == narrow_oop_operand && !is_64bit,
-             err_msg("which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip)));
+             "which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip));
 #endif // _LP64
       return ip;

@@ -770,6 +770,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
     case 0x55: // andnps
     case 0x56: // orps
     case 0x57: // xorps
+    case 0x59: //mulpd
     case 0x6E: // movd
     case 0x7E: // movd
     case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush
|
||||||
// Check second byte
|
// Check second byte
|
||||||
NOT_LP64(assert((0xC0 & *ip) == 0xC0, "shouldn't have LDS and LES instructions"));
|
NOT_LP64(assert((0xC0 & *ip) == 0xC0, "shouldn't have LDS and LES instructions"));
|
||||||
|
|
||||||
|
int vex_opcode;
|
||||||
// First byte
|
// First byte
|
||||||
if ((0xFF & *inst) == VEX_3bytes) {
|
if ((0xFF & *inst) == VEX_3bytes) {
|
||||||
|
vex_opcode = VEX_OPCODE_MASK & *ip;
|
||||||
ip++; // third byte
|
ip++; // third byte
|
||||||
is_64bit = ((VEX_W & *ip) == VEX_W);
|
is_64bit = ((VEX_W & *ip) == VEX_W);
|
||||||
|
} else {
|
||||||
|
vex_opcode = VEX_OPCODE_0F;
|
||||||
}
|
}
|
||||||
ip++; // opcode
|
ip++; // opcode
|
||||||
// To find the end of instruction (which == end_pc_operand).
|
// To find the end of instruction (which == end_pc_operand).
|
||||||
|
switch (vex_opcode) {
|
||||||
|
case VEX_OPCODE_0F:
|
||||||
switch (0xFF & *ip) {
|
switch (0xFF & *ip) {
|
||||||
case 0x61: // pcmpestri r, r/a, #8
|
|
||||||
case 0x70: // pshufd r, r/a, #8
|
case 0x70: // pshufd r, r/a, #8
|
||||||
case 0x73: // psrldq r, #8
|
case 0x71: // ps[rl|ra|ll]w r, #8
|
||||||
|
case 0x72: // ps[rl|ra|ll]d r, #8
|
||||||
|
case 0x73: // ps[rl|ra|ll]q r, #8
|
||||||
|
case 0xC2: // cmp[ps|pd|ss|sd] r, r, r/a, #8
|
||||||
|
case 0xC4: // pinsrw r, r, r/a, #8
|
||||||
|
case 0xC5: // pextrw r/a, r, #8
|
||||||
|
case 0xC6: // shufp[s|d] r, r, r/a, #8
|
||||||
tail_size = 1; // the imm8
|
tail_size = 1; // the imm8
|
||||||
break;
|
break;
|
||||||
default:
|
}
|
||||||
|
break;
|
||||||
|
case VEX_OPCODE_0F_3A:
|
||||||
|
tail_size = 1;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
ip++; // skip opcode
|
ip++; // skip opcode
|
||||||
|
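For readers following the VEX parsing change above: the three-byte VEX escape (0xC4) carries the opcode map in the low five bits of its second byte, which is exactly the field the new VEX_OPCODE_MASK constant extracts, while the two-byte escape (0xC5) always implies the 0F map. A minimal standalone sketch of that extraction follows; it uses no HotSpot types, and the byte sequences in main() are illustrative values only, not taken from this change.

    #include <cstdint>
    #include <cstdio>

    enum VexMap { MAP_0F = 0x1, MAP_0F_38 = 0x2, MAP_0F_3A = 0x3 };

    // Mirrors the vex_opcode selection in locate_operand(): for a 0xC4 escape the
    // map comes from bits 4..0 of the next byte; a 0xC5 escape can only encode map 0F.
    static int vex_opcode_map(const uint8_t* inst) {
      if (inst[0] == 0xC4) {
        return inst[1] & 0x1F;   // same extraction as VEX_OPCODE_MASK & *ip
      }
      return MAP_0F;
    }

    int main() {
      const uint8_t three_byte[] = { 0xC4, 0xE3, 0x00 };  // illustrative: map field = 3 (0F 3A)
      const uint8_t two_byte[]   = { 0xC5, 0xF8, 0x00 };  // illustrative: implied map 0F
      std::printf("%d %d\n", vex_opcode_map(three_byte), vex_opcode_map(two_byte));  // prints "3 1"
      return 0;
    }

Instructions in the 0F 3A map always end with an imm8, which is why the new VEX_OPCODE_0F_3A branch can set tail_size unconditionally.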
@@ -1604,6 +1619,85 @@ void Assembler::cpuid() {
   emit_int8((unsigned char)0xA2);
 }

+// Opcode / Instruction                        Op/En  64-Bit Mode  Compat/Leg Mode  Description                  Implemented
+// F2 0F 38 F0 / r        CRC32 r32, r/m8     RM      Valid        Valid            Accumulate CRC32 on r/m8.    v
+// F2 REX 0F 38 F0 / r    CRC32 r32, r/m8*    RM      Valid        N.E.             Accumulate CRC32 on r/m8.    -
+// F2 REX.W 0F 38 F0 / r  CRC32 r64, r/m8     RM      Valid        N.E.             Accumulate CRC32 on r/m8.    -
+//
+// F2 0F 38 F1 / r        CRC32 r32, r/m16    RM      Valid        Valid            Accumulate CRC32 on r/m16.   v
+//
+// F2 0F 38 F1 / r        CRC32 r32, r/m32    RM      Valid        Valid            Accumulate CRC32 on r/m32.   v
+//
+// F2 REX.W 0F 38 F1 / r  CRC32 r64, r/m64    RM      Valid        N.E.             Accumulate CRC32 on r/m64.   v
+void Assembler::crc32(Register crc, Register v, int8_t sizeInBytes) {
+  assert(VM_Version::supports_sse4_2(), "");
+  int8_t w = 0x01;
+  Prefix p = Prefix_EMPTY;
+
+  emit_int8((int8_t)0xF2);
+  switch (sizeInBytes) {
+  case 1:
+    w = 0;
+    break;
+  case 2:
+  case 4:
+    break;
+  LP64_ONLY(case 8:)
+    // This instruction is not valid in 32 bits
+    // Note:
+    // http://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf
+    //
+    // Page B-72 Vol. 2C says
+    //   qwreg2 to qwreg  1111 0010 : 0100 1R0B : 0000 1111 : 0011 1000 : 1111 0000 : 11 qwreg1 qwreg2
+    //   mem64 to qwreg   1111 0010 : 0100 1R0B : 0000 1111 : 0011 1000 : 1111 0000 : mod qwreg r/m
+    //                                                                   F0!!!
+    // while 3-208 Vol. 2A
+    //   F2 REX.W 0F 38 F1 / r  CRC32 r64, r/m64  RM  Valid  N.E.  Accumulate CRC32 on r/m64.
+    //
+    // the 0 on a last bit is reserved for a different flavor of this instruction :
+    //   F2 REX.W 0F 38 F0 / r  CRC32 r64, r/m8   RM  Valid  N.E.  Accumulate CRC32 on r/m8.
+    p = REX_W;
+    break;
+  default:
+    assert(0, "Unsupported value for a sizeInBytes argument");
+    break;
+  }
+  LP64_ONLY(prefix(crc, v, p);)
+  emit_int8((int8_t)0x0F);
+  emit_int8(0x38);
+  emit_int8((int8_t)(0xF0 | w));
+  emit_int8(0xC0 | ((crc->encoding() & 0x7) << 3) | (v->encoding() & 7));
+}
+
+void Assembler::crc32(Register crc, Address adr, int8_t sizeInBytes) {
+  assert(VM_Version::supports_sse4_2(), "");
+  InstructionMark im(this);
+  int8_t w = 0x01;
+  Prefix p = Prefix_EMPTY;
+
+  emit_int8((int8_t)0xF2);
+  switch (sizeInBytes) {
+  case 1:
+    w = 0;
+    break;
+  case 2:
+  case 4:
+    break;
+  LP64_ONLY(case 8:)
+    // This instruction is not valid in 32 bits
+    p = REX_W;
+    break;
+  default:
+    assert(0, "Unsupported value for a sizeInBytes argument");
+    break;
+  }
+  LP64_ONLY(prefix(crc, adr, p);)
+  emit_int8((int8_t)0x0F);
+  emit_int8(0x38);
+  emit_int8((int8_t)(0xF0 | w));
+  emit_operand(crc, adr);
+}
+
 void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   emit_simd_arith_nonds(0xE6, dst, src, VEX_SIMD_F3, /* no_mask_reg */ false, /* legacy_mode */ true);
|
||||||
|
|
||||||
void Assembler::movsbl(Register dst, Register src) { // movsxb
|
void Assembler::movsbl(Register dst, Register src) { // movsxb
|
||||||
NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
|
NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
|
||||||
int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
|
int encode = prefix_and_encode(dst->encoding(), false, src->encoding(), true);
|
||||||
emit_int8(0x0F);
|
emit_int8(0x0F);
|
||||||
emit_int8((unsigned char)0xBE);
|
emit_int8((unsigned char)0xBE);
|
||||||
emit_int8((unsigned char)(0xC0 | encode));
|
emit_int8((unsigned char)(0xC0 | encode));
|
||||||
|
@ -2516,7 +2610,7 @@ void Assembler::movzbl(Register dst, Address src) { // movzxb
|
||||||
|
|
||||||
void Assembler::movzbl(Register dst, Register src) { // movzxb
|
void Assembler::movzbl(Register dst, Register src) { // movzxb
|
||||||
NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
|
NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
|
||||||
int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
|
int encode = prefix_and_encode(dst->encoding(), false, src->encoding(), true);
|
||||||
emit_int8(0x0F);
|
emit_int8(0x0F);
|
||||||
emit_int8((unsigned char)0xB6);
|
emit_int8((unsigned char)0xB6);
|
||||||
emit_int8(0xC0 | encode);
|
emit_int8(0xC0 | encode);
|
||||||
|
@ -2951,6 +3045,15 @@ void Assembler::pextrq(Register dst, XMMRegister src, int imm8) {
|
||||||
emit_int8(imm8);
|
emit_int8(imm8);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void Assembler::pextrw(Register dst, XMMRegister src, int imm8) {
|
||||||
|
assert(VM_Version::supports_sse2(), "");
|
||||||
|
int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, /* no_mask_reg */ true,
|
||||||
|
VEX_OPCODE_0F, /* rex_w */ false, AVX_128bit, /* legacy_mode */ _legacy_mode_bw);
|
||||||
|
emit_int8((unsigned char)0xC5);
|
||||||
|
emit_int8((unsigned char)(0xC0 | encode));
|
||||||
|
emit_int8(imm8);
|
||||||
|
}
|
||||||
|
|
||||||
void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) {
|
void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) {
|
||||||
assert(VM_Version::supports_sse4_1(), "");
|
assert(VM_Version::supports_sse4_1(), "");
|
||||||
int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, /* no_mask_reg */ true,
|
int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, /* no_mask_reg */ true,
|
||||||
|
@@ -2969,6 +3072,15 @@ void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) {
   emit_int8(imm8);
 }

+void Assembler::pinsrw(XMMRegister dst, Register src, int imm8) {
+  assert(VM_Version::supports_sse2(), "");
+  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, /* no_mask_reg */ true,
+                                      VEX_OPCODE_0F, /* rex_w */ false, AVX_128bit, /* legacy_mode */ _legacy_mode_bw);
+  emit_int8((unsigned char)0xC4);
+  emit_int8((unsigned char)(0xC0 | encode));
+  emit_int8(imm8);
+}
+
 void Assembler::pmovzxbw(XMMRegister dst, Address src) {
   assert(VM_Version::supports_sse4_1(), "");
   if (VM_Version::supports_evex()) {
@@ -3984,6 +4096,16 @@ void Assembler::mulpd(XMMRegister dst, XMMRegister src) {
   }
 }

+void Assembler::mulpd(XMMRegister dst, Address src) {
+  _instruction_uses_vl = true;
+  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+  if (VM_Version::supports_evex()) {
+    emit_simd_arith_q(0x59, dst, src, VEX_SIMD_66);
+  } else {
+    emit_simd_arith(0x59, dst, src, VEX_SIMD_66);
+  }
+}
+
 void Assembler::mulps(XMMRegister dst, XMMRegister src) {
   _instruction_uses_vl = true;
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
@@ -4172,6 +4294,26 @@ void Assembler::vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
   emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_NONE, vector_len, /* no_mask_reg */ false, /* legacy_mode */ _legacy_mode_dq);
 }

+void Assembler::unpckhpd(XMMRegister dst, XMMRegister src) {
+  _instruction_uses_vl = true;
+  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+  if (VM_Version::supports_evex()) {
+    emit_simd_arith_q(0x15, dst, src, VEX_SIMD_66);
+  } else {
+    emit_simd_arith(0x15, dst, src, VEX_SIMD_66);
+  }
+}
+
+void Assembler::unpcklpd(XMMRegister dst, XMMRegister src) {
+  _instruction_uses_vl = true;
+  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+  if (VM_Version::supports_evex()) {
+    emit_simd_arith_q(0x14, dst, src, VEX_SIMD_66);
+  } else {
+    emit_simd_arith(0x14, dst, src, VEX_SIMD_66);
+  }
+}
+
 void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   if (VM_Version::supports_avx512dq()) {
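The two emitters added above encode UNPCKHPD (0F 15) and UNPCKLPD (0F 14), which interleave the high, respectively low, 64-bit lanes of the destination and source. A plain-C++ sketch of the data movement (not HotSpot code), on two-element arrays standing in for 128-bit registers:

    #include <cstdio>

    // unpcklpd: keep dst's low element, pull in src's low element.
    static void unpcklpd(double dst[2], const double src[2]) {
      dst[1] = src[0];
    }

    // unpckhpd: dst's high element moves down, src's high element moves up.
    static void unpckhpd(double dst[2], const double src[2]) {
      dst[0] = dst[1];
      dst[1] = src[1];
    }

    int main() {
      double a[2] = {1.0, 2.0}, b[2] = {10.0, 20.0};
      unpcklpd(a, b);                              // a == {1.0, 10.0}
      std::printf("%g %g\n", a[0], a[1]);
      double c[2] = {1.0, 2.0}, d[2] = {10.0, 20.0};
      unpckhpd(c, d);                              // c == {2.0, 20.0}
      std::printf("%g %g\n", c[0], c[1]);
      return 0;
    }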
@@ -4792,8 +4934,9 @@ void Assembler::vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
 }


-// AND packed integers
+// logical operations packed integers
 void Assembler::pand(XMMRegister dst, XMMRegister src) {
+  _instruction_uses_vl = true;
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   emit_simd_arith(0xDB, dst, src, VEX_SIMD_66);
 }
@@ -4814,6 +4957,17 @@ void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
   emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector_len);
 }

+void Assembler::pandn(XMMRegister dst, XMMRegister src) {
+  _instruction_uses_vl = true;
+  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+  if (VM_Version::supports_evex()) {
+    emit_simd_arith_q(0xDF, dst, src, VEX_SIMD_66);
+  }
+  else {
+    emit_simd_arith(0xDF, dst, src, VEX_SIMD_66);
+  }
+}
+
 void Assembler::por(XMMRegister dst, XMMRegister src) {
   _instruction_uses_vl = true;
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
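For reference, PANDN (opcode 0xDF) as emitted above computes DEST = NOT(DEST) AND SRC, that is, the destination is complemented before the AND. A one-function sketch of the per-lane operation (not HotSpot code):

    #include <cstdint>
    #include <cstdio>

    // Per 64-bit lane: invert dst, then AND with src.
    static uint64_t pandn(uint64_t dst, uint64_t src) {
      return ~dst & src;
    }

    int main() {
      // With an all-ones source, pandn yields the complement of dst.
      std::printf("%016llx\n", (unsigned long long)pandn(0x00FF00FF00FF00FFull, ~0ull));
      return 0;
    }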
@@ -6223,6 +6377,14 @@ void Assembler::shldl(Register dst, Register src) {
   emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding()));
 }

+// 0F A4 / r ib
+void Assembler::shldl(Register dst, Register src, int8_t imm8) {
+  emit_int8(0x0F);
+  emit_int8((unsigned char)0xA4);
+  emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding()));
+  emit_int8(imm8);
+}
+
 void Assembler::shrdl(Register dst, Register src) {
   emit_int8(0x0F);
   emit_int8((unsigned char)0xAD);
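The new shldl(dst, src, imm8) emits SHLD r/m32, r32, imm8 (0F A4 /r ib), a double-precision shift: dst is shifted left by imm8 and the vacated low bits are filled from the top of src. A sketch of the arithmetic for shift counts strictly between 0 and 32 (not HotSpot code, values illustrative only):

    #include <cstdint>
    #include <cstdio>

    // SHLD semantics for 32-bit operands, assuming 0 < imm8 < 32.
    static uint32_t shld32(uint32_t dst, uint32_t src, unsigned imm8) {
      return (dst << imm8) | (src >> (32 - imm8));
    }

    int main() {
      // 0x000000AB shifted left by 8, filled from the top byte of 0xCD000000.
      std::printf("%08X\n", (unsigned)shld32(0x000000ABu, 0xCD000000u, 8));  // prints 0000ABCD
      return 0;
    }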
@@ -6362,12 +6524,12 @@ int Assembler::prefixq_and_encode(int reg_enc) {
   return reg_enc;
 }

-int Assembler::prefix_and_encode(int dst_enc, int src_enc, bool byteinst) {
+int Assembler::prefix_and_encode(int dst_enc, bool dst_is_byte, int src_enc, bool src_is_byte) {
   if (dst_enc < 8) {
     if (src_enc >= 8) {
       prefix(REX_B);
       src_enc -= 8;
-    } else if (byteinst && src_enc >= 4) {
+    } else if ((src_is_byte && src_enc >= 4) || (dst_is_byte && dst_enc >= 4)) {
       prefix(REX);
     }
   } else {
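The widened prefix_and_encode() lets callers state, per operand, whether it is a byte register; the movsbl and movzbl call sites earlier in this diff now pass false for the 32-bit destination and true for the byte source. The underlying rule: in 64-bit mode a byte operand whose register encoding is 4..7 names AH/CH/DH/BH unless some REX prefix is present, in which case it names SPL/BPL/SIL/DIL, so an otherwise empty REX (0x40) must be emitted for those registers. A standalone sketch of that rule (not HotSpot code):

    #include <cstdio>

    // True when a byte operand with this encoding needs a REX prefix so that
    // it is interpreted as SPL/BPL/SIL/DIL instead of AH/CH/DH/BH.
    static bool needs_rex_for_byte_reg(int enc, bool is_byte) {
      return is_byte && enc >= 4 && enc <= 7;
    }

    int main() {
      std::printf("%d\n", needs_rex_for_byte_reg(6, true));   // sil (encoding 6): prints 1
      std::printf("%d\n", needs_rex_for_byte_reg(1, true));   // cl  (encoding 1): prints 0
      return 0;
    }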
@@ -6408,6 +6570,40 @@ void Assembler::prefix(Register reg) {
   }
 }

+void Assembler::prefix(Register dst, Register src, Prefix p) {
+  if (src->encoding() >= 8) {
+    p = (Prefix)(p | REX_B);
+  }
+  if (dst->encoding() >= 8) {
+    p = (Prefix)(p | REX_R);
+  }
+  if (p != Prefix_EMPTY) {
+    // do not generate an empty prefix
+    prefix(p);
+  }
+}
+
+void Assembler::prefix(Register dst, Address adr, Prefix p) {
+  if (adr.base_needs_rex()) {
+    if (adr.index_needs_rex()) {
+      assert(false, "prefix(Register dst, Address adr, Prefix p) does not support handling of an X");
+    } else {
+      prefix(REX_B);
+    }
+  } else {
+    if (adr.index_needs_rex()) {
+      assert(false, "prefix(Register dst, Address adr, Prefix p) does not support handling of an X");
+    }
+  }
+  if (dst->encoding() >= 8) {
+    p = (Prefix)(p | REX_R);
+  }
+  if (p != Prefix_EMPTY) {
+    // do not generate an empty prefix
+    prefix(p);
+  }
+}
+
 void Assembler::prefix(Address adr) {
   if (adr.base_needs_rex()) {
     if (adr.index_needs_rex()) {
@@ -506,7 +506,8 @@ class Assembler : public AbstractAssembler {

     VEX_3bytes = 0xC4,
     VEX_2bytes = 0xC5,
-    EVEX_4bytes = 0x62
+    EVEX_4bytes = 0x62,
+    Prefix_EMPTY = 0x0
   };

   enum VexPrefix {
@@ -535,7 +536,8 @@ class Assembler : public AbstractAssembler {
     VEX_OPCODE_NONE  = 0x0,
     VEX_OPCODE_0F    = 0x1,
     VEX_OPCODE_0F_38 = 0x2,
-    VEX_OPCODE_0F_3A = 0x3
+    VEX_OPCODE_0F_3A = 0x3,
+    VEX_OPCODE_MASK  = 0x1F
   };

   enum AvxVectorLen {
@@ -611,10 +613,15 @@ private:
   int prefix_and_encode(int reg_enc, bool byteinst = false);
   int prefixq_and_encode(int reg_enc);

-  int prefix_and_encode(int dst_enc, int src_enc, bool byteinst = false);
+  int prefix_and_encode(int dst_enc, int src_enc) {
+    return prefix_and_encode(dst_enc, false, src_enc, false);
+  }
+  int prefix_and_encode(int dst_enc, bool dst_is_byte, int src_enc, bool src_is_byte);
   int prefixq_and_encode(int dst_enc, int src_enc);

   void prefix(Register reg);
+  void prefix(Register dst, Register src, Prefix p);
+  void prefix(Register dst, Address adr, Prefix p);
   void prefix(Address adr);
   void prefixq(Address adr);
@@ -1177,6 +1184,10 @@ private:
   // Identify processor type and features
   void cpuid();

+  // CRC32C
+  void crc32(Register crc, Register v, int8_t sizeInBytes);
+  void crc32(Register crc, Address adr, int8_t sizeInBytes);
+
   // Convert Scalar Double-Precision Floating-Point Value to Scalar Single-Precision Floating-Point Value
   void cvtsd2ss(XMMRegister dst, XMMRegister src);
   void cvtsd2ss(XMMRegister dst, Address src);
|
||||||
// SSE 4.1 extract
|
// SSE 4.1 extract
|
||||||
void pextrd(Register dst, XMMRegister src, int imm8);
|
void pextrd(Register dst, XMMRegister src, int imm8);
|
||||||
void pextrq(Register dst, XMMRegister src, int imm8);
|
void pextrq(Register dst, XMMRegister src, int imm8);
|
||||||
|
// SSE 2 extract
|
||||||
|
void pextrw(Register dst, XMMRegister src, int imm8);
|
||||||
|
|
||||||
// SSE 4.1 insert
|
// SSE 4.1 insert
|
||||||
void pinsrd(XMMRegister dst, Register src, int imm8);
|
void pinsrd(XMMRegister dst, Register src, int imm8);
|
||||||
void pinsrq(XMMRegister dst, Register src, int imm8);
|
void pinsrq(XMMRegister dst, Register src, int imm8);
|
||||||
|
// SSE 2 insert
|
||||||
|
void pinsrw(XMMRegister dst, Register src, int imm8);
|
||||||
|
|
||||||
// SSE4.1 packed move
|
// SSE4.1 packed move
|
||||||
void pmovzxbw(XMMRegister dst, XMMRegister src);
|
void pmovzxbw(XMMRegister dst, XMMRegister src);
|
||||||
|
@ -1783,6 +1798,7 @@ private:
|
||||||
void setb(Condition cc, Register dst);
|
void setb(Condition cc, Register dst);
|
||||||
|
|
||||||
void shldl(Register dst, Register src);
|
void shldl(Register dst, Register src);
|
||||||
|
void shldl(Register dst, Register src, int8_t imm8);
|
||||||
|
|
||||||
void shll(Register dst, int imm8);
|
void shll(Register dst, int imm8);
|
||||||
void shll(Register dst);
|
void shll(Register dst);
|
||||||
|
@@ -1925,6 +1941,7 @@ private:

   // Multiply Packed Floating-Point Values
   void mulpd(XMMRegister dst, XMMRegister src);
+  void mulpd(XMMRegister dst, Address src);
   void mulps(XMMRegister dst, XMMRegister src);
   void vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
   void vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
|
||||||
void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
|
void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
|
||||||
void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
|
void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
|
||||||
|
|
||||||
|
void unpckhpd(XMMRegister dst, XMMRegister src);
|
||||||
|
void unpcklpd(XMMRegister dst, XMMRegister src);
|
||||||
|
|
||||||
// Bitwise Logical XOR of Packed Floating-Point Values
|
// Bitwise Logical XOR of Packed Floating-Point Values
|
||||||
void xorpd(XMMRegister dst, XMMRegister src);
|
void xorpd(XMMRegister dst, XMMRegister src);
|
||||||
void xorps(XMMRegister dst, XMMRegister src);
|
void xorps(XMMRegister dst, XMMRegister src);
|
||||||
|
@ -2046,6 +2066,9 @@ private:
|
||||||
void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
|
void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
|
||||||
void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
|
void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
|
||||||
|
|
||||||
|
// Andn packed integers
|
||||||
|
void pandn(XMMRegister dst, XMMRegister src);
|
||||||
|
|
||||||
// Or packed integers
|
// Or packed integers
|
||||||
void por(XMMRegister dst, XMMRegister src);
|
void por(XMMRegister dst, XMMRegister src);
|
||||||
void vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
|
void vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
|
||||||
|
|
Some files were not shown because too many files have changed in this diff.