commit c45adf1dca
Erik Helin, 2014-03-17 14:54:12 +01:00
4777 changed files with 49082 additions and 24436 deletions

View file

@ -245,3 +245,4 @@ a1ee9743f4ee165eae59389a020f2552f895dac8 jdk8-b120
13b877757b0b1c0d5813298df85364f41d7ba6fe jdk9-b00
f130ca87de6637acae7d99fcd7a8573eea1cbaed jdk9-b01
b32e2219736e42baaf45daf0ad67ed34f6033799 jdk9-b02
7f655f31f9bcee618cf832f08176ad8c1ed3fdd3 jdk9-b03

View file

@ -245,3 +245,4 @@ cd3825b2983045784d6fc6d1729c799b08215752 jdk8-b120
1e1f86d5d4e22c15a9bf9f1581acddb8c59abae2 jdk9-b00
50669e45cec4491de0d921d3118a3fe2e767020a jdk9-b01
135f0c7af57ebace31383d8877f47e32172759ff jdk9-b02
fd8d51bdf9aadf7ae83e65e8655c53581017c363 jdk9-b03

File diff suppressed because it is too large

View file

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -24,4 +24,4 @@
# This Makefile was generated by configure @DATE_WHEN_CONFIGURED@
# GENERATED FILE, DO NOT EDIT
SPEC:=@OUTPUT_ROOT@/spec.gmk
include @SRC_ROOT@/Makefile
include @TOPDIR@/Makefile

View file

@ -1,6 +1,6 @@
#!/bin/bash
#
# Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -22,6 +22,23 @@
# questions.
#
generate_configure_script() {
# First create a header
cat > $1 << EOT
#!/bin/bash
#
# ##########################################################
# ### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. ###
# ##########################################################
#
EOT
# Then replace "magic" variables in configure.ac and append the output
# from autoconf. $2 is either cat (just a no-op) or a filter.
cat $script_dir/configure.ac | sed -e "s|@DATE_WHEN_GENERATED@|$TIMESTAMP|" | \
eval $2 | ${AUTOCONF} -W all -I$script_dir - >> $1
rm -rf autom4te.cache
}
script_dir=`dirname $0`
# Create a timestamp as seconds since epoch
@ -36,7 +53,8 @@ else
fi
if test "x$CUSTOM_CONFIG_DIR" = "x"; then
custom_script_dir="$script_dir/../../jdk/make/closed/autoconf"
topdir=`cd $script_dir/../.. >/dev/null && pwd`
custom_script_dir="$topdir/closed/autoconf"
else
custom_script_dir=$CUSTOM_CONFIG_DIR
fi
@ -45,25 +63,23 @@ custom_hook=$custom_script_dir/custom-hook.m4
AUTOCONF="`which autoconf 2> /dev/null | grep -v '^no autoconf in'`"
echo "Autoconf found: ${AUTOCONF}"
if test "x${AUTOCONF}" = x; then
echo You need autoconf installed to be able to regenerate the configure script
echo Error: Cannot find autoconf 1>&2
echo "You need autoconf installed to be able to regenerate the configure script"
echo "Error: Cannot find autoconf" 1>&2
exit 1
fi
echo Generating generated-configure.sh with ${AUTOCONF}
cat $script_dir/configure.ac | sed -e "s|@DATE_WHEN_GENERATED@|$TIMESTAMP|" | ${AUTOCONF} -W all -I$script_dir - > $script_dir/generated-configure.sh
rm -rf autom4te.cache
autoconf_version=`$AUTOCONF --version | head -1`
echo "Using autoconf at ${AUTOCONF} [$autoconf_version]"
echo "Generating generated-configure.sh"
generate_configure_script "$script_dir/generated-configure.sh" 'cat'
if test -e $custom_hook; then
echo Generating custom generated-configure.sh
# We have custom sources available; also generate configure script
# with custom hooks compiled in.
cat $script_dir/configure.ac | sed -e "s|@DATE_WHEN_GENERATED@|$TIMESTAMP|" | \
sed -e "s|#CUSTOM_AUTOCONF_INCLUDE|m4_include([$custom_hook])|" | ${AUTOCONF} -W all -I$script_dir - > $custom_script_dir/generated-configure.sh
rm -rf autom4te.cache
echo "Generating custom generated-configure.sh"
generate_configure_script "$custom_script_dir/generated-configure.sh" 'sed -e "s|#CUSTOM_AUTOCONF_INCLUDE|m4_include([$custom_hook])|"'
else
echo No custom hook found: $custom_hook
echo "(No custom hook found at $custom_hook)"
fi
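
The second argument to generate_configure_script is a filter command: the open build passes a plain 'cat' (a no-op), while the custom build passes a sed expression that splices the closed custom-hook.m4 into configure.ac before autoconf runs. A minimal illustration of that filter step, using a made-up path:

  echo '#CUSTOM_AUTOCONF_INCLUDE' \
      | sed -e "s|#CUSTOM_AUTOCONF_INCLUDE|m4_include([/src/closed/autoconf/custom-hook.m4])|"
  # prints: m4_include([/src/closed/autoconf/custom-hook.m4])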

View file

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -126,7 +126,7 @@ AC_DEFUN([BASIC_FIXUP_EXECUTABLE],
done
IFS="$IFS_save"
else
AC_MSG_NOTICE([Resolving $1 (as $path) failed, using $path directly.])
# This is an absolute path, we can use it without further modifications.
new_path="$path"
fi
@ -418,14 +418,8 @@ AC_DEFUN_ONCE([BASIC_SETUP_FUNDAMENTAL_TOOLS],
# Setup basic configuration paths, and platform-specific stuff related to PATHs.
AC_DEFUN_ONCE([BASIC_SETUP_PATHS],
[
# Locate the directory of this script.
SCRIPT="[$]0"
AUTOCONF_DIR=`cd \`$DIRNAME $SCRIPT\`; $THEPWDCMD -L`
# Where is the source? It is located two levels above the configure script.
# Save the current directory this script was started from
CURDIR="$PWD"
cd "$AUTOCONF_DIR/../.."
SRC_ROOT="`$THEPWDCMD -L`"
if test "x$OPENJDK_TARGET_OS" = "xwindows"; then
PATH_SEP=";"
@ -433,13 +427,21 @@ AC_DEFUN_ONCE([BASIC_SETUP_PATHS],
else
PATH_SEP=":"
fi
AC_SUBST(SRC_ROOT)
AC_SUBST(PATH_SEP)
cd "$CURDIR"
BASIC_FIXUP_PATH(SRC_ROOT)
# We get the top-level directory from the supporting wrappers.
AC_MSG_CHECKING([for top-level directory])
AC_MSG_RESULT([$TOPDIR])
AC_SUBST(TOPDIR)
# We can only call BASIC_FIXUP_PATH after BASIC_CHECK_PATHS_WINDOWS.
BASIC_FIXUP_PATH(CURDIR)
BASIC_FIXUP_PATH(TOPDIR)
# SRC_ROOT is a traditional alias for TOPDIR.
SRC_ROOT=$TOPDIR
# Locate the directory of this script.
AUTOCONF_DIR=$TOPDIR/common/autoconf
if test "x$OPENJDK_BUILD_OS" = "xsolaris"; then
# Add extra search paths on solaris for utilities like ar and as etc...
@ -487,13 +489,17 @@ AC_DEFUN_ONCE([BASIC_SETUP_OUTPUT_DIR],
[ CONF_NAME=${with_conf_name} ])
# Test from where we are running configure, in or outside of src root.
AC_MSG_CHECKING([where to store configuration])
if test "x$CURDIR" = "x$SRC_ROOT" || test "x$CURDIR" = "x$SRC_ROOT/common" \
|| test "x$CURDIR" = "x$SRC_ROOT/common/autoconf" \
|| test "x$CURDIR" = "x$SRC_ROOT/make" ; then
# We are running configure from the src root.
# Create a default ./build/target-variant-debuglevel output root.
if test "x${CONF_NAME}" = x; then
AC_MSG_RESULT([in default location])
CONF_NAME="${OPENJDK_TARGET_OS}-${OPENJDK_TARGET_CPU}-${JDK_VARIANT}-${ANDED_JVM_VARIANTS}-${DEBUG_LEVEL}"
else
AC_MSG_RESULT([in build directory with custom name])
fi
OUTPUT_ROOT="$SRC_ROOT/build/${CONF_NAME}"
$MKDIR -p "$OUTPUT_ROOT"
@ -509,6 +515,7 @@ AC_DEFUN_ONCE([BASIC_SETUP_OUTPUT_DIR],
CONF_NAME=`$ECHO $CURDIR | $SED -e "s!^${SRC_ROOT}/build/!!"`
fi
OUTPUT_ROOT="$CURDIR"
AC_MSG_RESULT([in current directory])
# WARNING: This might be a bad thing to do. You need to be sure you want to
# have a configuration in this directory. Do some sanity checks!
@ -558,9 +565,6 @@ AC_DEFUN_ONCE([BASIC_SETUP_OUTPUT_DIR],
# You can run make from the OUTPUT_ROOT, or from the top-level Makefile
# which will look for generated configurations
AC_CONFIG_FILES([$OUTPUT_ROOT/Makefile:$AUTOCONF_DIR/Makefile.in])
# Save the arguments given to us
echo "$CONFIGURE_COMMAND_LINE" > $OUTPUT_ROOT/configure-arguments
])
AC_DEFUN_ONCE([BASIC_SETUP_LOGGING],

View file

@ -341,39 +341,45 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS],
[
##############################################################################
#
# Specify options for anything that is run with the Boot JDK.
# Specify jvm options for anything that is run with the Boot JDK.
# Not all JVM:s accept the same arguments on the command line.
#
AC_ARG_WITH(boot-jdk-jvmargs, [AS_HELP_STRING([--with-boot-jdk-jvmargs],
[specify JVM arguments to be passed to all invocations of the Boot JDK, overriding the default values,
[specify JVM arguments to be passed to all java invocations of boot JDK, overriding the default values,
e.g --with-boot-jdk-jvmargs="-Xmx8G -enableassertions"])])
if test "x$with_boot_jdk_jvmargs" = x; then
# Not all JVM:s accept the same arguments on the command line.
# OpenJDK specific increase in thread stack for JDK build,
# well more specifically, when running javac.
if test "x$BUILD_NUM_BITS" = x32; then
STACK_SIZE=768
else
# Running Javac on a JVM on a 64-bit machine, the stack takes more space
# since 64-bit pointers are pushed on the stach. Apparently, we need
# to increase the stack space when javacing the JDK....
STACK_SIZE=1536
fi
AC_MSG_CHECKING([flags for boot jdk java command] )
# Minimum amount of heap memory.
ADD_JVM_ARG_IF_OK([-Xms64M],boot_jdk_jvmargs,[$JAVA])
if test "x$OPENJDK_TARGET_OS" = "xmacosx" || test "x$OPENJDK_TARGET_CPU" = "xppc64" ; then
# Why does macosx need more heap? Its the huge JDK batch.
ADD_JVM_ARG_IF_OK([-Xmx1600M],boot_jdk_jvmargs,[$JAVA])
else
ADD_JVM_ARG_IF_OK([-Xmx1100M],boot_jdk_jvmargs,[$JAVA])
fi
# When is adding -client something that speeds up the JVM?
# ADD_JVM_ARG_IF_OK([-client],boot_jdk_jvmargs,[$JAVA])
ADD_JVM_ARG_IF_OK([-XX:ThreadStackSize=$STACK_SIZE],boot_jdk_jvmargs,[$JAVA])
# Disable special log output when a debug build is used as Boot JDK...
ADD_JVM_ARG_IF_OK([-XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput],boot_jdk_jvmargs,[$JAVA])
# Starting amount of heap memory.
ADD_JVM_ARG_IF_OK([-Xms64M],boot_jdk_jvmargs,[$JAVA])
# Maximum amount of heap memory.
# Maximum stack size.
if test "x$BUILD_NUM_BITS" = x32; then
JVM_MAX_HEAP=1100M
STACK_SIZE=768
else
# Running Javac on a JVM on a 64-bit machine, takes more space since 64-bit
# pointers are used. Apparently, we need to increase the heap and stack
# space for the jvm. More specifically, when running javac to build huge
# jdk batch
JVM_MAX_HEAP=1600M
STACK_SIZE=1536
fi
ADD_JVM_ARG_IF_OK([-Xmx$JVM_MAX_HEAP],boot_jdk_jvmargs,[$JAVA])
ADD_JVM_ARG_IF_OK([-XX:ThreadStackSize=$STACK_SIZE],boot_jdk_jvmargs,[$JAVA])
# Disable special log output when a debug build is used as Boot JDK...
ADD_JVM_ARG_IF_OK([-XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput],boot_jdk_jvmargs,[$JAVA])
# Apply user provided options.
ADD_JVM_ARG_IF_OK([$with_boot_jdk_jvmargs],boot_jdk_jvmargs,[$JAVA])
AC_MSG_RESULT([$boot_jdk_jvmargs])
# For now, general JAVA_FLAGS are the same as the boot jdk jvmargs
JAVA_FLAGS=$boot_jdk_jvmargs
AC_SUBST(BOOT_JDK_JVMARGS, $boot_jdk_jvmargs)
AC_SUBST(JAVA_FLAGS, $JAVA_FLAGS)
])
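
ADD_JVM_ARG_IF_OK is defined elsewhere and not shown in this diff; conceptually it runs the boot JDK's java with the candidate option and only appends the option to boot_jdk_jvmargs when that probe succeeds. A rough shell sketch of the idea (not the actual macro):

  # Probe one candidate flag against the boot JDK (illustration only).
  if "$JAVA" -Xms64M -version > /dev/null 2>&1; then
    boot_jdk_jvmargs="$boot_jdk_jvmargs -Xms64M"
  fi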

View file

@ -261,7 +261,7 @@ AC_DEFUN_ONCE([BPERF_SETUP_PRECOMPILED_HEADERS],
if test "x$ENABLE_PRECOMPH" = xyes; then
# Check that the compiler actually supports precomp headers.
if test "x$GCC" = xyes; then
if test "x$TOOLCHAIN_TYPE" = xgcc; then
AC_MSG_CHECKING([that precompiled headers work])
echo "int alfa();" > conftest.h
$CXX -x c++-header conftest.h -o conftest.hpp.gch 2>&AS_MESSAGE_LOG_FD >&AS_MESSAGE_LOG_FD
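
The remainder of this check is cut off by the hunk; presumably it tests whether a conftest.hpp.gch file was actually produced. A standalone sketch of the same probe for a gcc toolchain (file names are throwaway):

  echo "int alfa();" > conftest.h
  if g++ -x c++-header conftest.h -o conftest.hpp.gch 2> /dev/null \
      && test -f conftest.hpp.gch; then
    echo "precompiled headers work"
  fi
  rm -f conftest.h conftest.hpp.gch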

View file

@ -1,6 +1,6 @@
#!/bin/bash
#
# Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -64,7 +64,7 @@ UNIQ="@UNIQ@"
UNPACK200="@FIXPATH@ @BOOT_JDK@/bin/unpack200"
UNZIP="@UNZIP@"
SRC_ROOT="@SRC_ROOT@"
SRC_ROOT="@TOPDIR@"
if [ "$OPENJDK_TARGET_OS" = "windows" ]; then
PATH="@VS_PATH@"

View file

@ -1,6 +1,6 @@
#!/bin/bash
#
# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -22,19 +22,26 @@
# questions.
#
if test "x$BASH_VERSION" = x; then
echo This script needs bash to run.
echo It is recommended to use the configure script in the source tree root instead.
exit 1
if test "x$1" != xCHECKME; then
echo "WARNING: Calling the wrapper script directly is deprecated and unsupported."
echo "Not all features of configure will be available."
echo "Use the 'configure' script in the top-level directory instead."
TOPDIR=$(cd $(dirname $0)/../.. > /dev/null && pwd)
else
# Now the next argument is the absolute top-level directory path.
# The TOPDIR variable is passed on to configure.ac.
TOPDIR="$2"
# Remove these two arguments to get to the user supplied arguments
shift
shift
fi
CONFIGURE_COMMAND_LINE="$@"
conf_script_dir=`dirname $0`
conf_script_dir="$TOPDIR/common/autoconf"
if [ "$CUSTOM_CONFIG_DIR" = "" ]; then
conf_custom_script_dir="$conf_script_dir/../../jdk/make/closed/autoconf"
conf_custom_script_dir="$TOPDIR/closed/autoconf"
else
conf_custom_script_dir=$CUSTOM_CONFIG_DIR
conf_custom_script_dir="$CUSTOM_CONFIG_DIR"
fi
###
@ -110,14 +117,41 @@ fi
if test "x$conf_debug_configure" = xtrue; then
conf_debug_configure=recursive
fi
###
### Process command-line arguments
###
# Returns a shell-escaped version of the argument given.
function shell_quote() {
if [[ -n "$1" ]]; then
# Uses only shell-safe characters? No quoting needed.
# '=' is a zsh meta-character, but only in word-initial position.
if echo "$1" | grep '^[ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789\.:,%/+=_-]\{1,\}$' > /dev/null \
&& ! echo "$1" | grep '^=' > /dev/null; then
quoted="$1"
else
if echo "$1" | grep "[\'!]" > /dev/null; then
# csh does history expansion within single quotes, but not
# when backslash-escaped!
local quoted_quote="'\\''" quoted_exclam="'\\!'"
word="${1//\'/${quoted_quote}}"
word="${1//\!/${quoted_exclam}}"
fi
quoted="'$1'"
fi
echo "$quoted"
fi
}
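# Illustration (not part of the original script): shell_quote passes
# shell-safe words through untouched and single-quotes everything else, e.g.
#   shell_quote abc              -> abc
#   shell_quote --with-jobs=4    -> --with-jobs=4
#   shell_quote "a b"            -> 'a b'
#   shell_quote "--with-x=a b"   -> '--with-x=a b'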
conf_processed_arguments=()
conf_quoted_arguments=()
conf_openjdk_target=
for conf_option
do
# Process (and remove) our own extensions that will not be passed to autoconf
case $conf_option in
--openjdk-target=*)
conf_openjdk_target=`expr "X$conf_option" : '[^=]*=\(.*\)'`
@ -128,18 +162,35 @@ do
export conf_debug_configure
fi
;;
[^-]*=*)
# Add name of variable to CONFIGURE_OVERRIDDEN_VARIABLES list inside !...!.
conf_env_var=`expr "x$conf_option" : 'x\([^=]*\)='`
CONFIGURE_OVERRIDDEN_VARIABLES="$CONFIGURE_OVERRIDDEN_VARIABLES!$conf_env_var!"
# ... and then process argument as usual
conf_processed_arguments=("${conf_processed_arguments[@]}" "$conf_option")
;;
*)
conf_processed_arguments=("${conf_processed_arguments[@]}" "$conf_option")
;;
esac
# Store all variables overridden on the command line
case $conf_option in
[^-]*=*)
# Add name of variable to CONFIGURE_OVERRIDDEN_VARIABLES list inside !...!.
conf_env_var=`expr "x$conf_option" : 'x\([^=]*\)='`
CONFIGURE_OVERRIDDEN_VARIABLES="$CONFIGURE_OVERRIDDEN_VARIABLES!$conf_env_var!"
;;
esac
# Save the arguments, intelligently quoted for CONFIGURE_COMMAND_LINE.
case $conf_option in
*=*)
conf_option_name=`expr "x$conf_option" : 'x\([^=]*\)='`
conf_option_name=$(shell_quote "$conf_option_name")
conf_option_value=`expr "x$conf_option" : 'x[^=]*=\(.*\)'`
conf_option_value=$(shell_quote "$conf_option_value")
conf_quoted_arguments=("${conf_quoted_arguments[@]}" "$conf_option_name=$conf_option_value")
;;
*)
conf_quoted_arguments=("${conf_quoted_arguments[@]}" "$(shell_quote "$conf_option")")
;;
esac
# Check for certain autoconf options that require extra action
case $conf_option in
-build | --build | --buil | --bui | --bu |-build=* | --build=* | --buil=* | --bui=* | --bu=*)
conf_legacy_crosscompile="$conf_legacy_crosscompile $conf_option" ;;
@ -152,6 +203,9 @@ do
esac
done
# Save the quoted command line
CONFIGURE_COMMAND_LINE="${conf_quoted_arguments[@]}"
if test "x$conf_legacy_crosscompile" != "x"; then
if test "x$conf_openjdk_target" != "x"; then
echo "Error: Specifying --openjdk-target together with autoconf"
@ -181,10 +235,10 @@ conf_processed_arguments=("--enable-option-checking=fatal" "${conf_processed_arg
###
if test -e $conf_custom_script_dir/generated-configure.sh; then
# Custom source configure available; run that instead
echo Running custom generated-configure.sh
echo "Running custom generated-configure.sh"
conf_script_to_run=$conf_custom_script_dir/generated-configure.sh
else
echo Running generated-configure.sh
echo "Running generated-configure.sh"
conf_script_to_run=$conf_script_dir/generated-configure.sh
fi
@ -218,6 +272,13 @@ Additional (non-autoconf) OpenJDK Options:
--debug-configure Run the configure script with additional debug
logging enabled.
EOT
# Print list of toolchains. This must be done by the autoconf script.
( CONFIGURE_PRINT_TOOLCHAIN_LIST=true . $conf_script_to_run PRINTF=printf )
cat <<EOT
Please be aware that, when cross-compiling, the OpenJDK configure script will
generally use 'target' where autoconf traditionally uses 'host'.

View file

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -31,9 +31,9 @@
AC_PREREQ([2.69])
AC_INIT(OpenJDK, jdk8, build-dev@openjdk.java.net,,http://openjdk.java.net)
AC_INIT(OpenJDK, jdk9, build-dev@openjdk.java.net,,http://openjdk.java.net)
AC_CONFIG_AUX_DIR([build-aux])
AC_CONFIG_AUX_DIR([$TOPDIR/common/autoconf/build-aux])
m4_include([build-aux/pkg.m4])
# Include these first...
@ -43,6 +43,7 @@ m4_include([builddeps.m4])
# ... then the rest
m4_include([boot-jdk.m4])
m4_include([build-performance.m4])
m4_include([flags.m4])
m4_include([help.m4])
m4_include([jdk-options.m4])
m4_include([libraries.m4])
@ -71,7 +72,11 @@ DATE_WHEN_GENERATED=@DATE_WHEN_GENERATED@
#
###############################################################################
# Basic initialization that must happen first of all
# If we are requested to print additional help, do that and then exit.
# This must be the very first call.
HELP_PRINT_ADDITIONAL_HELP_AND_EXIT
# Basic initialization that must happen first of all in the normal process.
BASIC_INIT
BASIC_SETUP_FUNDAMENTAL_TOOLS
@ -148,26 +153,41 @@ SRCDIRS_SETUP_OUTPUT_DIRS
###############################################################################
#
# Setup the toolchain (compilers etc), i.e. the tools that need to be
# cross-compilation aware.
# Setup the toolchain (compilers etc), i.e. tools used to compile and process
# native code.
#
###############################################################################
TOOLCHAIN_SETUP_SYSROOT_AND_OUT_OPTIONS
# Locate the actual tools
TOOLCHAIN_SETUP_PATHS
# First determine the toolchain type (compiler family)
TOOLCHAIN_DETERMINE_TOOLCHAIN_TYPE
# FIXME: Currently we must test this after paths but before flags. Fix!
# Then detect the actual binaries needed
TOOLCHAIN_PRE_DETECTION
TOOLCHAIN_DETECT_TOOLCHAIN_CORE
TOOLCHAIN_DETECT_TOOLCHAIN_EXTRA
TOOLCHAIN_POST_DETECTION
# And we can test some aspects on the target using configure macros.
# Finally do some processing after the detection phase
TOOLCHAIN_SETUP_BUILD_COMPILERS
TOOLCHAIN_SETUP_LEGACY
TOOLCHAIN_MISC_CHECKS
# Setup the JTReg Regression Test Harness.
TOOLCHAIN_SETUP_JTREG
FLAGS_SETUP_INIT_FLAGS
# FIXME: Currently we must test this after toolchain but before flags. Fix!
# Now we can test some aspects on the target using configure macros.
PLATFORM_SETUP_OPENJDK_TARGET_BITS
PLATFORM_SETUP_OPENJDK_TARGET_ENDIANNESS
# Configure flags for the tools
TOOLCHAIN_SETUP_COMPILER_FLAGS_FOR_LIBS
TOOLCHAIN_SETUP_COMPILER_FLAGS_FOR_OPTIMIZATION
TOOLCHAIN_SETUP_COMPILER_FLAGS_FOR_JDK
TOOLCHAIN_SETUP_COMPILER_FLAGS_MISC
FLAGS_SETUP_COMPILER_FLAGS_FOR_LIBS
FLAGS_SETUP_COMPILER_FLAGS_FOR_OPTIMIZATION
FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK
FLAGS_SETUP_COMPILER_FLAGS_MISC
# Setup debug symbols (need objcopy from the toolchain for that)
JDKOPT_SETUP_DEBUG_SYMBOLS
@ -188,7 +208,7 @@ LIB_SETUP_FREETYPE
LIB_SETUP_ALSA
LIB_SETUP_MISC_LIBS
LIB_SETUP_STATIC_LINK_LIBSTDCPP
LIB_SETUP_ON_WINDOWS
###############################################################################
#

common/autoconf/flags.m4 (new file, 699 lines added)
View file

@ -0,0 +1,699 @@
#
# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
AC_DEFUN_ONCE([FLAGS_SETUP_INIT_FLAGS],
[
# Option used to tell the compiler whether to create 32- or 64-bit executables
if test "x$TOOLCHAIN_TYPE" = xxlc; then
COMPILER_TARGET_BITS_FLAG="-q"
else
COMPILER_TARGET_BITS_FLAG="-m"
fi
AC_SUBST(COMPILER_TARGET_BITS_FLAG)
# FIXME: figure out if we should select AR flags depending on OS or toolchain.
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
ARFLAGS="-r"
elif test "x$OPENJDK_TARGET_OS" = xaix; then
ARFLAGS="-X64"
elif test "x$OPENJDK_TARGET_OS" = xwindows; then
# lib.exe is used as AR to create static libraries.
ARFLAGS="-nologo -NODEFAULTLIB:MSVCRT"
else
ARFLAGS=""
fi
AC_SUBST(ARFLAGS)
## Setup strip.
# FIXME: should this really be per platform, or should it be per toolchain type?
# strip is not provided by clang or solstudio; so guessing platform makes most sense.
# FIXME: we should really only export STRIPFLAGS from here, not POST_STRIP_CMD.
if test "x$OPENJDK_TARGET_OS" = xlinux; then
STRIPFLAGS="-g"
elif test "x$OPENJDK_TARGET_OS" = xsolaris; then
STRIPFLAGS="-x"
elif test "x$OPENJDK_TARGET_OS" = xmacosx; then
STRIPFLAGS="-S"
elif test "x$OPENJDK_TARGET_OS" = xaix; then
STRIPFLAGS="-X32_64"
fi
if test "x$OPENJDK_TARGET_OS" != xwindows; then
POST_STRIP_CMD="$STRIP $STRIPFLAGS"
fi
AC_SUBST(POST_STRIP_CMD)
if test "x$OPENJDK_TARGET_OS" = xsolaris; then
# FIXME: break out into MCSFLAGS
POST_MCS_CMD="$MCS -d -a \"JDK $FULL_VERSION\""
fi
AC_SUBST(POST_MCS_CMD)
if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
CC_OUT_OPTION=-Fo
EXE_OUT_OPTION=-out:
LD_OUT_OPTION=-out:
AR_OUT_OPTION=-out:
else
# The option used to specify the target .o,.a or .so file.
# When compiling, how to specify the to be created object file.
CC_OUT_OPTION='-o$(SPACE)'
# When linking, how to specify the to be created executable.
EXE_OUT_OPTION='-o$(SPACE)'
# When linking, how to specify the to be created dynamically linkable library.
LD_OUT_OPTION='-o$(SPACE)'
# When archiving, how to specify the to be create static archive for object files.
AR_OUT_OPTION='rcs$(SPACE)'
fi
AC_SUBST(CC_OUT_OPTION)
AC_SUBST(EXE_OUT_OPTION)
AC_SUBST(LD_OUT_OPTION)
AC_SUBST(AR_OUT_OPTION)
# On Windows, we need to set RC flags.
if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
RC_FLAGS="-nologo -l 0x409 -r"
if test "x$VARIANT" = xOPT; then
RC_FLAGS="$RC_FLAGS -d NDEBUG"
fi
# The version variables used to create RC_FLAGS may be overridden
# in a custom configure script, or possibly the command line.
# Let those variables be expanded at make time in spec.gmk.
# The \$ are escaped to the shell, and the $(...) variables
# are evaluated by make.
RC_FLAGS="$RC_FLAGS \
-d \"JDK_BUILD_ID=\$(FULL_VERSION)\" \
-d \"JDK_COMPANY=\$(COMPANY_NAME)\" \
-d \"JDK_COMPONENT=\$(PRODUCT_NAME) \$(JDK_RC_PLATFORM_NAME) binary\" \
-d \"JDK_VER=\$(JDK_MINOR_VERSION).\$(JDK_MICRO_VERSION).\$(if \$(JDK_UPDATE_VERSION),\$(JDK_UPDATE_VERSION),0).\$(COOKED_BUILD_NUMBER)\" \
-d \"JDK_COPYRIGHT=Copyright \xA9 $COPYRIGHT_YEAR\" \
-d \"JDK_NAME=\$(PRODUCT_NAME) \$(JDK_RC_PLATFORM_NAME) \$(JDK_MINOR_VERSION) \$(JDK_UPDATE_META_TAG)\" \
-d \"JDK_FVER=\$(JDK_MINOR_VERSION),\$(JDK_MICRO_VERSION),\$(if \$(JDK_UPDATE_VERSION),\$(JDK_UPDATE_VERSION),0),\$(COOKED_BUILD_NUMBER)\""
fi
AC_SUBST(RC_FLAGS)
if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
# FIXME: likely bug, should be CCXXFLAGS_JDK? or one for C or CXX.
CCXXFLAGS="$CCXXFLAGS -nologo"
fi
])
AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_LIBS],
[
###############################################################################
#
# How to compile shared libraries.
#
if test "x$TOOLCHAIN_TYPE" = xgcc; then
PICFLAG="-fPIC"
C_FLAG_REORDER=''
CXX_FLAG_REORDER=''
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
# Linking is different on MacOSX
SHARED_LIBRARY_FLAGS="-dynamiclib -compatibility_version 1.0.0 -current_version 1.0.0 $PICFLAG"
SET_EXECUTABLE_ORIGIN='-Xlinker -rpath -Xlinker @loader_path/.'
SET_SHARED_LIBRARY_ORIGIN="$SET_EXECUTABLE_ORIGIN"
SET_SHARED_LIBRARY_NAME='-Xlinker -install_name -Xlinker @rpath/[$]1'
SET_SHARED_LIBRARY_MAPFILE=''
else
# Default works for linux, might work on other platforms as well.
SHARED_LIBRARY_FLAGS='-shared'
SET_EXECUTABLE_ORIGIN='-Xlinker -rpath -Xlinker \$$$$ORIGIN[$]1'
SET_SHARED_LIBRARY_ORIGIN="-Xlinker -z -Xlinker origin $SET_EXECUTABLE_ORIGIN"
SET_SHARED_LIBRARY_NAME='-Xlinker -soname=[$]1'
SET_SHARED_LIBRARY_MAPFILE='-Xlinker -version-script=[$]1'
fi
elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then
PICFLAG="-KPIC"
C_FLAG_REORDER='-xF'
CXX_FLAG_REORDER='-xF'
SHARED_LIBRARY_FLAGS="-G"
SET_EXECUTABLE_ORIGIN='-R\$$$$ORIGIN[$]1'
SET_SHARED_LIBRARY_ORIGIN="$SET_EXECUTABLE_ORIGIN"
SET_SHARED_LIBRARY_NAME=''
SET_SHARED_LIBRARY_MAPFILE='-M[$]1'
elif test "x$TOOLCHAIN_TYPE" = xxlc; then
PICFLAG="-qpic=large"
C_FLAG_REORDER=''
CXX_FLAG_REORDER=''
SHARED_LIBRARY_FLAGS="-qmkshrobj"
SET_EXECUTABLE_ORIGIN=""
SET_SHARED_LIBRARY_ORIGIN=''
SET_SHARED_LIBRARY_NAME=''
SET_SHARED_LIBRARY_MAPFILE=''
elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
PICFLAG=""
C_FLAG_REORDER=''
CXX_FLAG_REORDER=''
SHARED_LIBRARY_FLAGS="-LD"
SET_EXECUTABLE_ORIGIN=''
SET_SHARED_LIBRARY_ORIGIN=''
SET_SHARED_LIBRARY_NAME=''
SET_SHARED_LIBRARY_MAPFILE=''
fi
AC_SUBST(C_FLAG_REORDER)
AC_SUBST(CXX_FLAG_REORDER)
AC_SUBST(SHARED_LIBRARY_FLAGS)
AC_SUBST(SET_EXECUTABLE_ORIGIN)
AC_SUBST(SET_SHARED_LIBRARY_ORIGIN)
AC_SUBST(SET_SHARED_LIBRARY_NAME)
AC_SUBST(SET_SHARED_LIBRARY_MAPFILE)
if test "x$OPENJDK_TARGET_OS" = xsolaris; then
CFLAGS_JDK="${CFLAGS_JDK} -D__solaris__"
CXXFLAGS_JDK="${CXXFLAGS_JDK} -D__solaris__"
CFLAGS_JDKLIB_EXTRA='-xstrconst'
fi
# The (cross) compiler is now configured, we can now test capabilities
# of the target platform.
])
# Documentation on common flags used for solstudio in HIGHEST.
#
# WARNING: Use of OPTIMIZATION_LEVEL=HIGHEST in your Makefile needs to be
# done with care, there are some assumptions below that need to
# be understood about the use of pointers, and IEEE behavior.
#
# -fns: Use non-standard floating point mode (not IEEE 754)
# -fsimple: Do some simplification of floating point arithmetic (not IEEE 754)
# -fsingle: Use single precision floating point with 'float'
# -xalias_level=basic: Assume memory references via basic pointer types do not alias
# (Source with excessive pointer casting and data access with mixed
# pointer types are not recommended)
# -xbuiltin=%all: Use intrinsic or inline versions for math/std functions
# (If you expect perfect errno behavior, do not use this)
# -xdepend: Loop data dependency optimizations (need -xO3 or higher)
# -xrestrict: Pointer parameters to functions do not overlap
# (Similar to -xalias_level=basic usage, but less obvious sometimes.
# If you pass in multiple pointers to the same data, do not use this)
# -xlibmil: Inline some library routines
# (If you expect perfect errno behavior, do not use this)
# -xlibmopt: Use optimized math routines (CURRENTLY DISABLED)
# (If you expect perfect errno behavior, do not use this)
# Can cause undefined external on Solaris 8 X86 on __sincos, removing for now
# FIXME: this will never happen since sparc != sparcv9, ie 32 bit, which we don't build anymore.
# Bug?
#if test "x$OPENJDK_TARGET_CPU" = xsparc; then
# CFLAGS_JDK="${CFLAGS_JDK} -xmemalign=4s"
# CXXFLAGS_JDK="${CXXFLAGS_JDK} -xmemalign=4s"
#fi
AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_OPTIMIZATION],
[
###############################################################################
#
# Setup the opt flags for different compilers
# and different operating systems.
#
# FIXME: this was indirectly the old default, but just inherited.
# if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
# C_FLAG_DEPS="-MMD -MF"
# fi
# Generate make dependency files
if test "x$TOOLCHAIN_TYPE" = xgcc; then
C_FLAG_DEPS="-MMD -MF"
elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then
C_FLAG_DEPS="-xMMD -xMF"
elif test "x$TOOLCHAIN_TYPE" = xxlc; then
C_FLAG_DEPS="-qmakedep=gcc -MF"
fi
CXX_FLAG_DEPS="$C_FLAG_DEPS"
AC_SUBST(C_FLAG_DEPS)
AC_SUBST(CXX_FLAG_DEPS)
# Debug symbols
if test "x$TOOLCHAIN_TYPE" = xgcc; then
if test "x$OPENJDK_TARGET_CPU_BITS" = "x64" && test "x$DEBUG_LEVEL" = "xfastdebug"; then
CFLAGS_DEBUG_SYMBOLS="-g1"
CXXFLAGS_DEBUG_SYMBOLS="-g1"
else
CFLAGS_DEBUG_SYMBOLS="-g"
CXXFLAGS_DEBUG_SYMBOLS="-g"
fi
elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then
CFLAGS_DEBUG_SYMBOLS="-g -xs"
CXXFLAGS_DEBUG_SYMBOLS="-g0 -xs"
elif test "x$TOOLCHAIN_TYPE" = xxlc; then
CFLAGS_DEBUG_SYMBOLS="-g"
CXXFLAGS_DEBUG_SYMBOLS="-g"
fi
AC_SUBST(CFLAGS_DEBUG_SYMBOLS)
AC_SUBST(CXXFLAGS_DEBUG_SYMBOLS)
# Optimization levels
if test "x$TOOLCHAIN_TYPE" = xsolstudio; then
CC_HIGHEST="$CC_HIGHEST -fns -fsimple -fsingle -xbuiltin=%all -xdepend -xrestrict -xlibmil"
if test "x$OPENJDK_TARGET_CPU_ARCH" = "xx86"; then
# FIXME: seems we always set -xregs=no%frameptr; put it elsewhere more global?
C_O_FLAG_HIGHEST="-xO4 -Wu,-O4~yz $CC_HIGHEST -xalias_level=basic -xregs=no%frameptr"
C_O_FLAG_HI="-xO4 -Wu,-O4~yz -xregs=no%frameptr"
C_O_FLAG_NORM="-xO2 -Wu,-O2~yz -xregs=no%frameptr"
C_O_FLAG_NONE="-xregs=no%frameptr"
CXX_O_FLAG_HIGHEST="-xO4 -Qoption ube -O4~yz $CC_HIGHEST -xregs=no%frameptr"
CXX_O_FLAG_HI="-xO4 -Qoption ube -O4~yz -xregs=no%frameptr"
CXX_O_FLAG_NORM="-xO2 -Qoption ube -O2~yz -xregs=no%frameptr"
CXX_O_FLAG_NONE="-xregs=no%frameptr"
if test "x$OPENJDK_TARGET_CPU_BITS" = "x32"; then
C_O_FLAG_HIGHEST="$C_O_FLAG_HIGHEST -xchip=pentium"
CXX_O_FLAG_HIGHEST="$CXX_O_FLAG_HIGHEST -xchip=pentium"
fi
elif test "x$OPENJDK_TARGET_CPU_ARCH" = "xsparc"; then
C_O_FLAG_HIGHEST="-xO4 -Wc,-Qrm-s -Wc,-Qiselect-T0 $CC_HIGHEST -xalias_level=basic -xprefetch=auto,explicit -xchip=ultra"
C_O_FLAG_HI="-xO4 -Wc,-Qrm-s -Wc,-Qiselect-T0"
C_O_FLAG_NORM="-xO2 -Wc,-Qrm-s -Wc,-Qiselect-T0"
C_O_FLAG_NONE=""
CXX_O_FLAG_HIGHEST="-xO4 -Qoption cg -Qrm-s -Qoption cg -Qiselect-T0 $CC_HIGHEST -xprefetch=auto,explicit -xchip=ultra"
CXX_O_FLAG_HI="-xO4 -Qoption cg -Qrm-s -Qoption cg -Qiselect-T0"
CXX_O_FLAG_NORM="-xO2 -Qoption cg -Qrm-s -Qoption cg -Qiselect-T0"
CXX_O_FLAG_NONE=""
fi
else
# The remaining toolchains share opt flags between CC and CXX;
# setup for C and duplicate afterwards.
if test "x$TOOLCHAIN_TYPE" = xgcc; then
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
# On MacOSX we optimize for size, something
# we should do for all platforms?
C_O_FLAG_HIGHEST="-Os"
C_O_FLAG_HI="-Os"
C_O_FLAG_NORM="-Os"
C_O_FLAG_NONE=""
else
C_O_FLAG_HIGHEST="-O3"
C_O_FLAG_HI="-O3"
C_O_FLAG_NORM="-O2"
C_O_FLAG_NONE="-O0"
fi
elif test "x$TOOLCHAIN_TYPE" = xxlc; then
C_O_FLAG_HIGHEST="-O3"
C_O_FLAG_HI="-O3 -qstrict"
C_O_FLAG_NORM="-O2"
C_O_FLAG_NONE=""
elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
C_O_FLAG_HIGHEST="-O2"
C_O_FLAG_HI="-O1"
C_O_FLAG_NORM="-O1"
C_O_FLAG_NONE="-Od"
fi
CXX_O_FLAG_HIGHEST="$C_O_FLAG_HIGHEST"
CXX_O_FLAG_HI="$C_O_FLAG_HI"
CXX_O_FLAG_NORM="$C_O_FLAG_NORM"
CXX_O_FLAG_NONE="$C_O_FLAG_NONE"
fi
AC_SUBST(C_O_FLAG_HIGHEST)
AC_SUBST(C_O_FLAG_HI)
AC_SUBST(C_O_FLAG_NORM)
AC_SUBST(C_O_FLAG_NONE)
AC_SUBST(CXX_O_FLAG_HIGHEST)
AC_SUBST(CXX_O_FLAG_HI)
AC_SUBST(CXX_O_FLAG_NORM)
AC_SUBST(CXX_O_FLAG_NONE)
])
AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK],
[
# Special extras...
if test "x$TOOLCHAIN_TYPE" = xsolstudio; then
if test "x$OPENJDK_TARGET_CPU_ARCH" = "xsparc"; then
CFLAGS_JDKLIB_EXTRA="${CFLAGS_JDKLIB_EXTRA} -xregs=no%appl"
CXXFLAGS_JDKLIB_EXTRA="${CXXFLAGS_JDKLIB_EXTRA} -xregs=no%appl"
fi
elif test "x$TOOLCHAIN_TYPE" = xxlc; then
LDFLAGS_JDK="${LDFLAGS_JDK} -q64 -brtl -bnolibpath -liconv -bexpall"
CFLAGS_JDK="${CFLAGS_JDK} -qchars=signed -q64 -qfullpath -qsaveopt"
CXXFLAGS_JDK="${CXXFLAGS_JDK} -qchars=signed -q64 -qfullpath -qsaveopt"
fi
if test "x$CFLAGS" != "x${ADDED_CFLAGS}"; then
AC_MSG_WARN([Ignoring CFLAGS($CFLAGS) found in environment. Use --with-extra-cflags])
fi
if test "x$CXXFLAGS" != "x${ADDED_CXXFLAGS}"; then
AC_MSG_WARN([Ignoring CXXFLAGS($CXXFLAGS) found in environment. Use --with-extra-cxxflags])
fi
if test "x$LDFLAGS" != "x${ADDED_LDFLAGS}"; then
AC_MSG_WARN([Ignoring LDFLAGS($LDFLAGS) found in environment. Use --with-extra-ldflags])
fi
AC_ARG_WITH(extra-cflags, [AS_HELP_STRING([--with-extra-cflags],
[extra flags to be used when compiling jdk c-files])])
AC_ARG_WITH(extra-cxxflags, [AS_HELP_STRING([--with-extra-cxxflags],
[extra flags to be used when compiling jdk c++-files])])
AC_ARG_WITH(extra-ldflags, [AS_HELP_STRING([--with-extra-ldflags],
[extra flags to be used when linking jdk])])
CFLAGS_JDK="${CFLAGS_JDK} $with_extra_cflags"
CXXFLAGS_JDK="${CXXFLAGS_JDK} $with_extra_cxxflags"
LDFLAGS_JDK="${LDFLAGS_JDK} $with_extra_ldflags"
# Hotspot needs these set in their legacy form
LEGACY_EXTRA_CFLAGS=$with_extra_cflags
LEGACY_EXTRA_CXXFLAGS=$with_extra_cxxflags
LEGACY_EXTRA_LDFLAGS=$with_extra_ldflags
AC_SUBST(LEGACY_EXTRA_CFLAGS)
AC_SUBST(LEGACY_EXTRA_CXXFLAGS)
AC_SUBST(LEGACY_EXTRA_LDFLAGS)
###############################################################################
#
# Now setup the CFLAGS and LDFLAGS for the JDK build.
# Later we will also have CFLAGS and LDFLAGS for the hotspot subrepo build.
#
# Setup compiler/platform specific flags to CFLAGS_JDK,
# CXXFLAGS_JDK and CCXXFLAGS_JDK (common to C and CXX?)
if test "x$TOOLCHAIN_TYPE" = xgcc; then
# these options are used for both C and C++ compiles
CCXXFLAGS_JDK="$CCXXFLAGS $CCXXFLAGS_JDK -Wall -Wno-parentheses -Wextra -Wno-unused -Wno-unused-parameter -Wformat=2 \
-pipe -D_GNU_SOURCE -D_REENTRANT -D_LARGEFILE64_SOURCE"
case $OPENJDK_TARGET_CPU_ARCH in
arm )
# on arm we don't prevent gcc to omit frame pointer but do prevent strict aliasing
CFLAGS_JDK="${CFLAGS_JDK} -fno-strict-aliasing"
;;
ppc )
# on ppc we don't prevent gcc to omit frame pointer nor strict-aliasing
;;
* )
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -fno-omit-frame-pointer"
CFLAGS_JDK="${CFLAGS_JDK} -fno-strict-aliasing"
;;
esac
elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then
CCXXFLAGS_JDK="$CCXXFLAGS $CCXXFLAGS_JDK -DTRACING -DMACRO_MEMSYS_OPS -DBREAKPTS"
if test "x$OPENJDK_TARGET_CPU_ARCH" = xx86; then
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DcpuIntel -Di586 -D$OPENJDK_TARGET_CPU_LEGACY_LIB"
CFLAGS_JDK="$CFLAGS_JDK -erroff=E_BAD_PRAGMA_PACK_VALUE"
fi
CFLAGS_JDK="$CFLAGS_JDK -xc99=%none -xCC -errshort=tags -Xa -v -mt -W0,-noglobal"
CXXFLAGS_JDK="$CXXFLAGS_JDK -errtags=yes +w -mt -features=no%except -DCC_NOEX -norunpath -xnolib"
elif test "x$TOOLCHAIN_TYPE" = xxlc; then
CFLAGS_JDK="$CFLAGS_JDK -D_GNU_SOURCE -D_REENTRANT -D_LARGEFILE64_SOURCE -DSTDC"
CXXFLAGS_JDK="$CXXFLAGS_JDK -D_GNU_SOURCE -D_REENTRANT -D_LARGEFILE64_SOURCE -DSTDC"
elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
CCXXFLAGS_JDK="$CCXXFLAGS $CCXXFLAGS_JDK -Zi -MD -Zc:wchar_t- -W3 -wd4800 \
-D_STATIC_CPPLIB -D_DISABLE_DEPRECATE_STATIC_CPPLIB -DWIN32_LEAN_AND_MEAN \
-D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE \
-DWIN32 -DIAL"
if test "x$OPENJDK_TARGET_CPU" = xx86_64; then
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_AMD64_ -Damd64"
else
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_X86_ -Dx86"
fi
fi
###############################################################################
# Adjust flags according to debug level.
case $DEBUG_LEVEL in
fastdebug )
CFLAGS_JDK="$CFLAGS_JDK $CFLAGS_DEBUG_SYMBOLS"
CXXFLAGS_JDK="$CXXFLAGS_JDK $CXXFLAGS_DEBUG_SYMBOLS"
C_O_FLAG_HI="$C_O_FLAG_NORM"
C_O_FLAG_NORM="$C_O_FLAG_NORM"
CXX_O_FLAG_HI="$CXX_O_FLAG_NORM"
CXX_O_FLAG_NORM="$CXX_O_FLAG_NORM"
JAVAC_FLAGS="$JAVAC_FLAGS -g"
;;
slowdebug )
CFLAGS_JDK="$CFLAGS_JDK $CFLAGS_DEBUG_SYMBOLS"
CXXFLAGS_JDK="$CXXFLAGS_JDK $CXXFLAGS_DEBUG_SYMBOLS"
C_O_FLAG_HI="$C_O_FLAG_NONE"
C_O_FLAG_NORM="$C_O_FLAG_NONE"
CXX_O_FLAG_HI="$CXX_O_FLAG_NONE"
CXX_O_FLAG_NORM="$CXX_O_FLAG_NONE"
JAVAC_FLAGS="$JAVAC_FLAGS -g"
;;
esac
# Setup LP64
CCXXFLAGS_JDK="$CCXXFLAGS_JDK $ADD_LP64"
# Set some common defines. These works for all compilers, but assume
# -D is universally accepted.
# Setup endianness
if test "x$OPENJDK_TARGET_CPU_ENDIAN" = xlittle; then
# The macro _LITTLE_ENDIAN needs to be defined the same to avoid the
# Sun C compiler warning message: warning: macro redefined: _LITTLE_ENDIAN
# (The Solaris X86 system defines this in file /usr/include/sys/isa_defs.h).
# Note: -Dmacro is the same as #define macro 1
# -Dmacro= is the same as #define macro
if test "x$OPENJDK_TARGET_OS" = xsolaris; then
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_LITTLE_ENDIAN="
else
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_LITTLE_ENDIAN"
fi
else
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_BIG_ENDIAN"
fi
# Setup target OS define. Use OS target name but in upper case.
OPENJDK_TARGET_OS_UPPERCASE=`$ECHO $OPENJDK_TARGET_OS | $TR 'abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'`
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D$OPENJDK_TARGET_OS_UPPERCASE"
# Setup target CPU
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DARCH='\"$OPENJDK_TARGET_CPU_LEGACY\"' -D$OPENJDK_TARGET_CPU_LEGACY"
# Setup debug/release defines
if test "x$DEBUG_LEVEL" = xrelease; then
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DNDEBUG"
if test "x$OPENJDK_TARGET_OS" = xsolaris; then
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DTRIMMED"
fi
else
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DDEBUG"
fi
# Setup release name
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DRELEASE='\"\$(RELEASE)\"'"
# Set some additional per-OS defines.
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_ALLBSD_SOURCE -D_DARWIN_UNLIMITED_SELECT"
elif test "x$OPENJDK_TARGET_OS" = xaix; then
# FIXME: PPC64 should not be here.
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DPPC64"
elif test "x$OPENJDK_TARGET_OS" = xbsd; then
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_ALLBSD_SOURCE"
fi
# Additional macosx handling
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
if test "x$TOOLCHAIN_TYPE" = xgcc; then
# FIXME: This needs to be exported in spec.gmk due to closed legacy code.
# FIXME: clean this up, and/or move it elsewhere.
# Setting these parameters makes it an error to link to macosx APIs that are
# newer than the given OS version and makes the linked binaries compatible
# even if built on a newer version of the OS.
# The expected format is X.Y.Z
MACOSX_VERSION_MIN=10.7.0
AC_SUBST(MACOSX_VERSION_MIN)
# The macro takes the version with no dots, ex: 1070
# Let the flags variables get resolved in make for easier override on make
# command line.
CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DMAC_OS_X_VERSION_MAX_ALLOWED=\$(subst .,,\$(MACOSX_VERSION_MIN)) -mmacosx-version-min=\$(MACOSX_VERSION_MIN)"
LDFLAGS_JDK="$LDFLAGS_JDK -mmacosx-version-min=\$(MACOSX_VERSION_MIN)"
fi
fi
# Setup some hard coded includes
CCXXFLAGS_JDK="$CCXXFLAGS_JDK \
-I${JDK_OUTPUTDIR}/include \
-I${JDK_OUTPUTDIR}/include/$OPENJDK_TARGET_OS \
-I${JDK_TOPDIR}/src/share/javavm/export \
-I${JDK_TOPDIR}/src/$OPENJDK_TARGET_OS_EXPORT_DIR/javavm/export \
-I${JDK_TOPDIR}/src/share/native/common \
-I${JDK_TOPDIR}/src/$OPENJDK_TARGET_OS_API_DIR/native/common"
# The shared libraries are compiled using the picflag.
CFLAGS_JDKLIB="$CCXXFLAGS_JDK $CFLAGS_JDK $PICFLAG $CFLAGS_JDKLIB_EXTRA"
CXXFLAGS_JDKLIB="$CCXXFLAGS_JDK $CXXFLAGS_JDK $PICFLAG $CXXFLAGS_JDKLIB_EXTRA "
# Executable flags
CFLAGS_JDKEXE="$CCXXFLAGS_JDK $CFLAGS_JDK"
CXXFLAGS_JDKEXE="$CCXXFLAGS_JDK $CXXFLAGS_JDK"
AC_SUBST(CFLAGS_JDKLIB)
AC_SUBST(CFLAGS_JDKEXE)
AC_SUBST(CXXFLAGS_JDKLIB)
AC_SUBST(CXXFLAGS_JDKEXE)
# Setup LDFLAGS et al.
#
# Now this is odd. The JDK native libraries have to link against libjvm.so
# On 32-bit machines there is normally two distinct libjvm.so:s, client and server.
# Which should we link to? Are we lucky enough that the binary api to the libjvm.so library
# is identical for client and server? Yes. Which is picked at runtime (client or server)?
# Neither, since the chosen libjvm.so has already been loaded by the launcher, all the following
# libraries will link to whatever is in memory. Yuck.
#
# Thus we offer the compiler to find libjvm.so first in server then in client. It works. Ugh.
if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
LDFLAGS_JDK="$LDFLAGS_JDK -nologo -opt:ref -incremental:no"
if test "x$OPENJDK_TARGET_CPU_BITS" = "x32"; then
LDFLAGS_JDK="$LDFLAGS_JDK -safeseh"
fi
# TODO: make -debug optional "--disable-full-debug-symbols"
LDFLAGS_JDK="$LDFLAGS_JDK -debug"
LDFLAGS_JDKLIB="${LDFLAGS_JDK} -dll -libpath:${JDK_OUTPUTDIR}/lib"
LDFLAGS_JDKLIB_SUFFIX=""
if test "x$OPENJDK_TARGET_CPU_BITS" = "x64"; then
LDFLAGS_STACK_SIZE=1048576
else
LDFLAGS_STACK_SIZE=327680
fi
LDFLAGS_JDKEXE="${LDFLAGS_JDK} /STACK:$LDFLAGS_STACK_SIZE"
else
if test "x$TOOLCHAIN_TYPE" = xgcc; then
# If this is a --hash-style=gnu system, use --hash-style=both, why?
# We have previously set HAS_GNU_HASH if this is the case
if test -n "$HAS_GNU_HASH"; then
LDFLAGS_JDK="${LDFLAGS_JDK} -Xlinker --hash-style=both "
fi
if test "x$OPENJDK_TARGET_OS" = xlinux; then
# And since we now know that the linker is gnu, then add -z defs, to forbid
# undefined symbols in object files.
LDFLAGS_JDK="${LDFLAGS_JDK} -Xlinker -z -Xlinker defs"
if test "x$DEBUG_LEVEL" = "xrelease"; then
# When building release libraries, tell the linker to optimize them.
# Should this be supplied to the OSS linker as well?
LDFLAGS_JDK="${LDFLAGS_JDK} -Xlinker -O1"
fi
fi
fi
if test "x$TOOLCHAIN_TYPE" = xsolstudio; then
LDFLAGS_JDK="$LDFLAGS_JDK -z defs -xildoff -ztext"
LDFLAGS_CXX_JDK="$LDFLAGS_CXX_JDK -norunpath -xnolib"
fi
LDFLAGS_JDKLIB="${LDFLAGS_JDK} $SHARED_LIBRARY_FLAGS \
-L${JDK_OUTPUTDIR}/lib${OPENJDK_TARGET_CPU_LIBDIR}"
# On some platforms (mac) the linker warns about non existing -L dirs.
# Add server first if available. Linking against client does not always produce the same results.
# Only add client dir if client is being built. Add minimal (note not minimal1) if only building minimal1.
# Default to server for other variants.
if test "x$JVM_VARIANT_SERVER" = xtrue; then
LDFLAGS_JDKLIB="${LDFLAGS_JDKLIB} -L${JDK_OUTPUTDIR}/lib${OPENJDK_TARGET_CPU_LIBDIR}/server"
elif test "x$JVM_VARIANT_CLIENT" = xtrue; then
LDFLAGS_JDKLIB="${LDFLAGS_JDKLIB} -L${JDK_OUTPUTDIR}/lib${OPENJDK_TARGET_CPU_LIBDIR}/client"
elif test "x$JVM_VARIANT_MINIMAL1" = xtrue; then
LDFLAGS_JDKLIB="${LDFLAGS_JDKLIB} -L${JDK_OUTPUTDIR}/lib${OPENJDK_TARGET_CPU_LIBDIR}/minimal"
else
LDFLAGS_JDKLIB="${LDFLAGS_JDKLIB} -L${JDK_OUTPUTDIR}/lib${OPENJDK_TARGET_CPU_LIBDIR}/server"
fi
LDFLAGS_JDKLIB_SUFFIX="-ljava -ljvm"
if test "x$TOOLCHAIN_TYPE" = xsolstudio; then
LDFLAGS_JDKLIB_SUFFIX="$LDFLAGS_JDKLIB_SUFFIX -lc"
fi
LDFLAGS_JDKEXE="${LDFLAGS_JDK}"
if test "x$OPENJDK_TARGET_OS" = xlinux; then
LDFLAGS_JDKEXE="$LDFLAGS_JDKEXE -Xlinker --allow-shlib-undefined"
fi
fi
AC_SUBST(LDFLAGS_JDKLIB)
AC_SUBST(LDFLAGS_JDKEXE)
AC_SUBST(LDFLAGS_JDKLIB_SUFFIX)
AC_SUBST(LDFLAGS_JDKEXE_SUFFIX)
AC_SUBST(LDFLAGS_CXX_JDK)
])
# FLAGS_COMPILER_CHECK_ARGUMENTS([ARGUMENT], [RUN-IF-TRUE],
# [RUN-IF-FALSE])
# ------------------------------------------------------------
# Check that the c and c++ compilers support an argument
AC_DEFUN([FLAGS_COMPILER_CHECK_ARGUMENTS],
[
AC_MSG_CHECKING([if compiler supports "$1"])
supports=yes
saved_cflags="$CFLAGS"
CFLAGS="$CFLAGS $1"
AC_LANG_PUSH([C])
AC_COMPILE_IFELSE([AC_LANG_SOURCE([[int i;]])], [],
[supports=no])
AC_LANG_POP([C])
CFLAGS="$saved_cflags"
saved_cxxflags="$CXXFLAGS"
CXXFLAGS="$CXXFLAG $1"
AC_LANG_PUSH([C++])
AC_COMPILE_IFELSE([AC_LANG_SOURCE([[int i;]])], [],
[supports=no])
AC_LANG_POP([C++])
CXXFLAGS="$saved_cxxflags"
AC_MSG_RESULT([$supports])
if test "x$supports" = "xyes" ; then
m4_ifval([$2], [$2], [:])
else
m4_ifval([$3], [$3], [:])
fi
])
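# Example use of the macro above (the flag and variable here are hypothetical;
# this file itself uses the macro further down for ZERO_ARCHFLAG and the
# target-bits flag):
#   FLAGS_COMPILER_CHECK_ARGUMENTS([-fno-common],
#       [EXTRA_CFLAGS="$EXTRA_CFLAGS -fno-common"], [])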
AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_MISC],
[
# Some Zero and Shark settings.
# ZERO_ARCHFLAG tells the compiler which mode to build for
case "${OPENJDK_TARGET_CPU}" in
s390)
ZERO_ARCHFLAG="${COMPILER_TARGET_BITS_FLAG}31"
;;
*)
ZERO_ARCHFLAG="${COMPILER_TARGET_BITS_FLAG}${OPENJDK_TARGET_CPU_BITS}"
esac
FLAGS_COMPILER_CHECK_ARGUMENTS([$ZERO_ARCHFLAG], [], [ZERO_ARCHFLAG=""])
AC_SUBST(ZERO_ARCHFLAG)
# Check that the compiler supports -mX (or -qX on AIX) flags
# Set COMPILER_SUPPORTS_TARGET_BITS_FLAG to 'true' if it does
FLAGS_COMPILER_CHECK_ARGUMENTS([${COMPILER_TARGET_BITS_FLAG}${OPENJDK_TARGET_CPU_BITS}],
[COMPILER_SUPPORTS_TARGET_BITS_FLAG=true],
[COMPILER_SUPPORTS_TARGET_BITS_FLAG=false])
AC_SUBST(COMPILER_SUPPORTS_TARGET_BITS_FLAG)
])

File diff suppressed because it is too large

View file

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -151,14 +151,46 @@ pkgadd_help() {
PKGHANDLER_COMMAND=""
}
# This function will check if we're called from the "configure" wrapper while
# printing --help. If so, we will print out additional information that can
# only be extracted within the autoconf script, and then exit. This must be
# called at the very beginning in configure.ac.
AC_DEFUN_ONCE([HELP_PRINT_ADDITIONAL_HELP_AND_EXIT],
[
if test "x$CONFIGURE_PRINT_TOOLCHAIN_LIST" != x; then
$PRINTF "The following toolchains are available as arguments to --with-toolchain-type.\n"
$PRINTF "Which are valid to use depends on the build platform.\n"
for toolchain in $VALID_TOOLCHAINS_all; do
# Use indirect variable referencing
toolchain_var_name=TOOLCHAIN_DESCRIPTION_$toolchain
TOOLCHAIN_DESCRIPTION=${!toolchain_var_name}
$PRINTF " %-10s %s\n" $toolchain "$TOOLCHAIN_DESCRIPTION"
done
# And now exit directly
exit 0
fi
])
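# Side note on the bash construct used above: ${!name} is indirect expansion,
# i.e. it expands the variable whose name is stored in name. Illustrative
# example (the description text is made up):
#   TOOLCHAIN_DESCRIPTION_gcc="GNU Compiler Collection"
#   toolchain_var_name=TOOLCHAIN_DESCRIPTION_gcc
#   echo "${!toolchain_var_name}"    # prints: GNU Compiler Collection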
AC_DEFUN_ONCE([HELP_PRINT_SUMMARY_AND_WARNINGS],
[
# Finally output some useful information to the user
printf "\n"
printf "====================================================\n"
printf "A new configuration has been successfully created in\n"
printf "$OUTPUT_ROOT\n"
if test "x$no_create" != "xyes"; then
if test "x$IS_RECONFIGURE" != "xyes"; then
printf "A new configuration has been successfully created in\n %s\n" "$OUTPUT_ROOT"
else
printf "The existing configuration has been successfully updated in\n %s\n" "$OUTPUT_ROOT"
fi
else
if test "x$IS_RECONFIGURE" != "xyes"; then
printf "A configuration has been successfully checked but not created\n"
else
printf "The existing configuration has been successfully checked in\n %s\n" "$OUTPUT_ROOT"
fi
fi
if test "x$CONFIGURE_COMMAND_LINE" != x; then
printf "using configure arguments '$CONFIGURE_COMMAND_LINE'.\n"
else
@ -178,8 +210,9 @@ AC_DEFUN_ONCE([HELP_PRINT_SUMMARY_AND_WARNINGS],
printf "* Environment: $WINDOWS_ENV_VENDOR version $WINDOWS_ENV_VERSION (root at $WINDOWS_ENV_ROOT_PATH)\n"
fi
printf "* Boot JDK: $BOOT_JDK_VERSION (at $BOOT_JDK)\n"
printf "* C Compiler: $CC_VENDOR version $CC_VERSION (at $CC)\n"
printf "* C++ Compiler: $CXX_VENDOR version $CXX_VERSION (at $CXX)\n"
printf "* Toolchain: $TOOLCHAIN_TYPE ($TOOLCHAIN_DESCRIPTION)\n"
printf "* C Compiler: Version $CC_VERSION_NUMBER (at $CC)\n"
printf "* C++ Compiler: Version $CXX_VERSION_NUMBER (at $CXX)\n"
printf "\n"
printf "Build performance summary:\n"
@ -212,10 +245,16 @@ AC_DEFUN_ONCE([HELP_PRINT_SUMMARY_AND_WARNINGS],
printf "\n"
fi
if test "x$IS_RECONFIGURE" = "xyes"; then
if test "x$IS_RECONFIGURE" = "xyes" && test "x$no_create" != "xyes"; then
printf "WARNING: The result of this configuration has overridden an older\n"
printf "configuration. You *should* run 'make clean' to make sure you get a\n"
printf "proper build. Failure to do so might result in strange build problems.\n"
printf "\n"
fi
if test "x$IS_RECONFIGURE" != "xyes" && test "x$no_create" = "xyes"; then
printf "WARNING: The result of this configuration was not saved.\n"
printf "You should run without '--no-create | -n' to create the configuration.\n"
printf "\n"
fi
])

View file

@ -102,6 +102,8 @@ HOTSPOT_BUILD_JOBS:=$(JOBS)
# Control whether Hotspot runs Queens test after building
TEST_IN_BUILD=@TEST_IN_BUILD@
USE_CLANG := @USE_CLANG@
# For hotspot, override compiler/tools definition to not include FIXPATH prefix.
# Hotspot has its own handling on the Windows path situation.
CXX:=@CCACHE@ @HOTSPOT_CXX@

View file

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -332,6 +332,10 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_OPEN_OR_CUSTOM],
fi
AC_SUBST(SET_OPENJDK)
# custom-make-dir is deprecated. Please use your custom-hook.m4 to override
# the IncludeCustomExtension macro.
BASIC_DEPRECATED_ARG_WITH(custom-make-dir)
])
AC_DEFUN_ONCE([JDKOPT_SETUP_JDK_OPTIONS],
@ -424,6 +428,19 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_JDK_OPTIONS],
AC_SUBST(ENABLE_INTREE_EC)
])
###############################################################################
#
# --enable-rmiconnector-iiop
#
AC_ARG_ENABLE(rmiconnector-iiop, [AS_HELP_STRING([--enable-rmiconnector-iiop],
[enable the JMX RMIConnector iiop transport @<:@disabled@:>@])])
if test "x$enable_rmiconnector_iiop" = "xyes"; then
RMICONNECTOR_IIOP=true
else
RMICONNECTOR_IIOP=false
fi
AC_SUBST(RMICONNECTOR_IIOP)
###############################################################################
#
# Compress jars
@ -469,7 +486,7 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_JDK_VERSION_NUMBERS],
fi
AC_ARG_WITH(user-release-suffix, [AS_HELP_STRING([--with-user-release-suffix],
[Add a custom string to the version string if build number isn't set.@<:@username_builddateb00@:>@])])
[Add a custom string to the version string if build number is not set.@<:@username_builddateb00@:>@])])
if test "x$with_user_release_suffix" = xyes; then
AC_MSG_ERROR([Release suffix must have a value])
elif test "x$with_user_release_suffix" != x; then
@ -589,14 +606,4 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_DEBUG_SYMBOLS],
AC_SUBST(ENABLE_DEBUG_SYMBOLS)
AC_SUBST(ZIP_DEBUGINFO_FILES)
AC_SUBST(CFLAGS_DEBUG_SYMBOLS)
AC_SUBST(CXXFLAGS_DEBUG_SYMBOLS)
])
# Support for customization of the build process. Some build files
# will include counterparts from this location, if they exist. This allows
# for a degree of customization of the build targets and the rules/recipes
# to create them
AC_ARG_WITH([custom-make-dir], [AS_HELP_STRING([--with-custom-make-dir],
[use this directory for custom build/make files])], [CUSTOM_MAKE_DIR=$with_custom_make_dir])
AC_SUBST(CUSTOM_MAKE_DIR)

View file

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -562,7 +562,7 @@ AC_DEFUN_ONCE([LIB_SETUP_ALSA],
fi
if test "x${with_alsa}" != x; then
ALSA_LIBS="-L${with_alsa}/lib -lalsa"
ALSA_LIBS="-L${with_alsa}/lib -lasound"
ALSA_CFLAGS="-I${with_alsa}/include"
ALSA_FOUND=yes
fi
@ -571,7 +571,7 @@ AC_DEFUN_ONCE([LIB_SETUP_ALSA],
ALSA_FOUND=yes
fi
if test "x${with_alsa_lib}" != x; then
ALSA_LIBS="-L${with_alsa_lib} -lalsa"
ALSA_LIBS="-L${with_alsa_lib} -lasound"
ALSA_FOUND=yes
fi
if test "x$ALSA_FOUND" = xno; then
@ -876,14 +876,25 @@ AC_DEFUN_ONCE([LIB_SETUP_STATIC_LINK_LIBSTDCPP],
fi
# libCrun is the c++ runtime-library with SunStudio (roughly the equivalent of gcc's libstdc++.so)
if test "x$OPENJDK_TARGET_OS" = xsolaris && test "x$LIBCXX" = x; then
if test "x$TOOLCHAIN_TYPE" = xsolstudio && test "x$LIBCXX" = x; then
LIBCXX="/usr/lib${OPENJDK_TARGET_CPU_ISADIR}/libCrun.so.1"
fi
# TODO better (platform agnostic) test
if test "x$OPENJDK_TARGET_OS" = xmacosx && test "x$LIBCXX" = x && test "x$GCC" = xyes; then
if test "x$OPENJDK_TARGET_OS" = xmacosx && test "x$LIBCXX" = x && test "x$TOOLCHAIN_TYPE" = xgcc; then
LIBCXX="-lstdc++"
fi
AC_SUBST(LIBCXX)
])
AC_DEFUN_ONCE([LIB_SETUP_ON_WINDOWS],
[
if test "x$OPENJDK_TARGET_OS" = "xwindows"; then
TOOLCHAIN_SETUP_MSVCR_DLL
BASIC_DEPRECATED_ARG_WITH([dxsdk])
BASIC_DEPRECATED_ARG_WITH([dxsdk-lib])
BASIC_DEPRECATED_ARG_WITH([dxsdk-include])
fi
AC_SUBST(MSVCR_DLL)
])

View file

@ -48,6 +48,9 @@ define NEWLINE
endef
# The command line given to configure.
CONFIGURE_COMMAND_LINE:=@CONFIGURE_COMMAND_LINE@
# A self-referential reference to this file.
SPEC:=@SPEC@
@ -67,8 +70,8 @@ ifeq (,$(findstring -R,$(MAKE)))
endif
# Specify where the common include directory for makefiles is.
ifeq (,$(findstring -I @SRC_ROOT@/make/common,$(MAKE)))
MAKE:=$(MAKE) -I @SRC_ROOT@/make/common
ifeq (,$(findstring -I @TOPDIR@/make/common,$(MAKE)))
MAKE:=$(MAKE) -I @TOPDIR@/make/common
endif
# The "human readable" name of this configuration
@ -133,10 +136,13 @@ endif
SYS_ROOT:=@SYS_ROOT@
# Paths to the source code
SRC_ROOT:=@SRC_ROOT@
ADD_SRC_ROOT:=@ADD_SRC_ROOT@
OVERRIDE_SRC_ROOT:=@OVERRIDE_SRC_ROOT@
TOPDIR:=@SRC_ROOT@
# The top-level directory of the forest (SRC_ROOT is a traditional alias)
TOPDIR:=@TOPDIR@
SRC_ROOT:=@TOPDIR@
OUTPUT_ROOT:=@OUTPUT_ROOT@
JDK_TOPDIR:=@JDK_TOPDIR@
LANGTOOLS_TOPDIR:=@LANGTOOLS_TOPDIR@
@ -147,9 +153,6 @@ HOTSPOT_TOPDIR:=@HOTSPOT_TOPDIR@
NASHORN_TOPDIR:=@NASHORN_TOPDIR@
COPYRIGHT_YEAR:=@COPYRIGHT_YEAR@
# Location where build customization files may be found
CUSTOM_MAKE_DIR:=@CUSTOM_MAKE_DIR@
# Information gathered from the version.numbers file.
JDK_MAJOR_VERSION:=@JDK_MAJOR_VERSION@
JDK_MINOR_VERSION:=@JDK_MINOR_VERSION@
@ -172,7 +175,7 @@ RUNTIME_NAME=$(PRODUCT_NAME) $(PRODUCT_SUFFIX)
COOKED_BUILD_NUMBER:=@COOKED_BUILD_NUMBER@
# These variables need to be generated here so that MILESTONE and
# JDK_BUILD_NUMBER can be overridden on the make command line.
ifeq ($(MILESTONE),)
ifeq ($(MILESTONE), fcs)
RELEASE=$(JDK_VERSION)$(BUILD_VARIANT_RELEASE)
else
RELEASE=$(JDK_VERSION)-$(MILESTONE)$(BUILD_VARIANT_RELEASE)
@ -286,6 +289,9 @@ CACERTS_FILE=@CACERTS_FILE@
# Enable unlimited crypto policy
UNLIMITED_CRYPTO=@UNLIMITED_CRYPTO@
# Enable RMIConnector IIOP transport
RMICONNECTOR_IIOP=@RMICONNECTOR_IIOP@
# Necessary additional compiler flags to compile X11
X_CFLAGS:=@X_CFLAGS@
X_LIBS:=@X_LIBS@
@ -294,11 +300,8 @@ OPENWIN_HOME:=@OPENWIN_HOME@
# The lowest required version of macosx to enforce compatibility for
MACOSX_VERSION_MIN=@MACOSX_VERSION_MIN@
# There are two types: CC or CL
# CC is gcc and others behaving reasonably similar.
# CL is cl.exe only.
COMPILER_TYPE:=@COMPILER_TYPE@
COMPILER_NAME:=@COMPILER_NAME@
# Toolchain type: gcc, clang, solstudio, lxc, microsoft...
TOOLCHAIN_TYPE:=@TOOLCHAIN_TYPE@
# Option used to tell the compiler whether to create 32- or 64-bit executables
COMPILER_TARGET_BITS_FLAG:=@COMPILER_TARGET_BITS_FLAG@
@ -438,7 +441,7 @@ OBJ_SUFFIX:=@OBJ_SUFFIX@
POST_STRIP_CMD:=@POST_STRIP_CMD@
POST_MCS_CMD:=@POST_MCS_CMD@
JAVA_FLAGS:=@BOOT_JDK_JVMARGS@
JAVA_FLAGS:=@JAVA_FLAGS@
JAVA=@FIXPATH@ @JAVA@ $(JAVA_FLAGS)
@ -674,5 +677,14 @@ JRE_BUNDLE_SUBDIR=j2re-bundle/jre$(JDK_VERSION).jre/Contents
JDK_BUNDLE_DIR=$(IMAGES_OUTPUTDIR)/$(JDK_BUNDLE_SUBDIR)
JRE_BUNDLE_DIR=$(IMAGES_OUTPUTDIR)/$(JRE_BUNDLE_SUBDIR)
# This macro is called to allow inclusion of closed source counterparts.
# Unless overridden in closed sources, it expands to nothing.
# Usage: This function is called in an open makefile, with the following
# arguments:
# $1 the name of the repo, or empty if the top-level repo.
# $2 the name of the makefile
define IncludeCustomExtension
endef
# Include the custom-spec.gmk file if it exists
-include $(dir @SPEC@)/custom-spec.gmk
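
The empty IncludeCustomExtension definition above is the open-source default; a closed custom-spec.gmk, included on the last line, can redefine it to pull in extra makefile snippets. A hypothetical call site in an open makefile (repo and file names made up):

  # somewhere in jdk/make/SomeMakefile.gmk
  $(eval $(call IncludeCustomExtension, jdk, SomeMakefile.gmk))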

File diff suppressed because it is too large

View file

@ -1,7 +1,7 @@
#!/bin/sh
#
# Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -88,7 +88,7 @@ if [ "${command}" = "clone" -o "${command}" = "fclone" ] ; then
fi
done
if [ "${pull_extra_base}" != "" ] ; then
subrepos_extra="jdk/src/closed jdk/make/closed jdk/test/closed hotspot/make/closed hotspot/src/closed hotspot/test/closed deploy install sponsors pubs"
subrepos_extra="closed jdk/src/closed jdk/make/closed jdk/test/closed hotspot/make/closed hotspot/src/closed hotspot/test/closed deploy install sponsors pubs"
pull_default_tail=`echo ${pull_default} | sed -e 's@^.*://[^/]*/\(.*\)@\1@'`
pull_extra="${pull_extra_base}/${pull_default_tail}"
for i in ${subrepos_extra} ; do

configure
View file

@ -1,6 +1,6 @@
#!/bin/bash
#
# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -25,5 +25,10 @@
# This is a thin wrapper which will call the real configure script, and
# make sure that is called using bash.
# Get an absolute path to this script, since that determines the top-level directory.
this_script_dir=`dirname $0`
bash $this_script_dir/common/autoconf/configure "$@"
this_script_dir=`cd $this_script_dir > /dev/null && pwd`
# Delegate to wrapper, forcing wrapper to believe $0 is this script by using -c.
# This trick is needed to get autoconf to co-operate properly.
bash -c ". $this_script_dir/common/autoconf/configure" $this_script_dir/configure CHECKME $this_script_dir "$@"

View file

@ -245,3 +245,4 @@ d6820a414f182a011a53a29a52370c696cd58dab jdk8-b118
a7d3638deb2f4e33217b1ecf889479e90f9e5b50 jdk9-b00
79a8136b18c1c6848f500088f5a4b39f262f082d jdk9-b01
8394993063135a42b63a94473280399fb2a13aa7 jdk9-b02
d338b892a13db19b093f85cf5f949a4504e4d31f jdk9-b03

File diff suppressed because it is too large

View file

@ -37,8 +37,7 @@ include CommonCorba.gmk
$(eval $(call SetupJavaCompilation,BUILD_CORBA, \
SETUP := GENERATE_NEWBYTECODE, \
SRC := $(CORBA_TOPDIR)/src/share/classes $(CORBA_OUTPUTDIR)/gensrc, \
EXCLUDES := com/sun/corba/se/PortableActivationIDL \
com/sun/tools/corba/se/logutil, \
EXCLUDES := com/sun/corba/se/PortableActivationIDL, \
EXCLUDE_FILES := com/sun/corba/se/impl/presentation/rmi/JNDIStateFactoryImpl.java \
com/sun/corba/se/spi/presentation/rmi/StubWrapper.java \
com/sun/org/omg/CORBA/IDLTypeOperations.java \

File diff suppressed because one or more lines are too long

View file

@ -23,7 +23,7 @@
* questions.
*/
package com.sun.tools.corba.se.logutil;
package build.tools.logutil;
import java.io.PrintWriter ;
import java.io.Writer ;

View file

@ -23,7 +23,7 @@
* questions.
*/
package com.sun.tools.corba.se.logutil;
package build.tools.logutil;
import java.io.BufferedReader;
import java.io.File;

View file

@ -22,7 +22,7 @@
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package com.sun.tools.corba.se.logutil;
package build.tools.logutil;
public class InputCode {

View file

@ -22,7 +22,7 @@
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package com.sun.tools.corba.se.logutil;
package build.tools.logutil;
import java.util.LinkedList;
import java.util.Queue;

View file

@ -22,7 +22,7 @@
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package com.sun.tools.corba.se.logutil;
package build.tools.logutil;
import java.io.File;
import java.io.FileNotFoundException;

View file

@ -23,7 +23,7 @@
* questions.
*/
package com.sun.tools.corba.se.logutil;
package build.tools.logutil;
public abstract class StringUtil {
/** Take a string containing underscores, and return a string

View file

@ -110,7 +110,7 @@ ParseException.badAbstract=%0 (\u884C%1): %2\u306Eforward\u5BA3\u8A00\u3068\u5B9
ParseException.badCustom=%0 (\u884C%1): forward\u5024\u5BA3\u8A00\u306Fcustom\u3068\u3057\u3066\u5BA3\u8A00\u3067\u304D\u307E\u305B\u3093\u3002\n%2\n%3
ParseException.badRepIDAlreadyAssigned=%0 (\u884C%1): \u578B%2\u306B\u306F\u3001\u524D\u306EID\u30D7\u30E9\u30B0\u30DE\u30FB\u30C7\u30A3\u30EC\u30AF\u30C6\u30A3\u30D6\u306E\u30EA\u30DD\u30B8\u30C8\u30EAID\u304C\u3059\u3067\u306B\u5272\u308A\u5F53\u3066\u3089\u308C\u3066\u3044\u307E\u3059\u3002\n%3\n%4
ParseException.badRepIDForm=%0 (\u884C%1): \u30EA\u30DD\u30B8\u30C8\u30EAID '%2'\u306E\u5F62\u5F0F\u306F'<format>:<string>'\u306B\u3059\u308B\u5FC5\u8981\u304C\u3042\u308A\u307E\u3059\u3002\n%3\n%4
ParseException.badRepIDPrefix=%0 (\u884C%1): \u30A4\u30F3\u30BF\u30D5\u30A7\u30FC\u30B9%2\u306B\u306F\u3001'%4'\u3067\u306F\u306A\u304F\u30EA\u30DD\u30B8\u30C8\u30EAID\u30D7\u30EC\u30D5\u30A3\u30C3\u30AF\u30B9`%3'\u304C\u5FC5\u8981\u3067\u3059\u3002\n%5\n%6
ParseException.badRepIDPrefix=%0 (\u884C%1): \u30A4\u30F3\u30BF\u30D5\u30A7\u30FC\u30B9%2\u306B\u306F\u3001'%4'\u3067\u306F\u306A\u304F\u30EA\u30DD\u30B8\u30C8\u30EAID\u63A5\u982D\u8F9E`%3'\u304C\u5FC5\u8981\u3067\u3059\u3002\n%5\n%6
ParseException.badState=%0 (\u884C%1): %2\u306F\u30B9\u30C6\u30FC\u30C8\u30D5\u30EB\u30FB\u30A4\u30F3\u30BF\u30D5\u30A7\u30FC\u30B9\u306B\u3067\u304D\u307E\u305B\u3093\u3002\u8907\u6570\u306E\u30B9\u30C6\u30FC\u30C8\u30D5\u30EB\u89AA\u304C\u3042\u308A\u307E\u3059\u3002\n%3\n%4
ParseException.branchLabel=%0 (\u884C%1): case %2\u306F\u3059\u3067\u306B\u5BA3\u8A00\u3055\u308C\u3066\u3044\u307E\u3059\u3002\n%3\n%4
ParseException.branchName=%0 (\u884C%1): %2\u3068\u3044\u3046\u540D\u524D\u306E\u5206\u5C90\u306F\u3059\u3067\u306B\u5BA3\u8A00\u3055\u308C\u3066\u3044\u307E\u3059\u3002\n%3\n%4

View file

@ -1,93 +0,0 @@
#
# Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
PACKAGE=com/sun/kenc/util
CLASSES=LogWrapperBase IndentingPrintWriter StringUtil
SOURCE=$(CLASSES:%=$(PACKAGE)/%.java)
OBJECT=$(CLASSES:%=$(PACKAGE)/%.class)
MCROOTS= Activation Interceptors IOR Naming ORBUtil POA Util OMG
MCFILES=$(MCROOTS:%=data/%.mc)
MCSOURCE=$(MCROOTS:%=$(PACKAGE)/%SystemException.java)
MCCLASS=$(MCROOTS:%=$(PACKAGE)/%SystemException.class)
MCRESOURCE=$(MCROOTS:%=resources/%SystemException.resource)
JARDIR=lib
UTILJAR=$(JARDIR)/util.jar
JSCHEMEJAR=$(JARDIR)/jscheme.jar
JARS=$(UTILJAR):$(JSCHEMEJAR)
all: mkdir jar resources classes
mkdir :
@-mkdir resources
clean:
@-rm $(OBJECT) $(MCSOURCE) $(MCOBJECT) $(MCCLASS) $(MCRESOURCE) $(UTILJAR)
test:
@echo "PACKAGE :" $(PACKAGE)
@echo "CLASSES :" $(CLASSES)
@echo "SOURCE :" $(SOURCE)
@echo "OBJECT :" $(OBJECT)
@echo "MCROOTS :" $(MCROOTS)
@echo "MCFILES :" $(MCFILES)
@echo "MCCLASS :" $(MCCLASS)
@echo "MCRESOURCE :" $(MCRESOURCE)
jar: $(UTILJAR)
$(UTILJAR) : $(OBJECT)
jar cvf $(JARDIR)/util.jar $(OBJECT)
$(OBJECT) : $(SOURCE)
classes: $(MCCLASS)
$(MCCLASS) : $(MCSOURCE)
$(MCSOURCE) : $(MCFILES)
resources: $(MCRESOURCE)
$(MCRESOURCE) : $(MCFILES)
# This target does not compile, because the generated source code requires
# com.sun.corba.se.impl.util.SUNVMCID, which would normally be built in the
# ee package. This should not be a problem once everything is merged into the
# workspace.
genclasses: $(MCCLASS)
$(PACKAGE)/%SystemException.java : data/%.mc
./mc make-class $<
mv *SystemException.java $(PACKAGE)
resources/%SystemException.resource : data/%.mc
./mc make-resource $<
mv *.resource resources
$(PACKAGE)/%.class : $(PACKAGE)/%.java
javac -classpath $(JARS) $<

View file

@ -405,3 +405,4 @@ fca262db9c4309f99d2f5542ab0780e45c2f1578 jdk8-b120
ce2d7e46f3c7e41241f3b407705a4071323a11ab jdk9-b00
050a626a88951140df874f7b163e304d07b6c296 jdk9-b01
b188446de75bda5fc52d102cddf242c3ef5ecbdf jdk9-b02
b2fee789d23f3cdabb3db4e51af43038e5692d3a jdk9-b03

View file

@ -70,6 +70,10 @@ ifndef CC_INTERP
FORCE_TIERED=1
endif
endif
# C1 is not ported on ppc64(le), so we cannot build a tiered VM:
ifneq (,$(filter $(ARCH),ppc64 ppc64le))
FORCE_TIERED=0
endif
ifdef LP64
ifeq ("$(filter $(LP64_ARCH),$(BUILDARCH))","")

View file

@ -1,6 +1,6 @@
#! /bin/sh
#
# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -64,7 +64,7 @@ MFLAGS=`
echo "$MFLAGS" \
| sed '
s/^-/ -/
s/ -\([^ ][^ ]*\)j/ -\1 -j/
s/ -\([^ I][^ I]*\)j/ -\1 -j/
s/ -j[0-9][0-9]*/ -j/
s/ -j\([^ ]\)/ -j -\1/
s/ -j/ -j'${HOTSPOT_BUILD_JOBS:-${default_build_jobs}}'/

View file

@ -260,7 +260,7 @@ ifeq ($(USE_CLANG), true)
WARNINGS_ARE_ERRORS += -Wno-empty-body
endif
WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wformat=2 -Wno-error=format-nonliteral
WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wformat=2
ifeq ($(USE_CLANG),)
# Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit

View file

@ -66,6 +66,10 @@ ifndef CC_INTERP
FORCE_TIERED=1
endif
endif
# C1 is not ported on ppc64(le), so we cannot build a tiered VM:
ifneq (,$(filter $(ARCH),ppc64 ppc64le))
FORCE_TIERED=0
endif
ifdef LP64
ifeq ("$(filter $(LP64_ARCH),$(BUILDARCH))","")

View file

@ -215,7 +215,7 @@ ifeq ($(USE_CLANG), true)
WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body
endif
WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value -Wformat=2 -Wno-error=format-nonliteral
WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value -Wformat=2
ifeq ($(USE_CLANG),)
# Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit

View file

@ -25,6 +25,9 @@
# Setup common to Zero (non-Shark) and Shark versions of VM
# override this from the main file because some versions of llvm do not like -Wundef
WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wunused-function -Wunused-value
# The copied fdlibm routines in sharedRuntimeTrig.o must not be optimized
OPT_CFLAGS/sharedRuntimeTrig.o = $(OPT_CFLAGS/NOOPT)
# The copied fdlibm routines in sharedRuntimeTrans.o must not be optimized

View file

@ -118,7 +118,7 @@ endif
# Compiler warnings are treated as errors
WARNINGS_ARE_ERRORS = -Werror
# Enable these warnings. See 'info gcc' about details on these options
WARNING_FLAGS = -Wpointer-arith -Wconversion -Wsign-compare -Wundef -Wformat=2 -Wno-error=format-nonliteral
WARNING_FLAGS = -Wpointer-arith -Wconversion -Wsign-compare -Wundef -Wformat=2
CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(WARNING_FLAGS)
# Special cases
CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@))

View file

@ -124,6 +124,7 @@ class Argument VALUE_OBJ_CLASS_SPEC {
}
};
#if !defined(ABI_ELFv2)
// A ppc64 function descriptor.
struct FunctionDescriptor VALUE_OBJ_CLASS_SPEC {
private:
@ -161,6 +162,7 @@ struct FunctionDescriptor VALUE_OBJ_CLASS_SPEC {
_env = (address) 0xbad;
}
};
#endif
class Assembler : public AbstractAssembler {
protected:
@ -1067,6 +1069,7 @@ class Assembler : public AbstractAssembler {
// Emit an address.
inline address emit_addr(const address addr = NULL);
#if !defined(ABI_ELFv2)
// Emit a function descriptor with the specified entry point, TOC,
// and ENV. If the entry point is NULL, the descriptor will point
// just past the descriptor.
@ -1074,6 +1077,7 @@ class Assembler : public AbstractAssembler {
inline address emit_fd(address entry = NULL,
address toc = (address) FunctionDescriptor::friend_toc,
address env = (address) FunctionDescriptor::friend_env);
#endif
/////////////////////////////////////////////////////////////////////////////////////
// PPC instructions

View file

@ -55,6 +55,7 @@ inline address Assembler::emit_addr(const address addr) {
return start;
}
#if !defined(ABI_ELFv2)
// Emit a function descriptor with the specified entry point, TOC, and
// ENV. If the entry point is NULL, the descriptor will point just
// past the descriptor.
@ -73,6 +74,7 @@ inline address Assembler::emit_fd(address entry, address toc, address env) {
return (address)fd;
}
#endif
// Issue an illegal instruction. 0 is guaranteed to be an illegal instruction.
inline void Assembler::illtrap() { Assembler::emit_int32(0); }

View file

@ -1136,7 +1136,9 @@ address CppInterpreterGenerator::generate_native_entry(void) {
// (outgoing C args), R3_ARG1 to R10_ARG8, and F1_ARG1 to
// F13_ARG13.
__ mr(R3_ARG1, R18_locals);
#if !defined(ABI_ELFv2)
__ ld(signature_handler_fd, 0, signature_handler_fd);
#endif
__ call_stub(signature_handler_fd);
// reload method
__ ld(R19_method, state_(_method));
@ -1295,8 +1297,13 @@ address CppInterpreterGenerator::generate_native_entry(void) {
// native result across the call. No oop is present
__ mr(R3_ARG1, R16_thread);
#if defined(ABI_ELFv2)
__ call_c(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
relocInfo::none);
#else
__ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans),
relocInfo::none);
#endif
__ bind(sync_check_done);
//=============================================================================
@ -1346,9 +1353,9 @@ address CppInterpreterGenerator::generate_native_entry(void) {
// notify here, we'll drop it on the floor.
__ notify_method_exit(true/*native method*/,
ilgl /*illegal state (not used for native methods)*/);
ilgl /*illegal state (not used for native methods)*/,
InterpreterMacroAssembler::NotifyJVMTI,
false /*check_exceptions*/);
//=============================================================================
// Handle exceptions
@ -1413,7 +1420,7 @@ address CppInterpreterGenerator::generate_native_entry(void) {
// First, pop to caller's frame.
__ pop_interpreter_frame(R11_scratch1, R12_scratch2, R21_tmp1 /* set to return pc */, R22_tmp2);
__ push_frame_abi112(0, R11_scratch1);
__ push_frame_reg_args(0, R11_scratch1);
// Get the address of the exception handler.
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
R16_thread,
@ -2545,7 +2552,7 @@ address CppInterpreterGenerator::generate_normal_entry(void) {
__ mr(R4_ARG2, R3_ARG1); // ARG2 := ARG1
// Find the address of the "catch_exception" stub.
__ push_frame_abi112(0, R11_scratch1);
__ push_frame_reg_args(0, R11_scratch1);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
R16_thread,
R4_ARG2);

View file

@ -1,6 +1,6 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,10 +42,6 @@
#include "runtime/vframeArray.hpp"
#endif
#ifndef CC_INTERP
#error "CC_INTERP must be defined on PPC64"
#endif
#ifdef ASSERT
void RegisterMap::check_location_valid() {
}
@ -89,7 +85,10 @@ frame frame::sender_for_entry_frame(RegisterMap *map) const {
frame frame::sender_for_interpreter_frame(RegisterMap *map) const {
// Pass caller's initial_caller_sp as unextended_sp.
return frame(sender_sp(), sender_pc(), (intptr_t*)((parent_ijava_frame_abi *)callers_abi())->initial_caller_sp);
return frame(sender_sp(), sender_pc(),
CC_INTERP_ONLY((intptr_t*)((parent_ijava_frame_abi *)callers_abi())->initial_caller_sp)
NOT_CC_INTERP((intptr_t*)get_ijava_state()->sender_sp)
);
}
frame frame::sender_for_compiled_frame(RegisterMap *map) const {
@ -183,6 +182,9 @@ BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result)
interpreterState istate = get_interpreterState();
address lresult = (address)istate + in_bytes(BytecodeInterpreter::native_lresult_offset());
address fresult = (address)istate + in_bytes(BytecodeInterpreter::native_fresult_offset());
#else
address lresult = (address)&(get_ijava_state()->lresult);
address fresult = (address)&(get_ijava_state()->fresult);
#endif
switch (method->result_type()) {
@ -259,7 +261,21 @@ void frame::describe_pd(FrameValues& values, int frame_no) {
values.describe(frame_no, (intptr_t*)&(istate->_native_fresult), " native_fresult");
values.describe(frame_no, (intptr_t*)&(istate->_native_lresult), " native_lresult");
#else
Unimplemented();
#define DESCRIBE_ADDRESS(name) \
values.describe(frame_no, (intptr_t*)&(get_ijava_state()->name), #name);
DESCRIBE_ADDRESS(method);
DESCRIBE_ADDRESS(locals);
DESCRIBE_ADDRESS(monitors);
DESCRIBE_ADDRESS(cpoolCache);
DESCRIBE_ADDRESS(bcp);
DESCRIBE_ADDRESS(esp);
DESCRIBE_ADDRESS(mdx);
DESCRIBE_ADDRESS(top_frame_sp);
DESCRIBE_ADDRESS(sender_sp);
DESCRIBE_ADDRESS(oop_tmp);
DESCRIBE_ADDRESS(lresult);
DESCRIBE_ADDRESS(fresult);
#endif
}
}

View file

@ -1,6 +1,6 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,10 +29,6 @@
#include "runtime/synchronizer.hpp"
#include "utilities/top.hpp"
#ifndef CC_INTERP
#error "CC_INTERP must be defined on PPC64"
#endif
// C frame layout on PPC-64.
//
// In this figure the stack grows upwards, while memory grows
@ -50,7 +46,7 @@
// [C_FRAME]
//
// C_FRAME:
// 0 [ABI_112]
// 0 [ABI_REG_ARGS]
// 112 CARG_9: outgoing arg 9 (arg_1 ... arg_8 via gpr_3 ... gpr_{10})
// ...
// 40+M*8 CARG_M: outgoing arg M (M is the maximum of outgoing args taken over all call sites in the procedure)
@ -77,7 +73,7 @@
// 32 reserved
// 40 space for TOC (=R2) register for next call
//
// ABI_112:
// ABI_REG_ARGS:
// 0 [ABI_48]
// 48 CARG_1: spill slot for outgoing arg 1. used by next callee.
// ... ...
@ -95,23 +91,25 @@
log_2_of_alignment_in_bits = 7
};
// ABI_48:
struct abi_48 {
// ABI_MINFRAME:
struct abi_minframe {
uint64_t callers_sp;
uint64_t cr; //_16
uint64_t lr;
#if !defined(ABI_ELFv2)
uint64_t reserved1; //_16
uint64_t reserved2;
#endif
uint64_t toc; //_16
// nothing to add here!
// aligned to frame::alignment_in_bytes (16)
};
enum {
abi_48_size = sizeof(abi_48)
abi_minframe_size = sizeof(abi_minframe)
};
struct abi_112 : abi_48 {
struct abi_reg_args : abi_minframe {
uint64_t carg_1;
uint64_t carg_2; //_16
uint64_t carg_3;
@ -124,13 +122,13 @@
};
enum {
abi_112_size = sizeof(abi_112)
abi_reg_args_size = sizeof(abi_reg_args)
};
#define _abi(_component) \
(offset_of(frame::abi_112, _component))
(offset_of(frame::abi_reg_args, _component))
struct abi_112_spill : abi_112 {
struct abi_reg_args_spill : abi_reg_args {
// additional spill slots
uint64_t spill_ret;
uint64_t spill_fret; //_16
@ -138,11 +136,11 @@
};
enum {
abi_112_spill_size = sizeof(abi_112_spill)
abi_reg_args_spill_size = sizeof(abi_reg_args_spill)
};
#define _abi_112_spill(_component) \
(offset_of(frame::abi_112_spill, _component))
#define _abi_reg_args_spill(_component) \
(offset_of(frame::abi_reg_args_spill, _component))
// non-volatile GPRs:
@ -195,7 +193,85 @@
#define _spill_nonvolatiles_neg(_component) \
(int)(-frame::spill_nonvolatiles_size + offset_of(frame::spill_nonvolatiles, _component))
// Frame layout for the Java interpreter on PPC64.
#ifndef CC_INTERP
// Frame layout for the Java template interpreter on PPC64.
//
// Diffs to the CC_INTERP are marked with 'X'.
//
// TOP_IJAVA_FRAME:
//
// 0 [TOP_IJAVA_FRAME_ABI]
// alignment (optional)
// [operand stack]
// [monitors] (optional)
// X[IJAVA_STATE]
// note: own locals are located in the caller frame.
//
// PARENT_IJAVA_FRAME:
//
// 0 [PARENT_IJAVA_FRAME_ABI]
// alignment (optional)
// [callee's Java result]
// [callee's locals w/o arguments]
// [outgoing arguments]
// [used part of operand stack w/o arguments]
// [monitors] (optional)
// X[IJAVA_STATE]
//
struct parent_ijava_frame_abi : abi_minframe {
};
enum {
parent_ijava_frame_abi_size = sizeof(parent_ijava_frame_abi)
};
#define _parent_ijava_frame_abi(_component) \
(offset_of(frame::parent_ijava_frame_abi, _component))
struct top_ijava_frame_abi : abi_reg_args {
};
enum {
top_ijava_frame_abi_size = sizeof(top_ijava_frame_abi)
};
#define _top_ijava_frame_abi(_component) \
(offset_of(frame::top_ijava_frame_abi, _component))
struct ijava_state {
#ifdef ASSERT
uint64_t ijava_reserved; // Used for assertion.
uint64_t ijava_reserved2; // Inserted for alignment.
#endif
uint64_t method;
uint64_t locals;
uint64_t monitors;
uint64_t cpoolCache;
uint64_t bcp;
uint64_t esp;
uint64_t mdx;
uint64_t top_frame_sp; // Maybe define parent_frame_abi and move there.
uint64_t sender_sp;
// Slots only needed for native calls. Maybe better to move elsewhere.
uint64_t oop_tmp;
uint64_t lresult;
uint64_t fresult;
// Aligned to frame::alignment_in_bytes (16).
};
enum {
ijava_state_size = sizeof(ijava_state)
};
#define _ijava_state_neg(_component) \
(int) (-frame::ijava_state_size + offset_of(frame::ijava_state, _component))
#else // CC_INTERP:
// Frame layout for the Java C++ interpreter on PPC64.
//
// This frame layout provides a C-like frame for every Java frame.
//
@ -242,7 +318,7 @@
// [ENTRY_FRAME_LOCALS]
//
// PARENT_IJAVA_FRAME_ABI:
// 0 [ABI_48]
// 0 [ABI_MINFRAME]
// top_frame_sp
// initial_caller_sp
//
@ -258,7 +334,7 @@
// PARENT_IJAVA_FRAME_ABI
struct parent_ijava_frame_abi : abi_48 {
struct parent_ijava_frame_abi : abi_minframe {
// SOE registers.
// C2i adapters spill their top-frame stack-pointer here.
uint64_t top_frame_sp; // carg_1
@ -285,7 +361,7 @@
uint64_t carg_6_unused; //_16 carg_6
uint64_t carg_7_unused; // carg_7
// Use arg8 for storing frame_manager_lr. The size of
// top_ijava_frame_abi must match abi_112.
// top_ijava_frame_abi must match abi_reg_args.
uint64_t frame_manager_lr; //_16 carg_8
// nothing to add here!
// aligned to frame::alignment_in_bytes (16)
@ -298,6 +374,8 @@
#define _top_ijava_frame_abi(_component) \
(offset_of(frame::top_ijava_frame_abi, _component))
#endif // CC_INTERP
// ENTRY_FRAME
struct entry_frame_locals {
@ -395,8 +473,8 @@
intptr_t* fp() const { return _fp; }
// Accessors for ABIs
inline abi_48* own_abi() const { return (abi_48*) _sp; }
inline abi_48* callers_abi() const { return (abi_48*) _fp; }
inline abi_minframe* own_abi() const { return (abi_minframe*) _sp; }
inline abi_minframe* callers_abi() const { return (abi_minframe*) _fp; }
private:
@ -421,6 +499,14 @@
#ifdef CC_INTERP
// Additional interface for interpreter frames:
inline interpreterState get_interpreterState() const;
#else
inline ijava_state* get_ijava_state() const;
// Some convenient register frame setters/getters for deoptimization.
inline intptr_t* interpreter_frame_esp() const;
inline void interpreter_frame_set_cpcache(ConstantPoolCache* cp);
inline void interpreter_frame_set_esp(intptr_t* esp);
inline void interpreter_frame_set_top_frame_sp(intptr_t* top_frame_sp);
inline void interpreter_frame_set_sender_sp(intptr_t* sender_sp);
#endif // CC_INTERP
// Size of a monitor in bytes.

View file

@ -1,6 +1,6 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,10 +26,6 @@
#ifndef CPU_PPC_VM_FRAME_PPC_INLINE_HPP
#define CPU_PPC_VM_FRAME_PPC_INLINE_HPP
#ifndef CC_INTERP
#error "CC_INTERP must be defined on PPC64"
#endif
// Inline functions for ppc64 frames:
// Find codeblob and set deopt_state.
@ -199,6 +195,75 @@ inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
interpreterState istate = get_interpreterState();
return &istate->_constants;
}
#else // !CC_INTERP
// Template Interpreter frame value accessors.
inline frame::ijava_state* frame::get_ijava_state() const {
return (ijava_state*) ((uintptr_t)fp() - ijava_state_size);
}
inline intptr_t** frame::interpreter_frame_locals_addr() const {
return (intptr_t**) &(get_ijava_state()->locals);
}
inline intptr_t* frame::interpreter_frame_bcx_addr() const {
return (intptr_t*) &(get_ijava_state()->bcp);
}
inline intptr_t* frame::interpreter_frame_mdx_addr() const {
return (intptr_t*) &(get_ijava_state()->mdx);
}
// Pointer beyond the "oldest/deepest" BasicObjectLock on stack.
inline BasicObjectLock* frame::interpreter_frame_monitor_end() const {
return (BasicObjectLock *) get_ijava_state()->monitors;
}
inline BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
return (BasicObjectLock *) get_ijava_state();
}
// SAPJVM ASc 2012-11-21. Return register stack slot addr at which currently interpreted method is found
inline Method** frame::interpreter_frame_method_addr() const {
return (Method**) &(get_ijava_state()->method);
}
inline ConstantPoolCache** frame::interpreter_frame_cpoolcache_addr() const {
return (ConstantPoolCache**) &(get_ijava_state()->cpoolCache);
}
inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
return (ConstantPoolCache**) &(get_ijava_state()->cpoolCache);
}
inline oop* frame::interpreter_frame_temp_oop_addr() const {
return (oop *) &(get_ijava_state()->oop_tmp);
}
inline intptr_t* frame::interpreter_frame_esp() const {
return (intptr_t*) get_ijava_state()->esp;
}
// Convenient setters
inline void frame::interpreter_frame_set_monitor_end(BasicObjectLock* end) { get_ijava_state()->monitors = (intptr_t) end;}
inline void frame::interpreter_frame_set_cpcache(ConstantPoolCache* cp) { *frame::interpreter_frame_cpoolcache_addr() = cp; }
inline void frame::interpreter_frame_set_esp(intptr_t* esp) { get_ijava_state()->esp = (intptr_t) esp; }
inline void frame::interpreter_frame_set_top_frame_sp(intptr_t* top_frame_sp) { get_ijava_state()->top_frame_sp = (intptr_t) top_frame_sp; }
inline void frame::interpreter_frame_set_sender_sp(intptr_t* sender_sp) { get_ijava_state()->sender_sp = (intptr_t) sender_sp; }
inline intptr_t* frame::interpreter_frame_expression_stack() const {
return (intptr_t*)interpreter_frame_monitor_end() - 1;
}
inline jint frame::interpreter_frame_expression_stack_direction() {
return -1;
}
// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
return ((intptr_t*) get_ijava_state()->esp) + Interpreter::stackElementWords;
}
inline intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
return &interpreter_frame_tos_address()[offset];
}
#endif // CC_INTERP
inline int frame::interpreter_frame_monitor_size() {

File diff suppressed because it is too large

View file

@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,7 +29,7 @@
#include "assembler_ppc.inline.hpp"
#include "interpreter/invocationCounter.hpp"
// This file specializes the assembler with interpreter-specific macros
// This file specializes the assembler with interpreter-specific macros.
class InterpreterMacroAssembler: public MacroAssembler {
@ -39,15 +39,176 @@ class InterpreterMacroAssembler: public MacroAssembler {
void null_check_throw(Register a, int offset, Register temp_reg);
// Handy address generation macros
void branch_to_entry(address entry, Register Rscratch);
// Handy address generation macros.
#define thread_(field_name) in_bytes(JavaThread::field_name ## _offset()), R16_thread
#define method_(field_name) in_bytes(Method::field_name ## _offset()), R19_method
#ifdef CC_INTERP
#define state_(field_name) in_bytes(byte_offset_of(BytecodeInterpreter, field_name)), R14_state
#define prev_state_(field_name) in_bytes(byte_offset_of(BytecodeInterpreter, field_name)), R15_prev_state
void pop (TosState state) {}; // Not needed.
void push(TosState state) {}; // Not needed.
#endif
#ifndef CC_INTERP
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
// Base routine for all dispatches.
void dispatch_base(TosState state, address* table);
void load_earlyret_value(TosState state, Register Rscratch1);
static const Address l_tmp;
static const Address d_tmp;
// dispatch routines
void dispatch_next(TosState state, int step = 0);
void dispatch_via (TosState state, address* table);
void load_dispatch_table(Register dst, address* table);
void dispatch_Lbyte_code(TosState state, Register bytecode, address* table, bool verify = false);
// Called by shared interpreter generator.
void dispatch_prolog(TosState state, int step = 0);
void dispatch_epilog(TosState state, int step = 0);
// Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls.
void super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1);
void super_call_VM(Register thread_cache, Register oop_result, Register last_java_sp,
address entry_point, Register arg_1, Register arg_2, bool check_exception = true);
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. Blows registers tmp1, tmp2 and tmp3.
void gen_subtype_check(Register sub_klass, Register super_klass,
Register tmp1, Register tmp2, Register tmp3, Label &ok_is_subtype);
// Load object from cpool->resolved_references(index).
void load_resolved_reference_at_index(Register result, Register index);
void generate_stack_overflow_check_with_compare_and_throw(Register Rmem_frame_size, Register Rscratch1);
void load_receiver(Register Rparam_count, Register Rrecv_dst);
// helpers for expression stack
void pop_i( Register r = R17_tos);
void pop_ptr( Register r = R17_tos);
void pop_l( Register r = R17_tos);
void pop_f(FloatRegister f = F15_ftos);
void pop_d(FloatRegister f = F15_ftos );
void push_i( Register r = R17_tos);
void push_ptr( Register r = R17_tos);
void push_l( Register r = R17_tos);
void push_f(FloatRegister f = F15_ftos );
void push_d(FloatRegister f = F15_ftos);
void push_2ptrs(Register first, Register second);
void push_l_pop_d(Register l = R17_tos, FloatRegister d = F15_ftos);
void push_d_pop_l(FloatRegister d = F15_ftos, Register l = R17_tos);
void pop (TosState state); // transition vtos -> state
void push(TosState state); // transition state -> vtos
void empty_expression_stack(); // Resets both Lesp and SP.
public:
// Load values from bytecode stream:
enum signedOrNot { Signed, Unsigned };
enum setCCOrNot { set_CC, dont_set_CC };
void get_2_byte_integer_at_bcp(int bcp_offset,
Register Rdst,
signedOrNot is_signed);
void get_4_byte_integer_at_bcp(int bcp_offset,
Register Rdst,
signedOrNot is_signed = Unsigned);
void get_cache_index_at_bcp(Register Rdst, int bcp_offset, size_t index_size);
void get_cache_and_index_at_bcp(Register cache, int bcp_offset, size_t index_size = sizeof(u2));
// common code
void field_offset_at(int n, Register tmp, Register dest, Register base);
int field_offset_at(Register object, address bcp, int offset);
void fast_iaaccess(int n, address bcp);
void fast_iagetfield(address bcp);
void fast_iaputfield(address bcp, bool do_store_check);
void index_check(Register array, Register index, int index_shift, Register tmp, Register res);
void index_check_without_pop(Register array, Register index, int index_shift, Register tmp, Register res);
void get_const(Register Rdst);
void get_constant_pool(Register Rdst);
void get_constant_pool_cache(Register Rdst);
void get_cpool_and_tags(Register Rcpool, Register Rtags);
void is_a(Label& L);
// Java Call Helpers
void call_from_interpreter(Register Rtarget_method, Register Rret_addr, Register Rscratch1, Register Rscratch2);
// --------------------------------------------------
void unlock_if_synchronized_method(TosState state, bool throw_monitor_exception = true,
bool install_monitor_exception = true);
// Removes the current activation (incl. unlocking of monitors).
// Additionally this code is used for earlyReturn in which case we
// want to skip throwing an exception and installing an exception.
void remove_activation(TosState state,
bool throw_monitor_exception = true,
bool install_monitor_exception = true);
void merge_frames(Register Rtop_frame_sp, Register return_pc, Register Rscratch1, Register Rscratch2); // merge top frames
void add_monitor_to_stack(bool stack_is_empty, Register Rtemp1, Register Rtemp2);
// Local variable access helpers
void load_local_int(Register Rdst_value, Register Rdst_address, Register Rindex);
void load_local_long(Register Rdst_value, Register Rdst_address, Register Rindex);
void load_local_ptr(Register Rdst_value, Register Rdst_address, Register Rindex);
void load_local_float(FloatRegister Rdst_value, Register Rdst_address, Register Rindex);
void load_local_double(FloatRegister Rdst_value, Register Rdst_address, Register Rindex);
void store_local_int(Register Rvalue, Register Rindex);
void store_local_long(Register Rvalue, Register Rindex);
void store_local_ptr(Register Rvalue, Register Rindex);
void store_local_float(FloatRegister Rvalue, Register Rindex);
void store_local_double(FloatRegister Rvalue, Register Rindex);
// Call VM for std frames
// Special call VM versions that check for exceptions and forward exception
// via short cut (not via expensive forward exception stub).
void check_and_forward_exception(Register Rscratch1, Register Rscratch2);
void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
// Should not be used:
void call_VM(Register oop_result, Register last_java_sp, address entry_point, bool check_exceptions = true) {ShouldNotReachHere();}
void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true) {ShouldNotReachHere();}
void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true) {ShouldNotReachHere();}
void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true) {ShouldNotReachHere();}
Address first_local_in_stack();
enum LoadOrStore { load, store };
void static_iload_or_store(int which_local, LoadOrStore direction, Register Rtmp);
void static_aload_or_store(int which_local, LoadOrStore direction, Register Rtmp);
void static_dload_or_store(int which_local, LoadOrStore direction);
void save_interpreter_state(Register scratch);
void restore_interpreter_state(Register scratch, bool bcp_and_mdx_only = false);
void increment_backedge_counter(const Register Rcounters, Register Rtmp, Register Rtmp2, Register Rscratch);
void test_backedge_count_for_osr(Register backedge_count, Register branch_bcp, Register Rtmp);
void record_static_call_in_profile(Register Rentry, Register Rtmp);
void record_receiver_call_in_profile(Register Rklass, Register Rentry, Register Rtmp);
#endif // !CC_INTERP
void get_method_counters(Register method, Register Rcounters, Label& skip);
void increment_invocation_counter(Register iv_be_count, Register Rtmp1, Register Rtmp2_r0);
@ -55,12 +216,59 @@ class InterpreterMacroAssembler: public MacroAssembler {
void lock_object (Register lock_reg, Register obj_reg);
void unlock_object(Register lock_reg, bool check_for_exceptions = true);
#ifndef CC_INTERP
// Interpreter profiling operations
void set_method_data_pointer_for_bcp();
void test_method_data_pointer(Label& zero_continue);
void verify_method_data_pointer();
void test_invocation_counter_for_mdp(Register invocation_count, Register Rscratch, Label &profile_continue);
void set_mdp_data_at(int constant, Register value);
void increment_mdp_data_at(int constant, Register counter_addr, Register Rbumped_count, bool decrement = false);
void increment_mdp_data_at(Register counter_addr, Register Rbumped_count, bool decrement = false);
void increment_mdp_data_at(Register reg, int constant, Register scratch, Register Rbumped_count, bool decrement = false);
void set_mdp_flag_at(int flag_constant, Register scratch);
void test_mdp_data_at(int offset, Register value, Label& not_equal_continue, Register test_out);
void update_mdp_by_offset(int offset_of_disp, Register scratch);
void update_mdp_by_offset(Register reg, int offset_of_disp,
Register scratch);
void update_mdp_by_constant(int constant);
void update_mdp_for_ret(TosState state, Register return_bci);
void profile_taken_branch(Register scratch, Register bumped_count);
void profile_not_taken_branch(Register scratch1, Register scratch2);
void profile_call(Register scratch1, Register scratch2);
void profile_final_call(Register scratch1, Register scratch2);
void profile_virtual_call(Register Rreceiver, Register Rscratch1, Register Rscratch2, bool receiver_can_be_null);
void profile_typecheck(Register Rklass, Register Rscratch1, Register Rscratch2);
void profile_typecheck_failed(Register Rscratch1, Register Rscratch2);
void profile_ret(TosState state, Register return_bci, Register scratch1, Register scratch2);
void profile_switch_default(Register scratch1, Register scratch2);
void profile_switch_case(Register index, Register scratch1,Register scratch2, Register scratch3);
void profile_null_seen(Register Rscratch1, Register Rscratch2);
void record_klass_in_profile(Register receiver, Register scratch1, Register scratch2, bool is_virtual_call);
void record_klass_in_profile_helper(Register receiver, Register scratch1, Register scratch2, int start_row, Label& done, bool is_virtual_call);
#endif // !CC_INTERP
// Debugging
void verify_oop(Register reg, TosState state = atos); // only if +VerifyOops && state == atos
#ifndef CC_INTERP
void verify_oop_or_return_address(Register reg, Register rtmp); // for astore
void verify_FPU(int stack_depth, TosState state = ftos);
#endif // !CC_INTERP
// support for jvmdi/jvmpi
typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode;
// Support for jvmdi/jvmpi.
void notify_method_entry();
void notify_method_exit(bool is_native_method, TosState state);
void notify_method_exit(bool is_native_method, TosState state,
NotifyMethodExitMode mode, bool check_exceptions);
#ifdef CC_INTERP
// Convert the current TOP_IJAVA_FRAME into a PARENT_IJAVA_FRAME

View file

@ -109,8 +109,10 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
}
void InterpreterRuntime::SignatureHandlerGenerator::generate(uint64_t fingerprint) {
#if !defined(ABI_ELFv2)
// Emit fd for current codebuffer. Needs patching!
__ emit_fd();
#endif
// Generate code to handle arguments.
iterate(fingerprint);
@ -127,11 +129,13 @@ void InterpreterRuntime::SignatureHandlerGenerator::generate(uint64_t fingerprin
// Implementation of SignatureHandlerLibrary
void SignatureHandlerLibrary::pd_set_handler(address handler) {
#if !defined(ABI_ELFv2)
// patch fd here.
FunctionDescriptor* fd = (FunctionDescriptor*) handler;
fd->set_entry(handler + (int)sizeof(FunctionDescriptor));
assert(fd->toc() == (address)0xcafe, "need to adjust TOC here");
#endif
}

View file

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -51,10 +51,6 @@
#include "c1/c1_Runtime1.hpp"
#endif
#ifndef CC_INTERP
#error "CC_INTERP must be defined on PPC"
#endif
#define __ _masm->
#ifdef PRODUCT
@ -128,13 +124,13 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
const Register target_sp = R28_tmp8;
const FloatRegister floatSlot = F0;
address entry = __ emit_fd();
address entry = __ function_entry();
__ save_LR_CR(R0);
__ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
// We use target_sp for storing arguments in the C frame.
__ mr(target_sp, R1_SP);
__ push_frame_abi112_nonvolatiles(0, R11_scratch1);
__ push_frame_reg_args_nonvolatiles(0, R11_scratch1);
__ mr(arg_java, R3_ARG1);
@ -147,7 +143,8 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
#ifdef CC_INTERP
__ ld(R19_method, state_(_method));
#else
__ unimplemented("slow signature handler 1");
__ ld(R19_method, 0, target_sp);
__ ld(R19_method, _ijava_state_neg(method), R19_method);
#endif
// Get the result handler.
@ -157,7 +154,8 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
#ifdef CC_INTERP
__ ld(R19_method, state_(_method));
#else
__ unimplemented("slow signature handler 2");
__ ld(R19_method, 0, target_sp);
__ ld(R19_method, _ijava_state_neg(method), R19_method);
#endif
{
@ -453,7 +451,7 @@ address InterpreterGenerator::generate_abstract_entry(void) {
//
// Registers alive
// R16_thread - JavaThread*
// R19_method - callee's methodOop (method to be invoked)
// R19_method - callee's method (method to be invoked)
// R1_SP - SP prepared such that caller's outgoing args are near top
// LR - return address to caller
//
@ -474,7 +472,7 @@ address InterpreterGenerator::generate_abstract_entry(void) {
// Push a new C frame and save LR.
__ save_LR_CR(R0);
__ push_frame_abi112(0, R11_scratch1);
__ push_frame_reg_args(0, R11_scratch1);
// This is not a leaf but we have a JavaFrameAnchor now and we will
// check (create) exceptions afterward so this is ok.
@ -491,7 +489,12 @@ address InterpreterGenerator::generate_abstract_entry(void) {
// Return to frame manager, it will handle the pending exception.
__ blr();
#else
Unimplemented();
// We don't know our caller, so jump to the general forward exception stub,
// which will also pop our full frame off. Satisfy the interface of
// SharedRuntime::generate_forward_exception()
__ load_const_optimized(R11_scratch1, StubRoutines::forward_exception_entry(), R0);
__ mtctr(R11_scratch1);
__ bctr();
#endif
return entry;
@ -500,8 +503,9 @@ address InterpreterGenerator::generate_abstract_entry(void) {
// Call an accessor method (assuming it is resolved, otherwise drop into
// vanilla (slow path) entry.
address InterpreterGenerator::generate_accessor_entry(void) {
if(!UseFastAccessorMethods && (!FLAG_IS_ERGO(UseFastAccessorMethods)))
if (!UseFastAccessorMethods && (!FLAG_IS_ERGO(UseFastAccessorMethods))) {
return NULL;
}
Label Lslow_path, Lacquire;
@ -586,10 +590,14 @@ address InterpreterGenerator::generate_accessor_entry(void) {
// Load from branch table and dispatch (volatile case: one instruction ahead)
__ sldi(Rflags, Rflags, LogBytesPerWord);
__ cmpwi(CCR6, Rscratch, 1); // volatile?
__ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // volatile ? size of 1 instruction : 0
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // volatile ? size of 1 instruction : 0
}
__ ldx(Rbtable, Rbtable, Rflags);
__ subf(Rbtable, Rscratch, Rbtable); // point to volatile/non-volatile entry point
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ subf(Rbtable, Rscratch, Rbtable); // point to volatile/non-volatile entry point
}
__ mtctr(Rbtable);
__ bctr();
@ -605,7 +613,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {
}
assert(all_uninitialized != all_initialized, "consistency"); // either or
__ sync(); // volatile entry point (one instruction before non-volatile_entry point)
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
if (branch_table[vtos] == 0) branch_table[vtos] = __ pc(); // non-volatile_entry point
if (branch_table[dtos] == 0) branch_table[dtos] = __ pc(); // non-volatile_entry point
if (branch_table[ftos] == 0) branch_table[ftos] = __ pc(); // non-volatile_entry point
@ -614,7 +622,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {
if (branch_table[itos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ sync(); // volatile entry point (one instruction before non-volatile_entry point)
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[itos] = __ pc(); // non-volatile_entry point
__ lwax(R3_RET, Rclass_or_obj, Roffset);
__ beq(CCR6, Lacquire);
@ -623,7 +631,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {
if (branch_table[ltos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ sync(); // volatile entry point (one instruction before non-volatile_entry point)
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[ltos] = __ pc(); // non-volatile_entry point
__ ldx(R3_RET, Rclass_or_obj, Roffset);
__ beq(CCR6, Lacquire);
@ -632,7 +640,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {
if (branch_table[btos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ sync(); // volatile entry point (one instruction before non-volatile_entry point)
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[btos] = __ pc(); // non-volatile_entry point
__ lbzx(R3_RET, Rclass_or_obj, Roffset);
__ extsb(R3_RET, R3_RET);
@ -642,7 +650,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {
if (branch_table[ctos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ sync(); // volatile entry point (one instruction before non-volatile_entry point)
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[ctos] = __ pc(); // non-volatile_entry point
__ lhzx(R3_RET, Rclass_or_obj, Roffset);
__ beq(CCR6, Lacquire);
@ -651,7 +659,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {
if (branch_table[stos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ sync(); // volatile entry point (one instruction before non-volatile_entry point)
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[stos] = __ pc(); // non-volatile_entry point
__ lhax(R3_RET, Rclass_or_obj, Roffset);
__ beq(CCR6, Lacquire);
@ -660,7 +668,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {
if (branch_table[atos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ sync(); // volatile entry point (one instruction before non-volatile_entry point)
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[atos] = __ pc(); // non-volatile_entry point
__ load_heap_oop(R3_RET, (RegisterOrConstant)Roffset, Rclass_or_obj);
__ verify_oop(R3_RET);
@ -683,10 +691,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {
#endif
__ bind(Lslow_path);
assert(Interpreter::entry_for_kind(Interpreter::zerolocals), "Normal entry must have been generated by now");
__ load_const_optimized(Rscratch, Interpreter::entry_for_kind(Interpreter::zerolocals), R0);
__ mtctr(Rscratch);
__ bctr();
__ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), Rscratch);
__ flush();
return entry;
@ -773,10 +778,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
// Generate regular method entry.
__ bind(slow_path);
assert(Interpreter::entry_for_kind(Interpreter::zerolocals), "Normal entry must have been generated by now");
__ load_const_optimized(R11_scratch1, Interpreter::entry_for_kind(Interpreter::zerolocals), R0);
__ mtctr(R11_scratch1);
__ bctr();
__ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
__ flush();
return entry;

View file

@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,15 +28,23 @@
public:
// Stack index relative to tos (which points at value)
// Stack index relative to tos (which points at value).
static int expr_index_at(int i) {
return stackElementWords * i;
}
// Already negated by c++ interpreter
// Already negated by c++ interpreter.
static int local_index_at(int i) {
assert(i <= 0, "local direction already negated");
return stackElementWords * i;
}
#ifndef CC_INTERP
// The offset in bytes to access an expression stack slot
// relative to the esp pointer.
static int expr_offset_in_bytes(int slot) {
return stackElementSize * slot + wordSize;
}
#endif
#endif // CPU_PPC_VM_INTERPRETER_PPC_HPP

View file

@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,10 +26,6 @@
#ifndef CPU_PPC_VM_JAVAFRAMEANCHOR_PPC_HPP
#define CPU_PPC_VM_JAVAFRAMEANCHOR_PPC_HPP
#ifndef CC_INTERP
#error "CC_INTERP must be defined on PPC64"
#endif
public:
// Each arch must define reset, save, restore
// These are used by objects that only care about:

View file

@ -594,7 +594,13 @@ void MacroAssembler::bxx64_patchable(address dest, relocInfo::relocType rt, bool
"can't identify emitted call");
} else {
// variant 1:
#if defined(ABI_ELFv2)
nop();
calculate_address_from_global_toc(R12, dest, true, true, false);
mtctr(R12);
nop();
nop();
#else
mr(R0, R11); // spill R11 -> R0.
// Load the destination address into CTR,
@ -604,6 +610,7 @@ void MacroAssembler::bxx64_patchable(address dest, relocInfo::relocType rt, bool
mtctr(R11);
mr(R11, R0); // spill R11 <- R0.
nop();
#endif
// do the call/jump
if (link) {
@ -912,16 +919,16 @@ void MacroAssembler::push_frame(unsigned int bytes, Register tmp) {
}
}
// Push a frame of size `bytes' plus abi112 on top.
void MacroAssembler::push_frame_abi112(unsigned int bytes, Register tmp) {
push_frame(bytes + frame::abi_112_size, tmp);
// Push a frame of size `bytes' plus abi_reg_args on top.
void MacroAssembler::push_frame_reg_args(unsigned int bytes, Register tmp) {
push_frame(bytes + frame::abi_reg_args_size, tmp);
}
// Set up a new C frame with a spill area for non-volatile GPRs and
// additional space for local variables.
void MacroAssembler::push_frame_abi112_nonvolatiles(unsigned int bytes,
Register tmp) {
push_frame(bytes + frame::abi_112_size + frame::spill_nonvolatiles_size, tmp);
void MacroAssembler::push_frame_reg_args_nonvolatiles(unsigned int bytes,
Register tmp) {
push_frame(bytes + frame::abi_reg_args_size + frame::spill_nonvolatiles_size, tmp);
}
// Pop current C frame.
@ -929,6 +936,42 @@ void MacroAssembler::pop_frame() {
ld(R1_SP, _abi(callers_sp), R1_SP);
}
#if defined(ABI_ELFv2)
address MacroAssembler::branch_to(Register r_function_entry, bool and_link) {
// TODO(asmundak): make sure the caller uses R12 as function descriptor
// most of the times.
if (R12 != r_function_entry) {
mr(R12, r_function_entry);
}
mtctr(R12);
// Do a call or a branch.
if (and_link) {
bctrl();
} else {
bctr();
}
_last_calls_return_pc = pc();
return _last_calls_return_pc;
}
// Call a C function via a function descriptor and use full C
// calling conventions. Updates and returns _last_calls_return_pc.
address MacroAssembler::call_c(Register r_function_entry) {
return branch_to(r_function_entry, /*and_link=*/true);
}
// For tail calls: only branch, don't link, so callee returns to caller of this function.
address MacroAssembler::call_c_and_return_to_caller(Register r_function_entry) {
return branch_to(r_function_entry, /*and_link=*/false);
}
address MacroAssembler::call_c(address function_entry, relocInfo::relocType rt) {
load_const(R12, function_entry, R0);
return branch_to(R12, /*and_link=*/true);
}
#else
// Generic version of a call to C function via a function descriptor
// with variable support for C calling conventions (TOC, ENV, etc.).
// Updates and returns _last_calls_return_pc.
@ -1077,6 +1120,7 @@ address MacroAssembler::call_c_using_toc(const FunctionDescriptor* fd,
}
return _last_calls_return_pc;
}
#endif
void MacroAssembler::call_VM_base(Register oop_result,
Register last_java_sp,
@ -1091,8 +1135,11 @@ void MacroAssembler::call_VM_base(Register oop_result,
// ARG1 must hold thread address.
mr(R3_ARG1, R16_thread);
#if defined(ABI_ELFv2)
address return_pc = call_c(entry_point, relocInfo::none);
#else
address return_pc = call_c((FunctionDescriptor*)entry_point, relocInfo::none);
#endif
reset_last_Java_frame();
@ -1113,7 +1160,11 @@ void MacroAssembler::call_VM_base(Register oop_result,
void MacroAssembler::call_VM_leaf_base(address entry_point) {
BLOCK_COMMENT("call_VM_leaf {");
#if defined(ABI_ELFv2)
call_c(entry_point, relocInfo::none);
#else
call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::none);
#endif
BLOCK_COMMENT("} call_VM_leaf");
}
@ -2227,7 +2278,7 @@ void MacroAssembler::g1_write_barrier_pre(Register Robj, RegisterOrConstant offs
// VM call need frame to access(write) O register.
if (needs_frame) {
save_LR_CR(Rtmp1);
push_frame_abi112(0, Rtmp2);
push_frame_reg_args(0, Rtmp2);
}
if (Rpre_val->is_volatile() && Robj == noreg) mr(R31, Rpre_val); // Save pre_val across C call if it was preloaded.
@ -2361,7 +2412,8 @@ void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, R
#ifdef CC_INTERP
ld(tmp1/*pc*/, _top_ijava_frame_abi(frame_manager_lr), sp);
#else
Unimplemented();
address entry = pc();
load_const_optimized(tmp1, entry);
#endif
set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1);
@ -2421,6 +2473,16 @@ void MacroAssembler::store_klass(Register dst_oop, Register klass, Register ck)
}
}
void MacroAssembler::store_klass_gap(Register dst_oop, Register val) {
if (UseCompressedClassPointers) {
if (val == noreg) {
val = R0;
li(val, 0);
}
stw(val, oopDesc::klass_gap_offset_in_bytes(), dst_oop); // klass gap if compressed
}
}
int MacroAssembler::instr_size_for_decode_klass_not_null() {
if (!UseCompressedClassPointers) return 0;
int num_instrs = 1; // shift or move
@ -3006,13 +3068,13 @@ void MacroAssembler::verify_oop(Register oop, const char* msg) {
mr(R0, tmp);
// kill tmp
save_LR_CR(tmp);
push_frame_abi112(nbytes_save, tmp);
push_frame_reg_args(nbytes_save, tmp);
// restore tmp
mr(tmp, R0);
save_volatile_gprs(R1_SP, 112); // except R0
// load FunctionDescriptor**
// load FunctionDescriptor** / entry_address *
load_const(tmp, fd);
// load FunctionDescriptor*
// load FunctionDescriptor* / entry_address
ld(tmp, 0, tmp);
mr(R4_ARG2, oop);
load_const(R3_ARG1, (address)msg);
@ -3092,3 +3154,15 @@ void MacroAssembler::zap_from_to(Register low, int before, Register high, int af
}
#endif // !PRODUCT
SkipIfEqualZero::SkipIfEqualZero(MacroAssembler* masm, Register temp, const bool* flag_addr) : _masm(masm), _label() {
int simm16_offset = masm->load_const_optimized(temp, (address)flag_addr, R0, true);
assert(sizeof(bool) == 1, "PowerPC ABI");
masm->lbz(temp, simm16_offset, temp);
masm->cmpwi(CCR0, temp, 0);
masm->beq(CCR0, _label);
}
SkipIfEqualZero::~SkipIfEqualZero() {
_masm->bind(_label);
}

View file

@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -279,12 +279,12 @@ class MacroAssembler: public Assembler {
// Push a frame of size `bytes'. No abi space provided.
void push_frame(unsigned int bytes, Register tmp);
// Push a frame of size `bytes' plus abi112 on top.
void push_frame_abi112(unsigned int bytes, Register tmp);
// Push a frame of size `bytes' plus abi_reg_args on top.
void push_frame_reg_args(unsigned int bytes, Register tmp);
// Setup up a new C frame with a spill area for non-volatile GPRs and additional
// space for local variables
void push_frame_abi112_nonvolatiles(unsigned int bytes, Register tmp);
void push_frame_reg_args_nonvolatiles(unsigned int bytes, Register tmp);
// pop current C frame
void pop_frame();
@ -296,17 +296,31 @@ class MacroAssembler: public Assembler {
private:
address _last_calls_return_pc;
#if defined(ABI_ELFv2)
// Generic version of a call to C function.
// Updates and returns _last_calls_return_pc.
address branch_to(Register function_entry, bool and_link);
#else
// Generic version of a call to C function via a function descriptor
// with variable support for C calling conventions (TOC, ENV, etc.).
// updates and returns _last_calls_return_pc.
address branch_to(Register function_descriptor, bool and_link, bool save_toc_before_call,
bool restore_toc_after_call, bool load_toc_of_callee, bool load_env_of_callee);
#endif
public:
// Get the pc where the last call will return to. returns _last_calls_return_pc.
inline address last_calls_return_pc();
#if defined(ABI_ELFv2)
// Call a C function via a function descriptor and use full C
// calling conventions. Updates and returns _last_calls_return_pc.
address call_c(Register function_entry);
// For tail calls: only branch, don't link, so callee returns to caller of this function.
address call_c_and_return_to_caller(Register function_entry);
address call_c(address function_entry, relocInfo::relocType rt);
#else
// Call a C function via a function descriptor and use full C
// calling conventions. Updates and returns _last_calls_return_pc.
address call_c(Register function_descriptor);
@ -315,6 +329,7 @@ class MacroAssembler: public Assembler {
address call_c(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt);
address call_c_using_toc(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt,
Register toc);
#endif
protected:
@ -551,12 +566,14 @@ class MacroAssembler: public Assembler {
// Load heap oop and decompress. Loaded oop may not be null.
inline void load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1 = noreg);
inline void store_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1,
/*specify if d must stay uncompressed*/ Register tmp = noreg);
// Null allowed.
inline void load_heap_oop(Register d, RegisterOrConstant offs, Register s1 = noreg);
// Encode/decode heap oop. Oop may not be null, else en/decoding goes wrong.
inline void encode_heap_oop_not_null(Register d);
inline Register encode_heap_oop_not_null(Register d, Register src = noreg);
inline void decode_heap_oop_not_null(Register d);
// Null allowed.
@ -566,6 +583,7 @@ class MacroAssembler: public Assembler {
void load_klass(Register dst, Register src);
void load_klass_with_trap_null_check(Register dst, Register src);
void store_klass(Register dst_oop, Register klass, Register tmp = R0);
void store_klass_gap(Register dst_oop, Register val = noreg); // Will store 0 if val not specified.
static int instr_size_for_decode_klass_not_null();
void decode_klass_not_null(Register dst, Register src = noreg);
void encode_klass_not_null(Register dst, Register src = noreg);
@ -649,6 +667,11 @@ class MacroAssembler: public Assembler {
void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line) {}
// Convenience method returning function entry. For the ELFv1 case
// creates a function descriptor at the current address and returns
// the pointer to it. For the ELFv2 case returns the current address.
inline address function_entry();
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
@ -673,4 +696,21 @@ class MacroAssembler: public Assembler {
void zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) PRODUCT_RETURN;
};
// class SkipIfEqualZero:
//
// Instantiating this class will result in assembly code being output that will
// jump around any code emitted between the creation of the instance and its
// automatic destruction at the end of a scope block, depending on the value of
// the flag passed to the constructor, which will be checked at run-time.
class SkipIfEqualZero : public StackObj {
private:
MacroAssembler* _masm;
Label _label;
public:
// 'Temp' is a temp register that this object can use (and trash).
explicit SkipIfEqualZero(MacroAssembler*, Register temp, const bool* flag_addr);
~SkipIfEqualZero();
};
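To make the intended use of this RAII helper concrete, here is a minimal usage sketch; the flag name, the scratch-register choice, and the surrounding masm pointer are assumptions for illustration, not code from this patch:

// Inside some stub or template generator that has a MacroAssembler* masm.
{
  // Constructor emits: load flag byte, compare with zero, branch to an internal label if zero.
  SkipIfEqualZero skip_probe(masm, R11_scratch1, &SomeHypotheticalBoolFlag);
  // ...code emitted here is executed only when the flag is non-zero at run time...
}
// Destructor binds the label, so execution falls through to this point either way.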
#endif // CPU_PPC_VM_MACROASSEMBLER_PPC_HPP

View file

@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -321,6 +321,15 @@ inline void MacroAssembler::load_heap_oop_not_null(Register d, RegisterOrConstan
}
}
inline void MacroAssembler::store_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1, Register tmp) {
if (UseCompressedOops) {
Register compressedOop = encode_heap_oop_not_null((tmp != noreg) ? tmp : d, d);
stw(compressedOop, offs, s1);
} else {
std(d, offs, s1);
}
}
inline void MacroAssembler::load_heap_oop(Register d, RegisterOrConstant offs, Register s1) {
if (UseCompressedOops) {
lwz(d, offs, s1);
@ -330,13 +339,17 @@ inline void MacroAssembler::load_heap_oop(Register d, RegisterOrConstant offs, R
}
}
inline void MacroAssembler::encode_heap_oop_not_null(Register d) {
inline Register MacroAssembler::encode_heap_oop_not_null(Register d, Register src) {
Register current = (src!=noreg) ? src : d; // Compressed oop is in d if no src provided.
if (Universe::narrow_oop_base() != NULL) {
sub(d, d, R30);
sub(d, current, R30);
current = d;
}
if (Universe::narrow_oop_shift() != 0) {
srdi(d, d, LogMinObjAlignmentInBytes);
srdi(d, current, LogMinObjAlignmentInBytes);
current = d;
}
return current; // Encoded oop is in this register.
}
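The encoding above is simply "subtract the heap base, then shift right by the object-alignment log". A plain C++ model of the same arithmetic, with a hypothetical function name, for illustration only:

#include <cstdint>
// Mirrors encode_heap_oop_not_null() for a non-null oop: the base is subtracted
// only in heap-based mode, and with 8-byte object alignment the shift is 3.
static inline uint32_t encode_oop_model(uint64_t oop, uint64_t narrow_oop_base,
                                        unsigned log_min_obj_alignment) {
  uint64_t v = oop;
  if (narrow_oop_base != 0)        v -= narrow_oop_base;         // sub(d, current, R30)
  if (log_min_obj_alignment != 0)  v >>= log_min_obj_alignment;  // srdi(d, current, shift)
  return (uint32_t)v;
}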
inline void MacroAssembler::decode_heap_oop_not_null(Register d) {
@ -385,4 +398,10 @@ inline void MacroAssembler::trap_range_check_ge(Register a, int si16) {
twi(traptoEqual | traptoGreaterThanUnsigned, a/*reg a*/, si16);
}
#if defined(ABI_ELFv2)
inline address MacroAssembler::function_entry() { return pc(); }
#else
inline address MacroAssembler::function_entry() { return emit_fd(); }
#endif
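What the two branches hide is the ELFv1 function descriptor that emit_fd() materializes in the code buffer. A sketch of its shape, based on a reading of the 64-bit ELFv1 ABI rather than on code in this patch:

#include <cstdint>
// Under ELFv1 a function "address" points at three doublewords; under ELFv2
// there is no descriptor and function_entry() is simply the current pc.
struct FunctionDescriptorModel {
  uint64_t entry;  // address of the first instruction
  uint64_t toc;    // TOC base to load into R2 before the call
  uint64_t env;    // environment pointer (R11), unused for C/C++
};
static_assert(sizeof(FunctionDescriptorModel) == 24, "an ELFv1 descriptor is 24 bytes");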
#endif // CPU_PPC_VM_MACROASSEMBLER_PPC_INLINE_HPP

View file

@ -453,11 +453,11 @@ void trace_method_handle_stub(const char* adaptername,
if (Verbose) {
tty->print_cr("Registers:");
const int abi_offset = frame::abi_112_size / 8;
const int abi_offset = frame::abi_reg_args_size / 8;
for (int i = R3->encoding(); i <= R12->encoding(); i++) {
Register r = as_Register(i);
int count = i - R3->encoding();
// The registers are stored in reverse order on the stack (by save_volatile_gprs(R1_SP, abi_112_size)).
// The registers are stored in reverse order on the stack (by save_volatile_gprs(R1_SP, abi_reg_args_size)).
tty->print("%3s=" PTR_FORMAT, r->name(), saved_regs[abi_offset + count]);
if ((count + 1) % 4 == 0) {
tty->cr();
@ -524,9 +524,9 @@ void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adapt
__ save_LR_CR(R0);
__ mr(R0, R1_SP); // saved_sp
assert(Assembler::is_simm(-nbytes_save, 16), "Overwriting R0");
// push_frame_abi112 only uses R0 if nbytes_save is wider than 16 bit
__ push_frame_abi112(nbytes_save, R0);
__ save_volatile_gprs(R1_SP, frame::abi_112_size); // Except R0.
// push_frame_reg_args() only uses R0 if nbytes_save is wider than 16 bits.
__ push_frame_reg_args(nbytes_save, R0);
__ save_volatile_gprs(R1_SP, frame::abi_reg_args_size); // Except R0.
__ load_const(R3_ARG1, (address)adaptername);
__ mr(R4_ARG2, R23_method_handle);

View file

@ -1008,7 +1008,11 @@ int MachCallDynamicJavaNode::ret_addr_offset() {
}
int MachCallRuntimeNode::ret_addr_offset() {
#if defined(ABI_ELFv2)
return 28;
#else
return 40;
#endif
}
//=============================================================================
@ -3674,6 +3678,10 @@ encode %{
MacroAssembler _masm(&cbuf);
const address start_pc = __ pc();
#if defined(ABI_ELFv2)
address entry= !($meth$$method) ? NULL : (address)$meth$$method;
__ call_c(entry, relocInfo::runtime_call_type);
#else
// The function we're going to call.
FunctionDescriptor fdtemp;
const FunctionDescriptor* fd = !($meth$$method) ? &fdtemp : (FunctionDescriptor*)$meth$$method;
@ -3684,6 +3692,7 @@ encode %{
// Put entry, env, toc into the constant pool, this needs up to 3 constant
// pool entries; call_c_using_toc will optimize the call.
__ call_c_using_toc(fd, relocInfo::runtime_call_type, Rtoc);
#endif
// Check the ret_addr_offset.
assert(((MachCallRuntimeNode*)this)->ret_addr_offset() == __ last_calls_return_pc() - start_pc,
@ -3699,20 +3708,25 @@ encode %{
__ mtctr($src$$Register);
%}
// postalloc expand emitter for runtime leaf calls.
// Postalloc expand emitter for runtime leaf calls.
enc_class postalloc_expand_java_to_runtime_call(method meth, iRegLdst toc) %{
loadConLNodesTuple loadConLNodes_Entry;
#if defined(ABI_ELFv2)
jlong entry_address = (jlong) this->entry_point();
assert(entry_address, "need address here");
loadConLNodes_Entry = loadConLNodesTuple_create(C, ra_, n_toc, new (C) immLOper(entry_address),
OptoReg::Name(R12_H_num), OptoReg::Name(R12_num));
#else
// Get the struct that describes the function we are about to call.
FunctionDescriptor* fd = (FunctionDescriptor*) this->entry_point();
assert(fd, "need fd here");
jlong entry_address = (jlong) fd->entry();
// new nodes
loadConLNodesTuple loadConLNodes_Entry;
loadConLNodesTuple loadConLNodes_Env;
loadConLNodesTuple loadConLNodes_Toc;
MachNode *mtctr = NULL;
MachCallLeafNode *call = NULL;
// Create nodes and operands for loading the entry point.
loadConLNodes_Entry = loadConLNodesTuple_create(C, ra_, n_toc, new (C) immLOper((jlong) fd->entry()),
loadConLNodes_Entry = loadConLNodesTuple_create(C, ra_, n_toc, new (C) immLOper(entry_address),
OptoReg::Name(R12_H_num), OptoReg::Name(R12_num));
@ -3733,8 +3747,9 @@ encode %{
// Create nodes and operands for loading the Toc point.
loadConLNodes_Toc = loadConLNodesTuple_create(C, ra_, n_toc, new (C) immLOper((jlong) fd->toc()),
OptoReg::Name(R2_H_num), OptoReg::Name(R2_num));
#endif // ABI_ELFv2
// mtctr node
mtctr = new (C) CallLeafDirect_mtctrNode();
MachNode *mtctr = new (C) CallLeafDirect_mtctrNode();
assert(loadConLNodes_Entry._last != NULL, "entry must exist");
mtctr->add_req(0, loadConLNodes_Entry._last);
@ -3743,10 +3758,10 @@ encode %{
mtctr->_opnds[1] = new (C) iRegLdstOper();
// call node
call = new (C) CallLeafDirectNode();
MachCallLeafNode *call = new (C) CallLeafDirectNode();
call->_opnds[0] = _opnds[0];
call->_opnds[1] = new (C) methodOper((intptr_t) fd->entry()); // may get set later
call->_opnds[1] = new (C) methodOper((intptr_t) entry_address); // May get set later.
// Make the new call node look like the old one.
call->_name = _name;
@ -3773,8 +3788,10 @@ encode %{
// These must be required edges, as the registers are live up to
// the call. Else the constants are handled as kills.
call->add_req(mtctr);
#if !defined(ABI_ELFv2)
call->add_req(loadConLNodes_Env._last);
call->add_req(loadConLNodes_Toc._last);
#endif
// ...as well as prec
for (uint i = req(); i < len(); ++i) {
@ -3787,10 +3804,12 @@ encode %{
// Insert the new nodes.
if (loadConLNodes_Entry._large_hi) nodes->push(loadConLNodes_Entry._large_hi);
if (loadConLNodes_Entry._last) nodes->push(loadConLNodes_Entry._last);
#if !defined(ABI_ELFv2)
if (loadConLNodes_Env._large_hi) nodes->push(loadConLNodes_Env._large_hi);
if (loadConLNodes_Env._last) nodes->push(loadConLNodes_Env._last);
if (loadConLNodes_Toc._large_hi) nodes->push(loadConLNodes_Toc._large_hi);
if (loadConLNodes_Toc._last) nodes->push(loadConLNodes_Toc._last);
#endif
nodes->push(mtctr);
nodes->push(call);
%}
@ -3837,7 +3856,7 @@ frame %{
// out_preserve_stack_slots for calls to C. Supports the var-args
// backing area for register parms.
//
varargs_C_out_slots_killed(((frame::abi_112_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size));
varargs_C_out_slots_killed(((frame::abi_reg_args_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size));
// The after-PROLOG location of the return address. Location of
// return address specifies a type (REG or STACK) and a number

View file

@ -1,6 +1,6 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -579,15 +579,27 @@ REGISTER_DECLARATION(FloatRegister, F13_ARG13, F13); // volatile
// Register declarations to be used in frame manager assembly code.
// Use only non-volatile registers in order to keep values across C-calls.
#ifdef CC_INTERP
REGISTER_DECLARATION(Register, R14_state, R14); // address of new cInterpreter.
REGISTER_DECLARATION(Register, R15_prev_state, R15); // address of old cInterpreter
#else // CC_INTERP
REGISTER_DECLARATION(Register, R14_bcp, R14);
REGISTER_DECLARATION(Register, R15_esp, R15);
REGISTER_DECLARATION(FloatRegister, F15_ftos, F15);
#endif // CC_INTERP
REGISTER_DECLARATION(Register, R16_thread, R16); // address of current thread
REGISTER_DECLARATION(Register, R17_tos, R17); // address of Java tos (prepushed).
REGISTER_DECLARATION(Register, R18_locals, R18); // address of first param slot (receiver).
REGISTER_DECLARATION(Register, R19_method, R19); // address of current method
#ifndef DONT_USE_REGISTER_DEFINES
#ifdef CC_INTERP
#define R14_state AS_REGISTER(Register, R14)
#define R15_prev_state AS_REGISTER(Register, R15)
#else // CC_INTERP
#define R14_bcp AS_REGISTER(Register, R14)
#define R15_esp AS_REGISTER(Register, R15)
#define F15_ftos AS_REGISTER(FloatRegister, F15)
#endif // CC_INTERP
#define R16_thread AS_REGISTER(Register, R16)
#define R17_tos AS_REGISTER(Register, R17)
#define R18_locals AS_REGISTER(Register, R18)
@ -608,6 +620,14 @@ REGISTER_DECLARATION(Register, R26_tmp6, R26);
REGISTER_DECLARATION(Register, R27_tmp7, R27);
REGISTER_DECLARATION(Register, R28_tmp8, R28);
REGISTER_DECLARATION(Register, R29_tmp9, R29);
#ifndef CC_INTERP
REGISTER_DECLARATION(Register, R24_dispatch_addr, R24);
REGISTER_DECLARATION(Register, R25_templateTableBase, R25);
REGISTER_DECLARATION(Register, R26_monitor, R26);
REGISTER_DECLARATION(Register, R27_constPoolCache, R27);
REGISTER_DECLARATION(Register, R28_mdx, R28);
#endif // CC_INTERP
#ifndef DONT_USE_REGISTER_DEFINES
#define R21_tmp1 AS_REGISTER(Register, R21)
#define R22_tmp2 AS_REGISTER(Register, R22)
@ -618,6 +638,16 @@ REGISTER_DECLARATION(Register, R29_tmp9, R29);
#define R27_tmp7 AS_REGISTER(Register, R27)
#define R28_tmp8 AS_REGISTER(Register, R28)
#define R29_tmp9 AS_REGISTER(Register, R29)
#ifndef CC_INTERP
// Lmonitors : monitor pointer
// LcpoolCache: constant pool cache
// mdx: method data index
#define R24_dispatch_addr AS_REGISTER(Register, R24)
#define R25_templateTableBase AS_REGISTER(Register, R25)
#define R26_monitor AS_REGISTER(Register, R26)
#define R27_constPoolCache AS_REGISTER(Register, R27)
#define R28_mdx AS_REGISTER(Register, R28)
#endif
#define CCR4_is_synced AS_REGISTER(ConditionRegister, CCR4)
#endif

View file

@ -87,7 +87,7 @@ void OptoRuntime::generate_exception_blob() {
address start = __ pc();
int frame_size_in_bytes = frame::abi_112_size;
int frame_size_in_bytes = frame::abi_reg_args_size;
OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
// Exception pc is 'return address' for stack walker.
@ -99,7 +99,7 @@ void OptoRuntime::generate_exception_blob() {
// Save callee-saved registers.
// Push a C frame for the exception blob. It is needed for the C call later on.
__ push_frame_abi112(0, R11_scratch1);
__ push_frame_reg_args(0, R11_scratch1);
// This call does all the hard work. It checks if an exception handler
// exists in the method.
@ -109,8 +109,12 @@ void OptoRuntime::generate_exception_blob() {
__ set_last_Java_frame(/*sp=*/R1_SP, noreg);
__ mr(R3_ARG1, R16_thread);
#if defined(ABI_ELFv2)
__ call_c((address) OptoRuntime::handle_exception_C, relocInfo::none);
#else
__ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, OptoRuntime::handle_exception_C),
relocInfo::none);
#endif
address calls_return_pc = __ last_calls_return_pc();
# ifdef ASSERT
__ cmpdi(CCR0, R3_RET, 0);
@ -162,7 +166,11 @@ void OptoRuntime::generate_exception_blob() {
__ bind(mh_callsite);
__ mr(R31, R3_RET); // Save branch address.
__ mr(R3_ARG1, R16_thread);
#if defined(ABI_ELFv2)
__ call_c((address) adjust_SP_for_methodhandle_callsite, relocInfo::none);
#else
__ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, adjust_SP_for_methodhandle_callsite), relocInfo::none);
#endif
// Returns unextended_sp in R3_RET.
__ mtctr(R31); // Move address of exception handler to SR_CTR.

View file

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -67,7 +67,7 @@ class RegisterSaver {
return_pc_is_thread_saved_exception_pc
};
static OopMap* push_frame_abi112_and_save_live_registers(MacroAssembler* masm,
static OopMap* push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
int* out_frame_size_in_bytes,
bool generate_oop_map,
int return_pc_adjustment,
@ -200,12 +200,12 @@ static const RegisterSaver::LiveRegType RegisterSaver_LiveRegs[] = {
RegisterSaver_LiveIntReg( R30 ), // r30 must be the last register
};
OopMap* RegisterSaver::push_frame_abi112_and_save_live_registers(MacroAssembler* masm,
OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
int* out_frame_size_in_bytes,
bool generate_oop_map,
int return_pc_adjustment,
ReturnPCLocation return_pc_location) {
// Push an abi112-frame and store all registers which may be live.
// Push an abi_reg_args-frame and store all registers which may be live.
// If requested, create an OopMap: Record volatile registers as
// callee-save values in an OopMap so their save locations will be
// propagated to the RegisterMap of the caller frame during
@ -221,7 +221,7 @@ OopMap* RegisterSaver::push_frame_abi112_and_save_live_registers(MacroAssembler*
sizeof(RegisterSaver::LiveRegType);
const int register_save_size = regstosave_num * reg_size;
const int frame_size_in_bytes = round_to(register_save_size, frame::alignment_in_bytes)
+ frame::abi_112_size;
+ frame::abi_reg_args_size;
*out_frame_size_in_bytes = frame_size_in_bytes;
const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
const int register_save_offset = frame_size_in_bytes - register_save_size;
@ -229,7 +229,7 @@ OopMap* RegisterSaver::push_frame_abi112_and_save_live_registers(MacroAssembler*
// OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words.
OopMap* map = generate_oop_map ? new OopMap(frame_size_in_slots, 0) : NULL;
BLOCK_COMMENT("push_frame_abi112_and_save_live_registers {");
BLOCK_COMMENT("push_frame_reg_args_and_save_live_registers {");
// Save r30 in the last slot of the not yet pushed frame so that we
// can use it as scratch reg.
@ -294,7 +294,7 @@ OopMap* RegisterSaver::push_frame_abi112_and_save_live_registers(MacroAssembler*
offset += reg_size;
}
BLOCK_COMMENT("} push_frame_abi112_and_save_live_registers");
BLOCK_COMMENT("} push_frame_reg_args_and_save_live_registers");
// And we're done.
return map;
@ -699,15 +699,19 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
int i;
VMReg reg;
// Leave room for C-compatible ABI_112.
int stk = (frame::abi_112_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size;
// Leave room for C-compatible ABI_REG_ARGS.
int stk = (frame::abi_reg_args_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size;
int arg = 0;
int freg = 0;
// Avoid passing C arguments in the wrong stack slots.
#if defined(ABI_ELFv2)
assert((SharedRuntime::out_preserve_stack_slots() + stk) * VMRegImpl::stack_slot_size == 96,
"passing C arguments in wrong stack slots");
#else
assert((SharedRuntime::out_preserve_stack_slots() + stk) * VMRegImpl::stack_slot_size == 112,
"passing C arguments in wrong stack slots");
#endif
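The two magic numbers follow from the ABI stack layouts. A back-of-envelope check, assuming the standard fixed-header sizes (illustrative only, not part of this change):

// ELFv1 fixed header: back chain, CR, LR, compiler dword, linker dword, TOC = 6 * 8 = 48 bytes.
// ELFv2 fixed header: back chain, CR, LR, TOC                               = 4 * 8 = 32 bytes.
// Both ABIs reserve 8 doubleword parameter save slots                       = 8 * 8 = 64 bytes.
static_assert(48 + 64 == 112, "ELFv1 outgoing C argument area");
static_assert(32 + 64 ==  96, "ELFv2 outgoing C argument area");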
// We fill-out regs AND regs2 if an argument must be passed in a
// register AND in a stack slot. If regs2 is NULL in such a
// situation, we bail-out with a fatal error.
@ -953,6 +957,9 @@ static address gen_c2i_adapter(MacroAssembler *masm,
#ifdef CC_INTERP
const Register tos = R17_tos;
#else
const Register tos = R15_esp;
__ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
#endif
// load TOS
@ -971,7 +978,7 @@ static void gen_i2c_adapter(MacroAssembler *masm,
const BasicType *sig_bt,
const VMRegPair *regs) {
// Load method's entry-point from methodOop.
// Load method's entry-point from method.
__ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
__ mtctr(R12_scratch2);
@ -992,7 +999,10 @@ static void gen_i2c_adapter(MacroAssembler *masm,
#ifdef CC_INTERP
const Register ld_ptr = R17_tos;
#else
const Register ld_ptr = R15_esp;
#endif
const Register value_regs[] = { R22_tmp2, R23_tmp3, R24_tmp4, R25_tmp5, R26_tmp6 };
const int num_value_regs = sizeof(value_regs) / sizeof(Register);
int value_regs_index = 0;
@ -1083,8 +1093,8 @@ static void gen_i2c_adapter(MacroAssembler *masm,
}
}
BLOCK_COMMENT("Store method oop");
// Store method oop into thread->callee_target.
BLOCK_COMMENT("Store method");
// Store method into thread->callee_target.
// We might end up in handle_wrong_method if the callee is
// deoptimized as we race thru here. If that happens we don't want
// to take a safepoint because the caller frame will look
@ -1504,7 +1514,11 @@ static void check_needs_gc_for_critical_native(MacroAssembler* masm,
__ block_comment("block_for_jni_critical");
address entry_point = CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical);
#if defined(ABI_ELFv2)
__ call_c(entry_point, relocInfo::runtime_call_type);
#else
__ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::runtime_call_type);
#endif
address start = __ pc() - __ offset(),
calls_return_pc = __ last_calls_return_pc();
oop_maps->add_gc_map(calls_return_pc - start, map);
@ -1877,7 +1891,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Layout of the native wrapper frame:
// (stack grows upwards, memory grows downwards)
//
// NW [ABI_112] <-- 1) R1_SP
// NW [ABI_REG_ARGS] <-- 1) R1_SP
// [outgoing arguments] <-- 2) R1_SP + out_arg_slot_offset
// [oopHandle area] <-- 3) R1_SP + oop_handle_offset (save area for critical natives)
// klass <-- 4) R1_SP + klass_offset
@ -2211,8 +2225,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// slow case of monitor enter. Inline a special case of call_VM that
// disallows any pending_exception.
// Save argument registers and leave room for C-compatible ABI_112.
int frame_size = frame::abi_112_size +
// Save argument registers and leave room for C-compatible ABI_REG_ARGS.
int frame_size = frame::abi_reg_args_size +
round_to(total_c_args * wordSize, frame::alignment_in_bytes);
__ mr(R11_scratch1, R1_SP);
RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs, out_regs2);
@ -2250,9 +2264,12 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// The JNI call
// --------------------------------------------------------------------------
#if defined(ABI_ELFv2)
__ call_c(native_func, relocInfo::runtime_call_type);
#else
FunctionDescriptor* fd_native_method = (FunctionDescriptor*) native_func;
__ call_c(fd_native_method, relocInfo::runtime_call_type);
#endif
// Now, we are back from the native code.
@ -2606,8 +2623,12 @@ static void push_skeleton_frame(MacroAssembler* masm, bool deopt,
#ifdef CC_INTERP
__ std(R1_SP, _parent_ijava_frame_abi(initial_caller_sp), R1_SP);
#else
Unimplemented();
#ifdef ASSERT
__ load_const_optimized(pc_reg, 0x5afe);
__ std(pc_reg, _ijava_state_neg(ijava_reserved), R1_SP);
#endif
__ std(R1_SP, _ijava_state_neg(sender_sp), R1_SP);
#endif // CC_INTERP
__ addi(number_of_frames_reg, number_of_frames_reg, -1);
__ addi(frame_sizes_reg, frame_sizes_reg, wordSize);
__ addi(pcs_reg, pcs_reg, wordSize);
@ -2679,7 +2700,15 @@ static void push_skeleton_frames(MacroAssembler* masm, bool deopt,
__ std(R12_scratch2, _abi(lr), R1_SP);
// Initialize initial_caller_sp.
#ifdef CC_INTERP
__ std(frame_size_reg/*old_sp*/, _parent_ijava_frame_abi(initial_caller_sp), R1_SP);
#else
#ifdef ASSERT
__ load_const_optimized(pc_reg, 0x5afe);
__ std(pc_reg, _ijava_state_neg(ijava_reserved), R1_SP);
#endif
__ std(frame_size_reg, _ijava_state_neg(sender_sp), R1_SP);
#endif // CC_INTERP
#ifdef ASSERT
// Make sure that there is at least one entry in the array.
@ -2724,7 +2753,7 @@ void SharedRuntime::generate_deopt_blob() {
OopMapSet *oop_maps = new OopMapSet();
// size of ABI112 plus spill slots for R3_RET and F1_RET.
const int frame_size_in_bytes = frame::abi_112_spill_size;
const int frame_size_in_bytes = frame::abi_reg_args_spill_size;
const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
int first_frame_size_in_bytes = 0; // frame size of "unpack frame" for call to fetch_unroll_info.
@ -2757,11 +2786,11 @@ void SharedRuntime::generate_deopt_blob() {
// Push the "unpack frame"
// Save everything in sight.
map = RegisterSaver::push_frame_abi112_and_save_live_registers(masm,
&first_frame_size_in_bytes,
/*generate_oop_map=*/ true,
return_pc_adjustment_no_exception,
RegisterSaver::return_pc_is_lr);
map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&first_frame_size_in_bytes,
/*generate_oop_map=*/ true,
return_pc_adjustment_no_exception,
RegisterSaver::return_pc_is_lr);
assert(map != NULL, "OopMap must have been created");
__ li(exec_mode_reg, Deoptimization::Unpack_deopt);
@ -2787,11 +2816,11 @@ void SharedRuntime::generate_deopt_blob() {
// Push the "unpack frame".
// Save everything in sight.
assert(R4 == R4_ARG2, "exception pc must be in r4");
RegisterSaver::push_frame_abi112_and_save_live_registers(masm,
&first_frame_size_in_bytes,
/*generate_oop_map=*/ false,
return_pc_adjustment_exception,
RegisterSaver::return_pc_is_r4);
RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&first_frame_size_in_bytes,
/*generate_oop_map=*/ false,
return_pc_adjustment_exception,
RegisterSaver::return_pc_is_r4);
// Deopt during an exception. Save exec mode for unpack_frames.
__ li(exec_mode_reg, Deoptimization::Unpack_exception);
@ -2876,8 +2905,8 @@ void SharedRuntime::generate_deopt_blob() {
// ...).
// Spill live volatile registers since we'll do a call.
__ std( R3_RET, _abi_112_spill(spill_ret), R1_SP);
__ stfd(F1_RET, _abi_112_spill(spill_fret), R1_SP);
__ std( R3_RET, _abi_reg_args_spill(spill_ret), R1_SP);
__ stfd(F1_RET, _abi_reg_args_spill(spill_fret), R1_SP);
// Let the unpacker layout information in the skeletal frames just
// allocated.
@ -2889,8 +2918,8 @@ void SharedRuntime::generate_deopt_blob() {
__ reset_last_Java_frame();
// Restore the volatiles saved above.
__ ld( R3_RET, _abi_112_spill(spill_ret), R1_SP);
__ lfd(F1_RET, _abi_112_spill(spill_fret), R1_SP);
__ ld( R3_RET, _abi_reg_args_spill(spill_ret), R1_SP);
__ lfd(F1_RET, _abi_reg_args_spill(spill_fret), R1_SP);
// Pop the unpack frame.
__ pop_frame();
@ -2900,10 +2929,16 @@ void SharedRuntime::generate_deopt_blob() {
// optional c2i, caller of deoptee, ...).
// Initialize R14_state.
#ifdef CC_INTERP
__ ld(R14_state, 0, R1_SP);
__ addi(R14_state, R14_state, -frame::interpreter_frame_cinterpreterstate_size_in_bytes());
// Also inititialize R15_prev_state.
__ restore_prev_state();
#else
__ restore_interpreter_state(R11_scratch1);
__ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
#endif // CC_INTERP
// Return to the interpreter entry point.
__ blr();
@ -2930,7 +2965,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
Register unc_trap_reg = R23_tmp3;
OopMapSet* oop_maps = new OopMapSet();
int frame_size_in_bytes = frame::abi_112_size;
int frame_size_in_bytes = frame::abi_reg_args_size;
OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
// stack: (deoptee, optional i2c, caller_of_deoptee, ...).
@ -2943,7 +2978,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
__ save_LR_CR(R11_scratch1);
// Push an "uncommon_trap" frame.
__ push_frame_abi112(0, R11_scratch1);
__ push_frame_reg_args(0, R11_scratch1);
// stack: (unpack frame, deoptee, optional i2c, caller_of_deoptee, ...).
@ -2996,7 +3031,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// interpreter frames just created.
// Push a simple "unpack frame" here.
__ push_frame_abi112(0, R11_scratch1);
__ push_frame_reg_args(0, R11_scratch1);
// stack: (unpack frame, skeletal interpreter frame, ..., optional
// skeletal interpreter frame, optional c2i, caller of deoptee,
@ -3022,11 +3057,17 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// stack: (top interpreter frame, ..., optional interpreter frame,
// optional c2i, caller of deoptee, ...).
#ifdef CC_INTERP
// Initialize R14_state, ...
__ ld(R11_scratch1, 0, R1_SP);
__ addi(R14_state, R11_scratch1, -frame::interpreter_frame_cinterpreterstate_size_in_bytes());
// also initialize R15_prev_state.
__ restore_prev_state();
#else
__ restore_interpreter_state(R11_scratch1);
__ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
#endif // CC_INTERP
// Return to the interpreter entry point.
__ blr();
@ -3064,11 +3105,11 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
}
// Save registers, fpu state, and flags.
map = RegisterSaver::push_frame_abi112_and_save_live_registers(masm,
&frame_size_in_bytes,
/*generate_oop_map=*/ true,
/*return_pc_adjustment=*/0,
return_pc_location);
map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&frame_size_in_bytes,
/*generate_oop_map=*/ true,
/*return_pc_adjustment=*/0,
return_pc_location);
// The following is basically a call_VM. However, we need the precise
// address of the call in order to generate an oopmap. Hence, we do all the
@ -3104,7 +3145,6 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
frame_size_in_bytes,
/*restore_ctr=*/true);
BLOCK_COMMENT(" Jump to forward_exception_entry.");
// Jump to forward_exception_entry, with the issuing PC in LR
// so it looks like the original nmethod called forward_exception_entry.
@ -3151,11 +3191,11 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
address start = __ pc();
map = RegisterSaver::push_frame_abi112_and_save_live_registers(masm,
&frame_size_in_bytes,
/*generate_oop_map*/ true,
/*return_pc_adjustment*/ 0,
RegisterSaver::return_pc_is_lr);
map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&frame_size_in_bytes,
/*generate_oop_map*/ true,
/*return_pc_adjustment*/ 0,
RegisterSaver::return_pc_is_lr);
// Use noreg as last_Java_pc, the return pc will be reconstructed
// from the physical frame.
@ -3189,7 +3229,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ false);
// Get the returned methodOop.
// Get the returned method.
__ get_vm_result_2(R19_method);
__ bctr();

View file

@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,15 +39,10 @@
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/top.hpp"
#ifdef TARGET_OS_FAMILY_aix
# include "thread_aix.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#include "runtime/thread.inline.hpp"
#define __ _masm->
@ -79,11 +74,11 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", "call_stub");
address start = __ emit_fd();
address start = __ function_entry();
// some sanity checks
assert((sizeof(frame::abi_48) % 16) == 0, "unaligned");
assert((sizeof(frame::abi_112) % 16) == 0, "unaligned");
assert((sizeof(frame::abi_minframe) % 16) == 0, "unaligned");
assert((sizeof(frame::abi_reg_args) % 16) == 0, "unaligned");
assert((sizeof(frame::spill_nonvolatiles) % 16) == 0, "unaligned");
assert((sizeof(frame::parent_ijava_frame_abi) % 16) == 0, "unaligned");
assert((sizeof(frame::entry_frame_locals) % 16) == 0, "unaligned");
@ -221,7 +216,7 @@ class StubGenerator: public StubCodeGenerator {
{
BLOCK_COMMENT("Call frame manager or native entry.");
// Call frame manager or native entry.
Register r_new_arg_entry = R14_state;
Register r_new_arg_entry = R14; // PPC_state;
assert_different_registers(r_new_arg_entry, r_top_of_arguments_addr,
r_arg_method, r_arg_thread);
@ -234,7 +229,11 @@ class StubGenerator: public StubCodeGenerator {
// R16_thread - JavaThread*
// Tos must point to last argument - element_size.
#ifdef CC_INTERP
const Register tos = R17_tos;
#else
const Register tos = R15_esp;
#endif
__ addi(tos, r_top_of_arguments_addr, -Interpreter::stackElementSize);
// initialize call_stub locals (step 2)
@ -248,8 +247,11 @@ class StubGenerator: public StubCodeGenerator {
assert(tos != r_arg_thread && R19_method != r_arg_thread, "trashed r_arg_thread");
// Set R15_prev_state to 0 for simplifying checks in callee.
#ifdef CC_INTERP
__ li(R15_prev_state, 0);
#else
__ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
#endif
// Stack on entry to frame manager / native entry:
//
// F0 [TOP_IJAVA_FRAME_ABI]
@ -444,7 +446,7 @@ class StubGenerator: public StubCodeGenerator {
// Save LR/CR and copy exception pc (LR) into R4_ARG2.
__ save_LR_CR(R4_ARG2);
__ push_frame_abi112(0, R0);
__ push_frame_reg_args(0, R0);
// Find exception handler.
__ call_VM_leaf(CAST_FROM_FN_PTR(address,
SharedRuntime::exception_handler_for_return_address),
@ -519,7 +521,7 @@ class StubGenerator: public StubCodeGenerator {
MacroAssembler* masm = new MacroAssembler(&code);
OopMapSet* oop_maps = new OopMapSet();
int frame_size_in_bytes = frame::abi_112_size;
int frame_size_in_bytes = frame::abi_reg_args_size;
OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
StubCodeMark mark(this, "StubRoutines", "throw_exception");
@ -529,7 +531,7 @@ class StubGenerator: public StubCodeGenerator {
__ save_LR_CR(R11_scratch1);
// Push a frame.
__ push_frame_abi112(0, R11_scratch1);
__ push_frame_reg_args(0, R11_scratch1);
address frame_complete_pc = __ pc();
@ -551,8 +553,11 @@ class StubGenerator: public StubCodeGenerator {
if (arg2 != noreg) {
__ mr(R5_ARG3, arg2);
}
__ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry),
relocInfo::none);
#if defined(ABI_ELFv2)
__ call_c(runtime_entry, relocInfo::none);
#else
__ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry), relocInfo::none);
#endif
// Set an oopmap for the call site.
oop_maps->add_gc_map((int)(gc_map_pc - start), map);
@ -614,7 +619,7 @@ class StubGenerator: public StubCodeGenerator {
// With G1, don't generate the call if we statically know that the target is uninitialized
if (!dest_uninitialized) {
const int spill_slots = 4 * wordSize;
const int frame_size = frame::abi_112_size + spill_slots;
const int frame_size = frame::abi_reg_args_size + spill_slots;
Label filtered;
// Is marking active?
@ -628,7 +633,7 @@ class StubGenerator: public StubCodeGenerator {
__ beq(CCR0, filtered);
__ save_LR_CR(R0);
__ push_frame_abi112(spill_slots, R0);
__ push_frame_reg_args(spill_slots, R0);
__ std(from, frame_size - 1 * wordSize, R1_SP);
__ std(to, frame_size - 2 * wordSize, R1_SP);
__ std(count, frame_size - 3 * wordSize, R1_SP);
@ -672,7 +677,7 @@ class StubGenerator: public StubCodeGenerator {
if (branchToEnd) {
__ save_LR_CR(R0);
// We need this frame only to spill LR.
__ push_frame_abi112(0, R0);
__ push_frame_reg_args(0, R0);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count);
__ pop_frame();
__ restore_LR_CR(R0);
@ -742,7 +747,7 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", "zero_words_aligned8");
// Implemented as in ClearArray.
address start = __ emit_fd();
address start = __ function_entry();
Register base_ptr_reg = R3_ARG1; // tohw (needs to be 8b aligned)
Register cnt_dwords_reg = R4_ARG2; // count (in dwords)
@ -820,7 +825,7 @@ class StubGenerator: public StubCodeGenerator {
//
address generate_handler_for_unsafe_access() {
StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
address start = __ emit_fd();
address start = __ function_entry();
__ unimplemented("StubRoutines::handler_for_unsafe_access", 93);
return start;
}
@ -861,7 +866,7 @@ class StubGenerator: public StubCodeGenerator {
// to read from the safepoint polling page.
address generate_load_from_poll() {
StubCodeMark mark(this, "StubRoutines", "generate_load_from_poll");
address start = __ emit_fd();
address start = __ function_entry();
__ unimplemented("StubRoutines::verify_oop", 95); // TODO PPC port
return start;
}
@ -885,7 +890,7 @@ class StubGenerator: public StubCodeGenerator {
//
address generate_fill(BasicType t, bool aligned, const char* name) {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ emit_fd();
address start = __ function_entry();
const Register to = R3_ARG1; // source array address
const Register value = R4_ARG2; // fill value
@ -1123,7 +1128,7 @@ class StubGenerator: public StubCodeGenerator {
//
address generate_disjoint_byte_copy(bool aligned, const char * name) {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ emit_fd();
address start = __ function_entry();
Register tmp1 = R6_ARG4;
Register tmp2 = R7_ARG5;
@ -1254,15 +1259,21 @@ class StubGenerator: public StubCodeGenerator {
//
address generate_conjoint_byte_copy(bool aligned, const char * name) {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ emit_fd();
address start = __ function_entry();
Register tmp1 = R6_ARG4;
Register tmp2 = R7_ARG5;
Register tmp3 = R8_ARG6;
#if defined(ABI_ELFv2)
address nooverlap_target = aligned ?
StubRoutines::arrayof_jbyte_disjoint_arraycopy() :
StubRoutines::jbyte_disjoint_arraycopy();
#else
address nooverlap_target = aligned ?
((FunctionDescriptor*)StubRoutines::arrayof_jbyte_disjoint_arraycopy())->entry() :
((FunctionDescriptor*)StubRoutines::jbyte_disjoint_arraycopy())->entry();
#endif
array_overlap_test(nooverlap_target, 0);
// Do reverse copy. We assume the case of actual overlap is rare enough
@ -1345,7 +1356,7 @@ class StubGenerator: public StubCodeGenerator {
Register tmp3 = R8_ARG6;
Register tmp4 = R9_ARG7;
address start = __ emit_fd();
address start = __ function_entry();
Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8;
// don't try anything fancy if arrays don't have many elements
@ -1474,15 +1485,21 @@ class StubGenerator: public StubCodeGenerator {
//
address generate_conjoint_short_copy(bool aligned, const char * name) {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ emit_fd();
address start = __ function_entry();
Register tmp1 = R6_ARG4;
Register tmp2 = R7_ARG5;
Register tmp3 = R8_ARG6;
#if defined(ABI_ELFv2)
address nooverlap_target = aligned ?
StubRoutines::arrayof_jshort_disjoint_arraycopy() :
StubRoutines::jshort_disjoint_arraycopy();
#else
address nooverlap_target = aligned ?
((FunctionDescriptor*)StubRoutines::arrayof_jshort_disjoint_arraycopy())->entry() :
((FunctionDescriptor*)StubRoutines::jshort_disjoint_arraycopy())->entry();
#endif
array_overlap_test(nooverlap_target, 1);
@ -1597,7 +1614,7 @@ class StubGenerator: public StubCodeGenerator {
//
address generate_disjoint_int_copy(bool aligned, const char * name) {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ emit_fd();
address start = __ function_entry();
generate_disjoint_int_copy_core(aligned);
__ blr();
return start;
@ -1681,11 +1698,17 @@ class StubGenerator: public StubCodeGenerator {
//
address generate_conjoint_int_copy(bool aligned, const char * name) {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ emit_fd();
address start = __ function_entry();
#if defined(ABI_ELFv2)
address nooverlap_target = aligned ?
StubRoutines::arrayof_jint_disjoint_arraycopy() :
StubRoutines::jint_disjoint_arraycopy();
#else
address nooverlap_target = aligned ?
((FunctionDescriptor*)StubRoutines::arrayof_jint_disjoint_arraycopy())->entry() :
((FunctionDescriptor*)StubRoutines::jint_disjoint_arraycopy())->entry();
#endif
array_overlap_test(nooverlap_target, 2);
@ -1767,7 +1790,7 @@ class StubGenerator: public StubCodeGenerator {
//
address generate_disjoint_long_copy(bool aligned, const char * name) {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ emit_fd();
address start = __ function_entry();
generate_disjoint_long_copy_core(aligned);
__ blr();
@ -1849,11 +1872,17 @@ class StubGenerator: public StubCodeGenerator {
//
address generate_conjoint_long_copy(bool aligned, const char * name) {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ emit_fd();
address start = __ function_entry();
#if defined(ABI_ELFv2)
address nooverlap_target = aligned ?
StubRoutines::arrayof_jlong_disjoint_arraycopy() :
StubRoutines::jlong_disjoint_arraycopy();
#else
address nooverlap_target = aligned ?
((FunctionDescriptor*)StubRoutines::arrayof_jlong_disjoint_arraycopy())->entry() :
((FunctionDescriptor*)StubRoutines::jlong_disjoint_arraycopy())->entry();
#endif
array_overlap_test(nooverlap_target, 3);
generate_conjoint_long_copy_core(aligned);
@ -1875,11 +1904,17 @@ class StubGenerator: public StubCodeGenerator {
address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ emit_fd();
address start = __ function_entry();
#if defined(ABI_ELFv2)
address nooverlap_target = aligned ?
StubRoutines::arrayof_oop_disjoint_arraycopy() :
StubRoutines::oop_disjoint_arraycopy();
#else
address nooverlap_target = aligned ?
((FunctionDescriptor*)StubRoutines::arrayof_oop_disjoint_arraycopy())->entry() :
((FunctionDescriptor*)StubRoutines::oop_disjoint_arraycopy())->entry();
#endif
gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
@ -1910,7 +1945,7 @@ class StubGenerator: public StubCodeGenerator {
//
address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ emit_fd();
address start = __ function_entry();
gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
@ -1991,7 +2026,7 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", name);
// Entry point, pc or function descriptor.
*entry = __ emit_fd();
*entry = __ function_entry();
// Load *adr into R4_ARG2, may fault.
*fault_pc = __ pc();
@ -2056,7 +2091,7 @@ class StubGenerator: public StubCodeGenerator {
guarantee(!UseAESIntrinsics, "not yet implemented.");
}
// PPC uses stubs for safefetch.
// Safefetch stubs.
generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
&StubRoutines::_safefetch32_fault_pc,
&StubRoutines::_safefetch32_continuation_pc);

View file

@ -0,0 +1,44 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2013, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_TEMPLATEINTERPRETERGENERATOR_PPC_HPP
#define CPU_PPC_VM_TEMPLATEINTERPRETERGENERATOR_PPC_HPP
protected:
address generate_normal_entry(bool synchronized);
address generate_native_entry(bool synchronized);
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_empty_entry(void);
void lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded=false);
void unlock_method(bool check_exceptions = true);
void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
void generate_counter_overflow(Label& continue_entry);
void generate_fixed_frame(bool native_call, Register Rsize_of_parameters, Register Rsize_of_locals);
void generate_stack_overflow_check(Register Rframe_size, Register Rscratch1);
#endif // CPU_PPC_VM_TEMPLATEINTERPRETERGENERATOR_PPC_HPP

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,41 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2013, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_TEMPLATEINTERPRETER_PPC_HPP
#define CPU_PPC_VM_TEMPLATEINTERPRETER_PPC_HPP
protected:
// Size of interpreter code. Increase if too small. The interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if it is too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
const static int InterpreterCodeSize = 210*K;
#endif // CPU_PPC_VM_TEMPLATEINTERPRETER_PPC_HPP

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,38 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2013, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_TEMPLATETABLE_PPC_64_HPP
#define CPU_PPC_VM_TEMPLATETABLE_PPC_64_HPP
static void prepare_invoke(int byte_no, Register Rmethod, Register Rret_addr, Register Rindex, Register Rrecv, Register Rflags, Register Rscratch);
static void invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2);
static void generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp);
static void invokeinterface_object_method(Register Rrecv_klass, Register Rret, Register Rflags, Register Rindex, Register Rtemp, Register Rtemp2);
// Branch_conditional which takes TemplateTable::Condition.
static void branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert = false);
static void if_cmp_common(Register Rfirst, Register Rsecond, Register Rscratch1, Register Rscratch2, Condition cc, bool is_jint, bool cmp0);
#endif // CPU_PPC_VM_TEMPLATETABLE_PPC_64_HPP

View file

@ -24,7 +24,8 @@
*/
#include "precompiled.hpp"
#include "assembler_ppc.inline.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
@ -168,7 +169,7 @@ void VM_Version::determine_section_size() {
uint32_t *code = (uint32_t *)a->pc();
// Emit code.
void (*test1)() = (void(*)())(void *)a->emit_fd();
void (*test1)() = (void(*)())(void *)a->function_entry();
Label l1;
@ -242,7 +243,7 @@ void VM_Version::determine_section_size() {
a->blr();
// Emit code.
void (*test2)() = (void(*)())(void *)a->emit_fd();
void (*test2)() = (void(*)())(void *)a->function_entry();
// uint32_t *code = (uint32_t *)a->pc();
Label l2;
@ -383,8 +384,12 @@ void VM_Version::determine_section_size() {
#endif // COMPILER2
void VM_Version::determine_features() {
#if defined(ABI_ELFv2)
const int code_size = (num_features+1+2*7)*BytesPerInstWord; // TODO(asmundak): calculation is incorrect.
#else
// 7 InstWords for each call (function descriptor + blr instruction).
const int code_size = (num_features+1+2*7)*BytesPerInstWord;
#endif
int features = 0;
// create test area
@ -398,7 +403,7 @@ void VM_Version::determine_features() {
MacroAssembler* a = new MacroAssembler(&cb);
// Emit code.
void (*test)(address addr, uint64_t offset)=(void(*)(address addr, uint64_t offset))(void *)a->emit_fd();
void (*test)(address addr, uint64_t offset)=(void(*)(address addr, uint64_t offset))(void *)a->function_entry();
uint32_t *code = (uint32_t *)a->pc();
// Don't use R0 in ldarx.
// Keep R3_ARG1 unmodified, it contains &field (see below).
@ -415,7 +420,7 @@ void VM_Version::determine_features() {
a->blr();
// Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.
void (*zero_cacheline_func_ptr)(char*) = (void(*)(char*))(void *)a->emit_fd();
void (*zero_cacheline_func_ptr)(char*) = (void(*)(char*))(void *)a->function_entry();
a->dcbz(R3_ARG1); // R3_ARG1 = addr
a->blr();

View file

@ -413,16 +413,15 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
// Update standard invocation counters
__ increment_invocation_counter(Rcounters, O0, G4_scratch);
if (ProfileInterpreter) {
Address interpreter_invocation_counter(Rcounters, 0,
Address interpreter_invocation_counter(Rcounters,
in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
__ ld(interpreter_invocation_counter, G4_scratch);
__ inc(G4_scratch);
__ st(G4_scratch, interpreter_invocation_counter);
}
Address invocation_limit(G3_scratch, (address)&InvocationCounter::InterpreterInvocationLimit);
__ sethi(invocation_limit);
__ ld(invocation_limit, G3_scratch);
AddressLiteral invocation_limit((address)&InvocationCounter::InterpreterInvocationLimit);
__ load_contents(invocation_limit, G3_scratch);
__ cmp(O0, G3_scratch);
__ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
__ delayed()->nop();
@ -439,7 +438,7 @@ address InterpreterGenerator::generate_empty_entry(void) {
// do nothing for empty methods (do not even increment invocation counter)
if ( UseFastEmptyMethods) {
// If we need a safepoint check, generate full interpreter entry.
Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());
AddressLiteral sync_state(SafepointSynchronize::address_of_state());
__ load_contents(sync_state, G3_scratch);
__ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
__ br(Assembler::notEqual, false, Assembler::pn, frame_manager_entry);
@ -471,7 +470,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {
if ( UseFastAccessorMethods) {
// Check if we need to reach a safepoint and generate full interpreter
// frame if so.
Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());
AddressLiteral sync_state(SafepointSynchronize::address_of_state());
__ load_contents(sync_state, G3_scratch);
__ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
__ br(Assembler::notEqual, false, Assembler::pn, slow_path);
@ -486,8 +485,8 @@ address InterpreterGenerator::generate_accessor_entry(void) {
// read first instruction word and extract bytecode @ 1 and index @ 2
// get first 4 bytes of the bytecodes (big endian!)
__ ld_ptr(Address(G5_method, 0, in_bytes(Method::const_offset())), G1_scratch);
__ ld(Address(G1_scratch, 0, in_bytes(ConstMethod::codes_offset())), G1_scratch);
__ ld_ptr(Address(G5_method, in_bytes(Method::const_offset())), G1_scratch);
__ ld(Address(G1_scratch, in_bytes(ConstMethod::codes_offset())), G1_scratch);
// move index @ 2 far left then to the right most two bytes.
__ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
@ -590,15 +589,15 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
const Register Gtmp1 = G3_scratch ;
const Register Gtmp2 = G1_scratch;
const Register RconstMethod = Gtmp1;
const Address constMethod(G5_method, 0, in_bytes(Method::const_offset()));
const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
const Address constMethod(G5_method, in_bytes(Method::const_offset()));
const Address size_of_parameters(RconstMethod, in_bytes(ConstMethod::size_of_parameters_offset()));
bool inc_counter = UseCompiler || CountCompiledCalls;
// make sure registers are different!
assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
const Address access_flags (G5_method, 0, in_bytes(Method::access_flags_offset()));
const Address access_flags (G5_method, in_bytes(Method::access_flags_offset()));
Label Lentry;
__ bind(Lentry);
@ -643,7 +642,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// At this point Lstate points to new interpreter state
//
const Address do_not_unlock_if_synchronized(G2_thread, 0,
const Address do_not_unlock_if_synchronized(G2_thread,
in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
// Since at this point in the method invocation the exception handler
// would try to exit the monitor of synchronized methods which hasn't
@ -717,17 +716,17 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
{ Label L;
__ ld_ptr(STATE(_method), G5_method);
__ ld_ptr(Address(G5_method, 0, in_bytes(Method::signature_handler_offset())), G3_scratch);
__ ld_ptr(Address(G5_method, in_bytes(Method::signature_handler_offset())), G3_scratch);
__ tst(G3_scratch);
__ brx(Assembler::notZero, false, Assembler::pt, L);
__ delayed()->nop();
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), G5_method, false);
__ ld_ptr(STATE(_method), G5_method);
Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
Address exception_addr(G2_thread, in_bytes(Thread::pending_exception_offset()));
__ ld_ptr(exception_addr, G3_scratch);
__ br_notnull_short(G3_scratch, Assembler::pn, pending_exception_present);
__ ld_ptr(Address(G5_method, 0, in_bytes(Method::signature_handler_offset())), G3_scratch);
__ ld_ptr(Address(G5_method, in_bytes(Method::signature_handler_offset())), G3_scratch);
__ bind(L);
}
@@ -771,13 +770,13 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ br( Assembler::zero, false, Assembler::pt, not_static);
__ delayed()->
// get native function entry point(O0 is a good temp until the very end)
ld_ptr(Address(G5_method, 0, in_bytes(Method::native_function_offset())), O0);
ld_ptr(Address(G5_method, in_bytes(Method::native_function_offset())), O0);
// for static methods insert the mirror argument
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ ld_ptr(Address(G5_method, 0, in_bytes(Method:: const_offset())), O1);
__ ld_ptr(Address(O1, 0, in_bytes(ConstMethod::constants_offset())), O1);
__ ld_ptr(Address(O1, 0, ConstantPool::pool_holder_offset_in_bytes()), O1);
__ ld_ptr(Address(G5_method, in_bytes(Method:: const_offset())), O1);
__ ld_ptr(Address(O1, in_bytes(ConstMethod::constants_offset())), O1);
__ ld_ptr(Address(O1, ConstantPool::pool_holder_offset_in_bytes()), O1);
__ ld_ptr(O1, mirror_offset, O1);
// where the mirror handle body is allocated:
#ifdef ASSERT
@@ -831,18 +830,17 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// flush the windows now. We don't care about the current (protection) frame
// only the outer frames
__ flush_windows();
__ flushw();
// mark windows as flushed
Address flags(G2_thread,
0,
in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset()));
__ set(JavaFrameAnchor::flushed, G3_scratch);
__ st(G3_scratch, flags);
// Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.
Address thread_state(G2_thread, 0, in_bytes(JavaThread::thread_state_offset()));
Address thread_state(G2_thread, in_bytes(JavaThread::thread_state_offset()));
#ifdef ASSERT
{ Label L;
__ ld(thread_state, G3_scratch);
@@ -867,7 +865,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// Block, if necessary, before resuming in _thread_in_Java state.
// In order for GC to work, don't clear the last_Java_sp until after blocking.
{ Label no_block;
Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());
AddressLiteral sync_state(SafepointSynchronize::address_of_state());
// Switch thread to "native transition" state before reading the synchronization state.
// This additional state is necessary because reading and testing the synchronization
@@ -890,7 +888,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
Label L;
Address suspend_state(G2_thread, 0, in_bytes(JavaThread::suspend_flags_offset()));
Address suspend_state(G2_thread, in_bytes(JavaThread::suspend_flags_offset()));
__ br(Assembler::notEqual, false, Assembler::pn, L);
__ delayed()->
ld(suspend_state, G3_scratch);
@@ -965,7 +963,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// handle exceptions (exception handling will handle unlocking!)
{ Label L;
Address exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
Address exception_addr (G2_thread, in_bytes(Thread::pending_exception_offset()));
__ ld_ptr(exception_addr, Gtemp);
__ tst(Gtemp);
@@ -1055,8 +1053,8 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
assert_different_registers(state, prev_state);
assert_different_registers(prev_state, G3_scratch);
const Register Gtmp = G3_scratch;
const Address constMethod (G5_method, 0, in_bytes(Method::const_offset()));
const Address access_flags (G5_method, 0, in_bytes(Method::access_flags_offset()));
const Address constMethod (G5_method, in_bytes(Method::const_offset()));
const Address access_flags (G5_method, in_bytes(Method::access_flags_offset()));
// slop factor is two extra slots on the expression stack so that
// we always have room to store a result when returning from a call without parameters
@@ -1075,7 +1073,7 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
if (native) {
const Register RconstMethod = Gtmp;
const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
const Address size_of_parameters(RconstMethod, in_bytes(ConstMethod::size_of_parameters_offset()));
__ ld_ptr(constMethod, RconstMethod);
__ lduh( size_of_parameters, Gtmp );
__ calc_mem_param_words(Gtmp, Gtmp); // space for native call parameters passed on the stack in words
@@ -1246,8 +1244,8 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
if (init_value != noreg) {
Label clear_loop;
const Register RconstMethod = O1;
const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
const Address size_of_locals (RconstMethod, 0, in_bytes(ConstMethod::size_of_locals_offset()));
const Address size_of_parameters(RconstMethod, in_bytes(ConstMethod::size_of_parameters_offset()));
const Address size_of_locals (RconstMethod, in_bytes(ConstMethod::size_of_locals_offset()));
// NOTE: If you change the frame layout, this code will need to
// be updated!
@@ -1496,11 +1494,11 @@ void CppInterpreterGenerator::adjust_callers_stack(Register args) {
//
// assert_different_registers(state, prev_state);
const Register Gtmp = G3_scratch;
const RconstMethod = G3_scratch;
const Register RconstMethod = G3_scratch;
const Register tmp = O2;
const Address constMethod(G5_method, 0, in_bytes(Method::const_offset()));
const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
const Address size_of_locals (RconstMethod, 0, in_bytes(ConstMethod::size_of_locals_offset()));
const Address constMethod(G5_method, in_bytes(Method::const_offset()));
const Address size_of_parameters(RconstMethod, in_bytes(ConstMethod::size_of_parameters_offset()));
const Address size_of_locals (RconstMethod, in_bytes(ConstMethod::size_of_locals_offset()));
__ ld_ptr(constMethod, RconstMethod);
__ lduh(size_of_parameters, tmp);
@@ -1555,8 +1553,8 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
const Register Gtmp1 = G3_scratch;
// const Register Lmirror = L1; // native mirror (native calls only)
const Address constMethod (G5_method, 0, in_bytes(Method::const_offset()));
const Address access_flags (G5_method, 0, in_bytes(Method::access_flags_offset()));
const Address constMethod (G5_method, in_bytes(Method::const_offset()));
const Address access_flags (G5_method, in_bytes(Method::access_flags_offset()));
address entry_point = __ pc();
__ mov(G0, prevState); // no current activation
@@ -1709,7 +1707,7 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// We want exception in the thread no matter what we ultimately decide about frame type.
Address exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
Address exception_addr (G2_thread, in_bytes(Thread::pending_exception_offset()));
__ verify_thread();
__ st_ptr(O0, exception_addr);

View file

@@ -827,6 +827,7 @@ void frame::describe_pd(FrameValues& values, int frame_no) {
}
if (is_interpreted_frame()) {
#ifndef CC_INTERP
DESCRIBE_FP_OFFSET(interpreter_frame_d_scratch_fp);
DESCRIBE_FP_OFFSET(interpreter_frame_l_scratch_fp);
DESCRIBE_FP_OFFSET(interpreter_frame_padding);
@@ -837,6 +838,7 @@ void frame::describe_pd(FrameValues& values, int frame_no) {
if ((esp >= sp()) && (esp < fp())) {
values.describe(-1, esp, "*Lesp");
}
#endif
}
if (!is_compiled_frame()) {

View file

@@ -2497,6 +2497,24 @@ void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Regis
void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);
}
// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
int increment, int mask,
Register scratch1, Register scratch2,
Condition cond, Label *where) {
ld(counter_addr, scratch1);
add(scratch1, increment, scratch1);
if (is_simm13(mask)) {
andcc(scratch1, mask, G0);
} else {
set(mask, scratch2);
andcc(scratch1, scratch2, G0);
}
br(cond, false, Assembler::pn, *where);
delayed()->st(scratch1, counter_addr);
}
#endif /* CC_INTERP */
// Inline assembly for:
@@ -2646,20 +2664,3 @@ void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_na
}
#endif // CC_INTERP
}
// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
int increment, int mask,
Register scratch1, Register scratch2,
Condition cond, Label *where) {
ld(counter_addr, scratch1);
add(scratch1, increment, scratch1);
if (is_simm13(mask)) {
andcc(scratch1, mask, G0);
} else {
set(mask, scratch2);
andcc(scratch1, scratch2, G0);
}
br(cond, false, Assembler::pn, *where);
delayed()->st(scratch1, counter_addr);
}
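For reference, increment_mask_and_jump (relocated above into the CC_INTERP section) folds the interpreter's counter update and threshold test into a single update-and-branch sequence. A minimal C++ sketch of what the emitted code computes, assuming the common case where the branch condition is Assembler::zero (the names below are illustrative, not HotSpot code):

// Bump the in-memory counter, mask it, and report whether the branch would
// be taken; on SPARC the store back to *counter_addr sits in the branch
// delay slot, but the net effect is the same.
#include <cstdint>
inline bool increment_mask_and_jump_sketch(int32_t& counter, int32_t increment, int32_t mask) {
  counter += increment;            // ld / add / st of *counter_addr
  return (counter & mask) == 0;    // andcc against mask, branch on 'zero'
}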

View file

@@ -23,7 +23,8 @@
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/codeCache.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/oop.inline.hpp"

View file

@@ -1089,6 +1089,21 @@ void Assembler::andl(Register dst, Register src) {
emit_arith(0x23, 0xC0, dst, src);
}
void Assembler::andnl(Register dst, Register src1, Register src2) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
int encode = vex_prefix_0F38_and_encode(dst, src1, src2);
emit_int8((unsigned char)0xF2);
emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::andnl(Register dst, Register src1, Address src2) {
InstructionMark im(this);
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
vex_prefix_0F38(dst, src1, src2);
emit_int8((unsigned char)0xF2);
emit_operand(dst, src2);
}
void Assembler::bsfl(Register dst, Register src) {
int encode = prefix_and_encode(dst->encoding(), src->encoding());
emit_int8(0x0F);
@@ -1110,6 +1125,51 @@ void Assembler::bswapl(Register reg) { // bswap
emit_int8((unsigned char)(0xC8 | encode));
}
void Assembler::blsil(Register dst, Register src) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
int encode = vex_prefix_0F38_and_encode(rbx, dst, src);
emit_int8((unsigned char)0xF3);
emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::blsil(Register dst, Address src) {
InstructionMark im(this);
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
vex_prefix_0F38(rbx, dst, src);
emit_int8((unsigned char)0xF3);
emit_operand(rbx, src);
}
void Assembler::blsmskl(Register dst, Register src) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
int encode = vex_prefix_0F38_and_encode(rdx, dst, src);
emit_int8((unsigned char)0xF3);
emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::blsmskl(Register dst, Address src) {
InstructionMark im(this);
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
vex_prefix_0F38(rdx, dst, src);
emit_int8((unsigned char)0xF3);
emit_operand(rdx, src);
}
void Assembler::blsrl(Register dst, Register src) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
int encode = vex_prefix_0F38_and_encode(rcx, dst, src);
emit_int8((unsigned char)0xF3);
emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::blsrl(Register dst, Address src) {
InstructionMark im(this);
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
vex_prefix_0F38(rcx, dst, src);
emit_int8((unsigned char)0xF3);
emit_operand(rcx, src);
}
void Assembler::call(Label& L, relocInfo::relocType rtype) {
// suspect disp32 is always good
int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand);
@@ -2878,6 +2938,24 @@ void Assembler::testl(Register dst, Address src) {
emit_operand(dst, src);
}
void Assembler::tzcntl(Register dst, Register src) {
assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
emit_int8((unsigned char)0xF3);
int encode = prefix_and_encode(dst->encoding(), src->encoding());
emit_int8(0x0F);
emit_int8((unsigned char)0xBC);
emit_int8((unsigned char)0xC0 | encode);
}
void Assembler::tzcntq(Register dst, Register src) {
assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
emit_int8((unsigned char)0xF3);
int encode = prefixq_and_encode(dst->encoding(), src->encoding());
emit_int8(0x0F);
emit_int8((unsigned char)0xBC);
emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::ucomisd(XMMRegister dst, Address src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_66);
@@ -4837,6 +4915,21 @@ void Assembler::andq(Register dst, Register src) {
emit_arith(0x23, 0xC0, dst, src);
}
void Assembler::andnq(Register dst, Register src1, Register src2) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
int encode = vex_prefix_0F38_and_encode_q(dst, src1, src2);
emit_int8((unsigned char)0xF2);
emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::andnq(Register dst, Register src1, Address src2) {
InstructionMark im(this);
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
vex_prefix_0F38_q(dst, src1, src2);
emit_int8((unsigned char)0xF2);
emit_operand(dst, src2);
}
void Assembler::bsfq(Register dst, Register src) {
int encode = prefixq_and_encode(dst->encoding(), src->encoding());
emit_int8(0x0F);
@@ -4858,6 +4951,51 @@ void Assembler::bswapq(Register reg) {
emit_int8((unsigned char)(0xC8 | encode));
}
void Assembler::blsiq(Register dst, Register src) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
int encode = vex_prefix_0F38_and_encode_q(rbx, dst, src);
emit_int8((unsigned char)0xF3);
emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::blsiq(Register dst, Address src) {
InstructionMark im(this);
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
vex_prefix_0F38_q(rbx, dst, src);
emit_int8((unsigned char)0xF3);
emit_operand(rbx, src);
}
void Assembler::blsmskq(Register dst, Register src) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
int encode = vex_prefix_0F38_and_encode_q(rdx, dst, src);
emit_int8((unsigned char)0xF3);
emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::blsmskq(Register dst, Address src) {
InstructionMark im(this);
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
vex_prefix_0F38_q(rdx, dst, src);
emit_int8((unsigned char)0xF3);
emit_operand(rdx, src);
}
void Assembler::blsrq(Register dst, Register src) {
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
int encode = vex_prefix_0F38_and_encode_q(rcx, dst, src);
emit_int8((unsigned char)0xF3);
emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::blsrq(Register dst, Address src) {
InstructionMark im(this);
assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
vex_prefix_0F38_q(rcx, dst, src);
emit_int8((unsigned char)0xF3);
emit_operand(rcx, src);
}
void Assembler::cdqq() {
prefix(REX_W);
emit_int8((unsigned char)0x99);
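For readers not familiar with BMI1, the new assembler entry points above (andn*, blsi*, blsmsk*, blsr*, tzcnt*) implement simple bit-manipulation operations. Their semantics, restated as a plain C++ sketch (32-bit shown; the *q forms are the same operations on 64-bit values; illustrative only, not part of the change):

#include <cstdint>
inline uint32_t andn_ref  (uint32_t a, uint32_t b) { return ~a & b; }        // ANDN: and-not
inline uint32_t blsi_ref  (uint32_t x)             { return x & (0u - x); }  // BLSI: isolate lowest set bit
inline uint32_t blsmsk_ref(uint32_t x)             { return x ^ (x - 1u); }  // BLSMSK: mask up to lowest set bit
inline uint32_t blsr_ref  (uint32_t x)             { return x & (x - 1u); }  // BLSR: clear lowest set bit
inline uint32_t tzcnt_ref (uint32_t x) {                                     // TZCNT: trailing zero count (32 for x == 0)
  uint32_t n = 0;
  while (n < 32 && ((x >> n) & 1u) == 0u) n++;
  return n;
}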

View file

@@ -590,10 +590,35 @@ private:
vex_prefix(src, nds_enc, dst_enc, pre, VEX_OPCODE_0F, false, vector256);
}
void vex_prefix_0F38(Register dst, Register nds, Address src) {
bool vex_w = false;
bool vector256 = false;
vex_prefix(src, nds->encoding(), dst->encoding(),
VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256);
}
void vex_prefix_0F38_q(Register dst, Register nds, Address src) {
bool vex_w = true;
bool vector256 = false;
vex_prefix(src, nds->encoding(), dst->encoding(),
VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256);
}
int vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc,
VexSimdPrefix pre, VexOpcode opc,
bool vex_w, bool vector256);
int vex_prefix_0F38_and_encode(Register dst, Register nds, Register src) {
bool vex_w = false;
bool vector256 = false;
return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256);
}
int vex_prefix_0F38_and_encode_q(Register dst, Register nds, Register src) {
bool vex_w = true;
bool vector256 = false;
return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256);
}
int vex_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src,
VexSimdPrefix pre, bool vector256 = false,
VexOpcode opc = VEX_OPCODE_0F) {
@@ -897,6 +922,27 @@ private:
void andq(Register dst, Address src);
void andq(Register dst, Register src);
// BMI instructions
void andnl(Register dst, Register src1, Register src2);
void andnl(Register dst, Register src1, Address src2);
void andnq(Register dst, Register src1, Register src2);
void andnq(Register dst, Register src1, Address src2);
void blsil(Register dst, Register src);
void blsil(Register dst, Address src);
void blsiq(Register dst, Register src);
void blsiq(Register dst, Address src);
void blsmskl(Register dst, Register src);
void blsmskl(Register dst, Address src);
void blsmskq(Register dst, Register src);
void blsmskq(Register dst, Address src);
void blsrl(Register dst, Register src);
void blsrl(Register dst, Address src);
void blsrq(Register dst, Register src);
void blsrq(Register dst, Address src);
void bsfl(Register dst, Register src);
void bsrl(Register dst, Register src);
@@ -1574,6 +1620,9 @@ private:
void testq(Register dst, int32_t imm32);
void testq(Register dst, Register src);
// BMI - count trailing zeros
void tzcntl(Register dst, Register src);
void tzcntq(Register dst, Register src);
// Unordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
void ucomisd(XMMRegister dst, Address src);

View file

@@ -250,7 +250,7 @@ inline jint BytecodeInterpreter::VMintSub(jint op1, jint op2) {
return op1 - op2;
}
inline jint BytecodeInterpreter::VMintUshr(jint op1, jint op2) {
inline juint BytecodeInterpreter::VMintUshr(jint op1, jint op2) {
return ((juint) op1) >> (op2 & 0x1f);
}

View file

@@ -574,7 +574,7 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
MethodCounters::invocation_counter_offset() +
InvocationCounter::counter_offset());
const Address backedge_counter (rax,
MethodCounter::backedge_counter_offset() +
MethodCounters::backedge_counter_offset() +
InvocationCounter::counter_offset());
__ get_method_counters(rbx, rax, done);
@@ -982,16 +982,18 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// to save/restore.
address entry_point = __ pc();
const Address constMethod (rbx, Method::const_offset());
const Address access_flags (rbx, Method::access_flags_offset());
const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset());
// rsi/r13 == state/locals rdi == prevstate
const Register locals = rdi;
// get parameter size (always needed)
__ movptr(rcx, constMethod);
__ load_unsigned_short(rcx, size_of_parameters);
{
const Address constMethod (rbx, Method::const_offset());
const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset());
__ movptr(rcx, constMethod);
__ load_unsigned_short(rcx, size_of_parameters);
}
// rbx: Method*
// rcx: size of parameters
@@ -1111,14 +1113,16 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
const Register method = rbx;
const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rdi);
const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp(); // rcx|rscratch1
const Address constMethod (method, Method::const_offset());
const Address size_of_parameters(t, ConstMethod::size_of_parameters_offset());
// allocate space for parameters
// allocate space for parameters
__ movptr(method, STATE(_method));
__ verify_method_ptr(method);
__ movptr(t, constMethod);
__ load_unsigned_short(t, size_of_parameters);
{
const Address constMethod (method, Method::const_offset());
const Address size_of_parameters(t, ConstMethod::size_of_parameters_offset());
__ movptr(t, constMethod);
__ load_unsigned_short(t, size_of_parameters);
}
__ shll(t, 2);
#ifdef _LP64
__ subptr(rsp, t);
@@ -2221,7 +2225,6 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break;
case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;
case Interpreter::method_handle : entry_point = ((InterpreterGenerator*)this)->generate_method_handle_entry(); break;
case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
@@ -2229,7 +2232,10 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
case Interpreter::java_lang_math_abs : // fall thru
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
case Interpreter::java_lang_math_sqrt : // fall thru
case Interpreter::java_lang_math_pow : // fall thru
case Interpreter::java_lang_math_exp : // fall thru
entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
default : ShouldNotReachHere(); break;
@@ -2451,4 +2457,22 @@ int AbstractInterpreter::layout_activation(Method* method,
return frame_size/BytesPerWord;
}
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
switch (method_kind(m)) {
case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
case Interpreter::java_lang_math_tan : // fall thru
case Interpreter::java_lang_math_abs : // fall thru
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : // fall thru
case Interpreter::java_lang_math_pow : // fall thru
case Interpreter::java_lang_math_exp :
return false;
default:
return true;
}
}
#endif // CC_INTERP (all)

View file

@@ -687,6 +687,7 @@ intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
void frame::describe_pd(FrameValues& values, int frame_no) {
if (is_interpreted_frame()) {
#ifndef CC_INTERP
DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
DESCRIBE_FP_OFFSET(interpreter_frame_method);
@@ -695,6 +696,7 @@ void frame::describe_pd(FrameValues& values, int frame_no) {
DESCRIBE_FP_OFFSET(interpreter_frame_locals);
DESCRIBE_FP_OFFSET(interpreter_frame_bcx);
DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
#endif
}
}
#endif

View file

@@ -135,5 +135,11 @@ define_pd_global(uintx, TypeProfileLevel, 111);
\
product(bool, UseCountLeadingZerosInstruction, false, \
"Use count leading zeros instruction") \
\
product(bool, UseCountTrailingZerosInstruction, false, \
"Use count trailing zeros instruction") \
\
product(bool, UseBMI1Instructions, false, \
"Use BMI instructions")
#endif // CPU_X86_VM_GLOBALS_X86_HPP
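Both new flags are product options, so besides the CPUID-driven defaults set up in the vm_version_x86 changes later in this commit they can be toggled explicitly on the command line, for example:

  java -XX:+UseBMI1Instructions -XX:+UseCountTrailingZerosInstruction -version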

View file

@@ -266,20 +266,6 @@ void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, R
addptr(cache, tmp); // construct pointer to cache entry
}
void InterpreterMacroAssembler::get_method_counters(Register method,
Register mcs, Label& skip) {
Label has_counters;
movptr(mcs, Address(method, Method::method_counters_offset()));
testptr(mcs, mcs);
jcc(Assembler::notZero, has_counters);
call_VM(noreg, CAST_FROM_FN_PTR(address,
InterpreterRuntime::build_method_counters), method);
movptr(mcs, Address(method,Method::method_counters_offset()));
testptr(mcs, mcs);
jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
bind(has_counters);
}
// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
Register result, Register index) {
@@ -678,6 +664,20 @@ void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_a
#endif /* !CC_INTERP */
void InterpreterMacroAssembler::get_method_counters(Register method,
Register mcs, Label& skip) {
Label has_counters;
movptr(mcs, Address(method, Method::method_counters_offset()));
testptr(mcs, mcs);
jcc(Assembler::notZero, has_counters);
call_VM(noreg, CAST_FROM_FN_PTR(address,
InterpreterRuntime::build_method_counters), method);
movptr(mcs, Address(method,Method::method_counters_offset()));
testptr(mcs, mcs);
jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
bind(has_counters);
}
// Lock object
//
@@ -1359,6 +1359,19 @@ void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);
}
// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
int increment, int mask,
Register scratch, bool preloaded,
Condition cond, Label* where) {
if (!preloaded) {
movl(scratch, counter_addr);
}
incrementl(scratch, increment);
movl(counter_addr, scratch);
andl(scratch, mask);
jcc(cond, *where);
}
#endif /* CC_INTERP */
@@ -1430,17 +1443,3 @@ void InterpreterMacroAssembler::notify_method_exit(
NOT_CC_INTERP(pop(state));
}
}
// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
int increment, int mask,
Register scratch, bool preloaded,
Condition cond, Label* where) {
if (!preloaded) {
movl(scratch, counter_addr);
}
incrementl(scratch, increment);
movl(counter_addr, scratch);
andl(scratch, mask);
jcc(cond, *where);
}

View file

@@ -77,7 +77,6 @@
void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
void get_method_counters(Register method, Register mcs, Label& skip);
// load cpool->resolved_references(index);
void load_resolved_reference_at_index(Register result, Register index);
@@ -156,6 +155,7 @@
bool install_monitor_exception = true,
bool notify_jvmdi = true);
#endif /* !CC_INTERP */
void get_method_counters(Register method, Register mcs, Label& skip);
// Debugging
void verify_oop(Register reg, TosState state = atos); // only if +VerifyOops && state == atos

View file

@@ -271,20 +271,6 @@ void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
addptr(cache, tmp); // construct pointer to cache entry
}
void InterpreterMacroAssembler::get_method_counters(Register method,
Register mcs, Label& skip) {
Label has_counters;
movptr(mcs, Address(method, Method::method_counters_offset()));
testptr(mcs, mcs);
jcc(Assembler::notZero, has_counters);
call_VM(noreg, CAST_FROM_FN_PTR(address,
InterpreterRuntime::build_method_counters), method);
movptr(mcs, Address(method,Method::method_counters_offset()));
testptr(mcs, mcs);
jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
bind(has_counters);
}
// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
Register result, Register index) {
@@ -676,6 +662,21 @@ void InterpreterMacroAssembler::remove_activation(
#endif // C_INTERP
void InterpreterMacroAssembler::get_method_counters(Register method,
Register mcs, Label& skip) {
Label has_counters;
movptr(mcs, Address(method, Method::method_counters_offset()));
testptr(mcs, mcs);
jcc(Assembler::notZero, has_counters);
call_VM(noreg, CAST_FROM_FN_PTR(address,
InterpreterRuntime::build_method_counters), method);
movptr(mcs, Address(method,Method::method_counters_offset()));
testptr(mcs, mcs);
jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
bind(has_counters);
}
// Lock object
//
// Args:
@@ -1423,6 +1424,20 @@ void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
}
// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
int increment, int mask,
Register scratch, bool preloaded,
Condition cond, Label* where) {
if (!preloaded) {
movl(scratch, counter_addr);
}
incrementl(scratch, increment);
movl(counter_addr, scratch);
andl(scratch, mask);
jcc(cond, *where);
}
#endif // !CC_INTERP
@@ -1491,16 +1506,3 @@ void InterpreterMacroAssembler::notify_method_exit(
}
}
// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
int increment, int mask,
Register scratch, bool preloaded,
Condition cond, Label* where) {
if (!preloaded) {
movl(scratch, counter_addr);
}
incrementl(scratch, increment);
movl(counter_addr, scratch);
andl(scratch, mask);
jcc(cond, *where);
}

View file

@@ -99,7 +99,6 @@
void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
void get_method_counters(Register method, Register mcs, Label& skip);
// load cpool->resolved_references(index);
void load_resolved_reference_at_index(Register result, Register index);
@@ -172,6 +171,7 @@
bool install_monitor_exception = true,
bool notify_jvmdi = true);
#endif // CC_INTERP
void get_method_counters(Register method, Register mcs, Label& skip);
// Object locking
void lock_object (Register lock_reg);

View file

@@ -229,10 +229,12 @@ address InterpreterGenerator::generate_abstract_entry(void) {
// abstract method entry
#ifndef CC_INTERP
// pop return address, reset last_sp to NULL
__ empty_expression_stack();
__ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
#endif
// throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));

View file

@@ -310,10 +310,12 @@ address InterpreterGenerator::generate_abstract_entry(void) {
// abstract method entry
#ifndef CC_INTERP
// pop return address, reset last_sp to NULL
__ empty_expression_stack();
__ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
#endif
// throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address,

View file

@@ -429,7 +429,7 @@ void VM_Version::get_processor_features() {
}
char buf[256];
jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
cores_per_cpu(), threads_per_core(),
cpu_family(), _model, _stepping,
(supports_cmov() ? ", cmov" : ""),
@@ -455,7 +455,9 @@ void VM_Version::get_processor_features() {
(supports_ht() ? ", ht": ""),
(supports_tsc() ? ", tsc": ""),
(supports_tscinv_bit() ? ", tscinvbit": ""),
(supports_tscinv() ? ", tscinv": ""));
(supports_tscinv() ? ", tscinv": ""),
(supports_bmi1() ? ", bmi1" : ""),
(supports_bmi2() ? ", bmi2" : ""));
_features_str = strdup(buf);
// UseSSE is set to the smaller of what hardware supports and what
@@ -600,13 +602,6 @@ void VM_Version::get_processor_features() {
}
}
// Use count leading zeros count instruction if available.
if (supports_lzcnt()) {
if (FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) {
UseCountLeadingZerosInstruction = true;
}
}
// some defaults for AMD family 15h
if ( cpu_family() == 0x15 ) {
// On family 15h processors default is no sw prefetch
@@ -692,6 +687,35 @@ void VM_Version::get_processor_features() {
}
#endif // COMPILER2
// Use count leading zeros count instruction if available.
if (supports_lzcnt()) {
if (FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) {
UseCountLeadingZerosInstruction = true;
}
} else if (UseCountLeadingZerosInstruction) {
warning("lzcnt instruction is not available on this CPU");
FLAG_SET_DEFAULT(UseCountLeadingZerosInstruction, false);
}
if (supports_bmi1()) {
if (FLAG_IS_DEFAULT(UseBMI1Instructions)) {
UseBMI1Instructions = true;
}
} else if (UseBMI1Instructions) {
warning("BMI1 instructions are not available on this CPU");
FLAG_SET_DEFAULT(UseBMI1Instructions, false);
}
// Use count trailing zeros instruction if available
if (supports_bmi1()) {
if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) {
UseCountTrailingZerosInstruction = UseBMI1Instructions;
}
} else if (UseCountTrailingZerosInstruction) {
warning("tzcnt instruction is not available on this CPU");
FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false);
}
// Use population count instruction if available.
if (supports_popcnt()) {
if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {

View file

@@ -141,7 +141,8 @@ public:
struct {
uint32_t LahfSahf : 1,
CmpLegacy : 1,
: 4,
: 3,
lzcnt_intel : 1,
lzcnt : 1,
sse4a : 1,
misalignsse : 1,
@@ -251,7 +252,9 @@ protected:
CPU_AVX2 = (1 << 18),
CPU_AES = (1 << 19),
CPU_ERMS = (1 << 20), // enhanced 'rep movsb/stosb' instructions
CPU_CLMUL = (1 << 21) // carryless multiply for CRC
CPU_CLMUL = (1 << 21), // carryless multiply for CRC
CPU_BMI1 = (1 << 22),
CPU_BMI2 = (1 << 23)
} cpuFeatureFlags;
enum {
@@ -423,6 +426,8 @@ protected:
if (_cpuid_info.sef_cpuid7_ebx.bits.avx2 != 0)
result |= CPU_AVX2;
}
if(_cpuid_info.sef_cpuid7_ebx.bits.bmi1 != 0)
result |= CPU_BMI1;
if (_cpuid_info.std_cpuid1_edx.bits.tsc != 0)
result |= CPU_TSC;
if (_cpuid_info.ext_cpuid7_edx.bits.tsc_invariance != 0)
@@ -444,6 +449,13 @@ protected:
if (_cpuid_info.ext_cpuid1_ecx.bits.sse4a != 0)
result |= CPU_SSE4A;
}
// Intel features.
if(is_intel()) {
if(_cpuid_info.sef_cpuid7_ebx.bits.bmi2 != 0)
result |= CPU_BMI2;
if(_cpuid_info.ext_cpuid1_ecx.bits.lzcnt_intel != 0)
result |= CPU_LZCNT;
}
return result;
}
@@ -560,7 +572,8 @@ public:
static bool supports_aes() { return (_cpuFeatures & CPU_AES) != 0; }
static bool supports_erms() { return (_cpuFeatures & CPU_ERMS) != 0; }
static bool supports_clmul() { return (_cpuFeatures & CPU_CLMUL) != 0; }
static bool supports_bmi1() { return (_cpuFeatures & CPU_BMI1) != 0; }
static bool supports_bmi2() { return (_cpuFeatures & CPU_BMI2) != 0; }
// Intel features
static bool is_intel_family_core() { return is_intel() &&
extended_cpu_family() == CPU_FAMILY_INTEL_CORE; }

View file

@@ -5163,6 +5163,19 @@ instruct countLeadingZerosL_bsr(rRegI dst, eRegL src, eFlagsReg cr) %{
%}
instruct countTrailingZerosI(rRegI dst, rRegI src, eFlagsReg cr) %{
predicate(UseCountTrailingZerosInstruction);
match(Set dst (CountTrailingZerosI src));
effect(KILL cr);
format %{ "TZCNT $dst, $src\t# count trailing zeros (int)" %}
ins_encode %{
__ tzcntl($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg);
%}
instruct countTrailingZerosI_bsf(rRegI dst, rRegI src, eFlagsReg cr) %{
predicate(!UseCountTrailingZerosInstruction);
match(Set dst (CountTrailingZerosI src));
effect(KILL cr);
@@ -5182,6 +5195,30 @@ instruct countTrailingZerosI(rRegI dst, rRegI src, eFlagsReg cr) %{
%}
instruct countTrailingZerosL(rRegI dst, eRegL src, eFlagsReg cr) %{
predicate(UseCountTrailingZerosInstruction);
match(Set dst (CountTrailingZerosL src));
effect(TEMP dst, KILL cr);
format %{ "TZCNT $dst, $src.lo\t# count trailing zeros (long) \n\t"
"JNC done\n\t"
"TZCNT $dst, $src.hi\n\t"
"ADD $dst, 32\n"
"done:" %}
ins_encode %{
Register Rdst = $dst$$Register;
Register Rsrc = $src$$Register;
Label done;
__ tzcntl(Rdst, Rsrc);
__ jccb(Assembler::carryClear, done);
__ tzcntl(Rdst, HIGH_FROM_LOW(Rsrc));
__ addl(Rdst, BitsPerInt);
__ bind(done);
%}
ins_pipe(ialu_reg);
%}
instruct countTrailingZerosL_bsf(rRegI dst, eRegL src, eFlagsReg cr) %{
predicate(!UseCountTrailingZerosInstruction);
match(Set dst (CountTrailingZerosL src));
effect(TEMP dst, KILL cr);
@@ -8027,6 +8064,123 @@ instruct andI_mem_imm(memory dst, immI src, eFlagsReg cr) %{
ins_pipe( ialu_mem_imm );
%}
// BMI1 instructions
instruct andnI_rReg_rReg_rReg(rRegI dst, rRegI src1, rRegI src2, immI_M1 minus_1, eFlagsReg cr) %{
match(Set dst (AndI (XorI src1 minus_1) src2));
predicate(UseBMI1Instructions);
effect(KILL cr);
format %{ "ANDNL $dst, $src1, $src2" %}
ins_encode %{
__ andnl($dst$$Register, $src1$$Register, $src2$$Register);
%}
ins_pipe(ialu_reg);
%}
instruct andnI_rReg_rReg_mem(rRegI dst, rRegI src1, memory src2, immI_M1 minus_1, eFlagsReg cr) %{
match(Set dst (AndI (XorI src1 minus_1) (LoadI src2) ));
predicate(UseBMI1Instructions);
effect(KILL cr);
ins_cost(125);
format %{ "ANDNL $dst, $src1, $src2" %}
ins_encode %{
__ andnl($dst$$Register, $src1$$Register, $src2$$Address);
%}
ins_pipe(ialu_reg_mem);
%}
instruct blsiI_rReg_rReg(rRegI dst, rRegI src, immI0 imm_zero, eFlagsReg cr) %{
match(Set dst (AndI (SubI imm_zero src) src));
predicate(UseBMI1Instructions);
effect(KILL cr);
format %{ "BLSIL $dst, $src" %}
ins_encode %{
__ blsil($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg);
%}
instruct blsiI_rReg_mem(rRegI dst, memory src, immI0 imm_zero, eFlagsReg cr) %{
match(Set dst (AndI (SubI imm_zero (LoadI src) ) (LoadI src) ));
predicate(UseBMI1Instructions);
effect(KILL cr);
ins_cost(125);
format %{ "BLSIL $dst, $src" %}
ins_encode %{
__ blsil($dst$$Register, $src$$Address);
%}
ins_pipe(ialu_reg_mem);
%}
instruct blsmskI_rReg_rReg(rRegI dst, rRegI src, immI_M1 minus_1, eFlagsReg cr)
%{
match(Set dst (XorI (AddI src minus_1) src));
predicate(UseBMI1Instructions);
effect(KILL cr);
format %{ "BLSMSKL $dst, $src" %}
ins_encode %{
__ blsmskl($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg);
%}
instruct blsmskI_rReg_mem(rRegI dst, memory src, immI_M1 minus_1, eFlagsReg cr)
%{
match(Set dst (XorI (AddI (LoadI src) minus_1) (LoadI src) ));
predicate(UseBMI1Instructions);
effect(KILL cr);
ins_cost(125);
format %{ "BLSMSKL $dst, $src" %}
ins_encode %{
__ blsmskl($dst$$Register, $src$$Address);
%}
ins_pipe(ialu_reg_mem);
%}
instruct blsrI_rReg_rReg(rRegI dst, rRegI src, immI_M1 minus_1, eFlagsReg cr)
%{
match(Set dst (AndI (AddI src minus_1) src) );
predicate(UseBMI1Instructions);
effect(KILL cr);
format %{ "BLSRL $dst, $src" %}
ins_encode %{
__ blsrl($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg);
%}
instruct blsrI_rReg_mem(rRegI dst, memory src, immI_M1 minus_1, eFlagsReg cr)
%{
match(Set dst (AndI (AddI (LoadI src) minus_1) (LoadI src) ));
predicate(UseBMI1Instructions);
effect(KILL cr);
ins_cost(125);
format %{ "BLSRL $dst, $src" %}
ins_encode %{
__ blsrl($dst$$Register, $src$$Address);
%}
ins_pipe(ialu_reg_mem);
%}
// Or Instructions
// Or Register with Register
instruct orI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
@@ -8649,6 +8803,210 @@ instruct andL_eReg_mem(eRegL dst, load_long_memory mem, eFlagsReg cr) %{
ins_pipe( ialu_reg_long_mem );
%}
// BMI1 instructions
instruct andnL_eReg_eReg_eReg(eRegL dst, eRegL src1, eRegL src2, immL_M1 minus_1, eFlagsReg cr) %{
match(Set dst (AndL (XorL src1 minus_1) src2));
predicate(UseBMI1Instructions);
effect(KILL cr, TEMP dst);
format %{ "ANDNL $dst.lo, $src1.lo, $src2.lo\n\t"
"ANDNL $dst.hi, $src1.hi, $src2.hi"
%}
ins_encode %{
Register Rdst = $dst$$Register;
Register Rsrc1 = $src1$$Register;
Register Rsrc2 = $src2$$Register;
__ andnl(Rdst, Rsrc1, Rsrc2);
__ andnl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rsrc1), HIGH_FROM_LOW(Rsrc2));
%}
ins_pipe(ialu_reg_reg_long);
%}
instruct andnL_eReg_eReg_mem(eRegL dst, eRegL src1, memory src2, immL_M1 minus_1, eFlagsReg cr) %{
match(Set dst (AndL (XorL src1 minus_1) (LoadL src2) ));
predicate(UseBMI1Instructions);
effect(KILL cr, TEMP dst);
ins_cost(125);
format %{ "ANDNL $dst.lo, $src1.lo, $src2\n\t"
"ANDNL $dst.hi, $src1.hi, $src2+4"
%}
ins_encode %{
Register Rdst = $dst$$Register;
Register Rsrc1 = $src1$$Register;
Address src2_hi = Address::make_raw($src2$$base, $src2$$index, $src2$$scale, $src2$$disp + 4, relocInfo::none);
__ andnl(Rdst, Rsrc1, $src2$$Address);
__ andnl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rsrc1), src2_hi);
%}
ins_pipe(ialu_reg_mem);
%}
instruct blsiL_eReg_eReg(eRegL dst, eRegL src, immL0 imm_zero, eFlagsReg cr) %{
match(Set dst (AndL (SubL imm_zero src) src));
predicate(UseBMI1Instructions);
effect(KILL cr, TEMP dst);
format %{ "MOVL $dst.hi, 0\n\t"
"BLSIL $dst.lo, $src.lo\n\t"
"JNZ done\n\t"
"BLSIL $dst.hi, $src.hi\n"
"done:"
%}
ins_encode %{
Label done;
Register Rdst = $dst$$Register;
Register Rsrc = $src$$Register;
__ movl(HIGH_FROM_LOW(Rdst), 0);
__ blsil(Rdst, Rsrc);
__ jccb(Assembler::notZero, done);
__ blsil(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rsrc));
__ bind(done);
%}
ins_pipe(ialu_reg);
%}
instruct blsiL_eReg_mem(eRegL dst, memory src, immL0 imm_zero, eFlagsReg cr) %{
match(Set dst (AndL (SubL imm_zero (LoadL src) ) (LoadL src) ));
predicate(UseBMI1Instructions);
effect(KILL cr, TEMP dst);
ins_cost(125);
format %{ "MOVL $dst.hi, 0\n\t"
"BLSIL $dst.lo, $src\n\t"
"JNZ done\n\t"
"BLSIL $dst.hi, $src+4\n"
"done:"
%}
ins_encode %{
Label done;
Register Rdst = $dst$$Register;
Address src_hi = Address::make_raw($src$$base, $src$$index, $src$$scale, $src$$disp + 4, relocInfo::none);
__ movl(HIGH_FROM_LOW(Rdst), 0);
__ blsil(Rdst, $src$$Address);
__ jccb(Assembler::notZero, done);
__ blsil(HIGH_FROM_LOW(Rdst), src_hi);
__ bind(done);
%}
ins_pipe(ialu_reg_mem);
%}
instruct blsmskL_eReg_eReg(eRegL dst, eRegL src, immL_M1 minus_1, eFlagsReg cr)
%{
match(Set dst (XorL (AddL src minus_1) src));
predicate(UseBMI1Instructions);
effect(KILL cr, TEMP dst);
format %{ "MOVL $dst.hi, 0\n\t"
"BLSMSKL $dst.lo, $src.lo\n\t"
"JNC done\n\t"
"BLSMSKL $dst.hi, $src.hi\n"
"done:"
%}
ins_encode %{
Label done;
Register Rdst = $dst$$Register;
Register Rsrc = $src$$Register;
__ movl(HIGH_FROM_LOW(Rdst), 0);
__ blsmskl(Rdst, Rsrc);
__ jccb(Assembler::carryClear, done);
__ blsmskl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rsrc));
__ bind(done);
%}
ins_pipe(ialu_reg);
%}
instruct blsmskL_eReg_mem(eRegL dst, memory src, immL_M1 minus_1, eFlagsReg cr)
%{
match(Set dst (XorL (AddL (LoadL src) minus_1) (LoadL src) ));
predicate(UseBMI1Instructions);
effect(KILL cr, TEMP dst);
ins_cost(125);
format %{ "MOVL $dst.hi, 0\n\t"
"BLSMSKL $dst.lo, $src\n\t"
"JNC done\n\t"
"BLSMSKL $dst.hi, $src+4\n"
"done:"
%}
ins_encode %{
Label done;
Register Rdst = $dst$$Register;
Address src_hi = Address::make_raw($src$$base, $src$$index, $src$$scale, $src$$disp + 4, relocInfo::none);
__ movl(HIGH_FROM_LOW(Rdst), 0);
__ blsmskl(Rdst, $src$$Address);
__ jccb(Assembler::carryClear, done);
__ blsmskl(HIGH_FROM_LOW(Rdst), src_hi);
__ bind(done);
%}
ins_pipe(ialu_reg_mem);
%}
instruct blsrL_eReg_eReg(eRegL dst, eRegL src, immL_M1 minus_1, eFlagsReg cr)
%{
match(Set dst (AndL (AddL src minus_1) src) );
predicate(UseBMI1Instructions);
effect(KILL cr, TEMP dst);
format %{ "MOVL $dst.hi, $src.hi\n\t"
"BLSRL $dst.lo, $src.lo\n\t"
"JNC done\n\t"
"BLSRL $dst.hi, $src.hi\n"
"done:"
%}
ins_encode %{
Label done;
Register Rdst = $dst$$Register;
Register Rsrc = $src$$Register;
__ movl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rsrc));
__ blsrl(Rdst, Rsrc);
__ jccb(Assembler::carryClear, done);
__ blsrl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rsrc));
__ bind(done);
%}
ins_pipe(ialu_reg);
%}
instruct blsrL_eReg_mem(eRegL dst, memory src, immL_M1 minus_1, eFlagsReg cr)
%{
match(Set dst (AndL (AddL (LoadL src) minus_1) (LoadL src) ));
predicate(UseBMI1Instructions);
effect(KILL cr, TEMP dst);
ins_cost(125);
format %{ "MOVL $dst.hi, $src+4\n\t"
"BLSRL $dst.lo, $src\n\t"
"JNC done\n\t"
"BLSRL $dst.hi, $src+4\n"
"done:"
%}
ins_encode %{
Label done;
Register Rdst = $dst$$Register;
Address src_hi = Address::make_raw($src$$base, $src$$index, $src$$scale, $src$$disp + 4, relocInfo::none);
__ movl(HIGH_FROM_LOW(Rdst), src_hi);
__ blsrl(Rdst, $src$$Address);
__ jccb(Assembler::carryClear, done);
__ blsrl(HIGH_FROM_LOW(Rdst), src_hi);
__ bind(done);
%}
ins_pipe(ialu_reg_mem);
%}
// Or Long Register with Register
instruct orl_eReg(eRegL dst, eRegL src, eFlagsReg cr) %{
match(Set dst (OrL dst src));
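A note on the eRegL variants above: on this 32-bit port each 64-bit BMI1 operation is built from two 32-bit instructions applied to the low and high words, with the flags produced by the low-word step deciding whether the high word still needs work. The BLSI case, restated as a C++ sketch (illustrative names, assuming the documented BLSI flag behavior; not HotSpot code):

// 64-bit "isolate lowest set bit" from two 32-bit BLSI steps, mirroring
// blsiL_eReg_eReg: if the low word produced a non-zero result it already
// holds the answer and the high word stays zero; otherwise the lowest set
// bit, if any, is in the high word.
#include <cstdint>
inline uint64_t blsi64_sketch(uint32_t lo, uint32_t hi) {
  uint32_t out_hi = 0;
  uint32_t out_lo = lo & (0u - lo);   // BLSIL dst.lo, src.lo (sets ZF from the result)
  if (out_lo == 0) {                  // JNZ done -- skip the high word when taken
    out_hi = hi & (0u - hi);          // BLSIL dst.hi, src.hi
  }
  return ((uint64_t)out_hi << 32) | out_lo;
}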

View file

@@ -6022,6 +6022,19 @@ instruct countLeadingZerosL_bsr(rRegI dst, rRegL src, rFlagsReg cr) %{
%}
instruct countTrailingZerosI(rRegI dst, rRegI src, rFlagsReg cr) %{
predicate(UseCountTrailingZerosInstruction);
match(Set dst (CountTrailingZerosI src));
effect(KILL cr);
format %{ "tzcntl $dst, $src\t# count trailing zeros (int)" %}
ins_encode %{
__ tzcntl($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg);
%}
instruct countTrailingZerosI_bsf(rRegI dst, rRegI src, rFlagsReg cr) %{
predicate(!UseCountTrailingZerosInstruction);
match(Set dst (CountTrailingZerosI src));
effect(KILL cr);
@@ -6041,6 +6054,19 @@ instruct countTrailingZerosI(rRegI dst, rRegI src, rFlagsReg cr) %{
%}
instruct countTrailingZerosL(rRegI dst, rRegL src, rFlagsReg cr) %{
predicate(UseCountTrailingZerosInstruction);
match(Set dst (CountTrailingZerosL src));
effect(KILL cr);
format %{ "tzcntq $dst, $src\t# count trailing zeros (long)" %}
ins_encode %{
__ tzcntq($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg);
%}
instruct countTrailingZerosL_bsf(rRegI dst, rRegL src, rFlagsReg cr) %{
predicate(!UseCountTrailingZerosInstruction);
match(Set dst (CountTrailingZerosL src));
effect(KILL cr);
@@ -8622,6 +8648,122 @@ instruct andI_mem_imm(memory dst, immI src, rFlagsReg cr)
ins_pipe(ialu_mem_imm);
%}
// BMI1 instructions
instruct andnI_rReg_rReg_mem(rRegI dst, rRegI src1, memory src2, immI_M1 minus_1, rFlagsReg cr) %{
match(Set dst (AndI (XorI src1 minus_1) (LoadI src2)));
predicate(UseBMI1Instructions);
effect(KILL cr);
ins_cost(125);
format %{ "andnl $dst, $src1, $src2" %}
ins_encode %{
__ andnl($dst$$Register, $src1$$Register, $src2$$Address);
%}
ins_pipe(ialu_reg_mem);
%}
instruct andnI_rReg_rReg_rReg(rRegI dst, rRegI src1, rRegI src2, immI_M1 minus_1, rFlagsReg cr) %{
match(Set dst (AndI (XorI src1 minus_1) src2));
predicate(UseBMI1Instructions);
effect(KILL cr);
format %{ "andnl $dst, $src1, $src2" %}
ins_encode %{
__ andnl($dst$$Register, $src1$$Register, $src2$$Register);
%}
ins_pipe(ialu_reg);
%}
instruct blsiI_rReg_rReg(rRegI dst, rRegI src, immI0 imm_zero, rFlagsReg cr) %{
match(Set dst (AndI (SubI imm_zero src) src));
predicate(UseBMI1Instructions);
effect(KILL cr);
format %{ "blsil $dst, $src" %}
ins_encode %{
__ blsil($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg);
%}
instruct blsiI_rReg_mem(rRegI dst, memory src, immI0 imm_zero, rFlagsReg cr) %{
match(Set dst (AndI (SubI imm_zero (LoadI src) ) (LoadI src) ));
predicate(UseBMI1Instructions);
effect(KILL cr);
ins_cost(125);
format %{ "blsil $dst, $src" %}
ins_encode %{
__ blsil($dst$$Register, $src$$Address);
%}
ins_pipe(ialu_reg_mem);
%}
instruct blsmskI_rReg_mem(rRegI dst, memory src, immI_M1 minus_1, rFlagsReg cr)
%{
match(Set dst (XorI (AddI (LoadI src) minus_1) (LoadI src) ) );
predicate(UseBMI1Instructions);
effect(KILL cr);
ins_cost(125);
format %{ "blsmskl $dst, $src" %}
ins_encode %{
__ blsmskl($dst$$Register, $src$$Address);
%}
ins_pipe(ialu_reg_mem);
%}
instruct blsmskI_rReg_rReg(rRegI dst, rRegI src, immI_M1 minus_1, rFlagsReg cr)
%{
match(Set dst (XorI (AddI src minus_1) src));
predicate(UseBMI1Instructions);
effect(KILL cr);
format %{ "blsmskl $dst, $src" %}
ins_encode %{
__ blsmskl($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg);
%}
instruct blsrI_rReg_rReg(rRegI dst, rRegI src, immI_M1 minus_1, rFlagsReg cr)
%{
match(Set dst (AndI (AddI src minus_1) src) );
predicate(UseBMI1Instructions);
effect(KILL cr);
format %{ "blsrl $dst, $src" %}
ins_encode %{
__ blsrl($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg_mem);
%}
instruct blsrI_rReg_mem(rRegI dst, memory src, immI_M1 minus_1, rFlagsReg cr)
%{
match(Set dst (AndI (AddI (LoadI src) minus_1) (LoadI src) ) );
predicate(UseBMI1Instructions);
effect(KILL cr);
ins_cost(125);
format %{ "blsrl $dst, $src" %}
ins_encode %{
__ blsrl($dst$$Register, $src$$Address);
%}
ins_pipe(ialu_reg);
%}
// Or Instructions
// Or Register with Register
instruct orI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
@@ -8853,6 +8995,122 @@ instruct andL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
ins_pipe(ialu_mem_imm);
%}
// BMI1 instructions
instruct andnL_rReg_rReg_mem(rRegL dst, rRegL src1, memory src2, immL_M1 minus_1, rFlagsReg cr) %{
match(Set dst (AndL (XorL src1 minus_1) (LoadL src2)));
predicate(UseBMI1Instructions);
effect(KILL cr);
ins_cost(125);
format %{ "andnq $dst, $src1, $src2" %}
ins_encode %{
__ andnq($dst$$Register, $src1$$Register, $src2$$Address);
%}
ins_pipe(ialu_reg_mem);
%}
instruct andnL_rReg_rReg_rReg(rRegL dst, rRegL src1, rRegL src2, immL_M1 minus_1, rFlagsReg cr) %{
match(Set dst (AndL (XorL src1 minus_1) src2));
predicate(UseBMI1Instructions);
effect(KILL cr);
format %{ "andnq $dst, $src1, $src2" %}
ins_encode %{
__ andnq($dst$$Register, $src1$$Register, $src2$$Register);
%}
ins_pipe(ialu_reg_mem);
%}
instruct blsiL_rReg_rReg(rRegL dst, rRegL src, immL0 imm_zero, rFlagsReg cr) %{
match(Set dst (AndL (SubL imm_zero src) src));
predicate(UseBMI1Instructions);
effect(KILL cr);
format %{ "blsiq $dst, $src" %}
ins_encode %{
__ blsiq($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg);
%}
instruct blsiL_rReg_mem(rRegL dst, memory src, immL0 imm_zero, rFlagsReg cr) %{
match(Set dst (AndL (SubL imm_zero (LoadL src) ) (LoadL src) ));
predicate(UseBMI1Instructions);
effect(KILL cr);
ins_cost(125);
format %{ "blsiq $dst, $src" %}
ins_encode %{
__ blsiq($dst$$Register, $src$$Address);
%}
ins_pipe(ialu_reg_mem);
%}
instruct blsmskL_rReg_mem(rRegL dst, memory src, immL_M1 minus_1, rFlagsReg cr)
%{
match(Set dst (XorL (AddL (LoadL src) minus_1) (LoadL src) ) );
predicate(UseBMI1Instructions);
effect(KILL cr);
ins_cost(125);
format %{ "blsmskq $dst, $src" %}
ins_encode %{
__ blsmskq($dst$$Register, $src$$Address);
%}
ins_pipe(ialu_reg_mem);
%}
instruct blsmskL_rReg_rReg(rRegL dst, rRegL src, immL_M1 minus_1, rFlagsReg cr)
%{
match(Set dst (XorL (AddL src minus_1) src));
predicate(UseBMI1Instructions);
effect(KILL cr);
format %{ "blsmskq $dst, $src" %}
ins_encode %{
__ blsmskq($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg);
%}
instruct blsrL_rReg_rReg(rRegL dst, rRegL src, immL_M1 minus_1, rFlagsReg cr)
%{
match(Set dst (AndL (AddL src minus_1) src) );
predicate(UseBMI1Instructions);
effect(KILL cr);
format %{ "blsrq $dst, $src" %}
ins_encode %{
__ blsrq($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg);
%}
instruct blsrL_rReg_mem(rRegL dst, memory src, immL_M1 minus_1, rFlagsReg cr)
%{
match(Set dst (AndL (AddL (LoadL src) minus_1) (LoadL src)) );
predicate(UseBMI1Instructions);
effect(KILL cr);
ins_cost(125);
format %{ "blsrq $dst, $src" %}
ins_encode %{
__ blsrq($dst$$Register, $src$$Address);
%}
ins_pipe(ialu_reg);
%}
// Or Instructions
// Or Register with Register
instruct orL_rReg(rRegL dst, rRegL src, rFlagsReg cr)

View file

@@ -1135,15 +1135,10 @@ jlong os::javaTimeNanos() {
}
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
{
// gettimeofday - based on time in seconds since the Epoch thus does not wrap
info_ptr->max_value = ALL_64_BITS;
// gettimeofday is a real time clock so it skips
info_ptr->may_skip_backward = true;
info_ptr->may_skip_forward = true;
}
info_ptr->max_value = ALL_64_BITS;
// mread_real_time() is monotonic (see 'os::javaTimeNanos()')
info_ptr->may_skip_backward = false;
info_ptr->may_skip_forward = false;
info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time
}
@@ -2799,105 +2794,6 @@ size_t os::read(int fd, void *buf, unsigned int nBytes) {
return ::read(fd, buf, nBytes);
}
#define NANOSECS_PER_MILLISEC 1000000
int os::sleep(Thread* thread, jlong millis, bool interruptible) {
assert(thread == Thread::current(), "thread consistency check");
// Prevent nasty overflow in deadline calculation
// by handling long sleeps similar to solaris or windows.
const jlong limit = INT_MAX;
int result;
while (millis > limit) {
if ((result = os::sleep(thread, limit, interruptible)) != OS_OK) {
return result;
}
millis -= limit;
}
ParkEvent * const slp = thread->_SleepEvent;
slp->reset();
OrderAccess::fence();
if (interruptible) {
jlong prevtime = javaTimeNanos();
// Prevent precision loss and too long sleeps
jlong deadline = prevtime + millis * NANOSECS_PER_MILLISEC;
for (;;) {
if (os::is_interrupted(thread, true)) {
return OS_INTRPT;
}
jlong newtime = javaTimeNanos();
assert(newtime >= prevtime, "time moving backwards");
// Doing prevtime and newtime in microseconds doesn't help precision,
// and trying to round up to avoid lost milliseconds can result in a
// too-short delay.
millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
if (millis <= 0) {
return OS_OK;
}
// Stop sleeping if we passed the deadline
if (newtime >= deadline) {
return OS_OK;
}
prevtime = newtime;
{
assert(thread->is_Java_thread(), "sanity check");
JavaThread *jt = (JavaThread *) thread;
ThreadBlockInVM tbivm(jt);
OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);
jt->set_suspend_equivalent();
slp->park(millis);
// were we externally suspended while we were waiting?
jt->check_and_wait_while_suspended();
}
}
} else {
OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
jlong prevtime = javaTimeNanos();
// Prevent precision loss and too long sleeps
jlong deadline = prevtime + millis * NANOSECS_PER_MILLISEC;
for (;;) {
// It'd be nice to avoid the back-to-back javaTimeNanos() calls on
// the 1st iteration ...
jlong newtime = javaTimeNanos();
if (newtime - prevtime < 0) {
// time moving backwards, should only happen if no monotonic clock
// not a guarantee() because JVM should not abort on kernel/glibc bugs
// - HS14 Commented out as not implemented.
// - TODO Maybe we should implement it?
//assert(!Aix::supports_monotonic_clock(), "time moving backwards");
} else {
millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
}
if (millis <= 0) break;
if (newtime >= deadline) {
break;
}
prevtime = newtime;
slp->park(millis);
}
return OS_OK;
}
}
void os::naked_short_sleep(jlong ms) {
struct timespec req;
@@ -3246,50 +3142,6 @@ static void do_resume(OSThread* osthread) {
guarantee(osthread->sr.is_running(), "Must be running!");
}
////////////////////////////////////////////////////////////////////////////////
// interrupt support
void os::interrupt(Thread* thread) {
assert(Thread::current() == thread || Threads_lock->owned_by_self(),
"possibility of dangling Thread pointer");
OSThread* osthread = thread->osthread();
if (!osthread->interrupted()) {
osthread->set_interrupted(true);
// More than one thread can get here with the same value of osthread,
// resulting in multiple notifications. We do, however, want the store
// to interrupted() to be visible to other threads before we execute unpark().
OrderAccess::fence();
ParkEvent * const slp = thread->_SleepEvent;
if (slp != NULL) slp->unpark();
}
// For JSR166. Unpark even if interrupt status already was set
if (thread->is_Java_thread())
((JavaThread*)thread)->parker()->unpark();
ParkEvent * ev = thread->_ParkEvent;
if (ev != NULL) ev->unpark();
}
bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
assert(Thread::current() == thread || Threads_lock->owned_by_self(),
"possibility of dangling Thread pointer");
OSThread* osthread = thread->osthread();
bool interrupted = osthread->interrupted();
if (interrupted && clear_interrupted) {
osthread->set_interrupted(false);
// consider thread->_SleepEvent->reset() ... optional optimization
}
return interrupted;
}
///////////////////////////////////////////////////////////////////////////////////
// signal handling (except suspend/resume)

View file

@@ -283,4 +283,10 @@ inline int os::set_sock_opt(int fd, int level, int optname,
const char* optval, socklen_t optlen) {
return ::setsockopt(fd, level, optname, optval, optlen);
}
inline bool os::supports_monotonic_clock() {
// mread_real_time() is monotonic on AIX (see os::javaTimeNanos() comments)
return true;
}
#endif // OS_AIX_VM_OS_AIX_INLINE_HPP

View file

@@ -5284,7 +5284,6 @@ jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
static bool proc_task_unchecked = true;
static const char *proc_stat_path = "/proc/%d/stat";
pid_t tid = thread->osthread()->thread_id();
char *s;
char stat[2048];
@@ -5297,6 +5296,8 @@ static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
long ldummy;
FILE *fp;
snprintf(proc_name, 64, "/proc/%d/stat", tid);
// The /proc/<tid>/stat aggregates per-process usage on
// new Linux kernels 2.6+ where NPTL is supported.
// The /proc/self/task/<tid>/stat still has the per-thread usage.
@@ -5308,12 +5309,11 @@ static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
proc_task_unchecked = false;
fp = fopen("/proc/self/task", "r");
if (fp != NULL) {
proc_stat_path = "/proc/self/task/%d/stat";
snprintf(proc_name, 64, "/proc/self/task/%d/stat", tid);
fclose(fp);
}
}
sprintf(proc_name, proc_stat_path, tid);
fp = fopen(proc_name, "r");
if ( fp == NULL ) return -1;
statlen = fread(stat, 1, 2047, fp);
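
For context on the hunk above: on NPTL kernels /proc/<pid>/stat carries per-process totals, so per-thread figures have to come from /proc/self/task/<tid>/stat. A self-contained sketch of reading a thread's user and system CPU ticks from that file (a hypothetical helper, not the committed code; field positions follow proc(5)):

#include <cstdio>
#include <cstring>
#include <sys/syscall.h>
#include <unistd.h>

// Fills the calling thread's user/system CPU time, in clock ticks
// (units of sysconf(_SC_CLK_TCK)); returns false on any parse failure.
static bool thread_cpu_ticks(long* user_ticks, long* sys_ticks) {
  char path[64];
  pid_t tid = (pid_t) syscall(SYS_gettid);               // kernel id of this thread
  snprintf(path, sizeof(path), "/proc/self/task/%d/stat", (int) tid);

  FILE* fp = fopen(path, "r");
  if (fp == NULL) return false;
  char buf[2048];
  size_t len = fread(buf, 1, sizeof(buf) - 1, fp);
  fclose(fp);
  buf[len] = '\0';

  // The comm field may contain spaces, so parse from its closing ')'.
  // Fields after it: state ppid pgrp session tty_nr tpgid flags
  //                  minflt cminflt majflt cmajflt utime stime ...
  const char* p = strrchr(buf, ')');
  if (p == NULL) return false;
  return sscanf(p + 2,
                "%*c %*d %*d %*d %*d %*d %*u %*u %*u %*u %*u %ld %ld",
                user_ticks, sys_ticks) == 2;
}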

View file

@@ -660,6 +660,7 @@ int InstructForm::memory_operand(FormDict &globals) const {
int USE_of_memory = 0;
int DEF_of_memory = 0;
const char* last_memory_DEF = NULL; // to test DEF/USE pairing in asserts
const char* last_memory_USE = NULL;
Component *unique = NULL;
Component *comp = NULL;
ComponentList &components = (ComponentList &)_components;
@@ -681,7 +682,16 @@ int InstructForm::memory_operand(FormDict &globals) const {
assert(0 == strcmp(last_memory_DEF, comp->_name), "every memory DEF is followed by a USE of the same name");
last_memory_DEF = NULL;
}
USE_of_memory++;
// Handles same memory being used multiple times in the case of BMI1 instructions.
if (last_memory_USE != NULL) {
if (strcmp(comp->_name, last_memory_USE) != 0) {
USE_of_memory++;
}
} else {
USE_of_memory++;
}
last_memory_USE = comp->_name;
if (DEF_of_memory == 0) // defs take precedence
unique = comp;
} else {
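
The hunk above changes how memory USEs are counted so that back-to-back USEs of the same memory operand (as some BMI1 instruction forms produce) are counted once. The counting idea in isolation, as a rough sketch using made-up names rather than adlc's Component types:

#include <cstddef>
#include <cstring>

// Count names, treating consecutive repeats of the same name as a single use.
static int count_memory_uses(const char* const* names, size_t n) {
  int uses = 0;
  const char* last = NULL;
  for (size_t i = 0; i < n; i++) {
    if (last == NULL || strcmp(names[i], last) != 0) {
      uses++;                // first name, or different from the previous one
    }
    last = names[i];         // remember for the duplicate check
  }
  return uses;
}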

View file

@@ -1436,7 +1436,7 @@ void GraphBuilder::method_return(Value x) {
bool need_mem_bar = false;
if (method()->name() == ciSymbol::object_initializer_name() &&
scope()->wrote_final()) {
(scope()->wrote_final() || (AlwaysSafeConstructors && scope()->wrote_fields()))) {
need_mem_bar = true;
}
@@ -1550,6 +1550,10 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
scope()->set_wrote_final();
}
if (code == Bytecodes::_putfield) {
scope()->set_wrote_fields();
}
const int offset = !needs_patching ? field->offset() : -1;
switch (code) {
case Bytecodes::_getstatic: {
@@ -3767,11 +3771,14 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, Bytecode
}
// now perform tests that are based on flag settings
if (callee->force_inline()) {
if (inline_level() > MaxForceInlineLevel) INLINE_BAILOUT("MaxForceInlineLevel");
print_inlining(callee, "force inline by annotation");
} else if (callee->should_inline()) {
print_inlining(callee, "force inline by CompileOracle");
if (callee->force_inline() || callee->should_inline()) {
if (inline_level() > MaxForceInlineLevel ) INLINE_BAILOUT("MaxForceInlineLevel");
if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("recursive inlining too deep");
const char* msg = "";
if (callee->force_inline()) msg = "force inline by annotation";
if (callee->should_inline()) msg = "force inline by CompileOracle";
print_inlining(callee, msg);
} else {
// use heuristic controls on inlining
if (inline_level() > MaxInlineLevel ) INLINE_BAILOUT("inlining too deep");
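
For background on the wrote_fields/AlwaysSafeConstructors hunks above: the barrier requested at constructor return plays the same role as a release fence written by hand, keeping field initialization from being reordered past publication of the new object (the reading side still needs an acquire, which is outside this sketch). An illustrative analogue, not the code C1 generates:

#include <atomic>

class Point {
 public:
  Point(int x, int y) : _x(x), _y(y) {
    // Release fence at the end of the constructor: the field stores above
    // cannot drift below the point where the object reference is published.
    std::atomic_thread_fence(std::memory_order_release);
  }
  int x() const { return _x; }
  int y() const { return _y; }
 private:
  int _x, _y;
};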

View file

@@ -142,6 +142,7 @@ IRScope::IRScope(Compilation* compilation, IRScope* caller, int caller_bci, ciMe
_number_of_locks = 0;
_monitor_pairing_ok = method->has_balanced_monitors();
_wrote_final = false;
_wrote_fields = false;
_start = NULL;
if (osr_bci == -1) {

View file

@@ -150,6 +150,7 @@ class IRScope: public CompilationResourceObj {
int _number_of_locks; // the number of monitor lock slots needed
bool _monitor_pairing_ok; // the monitor pairing info
bool _wrote_final; // has written final field
bool _wrote_fields; // has written fields
BlockBegin* _start; // the start block, successors are method entries
BitMap _requires_phi_function; // bit is set if phi functions at loop headers are necessary for a local variable
@@ -184,6 +185,9 @@ class IRScope: public CompilationResourceObj {
BlockBegin* start() const { return _start; }
void set_wrote_final() { _wrote_final = true; }
bool wrote_final () const { return _wrote_final; }
void set_wrote_fields() { _wrote_fields = true; }
bool wrote_fields () const { return _wrote_fields; }
};

View file

@@ -1734,7 +1734,8 @@ void LIRGenerator::do_StoreField(StoreField* x) {
(info ? new CodeEmitInfo(info) : NULL));
}
if (is_volatile && !needs_patching) {
bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
if (needs_atomic_access && !needs_patching) {
volatile_field_store(value.result(), address, info);
} else {
LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
@@ -1807,7 +1808,8 @@ void LIRGenerator::do_LoadField(LoadField* x) {
address = generate_address(object.result(), x->offset(), field_type);
}
if (is_volatile && !needs_patching) {
bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
if (needs_atomic_access && !needs_patching) {
volatile_field_load(address, reg, info);
} else {
LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
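
The two hunks above route long/double field accesses through the volatile code path whenever AlwaysAtomicAccesses is set. The underlying concern, sketched outside HotSpot: a plain 64-bit store can be split into two 32-bit stores on 32-bit targets, so a concurrent reader may observe a torn value unless the access is made atomic (the types and helpers below are illustrative only):

#include <atomic>
#include <cstdint>

struct PlainHolder  { int64_t value; };               // a plain store may tear on 32-bit VMs
struct AtomicHolder { std::atomic<int64_t> value; };  // the store is always indivisible

static void store_plain(PlainHolder* h, int64_t v)  { h->value = v; }
static void store_atomic(AtomicHolder* h, int64_t v) {
  // Relaxed order is enough for atomicity alone; volatile semantics add ordering on top.
  h->value.store(v, std::memory_order_relaxed);
}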

View file

@@ -809,11 +809,10 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
int bci = vfst.bci();
Bytecodes::Code code = caller_method()->java_code_at(bci);
#ifndef PRODUCT
// this is used by assertions in the access_field_patching_id
BasicType patch_field_type = T_ILLEGAL;
#endif // PRODUCT
bool deoptimize_for_volatile = false;
bool deoptimize_for_atomic = false;
int patch_field_offset = -1;
KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
@@ -839,11 +838,24 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
// is the path for patching field offsets. load_klass is only
// used for patching references to oops which don't need special
// handling in the volatile case.
deoptimize_for_volatile = result.access_flags().is_volatile();
#ifndef PRODUCT
// If we are patching a field which should be atomic, then
// the generated code is not correct either, force deoptimizing.
// We need to only cover T_LONG and T_DOUBLE fields, as we can
// break access atomicity only for them.
// Strictly speaking, the deoptimization on 64-bit platforms
// is unnecessary, and T_LONG stores on 32-bit platforms need
// to be handled by special patching code when AlwaysAtomicAccesses
// becomes product feature. At this point, we are still going
// for the deoptimization for consistency against volatile
// accesses.
patch_field_type = result.field_type();
#endif
deoptimize_for_atomic = (AlwaysAtomicAccesses && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG));
} else if (load_klass_or_mirror_patch_id) {
Klass* k = NULL;
switch (code) {
@@ -918,13 +930,19 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
ShouldNotReachHere();
}
if (deoptimize_for_volatile) {
// At compile time we assumed the field wasn't volatile but after
// loading it turns out it was volatile so we have to throw the
if (deoptimize_for_volatile || deoptimize_for_atomic) {
// At compile time we assumed the field wasn't volatile/atomic but after
// loading it turns out it was volatile/atomic so we have to throw the
// compiled code out and let it be regenerated.
if (TracePatching) {
tty->print_cr("Deoptimizing for patching volatile field reference");
if (deoptimize_for_volatile) {
tty->print_cr("Deoptimizing for patching volatile field reference");
}
if (deoptimize_for_atomic) {
tty->print_cr("Deoptimizing for patching atomic field reference");
}
}
// It's possible the nmethod was invalidated in the last
// safepoint, but if it's still alive then make it not_entrant.
nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());

View file

@@ -724,6 +724,11 @@ ciMethod* ciMethod::find_monomorphic_target(ciInstanceKlass* caller,
VM_ENTRY_MARK;
// Disable CHA for default methods for now
if (root_m->get_Method()->is_default_method()) {
return NULL;
}
methodHandle target;
{
MutexLocker locker(Compile_lock);

View file

@@ -87,8 +87,9 @@ void ciMethodData::load_extra_data() {
DataLayout* dp_dst = extra_data_base();
for (;; dp_src = MethodData::next_extra(dp_src), dp_dst = MethodData::next_extra(dp_dst)) {
assert(dp_src < end_src, "moved past end of extra data");
assert(dp_src->tag() == dp_dst->tag(), err_msg("should be same tags %d != %d", dp_src->tag(), dp_dst->tag()));
switch(dp_src->tag()) {
// New traps in the MDO can be added as we translate the copy so
// look at the entries in the copy.
switch(dp_dst->tag()) {
case DataLayout::speculative_trap_data_tag: {
ciSpeculativeTrapData* data_dst = new ciSpeculativeTrapData(dp_dst);
SpeculativeTrapData* data_src = new SpeculativeTrapData(dp_src);
@@ -102,7 +103,7 @@ void ciMethodData::load_extra_data() {
// An empty slot or ArgInfoData entry marks the end of the trap data
return;
default:
fatal(err_msg("bad tag = %d", dp_src->tag()));
fatal(err_msg("bad tag = %d", dp_dst->tag()));
}
}
}

Some files were not shown because too many files have changed in this diff.