Lana Steuck 2013-10-17 16:12:58 -07:00
commit 4ea1997a1b
1192 changed files with 45183 additions and 28401 deletions

@@ -232,3 +232,4 @@ b5ed503c26ad38869c247c5e32debec217fd056b jdk8-b104
74049f7a28b48c14910106a75d9f2504169c352e jdk8-b108
af9a674e12a16da1a4bd53e4990ddb1121a21ef1 jdk8-b109
b5d2bf482a3ea1cca08c994512804ffbc73de0a1 jdk8-b110
b9a0f6c693f347a6f4b9bb994957f4eaa05bdedd jdk8-b111

@@ -232,3 +232,4 @@ b7e64be81c8a7690703df5711f4fc2375da8a9cb jdk8-b103
9286a6e61291246d88af713f1ef79adeea30fe2e jdk8-b108
91f47e8da5c60de58ed195e9b57f3bf192a18f83 jdk8-b109
4faa09c7fe555de086dd9048d3c5cc92317d6f45 jdk8-b110
d086227bfc45d124f09b3bd72a07956b4073bf71 jdk8-b111

@@ -37,16 +37,16 @@ TEST_FOR_NON_GNUMAKE:sh=echo You are not using GNU make/gmake, this is a require
# Assume we have GNU make, but check version.
ifeq (,$(findstring 3.81,$(MAKE_VERSION)))
ifeq (,$(findstring 3.82,$(MAKE_VERSION)))
$(error This version of GNU Make is too low ($(MAKE_VERSION)). Check your path, or upgrade to 3.81 or newer.)
endif
endif
# Locate this Makefile
ifeq ($(filter /%,$(lastword $(MAKEFILE_LIST))),)
makefile_path:=$(CURDIR)/$(lastword $(MAKEFILE_LIST))
else
makefile_path:=$(lastword $(MAKEFILE_LIST))
endif
root_dir:=$(dir $(makefile_path))
@@ -58,27 +58,27 @@ $(eval $(call ParseConfAndSpec))
# Now determine if we have zero, one or several configurations to build.
ifeq ($(SPEC),)
# Since we got past ParseConfAndSpec, we must be building a global target. Do nothing.
else
ifeq ($(words $(SPEC)),1)
# We are building a single configuration. This is the normal case. Execute the Main.gmk file.
include $(root_dir)/common/makefiles/Main.gmk
else
# We are building multiple configurations.
# First, find out the valid targets
# Run the makefile with an arbitrary SPEC using -p -q (quiet dry-run and dump rules) to find
# available PHONY targets. Use this list as valid targets to pass on to the repeated calls.
all_phony_targets=$(filter-out $(global_targets) bundles-only, $(strip $(shell \
$(MAKE) -p -q -f common/makefiles/Main.gmk FRC SPEC=$(firstword $(SPEC)) | \
grep ^.PHONY: | head -n 1 | cut -d " " -f 2-)))
$(all_phony_targets):
@$(foreach spec,$(SPEC),($(MAKE) -f NewMakefile.gmk SPEC=$(spec) \
$(VERBOSE) VERBOSE=$(VERBOSE) LOG_LEVEL=$(LOG_LEVEL) $@) &&) true
.PHONY: $(all_phony_targets)
endif
endif
endif
# Include this after a potential spec file has been included so that the bundles target
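The comment block in this hunk explains the trick used for multi-configuration builds: run make once in quiet dry-run mode (-p -q) against an arbitrary SPEC, dump the rule database, and take the first .PHONY: line as the list of valid targets to forward to each configuration. A minimal shell sketch of that probe, assuming a placeholder spec path:

  # Illustrative only; the spec file location is an assumption.
  make -p -q -f common/makefiles/Main.gmk FRC \
      SPEC=build/linux-x86_64-normal-server-release/spec.gmk 2>/dev/null \
    | grep '^.PHONY:' | head -n 1 | cut -d " " -f 2-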

@@ -62,7 +62,7 @@ if test -e $custom_hook; then
# We have custom sources available; also generate configure script
# with custom hooks compiled in.
cat $script_dir/configure.ac | sed -e "s|@DATE_WHEN_GENERATED@|$TIMESTAMP|" | \
sed -e "s|#CUSTOM_AUTOCONF_INCLUDE|m4_include([$custom_hook])|" | ${AUTOCONF} -W all -I$script_dir - > $custom_script_dir/generated-configure.sh
rm -rf autom4te.cache
else
echo No custom hook found: $custom_hook
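This hunk is the step where autogen.sh splices an optional custom m4 hook into configure.ac by replacing the #CUSTOM_AUTOCONF_INCLUDE marker before piping the result through autoconf. A hedged standalone sketch of the same idea, with placeholder paths and a plain autoconf invocation:

  # Sketch only; the paths and hook file are assumptions for illustration.
  script_dir=common/autoconf
  custom_hook=custom/autoconf/custom-hook.m4
  TIMESTAMP=`date +%s`
  sed -e "s|@DATE_WHEN_GENERATED@|$TIMESTAMP|" \
      -e "s|#CUSTOM_AUTOCONF_INCLUDE|m4_include([$custom_hook])|" \
      $script_dir/configure.ac \
    | autoconf -W all -I$script_dir - > generated-configure.sh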

@@ -24,23 +24,23 @@
#
# Test if $1 is a valid argument to $3 (often is $JAVA passed as $3)
# If so, then append $1 to $2 \
# Also set JVM_ARG_OK to true/false depending on outcome.
AC_DEFUN([ADD_JVM_ARG_IF_OK],
[
$ECHO "Check if jvm arg is ok: $1" >&AS_MESSAGE_LOG_FD
$ECHO "Command: $3 $1 -version" >&AS_MESSAGE_LOG_FD
OUTPUT=`$3 $1 -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
$2="[$]$2 $1"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&AS_MESSAGE_LOG_FD
$ECHO "$OUTPUT" >&AS_MESSAGE_LOG_FD
JVM_ARG_OK=false
fi
])
# Appends a string to a path variable, only adding the : when needed.
@@ -131,76 +131,76 @@ AC_DEFUN([BASIC_FIXUP_EXECUTABLE],
fi
if test "x$new_path" = x; then
AC_MSG_NOTICE([The path of $1, which resolves as "$complete", is not found.])
has_space=`$ECHO "$complete" | $GREP " "`
if test "x$has_space" != x; then
AC_MSG_NOTICE([This might be caused by spaces in the path, which is not allowed.])
fi
AC_MSG_ERROR([Cannot locate the the path of $1])
fi
fi
# Now join together the path and the arguments once again
if test "x$arguments" != xEOL; then
new_complete="$new_path ${arguments% *}"
else
new_complete="$new_path"
fi
if test "x$complete" != "x$new_complete"; then
$1="$new_complete"
AC_MSG_NOTICE([Rewriting $1 to "$new_complete"])
fi
])
AC_DEFUN([BASIC_REMOVE_SYMBOLIC_LINKS],
[
if test "x$OPENJDK_BUILD_OS" != xwindows; then
# Follow a chain of symbolic links. Use readlink
# where it exists, else fall back to horribly
# complicated shell code.
if test "x$READLINK_TESTED" != yes; then
# On MacOSX there is a readlink tool with a different
# purpose than the GNU readlink tool. Check the found readlink.
ISGNU=`$READLINK --version 2>&1 | $GREP GNU`
if test "x$ISGNU" = x; then
# A readlink that we do not know how to use.
# Are there other non-GNU readlinks out there?
READLINK_TESTED=yes
READLINK=
fi
fi
if test "x$READLINK" != x; then
$1=`$READLINK -f [$]$1`
else
# Save the current directory for restoring afterwards
STARTDIR=$PWD
COUNTER=0
sym_link_dir=`$DIRNAME [$]$1`
sym_link_file=`$BASENAME [$]$1`
cd $sym_link_dir
# Use -P flag to resolve symlinks in directories.
cd `$THEPWDCMD -P`
sym_link_dir=`$THEPWDCMD -P`
# Resolve file symlinks
while test $COUNTER -lt 20; do
ISLINK=`$LS -l $sym_link_dir/$sym_link_file | $GREP '\->' | $SED -e 's/.*-> \(.*\)/\1/'`
if test "x$ISLINK" == x; then
# This is not a symbolic link! We are done!
break
fi
# Again resolve directory symlinks since the target of the just found
# link could be in a different directory
cd `$DIRNAME $ISLINK`
sym_link_dir=`$THEPWDCMD -P`
sym_link_file=`$BASENAME $ISLINK`
let COUNTER=COUNTER+1
done
cd $STARTDIR
$1=$sym_link_dir/$sym_link_file
fi
fi
])
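BASIC_REMOVE_SYMBOLIC_LINKS above prefers GNU readlink -f and otherwise walks the symlink chain by hand, capped at 20 hops and resolving directory links with pwd -P. A plain-shell sketch of that fallback; the helper name is made up for illustration:

  resolve_link() {
    # Assumed standalone helper, not part of the build system itself.
    target=$1
    if readlink --version 2>&1 | grep -q GNU; then
      readlink -f "$target"; return
    fi
    startdir=$PWD
    counter=0
    dir=$(dirname "$target"); file=$(basename "$target")
    cd "$dir" && cd "$(pwd -P)"; dir=$(pwd -P)
    while [ $counter -lt 20 ]; do
      # Parse `ls -l` to find the link target, as the macro does.
      link=$(ls -l "$dir/$file" | grep ' -> ' | sed -e 's/.*-> \(.*\)/\1/')
      [ -z "$link" ] && break        # not a symlink any more, done
      cd "$(dirname "$link")"; dir=$(pwd -P); file=$(basename "$link")
      counter=$((counter + 1))
    done
    cd "$startdir"
    echo "$dir/$file"
  }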
# Register a --with argument but mark it as deprecated
@@ -214,12 +214,12 @@ AC_DEFUN([BASIC_DEPRECATED_ARG_WITH],
AC_DEFUN_ONCE([BASIC_INIT],
[
# Save the original command line. This is passed to us by the wrapper configure script.
AC_SUBST(CONFIGURE_COMMAND_LINE)
DATE_WHEN_CONFIGURED=`LANG=C date`
AC_SUBST(DATE_WHEN_CONFIGURED)
AC_MSG_NOTICE([Configuration created at $DATE_WHEN_CONFIGURED.])
AC_MSG_NOTICE([configure script generated at timestamp $DATE_WHEN_GENERATED.])
])
# Test that variable $1 denoting a program is not empty. If empty, exit with an error.
@@ -227,15 +227,15 @@ AC_MSG_NOTICE([configure script generated at timestamp $DATE_WHEN_GENERATED.])
# $2: executable name to print in warning (optional)
AC_DEFUN([BASIC_CHECK_NONEMPTY],
[
if test "x[$]$1" = x; then
if test "x$2" = x; then
PROG_NAME=translit($1,A-Z,a-z)
else
PROG_NAME=$2
fi
AC_MSG_NOTICE([Could not find $PROG_NAME!])
AC_MSG_ERROR([Cannot continue])
fi
])
# Does AC_PATH_PROG followed by BASIC_CHECK_NONEMPTY.
@@ -244,8 +244,8 @@ AC_DEFUN([BASIC_CHECK_NONEMPTY],
# $2: executable name to look for
AC_DEFUN([BASIC_REQUIRE_PROG],
[
AC_PATH_PROGS($1, $2)
BASIC_CHECK_NONEMPTY($1, $2)
])
# Setup the most fundamental tools that relies on not much else to set up,
@@ -253,171 +253,171 @@ AC_DEFUN([BASIC_REQUIRE_PROG],
AC_DEFUN_ONCE([BASIC_SETUP_FUNDAMENTAL_TOOLS],
[
# Start with tools that do not need have cross compilation support
# and can be expected to be found in the default PATH. These tools are
# used by configure. Nor are these tools expected to be found in the
# devkit from the builddeps server either, since they are
# needed to download the devkit.
# First are all the simple required tools.
BASIC_REQUIRE_PROG(BASENAME, basename)
BASIC_REQUIRE_PROG(BASH, bash)
BASIC_REQUIRE_PROG(CAT, cat)
BASIC_REQUIRE_PROG(CHMOD, chmod)
BASIC_REQUIRE_PROG(CMP, cmp)
BASIC_REQUIRE_PROG(COMM, comm)
BASIC_REQUIRE_PROG(CP, cp)
BASIC_REQUIRE_PROG(CPIO, cpio)
BASIC_REQUIRE_PROG(CUT, cut)
BASIC_REQUIRE_PROG(DATE, date)
BASIC_REQUIRE_PROG(DIFF, [gdiff diff])
BASIC_REQUIRE_PROG(DIRNAME, dirname)
BASIC_REQUIRE_PROG(ECHO, echo)
BASIC_REQUIRE_PROG(EXPR, expr)
BASIC_REQUIRE_PROG(FILE, file)
BASIC_REQUIRE_PROG(FIND, find)
BASIC_REQUIRE_PROG(HEAD, head)
BASIC_REQUIRE_PROG(LN, ln)
BASIC_REQUIRE_PROG(LS, ls)
BASIC_REQUIRE_PROG(MKDIR, mkdir)
BASIC_REQUIRE_PROG(MKTEMP, mktemp)
BASIC_REQUIRE_PROG(MV, mv)
BASIC_REQUIRE_PROG(PRINTF, printf)
BASIC_REQUIRE_PROG(RM, rm)
BASIC_REQUIRE_PROG(SH, sh)
BASIC_REQUIRE_PROG(SORT, sort)
BASIC_REQUIRE_PROG(TAIL, tail)
BASIC_REQUIRE_PROG(TAR, tar)
BASIC_REQUIRE_PROG(TEE, tee)
BASIC_REQUIRE_PROG(TOUCH, touch)
BASIC_REQUIRE_PROG(TR, tr)
BASIC_REQUIRE_PROG(UNAME, uname)
BASIC_REQUIRE_PROG(UNIQ, uniq)
BASIC_REQUIRE_PROG(WC, wc)
BASIC_REQUIRE_PROG(WHICH, which)
BASIC_REQUIRE_PROG(XARGS, xargs)
# Then required tools that require some special treatment.
AC_PROG_AWK
BASIC_CHECK_NONEMPTY(AWK)
AC_PROG_GREP
BASIC_CHECK_NONEMPTY(GREP)
AC_PROG_EGREP
BASIC_CHECK_NONEMPTY(EGREP)
AC_PROG_FGREP
BASIC_CHECK_NONEMPTY(FGREP)
AC_PROG_SED
BASIC_CHECK_NONEMPTY(SED)
AC_PATH_PROGS(NAWK, [nawk gawk awk])
BASIC_CHECK_NONEMPTY(NAWK)
# Always force rm.
RM="$RM -f"
# pwd behaves differently on various platforms and some don't support the -L flag.
# Always use the bash builtin pwd to get uniform behavior.
THEPWDCMD=pwd
# These are not required on all platforms
AC_PATH_PROG(CYGPATH, cygpath)
AC_PATH_PROG(READLINK, readlink)
AC_PATH_PROG(DF, df)
AC_PATH_PROG(SETFILE, SetFile)
])
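BASIC_SETUP_FUNDAMENTAL_TOOLS above resolves every required Unix tool up front and aborts configure as soon as one is missing. The same require-or-fail pattern in plain shell, over an illustrative subset of the list:

  # Sketch of the BASIC_REQUIRE_PROG pattern; not the macro itself.
  for tool in basename bash cat chmod cmp comm cp cut date dirname find sed sort tar uname; do
    if ! command -v "$tool" > /dev/null 2>&1; then
      echo "error: could not find $tool in PATH" >&2
      exit 1
    fi
  done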
# Setup basic configuration paths, and platform-specific stuff related to PATHs.
AC_DEFUN_ONCE([BASIC_SETUP_PATHS],
[
# Locate the directory of this script.
SCRIPT="[$]0"
AUTOCONF_DIR=`cd \`$DIRNAME $SCRIPT\`; $THEPWDCMD -L`
# Where is the source? It is located two levels above the configure script.
CURDIR="$PWD"
cd "$AUTOCONF_DIR/../.."
SRC_ROOT="`$THEPWDCMD -L`"
if test "x$OPENJDK_TARGET_OS" = "xwindows"; then
PATH_SEP=";"
BASIC_CHECK_PATHS_WINDOWS
else
PATH_SEP=":"
fi
AC_SUBST(SRC_ROOT)
AC_SUBST(PATH_SEP)
cd "$CURDIR"
BASIC_FIXUP_PATH(SRC_ROOT)
BASIC_FIXUP_PATH(CURDIR)
if test "x$OPENJDK_BUILD_OS" = "xsolaris"; then
# Add extra search paths on solaris for utilities like ar and as etc...
PATH="$PATH:/usr/ccs/bin:/usr/sfw/bin:/opt/csw/bin"
fi
# You can force the sys-root if the sys-root encoded into the cross compiler tools
# is not correct.
AC_ARG_WITH(sys-root, [AS_HELP_STRING([--with-sys-root],
[pass this sys-root to the compilers and tools (for cross-compiling)])])
if test "x$with_sys_root" != x; then
SYS_ROOT=$with_sys_root
else
SYS_ROOT=/
fi
AC_SUBST(SYS_ROOT)
AC_ARG_WITH([tools-dir], [AS_HELP_STRING([--with-tools-dir],
[search this directory for compilers and tools (for cross-compiling)])],
[TOOLS_DIR=$with_tools_dir]
)
AC_ARG_WITH([devkit], [AS_HELP_STRING([--with-devkit],
[use this directory as base for tools-dir and sys-root (for cross-compiling)])],
[
if test "x$with_sys_root" != x; then
AC_MSG_ERROR([Cannot specify both --with-devkit and --with-sys-root at the same time])
fi
BASIC_FIXUP_PATH([with_devkit])
BASIC_APPEND_TO_PATH([TOOLS_DIR],$with_devkit/bin)
if test -d "$with_devkit/$host_alias/libc"; then
SYS_ROOT=$with_devkit/$host_alias/libc
elif test -d "$with_devkit/$host/sys-root"; then
SYS_ROOT=$with_devkit/$host/sys-root
fi
])
])
AC_DEFUN_ONCE([BASIC_SETUP_OUTPUT_DIR],
[
AC_ARG_WITH(conf-name, [AS_HELP_STRING([--with-conf-name],
[use this as the name of the configuration @<:@generated from important configuration options@:>@])],
[ CONF_NAME=${with_conf_name} ])
# Test from where we are running configure, in or outside of src root.
if test "x$CURDIR" = "x$SRC_ROOT" || test "x$CURDIR" = "x$SRC_ROOT/common" \
|| test "x$CURDIR" = "x$SRC_ROOT/common/autoconf" \
|| test "x$CURDIR" = "x$SRC_ROOT/common/makefiles" ; then
# We are running configure from the src root.
# Create a default ./build/target-variant-debuglevel output root.
if test "x${CONF_NAME}" = x; then
CONF_NAME="${OPENJDK_TARGET_OS}-${OPENJDK_TARGET_CPU}-${JDK_VARIANT}-${ANDED_JVM_VARIANTS}-${DEBUG_LEVEL}"
fi
OUTPUT_ROOT="$SRC_ROOT/build/${CONF_NAME}"
$MKDIR -p "$OUTPUT_ROOT"
if test ! -d "$OUTPUT_ROOT"; then
AC_MSG_ERROR([Could not create build directory $OUTPUT_ROOT])
fi
else
# We are running configure from outside of the src dir.
# Then use the current directory as output dir!
# If configuration is situated in normal build directory, just use the build
# directory name as configuration name, otherwise use the complete path.
if test "x${CONF_NAME}" = x; then
CONF_NAME=`$ECHO $CURDIR | $SED -e "s!^${SRC_ROOT}/build/!!"`
fi
OUTPUT_ROOT="$CURDIR"
@@ -431,7 +431,7 @@ else
# Configure has already touched config.log and confdefs.h in the current dir when this check
# is performed.
filtered_files=`$ECHO "$files_present" | $SED -e 's/config.log//g' -e 's/confdefs.h//g' -e 's/ //g' \
| $TR -d '\n'`
if test "x$filtered_files" != x; then
AC_MSG_NOTICE([Current directory is $CURDIR.])
AC_MSG_NOTICE([Since this is not the source root, configure will output the configuration here])
@@ -443,46 +443,46 @@ else
AC_MSG_ERROR([Will not continue creating configuration in $CURDIR])
fi
fi
fi
AC_MSG_CHECKING([what configuration name to use])
AC_MSG_RESULT([$CONF_NAME])
BASIC_FIXUP_PATH(OUTPUT_ROOT)
AC_SUBST(SPEC, $OUTPUT_ROOT/spec.gmk)
AC_SUBST(CONF_NAME, $CONF_NAME)
AC_SUBST(OUTPUT_ROOT, $OUTPUT_ROOT)
# Most of the probed defines are put into config.h
AC_CONFIG_HEADERS([$OUTPUT_ROOT/config.h:$AUTOCONF_DIR/config.h.in])
# The spec.gmk file contains all variables for the make system.
AC_CONFIG_FILES([$OUTPUT_ROOT/spec.gmk:$AUTOCONF_DIR/spec.gmk.in])
# The hotspot-spec.gmk file contains legacy variables for the hotspot make system.
AC_CONFIG_FILES([$OUTPUT_ROOT/hotspot-spec.gmk:$AUTOCONF_DIR/hotspot-spec.gmk.in])
# The bootcycle-spec.gmk file contains support for boot cycle builds.
AC_CONFIG_FILES([$OUTPUT_ROOT/bootcycle-spec.gmk:$AUTOCONF_DIR/bootcycle-spec.gmk.in])
# The compare.sh is used to compare the build output to other builds.
AC_CONFIG_FILES([$OUTPUT_ROOT/compare.sh:$AUTOCONF_DIR/compare.sh.in])
# Spec.sh is currently used by compare-objects.sh
AC_CONFIG_FILES([$OUTPUT_ROOT/spec.sh:$AUTOCONF_DIR/spec.sh.in])
# The generated Makefile knows where the spec.gmk is and where the source is.
# You can run make from the OUTPUT_ROOT, or from the top-level Makefile
# which will look for generated configurations
AC_CONFIG_FILES([$OUTPUT_ROOT/Makefile:$AUTOCONF_DIR/Makefile.in])
# Save the arguments given to us
echo "$CONFIGURE_COMMAND_LINE" > $OUTPUT_ROOT/configure-arguments
])
AC_DEFUN_ONCE([BASIC_SETUP_LOGGING],
[
# Setup default logging of stdout and stderr to build.log in the output root.
BUILD_LOG='$(OUTPUT_ROOT)/build.log'
BUILD_LOG_PREVIOUS='$(OUTPUT_ROOT)/build.log.old'
BUILD_LOG_WRAPPER='$(BASH) $(SRC_ROOT)/common/bin/logger.sh $(BUILD_LOG)'
AC_SUBST(BUILD_LOG)
AC_SUBST(BUILD_LOG_PREVIOUS)
AC_SUBST(BUILD_LOG_WRAPPER)
])
@@ -581,85 +581,85 @@ AC_DEFUN([BASIC_CHECK_GNU_MAKE],
AC_DEFUN([BASIC_CHECK_FIND_DELETE],
[
# Test if find supports -delete
AC_MSG_CHECKING([if find supports -delete])
FIND_DELETE="-delete"
DELETEDIR=`$MKTEMP -d tmp.XXXXXXXXXX` || (echo Could not create temporary directory!; exit $?)
echo Hejsan > $DELETEDIR/TestIfFindSupportsDelete
TEST_DELETE=`$FIND "$DELETEDIR" -name TestIfFindSupportsDelete $FIND_DELETE 2>&1`
if test -f $DELETEDIR/TestIfFindSupportsDelete; then
# No, it does not.
rm $DELETEDIR/TestIfFindSupportsDelete
FIND_DELETE="-exec rm \{\} \+"
AC_MSG_RESULT([no])
else
AC_MSG_RESULT([yes])
fi
rmdir $DELETEDIR
AC_SUBST(FIND_DELETE)
])
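BASIC_CHECK_FIND_DELETE above decides between find -delete and the portable -exec rm form by trying the flag on a scratch file. The same probe as a standalone sketch:

  deletedir=$(mktemp -d tmp.XXXXXXXXXX) || exit 1
  echo Hejsan > "$deletedir/TestIfFindSupportsDelete"
  find_delete="-delete"
  find "$deletedir" -name TestIfFindSupportsDelete $find_delete 2>/dev/null
  if test -f "$deletedir/TestIfFindSupportsDelete"; then
    # Old find: the flag was rejected or ignored, fall back to -exec rm.
    rm "$deletedir/TestIfFindSupportsDelete"
    find_delete="-exec rm {} +"
  fi
  rmdir "$deletedir"
  echo "FIND_DELETE=$find_delete"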
AC_DEFUN_ONCE([BASIC_SETUP_COMPLEX_TOOLS],
[
BASIC_CHECK_GNU_MAKE
BASIC_CHECK_FIND_DELETE
# These tools might not be installed by default,
# need hint on how to install them.
BASIC_REQUIRE_PROG(UNZIP, unzip)
BASIC_REQUIRE_PROG(ZIP, zip)
# Non-required basic tools
AC_PATH_PROG(LDD, ldd)
if test "x$LDD" = "x"; then
# List shared lib dependencies is used for
# debug output and checking for forbidden dependencies.
# We can build without it.
LDD="true"
fi
AC_PATH_PROG(OTOOL, otool)
if test "x$OTOOL" = "x"; then
OTOOL="true"
fi
AC_PATH_PROGS(READELF, [readelf greadelf])
AC_PATH_PROG(HG, hg)
AC_PATH_PROG(STAT, stat)
AC_PATH_PROG(TIME, time)
# Check if it's GNU time
IS_GNU_TIME=`$TIME --version 2>&1 | $GREP 'GNU time'`
if test "x$IS_GNU_TIME" != x; then
IS_GNU_TIME=yes
else
IS_GNU_TIME=no
fi
AC_SUBST(IS_GNU_TIME)
if test "x$OPENJDK_TARGET_OS" = "xwindows"; then
BASIC_REQUIRE_PROG(COMM, comm)
fi
if test "x$OPENJDK_TARGET_OS" = "xmacosx"; then
BASIC_REQUIRE_PROG(XATTR, xattr)
AC_PATH_PROG(CODESIGN, codesign)
if test "x$CODESIGN" != "x"; then
# Verify that the openjdk_codesign certificate is present
AC_MSG_CHECKING([if openjdk_codesign certificate is present])
rm -f codesign-testfile
touch codesign-testfile
codesign -s openjdk_codesign codesign-testfile 2>&AS_MESSAGE_LOG_FD >&AS_MESSAGE_LOG_FD || CODESIGN=
rm -f codesign-testfile
if test "x$CODESIGN" = x; then
AC_MSG_RESULT([no])
else
AC_MSG_RESULT([yes])
fi
fi
fi
fi
])
# Check if build directory is on local disk. If not possible to determine,
@@ -669,8 +669,8 @@ fi
# Argument 3: what to do otherwise (remote disk or failure)
AC_DEFUN([BASIC_CHECK_DIR_ON_LOCAL_DISK],
[
# df -l lists only local disks; if the given directory is not found then
# a non-zero exit code is given
if test "x$DF" = x; then
if test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.msys"; then
# msys does not have df; use Windows "net use" instead.
@@ -707,32 +707,30 @@ AC_DEFUN_ONCE([BASIC_CHECK_SRC_PERMS],
AC_DEFUN_ONCE([BASIC_TEST_USABILITY_ISSUES],
[
AC_MSG_CHECKING([if build directory is on local disk])
BASIC_CHECK_DIR_ON_LOCAL_DISK($OUTPUT_ROOT,
[OUTPUT_DIR_IS_LOCAL="yes"],
[OUTPUT_DIR_IS_LOCAL="no"])
AC_MSG_RESULT($OUTPUT_DIR_IS_LOCAL)
BASIC_CHECK_SRC_PERMS
# Check if the user has any old-style ALT_ variables set.
FOUND_ALT_VARIABLES=`env | grep ^ALT_`
# Before generating output files, test if they exist. If they do, this is a reconfigure.
# Since we can't properly handle the dependencies for this, warn the user about the situation
if test -e $OUTPUT_ROOT/spec.gmk; then
IS_RECONFIGURE=yes
else
IS_RECONFIGURE=no
fi
if test -e $SRC_ROOT/build/.hide-configure-performance-hints; then
HIDE_PERFORMANCE_HINTS=yes
else
HIDE_PERFORMANCE_HINTS=no
# Hide it the next time around...
$TOUCH $SRC_ROOT/build/.hide-configure-performance-hints > /dev/null 2>&1
fi
])

@@ -175,8 +175,8 @@ AC_DEFUN([BASIC_FIXUP_EXECUTABLE_CYGWIN],
# bat and cmd files are not always considered executable in cygwin causing which
# to not find them
if test "x$new_path" = x \
&& test "x`$ECHO \"$path\" | $GREP -i -e \"\\.bat$\" -e \"\\.cmd$\"`" != x \
&& test "x`$LS \"$path\" 2>/dev/null`" != x; then
new_path=`$CYGPATH -u "$path"`
fi
if test "x$new_path" = x; then
@@ -191,8 +191,8 @@ AC_DEFUN([BASIC_FIXUP_EXECUTABLE_CYGWIN],
# bat and cmd files are not always considered executable in cygwin causing which
# to not find them
if test "x$new_path" = x \
&& test "x`$ECHO \"$path\" | $GREP -i -e \"\\.bat$\" -e \"\\.cmd$\"`" != x \
&& test "x`$LS \"$path\" 2>/dev/null`" != x; then
new_path=`$CYGPATH -u "$path"`
fi
if test "x$new_path" = x; then
@@ -218,9 +218,9 @@ AC_DEFUN([BASIC_FIXUP_EXECUTABLE_CYGWIN],
# Short path failed, file does not exist as specified.
# Try adding .exe or .cmd
if test -f "${new_path}.exe"; then
input_to_shortpath="${new_path}.exe"
elif test -f "${new_path}.cmd"; then
input_to_shortpath="${new_path}.cmd"
else
AC_MSG_NOTICE([The path of $1, which resolves as "$new_path", is invalid.])
AC_MSG_NOTICE([Neither "$new_path" nor "$new_path.exe/cmd" can be found])
@@ -302,7 +302,7 @@ AC_DEFUN([BASIC_CHECK_PATHS_WINDOWS],
[
SRC_ROOT_LENGTH=`$THEPWDCMD -L|$WC -m`
if test $SRC_ROOT_LENGTH -gt 100; then
AC_MSG_ERROR([Your base path is too long. It is $SRC_ROOT_LENGTH characters long, but only 100 is supported])
fi
if test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.cygwin"; then
@@ -318,7 +318,7 @@ AC_DEFUN([BASIC_CHECK_PATHS_WINDOWS],
AC_MSG_ERROR([Cannot continue])
fi
if test "x$CYGPATH" = x; then
AC_MSG_ERROR([Something is wrong with your cygwin installation since I cannot find cygpath.exe in your path])
fi
AC_MSG_CHECKING([cygwin root directory as unix-style path])
# The cmd output ends with Windows line endings (CR/LF), the grep command will strip that away
@@ -329,7 +329,7 @@ AC_DEFUN([BASIC_CHECK_PATHS_WINDOWS],
WINDOWS_ENV_ROOT_PATH="$CYGWIN_ROOT_PATH"
test_cygdrive_prefix=`$ECHO $CYGWIN_ROOT_PATH | $GREP ^/cygdrive/`
if test "x$test_cygdrive_prefix" = x; then
AC_MSG_ERROR([Your cygdrive prefix is not /cygdrive. This is currently not supported. Change with mount -c.])
fi
elif test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.msys"; then
AC_MSG_CHECKING([msys release])
@@ -367,12 +367,12 @@ AC_DEFUN_ONCE([BASIC_COMPILE_FIXPATH],
AC_DEFUN_ONCE([BASIC_COMPILE_FIXPATH],
[
# When using cygwin or msys, we need a wrapper binary that renames
# /cygdrive/c/ arguments into c:/ arguments and peeks into
# @files and rewrites these too! This wrapper binary is
# called fixpath.
FIXPATH=
if test "x$OPENJDK_BUILD_OS" = xwindows; then
AC_MSG_CHECKING([if fixpath can be created])
FIXPATH_SRC="$SRC_ROOT/common/src/fixpath.c"
FIXPATH_BIN="$OUTPUT_ROOT/fixpath.exe"
@@ -398,9 +398,9 @@ if test "x$OPENJDK_BUILD_OS" = xwindows; then
cd $CURDIR
if test ! -x $OUTPUT_ROOT/fixpath.exe; then
AC_MSG_RESULT([no])
cat $OUTPUT_ROOT/fixpath1.log
AC_MSG_ERROR([Could not create $OUTPUT_ROOT/fixpath.exe])
fi
AC_MSG_RESULT([yes])
AC_MSG_CHECKING([if fixpath.exe works])
@@ -408,13 +408,13 @@ if test "x$OPENJDK_BUILD_OS" = xwindows; then
$FIXPATH $CC $SRC_ROOT/common/src/fixpath.c -Fe$OUTPUT_ROOT/fixpath2.exe > $OUTPUT_ROOT/fixpath2.log 2>&1
cd $CURDIR
if test ! -x $OUTPUT_ROOT/fixpath2.exe; then
AC_MSG_RESULT([no])
cat $OUTPUT_ROOT/fixpath2.log
AC_MSG_ERROR([fixpath did not work!])
fi
AC_MSG_RESULT([yes])
rm -f $OUTPUT_ROOT/fixpath?.??? $OUTPUT_ROOT/fixpath.obj
fi
fi
AC_SUBST(FIXPATH)
])
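The BASIC_COMPILE_FIXPATH hunk above builds a small wrapper, fixpath.exe, that rewrites /cygdrive/c/... style arguments into c:/... before handing them to native Windows tools. The rewrite itself is the conversion cygpath already performs; a tiny illustration (not the fixpath tool):

  # cygpath -m prints the mixed Windows form with forward slashes.
  unix_path=/cygdrive/c/open/jdk8/common/src/fixpath.c   # example input
  cygpath -m "$unix_path"    # prints C:/open/jdk8/common/src/fixpath.c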

@@ -79,70 +79,70 @@ AC_DEFUN([BOOTJDK_DO_CHECK],
# Test: Is bootjdk explicitely set by command line arguments?
AC_DEFUN([BOOTJDK_CHECK_ARGUMENTS],
[
if test "x$with_boot_jdk" != x; then
BOOT_JDK=$with_boot_jdk
BOOT_JDK_FOUND=maybe
AC_MSG_NOTICE([Found potential Boot JDK using configure arguments])
fi
])
# Test: Is bootjdk available from builddeps?
AC_DEFUN([BOOTJDK_CHECK_BUILDDEPS],
[
BDEPS_CHECK_MODULE(BOOT_JDK, bootjdk, xxx, [BOOT_JDK_FOUND=maybe], [BOOT_JDK_FOUND=no])
])
# Test: Is $JAVA_HOME set?
AC_DEFUN([BOOTJDK_CHECK_JAVA_HOME],
[
if test "x$JAVA_HOME" != x; then
JAVA_HOME_PROCESSED="$JAVA_HOME"
BASIC_FIXUP_PATH(JAVA_HOME_PROCESSED)
if test ! -d "$JAVA_HOME_PROCESSED"; then
AC_MSG_NOTICE([Your JAVA_HOME points to a non-existing directory!])
else
# Aha, the user has set a JAVA_HOME
# let us use that as the Boot JDK.
BOOT_JDK="$JAVA_HOME_PROCESSED"
BOOT_JDK_FOUND=maybe
AC_MSG_NOTICE([Found potential Boot JDK using JAVA_HOME])
fi
fi
])
# Test: Is there a java or javac in the PATH, which is a symlink to the JDK?
AC_DEFUN([BOOTJDK_CHECK_JAVA_IN_PATH_IS_SYMLINK],
[
AC_PATH_PROG(JAVAC_CHECK, javac)
AC_PATH_PROG(JAVA_CHECK, java)
BINARY="$JAVAC_CHECK"
if test "x$JAVAC_CHECK" = x; then
BINARY="$JAVA_CHECK"
fi
if test "x$BINARY" != x; then
# So there is a java(c) binary, it might be part of a JDK.
# Lets find the JDK/JRE directory by following symbolic links.
# Linux/GNU systems often have links from /usr/bin/java to
# /etc/alternatives/java to the real JDK binary.
BASIC_REMOVE_SYMBOLIC_LINKS(BINARY)
BOOT_JDK=`dirname "$BINARY"`
BOOT_JDK=`cd "$BOOT_JDK/.."; pwd`
if test -x "$BOOT_JDK/bin/javac" && test -x "$BOOT_JDK/bin/java"; then
# Looks like we found ourselves an JDK
BOOT_JDK_FOUND=maybe
AC_MSG_NOTICE([Found potential Boot JDK using java(c) in PATH])
fi
fi
])
# Test: Is there a /usr/libexec/java_home? (Typically on MacOSX)
AC_DEFUN([BOOTJDK_CHECK_LIBEXEC_JAVA_HOME],
[
if test -x /usr/libexec/java_home; then
BOOT_JDK=`/usr/libexec/java_home`
BOOT_JDK_FOUND=maybe
AC_MSG_NOTICE([Found potential Boot JDK using /usr/libexec/java_home])
fi
])
# Look for a jdk in the given path. If there are multiple, try to select the newest.
@@ -204,9 +204,9 @@ AC_DEFUN([BOOTJDK_CHECK_TOOL_IN_BOOTJDK],
AC_MSG_CHECKING([for $2 in Boot JDK])
$1=$BOOT_JDK/bin/$2
if test ! -x [$]$1; then
AC_MSG_RESULT(not found)
AC_MSG_NOTICE([Your Boot JDK seems broken. This might be fixed by explicitely setting --with-boot-jdk])
AC_MSG_ERROR([Could not find $2 in the Boot JDK])
fi
AC_MSG_RESULT(ok)
])
@@ -218,109 +218,109 @@ AC_DEFUN([BOOTJDK_CHECK_TOOL_IN_BOOTJDK],
AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK],
[
BOOT_JDK_FOUND=no
AC_ARG_WITH(boot-jdk, [AS_HELP_STRING([--with-boot-jdk],
[path to Boot JDK (used to bootstrap build) @<:@probed@:>@])])
# We look for the Boot JDK through various means, going from more certain to
# more of a guess-work. After each test, BOOT_JDK_FOUND is set to "yes" if
# we detected something (if so, the path to the jdk is in BOOT_JDK). But we
# must check if this is indeed valid; otherwise we'll continue looking.
# Test: Is bootjdk explicitely set by command line arguments?
BOOTJDK_DO_CHECK([BOOTJDK_CHECK_ARGUMENTS])
if test "x$with_boot_jdk" != x && test "x$BOOT_JDK_FOUND" = xno; then
# Having specified an argument which is incorrect will produce an instant failure;
# we should not go on looking
AC_MSG_ERROR([The path given by --with-boot-jdk does not contain a valid Boot JDK])
fi
# Test: Is bootjdk available from builddeps?
BOOTJDK_DO_CHECK([BOOTJDK_CHECK_BUILDDEPS])
# Test: Is $JAVA_HOME set?
BOOTJDK_DO_CHECK([BOOTJDK_CHECK_JAVA_HOME])
# Test: Is there a /usr/libexec/java_home? (Typically on MacOSX)
BOOTJDK_DO_CHECK([BOOTJDK_CHECK_LIBEXEC_JAVA_HOME])
# Test: Is there a java or javac in the PATH, which is a symlink to the JDK?
BOOTJDK_DO_CHECK([BOOTJDK_CHECK_JAVA_IN_PATH_IS_SYMLINK])
# Test: Is there a JDK installed in default, well-known locations?
BOOTJDK_DO_CHECK([BOOTJDK_CHECK_WELL_KNOWN_LOCATIONS])
# If we haven't found anything yet, we've truly lost. Give up.
if test "x$BOOT_JDK_FOUND" = xno; then
HELP_MSG_MISSING_DEPENDENCY([openjdk])
AC_MSG_NOTICE([Could not find a valid Boot JDK. $HELP_MSG])
AC_MSG_NOTICE([This might be fixed by explicitely setting --with-boot-jdk])
AC_MSG_ERROR([Cannot continue])
fi
# Setup proper paths for what we found
BOOT_RTJAR="$BOOT_JDK/jre/lib/rt.jar"
if test ! -f "$BOOT_RTJAR"; then
# On MacOSX it is called classes.jar
BOOT_RTJAR="$BOOT_JDK/../Classes/classes.jar"
if test -f "$BOOT_RTJAR"; then
# Remove the ..
BOOT_RTJAR="`cd ${BOOT_RTJAR%/*} && pwd`/${BOOT_RTJAR##*/}"
fi
fi
BOOT_TOOLSJAR="$BOOT_JDK/lib/tools.jar"
BOOT_JDK="$BOOT_JDK"
AC_SUBST(BOOT_RTJAR)
AC_SUBST(BOOT_TOOLSJAR)
AC_SUBST(BOOT_JDK)
# Setup tools from the Boot JDK.
BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JAVA,java)
BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JAVAC,javac)
BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JAVAH,javah)
BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JAVAP,javap)
BOOTJDK_CHECK_TOOL_IN_BOOTJDK(JAR,jar)
BOOTJDK_CHECK_TOOL_IN_BOOTJDK(RMIC,rmic)
BOOTJDK_CHECK_TOOL_IN_BOOTJDK(NATIVE2ASCII,native2ascii)
# Finally, set some other options...
# When compiling code to be executed by the Boot JDK, force jdk7 compatibility.
BOOT_JDK_SOURCETARGET="-source 7 -target 7"
AC_SUBST(BOOT_JDK_SOURCETARGET)
AC_SUBST(JAVAC_FLAGS)
])
AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS],
[
##############################################################################
#
# Specify options for anything that is run with the Boot JDK.
#
AC_ARG_WITH(boot-jdk-jvmargs, [AS_HELP_STRING([--with-boot-jdk-jvmargs],
[specify JVM arguments to be passed to all invocations of the Boot JDK, overriding the default values,
e.g --with-boot-jdk-jvmargs="-Xmx8G -enableassertions"])])
if test "x$with_boot_jdk_jvmargs" = x; then
# Not all JVM:s accept the same arguments on the command line.
# OpenJDK specific increase in thread stack for JDK build,
# well more specifically, when running javac.
if test "x$BUILD_NUM_BITS" = x32; then
STACK_SIZE=768
else
# Running Javac on a JVM on a 64-bit machine, the stack takes more space
# since 64-bit pointers are pushed on the stach. Apparently, we need
# to increase the stack space when javacing the JDK....
STACK_SIZE=1536
fi
# Minimum amount of heap memory.
ADD_JVM_ARG_IF_OK([-Xms64M],boot_jdk_jvmargs,[$JAVA])
if test "x$OPENJDK_TARGET_OS" = "xmacosx"; then
# Why does macosx need more heap? Its the huge JDK batch.
ADD_JVM_ARG_IF_OK([-Xmx1600M],boot_jdk_jvmargs,[$JAVA])
else
ADD_JVM_ARG_IF_OK([-Xmx1100M],boot_jdk_jvmargs,[$JAVA])
fi
# When is adding -client something that speeds up the JVM?
# ADD_JVM_ARG_IF_OK([-client],boot_jdk_jvmargs,[$JAVA])
@@ -329,7 +329,7 @@ if test "x$with_boot_jdk_jvmargs" = x; then
ADD_JVM_ARG_IF_OK([-XX:ThreadStackSize=$STACK_SIZE],boot_jdk_jvmargs,[$JAVA])
# Disable special log output when a debug build is used as Boot JDK...
ADD_JVM_ARG_IF_OK([-XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput],boot_jdk_jvmargs,[$JAVA])
fi
AC_SUBST(BOOT_JDK_JVMARGS, $boot_jdk_jvmargs)
])
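BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS above assembles the Boot JDK argument list by probing each candidate flag with ADD_JVM_ARG_IF_OK: a flag is kept only when running java with the flag plus -version prints a version string and no warning. A shell sketch of that filter, with placeholder flags:

  JAVA=${JAVA:-java}            # assumed Boot JDK launcher
  boot_jdk_jvmargs=""
  for arg in -Xms64M -Xmx1100M -XX:ThreadStackSize=1536; do
    output=$("$JAVA" $arg -version 2>&1)
    if echo "$output" | grep -q ' version "' && ! echo "$output" | grep -iq warn; then
      boot_jdk_jvmargs="$boot_jdk_jvmargs $arg"
    fi
  done
  echo "accepted:$boot_jdk_jvmargs"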

@@ -36,7 +36,7 @@ else
JAVA_EXEC_POS=1
endif
ifneq ($(word $(JAVA_EXEC_POS),$(SJAVAC_SERVER_JAVA)),$(word $(JAVA_EXEC_POS),$(JAVA)))
$(error Bootcycle builds are not possible if --with-sjavac-server-java is specified)
endif

@@ -25,78 +25,77 @@
AC_DEFUN([BPERF_CHECK_CORES],
[
AC_MSG_CHECKING([for number of cores])
NUM_CORES=1
FOUND_CORES=no
if test -f /proc/cpuinfo; then
# Looks like a Linux (or cygwin) system
NUM_CORES=`cat /proc/cpuinfo | grep -c processor`
FOUND_CORES=yes
elif test -x /usr/sbin/psrinfo; then
# Looks like a Solaris system
NUM_CORES=`LC_MESSAGES=C /usr/sbin/psrinfo -v | grep -c on-line`
FOUND_CORES=yes
elif test -x /usr/sbin/system_profiler; then
# Looks like a MacOSX system
NUM_CORES=`/usr/sbin/system_profiler -detailLevel full SPHardwareDataType | grep 'Cores' | awk '{print [$]5}'`
FOUND_CORES=yes
elif test -n "$NUMBER_OF_PROCESSORS"; then
# On windows, look in the env
NUM_CORES=$NUMBER_OF_PROCESSORS
FOUND_CORES=yes
fi
if test "x$FOUND_CORES" = xyes; then
AC_MSG_RESULT([$NUM_CORES])
else
AC_MSG_RESULT([could not detect number of cores, defaulting to 1])
AC_MSG_WARN([This will disable all parallelism from build!])
fi
])
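BPERF_CHECK_CORES above probes the core count with whatever the build host offers and falls back to 1 when nothing matches. The same cascade as a compact shell sketch:

  num_cores=1
  if test -f /proc/cpuinfo; then                  # Linux or cygwin
    num_cores=$(grep -c processor /proc/cpuinfo)
  elif test -x /usr/sbin/psrinfo; then            # Solaris
    num_cores=$(LC_MESSAGES=C /usr/sbin/psrinfo -v | grep -c on-line)
  elif test -x /usr/sbin/system_profiler; then    # MacOSX
    num_cores=$(/usr/sbin/system_profiler -detailLevel full SPHardwareDataType \
      | awk '/Cores/ {print $5}')
  elif test -n "$NUMBER_OF_PROCESSORS"; then      # Windows without cygwin
    num_cores=$NUMBER_OF_PROCESSORS
  fi
  echo "num_cores=$num_cores"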
AC_DEFUN([BPERF_CHECK_MEMORY_SIZE],
[
AC_MSG_CHECKING([for memory size])
# Default to 1024 MB
MEMORY_SIZE=1024
FOUND_MEM=no
if test -f /proc/meminfo; then
# Looks like a Linux (or cygwin) system
MEMORY_SIZE=`cat /proc/meminfo | grep MemTotal | awk '{print [$]2}'`
MEMORY_SIZE=`expr $MEMORY_SIZE / 1024`
FOUND_MEM=yes
elif test -x /usr/sbin/prtconf; then
# Looks like a Solaris system
MEMORY_SIZE=`/usr/sbin/prtconf | grep "Memory size" | awk '{ print [$]3 }'`
FOUND_MEM=yes
elif test -x /usr/sbin/system_profiler; then
# Looks like a MacOSX system
MEMORY_SIZE=`/usr/sbin/system_profiler -detailLevel full SPHardwareDataType | grep 'Memory' | awk '{print [$]2}'`
MEMORY_SIZE=`expr $MEMORY_SIZE \* 1024`
FOUND_MEM=yes
elif test "x$OPENJDK_BUILD_OS" = xwindows; then
# Windows, but without cygwin
MEMORY_SIZE=`wmic computersystem get totalphysicalmemory -value | grep = | cut -d "=" -f 2-`
MEMORY_SIZE=`expr $MEMORY_SIZE / 1024 / 1024`
FOUND_MEM=yes
fi
if test "x$FOUND_MEM" = xyes; then
AC_MSG_RESULT([$MEMORY_SIZE MB])
else
AC_MSG_RESULT([could not detect memory size, defaulting to 1024 MB])
AC_MSG_WARN([This might seriously impact build performance!])
fi
])
AC_DEFUN_ONCE([BPERF_SETUP_BUILD_CORES],
[
# How many cores do we have on this build system?
AC_ARG_WITH(num-cores, [AS_HELP_STRING([--with-num-cores],
[number of cores in the build system, e.g. --with-num-cores=8 @<:@probed@:>@])])
if test "x$with_num_cores" = x; then
# The number of cores were not specified, try to probe them.
BPERF_CHECK_CORES
@@ -110,7 +109,7 @@ AC_DEFUN_ONCE([BPERF_SETUP_BUILD_MEMORY],
[
# How much memory do we have on this build system?
AC_ARG_WITH(memory-size, [AS_HELP_STRING([--with-memory-size],
[memory (in MB) available in the build system, e.g. --with-memory-size=1024 @<:@probed@:>@])])
if test "x$with_memory_size" = x; then
# The memory size was not specified, try to probe it.
BPERF_CHECK_MEMORY_SIZE
@@ -125,7 +124,7 @@ AC_DEFUN_ONCE([BPERF_SETUP_BUILD_JOBS],
# Provide a decent default number of parallel jobs for make depending on
# number of cores, amount of memory and machine architecture.
AC_ARG_WITH(jobs, [AS_HELP_STRING([--with-jobs],
[number of parallel jobs to let make run @<:@calculated based on cores and memory@:>@])])
if test "x$with_jobs" = x; then
# Number of jobs was not specified, calculate.
AC_MSG_CHECKING([for appropriate number of jobs to run in parallel])
@@ -157,179 +156,178 @@ AC_DEFUN([BPERF_SETUP_CCACHE],
AC_DEFUN([BPERF_SETUP_CCACHE],
[
AC_ARG_ENABLE([ccache],
[AS_HELP_STRING([--disable-ccache],
[disable using ccache to speed up recompilations @<:@enabled@:>@])],
[ENABLE_CCACHE=${enable_ccache}], [ENABLE_CCACHE=yes])
if test "x$ENABLE_CCACHE" = xyes; then
OLD_PATH="$PATH"
if test "x$TOOLS_DIR" != x; then
PATH=$TOOLS_DIR:$PATH
fi
AC_PATH_PROG(CCACHE, ccache)
PATH="$OLD_PATH"
else
AC_MSG_CHECKING([for ccache])
AC_MSG_RESULT([explicitly disabled])
CCACHE=
fi
AC_SUBST(CCACHE)
AC_ARG_WITH([ccache-dir],
[AS_HELP_STRING([--with-ccache-dir],
[where to store ccache files @<:@~/.ccache@:>@])])
if test "x$with_ccache_dir" != x; then
# When using a non home ccache directory, assume the use is to share ccache files
# with other users. Thus change the umask.
SET_CCACHE_DIR="CCACHE_DIR=$with_ccache_dir CCACHE_UMASK=002"
fi
CCACHE_FOUND=""
if test "x$CCACHE" != x; then
BPERF_SETUP_CCACHE_USAGE
fi
])
AC_DEFUN([BPERF_SETUP_CCACHE_USAGE],
[
if test "x$CCACHE" != x; then
CCACHE_FOUND="true"
# Only use ccache if it is 3.1.4 or later, which supports
# precompiled headers.
AC_MSG_CHECKING([if ccache supports precompiled headers])
HAS_GOOD_CCACHE=`($CCACHE --version | head -n 1 | grep -E 3.1.@<:@456789@:>@) 2> /dev/null`
if test "x$HAS_GOOD_CCACHE" = x; then
AC_MSG_RESULT([no, disabling ccache])
CCACHE=
else
AC_MSG_RESULT([yes])
AC_MSG_CHECKING([if C-compiler supports ccache precompiled headers])
PUSHED_FLAGS="$CXXFLAGS"
CXXFLAGS="-fpch-preprocess $CXXFLAGS"
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [])], [CC_KNOWS_CCACHE_TRICK=yes], [CC_KNOWS_CCACHE_TRICK=no])
CXXFLAGS="$PUSHED_FLAGS"
if test "x$CC_KNOWS_CCACHE_TRICK" = xyes; then
AC_MSG_RESULT([yes])
else
AC_MSG_RESULT([no, disabling ccaching of precompiled headers])
CCACHE=
fi
fi
fi
if test "x$CCACHE" != x; then
CCACHE_SLOPPINESS=time_macros
CCACHE="CCACHE_COMPRESS=1 $SET_CCACHE_DIR CCACHE_SLOPPINESS=$CCACHE_SLOPPINESS $CCACHE"
CCACHE_FLAGS=-fpch-preprocess
if test "x$SET_CCACHE_DIR" != x; then
mkdir -p $CCACHE_DIR > /dev/null 2>&1
chmod a+rwxs $CCACHE_DIR > /dev/null 2>&1
fi
fi
])
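# A minimal standalone sketch (not part of this patch) of what the CCACHE
# wrapper assembled above expands to on a developer machine; the shared cache
# path, the source file name and the 3.1.4 version floor are assumptions
# taken from the checks above.
if ccache --version | head -n 1 | grep -qE '3\.1\.([4-9]|[0-9][0-9])'; then
  # Compress the cache, share it through a common directory, and ignore
  # __TIME__/__DATE__ differences so precompiled headers stay cacheable.
  CCACHE_COMPRESS=1 CCACHE_DIR=/shared/.ccache CCACHE_UMASK=002 \
      CCACHE_SLOPPINESS=time_macros ccache g++ -fpch-preprocess -c foo.cpp
fi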
AC_DEFUN_ONCE([BPERF_SETUP_PRECOMPILED_HEADERS],
[
  ###############################################################################
  #
  # Can the C/C++ compiler use precompiled headers?
  #
  AC_ARG_ENABLE([precompiled-headers], [AS_HELP_STRING([--disable-precompiled-headers],
      [disable using precompiled headers when compiling C++ @<:@enabled@:>@])],
      [ENABLE_PRECOMPH=${enable_precompiled_headers}], [ENABLE_PRECOMPH=yes])

  USE_PRECOMPILED_HEADER=1
  if test "x$ENABLE_PRECOMPH" = xno; then
    USE_PRECOMPILED_HEADER=0
  fi

  if test "x$ENABLE_PRECOMPH" = xyes; then
    # Check that the compiler actually supports precomp headers.
    if test "x$GCC" = xyes; then
      AC_MSG_CHECKING([that precompiled headers work])
      echo "int alfa();" > conftest.h
      $CXX -x c++-header conftest.h -o conftest.hpp.gch 2>&AS_MESSAGE_LOG_FD >&AS_MESSAGE_LOG_FD
      if test ! -f conftest.hpp.gch; then
        USE_PRECOMPILED_HEADER=0
        AC_MSG_RESULT([no])
      else
        AC_MSG_RESULT([yes])
      fi
      rm -f conftest.h conftest.hpp.gch
    fi
  fi

  AC_SUBST(USE_PRECOMPILED_HEADER)
])
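# A standalone sketch (assumptions: g++ on PATH, a writable scratch directory)
# of the same probe configure runs above: GCC-style compilers accept
# precompiled headers only if compiling a header with -x c++-header actually
# produces a .gch file.
echo "int alfa();" > conftest.h
if g++ -x c++-header conftest.h -o conftest.hpp.gch 2>/dev/null \
    && test -f conftest.hpp.gch; then
  echo "precompiled headers work"
else
  echo "precompiled headers not supported"
fi
rm -f conftest.h conftest.hpp.gch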
AC_DEFUN_ONCE([BPERF_SETUP_SMART_JAVAC],
[
  AC_ARG_WITH(sjavac-server-java, [AS_HELP_STRING([--with-sjavac-server-java],
      [use this java binary for running the sjavac background server @<:@Boot JDK java@:>@])])

  if test "x$with_sjavac_server_java" != x; then
    SJAVAC_SERVER_JAVA="$with_sjavac_server_java"
    FOUND_VERSION=`$SJAVAC_SERVER_JAVA -version 2>&1 | grep " version \""`
    if test "x$FOUND_VERSION" = x; then
      AC_MSG_ERROR([Could not execute server java: $SJAVAC_SERVER_JAVA])
    fi
  else
    SJAVAC_SERVER_JAVA=""
    # Hotspot specific options.
    ADD_JVM_ARG_IF_OK([-verbosegc],SJAVAC_SERVER_JAVA,[$JAVA])
    # JRockit specific options.
    ADD_JVM_ARG_IF_OK([-Xverbose:gc],SJAVAC_SERVER_JAVA,[$JAVA])
    SJAVAC_SERVER_JAVA="$JAVA $SJAVAC_SERVER_JAVA"
  fi
  AC_SUBST(SJAVAC_SERVER_JAVA)

  if test "$MEMORY_SIZE" -gt "2500"; then
    ADD_JVM_ARG_IF_OK([-d64],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
    if test "$JVM_ARG_OK" = true; then
      JVM_64BIT=true
      JVM_ARG_OK=false
    fi
  fi

  if test "$JVM_64BIT" = true; then
    if test "$MEMORY_SIZE" -gt "17000"; then
      ADD_JVM_ARG_IF_OK([-Xms10G -Xmx10G],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
    fi
    if test "$MEMORY_SIZE" -gt "10000" && test "$JVM_ARG_OK" = false; then
      ADD_JVM_ARG_IF_OK([-Xms6G -Xmx6G],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
    fi
    if test "$MEMORY_SIZE" -gt "5000" && test "$JVM_ARG_OK" = false; then
      ADD_JVM_ARG_IF_OK([-Xms1G -Xmx3G],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
    fi
    if test "$MEMORY_SIZE" -gt "3800" && test "$JVM_ARG_OK" = false; then
      ADD_JVM_ARG_IF_OK([-Xms1G -Xmx2500M],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
    fi
  fi
  if test "$MEMORY_SIZE" -gt "2500" && test "$JVM_ARG_OK" = false; then
    ADD_JVM_ARG_IF_OK([-Xms1000M -Xmx1500M],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
  fi
  if test "$MEMORY_SIZE" -gt "1000" && test "$JVM_ARG_OK" = false; then
    ADD_JVM_ARG_IF_OK([-Xms400M -Xmx1100M],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
  fi
  if test "$JVM_ARG_OK" = false; then
    ADD_JVM_ARG_IF_OK([-Xms256M -Xmx512M],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
  fi

  AC_MSG_CHECKING([whether to use sjavac])
  AC_ARG_ENABLE([sjavac], [AS_HELP_STRING([--enable-sjavac],
      [use sjavac to do fast incremental compiles @<:@disabled@:>@])],
      [ENABLE_SJAVAC="${enableval}"], [ENABLE_SJAVAC='no'])
  AC_MSG_RESULT([$ENABLE_SJAVAC])
  AC_SUBST(ENABLE_SJAVAC)

  if test "x$ENABLE_SJAVAC" = xyes; then
    SJAVAC_SERVER_DIR="$OUTPUT_ROOT/javacservers"
  else
    SJAVAC_SERVER_DIR=
  fi
  AC_SUBST(SJAVAC_SERVER_DIR)
])
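# A rough standalone sketch (not from this patch) of the ADD_JVM_ARG_IF_OK
# idea used above: a flag is accepted only if "java <flag> -version" exits
# cleanly, so heap sizes can be probed from largest to smallest. MEMORY_SIZE
# in MB and the java on PATH are assumed inputs here.
probe_jvm_arg() {
  # $1 = candidate flags; print them only if the local java accepts them
  if java $1 -version > /dev/null 2>&1; then
    echo "$1"
  fi
}
MEMORY_SIZE=8000
SERVER_HEAP=`probe_jvm_arg "-Xms1G -Xmx3G"`
if test "x$SERVER_HEAP" = x; then
  SERVER_HEAP=`probe_jvm_arg "-Xms256M -Xmx512M"`
fi
echo "sjavac server would run with: java $SERVER_HEAP"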

View file

@@ -25,223 +25,223 @@
AC_DEFUN_ONCE([BDEPS_SCAN_FOR_BUILDDEPS],
[
define(LIST_OF_BUILD_DEPENDENCIES,)
if test "x$with_builddeps_server" != x || test "x$with_builddeps_conf" != x; then
if test "x$with_builddeps_conf" != x; then
AC_MSG_CHECKING([for supplied builddeps configuration file])
builddepsfile=$with_builddeps_conf
if test -s $builddepsfile; then
. $builddepsfile
AC_MSG_RESULT([loaded!])
else
AC_MSG_ERROR([The given builddeps conf file $with_builddeps_conf could not be loaded!])
fi
else
AC_MSG_CHECKING([for builddeps.conf files in sources...])
builddepsfile=`mktemp`
touch $builddepsfile
# Put all found confs into a single file.
find ${SRC_ROOT} -name builddeps.conf -exec cat \{\} \; >> $builddepsfile
# Source the file to acquire the variables
if test -s $builddepsfile; then
. $builddepsfile
AC_MSG_RESULT([found at least one!])
else
AC_MSG_ERROR([Could not find any builddeps.conf at all!])
fi
fi
# Create build and target names that use _ instead of "-" and ".".
# This is necessary to use them in variable names.
build_var=`echo ${OPENJDK_BUILD_AUTOCONF_NAME} | tr '-' '_' | tr '.' '_'`
target_var=`echo ${OPENJDK_TARGET_AUTOCONF_NAME} | tr '-' '_' | tr '.' '_'`
# Extract rewrite information for build and target
eval rewritten_build=\${REWRITE_${build_var}}
if test "x$rewritten_build" = x; then
rewritten_build=${OPENJDK_BUILD_AUTOCONF_NAME}
echo Build stays the same $rewritten_build
else
echo Rewriting build for builddeps into $rewritten_build
fi
eval rewritten_target=\${REWRITE_${target_var}}
if test "x$rewritten_target" = x; then
rewritten_target=${OPENJDK_TARGET_AUTOCONF_NAME}
echo Target stays the same $rewritten_target
else
echo Rewriting target for builddeps into $rewritten_target
fi
rewritten_build_var=`echo ${rewritten_build} | tr '-' '_' | tr '.' '_'`
rewritten_target_var=`echo ${rewritten_target} | tr '-' '_' | tr '.' '_'`
fi
AC_CHECK_PROGS(BDEPS_UNZIP, [7z unzip])
if test "x$BDEPS_UNZIP" = x7z; then
BDEPS_UNZIP="7z x"
fi
AC_CHECK_PROGS(BDEPS_FTP, [wget lftp ftp])
])
AC_DEFUN([BDEPS_FTPGET],
[
# $1 is the ftp://abuilddeps.server.com/libs/cups.zip
# $2 is the local file name for the downloaded file.
VALID_TOOL=no
if test "x$BDEPS_FTP" = xwget; then
VALID_TOOL=yes
wget -O $2 $1
fi
if test "x$BDEPS_FTP" = xlftp; then
VALID_TOOL=yes
lftp -c "get $1 -o $2"
fi
if test "x$BDEPS_FTP" = xftp; then
VALID_TOOL=yes
FTPSERVER=`echo $1 | cut -f 3 -d '/'`
FTPPATH=`echo $1 | cut -f 4- -d '/'`
FTPUSERPWD=${FTPSERVER%%@*}
if test "x$FTPSERVER" != "x$FTPUSERPWD"; then
FTPUSER=${userpwd%%:*}
FTPPWD=${userpwd#*@}
FTPSERVER=${FTPSERVER#*@}
else
FTPUSER=ftp
FTPPWD=ftp
fi
# the "pass" command does not work on some
# ftp clients (read ftp.exe) but if it works,
# passive mode is better!
( \
echo "user $FTPUSER $FTPPWD" ; \
echo "pass" ; \
echo "bin" ; \
echo "get $FTPPATH $2" ; \
) | ftp -in $FTPSERVER
fi
if test "x$VALID_TOOL" != xyes; then
AC_MSG_ERROR([I do not know how to use the tool: $BDEPS_FTP])
fi
])
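# For reference, a hedged sketch of what the three download paths above do
# when run by hand; the URL and output name are placeholders.
URL=ftp://builddeps.example.com/libs/cups.zip
OUT=/tmp/cups.zip
wget -O "$OUT" "$URL"            # wget path
lftp -c "get $URL -o $OUT"       # lftp path
# The plain ftp path first splits the server, path and optional user:password
# out of the URL, which is what the cut and %%/# parameter expansions above do.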
AC_DEFUN([BDEPS_CHECK_MODULE],
[
define([LIST_OF_BUILD_DEPENDENCIES],LIST_OF_BUILD_DEPENDENCIES[$2=$3'\n'])
if test "x$with_builddeps_server" != x || test "x$with_builddeps_conf" != x; then
# Source the builddeps file again, to make sure it uses the latest variables!
. $builddepsfile
# Look for a target and build machine specific resource!
eval resource=\${builddep_$2_BUILD_${rewritten_build_var}_TARGET_${rewritten_target_var}}
if test "x$resource" = x; then
# Ok, lets instead look for a target specific resource
eval resource=\${builddep_$2_TARGET_${rewritten_target_var}}
fi
if test "x$resource" = x; then
# Ok, lets instead look for a build specific resource
eval resource=\${builddep_$2_BUILD_${rewritten_build_var}}
fi
if test "x$resource" = x; then
# Ok, lets instead look for a generic resource
# (The $2 comes from M4 and not the shell, thus no need for eval here.)
resource=${builddep_$2}
fi
if test "x$resource" != x; then
AC_MSG_NOTICE([Using builddeps $resource for $2])
# If the resource in the builddeps.conf file is an existing directory,
# for example /java/linux/cups
if test -d ${resource}; then
depdir=${resource}
else
BDEPS_FETCH($2, $resource, $with_builddeps_server, $with_builddeps_dir, depdir)
fi
# Source the builddeps file again, because in the previous command, the depdir
# was updated to point at the current build dependency install directory.
. $builddepsfile
# Now extract variables from the builddeps.conf files.
theroot=${builddep_$2_ROOT}
thecflags=${builddep_$2_CFLAGS}
thelibs=${builddep_$2_LIBS}
if test "x$depdir" = x; then
AC_MSG_ERROR([Could not download build dependency $2])
fi
$1=$depdir
if test "x$theroot" != x; then
$1="$theroot"
fi
if test "x$thecflags" != x; then
$1_CFLAGS="$thecflags"
fi
if test "x$thelibs" != x; then
$1_LIBS="$thelibs"
fi
m4_default([$4], [:])
m4_ifvaln([$5], [else $5])
fi
m4_ifvaln([$5], [else $5])
fi
])
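# A small sketch of the eval-based lookup used above: builddeps.conf defines
# flat variables such as builddep_<module>_TARGET_<platform>, and the most
# specific variable that exists wins. The names and values below are invented
# for illustration only.
builddep_cups=libs/general/cups_1_2_3.zip
builddep_cups_TARGET_linux_x86=libs/linux/cups_linux_x86.zip
module=cups
target_var=linux_x86
eval resource=\${builddep_${module}_TARGET_${target_var}}
if test "x$resource" = x; then
  eval resource=\${builddep_${module}}
fi
echo "would fetch: $resource"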
AC_DEFUN([BDEPS_FETCH],
[
# $1 is for example mymodule
# $2 is for example libs/general/libmymod_1_2_3.zip
# $3 is for example ftp://mybuilddeps.myserver.com/builddeps
# $4 is for example /localhome/builddeps
# $5 is the name of the variable into which we store the depdir, eg MYMOD
# Will download ftp://mybuilddeps.myserver.com/builddeps/libs/general/libmymod_1_2_3.zip and
# unzip into the directory: /localhome/builddeps/libmymod_1_2_3
filename=`basename $2`
filebase=`echo $filename | sed 's/\.[[^\.]]*$//'`
filebase=${filename%%.*}
extension=${filename#*.}
installdir=$4/$filebase
if test ! -f $installdir/$filename.unpacked; then
AC_MSG_NOTICE([Downloading build dependency $1 from $3/$2 and installing into $installdir])
if test ! -d $installdir; then
mkdir -p $installdir
fi
if test ! -d $installdir; then
AC_MSG_ERROR([Could not create directory $installdir])
fi
tmpfile=`mktemp $installdir/$1.XXXXXXXXX`
touch $tmpfile
if test ! -f $tmpfile; then
AC_MSG_ERROR([Could not create files in directory $installdir])
fi
BDEPS_FTPGET([$3/$2] , [$tmpfile])
mv $tmpfile $installdir/$filename
if test ! -s $installdir/$filename; then
AC_MSG_ERROR([Could not download $3/$2])
fi
case "$extension" in
zip) echo "Unzipping $installdir/$filename..."
(cd $installdir ; rm -f $installdir/$filename.unpacked ; $BDEPS_UNZIP $installdir/$filename > /dev/null && touch $installdir/$filename.unpacked)
;;
tar.gz) echo "Untaring $installdir/$filename..."
(cd $installdir ; rm -f $installdir/$filename.unpacked ; tar xzf $installdir/$filename && touch $installdir/$filename.unpacked)
;;
tgz) echo "Untaring $installdir/$filename..."
(cd $installdir ; rm -f $installdir/$filename.unpacked ; tar xzf $installdir/$filename && touch $installdir/$filename.unpacked)
;;
*) AC_MSG_ERROR([Cannot handle build dependency archive with extension $extension])
;;
esac
fi
if test -f $installdir/$filename.unpacked; then
$5=$installdir
fi
])
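# A hedged sketch of the marker-file pattern above: the archive is downloaded
# and unpacked once, and a ".unpacked" stamp file prevents the work from being
# repeated on later configure runs. The paths below are placeholders.
installdir=/localhome/builddeps/libmymod_1_2_3
filename=libmymod_1_2_3.zip
if test ! -f $installdir/$filename.unpacked; then
  mkdir -p $installdir
  ( cd $installdir && rm -f $filename.unpacked \
      && unzip -q $filename && touch $filename.unpacked )
fi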
AC_DEFUN_ONCE([BDEPS_CONFIGURE_BUILDDEPS],
[
AC_ARG_WITH(builddeps-conf, [AS_HELP_STRING([--with-builddeps-conf],
[use this configuration file for the builddeps])])
AC_ARG_WITH(builddeps-server, [AS_HELP_STRING([--with-builddeps-server],
[download and use build dependencies from this server url])])
AC_ARG_WITH(builddeps-dir, [AS_HELP_STRING([--with-builddeps-dir],
[store downloaded build dependencies here @<:@/localhome/builddeps@:>@])],
[],
[with_builddeps_dir=/localhome/builddeps])
AC_ARG_WITH(builddeps-group, [AS_HELP_STRING([--with-builddeps-group],
[chgrp the downloaded build dependencies to this group])])
])

View file

@@ -67,14 +67,14 @@ UNZIP="@UNZIP@"
SRC_ROOT="@SRC_ROOT@"
if [ "$OPENJDK_TARGET_OS" = "windows" ]; then
PATH="@VS_PATH@"
fi
# Now locate the main script and run it.
REAL_COMPARE_SCRIPT="$SRC_ROOT/common/bin/compare.sh"
if [ ! -e "$REAL_COMPARE_SCRIPT" ]; then
echo "Error: Cannot locate compare script, it should have been in $REAL_COMPARE_SCRIPT"
exit 1
fi
. "$REAL_COMPARE_SCRIPT" "$@"

View file

@@ -88,7 +88,6 @@ check_hg_updates() {
check_autoconf_timestamps
fi
fi
fi
}
@@ -120,28 +119,28 @@ conf_openjdk_target=
for conf_option
do
case $conf_option in
--openjdk-target=*)
conf_openjdk_target=`expr "X$conf_option" : '[^=]*=\(.*\)'`
continue ;;
--debug-configure)
if test "x$conf_debug_configure" != xrecursive; then
conf_debug_configure=true
export conf_debug_configure
fi
continue ;;
*)
conf_processed_arguments=("${conf_processed_arguments[@]}" "$conf_option") ;;
esac
case $conf_option in
-build | --build | --buil | --bui | --bu |-build=* | --build=* | --buil=* | --bui=* | --bu=*)
conf_legacy_crosscompile="$conf_legacy_crosscompile $conf_option" ;;
-target | --target | --targe | --targ | --tar | --ta | --t | -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*)
conf_legacy_crosscompile="$conf_legacy_crosscompile $conf_option" ;;
-host | --host | --hos | --ho | -host=* | --host=* | --hos=* | --ho=*)
conf_legacy_crosscompile="$conf_legacy_crosscompile $conf_option" ;;
-help | --help | --hel | --he | -h)
conf_print_help=true ;;
esac
done
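# A standalone sketch (not part of the script above) of the two parsing tricks
# used in that loop: expr strips the value out of --opt=value, and a case
# pattern list accepts the abbreviated spellings autoconf-style scripts allow.
# The argument value is an assumed example.
arg=--openjdk-target=aarch64-linux-gnu
value=`expr "X$arg" : '[^=]*=\(.*\)'`
case $arg in
  -target | --target | --targe | --targ | --tar | --ta | --t | \
  -target=* | --target=* | --openjdk-target=*)
    echo "cross target: $value" ;;
esac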

File diff suppressed because it is too large

View file

@@ -25,194 +25,194 @@
AC_DEFUN_ONCE([HELP_SETUP_DEPENDENCY_HELP],
[
AC_CHECK_PROGS(PKGHANDLER, apt-get yum port pkgutil pkgadd)
])
AC_DEFUN([HELP_MSG_MISSING_DEPENDENCY],
[
# Print a helpful message on how to acquire the necessary build dependency.
# $1 is the help tag: freetyp2, cups, pulse, alsa etc
MISSING_DEPENDENCY=$1
PKGHANDLER_COMMAND=
case $PKGHANDLER in
apt-get)
apt_help $MISSING_DEPENDENCY ;;
yum)
yum_help $MISSING_DEPENDENCY ;;
port)
port_help $MISSING_DEPENDENCY ;;
pkgutil)
pkgutil_help $MISSING_DEPENDENCY ;;
pkgadd)
pkgadd_help $MISSING_DEPENDENCY ;;
* )
break ;;
esac
if test "x$PKGHANDLER_COMMAND" != x; then
HELP_MSG="You might be able to fix this by running '$PKGHANDLER_COMMAND'."
fi
])
cygwin_help() {
  case $1 in
    unzip)
      PKGHANDLER_COMMAND="cd <location of cygwin setup.exe> && cmd /c setup -q -P unzip" ;;
    zip)
      PKGHANDLER_COMMAND="cd <location of cygwin setup.exe> && cmd /c setup -q -P zip" ;;
    make)
      PKGHANDLER_COMMAND="cd <location of cygwin setup.exe> && cmd /c setup -q -P make" ;;
    * )
      break ;;
  esac
}
apt_help() {
  case $1 in
    devkit)
      PKGHANDLER_COMMAND="sudo apt-get install build-essential" ;;
    openjdk)
      PKGHANDLER_COMMAND="sudo apt-get install openjdk-7-jdk" ;;
    alsa)
      PKGHANDLER_COMMAND="sudo apt-get install libasound2-dev" ;;
    cups)
      PKGHANDLER_COMMAND="sudo apt-get install libcups2-dev" ;;
    freetype2)
      PKGHANDLER_COMMAND="sudo apt-get install libfreetype6-dev" ;;
    pulse)
      PKGHANDLER_COMMAND="sudo apt-get install libpulse-dev" ;;
    x11)
      PKGHANDLER_COMMAND="sudo apt-get install libX11-dev libxext-dev libxrender-dev libxtst-dev libxt-dev" ;;
    ccache)
      PKGHANDLER_COMMAND="sudo apt-get install ccache" ;;
    * )
      break ;;
  esac
}
yum_help() {
  case $1 in
    devkit)
      PKGHANDLER_COMMAND="sudo yum groupinstall \"Development Tools\"" ;;
    openjdk)
      PKGHANDLER_COMMAND="sudo yum install java-1.7.0-openjdk" ;;
    alsa)
      PKGHANDLER_COMMAND="sudo yum install alsa-lib-devel" ;;
    cups)
      PKGHANDLER_COMMAND="sudo yum install cups-devel" ;;
    freetype2)
      PKGHANDLER_COMMAND="sudo yum install freetype-devel" ;;
    pulse)
      PKGHANDLER_COMMAND="sudo yum install pulseaudio-libs-devel" ;;
    x11)
      PKGHANDLER_COMMAND="sudo yum install libXtst-devel libXt-devel libXrender-devel" ;;
    ccache)
      PKGHANDLER_COMMAND="sudo yum install ccache" ;;
    * )
      break ;;
  esac
}
port_help() {
PKGHANDLER_COMMAND=""
}
pkgutil_help() {
PKGHANDLER_COMMAND=""
}
pkgadd_help() {
PKGHANDLER_COMMAND=""
}
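# Usage sketch for the helpers above (values assumed): once configure has
# detected the package handler, a failed dependency check can print a
# ready-to-run install hint.
PKGHANDLER=apt-get
apt_help cups   # sets PKGHANDLER_COMMAND="sudo apt-get install libcups2-dev"
echo "You might be able to fix this by running '$PKGHANDLER_COMMAND'."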
AC_DEFUN_ONCE([HELP_PRINT_SUMMARY_AND_WARNINGS],
[
# Finally output some useful information to the user
if test "x$CCACHE_FOUND" != x; then
if test "x$HAS_GOOD_CCACHE" = x; then
CCACHE_STATUS="installed, but disabled (version older than 3.1.4)"
CCACHE_HELP_MSG="You have ccache installed, but it is a version prior to 3.1.4. Try upgrading."
else
CCACHE_STATUS="installed and in use"
fi
else
if test "x$GCC" = xyes; then
CCACHE_STATUS="not installed (consider installing)"
CCACHE_HELP_MSG="You do not have ccache installed. Try installing it."
else
CCACHE_STATUS="not available for your system"
fi
fi
printf "\n"
printf "====================================================\n"
printf "A new configuration has been successfully created in\n"
printf "$OUTPUT_ROOT\n"
if test "x$CONFIGURE_COMMAND_LINE" != x; then
printf "using configure arguments '$CONFIGURE_COMMAND_LINE'.\n"
else
printf "using default settings.\n"
fi
printf "\n"
printf "Configuration summary:\n"
printf "* Debug level: $DEBUG_LEVEL\n"
printf "* JDK variant: $JDK_VARIANT\n"
printf "* JVM variants: $with_jvm_variants\n"
printf "* OpenJDK target: OS: $OPENJDK_TARGET_OS, CPU architecture: $OPENJDK_TARGET_CPU_ARCH, address length: $OPENJDK_TARGET_CPU_BITS\n"
printf "\n"
printf "Tools summary:\n"
if test "x$OPENJDK_BUILD_OS" = "xwindows"; then
printf "* Environment: $WINDOWS_ENV_VENDOR version $WINDOWS_ENV_VERSION (root at $WINDOWS_ENV_ROOT_PATH)\n"
fi
printf "* Boot JDK: $BOOT_JDK_VERSION (at $BOOT_JDK)\n"
printf "* C Compiler: $CC_VENDOR version $CC_VERSION (at $CC)\n"
printf "* C++ Compiler: $CXX_VENDOR version $CXX_VERSION (at $CXX)\n"
printf "\n"
printf "Build performance summary:\n"
printf "* Cores to use: $JOBS\n"
printf "* Memory limit: $MEMORY_SIZE MB\n"
printf "* ccache status: $CCACHE_STATUS\n"
printf "\n"
if test "x$CCACHE_HELP_MSG" != x && test "x$HIDE_PERFORMANCE_HINTS" = "xno"; then
printf "Build performance tip: ccache gives a tremendous speedup for C++ recompilations.\n"
printf "$CCACHE_HELP_MSG\n"
HELP_MSG_MISSING_DEPENDENCY([ccache])
printf "$HELP_MSG\n"
printf "\n"
fi
if test "x$BUILDING_MULTIPLE_JVM_VARIANTS" = "xyes"; then
printf "NOTE: You have requested to build more than one version of the JVM, which\n"
printf "will result in longer build times.\n"
printf "\n"
fi
if test "x$FOUND_ALT_VARIABLES" != "x"; then
printf "WARNING: You have old-style ALT_ environment variables set.\n"
printf "These are not respected, and will be ignored. It is recommended\n"
printf "that you clean your environment. The following variables are set:\n"
printf "$FOUND_ALT_VARIABLES\n"
printf "\n"
fi
if test "x$OUTPUT_DIR_IS_LOCAL" != "xyes"; then
printf "WARNING: Your build output directory is not on a local disk.\n"
printf "This will severely degrade build performance!\n"
printf "It is recommended that you create an output directory on a local disk,\n"
printf "and run the configure script again from that directory.\n"
printf "\n"
fi
if test "x$IS_RECONFIGURE" = "xyes"; then
printf "WARNING: The result of this configuration has overridden an older\n"
printf "configuration. You *should* run 'make clean' to make sure you get a\n"
printf "proper build. Failure to do so might result in strange build problems.\n"
printf "\n"
fi
])

View file

@@ -25,235 +25,234 @@
AC_DEFUN_ONCE([JDKOPT_SETUP_JDK_VARIANT],
[
###############################################################################
#
# Check which variant of the JDK that we want to build.
# Currently we have:
# normal: standard edition
# but the custom make system may add other variants
#
# Effectively the JDK variant gives a name to a specific set of
# modules to compile into the JDK. In the future, these modules
# might even be Jigsaw modules.
#
AC_MSG_CHECKING([which variant of the JDK to build])
AC_ARG_WITH([jdk-variant], [AS_HELP_STRING([--with-jdk-variant],
[JDK variant to build (normal) @<:@normal@:>@])])
if test "x$with_jdk_variant" = xnormal || test "x$with_jdk_variant" = x; then
JDK_VARIANT="normal"
else
AC_MSG_ERROR([The available JDK variants are: normal])
fi
AC_SUBST(JDK_VARIANT)
AC_MSG_RESULT([$JDK_VARIANT])
])
AC_DEFUN_ONCE([JDKOPT_SETUP_JVM_VARIANTS],
[
###############################################################################
#
# Check which variants of the JVM that we want to build.
# Currently we have:
# server: normal interpreter and a tiered C1/C2 compiler
# client: normal interpreter and C1 (no C2 compiler) (only 32-bit platforms)
# minimal1: reduced form of client with optional VM services and features stripped out
# kernel: kernel footprint JVM that passes the TCK without major performance problems,
# ie normal interpreter and C1, only the serial GC, kernel jvmti etc
# zero: no machine code interpreter, no compiler
# zeroshark: zero interpreter and shark/llvm compiler backend
AC_MSG_CHECKING([which variants of the JVM to build])
AC_ARG_WITH([jvm-variants], [AS_HELP_STRING([--with-jvm-variants],
[JVM variants (separated by commas) to build (server, client, minimal1, kernel, zero, zeroshark) @<:@server@:>@])])
if test "x$with_jvm_variants" = x; then
with_jvm_variants="server"
fi
JVM_VARIANTS=",$with_jvm_variants,"
TEST_VARIANTS=`$ECHO "$JVM_VARIANTS" | $SED -e 's/server,//' -e 's/client,//' -e 's/minimal1,//' -e 's/kernel,//' -e 's/zero,//' -e 's/zeroshark,//'`
if test "x$TEST_VARIANTS" != "x,"; then
AC_MSG_ERROR([The available JVM variants are: server, client, minimal1, kernel, zero, zeroshark])
fi
AC_MSG_RESULT([$with_jvm_variants])
JVM_VARIANT_SERVER=`$ECHO "$JVM_VARIANTS" | $SED -e '/,server,/!s/.*/false/g' -e '/,server,/s/.*/true/g'`
JVM_VARIANT_CLIENT=`$ECHO "$JVM_VARIANTS" | $SED -e '/,client,/!s/.*/false/g' -e '/,client,/s/.*/true/g'`
JVM_VARIANT_MINIMAL1=`$ECHO "$JVM_VARIANTS" | $SED -e '/,minimal1,/!s/.*/false/g' -e '/,minimal1,/s/.*/true/g'`
JVM_VARIANT_KERNEL=`$ECHO "$JVM_VARIANTS" | $SED -e '/,kernel,/!s/.*/false/g' -e '/,kernel,/s/.*/true/g'`
JVM_VARIANT_ZERO=`$ECHO "$JVM_VARIANTS" | $SED -e '/,zero,/!s/.*/false/g' -e '/,zero,/s/.*/true/g'`
JVM_VARIANT_ZEROSHARK=`$ECHO "$JVM_VARIANTS" | $SED -e '/,zeroshark,/!s/.*/false/g' -e '/,zeroshark,/s/.*/true/g'`
  if test "x$JVM_VARIANT_CLIENT" = xtrue; then
    if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
      AC_MSG_ERROR([You cannot build a client JVM for a 64-bit machine.])
    fi
  fi
  if test "x$JVM_VARIANT_KERNEL" = xtrue; then
    if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
      AC_MSG_ERROR([You cannot build a kernel JVM for a 64-bit machine.])
    fi
  fi
  if test "x$JVM_VARIANT_MINIMAL1" = xtrue; then
    if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
      AC_MSG_ERROR([You cannot build a minimal JVM for a 64-bit machine.])
    fi
  fi
# Replace the commas with AND for use in the build directory name.
ANDED_JVM_VARIANTS=`$ECHO "$JVM_VARIANTS" | $SED -e 's/^,//' -e 's/,$//' -e 's/,/AND/'`
COUNT_VARIANTS=`$ECHO "$JVM_VARIANTS" | $SED -e 's/server,/1/' -e 's/client,/1/' -e 's/minimal1,/1/' -e 's/kernel,/1/' -e 's/zero,/1/' -e 's/zeroshark,/1/'`
if test "x$COUNT_VARIANTS" != "x,1"; then
BUILDING_MULTIPLE_JVM_VARIANTS=yes
else
BUILDING_MULTIPLE_JVM_VARIANTS=no
fi
AC_SUBST(JVM_VARIANTS)
AC_SUBST(JVM_VARIANT_SERVER)
AC_SUBST(JVM_VARIANT_CLIENT)
AC_SUBST(JVM_VARIANT_MINIMAL1)
AC_SUBST(JVM_VARIANT_KERNEL)
AC_SUBST(JVM_VARIANT_ZERO)
AC_SUBST(JVM_VARIANT_ZEROSHARK)
  INCLUDE_SA=true
  if test "x$JVM_VARIANT_ZERO" = xtrue ; then
    INCLUDE_SA=false
  fi
  if test "x$JVM_VARIANT_ZEROSHARK" = xtrue ; then
    INCLUDE_SA=false
  fi
  AC_SUBST(INCLUDE_SA)

  if test "x$OPENJDK_TARGET_OS" = "xmacosx"; then
    MACOSX_UNIVERSAL="true"
  fi

  AC_SUBST(MACOSX_UNIVERSAL)
])
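# A small sketch of the comma-wrapping membership test used above: wrapping
# both the list and the candidate name in commas lets plain sed match whole
# words only. The variant list below is illustrative.
with_jvm_variants="server,zero"
JVM_VARIANTS=",$with_jvm_variants,"
JVM_VARIANT_SERVER=`echo "$JVM_VARIANTS" | sed -e '/,server,/!s/.*/false/g' -e '/,server,/s/.*/true/g'`
echo "server: $JVM_VARIANT_SERVER"   # prints "server: true"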
AC_DEFUN_ONCE([JDKOPT_SETUP_DEBUG_LEVEL],
[
###############################################################################
#
# Set the debug level
# release: no debug information, all optimizations, no asserts.
# fastdebug: debug information (-g), all optimizations, all asserts
# slowdebug: debug information (-g), no optimizations, all asserts
#
DEBUG_LEVEL="release"
AC_MSG_CHECKING([which debug level to use])
AC_ARG_ENABLE([debug], [AS_HELP_STRING([--enable-debug],
[set the debug level to fastdebug (shorthand for --with-debug-level=fastdebug) @<:@disabled@:>@])],
[
ENABLE_DEBUG="${enableval}"
DEBUG_LEVEL="fastdebug"
], [ENABLE_DEBUG="no"])
AC_ARG_WITH([debug-level], [AS_HELP_STRING([--with-debug-level],
[set the debug level (release, fastdebug, slowdebug) @<:@release@:>@])],
[
DEBUG_LEVEL="${withval}"
if test "x$ENABLE_DEBUG" = xyes; then
AC_MSG_ERROR([You cannot use both --enable-debug and --with-debug-level at the same time.])
fi
])
AC_MSG_RESULT([$DEBUG_LEVEL])
if test "x$DEBUG_LEVEL" != xrelease && \
test "x$DEBUG_LEVEL" != xfastdebug && \
test "x$DEBUG_LEVEL" != xslowdebug; then
AC_MSG_ERROR([Allowed debug levels are: release, fastdebug and slowdebug])
fi
###############################################################################
#
# Setup legacy vars/targets and new vars to deal with different debug levels.
#
case $DEBUG_LEVEL in
release )
VARIANT="OPT"
FASTDEBUG="false"
DEBUG_CLASSFILES="false"
BUILD_VARIANT_RELEASE=""
HOTSPOT_DEBUG_LEVEL="product"
HOTSPOT_EXPORT="product"
;;
fastdebug )
VARIANT="DBG"
FASTDEBUG="true"
DEBUG_CLASSFILES="true"
BUILD_VARIANT_RELEASE="-fastdebug"
HOTSPOT_DEBUG_LEVEL="fastdebug"
HOTSPOT_EXPORT="fastdebug"
;;
slowdebug )
VARIANT="DBG"
FASTDEBUG="false"
DEBUG_CLASSFILES="true"
BUILD_VARIANT_RELEASE="-debug"
HOTSPOT_DEBUG_LEVEL="jvmg"
HOTSPOT_EXPORT="debug"
;;
esac
#####
# Generate the legacy makefile targets for hotspot.
# The hotspot api for selecting the build artifacts, really, needs to be improved.
# JDK-7195896 will fix this on the hotspot side by using the JVM_VARIANT_* variables to
# determine what needs to be built. All we will need to set here is all_product, all_fastdebug etc
# But until then ...
HOTSPOT_TARGET=""
  if test "x$JVM_VARIANT_SERVER" = xtrue; then
    HOTSPOT_TARGET="$HOTSPOT_TARGET${HOTSPOT_DEBUG_LEVEL} "
  fi
  if test "x$JVM_VARIANT_CLIENT" = xtrue; then
    HOTSPOT_TARGET="$HOTSPOT_TARGET${HOTSPOT_DEBUG_LEVEL}1 "
  fi
  if test "x$JVM_VARIANT_MINIMAL1" = xtrue; then
    HOTSPOT_TARGET="$HOTSPOT_TARGET${HOTSPOT_DEBUG_LEVEL}minimal1 "
  fi
  if test "x$JVM_VARIANT_KERNEL" = xtrue; then
    HOTSPOT_TARGET="$HOTSPOT_TARGET${HOTSPOT_DEBUG_LEVEL}kernel "
  fi
  if test "x$JVM_VARIANT_ZERO" = xtrue; then
    HOTSPOT_TARGET="$HOTSPOT_TARGET${HOTSPOT_DEBUG_LEVEL}zero "
  fi
  if test "x$JVM_VARIANT_ZEROSHARK" = xtrue; then
    HOTSPOT_TARGET="$HOTSPOT_TARGET${HOTSPOT_DEBUG_LEVEL}shark "
  fi
HOTSPOT_TARGET="$HOTSPOT_TARGET docs export_$HOTSPOT_EXPORT"
# On Macosx universal binaries are produced, but they only contain
# 64 bit intel. This invalidates control of which jvms are built
# from configure, but only server is valid anyway. Fix this
# when hotspot makefiles are rewritten.
if test "x$MACOSX_UNIVERSAL" = xtrue; then
HOTSPOT_TARGET=universal_${HOTSPOT_EXPORT}
fi
#####
AC_SUBST(DEBUG_LEVEL)
AC_SUBST(VARIANT)
AC_SUBST(FASTDEBUG)
AC_SUBST(DEBUG_CLASSFILES)
AC_SUBST(BUILD_VARIANT_RELEASE)
])
@@ -264,7 +263,7 @@ AC_SUBST(BUILD_VARIANT_RELEASE)
AC_DEFUN_ONCE([JDKOPT_SETUP_OPEN_OR_CUSTOM],
[
AC_ARG_ENABLE([openjdk-only], [AS_HELP_STRING([--enable-openjdk-only],
[suppress building custom source even if present @<:@disabled@:>@])],,[enable_openjdk_only="no"])
AC_MSG_CHECKING([for presence of closed sources])
if test -d "$SRC_ROOT/jdk/src/closed"; then
@@ -301,100 +300,100 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_OPEN_OR_CUSTOM],
AC_DEFUN_ONCE([JDKOPT_SETUP_JDK_OPTIONS],
[
###############################################################################
#
# Should we build a JDK/JVM with headful support (ie a graphical ui)?
# We always build headless support.
#
AC_MSG_CHECKING([headful support])
AC_ARG_ENABLE([headful], [AS_HELP_STRING([--disable-headful],
[disable building headful support (graphical UI support) @<:@enabled@:>@])],
[SUPPORT_HEADFUL=${enable_headful}], [SUPPORT_HEADFUL=yes])
  SUPPORT_HEADLESS=yes
  BUILD_HEADLESS="BUILD_HEADLESS:=true"

  if test "x$SUPPORT_HEADFUL" = xyes; then
    # We are building both headful and headless.
    headful_msg="include support for both headful and headless"
  fi

  if test "x$SUPPORT_HEADFUL" = xno; then
    # Thus we are building headless only.
    BUILD_HEADLESS="BUILD_HEADLESS:=true"
    headful_msg="headless only"
  fi

  AC_MSG_RESULT([$headful_msg])

  AC_SUBST(SUPPORT_HEADLESS)
  AC_SUBST(SUPPORT_HEADFUL)
  AC_SUBST(BUILD_HEADLESS)
  # Control whether Hotspot runs Queens test after build.
  AC_ARG_ENABLE([hotspot-test-in-build], [AS_HELP_STRING([--enable-hotspot-test-in-build],
      [run the Queens test after Hotspot build @<:@disabled@:>@])],,
      [enable_hotspot_test_in_build=no])
  if test "x$enable_hotspot_test_in_build" = "xyes"; then
    TEST_IN_BUILD=true
  else
    TEST_IN_BUILD=false
  fi
  AC_SUBST(TEST_IN_BUILD)
###############################################################################
#
# Choose cacerts source file
#
AC_ARG_WITH(cacerts-file, [AS_HELP_STRING([--with-cacerts-file],
[specify alternative cacerts file])])
if test "x$with_cacerts_file" != x; then
CACERTS_FILE=$with_cacerts_file
else
CACERTS_FILE=${SRC_ROOT}/jdk/src/share/lib/security/cacerts
fi
AC_SUBST(CACERTS_FILE)
###############################################################################
#
# Enable or disable unlimited crypto
#
AC_ARG_ENABLE(unlimited-crypto, [AS_HELP_STRING([--enable-unlimited-crypto],
[Enable unlimited crypto policy @<:@disabled@:>@])],,
[enable_unlimited_crypto=no])
if test "x$enable_unlimited_crypto" = "xyes"; then
UNLIMITED_CRYPTO=true
else
UNLIMITED_CRYPTO=false
fi
AC_SUBST(UNLIMITED_CRYPTO)
###############################################################################
#
# Enable or disable the elliptic curve crypto implementation
#
AC_DEFUN_ONCE([JDKOPT_DETECT_INTREE_EC],
[
AC_MSG_CHECKING([if elliptic curve crypto implementation is present])
if test -d "${SRC_ROOT}/jdk/src/share/native/sun/security/ec/impl"; then
ENABLE_INTREE_EC=yes
AC_MSG_RESULT([yes])
else
ENABLE_INTREE_EC=no
AC_MSG_RESULT([no])
fi
AC_SUBST(ENABLE_INTREE_EC)
])
###############################################################################
#
# Compress jars
#
COMPRESS_JARS=false
AC_SUBST(COMPRESS_JARS)
])
###############################################################################
@@ -403,153 +402,152 @@ AC_SUBST(COMPRESS_JARS)
#
AC_DEFUN_ONCE([JDKOPT_SETUP_JDK_VERSION_NUMBERS],
[
# Source the version numbers
. $AUTOCONF_DIR/version-numbers
# Get the settings from parameters
AC_ARG_WITH(milestone, [AS_HELP_STRING([--with-milestone],
[Set milestone value for build @<:@internal@:>@])])
if test "x$with_milestone" = xyes; then
AC_MSG_ERROR([Milestone must have a value])
elif test "x$with_milestone" != x; then
MILESTONE="$with_milestone"
fi
if test "x$MILESTONE" = x; then
MILESTONE=internal
fi
AC_ARG_WITH(update-version, [AS_HELP_STRING([--with-update-version],
[Set update version value for build @<:@b00@:>@])])
if test "x$with_update_version" = xyes; then
AC_MSG_ERROR([Update version must have a value])
elif test "x$with_update_version" != x; then
JDK_UPDATE_VERSION="$with_update_version"
fi
AC_ARG_WITH(user-release-suffix, [AS_HELP_STRING([--with-user-release-suffix],
[Add a custom string to the version string if build number isn't set.@<:@username_builddateb00@:>@])])
if test "x$with_user_release_suffix" = xyes; then
AC_MSG_ERROR([Release suffix must have a value])
elif test "x$with_user_release_suffix" != x; then
USER_RELEASE_SUFFIX="$with_user_release_suffix"
fi
AC_ARG_WITH(build-number, [AS_HELP_STRING([--with-build-number],
[Set build number value for build @<:@b00@:>@])])
if test "x$with_build_number" = xyes; then
AC_MSG_ERROR([Build number must have a value])
elif test "x$with_build_number" != x; then
JDK_BUILD_NUMBER="$with_build_number"
fi
# Define default USER_RELEASE_SUFFIX if BUILD_NUMBER and USER_RELEASE_SUFFIX are not set
if test "x$JDK_BUILD_NUMBER" = x; then
JDK_BUILD_NUMBER=b00
if test "x$USER_RELEASE_SUFFIX" = x; then
BUILD_DATE=`date '+%Y_%m_%d_%H_%M'`
# Avoid [:alnum:] since it depends on the locale.
CLEAN_USERNAME=`echo "$USER" | $TR -d -c 'abcdefghijklmnopqrstuvqxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'`
USER_RELEASE_SUFFIX=`echo "${CLEAN_USERNAME}_${BUILD_DATE}" | $TR 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
fi
fi
# Now set the JDK version, milestone, build number etc.
AC_SUBST(USER_RELEASE_SUFFIX)
AC_SUBST(JDK_MAJOR_VERSION)
AC_SUBST(JDK_MINOR_VERSION)
AC_SUBST(JDK_MICRO_VERSION)
AC_SUBST(JDK_UPDATE_VERSION)
AC_SUBST(JDK_BUILD_NUMBER)
AC_SUBST(MILESTONE)
AC_SUBST(LAUNCHER_NAME)
AC_SUBST(PRODUCT_NAME)
AC_SUBST(PRODUCT_SUFFIX)
AC_SUBST(JDK_RC_PLATFORM_NAME)
AC_SUBST(COMPANY_NAME)
AC_SUBST(MACOSX_BUNDLE_NAME_BASE)
AC_SUBST(MACOSX_BUNDLE_ID_BASE)
COPYRIGHT_YEAR=`date +'%Y'`
AC_SUBST(COPYRIGHT_YEAR)
if test "x$JDK_UPDATE_VERSION" != x; then
JDK_VERSION="${JDK_MAJOR_VERSION}.${JDK_MINOR_VERSION}.${JDK_MICRO_VERSION}_${JDK_UPDATE_VERSION}"
else
JDK_VERSION="${JDK_MAJOR_VERSION}.${JDK_MINOR_VERSION}.${JDK_MICRO_VERSION}"
fi
AC_SUBST(JDK_VERSION)
COOKED_BUILD_NUMBER=`$ECHO $JDK_BUILD_NUMBER | $SED -e 's/^b//' -e 's/^0//'`
AC_SUBST(COOKED_BUILD_NUMBER)
])
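As a standalone illustration of the version logic above (values are assumed, not part of the change), the build-number cooking and the default release suffix behave roughly like this in plain sh:

JDK_BUILD_NUMBER=b07                                       # assumed example value
echo $JDK_BUILD_NUMBER | sed -e 's/^b//' -e 's/^0//'       # prints 7, i.e. COOKED_BUILD_NUMBER
BUILD_DATE=`date '+%Y_%m_%d_%H_%M'`
echo "duke_${BUILD_DATE}" | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'   # e.g. duke_2013_10_17_16_12 ("duke" is an assumed user name)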
AC_DEFUN_ONCE([JDKOPT_SETUP_BUILD_TWEAKS],
[
HOTSPOT_MAKE_ARGS="$HOTSPOT_TARGET"
AC_SUBST(HOTSPOT_MAKE_ARGS)
# The name of the Service Agent library.
SALIB_NAME="${LIBRARY_PREFIX}saproc${SHARED_LIBRARY_SUFFIX}"
if test "x$OPENJDK_TARGET_OS" = "xwindows"; then
SALIB_NAME="${LIBRARY_PREFIX}sawindbg${SHARED_LIBRARY_SUFFIX}"
fi
AC_SUBST(SALIB_NAME)
])
AC_DEFUN_ONCE([JDKOPT_SETUP_DEBUG_SYMBOLS],
[
#
# ENABLE_DEBUG_SYMBOLS
# This must be done after the toolchain is setup, since we're looking at objcopy.
#
AC_ARG_ENABLE([debug-symbols],
[AS_HELP_STRING([--disable-debug-symbols],[disable generation of debug symbols @<:@enabled@:>@])])
AC_MSG_CHECKING([if we should generate debug symbols])
if test "x$enable_debug_symbols" = "xyes" && test "x$OBJCOPY" = x; then
# explicit enabling of enable-debug-symbols and can't find objcopy
# this is an error
AC_MSG_ERROR([Unable to find objcopy, cannot enable debug-symbols])
fi
if test "x$enable_debug_symbols" = "xyes"; then
ENABLE_DEBUG_SYMBOLS=true
elif test "x$enable_debug_symbols" = "xno"; then
ENABLE_DEBUG_SYMBOLS=false
else
# default on macosx is false
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
ENABLE_DEBUG_SYMBOLS=false
# Default is on if objcopy is found, otherwise off
elif test "x$OBJCOPY" != x || test "x$OPENJDK_TARGET_OS" = xwindows; then
ENABLE_DEBUG_SYMBOLS=true
else
ENABLE_DEBUG_SYMBOLS=false
if test "x$enable_debug_symbols" = "xyes" && test "x$OBJCOPY" = x; then
# explicit enabling of enable-debug-symbols and can't find objcopy
# this is an error
AC_MSG_ERROR([Unable to find objcopy, cannot enable debug-symbols])
fi
fi
AC_MSG_RESULT([$ENABLE_DEBUG_SYMBOLS])
if test "x$enable_debug_symbols" = "xyes"; then
ENABLE_DEBUG_SYMBOLS=true
elif test "x$enable_debug_symbols" = "xno"; then
ENABLE_DEBUG_SYMBOLS=false
else
# default on macosx is false
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
ENABLE_DEBUG_SYMBOLS=false
# Default is on if objcopy is found, otherwise off
elif test "x$OBJCOPY" != x || test "x$OPENJDK_TARGET_OS" = xwindows; then
ENABLE_DEBUG_SYMBOLS=true
else
ENABLE_DEBUG_SYMBOLS=false
fi
fi
#
# ZIP_DEBUGINFO_FILES
#
AC_MSG_CHECKING([if we should zip debug-info files])
AC_ARG_ENABLE([zip-debug-info],
[AS_HELP_STRING([--disable-zip-debug-info],[disable zipping of debug-info files @<:@enabled@:>@])],
[enable_zip_debug_info="${enableval}"], [enable_zip_debug_info="yes"])
AC_MSG_RESULT([${enable_zip_debug_info}])
if test "x${enable_zip_debug_info}" = "xno"; then
ZIP_DEBUGINFO_FILES=false
else
ZIP_DEBUGINFO_FILES=true
fi
AC_SUBST(ENABLE_DEBUG_SYMBOLS)
AC_SUBST(ZIP_DEBUGINFO_FILES)
AC_SUBST(CFLAGS_DEBUG_SYMBOLS)
AC_SUBST(CXXFLAGS_DEBUG_SYMBOLS)
if test "x${enable_zip_debug_info}" = "xno"; then
ZIP_DEBUGINFO_FILES=false
else
ZIP_DEBUGINFO_FILES=true
fi
AC_SUBST(ENABLE_DEBUG_SYMBOLS)
AC_SUBST(ZIP_DEBUGINFO_FILES)
AC_SUBST(CFLAGS_DEBUG_SYMBOLS)
AC_SUBST(CXXFLAGS_DEBUG_SYMBOLS)
])
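A minimal sketch of how the options declared above could be passed to configure (the invocation itself is illustrative, not taken from the patch):

# Turn off native debug symbols and the zipping of debug-info files
bash ./configure --disable-debug-symbols --disable-zip-debug-info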
# Support for customization of the build process. Some build files
@@ -557,5 +555,5 @@ AC_SUBST(CXXFLAGS_DEBUG_SYMBOLS)
# for a degree of customization of the build targets and the rules/recipes
# to create them
AC_ARG_WITH([custom-make-dir], [AS_HELP_STRING([--with-custom-make-dir],
[use this directory for custom build/make files])], [CUSTOM_MAKE_DIR=$with_custom_make_dir])
AC_SUBST(CUSTOM_MAKE_DIR)

File diff suppressed because it is too large

View file

@@ -53,37 +53,37 @@ AC_DEFUN([PLATFORM_EXTRACT_VARS_FROM_CPU],
VAR_CPU_ARCH=ppc
VAR_CPU_BITS=32
VAR_CPU_ENDIAN=big
;;
powerpc64)
VAR_CPU=ppc64
VAR_CPU_ARCH=ppc
VAR_CPU_BITS=64
VAR_CPU_ENDIAN=big
;;
s390)
VAR_CPU=s390
VAR_CPU_ARCH=s390
VAR_CPU_BITS=32
VAR_CPU_ENDIAN=big
;;
s390x)
VAR_CPU=s390x
VAR_CPU_ARCH=s390
VAR_CPU_BITS=64
VAR_CPU_ENDIAN=big
;;
sparc)
VAR_CPU=sparc
VAR_CPU_ARCH=sparc
VAR_CPU_BITS=32
VAR_CPU_ENDIAN=big
;;
sparcv9)
VAR_CPU=sparcv9
VAR_CPU_ARCH=sparc
VAR_CPU_BITS=64
VAR_CPU_ENDIAN=big
;;
*)
AC_MSG_ERROR([unsupported cpu $1])
;;
@@ -140,56 +140,56 @@ AC_DEFUN([PLATFORM_EXTRACT_VARS_FROM_OS],
# OPENJDK_BUILD_OS, etc.
AC_DEFUN([PLATFORM_EXTRACT_TARGET_AND_BUILD],
[
# Copy the autoconf triplet/quadruplet verbatim to OPENJDK_TARGET_AUTOCONF_NAME
# (from the autoconf "host") and OPENJDK_BUILD_AUTOCONF_NAME
# Note that we might later on rewrite e.g. OPENJDK_TARGET_CPU due to reduced build,
# but this will not change the value of OPENJDK_TARGET_AUTOCONF_NAME.
OPENJDK_TARGET_AUTOCONF_NAME="$host"
OPENJDK_BUILD_AUTOCONF_NAME="$build"
AC_SUBST(OPENJDK_TARGET_AUTOCONF_NAME)
AC_SUBST(OPENJDK_BUILD_AUTOCONF_NAME)
# Convert the autoconf OS/CPU value to our own data, into the VAR_OS/CPU variables.
PLATFORM_EXTRACT_VARS_FROM_OS($build_os)
PLATFORM_EXTRACT_VARS_FROM_CPU($build_cpu)
# ... and set up our own variables. (Do this explicitly to facilitate searching)
OPENJDK_BUILD_OS="$VAR_OS"
OPENJDK_BUILD_OS_API="$VAR_OS_API"
OPENJDK_BUILD_OS_ENV="$VAR_OS_ENV"
OPENJDK_BUILD_CPU="$VAR_CPU"
OPENJDK_BUILD_CPU_ARCH="$VAR_CPU_ARCH"
OPENJDK_BUILD_CPU_BITS="$VAR_CPU_BITS"
OPENJDK_BUILD_CPU_ENDIAN="$VAR_CPU_ENDIAN"
AC_SUBST(OPENJDK_BUILD_OS)
AC_SUBST(OPENJDK_BUILD_OS_API)
AC_SUBST(OPENJDK_BUILD_CPU)
AC_SUBST(OPENJDK_BUILD_CPU_ARCH)
AC_SUBST(OPENJDK_BUILD_CPU_BITS)
AC_SUBST(OPENJDK_BUILD_CPU_ENDIAN)
AC_MSG_CHECKING([openjdk-build os-cpu])
AC_MSG_RESULT([$OPENJDK_BUILD_OS-$OPENJDK_BUILD_CPU])
# Convert the autoconf OS/CPU value to our own data, into the VAR_OS/CPU variables.
PLATFORM_EXTRACT_VARS_FROM_OS($host_os)
PLATFORM_EXTRACT_VARS_FROM_CPU($host_cpu)
# ... and set up our own variables. (Do this explicitly to facilitate searching)
OPENJDK_TARGET_OS="$VAR_OS"
OPENJDK_TARGET_OS_API="$VAR_OS_API"
OPENJDK_TARGET_OS_ENV="$VAR_OS_ENV"
OPENJDK_TARGET_CPU="$VAR_CPU"
OPENJDK_TARGET_CPU_ARCH="$VAR_CPU_ARCH"
OPENJDK_TARGET_CPU_BITS="$VAR_CPU_BITS"
OPENJDK_TARGET_CPU_ENDIAN="$VAR_CPU_ENDIAN"
AC_SUBST(OPENJDK_TARGET_OS)
AC_SUBST(OPENJDK_TARGET_OS_API)
AC_SUBST(OPENJDK_TARGET_CPU)
AC_SUBST(OPENJDK_TARGET_CPU_ARCH)
AC_SUBST(OPENJDK_TARGET_CPU_BITS)
AC_SUBST(OPENJDK_TARGET_CPU_ENDIAN)
AC_MSG_CHECKING([openjdk-target os-cpu])
AC_MSG_RESULT([$OPENJDK_TARGET_OS-$OPENJDK_TARGET_CPU])
])
# Check if a reduced build (32-bit on 64-bit platforms) is requested, and modify behaviour
@@ -198,7 +198,7 @@ AC_DEFUN([PLATFORM_EXTRACT_TARGET_AND_BUILD],
AC_DEFUN([PLATFORM_SETUP_TARGET_CPU_BITS],
[
AC_ARG_WITH(target-bits, [AS_HELP_STRING([--with-target-bits],
[build 32-bit or 64-bit binaries (for platforms that support it), e.g. --with-target-bits=32 @<:@guessed@:>@])])
# We have three types of compiles:
# native == normal compilation, target system == build system
@@ -238,184 +238,183 @@ AC_DEFUN([PLATFORM_SETUP_TARGET_CPU_BITS],
fi
AC_SUBST(COMPILE_TYPE)
AC_MSG_CHECKING([compilation type])
AC_MSG_RESULT([$COMPILE_TYPE])
])
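For instance, a reduced 32-bit build on a 64-bit host would be requested through the option declared above (illustrative invocation):

bash ./configure --with-target-bits=32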
# Setup the legacy variables, for controlling the old makefiles.
#
AC_DEFUN([PLATFORM_SETUP_LEGACY_VARS],
[
# Also store the legacy naming of the cpu.
# Ie i586 and amd64 instead of x86 and x86_64
OPENJDK_TARGET_CPU_LEGACY="$OPENJDK_TARGET_CPU"
if test "x$OPENJDK_TARGET_CPU" = xx86; then
OPENJDK_TARGET_CPU_LEGACY="i586"
elif test "x$OPENJDK_TARGET_OS" != xmacosx && test "x$OPENJDK_TARGET_CPU" = xx86_64; then
# On all platforms except MacOSX replace x86_64 with amd64.
OPENJDK_TARGET_CPU_LEGACY="amd64"
fi
AC_SUBST(OPENJDK_TARGET_CPU_LEGACY)
# And the second legacy naming of the cpu.
# Ie i386 and amd64 instead of x86 and x86_64.
OPENJDK_TARGET_CPU_LEGACY_LIB="$OPENJDK_TARGET_CPU"
if test "x$OPENJDK_TARGET_CPU" = xx86; then
OPENJDK_TARGET_CPU_LEGACY_LIB="i386"
elif test "x$OPENJDK_TARGET_CPU" = xx86_64; then
OPENJDK_TARGET_CPU_LEGACY_LIB="amd64"
fi
AC_SUBST(OPENJDK_TARGET_CPU_LEGACY_LIB)
# This is the name of the cpu (but using i386 and amd64 instead of
# x86 and x86_64, respectively), preceded by a /, to be used when
# locating libraries. On macosx, it's empty, though.
OPENJDK_TARGET_CPU_LIBDIR="/$OPENJDK_TARGET_CPU_LEGACY_LIB"
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
OPENJDK_TARGET_CPU_LIBDIR=""
fi
AC_SUBST(OPENJDK_TARGET_CPU_LIBDIR)
# OPENJDK_TARGET_CPU_ISADIR is normally empty. On 64-bit Solaris systems, it is set to
# /amd64 or /sparcv9. This string is appended to some library paths, like this:
# /usr/lib${OPENJDK_TARGET_CPU_ISADIR}/libexample.so
OPENJDK_TARGET_CPU_ISADIR=""
if test "x$OPENJDK_TARGET_OS" = xsolaris; then
if test "x$OPENJDK_TARGET_CPU" = xx86_64; then
OPENJDK_TARGET_CPU_ISADIR="/amd64"
elif test "x$OPENJDK_TARGET_CPU" = xsparcv9; then
OPENJDK_TARGET_CPU_ISADIR="/sparcv9"
fi
fi
AC_SUBST(OPENJDK_TARGET_CPU_ISADIR)
# Setup OPENJDK_TARGET_CPU_OSARCH, which is used to set the os.arch Java system property
OPENJDK_TARGET_CPU_OSARCH="$OPENJDK_TARGET_CPU"
if test "x$OPENJDK_TARGET_OS" = xlinux && test "x$OPENJDK_TARGET_CPU" = xx86; then
# On linux only, we replace x86 with i386.
OPENJDK_TARGET_CPU_OSARCH="i386"
elif test "x$OPENJDK_TARGET_OS" != xmacosx && test "x$OPENJDK_TARGET_CPU" = xx86_64; then
# On all platforms except macosx, we replace x86_64 with amd64.
OPENJDK_TARGET_CPU_OSARCH="amd64"
fi
AC_SUBST(OPENJDK_TARGET_CPU_OSARCH)
OPENJDK_TARGET_CPU_JLI="$OPENJDK_TARGET_CPU"
if test "x$OPENJDK_TARGET_CPU" = xx86; then
OPENJDK_TARGET_CPU_JLI="i386"
elif test "x$OPENJDK_TARGET_OS" != xmacosx && test "x$OPENJDK_TARGET_CPU" = xx86_64; then
# On all platforms except macosx, we replace x86_64 with amd64.
OPENJDK_TARGET_CPU_JLI="amd64"
fi
# Now setup the -D flags for building libjli.
OPENJDK_TARGET_CPU_JLI_CFLAGS="-DLIBARCHNAME='\"$OPENJDK_TARGET_CPU_JLI\"'"
if test "x$OPENJDK_TARGET_OS" = xsolaris; then
if test "x$OPENJDK_TARGET_CPU_ARCH" = xsparc; then
OPENJDK_TARGET_CPU_JLI_CFLAGS="$OPENJDK_TARGET_CPU_JLI_CFLAGS -DLIBARCH32NAME='\"sparc\"' -DLIBARCH64NAME='\"sparcv9\"'"
elif test "x$OPENJDK_TARGET_CPU_ARCH" = xx86; then
OPENJDK_TARGET_CPU_JLI_CFLAGS="$OPENJDK_TARGET_CPU_JLI_CFLAGS -DLIBARCH32NAME='\"i386\"' -DLIBARCH64NAME='\"amd64\"'"
fi
fi
AC_SUBST(OPENJDK_TARGET_CPU_JLI_CFLAGS)
# Setup OPENJDK_TARGET_OS_API_DIR, used in source paths.
if test "x$OPENJDK_TARGET_OS_API" = xposix; then
OPENJDK_TARGET_OS_API_DIR="solaris"
fi
if test "x$OPENJDK_TARGET_OS_API" = xwinapi; then
OPENJDK_TARGET_OS_API_DIR="windows"
fi
AC_SUBST(OPENJDK_TARGET_OS_API_DIR)
if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
A_LP64="LP64:="
# -D_LP64=1 is only set on linux and mac. Setting on windows causes diff in
# unpack200.exe
if test "x$OPENJDK_TARGET_OS" = xlinux || test "x$OPENJDK_TARGET_OS" = xmacosx; then
ADD_LP64="-D_LP64=1"
fi
if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
A_LP64="LP64:="
# -D_LP64=1 is only set on linux and mac. Setting on windows causes diff in
# unpack200.exe
if test "x$OPENJDK_TARGET_OS" = xlinux || test "x$OPENJDK_TARGET_OS" = xmacosx; then
ADD_LP64="-D_LP64=1"
fi
AC_SUBST(LP64,$A_LP64)
fi
AC_SUBST(LP64,$A_LP64)
if test "x$COMPILE_TYPE" = "xcross"; then
# FIXME: ... or should this include reduced builds..?
DEFINE_CROSS_COMPILE_ARCH="CROSS_COMPILE_ARCH:=$OPENJDK_TARGET_CPU_LEGACY"
else
DEFINE_CROSS_COMPILE_ARCH=""
fi
AC_SUBST(DEFINE_CROSS_COMPILE_ARCH)
# ZERO_ARCHDEF is used to enable architecture-specific code
case "${OPENJDK_TARGET_CPU}" in
ppc*) ZERO_ARCHDEF=PPC ;;
s390*) ZERO_ARCHDEF=S390 ;;
sparc*) ZERO_ARCHDEF=SPARC ;;
x86_64*) ZERO_ARCHDEF=AMD64 ;;
x86) ZERO_ARCHDEF=IA32 ;;
*) ZERO_ARCHDEF=$(echo "${OPENJDK_TARGET_CPU_LEGACY_LIB}" | tr a-z A-Z)
esac
AC_SUBST(ZERO_ARCHDEF)
if test "x$COMPILE_TYPE" = "xcross"; then
# FIXME: ... or should this include reduced builds..?
DEFINE_CROSS_COMPILE_ARCH="CROSS_COMPILE_ARCH:=$OPENJDK_TARGET_CPU_LEGACY"
else
DEFINE_CROSS_COMPILE_ARCH=""
fi
AC_SUBST(DEFINE_CROSS_COMPILE_ARCH)
# ZERO_ARCHDEF is used to enable architecture-specific code
case "${OPENJDK_TARGET_CPU}" in
ppc*) ZERO_ARCHDEF=PPC ;;
s390*) ZERO_ARCHDEF=S390 ;;
sparc*) ZERO_ARCHDEF=SPARC ;;
x86_64*) ZERO_ARCHDEF=AMD64 ;;
x86) ZERO_ARCHDEF=IA32 ;;
*) ZERO_ARCHDEF=$(echo "${OPENJDK_TARGET_CPU_LEGACY_LIB}" | tr a-z A-Z)
esac
AC_SUBST(ZERO_ARCHDEF)
])
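A compressed restatement of the naming rules above for one assumed case, a Linux x86_64 target (illustration only, not part of the change):

# x86_64 everywhere except macosx -> legacy name amd64; on linux, os.arch also becomes amd64
OPENJDK_TARGET_CPU_LEGACY=amd64        # used by the old makefiles
OPENJDK_TARGET_CPU_LEGACY_LIB=amd64    # i386/amd64 style library naming
OPENJDK_TARGET_CPU_LIBDIR=/amd64       # appended when locating native libraries (empty on macosx)
OPENJDK_TARGET_CPU_OSARCH=amd64        # value of the os.arch system property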
AC_DEFUN([PLATFORM_SET_RELEASE_FILE_OS_VALUES],
[
if test "x$OPENJDK_TARGET_OS" = "xsolaris"; then
REQUIRED_OS_NAME=SunOS
REQUIRED_OS_VERSION=5.10
fi
if test "x$OPENJDK_TARGET_OS" = "xlinux"; then
REQUIRED_OS_NAME=Linux
REQUIRED_OS_VERSION=2.6
fi
if test "x$OPENJDK_TARGET_OS" = "xwindows"; then
REQUIRED_OS_NAME=Windows
if test "x$OPENJDK_TARGET_CPU_BITS" = "x64"; then
REQUIRED_OS_VERSION=5.2
else
REQUIRED_OS_VERSION=5.1
fi
fi
if test "x$OPENJDK_TARGET_OS" = "xmacosx"; then
REQUIRED_OS_NAME=Darwin
REQUIRED_OS_VERSION=11.2
if test "x$OPENJDK_TARGET_OS" = "xsolaris"; then
REQUIRED_OS_NAME=SunOS
REQUIRED_OS_VERSION=5.10
fi
if test "x$OPENJDK_TARGET_OS" = "xlinux"; then
REQUIRED_OS_NAME=Linux
REQUIRED_OS_VERSION=2.6
fi
if test "x$OPENJDK_TARGET_OS" = "xwindows"; then
REQUIRED_OS_NAME=Windows
if test "x$OPENJDK_TARGET_CPU_BITS" = "x64"; then
REQUIRED_OS_VERSION=5.2
else
REQUIRED_OS_VERSION=5.1
fi
fi
if test "x$OPENJDK_TARGET_OS" = "xmacosx"; then
REQUIRED_OS_NAME=Darwin
REQUIRED_OS_VERSION=11.2
fi
AC_SUBST(REQUIRED_OS_NAME)
AC_SUBST(REQUIRED_OS_VERSION)
])
#%%% Build and target systems %%%
AC_DEFUN_ONCE([PLATFORM_SETUP_OPENJDK_BUILD_AND_TARGET],
[
# Figure out the build and target systems.
# Note that in autoconf terminology, "build" is obvious, but "target"
# is confusing; it assumes you are cross-compiling a cross-compiler (!) and "target" is thus the target of the
# product you're building. The target of this build is called "host". Since this is confusing to most people, we
# have not adopted that system, but use "target" as the platform we are building for. In some places though we need
# to use the configure naming style.
AC_CANONICAL_BUILD
AC_CANONICAL_HOST
AC_CANONICAL_TARGET
PLATFORM_EXTRACT_TARGET_AND_BUILD
PLATFORM_SETUP_TARGET_CPU_BITS
PLATFORM_SET_RELEASE_FILE_OS_VALUES
PLATFORM_SETUP_LEGACY_VARS
])
AC_DEFUN_ONCE([PLATFORM_SETUP_OPENJDK_BUILD_OS_VERSION],
[
###############################################################################
# Note that this is the build platform OS version!
OS_VERSION="`uname -r | ${SED} 's!\.! !g' | ${SED} 's!-! !g'`"
OS_VERSION_MAJOR="`${ECHO} ${OS_VERSION} | ${CUT} -f 1 -d ' '`"
OS_VERSION_MINOR="`${ECHO} ${OS_VERSION} | ${CUT} -f 2 -d ' '`"
OS_VERSION_MICRO="`${ECHO} ${OS_VERSION} | ${CUT} -f 3 -d ' '`"
AC_SUBST(OS_VERSION_MAJOR)
AC_SUBST(OS_VERSION_MINOR)
AC_SUBST(OS_VERSION_MICRO)
])
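A standalone sketch of the version splitting above, with an assumed kernel string (not taken from a real build):

OS_VERSION="`echo 3.11.0-12 | sed 's!\.! !g' | sed 's!-! !g'`"   # "3 11 0 12"
echo ${OS_VERSION} | cut -f 1 -d ' '                             # major: 3
echo ${OS_VERSION} | cut -f 2 -d ' '                             # minor: 11
echo ${OS_VERSION} | cut -f 3 -d ' '                             # micro: 0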
# Support macro for PLATFORM_SETUP_OPENJDK_TARGET_BITS.
@@ -441,68 +440,68 @@ AC_DEFUN([PLATFORM_SET_COMPILER_TARGET_BITS_FLAGS],
AC_DEFUN_ONCE([PLATFORM_SETUP_OPENJDK_TARGET_BITS],
[
###############################################################################
#
# Now we check if libjvm.so will use 32 or 64 bit pointers for the C/C++ code.
# (The JVM can use 32 or 64 bit Java pointers but that decision
# is made at runtime.)
#
if test "x$OPENJDK_TARGET_OS" = xsolaris; then
# Always specify -m flags on Solaris
PLATFORM_SET_COMPILER_TARGET_BITS_FLAGS
elif test "x$COMPILE_TYPE" = xreduced; then
if test "x$OPENJDK_TARGET_OS" != xwindows; then
# Specify -m if running reduced on other Posix platforms
if test "x$OPENJDK_TARGET_OS" = xsolaris; then
# Always specify -m flags on Solaris
PLATFORM_SET_COMPILER_TARGET_BITS_FLAGS
elif test "x$COMPILE_TYPE" = xreduced; then
if test "x$OPENJDK_TARGET_OS" != xwindows; then
# Specify -m if running reduced on other Posix platforms
PLATFORM_SET_COMPILER_TARGET_BITS_FLAGS
fi
fi
# Make compilation sanity check
AC_CHECK_HEADERS([stdio.h], , [
AC_MSG_NOTICE([Failed to compile stdio.h. This likely implies missing compile dependencies.])
if test "x$COMPILE_TYPE" = xreduced; then
AC_MSG_NOTICE([You are doing a reduced build. Check that you have 32-bit libraries installed.])
elif test "x$COMPILE_TYPE" = xcross; then
AC_MSG_NOTICE([You are doing a cross-compilation. Check that you have all target platform libraries installed.])
fi
AC_MSG_ERROR([Cannot continue.])
])
AC_CHECK_SIZEOF([int *], [1111])
if test "x$SIZEOF_INT_P" != "x$ac_cv_sizeof_int_p"; then
# Workaround autoconf bug, see http://lists.gnu.org/archive/html/autoconf/2010-07/msg00004.html
SIZEOF_INT_P="$ac_cv_sizeof_int_p"
fi
if test "x$SIZEOF_INT_P" = x; then
if test "x$SIZEOF_INT_P" = x; then
# The test failed, let's stick to the assumed value.
AC_MSG_WARN([The number of bits in the target could not be determined, using $OPENJDK_TARGET_CPU_BITS.])
else
TESTED_TARGET_CPU_BITS=`expr 8 \* $SIZEOF_INT_P`
if test "x$TESTED_TARGET_CPU_BITS" != "x$OPENJDK_TARGET_CPU_BITS"; then
AC_MSG_ERROR([The tested number of bits in the target ($TESTED_TARGET_CPU_BITS) differs from the number of bits expected to be found in the target ($OPENJDK_TARGET_CPU_BITS)])
fi
fi
AC_MSG_CHECKING([for target address size])
AC_MSG_RESULT([$OPENJDK_TARGET_CPU_BITS bits])
])
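The pointer-size check above reduces to multiplying the detected size of 'int *' by eight; a standalone sketch with an assumed 64-bit result:

SIZEOF_INT_P=8                                    # assumed result of AC_CHECK_SIZEOF([int *])
TESTED_TARGET_CPU_BITS=`expr 8 \* $SIZEOF_INT_P`  # 64, compared against OPENJDK_TARGET_CPU_BITS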
AC_DEFUN_ONCE([PLATFORM_SETUP_OPENJDK_TARGET_ENDIANNESS],
[
###############################################################################
#
# Is the target little or big endian?
#
AC_C_BIGENDIAN([ENDIAN="big"],[ENDIAN="little"],[ENDIAN="unknown"],[ENDIAN="universal_endianness"])
if test "x$ENDIAN" = xuniversal_endianness; then
if test "x$ENDIAN" = xuniversal_endianness; then
AC_MSG_ERROR([Building with both big and little endianness is not supported])
fi
if test "x$ENDIAN" != "x$OPENJDK_TARGET_CPU_ENDIAN"; then
AC_MSG_ERROR([The tested endian in the target ($ENDIAN) differs from the endian expected to be found in the target ($OPENJDK_TARGET_CPU_ENDIAN)])
fi
])

View file

@@ -25,55 +25,54 @@
AC_DEFUN_ONCE([SRCDIRS_SETUP_TOPDIRS],
[
# Where are the sources. Any of these can be overridden
# using --with-override-corba and the likes.
LANGTOOLS_TOPDIR="$SRC_ROOT/langtools"
CORBA_TOPDIR="$SRC_ROOT/corba"
JAXP_TOPDIR="$SRC_ROOT/jaxp"
JAXWS_TOPDIR="$SRC_ROOT/jaxws"
HOTSPOT_TOPDIR="$SRC_ROOT/hotspot"
NASHORN_TOPDIR="$SRC_ROOT/nashorn"
JDK_TOPDIR="$SRC_ROOT/jdk"
AC_SUBST(LANGTOOLS_TOPDIR)
AC_SUBST(CORBA_TOPDIR)
AC_SUBST(JAXP_TOPDIR)
AC_SUBST(JAXWS_TOPDIR)
AC_SUBST(HOTSPOT_TOPDIR)
AC_SUBST(NASHORN_TOPDIR)
AC_SUBST(JDK_TOPDIR)
])
AC_DEFUN_ONCE([SRCDIRS_SETUP_ALTERNATIVE_TOPDIRS],
[
###############################################################################
#
# Pick up additional source for a component from outside of the source root
# or override source for a component.
#
AC_ARG_WITH(add-source-root, [AS_HELP_STRING([--with-add-source-root],
[for each and every source directory, look in this additional source root for
the same directory; if it exists and has files in it, include it in the build])])
AC_ARG_WITH(override-source-root, [AS_HELP_STRING([--with-override-source-root],
[for each and every source directory, look in this override source root for
the same directory; if it exists, use that directory instead and
ignore the directory in the original source root])])
AC_ARG_WITH(adds-and-overrides, [AS_HELP_STRING([--with-adds-and-overrides],
[use the subdirs 'adds' and 'overrides' in the specified directory as
add-source-root and override-source-root])])
if test "x$with_adds_and_overrides" != x; then
if test "x$with_adds_and_overrides" != x; then
with_add_source_root="$with_adds_and_overrides/adds"
with_override_source_root="$with_adds_and_overrides/overrides"
fi
if test "x$with_add_source_root" != x; then
if test "x$with_add_source_root" != x; then
if ! test -d $with_add_source_root; then
AC_MSG_ERROR([Trying to use a non-existent add-source-root $with_add_source_root])
fi
CURDIR="$PWD"
cd "$with_add_source_root"
@@ -82,219 +81,218 @@ if test "x$with_add_source_root" != x; then
# Verify that the addon source root does not have any root makefiles.
# If it does, then it is usually an error, prevent this.
if test -f $with_add_source_root/langtools/makefiles/Makefile || \
test -f $with_add_source_root/langtools/make/Makefile; then
AC_MSG_ERROR([Your add source root seems to contain a full langtools repo! An add source root should only contain additional sources.])
fi
if test -f $with_add_source_root/corba/makefiles/Makefile || \
test -f $with_add_source_root/corba/make/Makefile; then
AC_MSG_ERROR([Your add source root seems to contain a full corba repo! An add source root should only contain additional sources.])
fi
if test -f $with_add_source_root/jaxp/makefiles/Makefile || \
test -f $with_add_source_root/jaxp/make/Makefile; then
AC_MSG_ERROR([Your add source root seems to contain a full jaxp repo! An add source root should only contain additional sources.])
fi
if test -f $with_add_source_root/jaxws/makefiles/Makefile || \
test -f $with_add_source_root/jaxws/make/Makefile; then
AC_MSG_ERROR([Your add source root seems to contain a full jaxws repo! An add source root should only contain additional sources.])
fi
if test -f $with_add_source_root/hotspot/makefiles/Makefile || \
test -f $with_add_source_root/hotspot/make/Makefile; then
AC_MSG_ERROR([Your add source root seems to contain a full hotspot repo! An add source root should only contain additional sources.])
fi
if test -f $with_add_source_root/nashorn/makefiles/Makefile || \
test -f $with_add_source_root/nashorn/make/Makefile; then
AC_MSG_ERROR([Your add source root seems to contain a full nashorn repo! An add source root should only contain additional sources.])
fi
if test -f $with_add_source_root/jdk/makefiles/Makefile || \
test -f $with_add_source_root/jdk/make/Makefile; then
AC_MSG_ERROR([Your add source root seems to contain a full JDK repo! An add source root should only contain additional sources.])
fi
fi
AC_SUBST(ADD_SRC_ROOT)
if test "x$with_override_source_root" != x; then
if test "x$with_override_source_root" != x; then
if ! test -d $with_override_source_root; then
AC_MSG_ERROR([Trying to use a non-existent override-source-root $with_override_source_root])
fi
CURDIR="$PWD"
cd "$with_override_source_root"
OVERRIDE_SRC_ROOT="`pwd`"
cd "$CURDIR"
if test -f $with_override_source_root/langtools/makefiles/Makefile || \
test -f $with_override_source_root/langtools/make/Makefile; then
AC_MSG_ERROR([Your override source root seems to contain a full langtools repo! An override source root should only contain sources that override.])
fi
if test -f $with_override_source_root/corba/makefiles/Makefile || \
test -f $with_override_source_root/corba/make/Makefile; then
AC_MSG_ERROR([Your override source root seems to contain a full corba repo! An override source root should only contain sources that override.])
fi
if test -f $with_override_source_root/jaxp/makefiles/Makefile || \
test -f $with_override_source_root/jaxp/make/Makefile; then
AC_MSG_ERROR([Your override source root seems to contain a full jaxp repo! An override source root should only contain sources that override.])
fi
if test -f $with_override_source_root/jaxws/makefiles/Makefile || \
test -f $with_override_source_root/jaxws/make/Makefile; then
AC_MSG_ERROR([Your override source root seems to contain a full jaxws repo! An override source root should only contain sources that override.])
fi
if test -f $with_override_source_root/hotspot/makefiles/Makefile || \
test -f $with_override_source_root/hotspot/make/Makefile; then
AC_MSG_ERROR([Your override source root seems to contain a full hotspot repo! An override source root should only contain sources that override.])
fi
if test -f $with_override_source_root/nashorn/makefiles/Makefile || \
test -f $with_override_source_root/nashorn/make/Makefile; then
AC_MSG_ERROR([Your override source root seems to contain a full nashorn repo! An override source root should only contain sources that override.])
fi
if test -f $with_override_source_root/jdk/makefiles/Makefile || \
test -f $with_override_source_root/jdk/make/Makefile; then
AC_MSG_ERROR([Your override source root seems to contain a full JDK repo! An override source root should only contain sources that override.])
fi
fi
AC_SUBST(OVERRIDE_SRC_ROOT)
###############################################################################
#
# Override a repo completely, this is used for example when you have 3 small
# development sandboxes of the langtools sources and want to avoid having 3 full
# OpenJDK sources checked out on disk.
#
# Assuming that the 3 langtools sandboxes are located here:
# /home/fredrik/sandbox1/langtools
# /home/fredrik/sandbox2/langtools
# /home/fredrik/sandbox3/langtools
#
# From the source root you create build subdirs manually:
# mkdir -p build1 build2 build3
# in each build directory run:
# (cd build1 && ../configure --with-override-langtools=/home/fredrik/sandbox1 && make)
# (cd build2 && ../configure --with-override-langtools=/home/fredrik/sandbox2 && make)
# (cd build3 && ../configure --with-override-langtools=/home/fredrik/sandbox3 && make)
#
AC_ARG_WITH(override-langtools, [AS_HELP_STRING([--with-override-langtools],
[use this langtools dir for the build])])
AC_ARG_WITH(override-corba, [AS_HELP_STRING([--with-override-corba],
[use this corba dir for the build])])
AC_ARG_WITH(override-jaxp, [AS_HELP_STRING([--with-override-jaxp],
[use this jaxp dir for the build])])
AC_ARG_WITH(override-jaxws, [AS_HELP_STRING([--with-override-jaxws],
[use this jaxws dir for the build])])
AC_ARG_WITH(override-hotspot, [AS_HELP_STRING([--with-override-hotspot],
[use this hotspot dir for the build])])
AC_ARG_WITH(override-nashorn, [AS_HELP_STRING([--with-override-nashorn],
[use this nashorn dir for the build])])
AC_ARG_WITH(override-jdk, [AS_HELP_STRING([--with-override-jdk],
[use this jdk dir for the build])])
if test "x$with_override_langtools" != x; then
if test "x$with_override_langtools" != x; then
CURDIR="$PWD"
cd "$with_override_langtools"
LANGTOOLS_TOPDIR="`pwd`"
cd "$CURDIR"
if ! test -f $LANGTOOLS_TOPDIR/makefiles/Makefile; then
AC_MSG_ERROR([You have to override langtools with a full langtools repo!])
fi
AC_MSG_CHECKING([if langtools should be overridden])
AC_MSG_RESULT([yes with $LANGTOOLS_TOPDIR])
fi
if test "x$with_override_corba" != x; then
CURDIR="$PWD"
cd "$with_override_corba"
CORBA_TOPDIR="`pwd`"
cd "$CURDIR"
if ! test -f $CORBA_TOPDIR/makefiles/Makefile; then
AC_MSG_ERROR([You have to override corba with a full corba repo!])
fi
AC_MSG_CHECKING([if corba should be overridden])
AC_MSG_RESULT([yes with $CORBA_TOPDIR])
fi
if test "x$with_override_jaxp" != x; then
CURDIR="$PWD"
cd "$with_override_jaxp"
JAXP_TOPDIR="`pwd`"
cd "$CURDIR"
if ! test -f $JAXP_TOPDIR/makefiles/Makefile; then
AC_MSG_ERROR([You have to override jaxp with a full jaxp repo!])
fi
AC_MSG_CHECKING([if jaxp should be overridden])
AC_MSG_RESULT([yes with $JAXP_TOPDIR])
fi
if test "x$with_override_jaxws" != x; then
CURDIR="$PWD"
cd "$with_override_jaxws"
JAXWS_TOPDIR="`pwd`"
cd "$CURDIR"
if ! test -f $JAXWS_TOPDIR/makefiles/Makefile; then
AC_MSG_ERROR([You have to override jaxws with a full jaxws repo!])
fi
AC_MSG_CHECKING([if jaxws should be overridden])
AC_MSG_RESULT([yes with $JAXWS_TOPDIR])
fi
if test "x$with_override_hotspot" != x; then
CURDIR="$PWD"
cd "$with_override_hotspot"
HOTSPOT_TOPDIR="`pwd`"
cd "$CURDIR"
if ! test -f $HOTSPOT_TOPDIR/make/Makefile && \
! test -f $HOTSPOT_TOPDIR/makefiles/Makefile; then
AC_MSG_ERROR([You have to override hotspot with a full hotspot repo!])
fi
AC_MSG_CHECKING([if hotspot should be overridden])
AC_MSG_RESULT([yes with $HOTSPOT_TOPDIR])
fi
if test "x$with_override_nashorn" != x; then
CURDIR="$PWD"
cd "$with_override_nashorn"
NASHORN_TOPDIR="`pwd`"
cd "$CURDIR"
if ! test -f $NASHORN_TOPDIR/makefiles/Makefile; then
AC_MSG_ERROR([You have to override nashorn with a full nashorn repo!])
fi
AC_MSG_CHECKING([if nashorn should be overridden])
AC_MSG_RESULT([yes with $NASHORN_TOPDIR])
fi
if test "x$with_override_jdk" != x; then
CURDIR="$PWD"
cd "$with_override_jdk"
JDK_TOPDIR="`pwd`"
cd "$CURDIR"
if ! test -f $JDK_TOPDIR/makefiles/Makefile; then
AC_MSG_ERROR([You have to override JDK with a full JDK repo!])
fi
AC_MSG_CHECKING([if JDK should be overridden])
AC_MSG_RESULT([yes with $JDK_TOPDIR])
fi
])
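An illustrative way to use the add/override options declared above (all paths are placeholders):

bash ./configure --with-adds-and-overrides=/path/to/patches
# which, per the macro above, is shorthand for:
bash ./configure --with-add-source-root=/path/to/patches/adds \
    --with-override-source-root=/path/to/patches/overrides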
AC_DEFUN_ONCE([SRCDIRS_SETUP_OUTPUT_DIRS],
[
BUILD_OUTPUT="$OUTPUT_ROOT"
AC_SUBST(BUILD_OUTPUT)
HOTSPOT_DIST="$OUTPUT_ROOT/hotspot/dist"
BUILD_HOTSPOT=true
AC_SUBST(HOTSPOT_DIST)
AC_SUBST(BUILD_HOTSPOT)
AC_ARG_WITH(import-hotspot, [AS_HELP_STRING([--with-import-hotspot],
[import hotspot binaries from this jdk image or hotspot build dist dir instead of building from source])])
if test "x$with_import_hotspot" != x; then
CURDIR="$PWD"
cd "$with_import_hotspot"
HOTSPOT_DIST="`pwd`"
cd "$CURDIR"
if ! (test -d $HOTSPOT_DIST/lib && test -d $HOTSPOT_DIST/jre/lib); then
AC_MSG_ERROR([You have to import hotspot from a full jdk image or hotspot build dist dir!])
fi
AC_MSG_CHECKING([if hotspot should be imported])
AC_MSG_RESULT([yes from $HOTSPOT_DIST])
BUILD_HOTSPOT=false
fi
JDK_OUTPUTDIR="$OUTPUT_ROOT/jdk"
])
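An illustrative invocation of the import option declared above (the path is a placeholder for a jdk image or hotspot dist directory):

bash ./configure --with-import-hotspot=/path/to/hotspot/dist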

View file

@@ -56,17 +56,17 @@ MAKE:=@MAKE@
# Pass along the verbosity and log level settings.
ifeq (,$(findstring VERBOSE=,$(MAKE)))
MAKE:=$(MAKE) $(VERBOSE) VERBOSE="$(VERBOSE)" LOG_LEVEL="$(LOG_LEVEL)"
endif
# No implicit variables or rules!
ifeq (,$(findstring -R,$(MAKE)))
MAKE:=$(MAKE) -R
endif
# Specify where the common include directory for makefiles is.
ifeq (,$(findstring -I @SRC_ROOT@/common/makefiles,$(MAKE)))
MAKE:=$(MAKE) -I @SRC_ROOT@/common/makefiles
endif
# The "human readable" name of this configuration
@@ -175,7 +175,7 @@ else
RELEASE=$(JDK_VERSION)-$(MILESTONE)$(BUILD_VARIANT_RELEASE)
endif
ifneq ($(USER_RELEASE_SUFFIX), )
FULL_VERSION=$(RELEASE)-$(USER_RELEASE_SUFFIX)-$(JDK_BUILD_NUMBER)
else
FULL_VERSION=$(RELEASE)-$(JDK_BUILD_NUMBER)
@@ -461,7 +461,7 @@ NEW_JAVADOC = $(BOOTSTRAP_JAVAC_ARGS) com.sun.tools.javadoc.Main
# Guarding this against resetting value. Legacy make files include spec multiple
# times.
ifndef RC_FLAGS
RC_FLAGS:=@RC_FLAGS@
endif
# A specific java binary with specific options can be used to run
@@ -541,9 +541,9 @@ BUILD_LOG_PREVIOUS:=@BUILD_LOG_PREVIOUS@
# we have solved how to prevent the log wrapper to wait
# for the background sjavac server process.
ifeq (@ENABLE_SJAVAC@X@OPENJDK_BUILD_OS_API@,yesXwinapi)
BUILD_LOG_WRAPPER:=
else
BUILD_LOG_WRAPPER:=@BUILD_LOG_WRAPPER@
endif
# Build setup
@@ -563,12 +563,12 @@ MSVCR_DLL:=@MSVCR_DLL@
# of the next macro to get rid of superfluous files.
ADD_SRCS=$1
ifneq (,$(ADD_SRC_ROOT))
# Append wildcard rule to pickup any matching source roots found below ADD_SRC_ROOT
ADD_SRCS+=$(wildcard $(subst $(SRC_ROOT),$(ADD_SRC_ROOT),$1))
endif
ifneq (,$(OVERRIDE_SRC_ROOT))
# Append wildcard rule to pickup any matching source roots found below OVERRIDE_SRC_ROOT
ADD_SRCS+=$(wildcard $(subst $(SRC_ROOT),$(OVERRIDE_SRC_ROOT),$1))
endif
# OVR_SRCS creates a filter expression to filter out sources in
@@ -578,9 +578,9 @@ endif
# We cannot do the scan in configure, since that would force us to rerun configure when
# we add overridden sources.
ifneq (,$(OVERRIDE_SRC_ROOT))
OVR_SRCS:=$(addsuffix %,$(subst $(OVERRIDE_SRC_ROOT),$(SRC_ROOT),$(sort $(dir $(shell $(FIND) $(OVERRIDE_SRC_ROOT) -type f)))))
else
OVR_SRCS:=
endif
####################################################

File diff suppressed because it is too large

View file

@@ -36,7 +36,7 @@ include MakeBase.gmk
default: all
# Get all files except .hg in the hotspot directory.
HOTSPOT_FILES := $(shell $(FIND) -L $(HOTSPOT_TOPDIR) -name ".hg" -prune -o -print)
# The old build creates hotspot output dir before calling hotspot and
# not doing it breaks builds on msys.

View file

@@ -23,52 +23,52 @@
# questions.
#
PREFIXES=-pkgPrefix CORBA org.omg \
-pkgPrefix CosNaming org.omg \
-pkgPrefix CosTransactions org.omg \
-pkgPrefix CosTSInteroperation org.omg \
-pkgPrefix DynamicAny org.omg \
-pkgPrefix Dynamic org.omg \
-pkgPrefix IOP org.omg \
-pkgPrefix Messaging org.omg \
-pkgPrefix PortableInterceptor org.omg \
-pkgPrefix PortableServer org.omg \
-pkgPrefix activation com.sun.corba.se.spi \
-pkgPrefix GIOP com.sun.corba.se \
-pkgPrefix PortableActivationIDL com.sun.corba.se \
-pkgPrefix messages com.sun.corba.se
define add_idl_package
# param 1 = MYPACKAGE
# param 2 = src root
# param 3 = gensrc root
# param 4 = source idl to compile
# param 5 = target idl package
# param 6 = delete these files that were output from the idlj
# param 7 = idls that match these patterns should be compiled with -oldImplBase
# param 8 = the idlj command
# Save the generated java files to a temporary directory so
# that we can find them and create proper dependencies.
# After that, we move them to the real gensrc target dir.
$4_TMPDIR:=tmp___$(subst /,_,$(patsubst $2/%,%,$4))___
ifneq ($$(filter $7,$4),)
$4_OLDIMPLBASE:=-oldImplBase
$4_OLDIMPLBASE_MSG:=with -oldImplBase
endif
$5 : $4
$(MKDIR) -p $3/$$($4_TMPDIR)
$(RM) -rf $3/$$($4_TMPDIR)
$(MKDIR) -p $(dir $5)
$(ECHO) $(LOG_INFO) Compiling IDL $(patsubst $2/%,%,$4)
$8 -td $3/$$($4_TMPDIR) \
-i $2/org/omg/CORBA \
-i $2/org/omg/PortableInterceptor \
-i $2/org/omg/PortableServer \
-D CORBA3 -corba 3.0 \
-fall \
$$($4_OLDIMPLBASE) \
$(PREFIXES) \
$4
$(RM) -f $$(addprefix $3/$$($4_TMPDIR)/,$6)
$(CP) -r $3/$$($4_TMPDIR)/* $3
($(CD) $3/$$($4_TMPDIR) && $(FIND) . -type f | $(SED) 's!\./!$3/!g' | $(NAWK) '{ print $$$$1 ": $4" }' > $5)
@@ -76,31 +76,31 @@ define add_idl_package
endef
define SetupIdlCompilation
# param 1 is for example BUILD_IDLS
# param 2,3,4,5,6,7,8 are named args.
# IDLJ,SRC,BIN,INCLUDES,EXCLUDES,OLDIMPLBASES,DELETES
$(foreach i,2 3 4 5 6 7 8 9 10 11 12 13 14 15, $(if $($i),$1_$(strip $($i)))$(NEWLINE))
$(call LogSetupMacroEntry,SetupIdlCompilation($1),$2,$3,$4,$5,$6,$7,$8,$9,$(10),$(11),$(12),$(13),$(14),$(15))
$(if $(16),$(error Internal makefile error: Too many arguments to SetupIdlCompilation, please update IdlCompilation.gmk))
# Find all existing java files and existing class files.
$$(eval $$(call MakeDir,$$($1_BIN)))
$1_SRCS := $$(shell find $$($1_SRC) -name "*.idl")
$1_BINS := $$(shell find $$($1_BIN) -name "*.java")
# Prepend the source/bin path to the filter expressions.
$1_SRC_INCLUDES := $$(addprefix $$($1_SRC)/,$$($1_INCLUDES))
$1_SRC_EXCLUDES := $$(addprefix $$($1_SRC)/,$$($1_EXCLUDES))
$1_BIN_INCLUDES := $$(addprefix $$($1_BIN)/,$$($1_INCLUDES))
$1_BIN_EXCLUDES := $$(addprefix $$($1_BIN)/,$$($1_EXCLUDES))
$1_OLDIMPLBASES := $$(addprefix $$($1_SRC)/,$$($1_OLDIMPLBASES))
# Now remove unwanted java/class files.
$1_SRCS := $$(filter $$($1_SRC_INCLUDES),$$($1_SRCS))
$1_SRCS := $$(filter-out $$($1_SRC_EXCLUDES),$$($1_SRCS))
$1_BINS := $$(filter $$($1_BIN_INCLUDES),$$($1_BINS))
$1_BINS := $$(filter-out $$($1_BIN_EXCLUDES),$$($1_BINS))
$1 := $$(sort $$(patsubst $$($1_SRC)/%.idl,$$($1_BIN)/%.idl.d,$$($1_SRCS)))
# Now create the dependencies for each idl target.
$$(foreach p,$$($1),$$(eval $$(call add_idl_package,$1,$$($1_SRC),$$($1_BIN),$$(patsubst $$($1_BIN)/%.idl.d,$$($1_SRC)/%.idl,$$p),$$p,$$($1_DELETES),$$($1_OLDIMPLBASES),$$($1_IDLJ))))
endef
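As a rough usage sketch (every variable, path and filter pattern below is hypothetical; the real call sites live in the corba repository's makefiles and are not part of this hunk), a caller evals the macro with named arguments and then depends on the resulting list of .idl.d targets:
# Hypothetical sketch: compile all IDL files under one source tree, excluding
# a subtree, with generated .java files placed under a gensrc directory.
$(eval $(call SetupIdlCompilation,BUILD_IDLS, \
    IDLJ := $(IDLJ), \
    SRC := $(CORBA_TOPDIR)/src/share/classes, \
    BIN := $(CORBA_OUTPUTDIR)/gensrc, \
    INCLUDES := org/omg/%, \
    EXCLUDES := org/omg/CORBA/portable/%))
# BUILD_IDLS now expands to the sorted list of per-file .idl.d targets.
idls: $(BUILD_IDLS)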
.SUFFIXES: .java .class .package

File diff suppressed because it is too large


@@ -36,7 +36,7 @@ MKDIR=mkdir
PRINTF=printf
PWD=pwd
# Ensure we have a path that looks like it came from pwd
# (This is mostly for Windows sake and drive letters)
# (This is mostly for Windows sake and drive letters)
define UnixPath # path
$(shell (cd "$1" && $(PWD)))
endef
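To make the intent concrete (the path below is hypothetical and the result depends on the local Cygwin mounts), a drive-letter path handed in through an ALT_ variable gets normalized by letting the shell's own pwd re-derive it:
# ALT_BOOTDIR := C:/jdk1.7.0
# $(call UnixPath,$(ALT_BOOTDIR))   expands to   /cygdrive/c/jdk1.7.0 on a default Cygwin mount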
@@ -47,19 +47,19 @@ ifdef OPENJDK
OPEN_BUILD=true
else
OPEN_BUILD := $(if $(or $(wildcard $(root_dir)/jdk/src/closed), \
$(wildcard $(root_dir)/jdk/make/closed), \
$(wildcard $(root_dir)/jdk/test/closed), \
$(wildcard $(root_dir)/hotspot/src/closed), \
$(wildcard $(root_dir)/hotspot/make/closed), \
$(wildcard $(root_dir)/hotspot/test/closed)), \
false,true)
$(wildcard $(root_dir)/jdk/make/closed), \
$(wildcard $(root_dir)/jdk/test/closed), \
$(wildcard $(root_dir)/hotspot/src/closed), \
$(wildcard $(root_dir)/hotspot/make/closed), \
$(wildcard $(root_dir)/hotspot/test/closed)), \
false,true)
endif
HOTSPOT_AVAILABLE := $(if $(wildcard $(root_dir)/hotspot),true,false)
###########################################################################
# To help in adoption of the new configure&&make build process, a bridge
# build will use the old settings to run configure and do the build.
# build will use the old settings to run configure and do the build.
# Build with the configure bridge. After running configure, restart make
# to parse the new spec file.
@@ -85,80 +85,80 @@ $(BUILD_DIR_ROOT)/.bridge2configureOptsLatest: FRC
$(RM) $@.tmp
$(MKDIR) -p $(BUILD_DIR_ROOT)
@$(ECHO) " --with-debug-level=$(if $(DEBUG_LEVEL),$(DEBUG_LEVEL),release) " >> $@.tmp
ifdef ARCH_DATA_MODEL
@$(ECHO) " --with-target-bits=$(ARCH_DATA_MODEL) " >> $@.tmp
endif
ifeq ($(ARCH_DATA_MODEL),32)
@$(ECHO) " --with-jvm-variants=client,server " >> $@.tmp
endif
ifdef ALT_PARALLEL_COMPILE_JOBS
@$(ECHO) " --with-num-cores=$(ALT_PARALLEL_COMPILE_JOBS) " >> $@.tmp
endif
ifdef ALT_BOOTDIR
@$(ECHO) " --with-boot-jdk=$(call UnixPath,$(ALT_BOOTDIR)) " >> $@.tmp
endif
ifdef ALT_CUPS_HEADERS_PATH
@$(ECHO) " --with-cups-include=$(call UnixPath,$(ALT_CUPS_HEADERS_PATH)) " >> $@.tmp
endif
ifdef ALT_FREETYPE_HEADERS_PATH
@$(ECHO) " --with-freetype=$(call UnixPath,$(ALT_FREETYPE_HEADERS_PATH)/..) " >> $@.tmp
endif
ifdef ENABLE_SJAVAC
@$(ECHO) " --enable-sjavac" >> $@.tmp
endif
ifeq ($(HOTSPOT_AVAILABLE),false)
ifdef ALT_JDK_IMPORT_PATH
@$(ECHO) " --with-import-hotspot=$(call UnixPath,$(ALT_JDK_IMPORT_PATH)) " >> $@.tmp
endif
endif
ifeq ($(OPEN_BUILD),true)
@$(ECHO) " --enable-openjdk-only " >> $@.tmp
else
# Todo: move to closed?
ifdef ALT_MOZILLA_HEADERS_PATH
@$(ECHO) " --with-mozilla-headers=$(call UnixPath,$(ALT_MOZILLA_HEADERS_PATH)) " >> $@.tmp
endif
ifdef ALT_JUNIT_DIR
@$(ECHO) " --with-junit-dir=$(call UnixPath,$(ALT_JUNIT_DIR)) " >> $@.tmp
endif
ifdef ANT_HOME
@$(ECHO) " --with-ant-home=$(call UnixPath,$(ANT_HOME)) " >> $@.tmp
endif
ifdef ALT_JAVAFX_ZIP_DIR
@$(ECHO) " --with-javafx-zip-dir=$(call UnixPath,$(ALT_JAVAFX_ZIP_DIR)) " >> $@.tmp
endif
ifdef ALT_JMC_ZIP_DIR
@$(ECHO) " --with-jmc-zip-dir=$(call UnixPath,$(ALT_JMC_ZIP_DIR)) " >> $@.tmp
endif
ifdef ALT_WIXDIR
@$(ECHO) " --with-wix=$(call UnixPath,$(ALT_WIXDIR)) " >> $@.tmp
endif
ifdef ALT_INSTALL_LZMA_PATH
@$(ECHO) " --with-lzma-path=$(call UnixPath,$(ALT_INSTALL_LZMA_PATH)) " >> $@.tmp
endif
ifdef ALT_INSTALL_UPX_PATH
@$(ECHO) " --with-upx-path=$(call UnixPath,$(ALT_INSTALL_UPX_PATH)) " >> $@.tmp
endif
ifdef ALT_INSTALL_UPX_FILENAME
@$(ECHO) " --with-upx-filename=$(call UnixPath,$(ALT_INSTALL_UPX_FILENAME)) " >> $@.tmp
endif
ifdef ALT_CCSS_SIGNING_DIR
@$(ECHO) " --with-ccss-signing=$(call UnixPath,$(ALT_CCSS_SIGNING_DIR)) " >> $@.tmp
endif
ifdef ALT_SLASH_JAVA
@$(ECHO) " --with-java-devtools=$(call UnixPath,$(ALT_SLASH_JAVA)/devtools) " >> $@.tmp
endif
ifdef ALT_SPARKLE_FRAMEWORK_DIR
@$(ECHO) " --with-sparkle-framework=$(call UnixPath,$(ALT_SPARKLE_FRAMEWORK_DIR)) " >> $@.tmp
endif
endif
ifdef ARCH_DATA_MODEL
@$(ECHO) " --with-target-bits=$(ARCH_DATA_MODEL) " >> $@.tmp
endif
ifeq ($(ARCH_DATA_MODEL),32)
@$(ECHO) " --with-jvm-variants=client,server " >> $@.tmp
endif
ifdef ALT_PARALLEL_COMPILE_JOBS
@$(ECHO) " --with-num-cores=$(ALT_PARALLEL_COMPILE_JOBS) " >> $@.tmp
endif
ifdef ALT_BOOTDIR
@$(ECHO) " --with-boot-jdk=$(call UnixPath,$(ALT_BOOTDIR)) " >> $@.tmp
endif
ifdef ALT_CUPS_HEADERS_PATH
@$(ECHO) " --with-cups-include=$(call UnixPath,$(ALT_CUPS_HEADERS_PATH)) " >> $@.tmp
endif
ifdef ALT_FREETYPE_HEADERS_PATH
@$(ECHO) " --with-freetype=$(call UnixPath,$(ALT_FREETYPE_HEADERS_PATH)/..) " >> $@.tmp
endif
ifdef ENABLE_SJAVAC
@$(ECHO) " --enable-sjavac" >> $@.tmp
endif
ifeq ($(HOTSPOT_AVAILABLE),false)
ifdef ALT_JDK_IMPORT_PATH
@$(ECHO) " --with-import-hotspot=$(call UnixPath,$(ALT_JDK_IMPORT_PATH)) " >> $@.tmp
endif
endif
ifeq ($(OPEN_BUILD),true)
@$(ECHO) " --enable-openjdk-only " >> $@.tmp
else
# Todo: move to closed?
ifdef ALT_MOZILLA_HEADERS_PATH
@$(ECHO) " --with-mozilla-headers=$(call UnixPath,$(ALT_MOZILLA_HEADERS_PATH)) " >> $@.tmp
endif
ifdef ALT_JUNIT_DIR
@$(ECHO) " --with-junit-dir=$(call UnixPath,$(ALT_JUNIT_DIR)) " >> $@.tmp
endif
ifdef ANT_HOME
@$(ECHO) " --with-ant-home=$(call UnixPath,$(ANT_HOME)) " >> $@.tmp
endif
ifdef ALT_JAVAFX_ZIP_DIR
@$(ECHO) " --with-javafx-zip-dir=$(call UnixPath,$(ALT_JAVAFX_ZIP_DIR)) " >> $@.tmp
endif
ifdef ALT_JMC_ZIP_DIR
@$(ECHO) " --with-jmc-zip-dir=$(call UnixPath,$(ALT_JMC_ZIP_DIR)) " >> $@.tmp
endif
ifdef ALT_WIXDIR
@$(ECHO) " --with-wix=$(call UnixPath,$(ALT_WIXDIR)) " >> $@.tmp
endif
ifdef ALT_INSTALL_LZMA_PATH
@$(ECHO) " --with-lzma-path=$(call UnixPath,$(ALT_INSTALL_LZMA_PATH)) " >> $@.tmp
endif
ifdef ALT_INSTALL_UPX_PATH
@$(ECHO) " --with-upx-path=$(call UnixPath,$(ALT_INSTALL_UPX_PATH)) " >> $@.tmp
endif
ifdef ALT_INSTALL_UPX_FILENAME
@$(ECHO) " --with-upx-filename=$(call UnixPath,$(ALT_INSTALL_UPX_FILENAME)) " >> $@.tmp
endif
ifdef ALT_CCSS_SIGNING_DIR
@$(ECHO) " --with-ccss-signing=$(call UnixPath,$(ALT_CCSS_SIGNING_DIR)) " >> $@.tmp
endif
ifdef ALT_SLASH_JAVA
@$(ECHO) " --with-java-devtools=$(call UnixPath,$(ALT_SLASH_JAVA)/devtools) " >> $@.tmp
endif
ifdef ALT_SPARKLE_FRAMEWORK_DIR
@$(ECHO) " --with-sparkle-framework=$(call UnixPath,$(ALT_SPARKLE_FRAMEWORK_DIR)) " >> $@.tmp
endif
endif
@if [ -f $@ ] ; then \
if ! $(CMP) $@ $@.tmp > /dev/null ; then \
$(CP) $@.tmp $@ ; \
fi ; \
else \
$(CP) $@.tmp $@ ; \
fi
if ! $(CMP) $@ $@.tmp > /dev/null ; then \
$(CP) $@.tmp $@ ; \
fi ; \
else \
$(CP) $@.tmp $@ ; \
fi
$(RM) $@.tmp
PHONY_LIST += bridge2configure bridgeBuild
@@ -170,7 +170,7 @@ ifndef JPRT_ARCHIVE_BUNDLE
JPRT_ARCHIVE_BUNDLE=/tmp/jprt_bundles/j2sdk-image.zip
endif
ifndef JPRT_ARCHIVE_INSTALL_BUNDLE
JPRT_ARCHIVE_INSTALL_BUNDLE=/tmp/jprt_bundles/product-install.zip
JPRT_ARCHIVE_INSTALL_BUNDLE=/tmp/jprt_bundles/product-install.zip
endif
# These targets execute in a SPEC free context, before calling bridgeBuild
@@ -212,9 +212,9 @@ bundles-only: start-make
$(MKDIR) -p $(BUILD_OUTPUT)/bundles
$(CD) $(SRC_JDK_IMAGE_DIR) && $(ZIP) -y -q -r $(BUILD_OUTPUT)/bundles/$(JDK_IMAGE_SUBDIR).zip .
$(CD) $(SRC_JRE_IMAGE_DIR) && $(ZIP) -y -q -r $(BUILD_OUTPUT)/bundles/$(JRE_IMAGE_SUBDIR).zip .
if [ -d $(BUILD_OUTPUT)/install/bundles ] ; then \
$(CD) $(BUILD_OUTPUT)/install/bundles && $(ZIP) -y -q -r $(JPRT_ARCHIVE_INSTALL_BUNDLE) . ; \
fi
if [ -d $(BUILD_OUTPUT)/install/bundles ] ; then \
$(CD) $(BUILD_OUTPUT)/install/bundles && $(ZIP) -y -q -r $(JPRT_ARCHIVE_INSTALL_BUNDLE) . ; \
fi
@$(call TargetExit)
# Copy images to one unified location regardless of platform etc.
@@ -226,19 +226,19 @@ final-images-only: start-make
$(MKDIR) -p $(BUILD_OUTPUT)/final-images/$(JRE_IMAGE_SUBDIR)
$(CP) -R -P $(SRC_JDK_IMAGE_DIR)/* $(BUILD_OUTPUT)/final-images/$(JDK_IMAGE_SUBDIR)/
$(CP) -R -P $(SRC_JRE_IMAGE_DIR)/* $(BUILD_OUTPUT)/final-images/$(JRE_IMAGE_SUBDIR)/
ifeq ($(OPENJDK_TARGET_OS),macosx)
$(MKDIR) -p $(BUILD_OUTPUT)/final-images/$(JDK_BUNDLE_SUBDIR)
$(MKDIR) -p $(BUILD_OUTPUT)/final-images/$(JRE_BUNDLE_SUBDIR)
$(CP) -R -P $(SRC_JDK_BUNDLE_DIR)/* $(BUILD_OUTPUT)/final-images/$(JDK_BUNDLE_SUBDIR)/
$(CP) -R -P $(SRC_JRE_BUNDLE_DIR)/* $(BUILD_OUTPUT)/final-images/$(JRE_BUNDLE_SUBDIR)/
endif
ifeq ($(OPENJDK_TARGET_OS),macosx)
$(MKDIR) -p $(BUILD_OUTPUT)/final-images/$(JDK_BUNDLE_SUBDIR)
$(MKDIR) -p $(BUILD_OUTPUT)/final-images/$(JRE_BUNDLE_SUBDIR)
$(CP) -R -P $(SRC_JDK_BUNDLE_DIR)/* $(BUILD_OUTPUT)/final-images/$(JDK_BUNDLE_SUBDIR)/
$(CP) -R -P $(SRC_JRE_BUNDLE_DIR)/* $(BUILD_OUTPUT)/final-images/$(JRE_BUNDLE_SUBDIR)/
endif
@$(call TargetExit)
# Keep track of phony targets
PHONY_LIST += jprt_build_product jprt_build_fastdebug jprt_build_debug \
jprt_build_generic bundles jprt_bundle \
final-images final-images-only
jprt_build_generic bundles jprt_bundle \
final-images final-images-only
###########################################################################
# Phony targets


@@ -38,19 +38,19 @@ include $(SRC_ROOT)/common/makefiles/MakeBase.gmk
# Remove any build.log from a previous run, if one exists
ifneq (,$(BUILD_LOG))
ifneq (,$(BUILD_LOG_PREVIOUS))
# Rotate old log
$(shell $(RM) $(BUILD_LOG_PREVIOUS) 2> /dev/null)
$(shell $(MV) $(BUILD_LOG) $(BUILD_LOG_PREVIOUS) 2> /dev/null)
else
$(shell $(RM) $(BUILD_LOG) 2> /dev/null)
endif
$(shell $(RM) $(OUTPUT_ROOT)/build-trace-time.log 2> /dev/null)
ifneq (,$(BUILD_LOG_PREVIOUS))
# Rotate old log
$(shell $(RM) $(BUILD_LOG_PREVIOUS) 2> /dev/null)
$(shell $(MV) $(BUILD_LOG) $(BUILD_LOG_PREVIOUS) 2> /dev/null)
else
$(shell $(RM) $(BUILD_LOG) 2> /dev/null)
endif
$(shell $(RM) $(OUTPUT_ROOT)/build-trace-time.log 2> /dev/null)
endif
# Remove any javac server logs and port files. This
# prevents a new make run from reusing the previous servers.
ifneq (,$(SJAVAC_SERVER_DIR))
$(shell $(MKDIR) -p $(SJAVAC_SERVER_DIR) && $(RM) -rf $(SJAVAC_SERVER_DIR)/*)
$(shell $(MKDIR) -p $(SJAVAC_SERVER_DIR) && $(RM) -rf $(SJAVAC_SERVER_DIR)/*)
endif
# Reset the build timers.
@@ -103,8 +103,8 @@ jaxws-only: start-make
@$(call TargetExit)
ifeq ($(BUILD_HOTSPOT),true)
hotspot: hotspot-only
hotspot-only: start-make
hotspot: hotspot-only
hotspot-only: start-make
@$(call TargetEnter)
@($(CD) $(SRC_ROOT)/common/makefiles && $(BUILD_LOG_WRAPPER) $(MAKE) $(MAKE_ARGS) -f HotspotWrapper.gmk)
@$(call TargetExit)
@@ -149,9 +149,9 @@ profiles-only: start-make
@$(call TargetExit)
profiles-oscheck:
ifneq ($(OPENJDK_TARGET_OS), linux)
@echo "Error: The Java SE 8 Compact Profiles are only implemented for Linux at this time" && exit 1
endif
ifneq ($(OPENJDK_TARGET_OS), linux)
@echo "Error: The Java SE 8 Compact Profiles are only implemented for Linux at this time" && exit 1
endif
install: images install-only
install-only: start-make
@@ -202,12 +202,12 @@ clean: clean-langtools clean-corba clean-jaxp clean-jaxws clean-hotspot clean-jd
dist-clean: clean
@($(CD) $(OUTPUT_ROOT) && $(RM) -r *spec.gmk config.* configure-arguments Makefile compare.sh spec.sh tmp javacservers)
@$(if $(filter $(CONF_NAME),$(notdir $(OUTPUT_ROOT))), \
if test "x`$(LS) $(OUTPUT_ROOT)`" != x; then \
$(ECHO) "Warning: Not removing non-empty configuration directory for '$(CONF_NAME)'" ;\
else \
($(CD) $(SRC_ROOT) && $(ECHO) "Removing configuration directory for '$(CONF_NAME)'" && $(RM) -r $(OUTPUT_ROOT)) \
fi \
)
if test "x`$(LS) $(OUTPUT_ROOT)`" != x; then \
$(ECHO) "Warning: Not removing non-empty configuration directory for '$(CONF_NAME)'" ; \
else \
($(CD) $(SRC_ROOT) && $(ECHO) "Removing configuration directory for '$(CONF_NAME)'" && $(RM) -r $(OUTPUT_ROOT)) \
fi \
)
@$(ECHO) Cleaned everything, you will have to re-run configure.
clean-langtools:


@@ -48,314 +48,316 @@ $(subst $(SPACE),\n,$(strip $1)))))\
$(compress_post)
decompress_paths=$(SED) -f $(SRC_ROOT)/common/makefiles/support/ListPathsSafely-uncompress.sed -e 's|X99|\\n|g' \
-e 's|X98|$(OUTPUT_ROOT)|g' -e 's|X97|$(SRC_ROOT)|g' \
-e 's|X00|X|g' | tr '\n' '$2'
-e 's|X98|$(OUTPUT_ROOT)|g' -e 's|X97|$(SRC_ROOT)|g' \
-e 's|X00|X|g' | tr '\n' '$2'
define ListPathsSafely_If
$(if $(word $3,$($1)),$(eval $1_LPS$3:=$(call compress_paths,$(wordlist $3,$4,$($1)))))
$(if $(word $3,$($1)),$(eval $1_LPS$3:=$(call compress_paths,$(wordlist $3,$4,$($1)))))
endef
define ListPathsSafely_Printf
$(if $(strip $($1_LPS$4)),printf -- "$(strip $($1_LPS$4))\n" | $(decompress_paths) $3)
$(if $(strip $($1_LPS$4)),$(if $(findstring $(LOG_LEVEL),trace),,@)printf \
-- "$(strip $($1_LPS$4))\n" | $(decompress_paths) $3)
endef
# Recipe example:
# rm -f thepaths
# $(call ListPathsSafely,THEPATHS,\n, >> thepaths)
# The \n argument means translate spaces into \n
# if instead , , (a space) is supplied, then spaces remain spaces.
# rm -f thepaths
# $(call ListPathsSafely,THEPATHS,\n, >> thepaths)
# The \n argument means translate spaces into \n
# if instead , , (a space) is supplied, then spaces remain spaces.
define ListPathsSafely
$(if $(word 16001,$($1)),$(error Cannot list safely more than 16000 paths. $1 has $(words $($1)) paths!))
$(call ListPathsSafely_If,$1,$2,1,250)
$(call ListPathsSafely_If,$1,$2,251,500)
$(call ListPathsSafely_If,$1,$2,501,750)
$(call ListPathsSafely_If,$1,$2,751,1000)
$(if $(word 16001,$($1)),$(error Cannot list safely more than 16000 paths. $1 has $(words $($1)) paths!))
$(ECHO) $(LOG_DEBUG) Writing $(words $($1)) paths to '$3'
$(call ListPathsSafely_If,$1,$2,1,250)
$(call ListPathsSafely_If,$1,$2,251,500)
$(call ListPathsSafely_If,$1,$2,501,750)
$(call ListPathsSafely_If,$1,$2,751,1000)
$(call ListPathsSafely_If,$1,$2,1001,1250)
$(call ListPathsSafely_If,$1,$2,1251,1500)
$(call ListPathsSafely_If,$1,$2,1501,1750)
$(call ListPathsSafely_If,$1,$2,1751,2000)
$(call ListPathsSafely_If,$1,$2,1001,1250)
$(call ListPathsSafely_If,$1,$2,1251,1500)
$(call ListPathsSafely_If,$1,$2,1501,1750)
$(call ListPathsSafely_If,$1,$2,1751,2000)
$(call ListPathsSafely_If,$1,$2,2001,2250)
$(call ListPathsSafely_If,$1,$2,2251,2500)
$(call ListPathsSafely_If,$1,$2,2501,2750)
$(call ListPathsSafely_If,$1,$2,2751,3000)
$(call ListPathsSafely_If,$1,$2,2001,2250)
$(call ListPathsSafely_If,$1,$2,2251,2500)
$(call ListPathsSafely_If,$1,$2,2501,2750)
$(call ListPathsSafely_If,$1,$2,2751,3000)
$(call ListPathsSafely_If,$1,$2,3001,3250)
$(call ListPathsSafely_If,$1,$2,3251,3500)
$(call ListPathsSafely_If,$1,$2,3501,3750)
$(call ListPathsSafely_If,$1,$2,3751,4000)
$(call ListPathsSafely_If,$1,$2,3001,3250)
$(call ListPathsSafely_If,$1,$2,3251,3500)
$(call ListPathsSafely_If,$1,$2,3501,3750)
$(call ListPathsSafely_If,$1,$2,3751,4000)
$(call ListPathsSafely_If,$1,$2,4001,4250)
$(call ListPathsSafely_If,$1,$2,4251,4500)
$(call ListPathsSafely_If,$1,$2,4501,4750)
$(call ListPathsSafely_If,$1,$2,4751,5000)
$(call ListPathsSafely_If,$1,$2,4001,4250)
$(call ListPathsSafely_If,$1,$2,4251,4500)
$(call ListPathsSafely_If,$1,$2,4501,4750)
$(call ListPathsSafely_If,$1,$2,4751,5000)
$(call ListPathsSafely_If,$1,$2,5001,5250)
$(call ListPathsSafely_If,$1,$2,5251,5500)
$(call ListPathsSafely_If,$1,$2,5501,5750)
$(call ListPathsSafely_If,$1,$2,5751,6000)
$(call ListPathsSafely_If,$1,$2,5001,5250)
$(call ListPathsSafely_If,$1,$2,5251,5500)
$(call ListPathsSafely_If,$1,$2,5501,5750)
$(call ListPathsSafely_If,$1,$2,5751,6000)
$(call ListPathsSafely_If,$1,$2,6001,6250)
$(call ListPathsSafely_If,$1,$2,6251,6500)
$(call ListPathsSafely_If,$1,$2,6501,6750)
$(call ListPathsSafely_If,$1,$2,6751,7000)
$(call ListPathsSafely_If,$1,$2,6001,6250)
$(call ListPathsSafely_If,$1,$2,6251,6500)
$(call ListPathsSafely_If,$1,$2,6501,6750)
$(call ListPathsSafely_If,$1,$2,6751,7000)
$(call ListPathsSafely_If,$1,$2,7001,7250)
$(call ListPathsSafely_If,$1,$2,7251,7500)
$(call ListPathsSafely_If,$1,$2,7501,7750)
$(call ListPathsSafely_If,$1,$2,7751,8000)
$(call ListPathsSafely_If,$1,$2,7001,7250)
$(call ListPathsSafely_If,$1,$2,7251,7500)
$(call ListPathsSafely_If,$1,$2,7501,7750)
$(call ListPathsSafely_If,$1,$2,7751,8000)
$(call ListPathsSafely_If,$1,$2,8001,8250)
$(call ListPathsSafely_If,$1,$2,8251,8500)
$(call ListPathsSafely_If,$1,$2,8501,8750)
$(call ListPathsSafely_If,$1,$2,8751,9000)
$(call ListPathsSafely_If,$1,$2,8001,8250)
$(call ListPathsSafely_If,$1,$2,8251,8500)
$(call ListPathsSafely_If,$1,$2,8501,8750)
$(call ListPathsSafely_If,$1,$2,8751,9000)
$(call ListPathsSafely_If,$1,$2,9001,9250)
$(call ListPathsSafely_If,$1,$2,9251,9500)
$(call ListPathsSafely_If,$1,$2,9501,9750)
$(call ListPathsSafely_If,$1,$2,9751,10000)
$(call ListPathsSafely_If,$1,$2,9001,9250)
$(call ListPathsSafely_If,$1,$2,9251,9500)
$(call ListPathsSafely_If,$1,$2,9501,9750)
$(call ListPathsSafely_If,$1,$2,9751,10000)
$(call ListPathsSafely_If,$1,$2,10001,10250)
$(call ListPathsSafely_If,$1,$2,10251,10500)
$(call ListPathsSafely_If,$1,$2,10501,10750)
$(call ListPathsSafely_If,$1,$2,10751,11000)
$(call ListPathsSafely_If,$1,$2,10001,10250)
$(call ListPathsSafely_If,$1,$2,10251,10500)
$(call ListPathsSafely_If,$1,$2,10501,10750)
$(call ListPathsSafely_If,$1,$2,10751,11000)
$(call ListPathsSafely_If,$1,$2,11001,11250)
$(call ListPathsSafely_If,$1,$2,11251,11500)
$(call ListPathsSafely_If,$1,$2,11501,11750)
$(call ListPathsSafely_If,$1,$2,11751,12000)
$(call ListPathsSafely_If,$1,$2,11001,11250)
$(call ListPathsSafely_If,$1,$2,11251,11500)
$(call ListPathsSafely_If,$1,$2,11501,11750)
$(call ListPathsSafely_If,$1,$2,11751,12000)
$(call ListPathsSafely_If,$1,$2,12001,12250)
$(call ListPathsSafely_If,$1,$2,12251,12500)
$(call ListPathsSafely_If,$1,$2,12501,12750)
$(call ListPathsSafely_If,$1,$2,12751,13000)
$(call ListPathsSafely_If,$1,$2,12001,12250)
$(call ListPathsSafely_If,$1,$2,12251,12500)
$(call ListPathsSafely_If,$1,$2,12501,12750)
$(call ListPathsSafely_If,$1,$2,12751,13000)
$(call ListPathsSafely_If,$1,$2,13001,13250)
$(call ListPathsSafely_If,$1,$2,13251,13500)
$(call ListPathsSafely_If,$1,$2,13501,13750)
$(call ListPathsSafely_If,$1,$2,13751,14000)
$(call ListPathsSafely_If,$1,$2,13001,13250)
$(call ListPathsSafely_If,$1,$2,13251,13500)
$(call ListPathsSafely_If,$1,$2,13501,13750)
$(call ListPathsSafely_If,$1,$2,13751,14000)
$(call ListPathsSafely_If,$1,$2,14001,14250)
$(call ListPathsSafely_If,$1,$2,14251,14500)
$(call ListPathsSafely_If,$1,$2,14501,14750)
$(call ListPathsSafely_If,$1,$2,14751,15000)
$(call ListPathsSafely_If,$1,$2,14001,14250)
$(call ListPathsSafely_If,$1,$2,14251,14500)
$(call ListPathsSafely_If,$1,$2,14501,14750)
$(call ListPathsSafely_If,$1,$2,14751,15000)
$(call ListPathsSafely_If,$1,$2,15001,15250)
$(call ListPathsSafely_If,$1,$2,15251,15500)
$(call ListPathsSafely_If,$1,$2,15501,15750)
$(call ListPathsSafely_If,$1,$2,15751,16000)
$(call ListPathsSafely_If,$1,$2,15001,15250)
$(call ListPathsSafely_If,$1,$2,15251,15500)
$(call ListPathsSafely_If,$1,$2,15501,15750)
$(call ListPathsSafely_If,$1,$2,15751,16000)
$(call ListPathsSafely_Printf,$1,$2,$3,1)
$(call ListPathsSafely_Printf,$1,$2,$3,251)
$(call ListPathsSafely_Printf,$1,$2,$3,501)
$(call ListPathsSafely_Printf,$1,$2,$3,751)
$(call ListPathsSafely_Printf,$1,$2,$3,1)
$(call ListPathsSafely_Printf,$1,$2,$3,251)
$(call ListPathsSafely_Printf,$1,$2,$3,501)
$(call ListPathsSafely_Printf,$1,$2,$3,751)
$(call ListPathsSafely_Printf,$1,$2,$3,1001)
$(call ListPathsSafely_Printf,$1,$2,$3,1251)
$(call ListPathsSafely_Printf,$1,$2,$3,1501)
$(call ListPathsSafely_Printf,$1,$2,$3,1751)
$(call ListPathsSafely_Printf,$1,$2,$3,1001)
$(call ListPathsSafely_Printf,$1,$2,$3,1251)
$(call ListPathsSafely_Printf,$1,$2,$3,1501)
$(call ListPathsSafely_Printf,$1,$2,$3,1751)
$(call ListPathsSafely_Printf,$1,$2,$3,2001)
$(call ListPathsSafely_Printf,$1,$2,$3,2251)
$(call ListPathsSafely_Printf,$1,$2,$3,2501)
$(call ListPathsSafely_Printf,$1,$2,$3,2751)
$(call ListPathsSafely_Printf,$1,$2,$3,2001)
$(call ListPathsSafely_Printf,$1,$2,$3,2251)
$(call ListPathsSafely_Printf,$1,$2,$3,2501)
$(call ListPathsSafely_Printf,$1,$2,$3,2751)
$(call ListPathsSafely_Printf,$1,$2,$3,3001)
$(call ListPathsSafely_Printf,$1,$2,$3,3251)
$(call ListPathsSafely_Printf,$1,$2,$3,3501)
$(call ListPathsSafely_Printf,$1,$2,$3,3751)
$(call ListPathsSafely_Printf,$1,$2,$3,3001)
$(call ListPathsSafely_Printf,$1,$2,$3,3251)
$(call ListPathsSafely_Printf,$1,$2,$3,3501)
$(call ListPathsSafely_Printf,$1,$2,$3,3751)
$(call ListPathsSafely_Printf,$1,$2,$3,4001)
$(call ListPathsSafely_Printf,$1,$2,$3,4251)
$(call ListPathsSafely_Printf,$1,$2,$3,4501)
$(call ListPathsSafely_Printf,$1,$2,$3,4751)
$(call ListPathsSafely_Printf,$1,$2,$3,4001)
$(call ListPathsSafely_Printf,$1,$2,$3,4251)
$(call ListPathsSafely_Printf,$1,$2,$3,4501)
$(call ListPathsSafely_Printf,$1,$2,$3,4751)
$(call ListPathsSafely_Printf,$1,$2,$3,5001)
$(call ListPathsSafely_Printf,$1,$2,$3,5251)
$(call ListPathsSafely_Printf,$1,$2,$3,5501)
$(call ListPathsSafely_Printf,$1,$2,$3,5751)
$(call ListPathsSafely_Printf,$1,$2,$3,5001)
$(call ListPathsSafely_Printf,$1,$2,$3,5251)
$(call ListPathsSafely_Printf,$1,$2,$3,5501)
$(call ListPathsSafely_Printf,$1,$2,$3,5751)
$(call ListPathsSafely_Printf,$1,$2,$3,6001)
$(call ListPathsSafely_Printf,$1,$2,$3,6251)
$(call ListPathsSafely_Printf,$1,$2,$3,6501)
$(call ListPathsSafely_Printf,$1,$2,$3,6751)
$(call ListPathsSafely_Printf,$1,$2,$3,6001)
$(call ListPathsSafely_Printf,$1,$2,$3,6251)
$(call ListPathsSafely_Printf,$1,$2,$3,6501)
$(call ListPathsSafely_Printf,$1,$2,$3,6751)
$(call ListPathsSafely_Printf,$1,$2,$3,7001)
$(call ListPathsSafely_Printf,$1,$2,$3,7251)
$(call ListPathsSafely_Printf,$1,$2,$3,7501)
$(call ListPathsSafely_Printf,$1,$2,$3,7751)
$(call ListPathsSafely_Printf,$1,$2,$3,7001)
$(call ListPathsSafely_Printf,$1,$2,$3,7251)
$(call ListPathsSafely_Printf,$1,$2,$3,7501)
$(call ListPathsSafely_Printf,$1,$2,$3,7751)
$(call ListPathsSafely_Printf,$1,$2,$3,8001)
$(call ListPathsSafely_Printf,$1,$2,$3,8251)
$(call ListPathsSafely_Printf,$1,$2,$3,8501)
$(call ListPathsSafely_Printf,$1,$2,$3,8751)
$(call ListPathsSafely_Printf,$1,$2,$3,8001)
$(call ListPathsSafely_Printf,$1,$2,$3,8251)
$(call ListPathsSafely_Printf,$1,$2,$3,8501)
$(call ListPathsSafely_Printf,$1,$2,$3,8751)
$(call ListPathsSafely_Printf,$1,$2,$3,9001)
$(call ListPathsSafely_Printf,$1,$2,$3,9251)
$(call ListPathsSafely_Printf,$1,$2,$3,9501)
$(call ListPathsSafely_Printf,$1,$2,$3,9751)
$(call ListPathsSafely_Printf,$1,$2,$3,9001)
$(call ListPathsSafely_Printf,$1,$2,$3,9251)
$(call ListPathsSafely_Printf,$1,$2,$3,9501)
$(call ListPathsSafely_Printf,$1,$2,$3,9751)
$(call ListPathsSafely_Printf,$1,$2,$3,10001)
$(call ListPathsSafely_Printf,$1,$2,$3,10251)
$(call ListPathsSafely_Printf,$1,$2,$3,10501)
$(call ListPathsSafely_Printf,$1,$2,$3,10751)
$(call ListPathsSafely_Printf,$1,$2,$3,10001)
$(call ListPathsSafely_Printf,$1,$2,$3,10251)
$(call ListPathsSafely_Printf,$1,$2,$3,10501)
$(call ListPathsSafely_Printf,$1,$2,$3,10751)
$(call ListPathsSafely_Printf,$1,$2,$3,11001)
$(call ListPathsSafely_Printf,$1,$2,$3,11251)
$(call ListPathsSafely_Printf,$1,$2,$3,11501)
$(call ListPathsSafely_Printf,$1,$2,$3,11751)
$(call ListPathsSafely_Printf,$1,$2,$3,11001)
$(call ListPathsSafely_Printf,$1,$2,$3,11251)
$(call ListPathsSafely_Printf,$1,$2,$3,11501)
$(call ListPathsSafely_Printf,$1,$2,$3,11751)
$(call ListPathsSafely_Printf,$1,$2,$3,12001)
$(call ListPathsSafely_Printf,$1,$2,$3,12251)
$(call ListPathsSafely_Printf,$1,$2,$3,12501)
$(call ListPathsSafely_Printf,$1,$2,$3,12751)
$(call ListPathsSafely_Printf,$1,$2,$3,12001)
$(call ListPathsSafely_Printf,$1,$2,$3,12251)
$(call ListPathsSafely_Printf,$1,$2,$3,12501)
$(call ListPathsSafely_Printf,$1,$2,$3,12751)
$(call ListPathsSafely_Printf,$1,$2,$3,13001)
$(call ListPathsSafely_Printf,$1,$2,$3,13251)
$(call ListPathsSafely_Printf,$1,$2,$3,13501)
$(call ListPathsSafely_Printf,$1,$2,$3,13751)
$(call ListPathsSafely_Printf,$1,$2,$3,13001)
$(call ListPathsSafely_Printf,$1,$2,$3,13251)
$(call ListPathsSafely_Printf,$1,$2,$3,13501)
$(call ListPathsSafely_Printf,$1,$2,$3,13751)
$(call ListPathsSafely_Printf,$1,$2,$3,14001)
$(call ListPathsSafely_Printf,$1,$2,$3,14251)
$(call ListPathsSafely_Printf,$1,$2,$3,14501)
$(call ListPathsSafely_Printf,$1,$2,$3,14751)
$(call ListPathsSafely_Printf,$1,$2,$3,14001)
$(call ListPathsSafely_Printf,$1,$2,$3,14251)
$(call ListPathsSafely_Printf,$1,$2,$3,14501)
$(call ListPathsSafely_Printf,$1,$2,$3,14751)
$(call ListPathsSafely_Printf,$1,$2,$3,15001)
$(call ListPathsSafely_Printf,$1,$2,$3,15251)
$(call ListPathsSafely_Printf,$1,$2,$3,15501)
$(call ListPathsSafely_Printf,$1,$2,$3,15751)
$(call ListPathsSafely_Printf,$1,$2,$3,15001)
$(call ListPathsSafely_Printf,$1,$2,$3,15251)
$(call ListPathsSafely_Printf,$1,$2,$3,15501)
$(call ListPathsSafely_Printf,$1,$2,$3,15751)
endef
define ListPathsSafelyNow_IfPrintf
ifneq (,$$(word $4,$$($1)))
$$(eval $1_LPS$4:=$$(call compress_paths,$$(wordlist $4,$5,$$($1))))
$$(shell printf -- "$$(strip $$($1_LPS$4))\n" | $(decompress_paths) $3)
endif
ifneq (,$$(word $4,$$($1)))
$$(eval $1_LPS$4:=$$(call compress_paths,$$(wordlist $4,$5,$$($1))))
$$(shell printf -- "$$(strip $$($1_LPS$4))\n" | $(decompress_paths) $3)
endif
endef
# And a non-recipe version:
define ListPathsSafelyNow
ifneq (,$$(word 10001,$$($1)))
$$(error Cannot list safely more than 10000 paths. $1 has $$(words $$($1)) paths!)
endif
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,1,250)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,251,500)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,501,750)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,751,1000)
ifneq (,$$(word 10001,$$($1)))
$$(error Cannot list safely more than 10000 paths. $1 has $$(words $$($1)) paths!)
endif
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,1,250)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,251,500)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,501,750)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,751,1000)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,1001,1250)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,1251,1500)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,1501,1750)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,1751,2000)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,1001,1250)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,1251,1500)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,1501,1750)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,1751,2000)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,2001,2250)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,2251,2500)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,2501,2750)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,2751,3000)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,2001,2250)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,2251,2500)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,2501,2750)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,2751,3000)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,3001,3250)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,3251,3500)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,3501,3750)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,3751,4000)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,3001,3250)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,3251,3500)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,3501,3750)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,3751,4000)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,4001,4250)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,4251,4500)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,4501,4750)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,4751,5000)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,4001,4250)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,4251,4500)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,4501,4750)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,4751,5000)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,5001,5250)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,5251,5500)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,5501,5750)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,5751,6000)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,5001,5250)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,5251,5500)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,5501,5750)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,5751,6000)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,6001,6250)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,6251,6500)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,6501,6750)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,6751,7000)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,6001,6250)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,6251,6500)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,6501,6750)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,6751,7000)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,7001,7250)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,7251,7500)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,7501,7750)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,7751,8000)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,7001,7250)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,7251,7500)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,7501,7750)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,7751,8000)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,8001,8250)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,8251,8500)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,8501,8750)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,8751,9000)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,8001,8250)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,8251,8500)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,8501,8750)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,8751,9000)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,9001,9250)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,9251,9500)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,9501,9750)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,9751,10000)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,9001,9250)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,9251,9500)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,9501,9750)
$(call ListPathsSafelyNow_IfPrintf,$1,$2,$3,9751,10000)
endef
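The practical difference from ListPathsSafely is that the chunks here are emitted through $(shell ...) while the macro is being eval'ed, so the file is written at parse time rather than from a rule's recipe. A hedged sketch (variable and file name are hypothetical):
# Write the current contents of JAVA_SRCS to a file immediately, one path per
# line; remove the file first, since each chunk appends to it.
$(shell $(RM) $(OUTPUT_ROOT)/java-srcs.txt)
$(eval $(call ListPathsSafelyNow,JAVA_SRCS,\n, >> $(OUTPUT_ROOT)/java-srcs.txt))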
# The source tips can come from the Mercurial repository, or from the files
# $(HGTIP_FILENAME) which contains the tip but is also positioned in the same
# directory as the original $(HGDIR) directory.
# These should not be := assignments, only used from the root Makefile.
# $(HGTIP_FILENAME) which contains the tip but is also positioned in the same
# directory as the original $(HGDIR) directory.
# These should not be := assignments, only used from the root Makefile.
HG_VERSION = $(shell $(HG) version 2> /dev/null)
HG_DIRECTORY=.hg
HGTIP_FILENAME=.hgtip
HG_SEARCH = ./REPO ./*/REPO ./*/*/REPO ./*/*/*/REPO
REPO_LIST = $(patsubst ./%,%,$(patsubst %/,%,$(sort $(dir \
$(shell $(CD) $(SRC_ROOT) ; ( $(LS) -d $(HG_SEARCH:%/REPO=%/$(HG_DIRECTORY)) ; \
$(LS) $(HG_SEARCH:%/REPO=%/$(HGTIP_FILENAME)) ) \
2> /dev/null)))))
$(LS) $(HG_SEARCH:%/REPO=%/$(HGTIP_FILENAME)) ) \
2> /dev/null)))))
# Emit the repo:tip pairs to $@
define GetSourceTips
$(CD) $(SRC_ROOT) ; \
for i in $(REPO_LIST) IGNORE ; do \
if [ "$${i}" = "IGNORE" ] ; then \
continue; \
elif [ -d $${i}/$(HG_DIRECTORY) -a "$(HG_VERSION)" != "" ] ; then \
$(PRINTF) " %s:%s" \
"$${i}" `$(HG) tip --repository $${i} --template '{node|short}\n'` ; \
elif [ -f $${i}/$(HGTIP_FILENAME) ] ; then \
$(PRINTF) " %s:%s" \
"$${i}" `$(CAT) $${i}/$(HGTIP_FILENAME)` ; \
fi; \
done >> $@
$(PRINTF) "\n" >> $@
$(CD) $(SRC_ROOT) ; \
for i in $(REPO_LIST) IGNORE ; do \
if [ "$${i}" = "IGNORE" ] ; then \
continue; \
elif [ -d $${i}/$(HG_DIRECTORY) -a "$(HG_VERSION)" != "" ] ; then \
$(PRINTF) " %s:%s" \
"$${i}" `$(HG) tip --repository $${i} --template '{node|short}\n'` ; \
elif [ -f $${i}/$(HGTIP_FILENAME) ] ; then \
$(PRINTF) " %s:%s" \
"$${i}" `$(CAT) $${i}/$(HGTIP_FILENAME)` ; \
fi; \
done >> $@
$(PRINTF) "\n" >> $@
endef
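For reference, what gets appended to $@ is a single line of space-separated repo:tip pairs, one per repository found via REPO_LIST, e.g. (hashes made up):
#   corba:0123456789ab hotspot:23456789abcd jdk:456789abcdef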
# Create the HGTIP_FILENAME file. Called from jdk/make/closed/bundles.gmk
define CreateHgTip
$(HG) tip --repository $1 --template '{node|short}\n' > $1/$(HGTIP_FILENAME);\
$(ECHO) $1/$(HGTIP_FILENAME)
$(HG) tip --repository $1 --template '{node|short}\n' > $1/$(HGTIP_FILENAME); \
$(ECHO) $1/$(HGTIP_FILENAME)
endef
define SetupLogging
ifeq ($$(LOG_LEVEL),trace)
# Shell redefinition trick inspired by http://www.cmcrossroads.com/ask-mr-make/6535-tracing-rule-execution-in-gnu-make
# For each target executed, will print
# Building <TARGET> (from <FIRST PREREQUISITE>) (<ALL NEWER PREREQUISITES> newer)
# but with a limit of 20 on <ALL NEWER PREREQUISITES>, to avoid cluttering logs too much
# (and causing a crash on Cygwin).
# Default shell seems to always be /bin/sh. Must override with bash to get this to work on Solaris.
# Only use time if it's GNU time which supports format and output file.
WRAPPER_SHELL:=/bin/bash $$(SRC_ROOT)/common/bin/shell-tracer.sh $$(if $$(findstring yes,$$(IS_GNU_TIME)),$$(TIME),-) $$(OUTPUT_ROOT)/build-trace-time.log /bin/bash
SHELL=$$(warning $$(if $$@,Building $$@,Running shell command) $$(if $$<, (from $$<))$$(if $$?, ($$(wordlist 1, 20, $$?) $$(if $$(wordlist 21, 22, $$?), ... [in total $$(words $$?) files]) newer)))$$(WRAPPER_SHELL)
endif
# Never remove warning messages; this is just for completeness
LOG_WARN=
ifneq ($$(findstring $$(LOG_LEVEL),info debug trace),)
LOG_INFO=
else
LOG_INFO=> /dev/null
endif
ifneq ($$(findstring $$(LOG_LEVEL),debug trace),)
LOG_DEBUG=
else
LOG_DEBUG=> /dev/null
endif
ifneq ($$(findstring $$(LOG_LEVEL),trace),)
LOG_TRACE=
else
LOG_TRACE=> /dev/null
endif
ifeq ($$(LOG_LEVEL),trace)
# Shell redefinition trick inspired by http://www.cmcrossroads.com/ask-mr-make/6535-tracing-rule-execution-in-gnu-make
# For each target executed, will print
# Building <TARGET> (from <FIRST PREREQUISITE>) (<ALL NEWER PREREQUISITES> newer)
# but with a limit of 20 on <ALL NEWER PREREQUISITES>, to avoid cluttering logs too much
# (and causing a crash on Cygwin).
# Default shell seems to always be /bin/sh. Must override with bash to get this to work on Solaris.
# Only use time if it's GNU time which supports format and output file.
WRAPPER_SHELL:=/bin/bash $$(SRC_ROOT)/common/bin/shell-tracer.sh $$(if $$(findstring yes,$$(IS_GNU_TIME)),$$(TIME),-) $$(OUTPUT_ROOT)/build-trace-time.log /bin/bash
SHELL=$$(warning $$(if $$@,Building $$@,Running shell command) $$(if $$<, (from $$<))$$(if $$?, ($$(wordlist 1, 20, $$?) $$(if $$(wordlist 21, 22, $$?), ... [in total $$(words $$?) files]) newer)))$$(WRAPPER_SHELL)
endif
# Never remove warning messages; this is just for completeness
LOG_WARN=
ifneq ($$(findstring $$(LOG_LEVEL),info debug trace),)
LOG_INFO=
else
LOG_INFO=> /dev/null
endif
ifneq ($$(findstring $$(LOG_LEVEL),debug trace),)
LOG_DEBUG=
else
LOG_DEBUG=> /dev/null
endif
ifneq ($$(findstring $$(LOG_LEVEL),trace),)
LOG_TRACE=
else
LOG_TRACE=> /dev/null
endif
endef
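The LOG_* variables defined above are meant to be dropped into recipes as a conditional redirect: at the default level they expand to "> /dev/null" and swallow the message, while a sufficiently high LOG= level makes them expand to nothing so the message reaches the console. A small sketch (target and messages are hypothetical; recipe lines must be tab-indented):
sometarget:
	$(ECHO) $(LOG_INFO) Compiling $@
	$(ECHO) $(LOG_DEBUG) Full command line was: $(SOME_COMMAND)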
# Make sure logging is setup for everyone that includes MakeBase.gmk.
@@ -363,43 +365,43 @@ $(eval $(call SetupLogging))
# This is to be called by all SetupFoo macros
define LogSetupMacroEntry
$(if $(27),$(error Internal makefile error: Too many arguments to LogSetupMacroEntry, please update MakeBase.gmk))
$(if $(findstring $(LOG_LEVEL),debug trace), $(info $1 $(foreach i,2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26,$(if $($i),$(NEWLINE) $(strip [$i] $($i))))))
$(if $(27),$(error Internal makefile error: Too many arguments to LogSetupMacroEntry, please update MakeBase.gmk))
$(if $(findstring $(LOG_LEVEL),debug trace), $(info $1 $(foreach i,2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26,$(if $($i),$(NEWLINE) $(strip [$i] $($i))))))
endef
# Make directory without forking mkdir if not needed
define MakeDir
ifneq ($$(wildcard $1 $2 $3 $4 $5 $6 $7 $8 $9),$$(strip $1 $2 $3 $4 $5 $6 $7 $8 $9))
$$(shell $(MKDIR) -p $1 $2 $3 $4 $5 $6 $7 $8 $9)
endif
ifneq ($$(wildcard $1 $2 $3 $4 $5 $6 $7 $8 $9),$$(strip $1 $2 $3 $4 $5 $6 $7 $8 $9))
$$(shell $(MKDIR) -p $1 $2 $3 $4 $5 $6 $7 $8 $9)
endif
endef
ifeq ($(OPENJDK_TARGET_OS),solaris)
# On Solaris, if the target is a symlink and exists, cp won't overwrite.
# Cp has to operate in recursive mode to allow for -P flag, to preserve soft links. If the
# name of the target file differs from the source file, rename after copy.
define install-file
# On Solaris, if the target is a symlink and exists, cp won't overwrite.
# Cp has to operate in recursive mode to allow for -P flag, to preserve soft links. If the
# name of the target file differs from the source file, rename after copy.
define install-file
$(MKDIR) -p $(@D)
$(RM) '$@'
$(CP) -f -r -P '$<' '$(@D)'
if [ "$(@F)" != "$(<F)" ]; then $(MV) '$(@D)/$(<F)' '$@'; fi
endef
endef
else ifeq ($(OPENJDK_TARGET_OS),macosx)
# On mac, extended attributes sometimes creep into the source files, which may later
# cause the creation of ._* files which confuses testing. Clear these with xattr if
# set. Some files get their write permissions removed after being copied to the
# output dir. When these are copied again to images, xattr would fail. By only clearing
# attributes when they are present, failing on this is avoided.
define install-file
# On mac, extended attributes sometimes creep into the source files, which may later
# cause the creation of ._* files which confuses testing. Clear these with xattr if
# set. Some files get their write permissions removed after being copied to the
# output dir. When these are copied again to images, xattr would fail. By only clearing
# attributes when they are present, failing on this is avoided.
define install-file
$(MKDIR) -p $(@D)
$(CP) -fRP '$<' '$@'
if [ -n "`$(XATTR) -l '$@'`" ]; then $(XATTR) -c '$@'; fi
endef
endef
else
define install-file
define install-file
$(MKDIR) -p $(@D)
$(CP) -fP '$<' '$@'
endef
endef
endif
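install-file is intended to be the entire recipe of a single-file copy rule, keeping the per-platform quirks above out of the rule itself. A hedged sketch with hypothetical directories (the recipe line must be tab-indented):
$(INSTALL_DIR)/lib/%: $(JDK_OUTPUTDIR)/lib/%
	$(install-file)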
# Convenience functions for working around make's limitations with $(filter ).
@@ -421,13 +423,13 @@ not-containing = $(foreach v,$2,$(if $(findstring $1,$v),,$v))
#
# Param 1 - Dir to find in
ifeq ($(OPENJDK_BUILD_OS),windows)
define FillCacheFind
define FillCacheFind
FIND_CACHE_DIR += $1
FIND_CACHE := $$(sort $$(FIND_CACHE) $$(shell $(FIND) $1 -type f -o -type l))
endef
endef
else
define FillCacheFind
endef
define FillCacheFind
endef
endif
# Mimics find by looking in the cache if all of the directories have been cached.
@@ -437,9 +439,9 @@ endif
# The extra - is needed when FIND_CACHE_DIR is empty but should be harmless.
# Param 1 - Dirs to find in
define CacheFind
$(if $(filter-out $(addsuffix %,- $(FIND_CACHE_DIR)),$1),\
$(shell $(FIND) $1 -type f -o -type l),\
$(filter $(addsuffix %,$1),$(FIND_CACHE)))
$(if $(filter-out $(addsuffix %,- $(FIND_CACHE_DIR)),$1), \
$(shell $(FIND) $1 -type f -o -type l), \
$(filter $(addsuffix %,$1),$(FIND_CACHE)))
endef
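The intended pattern is to prime the cache once per interesting tree and then reuse it for repeated listings; on non-Windows platforms FillCacheFind is empty, so CacheFind simply falls through to a real find. A sketch with a hypothetical directory variable:
$(eval $(call FillCacheFind,$(JDK_TOPDIR)/src))
ALL_SRC_FILES := $(call CacheFind,$(JDK_TOPDIR)/src)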
################################################################################


@@ -57,55 +57,55 @@ global_targets=help jprt% bridgeBuild
##############################
define CheckEnvironment
# Find all environment or command line variables that begin with ALT.
$(if $(list_alt_overrides),
@$(PRINTF) "\nWARNING: You have the following ALT_ variables set:\n"
@$(PRINTF) "$(foreach var,$(list_alt_overrides),$(var)=$$$(var))\n"
@$(PRINTF) "ALT_ variables are deprecated and will be ignored. Please clean your environment.\n\n"
)
# Find all environment or command line variables that begin with ALT.
$(if $(list_alt_overrides),
@$(PRINTF) "\nWARNING: You have the following ALT_ variables set:\n"
@$(PRINTF) "$(foreach var,$(list_alt_overrides),$(var)=$$$(var))\n"
@$(PRINTF) "ALT_ variables are deprecated and will be ignored. Please clean your environment.\n\n"
)
endef
### Functions for timers
# Record starting time for build of a sub repository.
define RecordStartTime
$(MKDIR) -p $(BUILDTIMESDIR)
$(DATE) '+%Y %m %d %H %M %S' | $(NAWK) '{ print $$1,$$2,$$3,$$4,$$5,$$6,($$4*3600+$$5*60+$$6) }' > $(BUILDTIMESDIR)/build_time_start_$1
$(DATE) '+%Y-%m-%d %H:%M:%S' > $(BUILDTIMESDIR)/build_time_start_$1_human_readable
$(MKDIR) -p $(BUILDTIMESDIR)
$(DATE) '+%Y %m %d %H %M %S' | $(NAWK) '{ print $$1,$$2,$$3,$$4,$$5,$$6,($$4*3600+$$5*60+$$6) }' > $(BUILDTIMESDIR)/build_time_start_$1
$(DATE) '+%Y-%m-%d %H:%M:%S' > $(BUILDTIMESDIR)/build_time_start_$1_human_readable
endef
# Record ending time and calculate the difference and store it in an
# easy to read format. Handles builds that cross midnight. Expects
# that a build will never take 24 hours or more.
define RecordEndTime
$(DATE) '+%Y %m %d %H %M %S' | $(NAWK) '{ print $$1,$$2,$$3,$$4,$$5,$$6,($$4*3600+$$5*60+$$6) }' > $(BUILDTIMESDIR)/build_time_end_$1
$(DATE) '+%Y-%m-%d %H:%M:%S' > $(BUILDTIMESDIR)/build_time_end_$1_human_readable
$(ECHO) `$(CAT) $(BUILDTIMESDIR)/build_time_start_$1` `$(CAT) $(BUILDTIMESDIR)/build_time_end_$1` $1 | \
$(NAWK) '{ F=$$7; T=$$14; if (F > T) { T+=3600*24 }; D=T-F; H=int(D/3600); \
M=int((D-H*3600)/60); S=D-H*3600-M*60; printf("%02d:%02d:%02d %s\n",H,M,S,$$15); }' \
> $(BUILDTIMESDIR)/build_time_diff_$1
$(DATE) '+%Y %m %d %H %M %S' | $(NAWK) '{ print $$1,$$2,$$3,$$4,$$5,$$6,($$4*3600+$$5*60+$$6) }' > $(BUILDTIMESDIR)/build_time_end_$1
$(DATE) '+%Y-%m-%d %H:%M:%S' > $(BUILDTIMESDIR)/build_time_end_$1_human_readable
$(ECHO) `$(CAT) $(BUILDTIMESDIR)/build_time_start_$1` `$(CAT) $(BUILDTIMESDIR)/build_time_end_$1` $1 | \
$(NAWK) '{ F=$$7; T=$$14; if (F > T) { T+=3600*24 }; D=T-F; H=int(D/3600); \
M=int((D-H*3600)/60); S=D-H*3600-M*60; printf("%02d:%02d:%02d %s\n",H,M,S,$$15); }' \
> $(BUILDTIMESDIR)/build_time_diff_$1
endef
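The awk program leaves a one-line summary per sub-repository in build_time_diff_<name>, formatted as HH:MM:SS followed by the name, for example (numbers made up):
#   00:07:42 jdk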
# Find all build_time_* files and print their contents in a list sorted
# on the name of the sub repository.
define ReportBuildTimes
$(BUILD_LOG_WRAPPER) $(PRINTF) -- "----- Build times -------\nStart %s\nEnd %s\n%s\n%s\n-------------------------\n" \
"`$(CAT) $(BUILDTIMESDIR)/build_time_start_TOTAL_human_readable`" \
"`$(CAT) $(BUILDTIMESDIR)/build_time_end_TOTAL_human_readable`" \
"`$(LS) $(BUILDTIMESDIR)/build_time_diff_* | $(GREP) -v _TOTAL | $(XARGS) $(CAT) | $(SORT) -k 2`" \
"`$(CAT) $(BUILDTIMESDIR)/build_time_diff_TOTAL`"
$(BUILD_LOG_WRAPPER) $(PRINTF) -- "----- Build times -------\nStart %s\nEnd %s\n%s\n%s\n-------------------------\n" \
"`$(CAT) $(BUILDTIMESDIR)/build_time_start_TOTAL_human_readable`" \
"`$(CAT) $(BUILDTIMESDIR)/build_time_end_TOTAL_human_readable`" \
"`$(LS) $(BUILDTIMESDIR)/build_time_diff_* | $(GREP) -v _TOTAL | $(XARGS) $(CAT) | $(SORT) -k 2`" \
"`$(CAT) $(BUILDTIMESDIR)/build_time_diff_TOTAL`"
endef
define ResetAllTimers
$$(shell $(MKDIR) -p $(BUILDTIMESDIR) && $(RM) $(BUILDTIMESDIR)/build_time_*)
$$(shell $(MKDIR) -p $(BUILDTIMESDIR) && $(RM) $(BUILDTIMESDIR)/build_time_*)
endef
define StartGlobalTimer
$(call RecordStartTime,TOTAL)
$(call RecordStartTime,TOTAL)
endef
define StopGlobalTimer
$(call RecordEndTime,TOTAL)
$(call RecordEndTime,TOTAL)
endef
### Functions for managing makefile structure (start/end of makefile and individual targets)
@@ -123,184 +123,184 @@ endef
# Check if the current target is the final target, as specified by
# the user on the command line. If so, call AtMakeEnd.
define CheckIfMakeAtEnd
# Check if the current target is the last goal
$(if $(filter $@,$(call LastGoal)),$(call AtMakeEnd))
# If the target is 'foo-only', check if our goal was stated as 'foo'
$(if $(filter $@,$(call LastGoal)-only),$(call AtMakeEnd))
# Check if the current target is the last goal
$(if $(filter $@,$(call LastGoal)),$(call AtMakeEnd))
# If the target is 'foo-only', check if our goal was stated as 'foo'
$(if $(filter $@,$(call LastGoal)-only),$(call AtMakeEnd))
endef
# Hook to be called when starting to execute a top-level target
define TargetEnter
$(BUILD_LOG_WRAPPER) $(PRINTF) "## Starting $(patsubst %-only,%,$@)\n"
$(call RecordStartTime,$(patsubst %-only,%,$@))
$(BUILD_LOG_WRAPPER) $(PRINTF) "## Starting $(patsubst %-only,%,$@)\n"
$(call RecordStartTime,$(patsubst %-only,%,$@))
endef
# Hook to be called when finishing execution of a top-level target
define TargetExit
$(call RecordEndTime,$(patsubst %-only,%,$@))
$(BUILD_LOG_WRAPPER) $(PRINTF) "## Finished $(patsubst %-only,%,$@) (build time %s)\n\n" \
"`$(CAT) $(BUILDTIMESDIR)/build_time_diff_$(patsubst %-only,%,$@) | $(CUT) -f 1 -d ' '`"
$(call CheckIfMakeAtEnd)
$(call RecordEndTime,$(patsubst %-only,%,$@))
$(BUILD_LOG_WRAPPER) $(PRINTF) "## Finished $(patsubst %-only,%,$@) (build time %s)\n\n" \
"`$(CAT) $(BUILDTIMESDIR)/build_time_diff_$(patsubst %-only,%,$@) | $(CUT) -f 1 -d ' '`"
$(call CheckIfMakeAtEnd)
endef
# Hook to be called as the very first thing when running a normal build
define AtMakeStart
$(if $(findstring --jobserver,$(MAKEFLAGS)),$(error make -j is not supported, use make JOBS=n))
$(call CheckEnvironment)
@$(PRINTF) $(LOG_INFO) "Running make as '$(MAKE) $(MFLAGS) $(MAKE_ARGS)'\n"
@$(PRINTF) "Building $(PRODUCT_NAME) for target '$(call GetRealTarget)' in configuration '$(CONF_NAME)'\n\n"
$(call StartGlobalTimer)
$(if $(findstring --jobserver,$(MAKEFLAGS)),$(error make -j is not supported, use make JOBS=n))
$(call CheckEnvironment)
@$(PRINTF) $(LOG_INFO) "Running make as '$(MAKE) $(MFLAGS) $(MAKE_ARGS)'\n"
@$(PRINTF) "Building $(PRODUCT_NAME) for target '$(call GetRealTarget)' in configuration '$(CONF_NAME)'\n\n"
$(call StartGlobalTimer)
endef
# Hook to be called as the very last thing for targets that are "top level" targets
define AtMakeEnd
[ -f $(SJAVAC_SERVER_DIR)/server.port ] && echo Stopping sjavac server && $(TOUCH) $(SJAVAC_SERVER_DIR)/server.port.stop; true
$(call StopGlobalTimer)
$(call ReportBuildTimes)
@$(PRINTF) "Finished building $(PRODUCT_NAME) for target '$(call GetRealTarget)'\n"
$(call CheckEnvironment)
[ -f $(SJAVAC_SERVER_DIR)/server.port ] && echo Stopping sjavac server && $(TOUCH) $(SJAVAC_SERVER_DIR)/server.port.stop; true
$(call StopGlobalTimer)
$(call ReportBuildTimes)
@$(PRINTF) "Finished building $(PRODUCT_NAME) for target '$(call GetRealTarget)'\n"
$(call CheckEnvironment)
endef
### Functions for parsing and setting up make options from command-line
define FatalError
# If the user specified a "global" target (e.g. 'help'), do not exit but continue running
$$(if $$(filter-out $(global_targets),$$(call GetRealTarget)),$$(error Cannot continue))
# If the user specified a "global" target (e.g. 'help'), do not exit but continue running
$$(if $$(filter-out $(global_targets),$$(call GetRealTarget)),$$(error Cannot continue))
endef
define ParseLogLevel
ifeq ($$(origin VERBOSE),undefined)
# Setup logging according to LOG (but only if VERBOSE is not given)
ifeq ($$(origin VERBOSE),undefined)
# Setup logging according to LOG (but only if VERBOSE is not given)
# If the "nofile" argument is given, act on it and strip it away
ifneq ($$(findstring nofile,$$(LOG)),)
# Reset the build log wrapper, regardless of other values
override BUILD_LOG_WRAPPER=
# COMMA is defined in spec.gmk, but that is not included yet
COMMA=,
# First try to remove ",nofile" if it exists
LOG_STRIPPED1=$$(subst $$(COMMA)nofile,,$$(LOG))
# Otherwise just remove "nofile"
LOG_STRIPPED2=$$(subst nofile,,$$(LOG_STRIPPED1))
# We might have ended up with a leading comma. Remove it
LOG_STRIPPED3=$$(strip $$(patsubst $$(COMMA)%,%,$$(LOG_STRIPPED2)))
LOG_LEVEL:=$$(LOG_STRIPPED3)
else
LOG_LEVEL:=$$(LOG)
endif
ifeq ($$(LOG_LEVEL),)
# Set LOG to "warn" as default if not set (and no VERBOSE given)
override LOG_LEVEL=warn
endif
ifeq ($$(LOG_LEVEL),warn)
VERBOSE=-s
else ifeq ($$(LOG_LEVEL),info)
VERBOSE=-s
else ifeq ($$(LOG_LEVEL),debug)
VERBOSE=
else ifeq ($$(LOG_LEVEL),trace)
VERBOSE=
else
$$(info Error: LOG must be one of: warn, info, debug or trace.)
$$(eval $$(call FatalError))
endif
# If the "nofile" argument is given, act on it and strip it away
ifneq ($$(findstring nofile,$$(LOG)),)
# Reset the build log wrapper, regardless of other values
override BUILD_LOG_WRAPPER=
# COMMA is defined in spec.gmk, but that is not included yet
COMMA=,
# First try to remove ",nofile" if it exists
LOG_STRIPPED1=$$(subst $$(COMMA)nofile,,$$(LOG))
# Otherwise just remove "nofile"
LOG_STRIPPED2=$$(subst nofile,,$$(LOG_STRIPPED1))
# We might have ended up with a leading comma. Remove it
LOG_STRIPPED3=$$(strip $$(patsubst $$(COMMA)%,%,$$(LOG_STRIPPED2)))
LOG_LEVEL:=$$(LOG_STRIPPED3)
else
# Provide reasonable interpretations of LOG_LEVEL if VERBOSE is given.
ifeq ($(VERBOSE),)
LOG_LEVEL:=debug
else
LOG_LEVEL:=warn
endif
ifneq ($$(LOG),)
# We have both a VERBOSE and a LOG argument. This is OK only if this is a repeated call by ourselves,
# but complain if this is the top-level make call.
ifeq ($$(MAKELEVEL),0)
$$(info Cannot use LOG=$$(LOG) and VERBOSE=$$(VERBOSE) at the same time. Choose one.)
$$(eval $$(call FatalError))
endif
endif
LOG_LEVEL:=$$(LOG)
endif
ifeq ($$(LOG_LEVEL),)
# Set LOG to "warn" as default if not set (and no VERBOSE given)
override LOG_LEVEL=warn
endif
ifeq ($$(LOG_LEVEL),warn)
VERBOSE=-s
else ifeq ($$(LOG_LEVEL),info)
VERBOSE=-s
else ifeq ($$(LOG_LEVEL),debug)
VERBOSE=
else ifeq ($$(LOG_LEVEL),trace)
VERBOSE=
else
$$(info Error: LOG must be one of: warn, info, debug or trace.)
$$(eval $$(call FatalError))
endif
else
# Provide reasonable interpretations of LOG_LEVEL if VERBOSE is given.
ifeq ($(VERBOSE),)
LOG_LEVEL:=debug
else
LOG_LEVEL:=warn
endif
ifneq ($$(LOG),)
# We have both a VERBOSE and a LOG argument. This is OK only if this is a repeated call by ourselves,
# but complain if this is the top-level make call.
ifeq ($$(MAKELEVEL),0)
$$(info Cannot use LOG=$$(LOG) and VERBOSE=$$(VERBOSE) at the same time. Choose one.)
$$(eval $$(call FatalError))
endif
endif
endif
endef
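In effect this maps the user-facing LOG= argument onto make's own verbosity plus the LOG_* redirects set up in MakeBase.gmk. Illustrative invocations, following the values handled above:
# make LOG=warn          quiet build (runs with -s); only warnings and progress lines
# make LOG=info          quiet build, plus $(LOG_INFO) messages
# make LOG=debug         recipes are echoed, plus $(LOG_DEBUG) messages
# make LOG=trace         as debug, plus the shell-tracing wrapper
# make LOG=info,nofile   as info, but without writing a build.log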
define ParseConfAndSpec
ifneq ($$(filter-out $(global_targets),$$(call GetRealTarget)),)
# If we only have global targets, no need to bother with SPEC or CONF
ifneq ($$(origin SPEC),undefined)
# We have been given a SPEC, check that it works out properly
ifeq ($$(wildcard $$(SPEC)),)
$$(info Cannot locate spec.gmk, given by SPEC=$$(SPEC))
$$(eval $$(call FatalError))
endif
ifneq ($$(origin CONF),undefined)
# We also have a CONF argument. This is OK only if this is a repeated call by ourselves,
# but complain if this is the top-level make call.
ifeq ($$(MAKELEVEL),0)
$$(info Cannot use CONF=$$(CONF) and SPEC=$$(SPEC) at the same time. Choose one.)
$$(eval $$(call FatalError))
endif
endif
# ... OK, we're satisfied, we'll use this SPEC later on
else
# Find all spec.gmk files in the build output directory
output_dir=$$(root_dir)/build
all_spec_files=$$(wildcard $$(output_dir)/*/spec.gmk)
ifeq ($$(all_spec_files),)
$$(info No configurations found for $$(root_dir)! Please run configure to create a configuration.)
$$(eval $$(call FatalError))
endif
# Extract the configuration names from the path
all_confs=$$(patsubst %/spec.gmk,%,$$(patsubst $$(output_dir)/%,%,$$(all_spec_files)))
ifneq ($$(origin CONF),undefined)
# The user has given a CONF= argument.
ifeq ($$(CONF),)
# If given CONF=, match all configurations
matching_confs=$$(strip $$(all_confs))
else
# Otherwise select those that contain the given CONF string
matching_confs=$$(strip $$(foreach var,$$(all_confs),$$(if $$(findstring $$(CONF),$$(var)),$$(var))))
endif
ifeq ($$(matching_confs),)
$$(info No configurations found matching CONF=$$(CONF))
$$(info Available configurations:)
$$(foreach var,$$(all_confs),$$(info * $$(var)))
$$(eval $$(call FatalError))
else
ifeq ($$(words $$(matching_confs)),1)
$$(info Building '$$(matching_confs)' (matching CONF=$$(CONF)))
else
$$(info Building target '$(call GetRealTarget)' in the following configurations (matching CONF=$$(CONF)):)
$$(foreach var,$$(matching_confs),$$(info * $$(var)))
endif
endif
# Create a SPEC definition. This will contain the path to one or more spec.gmk files.
SPEC=$$(addsuffix /spec.gmk,$$(addprefix $$(output_dir)/,$$(matching_confs)))
else
# No CONF or SPEC given, check the available configurations
ifneq ($$(words $$(all_spec_files)),1)
$$(info No CONF given, but more than one configuration found in $$(output_dir).)
$$(info Available configurations:)
$$(foreach var,$$(all_confs),$$(info * $$(var)))
$$(info Please retry building with CONF=<config pattern> (or SPEC=<specfile>))
$$(eval $$(call FatalError))
endif
# We found exactly one configuration, use it
SPEC=$$(strip $$(all_spec_files))
endif
ifneq ($$(filter-out $(global_targets),$$(call GetRealTarget)),)
# If we only have global targets, no need to bother with SPEC or CONF
ifneq ($$(origin SPEC),undefined)
# We have been given a SPEC, check that it works out properly
ifeq ($$(wildcard $$(SPEC)),)
$$(info Cannot locate spec.gmk, given by SPEC=$$(SPEC))
$$(eval $$(call FatalError))
endif
ifneq ($$(origin CONF),undefined)
# We also have a CONF argument. This is OK only if this is a repeated call by ourselves,
# but complain if this is the top-level make call.
ifeq ($$(MAKELEVEL),0)
$$(info Cannot use CONF=$$(CONF) and SPEC=$$(SPEC) at the same time. Choose one.)
$$(eval $$(call FatalError))
endif
endif
# ... OK, we're satisfied, we'll use this SPEC later on
else
# Find all spec.gmk files in the build output directory
output_dir=$$(root_dir)/build
all_spec_files=$$(wildcard $$(output_dir)/*/spec.gmk)
ifeq ($$(all_spec_files),)
$$(info No configurations found for $$(root_dir)! Please run configure to create a configuration.)
$$(eval $$(call FatalError))
endif
# Extract the configuration names from the path
all_confs=$$(patsubst %/spec.gmk,%,$$(patsubst $$(output_dir)/%,%,$$(all_spec_files)))
ifneq ($$(origin CONF),undefined)
# The user has given a CONF= argument.
ifeq ($$(CONF),)
# If given CONF=, match all configurations
matching_confs=$$(strip $$(all_confs))
else
# Otherwise select those that contain the given CONF string
matching_confs=$$(strip $$(foreach var,$$(all_confs),$$(if $$(findstring $$(CONF),$$(var)),$$(var))))
endif
ifeq ($$(matching_confs),)
$$(info No configurations found matching CONF=$$(CONF))
$$(info Available configurations:)
$$(foreach var,$$(all_confs),$$(info * $$(var)))
$$(eval $$(call FatalError))
else
ifeq ($$(words $$(matching_confs)),1)
$$(info Building '$$(matching_confs)' (matching CONF=$$(CONF)))
else
$$(info Building target '$(call GetRealTarget)' in the following configurations (matching CONF=$$(CONF)):)
$$(foreach var,$$(matching_confs),$$(info * $$(var)))
endif
endif
# Create a SPEC definition. This will contain the path to one or more spec.gmk files.
SPEC=$$(addsuffix /spec.gmk,$$(addprefix $$(output_dir)/,$$(matching_confs)))
else
# No CONF or SPEC given, check the available configurations
ifneq ($$(words $$(all_spec_files)),1)
$$(info No CONF given, but more than one configuration found in $$(output_dir).)
$$(info Available configurations:)
$$(foreach var,$$(all_confs),$$(info * $$(var)))
$$(info Please retry building with CONF=<config pattern> (or SPEC=<specfile>))
$$(eval $$(call FatalError))
endif
# We found exactly one configuration, use it
SPEC=$$(strip $$(all_spec_files))
endif
endif
endif
endef
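Concretely, this is what lets the same top-level make invocation address one or many configurations. Example command lines (configuration names are hypothetical):
# make CONF=linux-x86_64-normal-server-release images    builds exactly that configuration
# make CONF=debug images                                 builds every configuration whose name contains "debug"
# make CONF= images                                      builds all configurations found under ./build
# make SPEC=build/mybuild/spec.gmk images                uses one spec file directly, bypassing CONF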
### Convenience functions from Main.gmk
# Cleans the component given as $1
define CleanComponent
@$(PRINTF) "Cleaning $1 build artifacts ..."
@($(CD) $(OUTPUT_ROOT) && $(RM) -r $1)
@$(PRINTF) " done\n"
@$(PRINTF) "Cleaning $1 build artifacts ..."
@($(CD) $(OUTPUT_ROOT) && $(RM) -r $1)
@$(PRINTF) " done\n"
endef
endif # _MAKEHELPERS_GMK

File diff suppressed because it is too large


@@ -24,74 +24,74 @@
#
define SetupRMICompilation
# param 1 is a name for a variable to depend on.
# param 2 and up are named args.
# CLASSES:=List of classes to generate stubs for
# CLASSES_DIR:=Directory where to look for classes
# STUB_CLASSES_DIR:=Directory where to put stub classes
# RUN_V11:=Set to run rmic with -v1.1
# RUN_V12:=Set to run rmic with -v1.2
# RUN_IIOP:=Set to run rmic with -iiop
# RUN_IIOP_STDPKG:=Set to run rmic with -iiop -standardPackage
# KEEP_GENERATED:=Set to keep generated sources around
$(foreach i,2 3 4 5 6 7 8 9 10 11 12 13 14 15, $(if $($i),$1_$(strip $($i)))$(NEWLINE))
$(call LogSetupMacroEntry,SetupRMICompilation($1),$2,$3,$4,$5,$6,$7,$8,$9,$(10),$(11),$(12),$(13),$(14),$(15))
$(if $(16),$(error Internal makefile error: Too many arguments to SetupRMICompilation, please update RMICompilation.gmk))
# param 1 is a name for a variable to depend on.
# param 2 and up are named args.
# CLASSES:=List of classes to generate stubs for
# CLASSES_DIR:=Directory where to look for classes
# STUB_CLASSES_DIR:=Directory where to put stub classes
# RUN_V11:=Set to run rmic with -v1.1
# RUN_V12:=Set to run rmic with -v1.2
# RUN_IIOP:=Set to run rmic with -iiop
# RUN_IIOP_STDPKG:=Set to run rmic with -iiop -standardPackage
# KEEP_GENERATED:=Set to keep generated sources around
$(foreach i,2 3 4 5 6 7 8 9 10 11 12 13 14 15, $(if $($i),$1_$(strip $($i)))$(NEWLINE))
$(call LogSetupMacroEntry,SetupRMICompilation($1),$2,$3,$4,$5,$6,$7,$8,$9,$(10),$(11),$(12),$(13),$(14),$(15))
$(if $(16),$(error Internal makefile error: Too many arguments to SetupRMICompilation, please update RMICompilation.gmk))
$1_DEP_FILE := $$($1_STUB_CLASSES_DIR)/$1_rmic
$1_DEP_FILE := $$($1_STUB_CLASSES_DIR)/$1_rmic
$1_CLASSES_SLASH := $$(subst .,/,$$($1_CLASSES))
$1_CLASS_FILES := $$(addprefix $$($1_CLASSES_DIR)/,$$(addsuffix .class,$$($1_CLASSES_SLASH)))
$1_STUB_FILES := $$(addprefix $$($1_STUB_CLASSES_DIR)/,$$(addsuffix _Stub.class,$$($1_CLASSES_SLASH)))
$1_TARGETS := $$($1_STUB_FILES)
$1_ARGS :=
ifneq (,$$($1_RUN_V11))
$1_SKEL_FILES := $$(addprefix $$($1_STUB_CLASSES_DIR)/,$$(addsuffix _Skel.class,$$($1_CLASSES_SLASH)))
$1_TARGETS += $$($1_SKEL_FILES)
$1_ARGS += -v1.1
endif
ifneq (,$$($1_RUN_V12))
$1_ARGS += -v1.2
endif
$1_CLASSES_SLASH := $$(subst .,/,$$($1_CLASSES))
$1_CLASS_FILES := $$(addprefix $$($1_CLASSES_DIR)/,$$(addsuffix .class,$$($1_CLASSES_SLASH)))
$1_STUB_FILES := $$(addprefix $$($1_STUB_CLASSES_DIR)/,$$(addsuffix _Stub.class,$$($1_CLASSES_SLASH)))
$1_TARGETS := $$($1_STUB_FILES)
$1_ARGS :=
ifneq (,$$($1_RUN_V11))
$1_SKEL_FILES := $$(addprefix $$($1_STUB_CLASSES_DIR)/,$$(addsuffix _Skel.class,$$($1_CLASSES_SLASH)))
$1_TARGETS += $$($1_SKEL_FILES)
$1_ARGS += -v1.1
endif
ifneq (,$$($1_RUN_V12))
$1_ARGS += -v1.2
endif
$1_TIE_BASE_FILES := $$(foreach f,$$($1_CLASSES_SLASH),$$(dir $$f)_$$(notdir $$f))
$1_TIE_FILES := $$(addprefix $$($1_STUB_CLASSES_DIR)/org/omg/stub/,$$(addsuffix _Tie.class,$$($1_TIE_BASE_FILES)))
$1_TIE_STDPKG_FILES := $$(addprefix $$($1_STUB_CLASSES_DIR)/,$$(addsuffix _Tie.class,$$($1_TIE_BASE_FILES)))
$1_TIE_BASE_FILES := $$(foreach f,$$($1_CLASSES_SLASH),$$(dir $$f)_$$(notdir $$f))
$1_TIE_FILES := $$(addprefix $$($1_STUB_CLASSES_DIR)/org/omg/stub/,$$(addsuffix _Tie.class,$$($1_TIE_BASE_FILES)))
$1_TIE_STDPKG_FILES := $$(addprefix $$($1_STUB_CLASSES_DIR)/,$$(addsuffix _Tie.class,$$($1_TIE_BASE_FILES)))
ifneq (,$$($1_RUN_IIOP))
$1_TARGETS += $$($1_TIE_FILES)
$1_ARGS += -iiop
endif
ifneq (,$$($1_RUN_IIOP_STDPKG))
$1_TARGETS += $$($1_TIE_STDPKG_FILES)
$1_ARGS2 := -iiop -standardPackage
endif
ifneq (,$$($1_RUN_IIOP))
$1_TARGETS += $$($1_TIE_FILES)
$1_ARGS += -iiop
endif
ifneq (,$$($1_RUN_IIOP_STDPKG))
$1_TARGETS += $$($1_TIE_STDPKG_FILES)
$1_ARGS2 := -iiop -standardPackage
endif
ifneq (,$$($1_KEEP_GENERATED))
$1_ARGS += -keepgenerated
$1_TARGETS += $$(subst .class,.java,$$($1_TARGETS))
endif
ifneq (,$$($1_KEEP_GENERATED))
$1_ARGS += -keepgenerated
$1_TARGETS += $$(subst .class,.java,$$($1_TARGETS))
endif
$1_DOLLAR_SAFE_CLASSES := $$(subst $$$$,\$$$$,$$($1_CLASSES))
$1_DOLLAR_SAFE_CLASSES := $$(subst $$$$,\$$$$,$$($1_CLASSES))
$$($1_TARGETS): $$($1_DEP_FILE) $$($1_CLASS_FILES)
$$($1_TARGETS): $$($1_DEP_FILE) $$($1_CLASS_FILES)
$$($1_DEP_FILE): $$($1_CLASS_FILES)
$$($1_DEP_FILE): $$($1_CLASS_FILES)
$(MKDIR) -p $$($1_STUB_CLASSES_DIR)
if [ "x$$($1_ARGS)" != "x" ]; then \
$(ECHO) $(LOG_INFO) Running rmic $$($1_ARGS) for $$($1_DOLLAR_SAFE_CLASSES) &&\
$(RMIC) $$($1_ARGS) -classpath "$$($1_CLASSES_DIR)" \
-d $$($1_STUB_CLASSES_DIR) $$($1_DOLLAR_SAFE_CLASSES);\
$(ECHO) $(LOG_INFO) Running rmic $$($1_ARGS) for $$($1_DOLLAR_SAFE_CLASSES) && \
$(RMIC) $$($1_ARGS) -classpath "$$($1_CLASSES_DIR)" \
-d $$($1_STUB_CLASSES_DIR) $$($1_DOLLAR_SAFE_CLASSES); \
fi;
if [ "x$$($1_ARGS2)" != "x" ]; then \
$(ECHO) $(LOG_INFO) Running rmic $$($1_ARGS2) for $$($1_DOLLAR_SAFE_CLASSES) &&\
$(RMIC) $$($1_ARGS2) -classpath "$$($1_CLASSES_DIR)" \
-d $$($1_STUB_CLASSES_DIR) $$($1_DOLLAR_SAFE_CLASSES);\
$(ECHO) $(LOG_INFO) Running rmic $$($1_ARGS2) for $$($1_DOLLAR_SAFE_CLASSES) && \
$(RMIC) $$($1_ARGS2) -classpath "$$($1_CLASSES_DIR)" \
-d $$($1_STUB_CLASSES_DIR) $$($1_DOLLAR_SAFE_CLASSES); \
fi;
$1 := $$($1_TARGETS)
$1 := $$($1_TARGETS)
# By marking as secondary, this "touch" file doesn't need to be touched and will never exist.
.SECONDARY: $$($1_DEP_FILE)
# By marking as secondary, this "touch" file doesn't need to be touched and will never exist.
.SECONDARY: $$($1_DEP_FILE)
endef
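A minimal usage sketch of SetupRMICompilation (the class and directory names below are hypothetical placeholders, not from this commit), assuming this file is included and RMIC, NEWLINE and the output directories are set up by the build:

$(eval $(call SetupRMICompilation,RMI_EXAMPLE,\
    CLASSES:=com.example.rmi.RemoteCalcImpl,\
    CLASSES_DIR:=$(EXAMPLE_CLASSES_DIR),\
    STUB_CLASSES_DIR:=$(EXAMPLE_STUB_DIR),\
    RUN_V12:=true,\
    KEEP_GENERATED:=true))

example-stubs: $(RMI_EXAMPLE)

This generates _Stub classes (and keeps the generated .java sources) for the listed class, and RMI_EXAMPLE expands to all produced targets.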

View file

@@ -43,7 +43,7 @@
#
# To build the full set of crosstools, use a command line looking like this:
#
# make tars RPM_DIR_x86_64=/tmp/oel55-x86_64/Server/ RPM_DIR_i686=/tmp/oel55-i686/Server/
# make tars RPM_DIR_x86_64=/tmp/oel55-x86_64/Server/ RPM_DIR_i686=/tmp/oel55-i686/Server/
#
# To create a x86_64 package without the redundant i686 cross compiler, do
# like this:
@@ -54,70 +54,68 @@
# Main makefile which iterates over all host and target platforms.
#
os := $(shell uname -o)
cpu := x86_64
os := $(shell uname -o)
cpu := x86_64
#$(shell uname -p)
#
# This wrapper script can handle exactly these platforms
#
platforms := $(foreach p,x86_64 i686,$(p)-unknown-linux-gnu)
#platforms := $(foreach p,x86_64,$(p)-unknown-linux-gnu)
platforms := $(foreach p,x86_64 i686,$(p)-unknown-linux-gnu)
#platforms := $(foreach p,x86_64,$(p)-unknown-linux-gnu)
# Figure out what platform this is building on.
me := $(cpu)-$(if $(findstring Linux,$(os)),unknown-linux-gnu)
me := $(cpu)-$(if $(findstring Linux,$(os)),unknown-linux-gnu)
$(info Building on platform $(me))
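As a worked example, on a 64-bit Linux build host where 'uname -o' prints GNU/Linux, the variables above evaluate to:

  os  = GNU/Linux
  cpu = x86_64
  me  = x86_64-unknown-linux-gnu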
all compile : $(platforms)
all compile : $(platforms)
ifeq (,$(SKIP_ME))
$(foreach p,$(filter-out $(me),$(platforms)),$(eval $(p) : $$(me)))
$(foreach p,$(filter-out $(me),$(platforms)),$(eval $(p) : $$(me)))
endif
OUTPUT_ROOT = $(abspath ../../../build/devkit)
RESULT = $(OUTPUT_ROOT)/result
OUTPUT_ROOT = $(abspath ../../../build/devkit)
RESULT = $(OUTPUT_ROOT)/result
submakevars = HOST=$@ BUILD=$(me) \
RESULT=$(RESULT) PREFIX=$(RESULT)/$@ \
OUTPUT_ROOT=$(OUTPUT_ROOT)
$(platforms) :
@echo 'Building compilers for $@'
@echo 'Targets: $(platforms)'
for p in $@ $(filter-out $@,$(platforms)); do \
$(MAKE) -f Tools.gmk all $(submakevars) \
TARGET=$$p || exit 1 ; \
done
@echo 'Building ccache program for $@'
$(MAKE) -f Tools.gmk ccache $(submakevars) TARGET=$@
@echo 'All done"'
submakevars = HOST=$@ BUILD=$(me) \
RESULT=$(RESULT) PREFIX=$(RESULT)/$@ \
OUTPUT_ROOT=$(OUTPUT_ROOT)
$(platforms) :
@echo 'Building compilers for $@'
@echo 'Targets: $(platforms)'
for p in $@ $(filter-out $@,$(platforms)); do \
$(MAKE) -f Tools.gmk all $(submakevars) \
TARGET=$$p || exit 1 ; \
done
@echo 'Building ccache program for $@'
$(MAKE) -f Tools.gmk ccache $(submakevars) TARGET=$@
@echo 'All done'
$(foreach a,i686 x86_64,$(eval $(a) : $(filter $(a)%,$(platforms))))
ia32 : i686
today := $(shell date +%Y%m%d)
ia32 : i686
today := $(shell date +%Y%m%d)
define Mktar
$(1)_tar = $$(RESULT)/sdk-$(1)-$$(today).tar.gz
$$($(1)_tar) : PLATFORM = $(1)
TARFILES += $$($(1)_tar)
$$($(1)_tar) : $(1) $$(shell find $$(RESULT)/$(1))
$(1)_tar = $$(RESULT)/sdk-$(1)-$$(today).tar.gz
$$($(1)_tar) : PLATFORM = $(1)
TARFILES += $$($(1)_tar)
$$($(1)_tar) : $(1) $$(shell find $$(RESULT)/$(1))
endef
$(foreach p,$(platforms),$(eval $(call Mktar,$(p))))
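For a single platform, the Mktar call above expands to roughly the following rules (a sketch; the date suffix comes from $(today) and is shown here with an example value):

$(RESULT)/sdk-x86_64-unknown-linux-gnu-20131017.tar.gz : PLATFORM = x86_64-unknown-linux-gnu
TARFILES += $(RESULT)/sdk-x86_64-unknown-linux-gnu-20131017.tar.gz
$(RESULT)/sdk-x86_64-unknown-linux-gnu-20131017.tar.gz : x86_64-unknown-linux-gnu $(shell find $(RESULT)/x86_64-unknown-linux-gnu)

The generic %.tar.gz rule below then packages $(RESULT)/$(PLATFORM) into that archive.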
tars : all $(TARFILES)
onlytars : $(TARFILES)
%.tar.gz :
@echo 'Creating compiler package $@'
cd $(RESULT)/$(PLATFORM) && tar -czf $@ *
touch $@
tars : all $(TARFILES)
onlytars : $(TARFILES)
%.tar.gz :
@echo 'Creating compiler package $@'
cd $(RESULT)/$(PLATFORM) && tar -czf $@ *
touch $@
clean :
clean :
rm -rf build result
FORCE :
.PHONY : $(configs) $(platforms)
FORCE :
.PHONY : $(configs) $(platforms)

View file

@@ -43,45 +43,45 @@ $(info TARGET=$(TARGET))
$(info HOST=$(HOST))
$(info BUILD=$(BUILD))
ARCH := $(word 1,$(subst -, ,$(TARGET)))
ARCH := $(word 1,$(subst -, ,$(TARGET)))
##########################################################################################
# Define external dependencies
# Latest that could be made to work.
gcc_ver := gcc-4.7.3
binutils_ver := binutils-2.22
ccache_ver := ccache-3.1.9
mpfr_ver := mpfr-3.0.1
gmp_ver := gmp-4.3.2
mpc_ver := mpc-1.0.1
gcc_ver := gcc-4.7.3
binutils_ver := binutils-2.22
ccache_ver := ccache-3.1.9
mpfr_ver := mpfr-3.0.1
gmp_ver := gmp-4.3.2
mpc_ver := mpc-1.0.1
GCC := http://ftp.gnu.org/pub/gnu/gcc/$(gcc_ver)/$(gcc_ver).tar.bz2
BINUTILS := http://ftp.gnu.org/pub/gnu/binutils/$(binutils_ver).tar.bz2
CCACHE := http://samba.org/ftp/ccache/$(ccache_ver).tar.gz
MPFR := http://www.mpfr.org/${mpfr_ver}/${mpfr_ver}.tar.bz2
GMP := http://ftp.gnu.org/pub/gnu/gmp/${gmp_ver}.tar.bz2
MPC := http://www.multiprecision.org/mpc/download/${mpc_ver}.tar.gz
GCC := http://ftp.gnu.org/pub/gnu/gcc/$(gcc_ver)/$(gcc_ver).tar.bz2
BINUTILS := http://ftp.gnu.org/pub/gnu/binutils/$(binutils_ver).tar.bz2
CCACHE := http://samba.org/ftp/ccache/$(ccache_ver).tar.gz
MPFR := http://www.mpfr.org/${mpfr_ver}/${mpfr_ver}.tar.bz2
GMP := http://ftp.gnu.org/pub/gnu/gmp/${gmp_ver}.tar.bz2
MPC := http://www.multiprecision.org/mpc/download/${mpc_ver}.tar.gz
# RPMs in OEL5.5
RPM_LIST := \
kernel-headers \
glibc-2 glibc-headers glibc-devel \
cups-libs cups-devel \
libX11 libX11-devel \
xorg-x11-proto-devel \
alsa-lib alsa-lib-devel \
libXext libXext-devel \
libXtst libXtst-devel \
libXrender libXrender-devel \
freetype freetype-devel \
libXt libXt-devel \
libSM libSM-devel \
libICE libICE-devel \
libXi libXi-devel \
libXdmcp libXdmcp-devel \
libXau libXau-devel \
libgcc
RPM_LIST := \
kernel-headers \
glibc-2 glibc-headers glibc-devel \
cups-libs cups-devel \
libX11 libX11-devel \
xorg-x11-proto-devel \
alsa-lib alsa-lib-devel \
libXext libXext-devel \
libXtst libXtst-devel \
libXrender libXrender-devel \
freetype freetype-devel \
libXt libXt-devel \
libSM libSM-devel \
libICE libICE-devel \
libXi libXi-devel \
libXdmcp libXdmcp-devel \
libXau libXau-devel \
libgcc
ifeq ($(ARCH),x86_64)
@@ -90,7 +90,7 @@ ifeq ($(ARCH),x86_64)
ifeq ($(BUILD),$(HOST))
ifeq ($(TARGET),$(HOST))
# When building the native compiler for x86_64, enable mixed mode.
RPM_ARCHS += i386 i686
RPM_ARCHS += i386 i686
endif
endif
else
@@ -110,47 +110,47 @@ endif
# Ensure we have 32-bit libs also for x64. We enable mixed-mode.
ifeq (x86_64,$(ARCH))
LIBDIRS := lib64 lib
CFLAGS_lib := -m32
LIBDIRS := lib64 lib
CFLAGS_lib := -m32
else
LIBDIRS := lib
LIBDIRS := lib
endif
# Define directories
RESULT := $(OUTPUT_ROOT)/result
BUILDDIR := $(OUTPUT_ROOT)/$(HOST)/$(TARGET)
PREFIX := $(RESULT)/$(HOST)
TARGETDIR := $(PREFIX)/$(TARGET)
SYSROOT := $(TARGETDIR)/sys-root
DOWNLOAD := $(OUTPUT_ROOT)/download
SRCDIR := $(OUTPUT_ROOT)/src
RESULT := $(OUTPUT_ROOT)/result
BUILDDIR := $(OUTPUT_ROOT)/$(HOST)/$(TARGET)
PREFIX := $(RESULT)/$(HOST)
TARGETDIR := $(PREFIX)/$(TARGET)
SYSROOT := $(TARGETDIR)/sys-root
DOWNLOAD := $(OUTPUT_ROOT)/download
SRCDIR := $(OUTPUT_ROOT)/src
# Marker file for unpacking rpms
rpms := $(SYSROOT)/rpms_unpacked
rpms := $(SYSROOT)/rpms_unpacked
# Need to patch libs that are linker scripts to use non-absolute paths
libs := $(SYSROOT)/libs_patched
libs := $(SYSROOT)/libs_patched
##########################################################################################
# Unpack source packages
# Generate downloading + unpacking of sources.
define Download
$(1)_DIR = $(abspath $(SRCDIR)/$(basename $(basename $(notdir $($(1))))))
$(1)_CFG = $$($(1)_DIR)/configure
$(1)_FILE = $(DOWNLOAD)/$(notdir $($(1)))
$(1)_DIR = $(abspath $(SRCDIR)/$(basename $(basename $(notdir $($(1))))))
$(1)_CFG = $$($(1)_DIR)/configure
$(1)_FILE = $(DOWNLOAD)/$(notdir $($(1)))
$$($(1)_CFG) : $$($(1)_FILE)
mkdir -p $$(SRCDIR)
tar -C $$(SRCDIR) -x$$(if $$(findstring .gz, $$<),z,j)f $$<
$$(foreach p,$$(abspath $$(wildcard $$(notdir $$($(1)_DIR)).patch)), \
echo PATCHING $$(p) ; \
patch -d $$($(1)_DIR) -p1 -i $$(p) ; \
)
touch $$@
$$($(1)_CFG) : $$($(1)_FILE)
mkdir -p $$(SRCDIR)
tar -C $$(SRCDIR) -x$$(if $$(findstring .gz, $$<),z,j)f $$<
$$(foreach p,$$(abspath $$(wildcard $$(notdir $$($(1)_DIR)).patch)), \
echo PATCHING $$(p) ; \
patch -d $$($(1)_DIR) -p1 -i $$(p) ; \
)
touch $$@
$$($(1)_FILE) :
wget -P $(DOWNLOAD) $$($(1))
$$($(1)_FILE) :
wget -P $(DOWNLOAD) $$($(1))
endef
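As a worked example, $(call Download,GCC) defines roughly:

GCC_DIR  = $(SRCDIR)/gcc-4.7.3
GCC_CFG  = $(SRCDIR)/gcc-4.7.3/configure
GCC_FILE = $(DOWNLOAD)/gcc-4.7.3.tar.bz2

together with a rule that fetches GCC_FILE with wget and a rule that untars it into $(SRCDIR) (with -j here, since the URL does not end in .gz), applies any gcc-4.7.3.patch found in the current directory, and finally touches the configure script to mark the unpack as done.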
# Download and unpack all source packages
@@ -161,24 +161,24 @@ $(foreach p,GCC BINUTILS CCACHE MPFR GMP MPC,$(eval $(call Download,$(p))))
# Note. For building linux you should install rpm2cpio.
define unrpm
$(SYSROOT)/$(notdir $(1)).unpacked \
: $(1)
$$(rpms) : $(SYSROOT)/$(notdir $(1)).unpacked
$(SYSROOT)/$(notdir $(1)).unpacked \
: $(1)
$$(rpms) : $(SYSROOT)/$(notdir $(1)).unpacked
endef
%.unpacked :
$(info Unpacking target rpms and libraries from $<)
@(mkdir -p $(@D); \
cd $(@D); \
rpm2cpio $< | \
cpio --extract --make-directories \
-f \
"./usr/share/doc/*" \
"./usr/share/man/*" \
"./usr/X11R6/man/*" \
"*/X11/locale/*" \
|| die ; )
touch $@
%.unpacked :
$(info Unpacking target rpms and libraries from $<)
@(mkdir -p $(@D); \
cd $(@D); \
rpm2cpio $< | \
cpio --extract --make-directories \
-f \
"./usr/share/doc/*" \
"./usr/share/man/*" \
"./usr/X11R6/man/*" \
"*/X11/locale/*" \
|| die ; )
touch $@
$(foreach p,$(RPM_FILE_LIST),$(eval $(call unrpm,$(p))))
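For a single RPM (the file name below is a hypothetical example under the documented RPM_DIR layout), the unrpm call generates:

$(SYSROOT)/libX11-devel-1.0.3-11.el5.x86_64.rpm.unpacked : /tmp/oel55-x86_64/Server/libX11-devel-1.0.3-11.el5.x86_64.rpm
$(rpms) : $(SYSROOT)/libX11-devel-1.0.3-11.el5.x86_64.rpm.unpacked

so the shared %.unpacked rule above runs rpm2cpio | cpio once per package, and the $(rpms) marker file depends on all of them.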
@@ -190,17 +190,17 @@ $(foreach p,$(RPM_FILE_LIST),$(eval $(call unrpm,$(p))))
# have it anyway, but just to make sure...
# Patch libc.so and libpthread.so to force linking against libraries in sysroot
# and not the ones installed on the build machine.
$(libs) : $(rpms)
@echo Patching libc and pthreads
@(for f in `find $(SYSROOT) -name libc.so -o -name libpthread.so`; do \
(cat $$f | sed -e 's|/usr/lib64/||g' \
-e 's|/usr/lib/||g' \
-e 's|/lib64/||g' \
-e 's|/lib/||g' ) > $$f.tmp ; \
mv $$f.tmp $$f ; \
done)
@mkdir -p $(SYSROOT)/usr/lib
@touch $@
$(libs) : $(rpms)
@echo Patching libc and pthreads
@(for f in `find $(SYSROOT) -name libc.so -o -name libpthread.so`; do \
(cat $$f | sed -e 's|/usr/lib64/||g' \
-e 's|/usr/lib/||g' \
-e 's|/lib64/||g' \
-e 's|/lib/||g' ) > $$f.tmp ; \
mv $$f.tmp $$f ; \
done)
@mkdir -p $(SYSROOT)/usr/lib
@touch $@
##########################################################################################
@@ -210,29 +210,29 @@ $(foreach t,binutils mpfr gmp mpc gcc ccache,$(eval $(t) = $(TARGETDIR)/$($(t)_v
##########################################################################################
# Default base config
CONFIG = --target=$(TARGET) \
--host=$(HOST) --build=$(BUILD) \
--prefix=$(PREFIX)
CONFIG = --target=$(TARGET) \
--host=$(HOST) --build=$(BUILD) \
--prefix=$(PREFIX)
PATHEXT = $(RESULT)/$(BUILD)/bin:
PATHEXT = $(RESULT)/$(BUILD)/bin:
PATHPRE = PATH=$(PATHEXT)$(PATH)
BUILDPAR = -j16
PATHPRE = PATH=$(PATHEXT)$(PATH)
BUILDPAR = -j16
# Default commands to when making
MAKECMD =
INSTALLCMD = install
MAKECMD =
INSTALLCMD = install
declare_tools = CC$(1)=$(2)gcc LD$(1)=$(2)ld AR$(1)=$(2)ar AS$(1)=$(2)as RANLIB$(1)=$(2)ranlib CXX$(1)=$(2)g++ OBJDUMP$(1)=$(2)objdump
declare_tools = CC$(1)=$(2)gcc LD$(1)=$(2)ld AR$(1)=$(2)ar AS$(1)=$(2)as RANLIB$(1)=$(2)ranlib CXX$(1)=$(2)g++ OBJDUMP$(1)=$(2)objdump
ifeq ($(HOST),$(BUILD))
ifeq ($(HOST),$(TARGET))
TOOLS = $(call declare_tools,_FOR_TARGET,)
endif
ifeq ($(HOST),$(TARGET))
TOOLS = $(call declare_tools,_FOR_TARGET,)
endif
endif
TOOLS ?= $(call declare_tools,_FOR_TARGET,$(TARGET)-)
TOOLS ?= $(call declare_tools,_FOR_TARGET,$(TARGET)-)
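As a worked example, with TARGET=x86_64-unknown-linux-gnu the fallback above expands to:

TOOLS = CC_FOR_TARGET=x86_64-unknown-linux-gnu-gcc LD_FOR_TARGET=x86_64-unknown-linux-gnu-ld \
        AR_FOR_TARGET=x86_64-unknown-linux-gnu-ar AS_FOR_TARGET=x86_64-unknown-linux-gnu-as \
        RANLIB_FOR_TARGET=x86_64-unknown-linux-gnu-ranlib CXX_FOR_TARGET=x86_64-unknown-linux-gnu-g++ \
        OBJDUMP_FOR_TARGET=x86_64-unknown-linux-gnu-objdump

These settings are later passed via ENVS to the configure and make invocations below.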
##########################################################################################
@@ -241,42 +241,42 @@ TOOLS ?= $(call declare_tools,_FOR_TARGET,$(TARGET)-)
# If multilib, the second should be 32-bit, and we resolve
# CFLAG_<name> to most likely -m32.
define mk_bfd
$$(info Libs for $(1))
$$(BUILDDIR)/$$(binutils_ver)-$(subst /,-,$(1))/Makefile \
: CFLAGS += $$(CFLAGS_$(1))
$$(BUILDDIR)/$$(binutils_ver)-$(subst /,-,$(1))/Makefile \
: LIBDIRS = --libdir=$(TARGETDIR)/$(1)
$$(info Libs for $(1))
$$(BUILDDIR)/$$(binutils_ver)-$(subst /,-,$(1))/Makefile \
: CFLAGS += $$(CFLAGS_$(1))
$$(BUILDDIR)/$$(binutils_ver)-$(subst /,-,$(1))/Makefile \
: LIBDIRS = --libdir=$(TARGETDIR)/$(1)
bfdlib += $$(TARGETDIR)/$$(binutils_ver)-$(subst /,-,$(1)).done
bfdmakes += $$(BUILDDIR)/$$(binutils_ver)-$(subst /,-,$(1))/Makefile
bfdlib += $$(TARGETDIR)/$$(binutils_ver)-$(subst /,-,$(1)).done
bfdmakes += $$(BUILDDIR)/$$(binutils_ver)-$(subst /,-,$(1))/Makefile
endef
# Create one set of bfds etc for each multilib arch
$(foreach l,$(LIBDIRS),$(eval $(call mk_bfd,$(l))))
# Only build these two libs.
$(bfdlib) : MAKECMD = all-libiberty all-bfd
$(bfdlib) : INSTALLCMD = install-libiberty install-bfd
$(bfdlib) : MAKECMD = all-libiberty all-bfd
$(bfdlib) : INSTALLCMD = install-libiberty install-bfd
# Building targets libbfd + libiberty. HOST==TARGET, i.e not
# for a cross env.
$(bfdmakes) : CONFIG = --target=$(TARGET) \
--host=$(TARGET) --build=$(BUILD) \
--prefix=$(TARGETDIR) \
--with-sysroot=$(SYSROOT) \
$(LIBDIRS)
$(bfdmakes) : CONFIG = --target=$(TARGET) \
--host=$(TARGET) --build=$(BUILD) \
--prefix=$(TARGETDIR) \
--with-sysroot=$(SYSROOT) \
$(LIBDIRS)
$(bfdmakes) : TOOLS = $(call declare_tools,_FOR_TARGET,$(TARGET)-) $(call declare_tools,,$(TARGET)-)
$(bfdmakes) : TOOLS = $(call declare_tools,_FOR_TARGET,$(TARGET)-) $(call declare_tools,,$(TARGET)-)
##########################################################################################
$(gcc) \
$(binutils) \
$(gmp) \
$(mpfr) \
$(mpc) \
$(bfdmakes) \
$(ccache) : ENVS += $(TOOLS)
$(gcc) \
$(binutils) \
$(gmp) \
$(mpfr) \
$(mpc) \
$(bfdmakes) \
$(ccache) : ENVS += $(TOOLS)
# libdir to work around hateful bfd stuff installing into wrong dirs...
# ensure we have 64 bit bfd support in the HOST library. I.e our
@@ -286,161 +286,161 @@ $(ccache) : ENVS += $(TOOLS)
$(BUILDDIR)/$(binutils_ver)/Makefile : CONFIG += --enable-64-bit-bfd --libdir=$(PREFIX)/$(word 1,$(LIBDIRS))
# Makefile creation. Simply run configure in build dir.
$(bfdmakes) \
$(BUILDDIR)/$(binutils_ver)/Makefile \
: $(BINUTILS_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
@mkdir -p $(@D)
( \
cd $(@D) ; \
$(PATHPRE) $(ENVS) CFLAGS="$(CFLAGS)" \
$(BINUTILS_CFG) \
$(CONFIG) \
--with-sysroot=$(SYSROOT) \
--disable-nls \
--program-prefix=$(TARGET)- \
--enable-multilib \
) > $(@D)/log.config 2>&1
@echo 'done'
$(bfdmakes) \
$(BUILDDIR)/$(binutils_ver)/Makefile \
: $(BINUTILS_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
@mkdir -p $(@D)
( \
cd $(@D) ; \
$(PATHPRE) $(ENVS) CFLAGS="$(CFLAGS)" \
$(BINUTILS_CFG) \
$(CONFIG) \
--with-sysroot=$(SYSROOT) \
--disable-nls \
--program-prefix=$(TARGET)- \
--enable-multilib \
) > $(@D)/log.config 2>&1
@echo 'done'
$(BUILDDIR)/$(mpfr_ver)/Makefile \
: $(MPFR_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
@mkdir -p $(@D)
( \
cd $(@D) ; \
$(PATHPRE) $(ENVS) CFLAGS="$(CFLAGS)" \
$(MPFR_CFG) \
$(CONFIG) \
--program-prefix=$(TARGET)- \
--enable-shared=no \
--with-gmp=$(PREFIX) \
) > $(@D)/log.config 2>&1
@echo 'done'
$(BUILDDIR)/$(mpfr_ver)/Makefile \
: $(MPFR_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
@mkdir -p $(@D)
( \
cd $(@D) ; \
$(PATHPRE) $(ENVS) CFLAGS="$(CFLAGS)" \
$(MPFR_CFG) \
$(CONFIG) \
--program-prefix=$(TARGET)- \
--enable-shared=no \
--with-gmp=$(PREFIX) \
) > $(@D)/log.config 2>&1
@echo 'done'
$(BUILDDIR)/$(gmp_ver)/Makefile \
: $(GMP_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
@mkdir -p $(@D)
( \
cd $(@D) ; \
$(PATHPRE) $(ENVS) CFLAGS="$(CFLAGS)" \
$(GMP_CFG) \
--host=$(HOST) --build=$(BUILD) \
--prefix=$(PREFIX) \
--disable-nls \
--program-prefix=$(TARGET)- \
--enable-shared=no \
--with-mpfr=$(PREFIX) \
) > $(@D)/log.config 2>&1
@echo 'done'
$(BUILDDIR)/$(gmp_ver)/Makefile \
: $(GMP_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
@mkdir -p $(@D)
( \
cd $(@D) ; \
$(PATHPRE) $(ENVS) CFLAGS="$(CFLAGS)" \
$(GMP_CFG) \
--host=$(HOST) --build=$(BUILD) \
--prefix=$(PREFIX) \
--disable-nls \
--program-prefix=$(TARGET)- \
--enable-shared=no \
--with-mpfr=$(PREFIX) \
) > $(@D)/log.config 2>&1
@echo 'done'
$(BUILDDIR)/$(mpc_ver)/Makefile \
: $(MPC_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
@mkdir -p $(@D)
( \
cd $(@D) ; \
$(PATHPRE) $(ENVS) CFLAGS="$(CFLAGS)" \
$(MPC_CFG) \
$(CONFIG) \
--program-prefix=$(TARGET)- \
--enable-shared=no \
--with-mpfr=$(PREFIX) \
--with-gmp=$(PREFIX) \
) > $(@D)/log.config 2>&1
@echo 'done'
$(BUILDDIR)/$(mpc_ver)/Makefile \
: $(MPC_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
@mkdir -p $(@D)
( \
cd $(@D) ; \
$(PATHPRE) $(ENVS) CFLAGS="$(CFLAGS)" \
$(MPC_CFG) \
$(CONFIG) \
--program-prefix=$(TARGET)- \
--enable-shared=no \
--with-mpfr=$(PREFIX) \
--with-gmp=$(PREFIX) \
) > $(@D)/log.config 2>&1
@echo 'done'
# Only valid if glibc target -> linux
# proper destructor handling for c++
ifneq (,$(findstring linux,$(TARGET)))
$(BUILDDIR)/$(gcc_ver)/Makefile : CONFIG += --enable-__cxa_atexit
$(BUILDDIR)/$(gcc_ver)/Makefile : CONFIG += --enable-__cxa_atexit
endif
# Want:
# c,c++
# shared libs
# multilib (-m32/-m64 on x64)
# skip native language.
# and link and assemble with the binutils we created
# earlier, so --with-gnu*
$(BUILDDIR)/$(gcc_ver)/Makefile \
: $(GCC_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
mkdir -p $(@D)
( \
cd $(@D) ; \
$(PATHPRE) $(ENVS) $(GCC_CFG) $(EXTRA_CFLAGS) \
$(CONFIG) \
--with-sysroot=$(SYSROOT) \
--enable-languages=c,c++ \
--enable-shared \
--enable-multilib \
--disable-nls \
--with-gnu-as \
--with-gnu-ld \
--with-mpfr=$(PREFIX) \
--with-gmp=$(PREFIX) \
--with-mpc=$(PREFIX) \
) > $(@D)/log.config 2>&1
@echo 'done'
# c,c++
# shared libs
# multilib (-m32/-m64 on x64)
# skip native language.
# and link and assemble with the binutils we created
# earlier, so --with-gnu*
$(BUILDDIR)/$(gcc_ver)/Makefile \
: $(GCC_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
mkdir -p $(@D)
( \
cd $(@D) ; \
$(PATHPRE) $(ENVS) $(GCC_CFG) $(EXTRA_CFLAGS) \
$(CONFIG) \
--with-sysroot=$(SYSROOT) \
--enable-languages=c,c++ \
--enable-shared \
--enable-multilib \
--disable-nls \
--with-gnu-as \
--with-gnu-ld \
--with-mpfr=$(PREFIX) \
--with-gmp=$(PREFIX) \
--with-mpc=$(PREFIX) \
) > $(@D)/log.config 2>&1
@echo 'done'
# need binutils for gcc
$(gcc) : $(binutils)
$(gcc) : $(binutils)
# as of 4.3 or so need these for doing config
$(BUILDDIR)/$(gcc_ver)/Makefile : $(gmp) $(mpfr) $(mpc)
$(mpfr) : $(gmp)
$(mpc) : $(gmp) $(mpfr)
$(mpfr) : $(gmp)
$(mpc) : $(gmp) $(mpfr)
##########################################################################################
# very straightforward. just build a ccache. it is only for host.
$(BUILDDIR)/$(ccache_ver)/Makefile \
: $(CCACHE_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
@mkdir -p $(@D)
@( \
cd $(@D) ; \
$(PATHPRE) $(ENVS) $(CCACHE_CFG) \
$(CONFIG) \
) > $(@D)/log.config 2>&1
@echo 'done'
$(BUILDDIR)/$(ccache_ver)/Makefile \
: $(CCACHE_CFG)
$(info Configuring $@. Log in $(@D)/log.config)
@mkdir -p $(@D)
@( \
cd $(@D) ; \
$(PATHPRE) $(ENVS) $(CCACHE_CFG) \
$(CONFIG) \
) > $(@D)/log.config 2>&1
@echo 'done'
gccpatch = $(TARGETDIR)/gcc-patched
gccpatch = $(TARGETDIR)/gcc-patched
##########################################################################################
# For some reason cpp is not created as a target-compiler
ifeq ($(HOST),$(TARGET))
$(gccpatch) : $(gcc) link_libs
@echo -n 'Creating compiler symlinks...'
@for f in cpp; do \
if [ ! -e $(PREFIX)/bin/$(TARGET)-$$f ];\
then \
cd $(PREFIX)/bin && \
ln -s $$f $(TARGET)-$$f ; \
fi \
done
@touch $@
@echo 'done'
$(gccpatch) : $(gcc) link_libs
@echo -n 'Creating compiler symlinks...'
@for f in cpp; do \
if [ ! -e $(PREFIX)/bin/$(TARGET)-$$f ]; \
then \
cd $(PREFIX)/bin && \
ln -s $$f $(TARGET)-$$f ; \
fi \
done
@touch $@
@echo 'done'
##########################################################################################
# Ugly at best. Seems that when we compile host->host compiler, that are NOT
# the BUILD compiler, the result will not try searching for libs in package root.
# "Solve" this by create links from the target libdirs to where they are.
link_libs:
@echo -n 'Creating library symlinks...'
@$(foreach l,$(LIBDIRS), \
for f in `cd $(PREFIX)/$(l) && ls`; do \
if [ ! -e $(TARGETDIR)/$(l)/$$f ]; then \
mkdir -p $(TARGETDIR)/$(l) && \
cd $(TARGETDIR)/$(l)/ && \
ln -s $(if $(findstring /,$(l)),../,)../../$(l)/$$f $$f; \
fi \
done;)
@echo 'done'
##########################################################################################
# Ugly at best. It seems that when we compile a host->host compiler that is NOT
# the BUILD compiler, the result will not search for libs in the package root.
# "Solve" this by creating links from the target libdirs to where they are.
link_libs:
@echo -n 'Creating library symlinks...'
@$(foreach l,$(LIBDIRS), \
for f in `cd $(PREFIX)/$(l) && ls`; do \
if [ ! -e $(TARGETDIR)/$(l)/$$f ]; then \
mkdir -p $(TARGETDIR)/$(l) && \
cd $(TARGETDIR)/$(l)/ && \
ln -s $(if $(findstring /,$(l)),../,)../../$(l)/$$f $$f; \
fi \
done;)
@echo 'done'
else
$(gccpatch) :
@echo 'done'
$(gccpatch) :
@echo 'done'
endif
##########################################################################################
@@ -450,24 +450,24 @@ endif
# Use path to our build hosts cross tools
# Always need to build cross tools for build host self.
$(TARGETDIR)/%.done : $(BUILDDIR)/%/Makefile
$(info Building $(basename $@). Log in $(<D)/log.build)
$(PATHPRE) $(ENVS) $(MAKE) $(BUILDPAR) -f $< -C $(<D) $(MAKECMD) $(MAKECMD.$(notdir $@)) > $(<D)/log.build 2>&1
@echo -n 'installing...'
$(PATHPRE) $(MAKE) $(INSTALLPAR) -f $< -C $(<D) $(INSTALLCMD) $(MAKECMD.$(notdir $@)) > $(<D)/log.install 2>&1
@touch $@
@echo 'done'
$(info Building $(basename $@). Log in $(<D)/log.build)
$(PATHPRE) $(ENVS) $(MAKE) $(BUILDPAR) -f $< -C $(<D) $(MAKECMD) $(MAKECMD.$(notdir $@)) > $(<D)/log.build 2>&1
@echo -n 'installing...'
$(PATHPRE) $(MAKE) $(INSTALLPAR) -f $< -C $(<D) $(INSTALLCMD) $(MAKECMD.$(notdir $@)) > $(<D)/log.install 2>&1
@touch $@
@echo 'done'
##########################################################################################
bfdlib : $(bfdlib)
binutils : $(binutils)
rpms : $(rpms)
libs : $(libs)
sysroot : rpms libs
gcc : sysroot $(gcc) $(gccpatch)
all : binutils gcc bfdlib
bfdlib : $(bfdlib)
binutils : $(binutils)
rpms : $(rpms)
libs : $(libs)
sysroot : rpms libs
gcc : sysroot $(gcc) $(gccpatch)
all : binutils gcc bfdlib
# this is only built for host. so separate.
ccache : $(ccache)
ccache : $(ccache)
.PHONY : gcc all binutils bfdlib link_libs rpms libs sysroot
.PHONY : gcc all binutils bfdlib link_libs rpms libs sysroot

View file

@@ -24,49 +24,48 @@
#
# EXCLUDE_PKGS is the list of packages to exclude from the
# Java API Specification. Do not add these to CORE_PKGS.
# Java API Specification. Do not add these to CORE_PKGS.
# The concatenation of EXCLUDE_PKGS and CORE_PKGS
# should make up the list of all packages under the
# src/shared/classes directory of the JDK source tree.
#
EXCLUDE_PKGS = \
java.awt.peer \
java.awt.dnd.peer \
sun.* \
com.sun.* \
org.apache.* \
org.jcp.* \
org.w3c.dom.css \
org.w3c.dom.html \
org.w3c.dom.stylesheets \
org.w3c.dom.traversal \
org.w3c.dom.ranges \
org.w3c.dom.views \
org.omg.stub.javax.management.remote.rmi
EXCLUDE_PKGS = \
java.awt.peer \
java.awt.dnd.peer \
sun.* \
com.sun.* \
org.apache.* \
org.jcp.* \
org.w3c.dom.css \
org.w3c.dom.html \
org.w3c.dom.stylesheets \
org.w3c.dom.traversal \
org.w3c.dom.ranges \
org.omg.stub.javax.management.remote.rmi
#
# ACTIVE_JSR_PKGS are packages that are part of an active JSR process--
# one that is doing its own review. These packages are not included when
# creating diff pages for the platform's JCP process.
#
# (see /java/pubs/apisrc/jdk/6.0/beta/make/docs/active_jsr_pkgs)
# (see /java/pubs/apisrc/jdk/6.0/beta/make/docs/active_jsr_pkgs)
# Note:
# This is a list of regular expressions. So foo.* matches "foo" and "foo.bar".
#
ACTIVE_JSR_PKGS= \
java.lang.invoke \
java.sql \
javax.activation \
javax.annotation.* \
javax.jws.* \
javax.lang.* \
javax.management.* \
javax.script \
javax.sql.* \
javax.tools.* \
javax.xml.* \
org.w3c.* \
org.xml.sax
java.lang.invoke \
java.sql \
javax.activation \
javax.annotation.* \
javax.jws.* \
javax.lang.* \
javax.management.* \
javax.script \
javax.sql.* \
javax.tools.* \
javax.xml.* \
org.w3c.* \
org.xml.sax
#
# CORE_PKGS is the list of packages that form the
@@ -77,224 +76,225 @@ ACTIVE_JSR_PKGS= \
### determines which table the packages go in on the main page.
### Currently, there is only one table ("Platform Packages") and
### everything goes in it, so REGEXP is "*". But if that policy
### changes, packages added will need to be reflected in that
### changes, packages added will need to be reflected in that
### list of wildcard expressions, as well.
###
CORE_PKGS = \
java.applet \
java.awt \
java.awt.color \
java.awt.datatransfer \
java.awt.dnd \
java.awt.event \
java.awt.font \
java.awt.geom \
java.awt.im \
java.awt.im.spi \
java.awt.image \
java.awt.image.renderable \
java.awt.print \
java.beans \
java.beans.beancontext \
java.io \
java.lang \
java.lang.annotation \
java.lang.instrument \
java.lang.invoke \
java.lang.management \
java.lang.ref \
java.lang.reflect \
java.math \
java.net \
java.nio \
java.nio.channels \
java.nio.channels.spi \
java.nio.charset \
java.nio.charset.spi \
java.nio.file \
java.nio.file.attribute \
java.nio.file.spi \
java.rmi \
java.rmi.activation \
java.rmi.dgc \
java.rmi.registry \
java.rmi.server \
java.security \
java.security.acl \
java.security.cert \
java.security.interfaces \
java.security.spec \
java.sql \
java.text \
java.text.spi \
java.time \
java.time.chrono \
java.time.format \
java.time.temporal \
java.time.zone \
java.util \
java.util.concurrent \
java.util.concurrent.atomic \
java.util.concurrent.locks \
java.util.function \
java.util.jar \
java.util.logging \
java.util.prefs \
java.util.regex \
java.util.spi \
java.util.stream \
java.util.zip \
javax.accessibility \
javax.activation \
javax.activity \
javax.annotation \
javax.annotation.processing \
javax.crypto \
javax.crypto.interfaces \
javax.crypto.spec \
javax.imageio \
javax.imageio.event \
javax.imageio.metadata \
javax.imageio.plugins.jpeg \
javax.imageio.plugins.bmp \
javax.imageio.spi \
javax.imageio.stream \
javax.jws \
javax.jws.soap \
javax.lang.model \
javax.lang.model.element \
javax.lang.model.type \
javax.lang.model.util \
javax.management \
javax.management.loading \
javax.management.monitor \
javax.management.relation \
javax.management.openmbean \
javax.management.timer \
javax.management.modelmbean \
javax.management.remote \
javax.management.remote.rmi \
javax.naming \
javax.naming.directory \
javax.naming.event \
javax.naming.ldap \
javax.naming.spi \
javax.net \
javax.net.ssl \
javax.print \
javax.print.attribute \
javax.print.attribute.standard \
javax.print.event \
javax.rmi \
javax.rmi.CORBA \
javax.rmi.ssl \
javax.script \
javax.security.auth \
javax.security.auth.callback \
javax.security.auth.kerberos \
javax.security.auth.login \
javax.security.auth.spi \
javax.security.auth.x500 \
javax.security.cert \
javax.security.sasl \
javax.sound.sampled \
javax.sound.sampled.spi \
javax.sound.midi \
javax.sound.midi.spi \
javax.sql \
javax.sql.rowset \
javax.sql.rowset.serial \
javax.sql.rowset.spi \
javax.swing \
javax.swing.border \
javax.swing.colorchooser \
javax.swing.filechooser \
javax.swing.event \
javax.swing.table \
javax.swing.text \
javax.swing.text.html \
javax.swing.text.html.parser \
javax.swing.text.rtf \
javax.swing.tree \
javax.swing.undo \
javax.swing.plaf \
javax.swing.plaf.basic \
javax.swing.plaf.metal \
javax.swing.plaf.multi \
javax.swing.plaf.nimbus \
javax.swing.plaf.synth \
javax.tools \
javax.tools.annotation \
javax.transaction \
javax.transaction.xa \
javax.xml.parsers \
javax.xml.bind \
javax.xml.bind.annotation \
javax.xml.bind.annotation.adapters \
javax.xml.bind.attachment \
javax.xml.bind.helpers \
javax.xml.bind.util \
javax.xml.soap \
javax.xml.ws \
javax.xml.ws.handler \
javax.xml.ws.handler.soap \
javax.xml.ws.http \
javax.xml.ws.soap \
javax.xml.ws.spi \
javax.xml.ws.spi.http \
javax.xml.ws.wsaddressing \
javax.xml.transform \
javax.xml.transform.sax \
javax.xml.transform.dom \
javax.xml.transform.stax \
javax.xml.transform.stream \
javax.xml \
javax.xml.crypto \
javax.xml.crypto.dom \
javax.xml.crypto.dsig \
javax.xml.crypto.dsig.dom \
javax.xml.crypto.dsig.keyinfo \
javax.xml.crypto.dsig.spec \
javax.xml.datatype \
javax.xml.validation \
javax.xml.namespace \
javax.xml.xpath \
javax.xml.stream \
javax.xml.stream.events \
javax.xml.stream.util \
org.ietf.jgss \
org.omg.CORBA \
org.omg.CORBA.DynAnyPackage \
org.omg.CORBA.ORBPackage \
org.omg.CORBA.TypeCodePackage \
org.omg.stub.java.rmi \
org.omg.CORBA.portable \
org.omg.CORBA_2_3 \
org.omg.CORBA_2_3.portable \
org.omg.CosNaming \
org.omg.CosNaming.NamingContextExtPackage \
org.omg.CosNaming.NamingContextPackage \
org.omg.SendingContext \
org.omg.PortableServer \
org.omg.PortableServer.CurrentPackage \
org.omg.PortableServer.POAPackage \
org.omg.PortableServer.POAManagerPackage \
org.omg.PortableServer.ServantLocatorPackage \
org.omg.PortableServer.portable \
org.omg.PortableInterceptor \
org.omg.PortableInterceptor.ORBInitInfoPackage \
org.omg.Messaging \
org.omg.IOP \
org.omg.IOP.CodecFactoryPackage \
org.omg.IOP.CodecPackage \
org.omg.Dynamic \
org.omg.DynamicAny \
org.omg.DynamicAny.DynAnyPackage \
org.omg.DynamicAny.DynAnyFactoryPackage \
org.w3c.dom \
org.w3c.dom.events \
org.w3c.dom.bootstrap \
org.w3c.dom.ls \
org.xml.sax \
org.xml.sax.ext \
org.xml.sax.helpers
CORE_PKGS = \
java.applet \
java.awt \
java.awt.color \
java.awt.datatransfer \
java.awt.dnd \
java.awt.event \
java.awt.font \
java.awt.geom \
java.awt.im \
java.awt.im.spi \
java.awt.image \
java.awt.image.renderable \
java.awt.print \
java.beans \
java.beans.beancontext \
java.io \
java.lang \
java.lang.annotation \
java.lang.instrument \
java.lang.invoke \
java.lang.management \
java.lang.ref \
java.lang.reflect \
java.math \
java.net \
java.nio \
java.nio.channels \
java.nio.channels.spi \
java.nio.charset \
java.nio.charset.spi \
java.nio.file \
java.nio.file.attribute \
java.nio.file.spi \
java.rmi \
java.rmi.activation \
java.rmi.dgc \
java.rmi.registry \
java.rmi.server \
java.security \
java.security.acl \
java.security.cert \
java.security.interfaces \
java.security.spec \
java.sql \
java.text \
java.text.spi \
java.time \
java.time.chrono \
java.time.format \
java.time.temporal \
java.time.zone \
java.util \
java.util.concurrent \
java.util.concurrent.atomic \
java.util.concurrent.locks \
java.util.function \
java.util.jar \
java.util.logging \
java.util.prefs \
java.util.regex \
java.util.spi \
java.util.stream \
java.util.zip \
javax.accessibility \
javax.activation \
javax.activity \
javax.annotation \
javax.annotation.processing \
javax.crypto \
javax.crypto.interfaces \
javax.crypto.spec \
javax.imageio \
javax.imageio.event \
javax.imageio.metadata \
javax.imageio.plugins.jpeg \
javax.imageio.plugins.bmp \
javax.imageio.spi \
javax.imageio.stream \
javax.jws \
javax.jws.soap \
javax.lang.model \
javax.lang.model.element \
javax.lang.model.type \
javax.lang.model.util \
javax.management \
javax.management.loading \
javax.management.monitor \
javax.management.relation \
javax.management.openmbean \
javax.management.timer \
javax.management.modelmbean \
javax.management.remote \
javax.management.remote.rmi \
javax.naming \
javax.naming.directory \
javax.naming.event \
javax.naming.ldap \
javax.naming.spi \
javax.net \
javax.net.ssl \
javax.print \
javax.print.attribute \
javax.print.attribute.standard \
javax.print.event \
javax.rmi \
javax.rmi.CORBA \
javax.rmi.ssl \
javax.script \
javax.security.auth \
javax.security.auth.callback \
javax.security.auth.kerberos \
javax.security.auth.login \
javax.security.auth.spi \
javax.security.auth.x500 \
javax.security.cert \
javax.security.sasl \
javax.sound.sampled \
javax.sound.sampled.spi \
javax.sound.midi \
javax.sound.midi.spi \
javax.sql \
javax.sql.rowset \
javax.sql.rowset.serial \
javax.sql.rowset.spi \
javax.swing \
javax.swing.border \
javax.swing.colorchooser \
javax.swing.filechooser \
javax.swing.event \
javax.swing.table \
javax.swing.text \
javax.swing.text.html \
javax.swing.text.html.parser \
javax.swing.text.rtf \
javax.swing.tree \
javax.swing.undo \
javax.swing.plaf \
javax.swing.plaf.basic \
javax.swing.plaf.metal \
javax.swing.plaf.multi \
javax.swing.plaf.nimbus \
javax.swing.plaf.synth \
javax.tools \
javax.tools.annotation \
javax.transaction \
javax.transaction.xa \
javax.xml.parsers \
javax.xml.bind \
javax.xml.bind.annotation \
javax.xml.bind.annotation.adapters \
javax.xml.bind.attachment \
javax.xml.bind.helpers \
javax.xml.bind.util \
javax.xml.soap \
javax.xml.ws \
javax.xml.ws.handler \
javax.xml.ws.handler.soap \
javax.xml.ws.http \
javax.xml.ws.soap \
javax.xml.ws.spi \
javax.xml.ws.spi.http \
javax.xml.ws.wsaddressing \
javax.xml.transform \
javax.xml.transform.sax \
javax.xml.transform.dom \
javax.xml.transform.stax \
javax.xml.transform.stream \
javax.xml \
javax.xml.crypto \
javax.xml.crypto.dom \
javax.xml.crypto.dsig \
javax.xml.crypto.dsig.dom \
javax.xml.crypto.dsig.keyinfo \
javax.xml.crypto.dsig.spec \
javax.xml.datatype \
javax.xml.validation \
javax.xml.namespace \
javax.xml.xpath \
javax.xml.stream \
javax.xml.stream.events \
javax.xml.stream.util \
org.ietf.jgss \
org.omg.CORBA \
org.omg.CORBA.DynAnyPackage \
org.omg.CORBA.ORBPackage \
org.omg.CORBA.TypeCodePackage \
org.omg.stub.java.rmi \
org.omg.CORBA.portable \
org.omg.CORBA_2_3 \
org.omg.CORBA_2_3.portable \
org.omg.CosNaming \
org.omg.CosNaming.NamingContextExtPackage \
org.omg.CosNaming.NamingContextPackage \
org.omg.SendingContext \
org.omg.PortableServer \
org.omg.PortableServer.CurrentPackage \
org.omg.PortableServer.POAPackage \
org.omg.PortableServer.POAManagerPackage \
org.omg.PortableServer.ServantLocatorPackage \
org.omg.PortableServer.portable \
org.omg.PortableInterceptor \
org.omg.PortableInterceptor.ORBInitInfoPackage \
org.omg.Messaging \
org.omg.IOP \
org.omg.IOP.CodecFactoryPackage \
org.omg.IOP.CodecPackage \
org.omg.Dynamic \
org.omg.DynamicAny \
org.omg.DynamicAny.DynAnyPackage \
org.omg.DynamicAny.DynAnyFactoryPackage \
org.w3c.dom \
org.w3c.dom.events \
org.w3c.dom.bootstrap \
org.w3c.dom.ls \
org.w3c.dom.views \
org.xml.sax \
org.xml.sax.ext \
org.xml.sax.helpers

File diff suppressed because it is too large

View file

@@ -25,7 +25,7 @@
#
# This file contains the package names of all the "non-core"
# API published in the Java 2 SDK documentation. "Non-core" means
# API published in the Java 2 SDK documentation. "Non-core" means
# it includes all published API outside of the JDK API specification.
#
# These environment variables are used by javadoc in
@@ -33,78 +33,78 @@
# team when determining which APIs to extract javadoc
# comments from.
DOMAPI_PKGS = com.sun.java.browser.dom \
org.w3c.dom \
org.w3c.dom.bootstrap \
org.w3c.dom.ls \
org.w3c.dom.ranges \
org.w3c.dom.traversal \
org.w3c.dom.html \
org.w3c.dom.stylesheets \
org.w3c.dom.css \
org.w3c.dom.events \
org.w3c.dom.views
DOMAPI_PKGS = com.sun.java.browser.dom \
org.w3c.dom \
org.w3c.dom.bootstrap \
org.w3c.dom.ls \
org.w3c.dom.ranges \
org.w3c.dom.traversal \
org.w3c.dom.html \
org.w3c.dom.stylesheets \
org.w3c.dom.css \
org.w3c.dom.events \
org.w3c.dom.views
JDI_PKGS = com.sun.jdi \
com.sun.jdi.event \
com.sun.jdi.request \
com.sun.jdi.connect \
com.sun.jdi.connect.spi
JDI_PKGS = com.sun.jdi \
com.sun.jdi.event \
com.sun.jdi.request \
com.sun.jdi.connect \
com.sun.jdi.connect.spi
MGMT_PKGS = com.sun.management
MGMT_PKGS = com.sun.management
JAAS_PKGS = com.sun.security.auth \
com.sun.security.auth.callback \
com.sun.security.auth.login \
com.sun.security.auth.module
JAAS_PKGS = com.sun.security.auth \
com.sun.security.auth.callback \
com.sun.security.auth.login \
com.sun.security.auth.module
JGSS_PKGS = com.sun.security.jgss
JGSS_PKGS = com.sun.security.jgss
OLD_JSSE_PKGS = com.sun.net.ssl
OLD_JSSE_PKGS = com.sun.net.ssl
HTTPSERVER_PKGS = com.sun.net.httpserver \
com.sun.net.httpserver.spi
HTTPSERVER_PKGS = com.sun.net.httpserver \
com.sun.net.httpserver.spi
NIO_PKGS = com.sun.nio.file
NIO_PKGS = com.sun.nio.file
DOCLETAPI_PKGS = com.sun.javadoc
DOCLETAPI_PKGS = com.sun.javadoc
TAGLETAPI_FILE = com/sun/tools/doclets/Taglet.java
TAGLETAPI_FILE = com/sun/tools/doclets/Taglet.java
TAGLETAPI_PKGS = com.sun.tools.doclets
TAGLETAPI_PKGS = com.sun.tools.doclets
ATTACH_PKGS = com.sun.tools.attach \
com.sun.tools.attach.spi
ATTACH_PKGS = com.sun.tools.attach \
com.sun.tools.attach.spi
JCONSOLE_PKGS = com.sun.tools.jconsole
JCONSOLE_PKGS = com.sun.tools.jconsole
TREEAPI_PKGS = com.sun.source.doctree \
com.sun.source.tree \
com.sun.source.util \
jdk
TREEAPI_PKGS = com.sun.source.doctree \
com.sun.source.tree \
com.sun.source.util \
jdk
SMARTCARDIO_PKGS = javax.smartcardio
SCTPAPI_PKGS = com.sun.nio.sctp
SCTPAPI_PKGS = com.sun.nio.sctp
ifeq ($(PLATFORM), macosx)
APPLE_EXT_PKGS = com.apple.concurrent \
com.apple.eawt \
com.apple.eawt.event \
com.apple.eio
APPLE_EXT_PKGS = com.apple.concurrent \
com.apple.eawt \
com.apple.eawt.event \
com.apple.eio
endif
JDK_PKGS = jdk
JDK_PKGS = jdk
# non-core packages in rt.jar
NON_CORE_PKGS = $(DOMAPI_PKGS) \
$(MGMT_PKGS) \
$(JAAS_PKGS) \
$(JGSS_PKGS) \
$(NIO_PKGS) \
$(OLD_JSSE_PKGS) \
$(HTTPSERVER_PKGS) \
$(SMARTCARDIO_PKGS) \
$(SCTPAPI_PKGS) \
$(APPLE_EXT_PKGS) \
$(JDK_PKGS)
NON_CORE_PKGS = $(DOMAPI_PKGS) \
$(MGMT_PKGS) \
$(JAAS_PKGS) \
$(JGSS_PKGS) \
$(NIO_PKGS) \
$(OLD_JSSE_PKGS) \
$(HTTPSERVER_PKGS) \
$(SMARTCARDIO_PKGS) \
$(SCTPAPI_PKGS) \
$(APPLE_EXT_PKGS) \
$(JDK_PKGS)

View file

@@ -232,3 +232,4 @@ d411c60a8c2fe8fdc572af907775e90f7eefd513 jdk8-b104
a4bb3b4500164748a9c33b2283cfda76d89f25ab jdk8-b108
428428cf5e06163322144cfb5367e1faa86acf20 jdk8-b109
3d2b7ce93c5c2e3db748f29c3d29620a8b3b748a jdk8-b110
85c1c94e723582f9a1dd0251502c42b73d6deea7 jdk8-b111

File diff suppressed because one or more lines are too long

View file

@@ -24,19 +24,19 @@
#
# Locate this Makefile
ifeq ($(filter /%,$(lastword $(MAKEFILE_LIST))),)
makefile_path:=$(CURDIR)/$(lastword $(MAKEFILE_LIST))
ifeq ($(filter /%, $(lastword $(MAKEFILE_LIST))), )
makefile_path := $(CURDIR)/$(lastword $(MAKEFILE_LIST))
else
makefile_path:=$(lastword $(MAKEFILE_LIST))
makefile_path := $(lastword $(MAKEFILE_LIST))
endif
repo_dir:=$(patsubst %/makefiles/Makefile,%,$(makefile_path))
repo_dir := $(patsubst %/makefiles/Makefile, %, $(makefile_path))
# What is the name of this subsystem (langtools, corba, etc)?
subsystem_name:=$(notdir $(repo_dir))
subsystem_name := $(notdir $(repo_dir))
# Try to locate top-level makefile
top_level_makefile:=$(repo_dir)/../common/makefiles/Makefile
ifneq ($(wildcard $(top_level_makefile)),)
top_level_makefile := $(repo_dir)/../common/makefiles/Makefile
ifneq ($(wildcard $(top_level_makefile)), )
$(info Will run $(subsystem_name) target on top-level Makefile)
$(info WARNING: This is a non-recommended way of building!)
$(info ===================================================)

View file

@@ -383,3 +383,5 @@ c81dd5393a5e333df7cb1f6621f5897ada6522b5 jdk8-b109
58043478c26d4e8bf48700acea5f97aba8b417d4 hs25-b52
6209b0ed51c086d4127bac0e086c8f326d1764d7 jdk8-b110
562a3d356de67670b4172b82aca2d30743449e04 hs25-b53
f6962730bbde82f279a0ae3a1c14bc5e58096c6e jdk8-b111
4a845c7a463844cead9e1e1641d6bcfb8a77f1c7 hs25-b54

View file

@@ -44,6 +44,7 @@
// close all file descriptors
static void close_files(struct ps_prochandle* ph) {
lib_info* lib = NULL;
// close core file descriptor
if (ph->core->core_fd >= 0)
close(ph->core->core_fd);
@@ -149,8 +150,7 @@ static map_info* add_class_share_map_info(struct ps_prochandle* ph, off_t offset
// Return the map_info for the given virtual address. We keep a sorted
// array of pointers in ph->map_array, so we can binary search.
static map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr)
{
static map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr) {
int mid, lo = 0, hi = ph->core->num_maps - 1;
map_info *mp;
@@ -230,9 +230,9 @@ struct FileMapHeader {
size_t _used; // for setting space top on read
// 4991491 NOTICE These are C++ bool's in filemap.hpp and must match up with
// the C type matching the C++ bool type on any given platform. For
// Hotspot on BSD we assume the corresponding C type is char but
// licensees on BSD versions may need to adjust the type of these fields.
// the C type matching the C++ bool type on any given platform.
// We assume the corresponding C type is char but licensees
// may need to adjust the type of these fields.
char _read_only; // read only space?
char _allow_exec; // executable code in space?
@@ -286,10 +286,12 @@ static bool read_string(struct ps_prochandle* ph, uintptr_t addr, char* buf, siz
#define USE_SHARED_SPACES_SYM "_UseSharedSpaces"
// mangled name of Arguments::SharedArchivePath
#define SHARED_ARCHIVE_PATH_SYM "_ZN9Arguments17SharedArchivePathE"
#define LIBJVM_NAME "/libjvm.dylib"
#else
#define USE_SHARED_SPACES_SYM "UseSharedSpaces"
// mangled name of Arguments::SharedArchivePath
#define SHARED_ARCHIVE_PATH_SYM "__ZN9Arguments17SharedArchivePathE"
#define LIBJVM_NAME "/libjvm.so"
#endif // __APPLE__
static bool init_classsharing_workaround(struct ps_prochandle* ph) {
@@ -300,12 +302,7 @@ static bool init_classsharing_workaround(struct ps_prochandle* ph) {
// we are iterating over shared objects from the core dump. look for
// libjvm.so.
const char *jvm_name = 0;
#ifdef __APPLE__
if ((jvm_name = strstr(lib->name, "/libjvm.dylib")) != 0)
#else
if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0)
#endif // __APPLE__
{
if ((jvm_name = strstr(lib->name, LIBJVM_NAME)) != 0) {
char classes_jsa[PATH_MAX];
struct FileMapHeader header;
int fd = -1;
@@ -399,8 +396,8 @@ static bool init_classsharing_workaround(struct ps_prochandle* ph) {
}
}
return true;
}
lib = lib->next;
}
lib = lib->next;
}
return true;
}
@@ -432,8 +429,8 @@ static bool sort_map_array(struct ps_prochandle* ph) {
// allocate map_array
map_info** array;
if ( (array = (map_info**) malloc(sizeof(map_info*) * num_maps)) == NULL) {
print_debug("can't allocate memory for map array\n");
return false;
print_debug("can't allocate memory for map array\n");
return false;
}
// add maps to array
@@ -450,7 +447,7 @@ static bool sort_map_array(struct ps_prochandle* ph) {
ph->core->map_array = array;
// sort the map_info array by base virtual address.
qsort(ph->core->map_array, ph->core->num_maps, sizeof (map_info*),
core_cmp_mapping);
core_cmp_mapping);
// print map
if (is_debug()) {
@@ -458,7 +455,7 @@ static bool sort_map_array(struct ps_prochandle* ph) {
print_debug("---- sorted virtual address map ----\n");
for (j = 0; j < ph->core->num_maps; j++) {
print_debug("base = 0x%lx\tsize = %d\n", ph->core->map_array[j]->vaddr,
ph->core->map_array[j]->memsz);
ph->core->map_array[j]->memsz);
}
}
@@ -1091,9 +1088,9 @@ static bool core_handle_note(struct ps_prochandle* ph, ELF_PHDR* note_phdr) {
notep->n_type, notep->n_descsz);
if (notep->n_type == NT_PRSTATUS) {
if (core_handle_prstatus(ph, descdata, notep->n_descsz) != true) {
return false;
}
if (core_handle_prstatus(ph, descdata, notep->n_descsz) != true) {
return false;
}
}
p = descdata + ROUNDUP(notep->n_descsz, 4);
}
@@ -1121,7 +1118,7 @@ static bool read_core_segments(struct ps_prochandle* ph, ELF_EHDR* core_ehdr) {
* contains a set of saved /proc structures), and PT_LOAD (which
* represents a memory mapping from the process's address space).
*
* Difference b/w Solaris PT_NOTE and BSD PT_NOTE:
* Difference b/w Solaris PT_NOTE and Linux/BSD PT_NOTE:
*
* In Solaris there are two PT_NOTE segments the first PT_NOTE (if present)
* contains /proc structs in the pre-2.6 unstructured /proc format. the last
@@ -1167,32 +1164,61 @@ err:
// read segments of a shared object
static bool read_lib_segments(struct ps_prochandle* ph, int lib_fd, ELF_EHDR* lib_ehdr, uintptr_t lib_base) {
int i = 0;
ELF_PHDR* phbuf;
ELF_PHDR* lib_php = NULL;
int i = 0;
ELF_PHDR* phbuf;
ELF_PHDR* lib_php = NULL;
if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL)
return false;
int page_size=sysconf(_SC_PAGE_SIZE);
// we want to process only PT_LOAD segments that are not writable.
// i.e., text segments. The read/write/exec (data) segments would
// have been already added from core file segments.
for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) {
if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) {
if (add_map_info(ph, lib_fd, lib_php->p_offset, lib_php->p_vaddr + lib_base, lib_php->p_filesz) == NULL)
goto err;
if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL) {
return false;
}
// we want to process only PT_LOAD segments that are not writable.
// i.e., text segments. The read/write/exec (data) segments would
// have been already added from core file segments.
for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) {
if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) {
uintptr_t target_vaddr = lib_php->p_vaddr + lib_base;
map_info *existing_map = core_lookup(ph, target_vaddr);
if (existing_map == NULL){
if (add_map_info(ph, lib_fd, lib_php->p_offset,
target_vaddr, lib_php->p_filesz) == NULL) {
goto err;
}
} else {
if ((existing_map->memsz != page_size) &&
(existing_map->fd != lib_fd) &&
(existing_map->memsz != lib_php->p_filesz)){
print_debug("address conflict @ 0x%lx (size = %ld, flags = %d\n)",
target_vaddr, lib_php->p_filesz, lib_php->p_flags);
goto err;
}
/* replace PT_LOAD segment with library segment */
print_debug("overwrote with new address mapping (memsz %ld -> %ld)\n",
existing_map->memsz, lib_php->p_filesz);
existing_map->fd = lib_fd;
existing_map->offset = lib_php->p_offset;
existing_map->memsz = lib_php->p_filesz;
}
lib_php++;
}
}
free(phbuf);
return true;
lib_php++;
}
free(phbuf);
return true;
err:
free(phbuf);
return false;
free(phbuf);
return false;
}
// process segments from interpreter (ld-elf.so.1)
// process segments from interpreter (ld.so or ld-linux.so or ld-elf.so)
static bool read_interp_segments(struct ps_prochandle* ph) {
ELF_EHDR interp_ehdr;
@@ -1303,32 +1329,34 @@ static bool read_shared_lib_info(struct ps_prochandle* ph) {
debug_base = dyn.d_un.d_ptr;
// at debug_base we have struct r_debug. This has first link map in r_map field
if (ps_pread(ph, (psaddr_t) debug_base + FIRST_LINK_MAP_OFFSET,
&first_link_map_addr, sizeof(uintptr_t)) != PS_OK) {
&first_link_map_addr, sizeof(uintptr_t)) != PS_OK) {
print_debug("can't read first link map address\n");
return false;
}
// read ld_base address from struct r_debug
// XXX: There is no r_ldbase member on BSD
/*
#if 0 // There is no r_ldbase member on BSD
if (ps_pread(ph, (psaddr_t) debug_base + LD_BASE_OFFSET, &ld_base_addr,
sizeof(uintptr_t)) != PS_OK) {
print_debug("can't read ld base address\n");
return false;
}
ph->core->ld_base_addr = ld_base_addr;
*/
#else
ph->core->ld_base_addr = 0;
#endif
print_debug("interpreter base address is 0x%lx\n", ld_base_addr);
// now read segments from interp (i.e ld-elf.so.1)
if (read_interp_segments(ph) != true)
// now read segments from interp (i.e ld.so or ld-linux.so or ld-elf.so)
if (read_interp_segments(ph) != true) {
return false;
}
// after adding interpreter (ld.so) mappings sort again
if (sort_map_array(ph) != true)
if (sort_map_array(ph) != true) {
return false;
}
print_debug("first link map is at 0x%lx\n", first_link_map_addr);
@@ -1380,8 +1408,9 @@ static bool read_shared_lib_info(struct ps_prochandle* ph) {
add_lib_info_fd(ph, lib_name, lib_fd, lib_base);
// Map info is added for the library (lib_name) so
// we need to re-sort it before calling the p_pdread.
if (sort_map_array(ph) != true)
if (sort_map_array(ph) != true) {
return false;
}
} else {
print_debug("can't read ELF header for shared object %s\n", lib_name);
close(lib_fd);
@@ -1392,7 +1421,7 @@ static bool read_shared_lib_info(struct ps_prochandle* ph) {
// read next link_map address
if (ps_pread(ph, (psaddr_t) link_map_addr + LINK_MAP_NEXT_OFFSET,
&link_map_addr, sizeof(uintptr_t)) != PS_OK) {
&link_map_addr, sizeof(uintptr_t)) != PS_OK) {
print_debug("can't read next link in link_map\n");
return false;
}
@@ -1408,7 +1437,7 @@ struct ps_prochandle* Pgrab_core(const char* exec_file, const char* core_file) {
struct ps_prochandle* ph = (struct ps_prochandle*) calloc(1, sizeof(struct ps_prochandle));
if (ph == NULL) {
print_debug("cant allocate ps_prochandle\n");
print_debug("can't allocate ps_prochandle\n");
return NULL;
}
@@ -1444,38 +1473,45 @@ struct ps_prochandle* Pgrab_core(const char* exec_file, const char* core_file) {
}
if (read_elf_header(ph->core->exec_fd, &exec_ehdr) != true || exec_ehdr.e_type != ET_EXEC) {
print_debug("executable file is not a valid ELF ET_EXEC file\n");
goto err;
print_debug("executable file is not a valid ELF ET_EXEC file\n");
goto err;
}
// process core file segments
if (read_core_segments(ph, &core_ehdr) != true)
goto err;
if (read_core_segments(ph, &core_ehdr) != true) {
goto err;
}
// process exec file segments
if (read_exec_segments(ph, &exec_ehdr) != true)
goto err;
if (read_exec_segments(ph, &exec_ehdr) != true) {
goto err;
}
// exec file is also treated like a shared object for symbol search
if (add_lib_info_fd(ph, exec_file, ph->core->exec_fd,
(uintptr_t)0 + find_base_address(ph->core->exec_fd, &exec_ehdr)) == NULL)
goto err;
(uintptr_t)0 + find_base_address(ph->core->exec_fd, &exec_ehdr)) == NULL) {
goto err;
}
// allocate and sort maps into map_array, we need to do this
// here because read_shared_lib_info needs to read from debuggee
// address space
if (sort_map_array(ph) != true)
if (sort_map_array(ph) != true) {
goto err;
}
if (read_shared_lib_info(ph) != true)
if (read_shared_lib_info(ph) != true) {
goto err;
}
// sort again because we have added more mappings from shared objects
if (sort_map_array(ph) != true)
if (sort_map_array(ph) != true) {
goto err;
}
if (init_classsharing_workaround(ph) != true)
if (init_classsharing_workaround(ph) != true) {
goto err;
}
print_debug("Leave Pgrab_core\n");
return ph;
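A minimal caller sketch for Pgrab_core (the file paths are hypothetical; the header name and the Prelease cleanup call are assumptions about the agent's libproc interface, which is not shown in this diff):

#include <stdio.h>
#include "libproc.h"  // assumed to declare Pgrab_core() and Prelease()

int main(void) {
  // Open a core file together with the executable that produced it.
  struct ps_prochandle* ph = Pgrab_core("/usr/lib/jvm/jdk8/bin/java", "core.1234");
  if (ph == NULL) {
    fprintf(stderr, "failed to attach to core file\n");
    return 1;
  }
  // ... perform symbol lookups / ps_pdread() calls against ph here ...
  Prelease(ph);  // assumption: matching release entry point in the same library
  return 0;
}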

View file

@@ -41,155 +41,158 @@
// ps_prochandle cleanup helper functions
// close all file descriptors
static void close_elf_files(struct ps_prochandle* ph) {
lib_info* lib = NULL;
static void close_files(struct ps_prochandle* ph) {
lib_info* lib = NULL;
// close core file descriptor
if (ph->core->core_fd >= 0)
close(ph->core->core_fd);
// close core file descriptor
if (ph->core->core_fd >= 0)
close(ph->core->core_fd);
// close exec file descriptor
if (ph->core->exec_fd >= 0)
close(ph->core->exec_fd);
// close exec file descriptor
if (ph->core->exec_fd >= 0)
close(ph->core->exec_fd);
// close interp file descriptor
if (ph->core->interp_fd >= 0)
close(ph->core->interp_fd);
// close interp file descriptor
if (ph->core->interp_fd >= 0)
close(ph->core->interp_fd);
// close class share archive file
if (ph->core->classes_jsa_fd >= 0)
close(ph->core->classes_jsa_fd);
// close class share archive file
if (ph->core->classes_jsa_fd >= 0)
close(ph->core->classes_jsa_fd);
// close all library file descriptors
lib = ph->libs;
while (lib) {
int fd = lib->fd;
if (fd >= 0 && fd != ph->core->exec_fd) close(fd);
lib = lib->next;
}
// close all library file descriptors
lib = ph->libs;
while (lib) {
int fd = lib->fd;
if (fd >= 0 && fd != ph->core->exec_fd) {
close(fd);
}
lib = lib->next;
}
}
// clean all map_info stuff
static void destroy_map_info(struct ps_prochandle* ph) {
map_info* map = ph->core->maps;
while (map) {
map_info* next = map->next;
free(map);
map = next;
map_info* next = map->next;
free(map);
map = next;
}
if (ph->core->map_array) {
free(ph->core->map_array);
free(ph->core->map_array);
}
// Part of the class sharing workaround
map = ph->core->class_share_maps;
while (map) {
map_info* next = map->next;
free(map);
map = next;
map_info* next = map->next;
free(map);
map = next;
}
}
// ps_prochandle operations
static void core_release(struct ps_prochandle* ph) {
if (ph->core) {
close_elf_files(ph);
destroy_map_info(ph);
free(ph->core);
}
if (ph->core) {
close_files(ph);
destroy_map_info(ph);
free(ph->core);
}
}
static map_info* allocate_init_map(int fd, off_t offset, uintptr_t vaddr, size_t memsz) {
map_info* map;
if ( (map = (map_info*) calloc(1, sizeof(map_info))) == NULL) {
print_debug("can't allocate memory for map_info\n");
return NULL;
}
map_info* map;
if ( (map = (map_info*) calloc(1, sizeof(map_info))) == NULL) {
print_debug("can't allocate memory for map_info\n");
return NULL;
}
// initialize map
map->fd = fd;
map->offset = offset;
map->vaddr = vaddr;
map->memsz = memsz;
return map;
// initialize map
map->fd = fd;
map->offset = offset;
map->vaddr = vaddr;
map->memsz = memsz;
return map;
}
// add map info with given fd, offset, vaddr and memsz
static map_info* add_map_info(struct ps_prochandle* ph, int fd, off_t offset,
uintptr_t vaddr, size_t memsz) {
map_info* map;
if ((map = allocate_init_map(fd, offset, vaddr, memsz)) == NULL) {
return NULL;
}
map_info* map;
if ((map = allocate_init_map(fd, offset, vaddr, memsz)) == NULL) {
return NULL;
}
// add this to map list
map->next = ph->core->maps;
ph->core->maps = map;
ph->core->num_maps++;
// add this to map list
map->next = ph->core->maps;
ph->core->maps = map;
ph->core->num_maps++;
return map;
return map;
}
// Part of the class sharing workaround
static void add_class_share_map_info(struct ps_prochandle* ph, off_t offset,
static map_info* add_class_share_map_info(struct ps_prochandle* ph, off_t offset,
uintptr_t vaddr, size_t memsz) {
map_info* map;
if ((map = allocate_init_map(ph->core->classes_jsa_fd,
offset, vaddr, memsz)) == NULL) {
return;
}
map_info* map;
if ((map = allocate_init_map(ph->core->classes_jsa_fd,
offset, vaddr, memsz)) == NULL) {
return NULL;
}
map->next = ph->core->class_share_maps;
ph->core->class_share_maps = map;
map->next = ph->core->class_share_maps;
ph->core->class_share_maps = map;
return map;
}
// Return the map_info for the given virtual address. We keep a sorted
// array of pointers in ph->map_array, so we can binary search.
static map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr)
{
int mid, lo = 0, hi = ph->core->num_maps - 1;
map_info *mp;
static map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr) {
int mid, lo = 0, hi = ph->core->num_maps - 1;
map_info *mp;
while (hi - lo > 1) {
mid = (lo + hi) / 2;
if (addr >= ph->core->map_array[mid]->vaddr)
lo = mid;
else
hi = mid;
}
while (hi - lo > 1) {
mid = (lo + hi) / 2;
if (addr >= ph->core->map_array[mid]->vaddr) {
lo = mid;
} else {
hi = mid;
}
}
if (addr < ph->core->map_array[hi]->vaddr)
mp = ph->core->map_array[lo];
else
mp = ph->core->map_array[hi];
if (addr < ph->core->map_array[hi]->vaddr) {
mp = ph->core->map_array[lo];
} else {
mp = ph->core->map_array[hi];
}
if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz)
if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz) {
return (mp);
}
// Part of the class sharing workaround
// Unfortunately, we have no way of detecting -Xshare state.
// Check the share maps last, if the address is not found anywhere else.
// This is done so as to avoid reading share pages
// ahead of other normal maps. For example, with -Xshare:off we don't
// want to prefer class sharing data to data from the core.
mp = ph->core->class_share_maps;
if (mp) {
print_debug("can't locate map_info at 0x%lx, trying class share maps\n", addr);
}
while (mp) {
if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz) {
print_debug("located map_info at 0x%lx from class share maps\n", addr);
return (mp);
}
mp = mp->next;
}
// Part of the class sharing workaround
// Unfortunately, we have no way of detecting -Xshare state.
// Check the share maps last, if the address is not found anywhere else.
// This is done so as to avoid reading share pages
// ahead of other normal maps. For example, with -Xshare:off we don't
// want to prefer class sharing data to data from the core.
mp = ph->core->class_share_maps;
if (mp) {
print_debug("can't locate map_info at 0x%lx, trying class share maps\n",
addr);
}
while (mp) {
if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz) {
print_debug("located map_info at 0x%lx from class share maps\n",
addr);
return (mp);
}
mp = mp->next;
}
print_debug("can't locate map_info at 0x%lx\n", addr);
return (NULL);
print_debug("can't locate map_info at 0x%lx\n", addr);
return (NULL);
}
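For reference, here is a minimal standalone sketch of the same idea: a binary search over an array of address ranges sorted by start address. The map_entry type, the helper name and the sample data are illustrative only and not part of the SA sources; the real code searches ph->core->map_array, an array of map_info pointers.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

typedef struct { uintptr_t vaddr; size_t memsz; } map_entry;

/* Return the index of the range containing addr, or -1 if none.
 * Assumes maps is sorted by vaddr and n >= 1, like ph->core->map_array. */
static int lookup(const map_entry* maps, int n, uintptr_t addr) {
  int lo = 0, hi = n - 1;
  while (hi - lo > 1) {
    int mid = (lo + hi) / 2;
    if (addr >= maps[mid].vaddr) {
      lo = mid;
    } else {
      hi = mid;
    }
  }
  int i = (addr < maps[hi].vaddr) ? lo : hi;
  if (addr >= maps[i].vaddr && addr < maps[i].vaddr + maps[i].memsz) {
    return i;
  }
  return -1;
}

int main(void) {
  map_entry maps[] = { {0x1000, 0x1000}, {0x4000, 0x2000}, {0x9000, 0x1000} };
  printf("%d\n", lookup(maps, 3, 0x4800));  /* 1: inside the second range */
  printf("%d\n", lookup(maps, 3, 0x3000));  /* -1: falls in a hole between ranges */
  return 0;
}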
//---------------------------------------------------------------
@ -226,9 +229,9 @@ struct FileMapHeader {
size_t _used; // for setting space top on read
// 4991491 NOTICE These are C++ bool's in filemap.hpp and must match up with
// the C type matching the C++ bool type on any given platform. For
// Hotspot on Linux we assume the corresponding C type is char but
// licensees on Linux versions may need to adjust the type of these fields.
// the C type matching the C++ bool type on any given platform.
// We assume the corresponding C type is char but licensees
// may need to adjust the type of these fields.
char _read_only; // read only space?
char _allow_exec; // executable code in space?
@ -238,154 +241,159 @@ struct FileMapHeader {
};
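The comment above relies on the C++ bool fields of FileMapHeader occupying a single byte so that the char fields line up. A compile-time guard on the C side could document that assumption; the line below is only an illustration, not part of the SA sources, and it uses the C99 _Bool as a stand-in for the platform's C++ bool.

#include <stdbool.h>

/* Illustrative guard: the class-sharing workaround assumes the C++ bool
 * used by filemap.hpp is one byte, matching the char fields declared above. */
_Static_assert(sizeof(bool) == 1, "expected a one-byte bool to match the char fields");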
static bool read_jboolean(struct ps_prochandle* ph, uintptr_t addr, jboolean* pvalue) {
jboolean i;
if (ps_pdread(ph, (psaddr_t) addr, &i, sizeof(i)) == PS_OK) {
*pvalue = i;
return true;
} else {
return false;
}
jboolean i;
if (ps_pdread(ph, (psaddr_t) addr, &i, sizeof(i)) == PS_OK) {
*pvalue = i;
return true;
} else {
return false;
}
}
static bool read_pointer(struct ps_prochandle* ph, uintptr_t addr, uintptr_t* pvalue) {
uintptr_t uip;
if (ps_pdread(ph, (psaddr_t) addr, &uip, sizeof(uip)) == PS_OK) {
*pvalue = uip;
return true;
} else {
return false;
}
uintptr_t uip;
if (ps_pdread(ph, (psaddr_t) addr, (char *)&uip, sizeof(uip)) == PS_OK) {
*pvalue = uip;
return true;
} else {
return false;
}
}
// used to read strings from debuggee
static bool read_string(struct ps_prochandle* ph, uintptr_t addr, char* buf, size_t size) {
size_t i = 0;
char c = ' ';
size_t i = 0;
char c = ' ';
while (c != '\0') {
if (ps_pdread(ph, (psaddr_t) addr, &c, sizeof(char)) != PS_OK)
return false;
if (i < size - 1)
buf[i] = c;
else // smaller buffer
return false;
i++; addr++;
}
while (c != '\0') {
if (ps_pdread(ph, (psaddr_t) addr, &c, sizeof(char)) != PS_OK) {
return false;
}
if (i < size - 1) {
buf[i] = c;
} else {
// smaller buffer
return false;
}
i++; addr++;
}
buf[i] = '\0';
return true;
buf[i] = '\0';
return true;
}
#define USE_SHARED_SPACES_SYM "UseSharedSpaces"
// mangled name of Arguments::SharedArchivePath
#define SHARED_ARCHIVE_PATH_SYM "_ZN9Arguments17SharedArchivePathE"
#define LIBJVM_NAME "/libjvm.so"
static bool init_classsharing_workaround(struct ps_prochandle* ph) {
lib_info* lib = ph->libs;
while (lib != NULL) {
// we are iterating over shared objects from the core dump. look for
// libjvm.so.
const char *jvm_name = 0;
if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0) {
char classes_jsa[PATH_MAX];
struct FileMapHeader header;
size_t n = 0;
int fd = -1, m = 0;
uintptr_t base = 0, useSharedSpacesAddr = 0;
uintptr_t sharedArchivePathAddrAddr = 0, sharedArchivePathAddr = 0;
jboolean useSharedSpaces = 0;
map_info* mi = 0;
lib_info* lib = ph->libs;
while (lib != NULL) {
// we are iterating over shared objects from the core dump. look for
// libjvm.so.
const char *jvm_name = 0;
if ((jvm_name = strstr(lib->name, LIBJVM_NAME)) != 0) {
char classes_jsa[PATH_MAX];
struct FileMapHeader header;
int fd = -1;
int m = 0;
size_t n = 0;
uintptr_t base = 0, useSharedSpacesAddr = 0;
uintptr_t sharedArchivePathAddrAddr = 0, sharedArchivePathAddr = 0;
jboolean useSharedSpaces = 0;
map_info* mi = 0;
memset(classes_jsa, 0, sizeof(classes_jsa));
jvm_name = lib->name;
useSharedSpacesAddr = lookup_symbol(ph, jvm_name, USE_SHARED_SPACES_SYM);
if (useSharedSpacesAddr == 0) {
print_debug("can't lookup 'UseSharedSpaces' flag\n");
return false;
}
// Hotspot VM types are not exported to build this library, so
// the equivalent type jboolean is used to read the value of
// UseSharedSpaces, which is the same as the hotspot type "bool".
if (read_jboolean(ph, useSharedSpacesAddr, &useSharedSpaces) != true) {
print_debug("can't read the value of 'UseSharedSpaces' flag\n");
return false;
}
if ((int)useSharedSpaces == 0) {
print_debug("UseSharedSpaces is false, assuming -Xshare:off!\n");
return true;
}
sharedArchivePathAddrAddr = lookup_symbol(ph, jvm_name, SHARED_ARCHIVE_PATH_SYM);
if (sharedArchivePathAddrAddr == 0) {
print_debug("can't lookup shared archive path symbol\n");
return false;
}
if (read_pointer(ph, sharedArchivePathAddrAddr, &sharedArchivePathAddr) != true) {
print_debug("can't read shared archive path pointer\n");
return false;
}
if (read_string(ph, sharedArchivePathAddr, classes_jsa, sizeof(classes_jsa)) != true) {
print_debug("can't read shared archive path value\n");
return false;
}
print_debug("looking for %s\n", classes_jsa);
// open the class sharing archive file
fd = pathmap_open(classes_jsa);
if (fd < 0) {
print_debug("can't open %s!\n", classes_jsa);
ph->core->classes_jsa_fd = -1;
return false;
} else {
print_debug("opened %s\n", classes_jsa);
}
// read FileMapHeader from the file
memset(&header, 0, sizeof(struct FileMapHeader));
if ((n = read(fd, &header, sizeof(struct FileMapHeader)))
!= sizeof(struct FileMapHeader)) {
print_debug("can't read shared archive file map header from %s\n", classes_jsa);
close(fd);
return false;
}
// check file magic
if (header._magic != 0xf00baba2) {
print_debug("%s has bad shared archive file magic number 0x%x, expecing 0xf00baba2\n",
classes_jsa, header._magic);
close(fd);
return false;
}
// check version
if (header._version != CURRENT_ARCHIVE_VERSION) {
print_debug("%s has wrong shared archive file version %d, expecting %d\n",
classes_jsa, header._version, CURRENT_ARCHIVE_VERSION);
close(fd);
return false;
}
ph->core->classes_jsa_fd = fd;
// add read-only maps from classes.jsa to the list of maps
for (m = 0; m < NUM_SHARED_MAPS; m++) {
if (header._space[m]._read_only) {
base = (uintptr_t) header._space[m]._base;
// no need to worry about the fractional pages at-the-end.
// possible fractional pages are handled by core_read_data.
add_class_share_map_info(ph, (off_t) header._space[m]._file_offset,
base, (size_t) header._space[m]._used);
print_debug("added a share archive map at 0x%lx\n", base);
}
}
return true;
memset(classes_jsa, 0, sizeof(classes_jsa));
jvm_name = lib->name;
useSharedSpacesAddr = lookup_symbol(ph, jvm_name, USE_SHARED_SPACES_SYM);
if (useSharedSpacesAddr == 0) {
print_debug("can't lookup 'UseSharedSpaces' flag\n");
return false;
}
lib = lib->next;
// Hotspot VM types are not exported to build this library, so
// the equivalent type jboolean is used to read the value of
// UseSharedSpaces, which is the same as the hotspot type "bool".
if (read_jboolean(ph, useSharedSpacesAddr, &useSharedSpaces) != true) {
print_debug("can't read the value of 'UseSharedSpaces' flag\n");
return false;
}
if ((int)useSharedSpaces == 0) {
print_debug("UseSharedSpaces is false, assuming -Xshare:off!\n");
return true;
}
sharedArchivePathAddrAddr = lookup_symbol(ph, jvm_name, SHARED_ARCHIVE_PATH_SYM);
if (sharedArchivePathAddrAddr == 0) {
print_debug("can't lookup shared archive path symbol\n");
return false;
}
if (read_pointer(ph, sharedArchivePathAddrAddr, &sharedArchivePathAddr) != true) {
print_debug("can't read shared archive path pointer\n");
return false;
}
if (read_string(ph, sharedArchivePathAddr, classes_jsa, sizeof(classes_jsa)) != true) {
print_debug("can't read shared archive path value\n");
return false;
}
print_debug("looking for %s\n", classes_jsa);
// open the class sharing archive file
fd = pathmap_open(classes_jsa);
if (fd < 0) {
print_debug("can't open %s!\n", classes_jsa);
ph->core->classes_jsa_fd = -1;
return false;
} else {
print_debug("opened %s\n", classes_jsa);
}
// read FileMapHeader from the file
memset(&header, 0, sizeof(struct FileMapHeader));
if ((n = read(fd, &header, sizeof(struct FileMapHeader)))
!= sizeof(struct FileMapHeader)) {
print_debug("can't read shared archive file map header from %s\n", classes_jsa);
close(fd);
return false;
}
// check file magic
if (header._magic != 0xf00baba2) {
print_debug("%s has bad shared archive file magic number 0x%x, expecing 0xf00baba2\n",
classes_jsa, header._magic);
close(fd);
return false;
}
// check version
if (header._version != CURRENT_ARCHIVE_VERSION) {
print_debug("%s has wrong shared archive file version %d, expecting %d\n",
classes_jsa, header._version, CURRENT_ARCHIVE_VERSION);
close(fd);
return false;
}
ph->core->classes_jsa_fd = fd;
// add read-only maps from classes.jsa to the list of maps
for (m = 0; m < NUM_SHARED_MAPS; m++) {
if (header._space[m]._read_only) {
base = (uintptr_t) header._space[m]._base;
// no need to worry about the fractional pages at-the-end.
// possible fractional pages are handled by core_read_data.
add_class_share_map_info(ph, (off_t) header._space[m]._file_offset,
base, (size_t) header._space[m]._used);
print_debug("added a share archive map at 0x%lx\n", base);
}
}
return true;
}
return true;
lib = lib->next;
}
return true;
}
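The validation steps above (read a fixed-size header, check a magic number, check a version) are a common pattern; a compact, self-contained sketch follows. The jsa_prefix struct is a stand-in carrying only the two fields that are checked, and it ignores the layout and endianness concerns that the real code can ignore because the archive was written on the same machine.

#include <stdio.h>
#include <stdint.h>

struct jsa_prefix { int _magic; int _version; };

/* Return 1 if the file at path starts with the expected magic and version, else 0. */
static int check_archive(const char* path, int expected_version) {
  struct jsa_prefix p;
  FILE* f = fopen(path, "rb");
  if (f == NULL) {
    return 0;
  }
  if (fread(&p, sizeof(p), 1, f) != 1) {
    fclose(f);
    return 0;
  }
  fclose(f);
  return p._magic == (int)0xf00baba2 && p._version == expected_version;
}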
@ -396,54 +404,58 @@ static bool init_classsharing_workaround(struct ps_prochandle* ph) {
// callback for sorting the array of map_info pointers.
static int core_cmp_mapping(const void *lhsp, const void *rhsp)
{
const map_info *lhs = *((const map_info **)lhsp);
const map_info *rhs = *((const map_info **)rhsp);
const map_info *lhs = *((const map_info **)lhsp);
const map_info *rhs = *((const map_info **)rhsp);
if (lhs->vaddr == rhs->vaddr)
return (0);
if (lhs->vaddr == rhs->vaddr) {
return (0);
}
return (lhs->vaddr < rhs->vaddr ? -1 : 1);
return (lhs->vaddr < rhs->vaddr ? -1 : 1);
}
// we sort map_info by starting virtual address so that we can do
// binary search to read from an address.
static bool sort_map_array(struct ps_prochandle* ph) {
size_t num_maps = ph->core->num_maps;
map_info* map = ph->core->maps;
int i = 0;
size_t num_maps = ph->core->num_maps;
map_info* map = ph->core->maps;
int i = 0;
// allocate map_array
map_info** array;
if ( (array = (map_info**) malloc(sizeof(map_info*) * num_maps)) == NULL) {
print_debug("can't allocate memory for map array\n");
return false;
}
// allocate map_array
map_info** array;
if ( (array = (map_info**) malloc(sizeof(map_info*) * num_maps)) == NULL) {
print_debug("can't allocate memory for map array\n");
return false;
}
// add maps to array
while (map) {
array[i] = map;
i++;
map = map->next;
}
// add maps to array
while (map) {
array[i] = map;
i++;
map = map->next;
}
// sort is called twice. If this is second time, clear map array
if (ph->core->map_array) free(ph->core->map_array);
ph->core->map_array = array;
// sort the map_info array by base virtual address.
qsort(ph->core->map_array, ph->core->num_maps, sizeof (map_info*),
core_cmp_mapping);
// sort is called twice. If this is second time, clear map array
if (ph->core->map_array) {
free(ph->core->map_array);
}
// print map
if (is_debug()) {
int j = 0;
print_debug("---- sorted virtual address map ----\n");
for (j = 0; j < ph->core->num_maps; j++) {
print_debug("base = 0x%lx\tsize = %zu\n", ph->core->map_array[j]->vaddr,
ph->core->map_array[j]->memsz);
}
}
ph->core->map_array = array;
// sort the map_info array by base virtual address.
qsort(ph->core->map_array, ph->core->num_maps, sizeof (map_info*),
core_cmp_mapping);
return true;
// print map
if (is_debug()) {
int j = 0;
print_debug("---- sorted virtual address map ----\n");
for (j = 0; j < ph->core->num_maps; j++) {
print_debug("base = 0x%lx\tsize = %zu\n", ph->core->map_array[j]->vaddr,
ph->core->map_array[j]->memsz);
}
}
return true;
}
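Note that qsort hands the comparator pointers to the array elements, so for an array of map_info* the comparator sees map_info**, exactly as core_cmp_mapping is written. A tiny standalone example of the same double indirection with plain integers (names and data are illustrative only):

#include <stdio.h>
#include <stdlib.h>

/* qsort passes pointers to the elements; for an array of int* the
 * comparator therefore receives int**, just as core_cmp_mapping sees map_info**. */
static int cmp_intp(const void* lhsp, const void* rhsp) {
  const int* lhs = *(const int* const*) lhsp;
  const int* rhs = *(const int* const*) rhsp;
  if (*lhs == *rhs) {
    return 0;
  }
  return (*lhs < *rhs) ? -1 : 1;
}

int main(void) {
  int a = 3, b = 1, c = 2;
  int* arr[] = { &a, &b, &c };
  qsort(arr, 3, sizeof(int*), cmp_intp);
  printf("%d %d %d\n", *arr[0], *arr[1], *arr[2]);  /* 1 2 3 */
  return 0;
}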
#ifndef MIN
@ -460,16 +472,18 @@ static bool core_read_data(struct ps_prochandle* ph, uintptr_t addr, char *buf,
off_t off;
int fd;
if (mp == NULL)
if (mp == NULL) {
break; /* No mapping for this address */
}
fd = mp->fd;
mapoff = addr - mp->vaddr;
len = MIN(resid, mp->memsz - mapoff);
off = mp->offset + mapoff;
if ((len = pread(fd, buf, len, off)) <= 0)
if ((len = pread(fd, buf, len, off)) <= 0) {
break;
}
resid -= len;
addr += len;
@ -625,8 +639,9 @@ static bool core_handle_note(struct ps_prochandle* ph, ELF_PHDR* note_phdr) {
notep->n_type, notep->n_descsz);
if (notep->n_type == NT_PRSTATUS) {
if (core_handle_prstatus(ph, descdata, notep->n_descsz) != true)
return false;
if (core_handle_prstatus(ph, descdata, notep->n_descsz) != true) {
return false;
}
}
p = descdata + ROUNDUP(notep->n_descsz, 4);
}
@ -654,7 +669,7 @@ static bool read_core_segments(struct ps_prochandle* ph, ELF_EHDR* core_ehdr) {
* contains a set of saved /proc structures), and PT_LOAD (which
* represents a memory mapping from the process's address space).
*
* Difference b/w Solaris PT_NOTE and Linux PT_NOTE:
* Difference b/w Solaris PT_NOTE and Linux/BSD PT_NOTE:
*
* In Solaris there are two PT_NOTE segments the first PT_NOTE (if present)
* contains /proc structs in the pre-2.6 unstructured /proc format. the last
@ -674,7 +689,9 @@ static bool read_core_segments(struct ps_prochandle* ph, ELF_EHDR* core_ehdr) {
for (core_php = phbuf, i = 0; i < core_ehdr->e_phnum; i++) {
switch (core_php->p_type) {
case PT_NOTE:
if (core_handle_note(ph, core_php) != true) goto err;
if (core_handle_note(ph, core_php) != true) {
goto err;
}
break;
case PT_LOAD: {
@ -832,60 +849,62 @@ err:
// read shared library info from runtime linker's data structures.
// This work is done by librtld_db in Solaris.
static bool read_shared_lib_info(struct ps_prochandle* ph) {
uintptr_t addr = ph->core->dynamic_addr;
uintptr_t debug_base;
uintptr_t first_link_map_addr;
uintptr_t ld_base_addr;
uintptr_t link_map_addr;
uintptr_t lib_base_diff;
uintptr_t lib_base;
uintptr_t lib_name_addr;
char lib_name[BUF_SIZE];
ELF_DYN dyn;
ELF_EHDR elf_ehdr;
int lib_fd;
uintptr_t addr = ph->core->dynamic_addr;
uintptr_t debug_base;
uintptr_t first_link_map_addr;
uintptr_t ld_base_addr;
uintptr_t link_map_addr;
uintptr_t lib_base_diff;
uintptr_t lib_base;
uintptr_t lib_name_addr;
char lib_name[BUF_SIZE];
ELF_DYN dyn;
ELF_EHDR elf_ehdr;
int lib_fd;
// _DYNAMIC has information of the form
// [tag] [data] [tag] [data] .....
// Both tag and data are pointer sized.
// We look for dynamic info with DT_DEBUG. This has shared object info.
// refer to struct r_debug in link.h
// _DYNAMIC has information of the form
// [tag] [data] [tag] [data] .....
// Both tag and data are pointer sized.
// We look for dynamic info with DT_DEBUG. This has shared object info.
// refer to struct r_debug in link.h
dyn.d_tag = DT_NULL;
while (dyn.d_tag != DT_DEBUG) {
if (ps_pdread(ph, (psaddr_t) addr, &dyn, sizeof(ELF_DYN)) != PS_OK) {
print_debug("can't read debug info from _DYNAMIC\n");
return false;
}
addr += sizeof(ELF_DYN);
}
dyn.d_tag = DT_NULL;
while (dyn.d_tag != DT_DEBUG) {
if (ps_pdread(ph, (psaddr_t) addr, &dyn, sizeof(ELF_DYN)) != PS_OK) {
print_debug("can't read debug info from _DYNAMIC\n");
return false;
}
addr += sizeof(ELF_DYN);
}
// we have got Dyn entry with DT_DEBUG
debug_base = dyn.d_un.d_ptr;
// at debug_base we have struct r_debug. This has first link map in r_map field
if (ps_pdread(ph, (psaddr_t) debug_base + FIRST_LINK_MAP_OFFSET,
// we have got Dyn entry with DT_DEBUG
debug_base = dyn.d_un.d_ptr;
// at debug_base we have struct r_debug. This has first link map in r_map field
if (ps_pdread(ph, (psaddr_t) debug_base + FIRST_LINK_MAP_OFFSET,
&first_link_map_addr, sizeof(uintptr_t)) != PS_OK) {
print_debug("can't read first link map address\n");
return false;
}
print_debug("can't read first link map address\n");
return false;
}
// read ld_base address from struct r_debug
if (ps_pdread(ph, (psaddr_t) debug_base + LD_BASE_OFFSET, &ld_base_addr,
// read ld_base address from struct r_debug
if (ps_pdread(ph, (psaddr_t) debug_base + LD_BASE_OFFSET, &ld_base_addr,
sizeof(uintptr_t)) != PS_OK) {
print_debug("can't read ld base address\n");
return false;
}
ph->core->ld_base_addr = ld_base_addr;
print_debug("can't read ld base address\n");
return false;
}
ph->core->ld_base_addr = ld_base_addr;
print_debug("interpreter base address is 0x%lx\n", ld_base_addr);
print_debug("interpreter base address is 0x%lx\n", ld_base_addr);
// now read segments from interp (i.e ld.so or ld-linux.so)
if (read_interp_segments(ph) != true)
// now read segments from interp (i.e ld.so or ld-linux.so or ld-elf.so)
if (read_interp_segments(ph) != true) {
return false;
}
// after adding interpreter (ld.so) mappings sort again
if (sort_map_array(ph) != true)
return false;
// after adding interpreter (ld.so) mappings sort again
if (sort_map_array(ph) != true) {
return false;
}
print_debug("first link map is at 0x%lx\n", first_link_map_addr);
@ -950,95 +969,102 @@ static bool read_shared_lib_info(struct ps_prochandle* ph) {
}
}
// read next link_map address
if (ps_pdread(ph, (psaddr_t) link_map_addr + LINK_MAP_NEXT_OFFSET,
&link_map_addr, sizeof(uintptr_t)) != PS_OK) {
print_debug("can't read next link in link_map\n");
return false;
}
}
// read next link_map address
if (ps_pdread(ph, (psaddr_t) link_map_addr + LINK_MAP_NEXT_OFFSET,
&link_map_addr, sizeof(uintptr_t)) != PS_OK) {
print_debug("can't read next link in link_map\n");
return false;
}
}
return true;
return true;
}
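The DT_DEBUG walk above can also be tried against a live, dynamically linked program on a glibc-based Linux system, where the linker-defined _DYNAMIC symbol and struct r_debug are directly visible. This is only a sketch of the same idea; the SA code must do the equivalent reads through ps_pdread against the core file, and other platforms lay this out differently.

#include <elf.h>
#include <link.h>
#include <stdio.h>

/* Scan our own _DYNAMIC array for DT_DEBUG; its d_ptr points at the
 * dynamic linker's struct r_debug, whose r_map field is the first link_map. */
extern ElfW(Dyn) _DYNAMIC[];

int main(void) {
  for (ElfW(Dyn)* dyn = _DYNAMIC; dyn->d_tag != DT_NULL; dyn++) {
    if (dyn->d_tag == DT_DEBUG) {
      struct r_debug* dbg = (struct r_debug*) dyn->d_un.d_ptr;
      if (dbg != NULL && dbg->r_map != NULL) {
        const char* name = dbg->r_map->l_name;
        printf("first link map entry: %s\n", (name && name[0]) ? name : "(main executable)");
      }
      break;
    }
  }
  return 0;
}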
// the one and only one exposed stuff from this file
struct ps_prochandle* Pgrab_core(const char* exec_file, const char* core_file) {
ELF_EHDR core_ehdr;
ELF_EHDR exec_ehdr;
ELF_EHDR lib_ehdr;
ELF_EHDR core_ehdr;
ELF_EHDR exec_ehdr;
ELF_EHDR lib_ehdr;
struct ps_prochandle* ph = (struct ps_prochandle*) calloc(1, sizeof(struct ps_prochandle));
if (ph == NULL) {
print_debug("can't allocate ps_prochandle\n");
return NULL;
}
struct ps_prochandle* ph = (struct ps_prochandle*) calloc(1, sizeof(struct ps_prochandle));
if (ph == NULL) {
print_debug("can't allocate ps_prochandle\n");
return NULL;
}
if ((ph->core = (struct core_data*) calloc(1, sizeof(struct core_data))) == NULL) {
free(ph);
print_debug("can't allocate ps_prochandle\n");
return NULL;
}
if ((ph->core = (struct core_data*) calloc(1, sizeof(struct core_data))) == NULL) {
free(ph);
print_debug("can't allocate ps_prochandle\n");
return NULL;
}
// initialize ph
ph->ops = &core_ops;
ph->core->core_fd = -1;
ph->core->exec_fd = -1;
ph->core->interp_fd = -1;
// initialize ph
ph->ops = &core_ops;
ph->core->core_fd = -1;
ph->core->exec_fd = -1;
ph->core->interp_fd = -1;
// open the core file
if ((ph->core->core_fd = open(core_file, O_RDONLY)) < 0) {
print_debug("can't open core file\n");
goto err;
}
// open the core file
if ((ph->core->core_fd = open(core_file, O_RDONLY)) < 0) {
print_debug("can't open core file\n");
goto err;
}
// read core file ELF header
if (read_elf_header(ph->core->core_fd, &core_ehdr) != true || core_ehdr.e_type != ET_CORE) {
print_debug("core file is not a valid ELF ET_CORE file\n");
goto err;
}
// read core file ELF header
if (read_elf_header(ph->core->core_fd, &core_ehdr) != true || core_ehdr.e_type != ET_CORE) {
print_debug("core file is not a valid ELF ET_CORE file\n");
goto err;
}
if ((ph->core->exec_fd = open(exec_file, O_RDONLY)) < 0) {
print_debug("can't open executable file\n");
goto err;
}
if ((ph->core->exec_fd = open(exec_file, O_RDONLY)) < 0) {
print_debug("can't open executable file\n");
goto err;
}
if (read_elf_header(ph->core->exec_fd, &exec_ehdr) != true || exec_ehdr.e_type != ET_EXEC) {
print_debug("executable file is not a valid ELF ET_EXEC file\n");
goto err;
}
if (read_elf_header(ph->core->exec_fd, &exec_ehdr) != true || exec_ehdr.e_type != ET_EXEC) {
print_debug("executable file is not a valid ELF ET_EXEC file\n");
goto err;
}
// process core file segments
if (read_core_segments(ph, &core_ehdr) != true)
goto err;
// process core file segments
if (read_core_segments(ph, &core_ehdr) != true) {
goto err;
}
// process exec file segments
if (read_exec_segments(ph, &exec_ehdr) != true)
goto err;
// process exec file segments
if (read_exec_segments(ph, &exec_ehdr) != true) {
goto err;
}
// exec file is also treated like a shared object for symbol search
if (add_lib_info_fd(ph, exec_file, ph->core->exec_fd,
(uintptr_t)0 + find_base_address(ph->core->exec_fd, &exec_ehdr)) == NULL)
goto err;
// exec file is also treated like a shared object for symbol search
if (add_lib_info_fd(ph, exec_file, ph->core->exec_fd,
(uintptr_t)0 + find_base_address(ph->core->exec_fd, &exec_ehdr)) == NULL) {
goto err;
}
// allocate and sort maps into map_array, we need to do this
// here because read_shared_lib_info needs to read from debuggee
// address space
if (sort_map_array(ph) != true)
goto err;
// allocate and sort maps into map_array, we need to do this
// here because read_shared_lib_info needs to read from debuggee
// address space
if (sort_map_array(ph) != true) {
goto err;
}
if (read_shared_lib_info(ph) != true)
goto err;
if (read_shared_lib_info(ph) != true) {
goto err;
}
// sort again because we have added more mappings from shared objects
if (sort_map_array(ph) != true)
goto err;
// sort again because we have added more mappings from shared objects
if (sort_map_array(ph) != true) {
goto err;
}
if (init_classsharing_workaround(ph) != true)
goto err;
if (init_classsharing_workaround(ph) != true) {
goto err;
}
return ph;
return ph;
err:
Prelease(ph);
return NULL;
Prelease(ph);
return NULL;
}
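A hedged usage sketch of the entry point above: grab a handle for a core file, use it, and release it with Prelease just as the error path does. The include name is assumed from the SA agent sources and the file names are made up.

#include <stddef.h>
#include "libproc.h"  /* assumed SA agent header declaring Pgrab_core and Prelease */

/* Illustrative only: attach to a core produced by ./java, then detach. */
static void inspect_core(void) {
  struct ps_prochandle* ph = Pgrab_core("./java", "./core.12345");
  if (ph != NULL) {
    /* ... read from the debuggee via the ps_prochandle ops ... */
    Prelease(ph);
  }
}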
View file
@ -67,6 +67,13 @@ public class Disassembler {
String libname = "hsdis";
String arch = System.getProperty("os.arch");
if (os.lastIndexOf("Windows", 0) != -1) {
if (arch.equals("x86")) {
libname += "-i386";
} else if (arch.equals("amd64")) {
libname += "-amd64";
} else {
libname += "-" + arch;
}
path.append(sep + "bin" + sep);
libname += ".dll";
} else if (os.lastIndexOf("SunOS", 0) != -1) {
View file
@ -0,0 +1,56 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.memory;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.oops.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
public class ProtectionDomainCacheEntry extends VMObject {
private static sun.jvm.hotspot.types.OopField protectionDomainField;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("ProtectionDomainCacheEntry");
protectionDomainField = type.getOopField("_literal");
}
public ProtectionDomainCacheEntry(Address addr) {
super(addr);
}
public Oop protectionDomain() {
return VM.getVM().getObjectHeap().newOop(protectionDomainField.getValue(addr));
}
}
View file
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,7 +32,7 @@ import sun.jvm.hotspot.types.*;
public class ProtectionDomainEntry extends VMObject {
private static AddressField nextField;
private static sun.jvm.hotspot.types.OopField protectionDomainField;
private static AddressField pdCacheField;
static {
VM.registerVMInitializedObserver(new Observer() {
@ -46,7 +46,7 @@ public class ProtectionDomainEntry extends VMObject {
Type type = db.lookupType("ProtectionDomainEntry");
nextField = type.getAddressField("_next");
protectionDomainField = type.getOopField("_protection_domain");
pdCacheField = type.getAddressField("_pd_cache");
}
public ProtectionDomainEntry(Address addr) {
@ -54,10 +54,12 @@ public class ProtectionDomainEntry extends VMObject {
}
public ProtectionDomainEntry next() {
return (ProtectionDomainEntry) VMObjectFactory.newObject(ProtectionDomainEntry.class, addr);
return (ProtectionDomainEntry) VMObjectFactory.newObject(ProtectionDomainEntry.class, nextField.getValue(addr));
}
public Oop protectionDomain() {
return VM.getVM().getObjectHeap().newOop(protectionDomainField.getValue(addr));
ProtectionDomainCacheEntry pd_cache = (ProtectionDomainCacheEntry)
VMObjectFactory.newObject(ProtectionDomainCacheEntry.class, pdCacheField.getValue(addr));
return pd_cache.protectionDomain();
}
}
View file
@ -44,12 +44,10 @@ public class SymbolTable extends sun.jvm.hotspot.utilities.Hashtable {
private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("SymbolTable");
theTableField = type.getAddressField("_the_table");
symbolTableSize = db.lookupIntConstant("SymbolTable::symbol_table_size").intValue();
}
// Fields
private static AddressField theTableField;
private static int symbolTableSize;
// Accessors
public static SymbolTable getTheTable() {
@ -57,10 +55,6 @@ public class SymbolTable extends sun.jvm.hotspot.utilities.Hashtable {
return (SymbolTable) VMObjectFactory.newObject(SymbolTable.class, tmp);
}
public static int getSymbolTableSize() {
return symbolTableSize;
}
public SymbolTable(Address addr) {
super(addr);
}
View file
@ -59,6 +59,7 @@ public abstract class AbstractHeapGraphWriter implements HeapGraphWriter {
public boolean doObj(Oop oop) {
try {
writeHeapRecordPrologue();
if (oop instanceof TypeArray) {
writePrimitiveArray((TypeArray)oop);
} else if (oop instanceof ObjArray) {
@ -97,6 +98,7 @@ public abstract class AbstractHeapGraphWriter implements HeapGraphWriter {
// not-a-Java-visible oop
writeInternalObject(oop);
}
writeHeapRecordEpilogue();
} catch (IOException exp) {
throw new RuntimeException(exp);
}
@ -416,6 +418,12 @@ public abstract class AbstractHeapGraphWriter implements HeapGraphWriter {
protected void writeHeapFooter() throws IOException {
}
protected void writeHeapRecordPrologue() throws IOException {
}
protected void writeHeapRecordEpilogue() throws IOException {
}
// HeapVisitor, OopVisitor methods can't throw any non-runtime
// exception. But, derived class write methods (which are called
// from visitor callbacks) may throw IOException. Hence, we throw
View file
@ -44,7 +44,7 @@ import sun.jvm.hotspot.runtime.*;
* WARNING: This format is still under development, and is subject to
* change without notice.
*
* header "JAVA PROFILE 1.0.1" (0-terminated)
* header "JAVA PROFILE 1.0.1" or "JAVA PROFILE 1.0.2" (0-terminated)
* u4 size of identifiers. Identifiers are used to represent
* UTF8 strings, objects, stack traces, etc. They usually
* have the same size as host pointers. For example, on
@ -292,11 +292,34 @@ import sun.jvm.hotspot.runtime.*;
* 0x00000002: cpu sampling on/off
* u2 stack trace depth
*
*
* When the header is "JAVA PROFILE 1.0.2" a heap dump can optionally
* be generated as a sequence of heap dump segments. This sequence is
* terminated by an end record. The additional tags allowed by format
* "JAVA PROFILE 1.0.2" are:
*
* HPROF_HEAP_DUMP_SEGMENT denote a heap dump segment
*
* [heap dump sub-records]*
* The same sub-record types allowed by HPROF_HEAP_DUMP
*
* HPROF_HEAP_DUMP_END denotes the end of a heap dump
*
*/
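All of the record types listed above share the same framing: a one-byte tag, a u4 relative timestamp and a u4 body length, all big-endian. A minimal C sketch of that framing, illustrative only; the writer below produces the same bytes through a DataOutputStream.

#include <stdio.h>
#include <stdint.h>

/* Write a u4 in big-endian order, as the hprof format requires. */
static void write_u4(FILE* f, uint32_t v) {
  fputc((v >> 24) & 0xFF, f);
  fputc((v >> 16) & 0xFF, f);
  fputc((v >>  8) & 0xFF, f);
  fputc(v & 0xFF, f);
}

/* Record header: tag (u1), relative timestamp (u4), body length (u4). */
static void write_record_header(FILE* f, uint8_t tag, uint32_t body_len) {
  fputc(tag, f);
  write_u4(f, 0);
  write_u4(f, body_len);
}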
public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
// The heap size threshold used to determine if segmented format
// ("JAVA PROFILE 1.0.2") should be used.
private static final long HPROF_SEGMENTED_HEAP_DUMP_THRESHOLD = 2L * 0x40000000;
// The approximate size of a heap segment. Used to calculate when to create
// a new segment.
private static final long HPROF_SEGMENTED_HEAP_DUMP_SEGMENT_SIZE = 1L * 0x40000000;
// hprof binary file header
private static final String HPROF_HEADER = "JAVA PROFILE 1.0.1";
private static final String HPROF_HEADER_1_0_1 = "JAVA PROFILE 1.0.1";
private static final String HPROF_HEADER_1_0_2 = "JAVA PROFILE 1.0.2";
// constants in enum HprofTag
private static final int HPROF_UTF8 = 0x01;
@ -312,6 +335,10 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
private static final int HPROF_CPU_SAMPLES = 0x0D;
private static final int HPROF_CONTROL_SETTINGS = 0x0E;
// 1.0.2 record types
private static final int HPROF_HEAP_DUMP_SEGMENT = 0x1C;
private static final int HPROF_HEAP_DUMP_END = 0x2C;
// Heap dump constants
// constants in enum HprofGcTag
private static final int HPROF_GC_ROOT_UNKNOWN = 0xFF;
@ -352,11 +379,9 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
private static final int JVM_SIGNATURE_ARRAY = '[';
private static final int JVM_SIGNATURE_CLASS = 'L';
public synchronized void write(String fileName) throws IOException {
// open file stream and create buffered data output stream
FileOutputStream fos = new FileOutputStream(fileName);
FileChannel chn = fos.getChannel();
fos = new FileOutputStream(fileName);
out = new DataOutputStream(new BufferedOutputStream(fos));
VM vm = VM.getVM();
@ -385,6 +410,9 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
FLOAT_SIZE = objectHeap.getFloatSize();
DOUBLE_SIZE = objectHeap.getDoubleSize();
// Check whether we should dump the heap as segments
useSegmentedHeapDump = vm.getUniverse().heap().used() > HPROF_SEGMENTED_HEAP_DUMP_THRESHOLD;
// hprof bin format header
writeFileHeader();
@ -394,38 +422,87 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
// hprof UTF-8 symbols section
writeSymbols();
// HPROF_LOAD_CLASS records for all classes
writeClasses();
// write heap data now
out.writeByte((byte)HPROF_HEAP_DUMP);
out.writeInt(0); // relative timestamp
// remember position of dump length, we will fixup
// length later - hprof format requires length.
out.flush();
long dumpStart = chn.position();
// write dummy length of 0 and we'll fix it later.
out.writeInt(0);
// write CLASS_DUMP records
writeClassDumpRecords();
// this will write heap data into the buffer stream
super.write();
// flush buffer stream.
out.flush();
// Fill in final length
fillInHeapRecordLength();
if (useSegmentedHeapDump) {
// Write heap segment-end record
out.writeByte((byte) HPROF_HEAP_DUMP_END);
out.writeInt(0);
out.writeInt(0);
}
// flush buffer stream and throw it.
out.flush();
out = null;
// close the file stream
fos.close();
}
@Override
protected void writeHeapRecordPrologue() throws IOException {
if (currentSegmentStart == 0) {
// write heap data header, depending on heap size use segmented heap
// format
out.writeByte((byte) (useSegmentedHeapDump ? HPROF_HEAP_DUMP_SEGMENT
: HPROF_HEAP_DUMP));
out.writeInt(0);
// remember position of dump length, we will fixup
// length later - hprof format requires length.
out.flush();
currentSegmentStart = fos.getChannel().position();
// write dummy length of 0 and we'll fix it later.
out.writeInt(0);
}
}
@Override
protected void writeHeapRecordEpilogue() throws IOException {
if (useSegmentedHeapDump) {
out.flush();
if ((fos.getChannel().position() - currentSegmentStart - 4) >= HPROF_SEGMENTED_HEAP_DUMP_SEGMENT_SIZE) {
fillInHeapRecordLength();
currentSegmentStart = 0;
}
}
}
private void fillInHeapRecordLength() throws IOException {
// now get current position to calculate length
long dumpEnd = chn.position();
long dumpEnd = fos.getChannel().position();
// calculate length of heap data
int dumpLen = (int) (dumpEnd - dumpStart - 4);
long dumpLenLong = (dumpEnd - currentSegmentStart - 4L);
// Check length boundary, overflow could happen but is _very_ unlikely
if(dumpLenLong >= (4L * 0x40000000)){
throw new RuntimeException("Heap segment size overflow.");
}
// Save the current position
long currentPosition = fos.getChannel().position();
// seek the position to write length
chn.position(dumpStart);
fos.getChannel().position(currentSegmentStart);
int dumpLen = (int) dumpLenLong;
// write length as integer
fos.write((dumpLen >>> 24) & 0xFF);
@ -433,8 +510,8 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
fos.write((dumpLen >>> 8) & 0xFF);
fos.write((dumpLen >>> 0) & 0xFF);
// close the file stream
fos.close();
//Reset to previous current position
fos.getChannel().position(currentPosition);
}
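The method above uses a write-body-first, patch-length-later scheme: remember where the u4 length field sits, write the records, then seek back and overwrite the placeholder. A plain C sketch of the same technique with stdio; the names are illustrative and overflow handling is reduced to a simple negative check.

#include <stdio.h>
#include <stdint.h>

/* Patch the u4 length at length_pos so that it covers [length_pos + 4, body_end).
 * Returns 0 on success, -1 on failure.  Leaves the file position where it was. */
static int patch_u4_length(FILE* f, long length_pos, long body_end) {
  long body_len = body_end - length_pos - 4;
  long saved = ftell(f);
  if (body_len < 0 || saved < 0 || fseek(f, length_pos, SEEK_SET) != 0) {
    return -1;
  }
  uint32_t v = (uint32_t) body_len;
  fputc((v >> 24) & 0xFF, f);
  fputc((v >> 16) & 0xFF, f);
  fputc((v >>  8) & 0xFF, f);
  fputc(v & 0xFF, f);
  return fseek(f, saved, SEEK_SET) == 0 ? 0 : -1;
}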
private void writeClassDumpRecords() throws IOException {
@ -443,7 +520,9 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
sysDict.allClassesDo(new SystemDictionary.ClassVisitor() {
public void visit(Klass k) {
try {
writeHeapRecordPrologue();
writeClassDumpRecord(k);
writeHeapRecordEpilogue();
} catch (IOException e) {
throw new RuntimeException(e);
}
@ -884,7 +963,12 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
// writes hprof binary file header
private void writeFileHeader() throws IOException {
// version string
out.writeBytes(HPROF_HEADER);
if(useSegmentedHeapDump) {
out.writeBytes(HPROF_HEADER_1_0_2);
}
else {
out.writeBytes(HPROF_HEADER_1_0_1);
}
out.writeByte((byte)'\0');
// write identifier size. we use pointers as identifiers.
@ -976,6 +1060,7 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
private static final int EMPTY_FRAME_DEPTH = -1;
private DataOutputStream out;
private FileOutputStream fos;
private Debugger dbg;
private ObjectHeap objectHeap;
private SymbolTable symTbl;
@ -983,6 +1068,10 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
// oopSize of the debuggee
private int OBJ_ID_SIZE;
// Added for hprof file format 1.0.2 support
private boolean useSegmentedHeapDump;
private long currentSegmentStart;
private long BOOLEAN_BASE_OFFSET;
private long BYTE_BASE_OFFSET;
private long CHAR_BASE_OFFSET;
@ -1005,6 +1094,7 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
private static class ClassData {
int instSize;
List fields;
ClassData(int instSize, List fields) {
this.instSize = instSize;
this.fields = fields;
View file
@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2013
HS_MAJOR_VER=25
HS_MINOR_VER=0
HS_BUILD_NUMBER=53
HS_BUILD_NUMBER=54
JDK_MAJOR_VER=1
JDK_MINOR_VER=8
View file
@ -44,6 +44,7 @@ CXX=cl.exe
# /GS Inserts security stack checks in some functions (VS2005 default)
# /Oi Use intrinsics (in /O2)
# /Od Disable all optimizations
# /MP Use multiple cores for compilation
#
# NOTE: Normally following any of the above with a '-' will turn off that flag
#
@ -206,6 +207,7 @@ FASTDEBUG_OPT_OPTION = /O2 /Oy-
DEBUG_OPT_OPTION = /Od
GX_OPTION = /EHsc
LD_FLAGS = /manifest $(LD_FLAGS)
MP_FLAG = /MP
# Manifest Tool - used in VS2005 and later to adjust manifests stored
# as resources inside build artifacts.
!if "x$(MT)" == "x"
@ -219,6 +221,7 @@ FASTDEBUG_OPT_OPTION = /O2 /Oy-
DEBUG_OPT_OPTION = /Od
GX_OPTION = /EHsc
LD_FLAGS = /manifest $(LD_FLAGS)
MP_FLAG = /MP
# Manifest Tool - used in VS2005 and later to adjust manifests stored
# as resources inside build artifacts.
!if "x$(MT)" == "x"
@ -235,6 +238,7 @@ FASTDEBUG_OPT_OPTION = /O2 /Oy-
DEBUG_OPT_OPTION = /Od
GX_OPTION = /EHsc
LD_FLAGS = /manifest $(LD_FLAGS)
MP_FLAG = /MP
# Manifest Tool - used in VS2005 and later to adjust manifests stored
# as resources inside build artifacts.
!if "x$(MT)" == "x"
@ -245,6 +249,8 @@ LD_FLAGS = /SAFESEH $(LD_FLAGS)
!endif
!endif
CXX_FLAGS = $(CXX_FLAGS) $(MP_FLAG)
# If NO_OPTIMIZATIONS is defined in the environment, turn everything off
!ifdef NO_OPTIMIZATIONS
PRODUCT_OPT_OPTION = $(DEBUG_OPT_OPTION)
View file
@ -38,7 +38,7 @@ default:: $(BUILD_PCH_FILE) $(AOUT) checkAndBuildSA
!include ../local.make
!include compile.make
CXX_FLAGS=$(CXX_FLAGS) $(FASTDEBUG_OPT_OPTION) /D "CHECK_UNHANDLED_OOPS"
CXX_FLAGS=$(CXX_FLAGS) $(FASTDEBUG_OPT_OPTION)
!include $(WorkSpace)/make/windows/makefiles/vm.make
!include local.make
View file
@ -102,28 +102,33 @@ SA_CFLAGS = $(SA_CFLAGS) -ZI
!if "$(MT)" != ""
SA_LD_FLAGS = -manifest $(SA_LD_FLAGS)
!endif
SASRCFILE = $(AGENT_DIR)/src/os/win32/windbg/sawindbg.cpp
SASRCFILES = $(AGENT_DIR)/src/os/win32/windbg/sawindbg.cpp \
$(AGENT_DIR)/src/share/native/sadis.c
SA_LFLAGS = $(SA_LD_FLAGS) -nologo -subsystem:console -machine:$(MACHINE)
!if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1"
SA_LFLAGS = $(SA_LFLAGS) -map -debug
!endif
SA_CFLAGS = $(SA_CFLAGS) $(MP_FLAG)
# Note that we do not keep sawindbg.obj around as it would then
# get included in the dumpbin command in build_vm_def.sh
# In VS2005 or VS2008 the link command creates a .manifest file that we want
# to insert into the linked artifact so we do not need to track it separately.
# Use ";#2" for .dll and ";#1" for .exe in the MT command below:
$(SAWINDBG): $(SASRCFILE)
$(SAWINDBG): $(SASRCFILES)
set INCLUDE=$(SA_INCLUDE)$(INCLUDE)
$(CXX) @<<
-I"$(BootStrapDir)/include" -I"$(BootStrapDir)/include/win32"
-I"$(GENERATED)" $(SA_CFLAGS)
$(SASRCFILE)
$(SASRCFILES)
-out:$*.obj
<<
set LIB=$(SA_LIB)$(LIB)
$(LD) -out:$@ -DLL $*.obj dbgeng.lib $(SA_LFLAGS)
$(LD) -out:$@ -DLL sawindbg.obj sadis.obj dbgeng.lib $(SA_LFLAGS)
!if "$(MT)" != ""
$(MT) -manifest $(@F).manifest -outputresource:$(@F);#2
!endif
View file
@ -37,6 +37,9 @@
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "vmreg_sparc.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif
// Implementation of StubAssembler
@ -912,7 +915,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
Register tmp2 = G3_scratch;
jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;
Label not_already_dirty, restart, refill;
Label not_already_dirty, restart, refill, young_card;
#ifdef _LP64
__ srlx(addr, CardTableModRefBS::card_shift, addr);
@ -924,9 +927,15 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ set(rs, cardtable); // cardtable := <card table base>
__ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
__ cmp_and_br_short(tmp, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
__ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
__ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
__ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
__ bind(young_card);
// We didn't take the branch, so we're already dirty: return.
// Use return-from-leaf
__ retl();
View file
@ -3752,7 +3752,7 @@ static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
#define __ masm.
address start = __ pc();
Label not_already_dirty, restart, refill;
Label not_already_dirty, restart, refill, young_card;
#ifdef _LP64
__ srlx(O0, CardTableModRefBS::card_shift, O0);
@ -3763,9 +3763,15 @@ static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
__ set(addrlit, O1); // O1 := <card table base>
__ ldub(O0, O1, O2); // O2 := [O0 + O1]
__ cmp_and_br_short(O2, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
__ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
__ ldub(O0, O1, O2); // O2 := [O0 + O1]
assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
__ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
__ bind(young_card);
// We didn't take the branch, so we're already dirty: return.
// Use return-from-leaf
__ retl();
View file
@ -38,6 +38,9 @@
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif
// Implementation of StubAssembler
@ -1753,13 +1756,17 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ leal(card_addr, __ as_Address(ArrayAddress(cardtable, index)));
#endif
__ cmpb(Address(card_addr, 0), 0);
__ cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
__ jcc(Assembler::equal, done);
__ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
__ cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
__ jcc(Assembler::equal, done);
// storing region crossing non-NULL, card is clean.
// dirty card and log.
__ movb(Address(card_addr, 0), 0);
__ movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
__ cmpl(queue_index, 0);
__ jcc(Assembler::equal, runtime);
View file
@ -3389,13 +3389,18 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr,
const Register card_addr = tmp;
lea(card_addr, as_Address(ArrayAddress(cardtable, index)));
#endif
cmpb(Address(card_addr, 0), 0);
cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
jcc(Assembler::equal, done);
membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
jcc(Assembler::equal, done);
// storing a region crossing, non-NULL oop, card is clean.
// dirty card and log.
movb(Address(card_addr, 0), 0);
movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
cmpl(queue_index, 0);
jcc(Assembler::equal, runtime);
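In C-like terms, the fast path that this assembly implements reads roughly as the sketch below. The card values are placeholders for the real G1SATBCardTableModRefBS constants, and the GCC/Clang builtin __sync_synchronize stands in for the StoreLoad membar.

#include <stdint.h>

#define G1_YOUNG_CARD_VAL  0x08  /* placeholder for g1_young_card_val() */
#define DIRTY_CARD_VAL     0x00  /* placeholder for dirty_card_val() */

/* Sketch of the G1 post-barrier fast path: young cards need no marking,
 * re-check after a StoreLoad fence, then dirty the card and enqueue it. */
static void g1_post_barrier(volatile uint8_t* card,
                            void (*enqueue)(volatile uint8_t*)) {
  if (*card == G1_YOUNG_CARD_VAL) {
    return;
  }
  __sync_synchronize();  /* StoreLoad barrier */
  if (*card == DIRTY_CARD_VAL) {
    return;
  }
  *card = DIRTY_CARD_VAL;
  enqueue(card);
}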
View file
@ -42,7 +42,7 @@
#ifdef __APPLE__
typedef thread_t thread_id_t;
#else
typedef pthread_t thread_id_t;
typedef pid_t thread_id_t;
#endif
// _pthread_id is the pthread id, which is used by library calls
View file
@ -100,6 +100,7 @@
# include <stdint.h>
# include <inttypes.h>
# include <sys/ioctl.h>
# include <sys/syscall.h>
#if defined(__FreeBSD__) || defined(__NetBSD__)
# include <elf.h>
@ -152,6 +153,7 @@ sigset_t SR_sigset;
// utility functions
static int SR_initialize();
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
julong os::available_memory() {
return Bsd::available_memory();
@ -247,7 +249,17 @@ void os::Bsd::initialize_system_info() {
* since it returns a 64 bit value)
*/
mib[0] = CTL_HW;
#if defined (HW_MEMSIZE) // Apple
mib[1] = HW_MEMSIZE;
#elif defined(HW_PHYSMEM) // Most of BSD
mib[1] = HW_PHYSMEM;
#elif defined(HW_REALMEM) // Old FreeBSD
mib[1] = HW_REALMEM;
#else
#error No ways to get physmem
#endif
len = sizeof(mem_val);
if (sysctl(mib, 2, &mem_val, &len, NULL, 0) != -1) {
assert(len == sizeof(mem_val), "unexpected data size");
@ -679,18 +691,12 @@ static void *java_start(Thread *thread) {
return NULL;
}
#ifdef __APPLE__
// thread_id is mach thread on macos, which pthreads graciously caches and provides for us
mach_port_t thread_id = ::pthread_mach_thread_np(::pthread_self());
guarantee(thread_id != 0, "thread id missing from pthreads");
osthread->set_thread_id(thread_id);
osthread->set_thread_id(os::Bsd::gettid());
uint64_t unique_thread_id = locate_unique_thread_id(thread_id);
#ifdef __APPLE__
uint64_t unique_thread_id = locate_unique_thread_id(osthread->thread_id());
guarantee(unique_thread_id != 0, "unique thread id was not found");
osthread->set_unique_thread_id(unique_thread_id);
#else
// thread_id is pthread_id on BSD
osthread->set_thread_id(::pthread_self());
#endif
// initialize signal mask for this thread
os::Bsd::hotspot_sigmask(thread);
@ -847,18 +853,13 @@ bool os::create_attached_thread(JavaThread* thread) {
return false;
}
osthread->set_thread_id(os::Bsd::gettid());
// Store pthread info into the OSThread
#ifdef __APPLE__
// thread_id is mach thread on macos, which pthreads graciously caches and provides for us
mach_port_t thread_id = ::pthread_mach_thread_np(::pthread_self());
guarantee(thread_id != 0, "just checking");
osthread->set_thread_id(thread_id);
uint64_t unique_thread_id = locate_unique_thread_id(thread_id);
uint64_t unique_thread_id = locate_unique_thread_id(osthread->thread_id());
guarantee(unique_thread_id != 0, "just checking");
osthread->set_unique_thread_id(unique_thread_id);
#else
osthread->set_thread_id(::pthread_self());
#endif
osthread->set_pthread_id(::pthread_self());
@ -1125,6 +1126,30 @@ size_t os::lasterror(char *buf, size_t len) {
return n;
}
// Information of current thread in variety of formats
pid_t os::Bsd::gettid() {
int retval = -1;
#ifdef __APPLE__ //XNU kernel
// despite the fact mach port is actually not a thread id use it
// instead of syscall(SYS_thread_selfid) as it certainly fits to u4
retval = ::pthread_mach_thread_np(::pthread_self());
guarantee(retval != 0, "just checking");
return retval;
#elif __FreeBSD__
retval = syscall(SYS_thr_self);
#elif __OpenBSD__
retval = syscall(SYS_getthrid);
#elif __NetBSD__
retval = (pid_t) syscall(SYS__lwp_self);
#endif
if (retval == -1) {
return getpid();
}
}
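For comparison, the equivalent lookup on Linux, where the kernel thread id comes from SYS_gettid; this is not part of the BSD code above, just a hedged illustration of the same pattern on another platform.

#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>

/* Linux kernel thread id; equals getpid() for the initial thread. */
static pid_t linux_gettid(void) {
#ifdef SYS_gettid
  return (pid_t) syscall(SYS_gettid);
#else
  return getpid();
#endif
}

int main(void) {
  printf("pid=%ld tid=%ld\n", (long) getpid(), (long) linux_gettid());
  return 0;
}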
intx os::current_thread_id() {
#ifdef __APPLE__
return (intx)::pthread_mach_thread_np(::pthread_self());
@ -1132,6 +1157,7 @@ intx os::current_thread_id() {
return (intx)::pthread_self();
#endif
}
int os::current_process_id() {
// Under the old bsd thread library, bsd gives each thread
@ -1904,7 +1930,7 @@ class Semaphore : public StackObj {
bool timedwait(unsigned int sec, int nsec);
private:
jlong currenttime() const;
semaphore_t _semaphore;
os_semaphore_t _semaphore;
};
Semaphore::Semaphore() : _semaphore(0) {
@ -1972,7 +1998,7 @@ bool Semaphore::trywait() {
bool Semaphore::timedwait(unsigned int sec, int nsec) {
struct timespec ts;
jlong endtime = unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
while (1) {
int result = sem_timedwait(&_semaphore, &ts);
View file
@ -84,6 +84,7 @@ class Bsd {
static void hotspot_sigmask(Thread* thread);
static bool is_initial_thread(void);
static pid_t gettid();
static int page_size(void) { return _page_size; }
static void set_page_size(int val) { _page_size = val; }
View file
@ -53,7 +53,7 @@
// Defines Linux-specific default values. The flags are available on all
// platforms, but they may have different default values on other platforms.
//
define_pd_global(bool, UseLargePages, true);
define_pd_global(bool, UseLargePages, false);
define_pd_global(bool, UseLargePagesIndividualAllocation, false);
define_pd_global(bool, UseOSErrorReporting, false);
define_pd_global(bool, UseThreadPriorities, true) ;
View file
@ -3361,13 +3361,15 @@ bool os::Linux::setup_large_page_type(size_t page_size) {
if (FLAG_IS_DEFAULT(UseHugeTLBFS) &&
FLAG_IS_DEFAULT(UseSHM) &&
FLAG_IS_DEFAULT(UseTransparentHugePages)) {
// If UseLargePages is specified on the command line try all methods,
// if it's default, then try only UseTransparentHugePages.
if (FLAG_IS_DEFAULT(UseLargePages)) {
UseTransparentHugePages = true;
} else {
UseHugeTLBFS = UseTransparentHugePages = UseSHM = true;
}
// The type of large pages has not been specified by the user.
// Try UseHugeTLBFS and then UseSHM.
UseHugeTLBFS = UseSHM = true;
// Don't try UseTransparentHugePages since there are known
// performance issues with it turned on. This might change in the future.
UseTransparentHugePages = false;
}
if (UseTransparentHugePages) {
@ -3393,9 +3395,19 @@ bool os::Linux::setup_large_page_type(size_t page_size) {
}
void os::large_page_init() {
if (!UseLargePages) {
UseHugeTLBFS = false;
if (!UseLargePages &&
!UseTransparentHugePages &&
!UseHugeTLBFS &&
!UseSHM) {
// Not using large pages.
return;
}
if (!FLAG_IS_DEFAULT(UseLargePages) && !UseLargePages) {
// The user explicitly turned off large pages.
// Ignore the rest of the large pages flags.
UseTransparentHugePages = false;
UseHugeTLBFS = false;
UseSHM = false;
return;
}
View file
@ -4080,8 +4080,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
// Generate any default methods - default methods are interface methods
// that have a default implementation. This is new with Lambda project.
if (has_default_methods && !access_flags.is_interface() &&
local_interfaces->length() > 0) {
if (has_default_methods && !access_flags.is_interface() ) {
DefaultMethods::generate_default_methods(
this_klass(), &all_mirandas, CHECK_(nullHandle));
}
View file
@ -345,7 +345,6 @@ class MethodFamily : public ResourceObj {
}
Symbol* generate_no_defaults_message(TRAPS) const;
Symbol* generate_abstract_method_message(Method* method, TRAPS) const;
Symbol* generate_conflicts_message(GrowableArray<Method*>* methods, TRAPS) const;
public:
@ -404,20 +403,19 @@ class MethodFamily : public ResourceObj {
_exception_message = generate_no_defaults_message(CHECK);
_exception_name = vmSymbols::java_lang_AbstractMethodError();
} else if (qualified_methods.length() == 1) {
// leave abstract methods alone, they will be found via normal search path
Method* method = qualified_methods.at(0);
if (method->is_abstract()) {
_exception_message = generate_abstract_method_message(method, CHECK);
_exception_name = vmSymbols::java_lang_AbstractMethodError();
} else {
if (!method->is_abstract()) {
_selected_target = qualified_methods.at(0);
}
} else {
_exception_message = generate_conflicts_message(&qualified_methods,CHECK);
_exception_name = vmSymbols::java_lang_IncompatibleClassChangeError();
if (TraceDefaultMethods) {
_exception_message->print_value_on(tty);
tty->print_cr("");
}
}
assert((has_target() ^ throws_exception()) == 1,
"One and only one must be true");
}
bool contains_signature(Symbol* query) {
@ -475,20 +473,6 @@ Symbol* MethodFamily::generate_no_defaults_message(TRAPS) const {
return SymbolTable::new_symbol("No qualifying defaults found", CHECK_NULL);
}
Symbol* MethodFamily::generate_abstract_method_message(Method* method, TRAPS) const {
Symbol* klass = method->klass_name();
Symbol* name = method->name();
Symbol* sig = method->signature();
stringStream ss;
ss.print("Method ");
ss.write((const char*)klass->bytes(), klass->utf8_length());
ss.print(".");
ss.write((const char*)name->bytes(), name->utf8_length());
ss.write((const char*)sig->bytes(), sig->utf8_length());
ss.print(" is abstract");
return SymbolTable::new_symbol(ss.base(), (int)ss.size(), CHECK_NULL);
}
Symbol* MethodFamily::generate_conflicts_message(GrowableArray<Method*>* methods, TRAPS) const {
stringStream ss;
ss.print("Conflicting default methods:");
@ -595,6 +579,18 @@ class EmptyVtableSlot : public ResourceObj {
#endif // ndef PRODUCT
};
static bool already_in_vtable_slots(GrowableArray<EmptyVtableSlot*>* slots, Method* m) {
bool found = false;
for (int j = 0; j < slots->length(); ++j) {
if (slots->at(j)->name() == m->name() &&
slots->at(j)->signature() == m->signature() ) {
found = true;
break;
}
}
return found;
}
static GrowableArray<EmptyVtableSlot*>* find_empty_vtable_slots(
InstanceKlass* klass, GrowableArray<Method*>* mirandas, TRAPS) {
@ -604,8 +600,10 @@ static GrowableArray<EmptyVtableSlot*>* find_empty_vtable_slots(
// All miranda methods are obvious candidates
for (int i = 0; i < mirandas->length(); ++i) {
EmptyVtableSlot* slot = new EmptyVtableSlot(mirandas->at(i));
slots->append(slot);
Method* m = mirandas->at(i);
if (!already_in_vtable_slots(slots, m)) {
slots->append(new EmptyVtableSlot(m));
}
}
// Also any overpasses in our superclasses, that we haven't implemented.
@ -621,7 +619,26 @@ static GrowableArray<EmptyVtableSlot*>* find_empty_vtable_slots(
// unless we have a real implementation of it in the current class.
Method* impl = klass->lookup_method(m->name(), m->signature());
if (impl == NULL || impl->is_overpass()) {
slots->append(new EmptyVtableSlot(m));
if (!already_in_vtable_slots(slots, m)) {
slots->append(new EmptyVtableSlot(m));
}
}
}
}
// also any default methods in our superclasses
if (super->default_methods() != NULL) {
for (int i = 0; i < super->default_methods()->length(); ++i) {
Method* m = super->default_methods()->at(i);
// m is a method that would have been a miranda if not for the
// default method processing that occurred on behalf of our superclass,
// so it's a method we want to re-examine in this new context. That is,
// unless we have a real implementation of it in the current class.
Method* impl = klass->lookup_method(m->name(), m->signature());
if (impl == NULL || impl->is_overpass()) {
if (!already_in_vtable_slots(slots, m)) {
slots->append(new EmptyVtableSlot(m));
}
}
}
}
@ -679,7 +696,7 @@ class FindMethodsByErasedSig : public HierarchyVisitor<FindMethodsByErasedSig> {
// private interface methods are not candidates for default methods
// invokespecial to private interface methods doesn't use default method logic
// future: take access controls into account for superclass methods
if (m != NULL && (!iklass->is_interface() || m->is_public())) {
if (m != NULL && !m->is_static() && (!iklass->is_interface() || m->is_public())) {
if (_family == NULL) {
_family = new StatefulMethodFamily();
}
@ -700,7 +717,7 @@ class FindMethodsByErasedSig : public HierarchyVisitor<FindMethodsByErasedSig> {
static void create_overpasses(
static void create_defaults_and_exceptions(
GrowableArray<EmptyVtableSlot*>* slots, InstanceKlass* klass, TRAPS);
static void generate_erased_defaults(
@ -721,6 +738,8 @@ static void generate_erased_defaults(
static void merge_in_new_methods(InstanceKlass* klass,
GrowableArray<Method*>* new_methods, TRAPS);
static void create_default_methods( InstanceKlass* klass,
GrowableArray<Method*>* new_methods, TRAPS);
// This is the guts of the default methods implementation. This is called just
// after the classfile has been parsed if some ancestor has default methods.
@ -782,7 +801,7 @@ void DefaultMethods::generate_default_methods(
}
#endif // ndef PRODUCT
create_overpasses(empty_slots, klass, CHECK);
create_defaults_and_exceptions(empty_slots, klass, CHECK);
#ifndef PRODUCT
if (TraceDefaultMethods) {
@ -791,66 +810,6 @@ void DefaultMethods::generate_default_methods(
#endif // ndef PRODUCT
}
#ifdef ASSERT
// Return true if broad type is a covariant return of narrow type
static bool covariant_return_type(BasicType narrow, BasicType broad) {
if (narrow == broad) {
return true;
}
if (broad == T_OBJECT) {
return true;
}
return false;
}
#endif
static int assemble_redirect(
BytecodeConstantPool* cp, BytecodeBuffer* buffer,
Symbol* incoming, Method* target, TRAPS) {
BytecodeAssembler assem(buffer, cp);
SignatureStream in(incoming, true);
SignatureStream out(target->signature(), true);
u2 parameter_count = 0;
assem.aload(parameter_count++); // load 'this'
while (!in.at_return_type()) {
assert(!out.at_return_type(), "Parameter counts do not match");
BasicType bt = in.type();
assert(out.type() == bt, "Parameter types are not compatible");
assem.load(bt, parameter_count);
if (in.is_object() && in.as_symbol(THREAD) != out.as_symbol(THREAD)) {
assem.checkcast(out.as_symbol(THREAD));
} else if (bt == T_LONG || bt == T_DOUBLE) {
++parameter_count; // longs and doubles use two slots
}
++parameter_count;
in.next();
out.next();
}
assert(out.at_return_type(), "Parameter counts do not match");
assert(covariant_return_type(out.type(), in.type()), "Return types are not compatible");
if (parameter_count == 1 && (in.type() == T_LONG || in.type() == T_DOUBLE)) {
++parameter_count; // need room for return value
}
if (target->method_holder()->is_interface()) {
assem.invokespecial(target);
} else {
assem.invokevirtual(target);
}
if (in.is_object() && in.as_symbol(THREAD) != out.as_symbol(THREAD)) {
assem.checkcast(in.as_symbol(THREAD));
}
assem._return(in.type());
return parameter_count;
}
static int assemble_method_error(
BytecodeConstantPool* cp, BytecodeBuffer* buffer, Symbol* errorName, Symbol* message, TRAPS) {
@ -924,18 +883,18 @@ static void switchover_constant_pool(BytecodeConstantPool* bpool,
}
}
// A "bridge" is a method created by javac to bridge the gap between
// an implementation and a generically-compatible, but different, signature.
// Bridges have actual bytecode implementation in classfiles.
// An "overpass", on the other hand, performs the same function as a bridge
// but does not occur in a classfile; the VM creates the overpass itself
// when it needs a path from a call site to a default method and no
// bridge exists.
static void create_overpasses(
// Create default_methods list for the current class.
// Since the VM processes only erased signatures, it creates an overpass
// only in a conflict case or in a case with no candidates.
// This allows virtual methods to override the overpass, but ensures
// that a local method search will find the exception rather than an abstract
// or default method that is not a valid candidate.
static void create_defaults_and_exceptions(
GrowableArray<EmptyVtableSlot*>* slots,
InstanceKlass* klass, TRAPS) {
GrowableArray<Method*> overpasses;
GrowableArray<Method*> defaults;
BytecodeConstantPool bpool(klass->constants());
for (int i = 0; i < slots->length(); ++i) {
@ -943,7 +902,6 @@ static void create_overpasses(
if (slot->is_bound()) {
MethodFamily* method = slot->get_binding();
int max_stack = 0;
BytecodeBuffer buffer;
#ifndef PRODUCT
@ -953,26 +911,27 @@ static void create_overpasses(
tty->print_cr("");
if (method->has_target()) {
method->print_selected(tty, 1);
} else {
} else if (method->throws_exception()) {
method->print_exception(tty, 1);
}
}
#endif // ndef PRODUCT
if (method->has_target()) {
Method* selected = method->get_selected_target();
if (selected->method_holder()->is_interface()) {
max_stack = assemble_redirect(
&bpool, &buffer, slot->signature(), selected, CHECK);
defaults.push(selected);
}
} else if (method->throws_exception()) {
max_stack = assemble_method_error(&bpool, &buffer, method->get_exception_name(), method->get_exception_message(), CHECK);
}
if (max_stack != 0) {
int max_stack = assemble_method_error(&bpool, &buffer,
method->get_exception_name(), method->get_exception_message(), CHECK);
AccessFlags flags = accessFlags_from(
JVM_ACC_PUBLIC | JVM_ACC_SYNTHETIC | JVM_ACC_BRIDGE);
Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(),
Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(),
flags, max_stack, slot->size_of_parameters(),
ConstMethod::OVERPASS, CHECK);
// We push only the exception-throwing overpass methods to the methods list.
if (m != NULL) {
overpasses.push(m);
}
@ -983,11 +942,31 @@ static void create_overpasses(
#ifndef PRODUCT
if (TraceDefaultMethods) {
tty->print_cr("Created %d overpass methods", overpasses.length());
tty->print_cr("Created %d default methods", defaults.length());
}
#endif // ndef PRODUCT
switchover_constant_pool(&bpool, klass, &overpasses, CHECK);
merge_in_new_methods(klass, &overpasses, CHECK);
if (overpasses.length() > 0) {
switchover_constant_pool(&bpool, klass, &overpasses, CHECK);
merge_in_new_methods(klass, &overpasses, CHECK);
}
if (defaults.length() > 0) {
create_default_methods(klass, &defaults, CHECK);
}
}
static void create_default_methods( InstanceKlass* klass,
GrowableArray<Method*>* new_methods, TRAPS) {
int new_size = new_methods->length();
Array<Method*>* total_default_methods = MetadataFactory::new_array<Method*>(
klass->class_loader_data(), new_size, NULL, CHECK);
for (int index = 0; index < new_size; index++ ) {
total_default_methods->at_put(index, new_methods->at(index));
}
Method::sort_methods(total_default_methods, false, false);
klass->set_default_methods(total_default_methods);
}
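The default_methods array built here is later consulted together with a parallel default_vtable_indices array (see the linkResolver changes further down). A rough sketch of that pairing, with the helper name and lookup shape assumed rather than taken from this change:
// Hypothetical sketch only: element i of default_vtable_indices() corresponds
// to element i of default_methods(); this is not the actual InstanceKlass code.
static int default_method_vtable_index(InstanceKlass* ik, Symbol* name, Symbol* sig) {
  Array<Method*>* defaults = ik->default_methods();
  if (defaults == NULL) {
    return Method::invalid_vtable_index;
  }
  for (int i = 0; i < defaults->length(); ++i) {
    Method* m = defaults->at(i);
    if (m->name() == name && m->signature() == sig) {
      return ik->default_vtable_indices()->at(i);
    }
  }
  return Method::invalid_vtable_index;
}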
static void sort_methods(GrowableArray<Method*>* methods) {

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "classfile/dictionary.hpp"
#include "classfile/systemDictionary.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "utilities/hashtable.inline.hpp"
@ -38,17 +39,21 @@ Dictionary::Dictionary(int table_size)
: TwoOopHashtable<Klass*, mtClass>(table_size, sizeof(DictionaryEntry)) {
_current_class_index = 0;
_current_class_entry = NULL;
_pd_cache_table = new ProtectionDomainCacheTable(defaultProtectionDomainCacheSize);
};
Dictionary::Dictionary(int table_size, HashtableBucket<mtClass>* t,
int number_of_entries)
: TwoOopHashtable<Klass*, mtClass>(table_size, sizeof(DictionaryEntry), t, number_of_entries) {
_current_class_index = 0;
_current_class_entry = NULL;
_pd_cache_table = new ProtectionDomainCacheTable(defaultProtectionDomainCacheSize);
};
ProtectionDomainCacheEntry* Dictionary::cache_get(oop protection_domain) {
return _pd_cache_table->get(protection_domain);
}
DictionaryEntry* Dictionary::new_entry(unsigned int hash, Klass* klass,
ClassLoaderData* loader_data) {
@ -105,11 +110,12 @@ bool DictionaryEntry::contains_protection_domain(oop protection_domain) const {
}
void DictionaryEntry::add_protection_domain(oop protection_domain) {
void DictionaryEntry::add_protection_domain(Dictionary* dict, oop protection_domain) {
assert_locked_or_safepoint(SystemDictionary_lock);
if (!contains_protection_domain(protection_domain)) {
ProtectionDomainCacheEntry* entry = dict->cache_get(protection_domain);
ProtectionDomainEntry* new_head =
new ProtectionDomainEntry(protection_domain, _pd_set);
new ProtectionDomainEntry(entry, _pd_set);
// Warning: Preserve store ordering. The SystemDictionary is read
// without locks. The new ProtectionDomainEntry must be
// complete before other threads can be allowed to see it
@ -193,7 +199,10 @@ bool Dictionary::do_unloading() {
void Dictionary::always_strong_oops_do(OopClosure* blk) {
// Follow all system classes and temporary placeholders in dictionary
// Follow all system classes and temporary placeholders in dictionary; only
// protection domain oops contain references into the heap. In a first
// pass over the system dictionary, determine which need to be treated as
// strongly reachable and mark them as such.
for (int index = 0; index < table_size(); index++) {
for (DictionaryEntry *probe = bucket(index);
probe != NULL;
@ -201,10 +210,13 @@ void Dictionary::always_strong_oops_do(OopClosure* blk) {
Klass* e = probe->klass();
ClassLoaderData* loader_data = probe->loader_data();
if (is_strongly_reachable(loader_data, e)) {
probe->protection_domain_set_oops_do(blk);
probe->set_strongly_reachable();
}
}
}
// Then iterate over the protection domain cache to apply the closure on the
// previously marked ones.
_pd_cache_table->always_strong_oops_do(blk);
}
@ -266,18 +278,12 @@ void Dictionary::classes_do(void f(Klass*, ClassLoaderData*)) {
}
}
void Dictionary::oops_do(OopClosure* f) {
for (int index = 0; index < table_size(); index++) {
for (DictionaryEntry* probe = bucket(index);
probe != NULL;
probe = probe->next()) {
probe->protection_domain_set_oops_do(f);
}
}
// Only the protection domain oops contain references into the heap. Iterate
// over all of them.
_pd_cache_table->oops_do(f);
}
void Dictionary::methods_do(void f(Method*)) {
for (int index = 0; index < table_size(); index++) {
for (DictionaryEntry* probe = bucket(index);
@ -292,6 +298,11 @@ void Dictionary::methods_do(void f(Method*)) {
}
}
void Dictionary::unlink(BoolObjectClosure* is_alive) {
// Only the protection domain cache table may contain references to the heap
// that need to be unlinked.
_pd_cache_table->unlink(is_alive);
}
Klass* Dictionary::try_get_next_class() {
while (true) {
@ -306,7 +317,6 @@ Klass* Dictionary::try_get_next_class() {
// never reached
}
// Add a loaded class to the system dictionary.
// Readers of the SystemDictionary aren't always locked, so _buckets
// is volatile. The store of the next field in the constructor is
@ -396,7 +406,7 @@ void Dictionary::add_protection_domain(int index, unsigned int hash,
assert(protection_domain() != NULL,
"real protection domain should be present");
entry->add_protection_domain(protection_domain());
entry->add_protection_domain(this, protection_domain());
assert(entry->contains_protection_domain(protection_domain()),
"now protection domain should be present");
@ -446,6 +456,146 @@ void Dictionary::reorder_dictionary() {
}
}
ProtectionDomainCacheTable::ProtectionDomainCacheTable(int table_size)
: Hashtable<oop, mtClass>(table_size, sizeof(ProtectionDomainCacheEntry))
{
}
void ProtectionDomainCacheTable::unlink(BoolObjectClosure* is_alive) {
assert(SafepointSynchronize::is_at_safepoint(), "must be");
for (int i = 0; i < table_size(); ++i) {
ProtectionDomainCacheEntry** p = bucket_addr(i);
ProtectionDomainCacheEntry* entry = bucket(i);
while (entry != NULL) {
if (is_alive->do_object_b(entry->literal())) {
p = entry->next_addr();
} else {
*p = entry->next();
free_entry(entry);
}
entry = *p;
}
}
}
void ProtectionDomainCacheTable::oops_do(OopClosure* f) {
for (int index = 0; index < table_size(); index++) {
for (ProtectionDomainCacheEntry* probe = bucket(index);
probe != NULL;
probe = probe->next()) {
probe->oops_do(f);
}
}
}
uint ProtectionDomainCacheTable::bucket_size() {
return sizeof(ProtectionDomainCacheEntry);
}
#ifndef PRODUCT
void ProtectionDomainCacheTable::print() {
tty->print_cr("Protection domain cache table (table_size=%d, classes=%d)",
table_size(), number_of_entries());
for (int index = 0; index < table_size(); index++) {
for (ProtectionDomainCacheEntry* probe = bucket(index);
probe != NULL;
probe = probe->next()) {
probe->print();
}
}
}
void ProtectionDomainCacheEntry::print() {
tty->print_cr("entry "PTR_FORMAT" value "PTR_FORMAT" strongly_reachable %d next "PTR_FORMAT,
this, (void*)literal(), _strongly_reachable, next());
}
#endif
void ProtectionDomainCacheTable::verify() {
int element_count = 0;
for (int index = 0; index < table_size(); index++) {
for (ProtectionDomainCacheEntry* probe = bucket(index);
probe != NULL;
probe = probe->next()) {
probe->verify();
element_count++;
}
}
guarantee(number_of_entries() == element_count,
"Verify of protection domain cache table failed");
debug_only(verify_lookup_length((double)number_of_entries() / table_size()));
}
void ProtectionDomainCacheEntry::verify() {
guarantee(literal()->is_oop(), "must be an oop");
}
void ProtectionDomainCacheTable::always_strong_oops_do(OopClosure* f) {
// The caller has marked the protection domain cache entries that we need to
// apply the closure to. Only process those.
for (int index = 0; index < table_size(); index++) {
for (ProtectionDomainCacheEntry* probe = bucket(index);
probe != NULL;
probe = probe->next()) {
if (probe->is_strongly_reachable()) {
probe->reset_strongly_reachable();
probe->oops_do(f);
}
}
}
}
ProtectionDomainCacheEntry* ProtectionDomainCacheTable::get(oop protection_domain) {
unsigned int hash = compute_hash(protection_domain);
int index = hash_to_index(hash);
ProtectionDomainCacheEntry* entry = find_entry(index, protection_domain);
if (entry == NULL) {
entry = add_entry(index, hash, protection_domain);
}
return entry;
}
ProtectionDomainCacheEntry* ProtectionDomainCacheTable::find_entry(int index, oop protection_domain) {
for (ProtectionDomainCacheEntry* e = bucket(index); e != NULL; e = e->next()) {
if (e->protection_domain() == protection_domain) {
return e;
}
}
return NULL;
}
ProtectionDomainCacheEntry* ProtectionDomainCacheTable::add_entry(int index, unsigned int hash, oop protection_domain) {
assert_locked_or_safepoint(SystemDictionary_lock);
assert(index == index_for(protection_domain), "incorrect index?");
assert(find_entry(index, protection_domain) == NULL, "no double entry");
ProtectionDomainCacheEntry* p = new_entry(hash, protection_domain);
Hashtable<oop, mtClass>::add_entry(index, p);
return p;
}
void ProtectionDomainCacheTable::free(ProtectionDomainCacheEntry* to_delete) {
unsigned int hash = compute_hash(to_delete->protection_domain());
int index = hash_to_index(hash);
ProtectionDomainCacheEntry** p = bucket_addr(index);
ProtectionDomainCacheEntry* entry = bucket(index);
while (true) {
assert(entry != NULL, "sanity");
if (entry == to_delete) {
*p = entry->next();
Hashtable<oop, mtClass>::free_entry(entry);
break;
} else {
p = entry->next_addr();
entry = *p;
}
}
}
SymbolPropertyTable::SymbolPropertyTable(int table_size)
: Hashtable<Symbol*, mtSymbol>(table_size, sizeof(SymbolPropertyEntry))
{
@ -532,11 +682,13 @@ void Dictionary::print() {
tty->cr();
}
}
tty->cr();
_pd_cache_table->print();
tty->cr();
}
#endif
void Dictionary::verify() {
guarantee(number_of_entries() >= 0, "Verify of system dictionary failed");
@ -563,5 +715,7 @@ void Dictionary::verify() {
guarantee(number_of_entries() == element_count,
"Verify of system dictionary failed");
debug_only(verify_lookup_length((double)number_of_entries() / table_size()));
_pd_cache_table->verify();
}

View file

@ -27,11 +27,14 @@
#include "classfile/systemDictionary.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oop.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/hashtable.hpp"
class DictionaryEntry;
class PSPromotionManager;
class ProtectionDomainCacheTable;
class ProtectionDomainCacheEntry;
class BoolObjectClosure;
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// The data structure for the system dictionary (and the shared system
@ -45,6 +48,8 @@ private:
// pointer to the current hash table entry.
static DictionaryEntry* _current_class_entry;
ProtectionDomainCacheTable* _pd_cache_table;
DictionaryEntry* get_entry(int index, unsigned int hash,
Symbol* name, ClassLoaderData* loader_data);
@ -93,6 +98,7 @@ public:
void methods_do(void f(Method*));
void unlink(BoolObjectClosure* is_alive);
// Classes loaded by the bootstrap loader are always strongly reachable.
// If we're not doing class unloading, all classes are strongly reachable.
@ -118,6 +124,7 @@ public:
// Sharing support
void reorder_dictionary();
ProtectionDomainCacheEntry* cache_get(oop protection_domain);
#ifndef PRODUCT
void print();
@ -126,21 +133,112 @@ public:
};
// The following classes can be in dictionary.cpp, but we need these
// to be in header file so that SA's vmStructs can access.
// to be in header file so that SA's vmStructs can access them.
class ProtectionDomainCacheEntry : public HashtableEntry<oop, mtClass> {
friend class VMStructs;
private:
// Flag indicating whether this protection domain entry is strongly reachable.
// Used while iterating over the system dictionary to remember oops that need
// to be updated.
bool _strongly_reachable;
public:
oop protection_domain() { return literal(); }
void init() {
_strongly_reachable = false;
}
ProtectionDomainCacheEntry* next() {
return (ProtectionDomainCacheEntry*)HashtableEntry<oop, mtClass>::next();
}
ProtectionDomainCacheEntry** next_addr() {
return (ProtectionDomainCacheEntry**)HashtableEntry<oop, mtClass>::next_addr();
}
void oops_do(OopClosure* f) {
f->do_oop(literal_addr());
}
void set_strongly_reachable() { _strongly_reachable = true; }
bool is_strongly_reachable() { return _strongly_reachable; }
void reset_strongly_reachable() { _strongly_reachable = false; }
void print() PRODUCT_RETURN;
void verify();
};
// The ProtectionDomainCacheTable contains all protection domain oops. The system
// dictionary entries reference its entries instead of having references to oops
// directly.
// This is used to speed up system dictionary iteration: the protection domain
// oops are the only ones referring to the Java heap, so when they need to be
// updated we only have to iterate over this set rather than over every entry
// of the system dictionary.
// The number of distinct protection domains in use is typically orders of
// magnitude smaller than the number of system dictionary entries (loaded classes).
class ProtectionDomainCacheTable : public Hashtable<oop, mtClass> {
friend class VMStructs;
private:
ProtectionDomainCacheEntry* bucket(int i) {
return (ProtectionDomainCacheEntry*) Hashtable<oop, mtClass>::bucket(i);
}
// The following method is not MT-safe and must be called under a lock.
ProtectionDomainCacheEntry** bucket_addr(int i) {
return (ProtectionDomainCacheEntry**) Hashtable<oop, mtClass>::bucket_addr(i);
}
ProtectionDomainCacheEntry* new_entry(unsigned int hash, oop protection_domain) {
ProtectionDomainCacheEntry* entry = (ProtectionDomainCacheEntry*) Hashtable<oop, mtClass>::new_entry(hash, protection_domain);
entry->init();
return entry;
}
static unsigned int compute_hash(oop protection_domain) {
return (unsigned int)(protection_domain->identity_hash());
}
int index_for(oop protection_domain) {
return hash_to_index(compute_hash(protection_domain));
}
ProtectionDomainCacheEntry* add_entry(int index, unsigned int hash, oop protection_domain);
ProtectionDomainCacheEntry* find_entry(int index, oop protection_domain);
public:
ProtectionDomainCacheTable(int table_size);
ProtectionDomainCacheEntry* get(oop protection_domain);
void free(ProtectionDomainCacheEntry* entry);
void unlink(BoolObjectClosure* cl);
// GC support
void oops_do(OopClosure* f);
void always_strong_oops_do(OopClosure* f);
static uint bucket_size();
void print() PRODUCT_RETURN;
void verify();
};
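As a rough usage sketch (the surrounding setup is assumed; this is not a VM code path): two dictionary entries recording the same protection domain end up sharing one cache entry, which is why GC only has to visit the cache table.
// Hypothetical illustration only.
void share_protection_domain_example(Dictionary* dict, oop pd) {
  ProtectionDomainCacheEntry* first  = dict->cache_get(pd);  // created on first use
  ProtectionDomainCacheEntry* second = dict->cache_get(pd);  // same entry returned
  assert(first == second, "one cache entry per distinct protection domain");
  // DictionaryEntry::_pd_set now links to such cache entries instead of holding
  // the oop directly, so Dictionary::oops_do only walks _pd_cache_table.
}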
class ProtectionDomainEntry :public CHeapObj<mtClass> {
friend class VMStructs;
public:
ProtectionDomainEntry* _next;
oop _protection_domain;
ProtectionDomainCacheEntry* _pd_cache;
ProtectionDomainEntry(oop protection_domain, ProtectionDomainEntry* next) {
_protection_domain = protection_domain;
_next = next;
ProtectionDomainEntry(ProtectionDomainCacheEntry* pd_cache, ProtectionDomainEntry* next) {
_pd_cache = pd_cache;
_next = next;
}
ProtectionDomainEntry* next() { return _next; }
oop protection_domain() { return _protection_domain; }
oop protection_domain() { return _pd_cache->protection_domain(); }
};
// An entry in the system dictionary, this describes a class as
@ -151,6 +249,24 @@ class DictionaryEntry : public HashtableEntry<Klass*, mtClass> {
private:
// Contains the set of approved protection domains that can access
// this system dictionary entry.
//
// This protection domain set is a set of tuples:
//
// (InstanceKlass C, initiating class loader ICL, Protection Domain PD)
//
// [Note that C.protection_domain(), which is stored in the java.lang.Class
// mirror of C, is NOT the same as PD]
//
// If such an entry (C, ICL, PD) exists in the table, it means that
// it is okay for a class Foo to reference C, where
//
// Foo.protection_domain() == PD, and
// Foo's defining class loader == ICL
//
// The usage of the PD set can be seen in SystemDictionary::validate_protection_domain().
// It is essentially a cache to avoid repeated Java up-calls to
// ClassLoader.checkPackageAccess().
//
ProtectionDomainEntry* _pd_set;
ClassLoaderData* _loader_data;
@ -158,7 +274,7 @@ class DictionaryEntry : public HashtableEntry<Klass*, mtClass> {
// Tells whether a protection domain is in the approved set.
bool contains_protection_domain(oop protection_domain) const;
// Adds a protection domain to the approved set.
void add_protection_domain(oop protection_domain);
void add_protection_domain(Dictionary* dict, oop protection_domain);
Klass* klass() const { return (Klass*)literal(); }
Klass** klass_addr() { return (Klass**)literal_addr(); }
@ -189,12 +305,11 @@ class DictionaryEntry : public HashtableEntry<Klass*, mtClass> {
: contains_protection_domain(protection_domain());
}
void protection_domain_set_oops_do(OopClosure* f) {
void set_strongly_reachable() {
for (ProtectionDomainEntry* current = _pd_set;
current != NULL;
current = current->_next) {
f->do_oop(&(current->_protection_domain));
current->_pd_cache->set_strongly_reachable();
}
}
@ -202,7 +317,7 @@ class DictionaryEntry : public HashtableEntry<Klass*, mtClass> {
for (ProtectionDomainEntry* current = _pd_set;
current != NULL;
current = current->_next) {
current->_protection_domain->verify();
current->_pd_cache->protection_domain()->verify();
}
}

View file

@ -1376,8 +1376,15 @@ char* java_lang_Throwable::print_stack_element_to_buffer(Handle mirror,
const char* klass_name = holder->external_name();
int buf_len = (int)strlen(klass_name);
// pushing to the stack trace added one.
// The method id may point to an obsolete method, so we can't get more stack information.
Method* method = holder->method_with_idnum(method_id);
if (method == NULL) {
char* buf = NEW_RESOURCE_ARRAY(char, buf_len + 64);
// This is what the Java code prints in this case - we append "(Redefined)".
sprintf(buf, "\tat %s.null (Redefined)", klass_name);
return buf;
}
char* method_name = method->name()->as_C_string();
buf_len += (int)strlen(method_name);
@ -1773,7 +1780,8 @@ oop java_lang_Throwable::get_stack_trace_element(oop throwable, int index, TRAPS
return element;
}
oop java_lang_StackTraceElement::create(Handle mirror, int method_id, int version, int bci, TRAPS) {
oop java_lang_StackTraceElement::create(Handle mirror, int method_id,
int version, int bci, TRAPS) {
// Allocate java.lang.StackTraceElement instance
Klass* k = SystemDictionary::StackTraceElement_klass();
assert(k != NULL, "must be loaded in 1.4+");
@ -1790,8 +1798,16 @@ oop java_lang_StackTraceElement::create(Handle mirror, int method_id, int versio
oop classname = StringTable::intern((char*) str, CHECK_0);
java_lang_StackTraceElement::set_declaringClass(element(), classname);
// Fill in method name
Method* method = holder->method_with_idnum(method_id);
// The method on the stack may be obsolete because it was redefined, so it
// cannot be found by idnum.
if (method == NULL) {
// leave name and fileName null
java_lang_StackTraceElement::set_lineNumber(element(), -1);
return element();
}
// Fill in method name
oop methodname = StringTable::intern(method->name(), CHECK_0);
java_lang_StackTraceElement::set_methodName(element(), methodname);

View file

@ -107,18 +107,13 @@ private:
add(loader_data, cp, names_count, name, lengths, cp_indices, hashValues, THREAD);
}
// Table size
enum {
symbol_table_size = 20011
};
Symbol* lookup(int index, const char* name, int len, unsigned int hash);
SymbolTable()
: Hashtable<Symbol*, mtSymbol>(symbol_table_size, sizeof (HashtableEntry<Symbol*, mtSymbol>)) {}
: Hashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>)) {}
SymbolTable(HashtableBucket<mtSymbol>* t, int number_of_entries)
: Hashtable<Symbol*, mtSymbol>(symbol_table_size, sizeof (HashtableEntry<Symbol*, mtSymbol>), t,
: Hashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>), t,
number_of_entries) {}
// Arena for permanent symbols (null class loader) that are never unloaded
@ -136,6 +131,9 @@ public:
// The symbol table
static SymbolTable* the_table() { return _the_table; }
// Size of one bucket in the symbol table. Used when checking for rollover.
static uint bucket_size() { return sizeof(HashtableBucket<mtSymbol>); }
static void create_table() {
assert(_the_table == NULL, "One symbol table allowed.");
_the_table = new SymbolTable();
@ -145,8 +143,11 @@ public:
static void create_table(HashtableBucket<mtSymbol>* t, int length,
int number_of_entries) {
assert(_the_table == NULL, "One symbol table allowed.");
assert(length == symbol_table_size * sizeof(HashtableBucket<mtSymbol>),
"bad shared symbol size.");
// If the CDS archive used a different symbol table size, use that size instead,
// which is better than failing with an error.
SymbolTableSize = length/bucket_size();
_the_table = new SymbolTable(t, number_of_entries);
// If CDS is in use, give the symbol table a default arena size since most
// symbols are already allocated in the shared misc section.
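A hand-worked sketch of the size recovery above, assuming a 64-bit build where one HashtableBucket is a single 8-byte pointer (that size is an assumption, not stated by this change):
#include <cstdio>

int main() {
  const unsigned long bucket_bytes   = 8;      // assumed sizeof(HashtableBucket<mtSymbol>)
  const unsigned long dump_time_size = 20011;  // SymbolTableSize used when the archive was dumped
  unsigned long length   = dump_time_size * bucket_bytes;  // byte length recorded in the archive
  unsigned long restored = length / bucket_bytes;          // what create_table() recomputes
  printf("restored SymbolTableSize = %lu\n", restored);    // 20011, matching dump time
  return 0;
}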

View file

@ -1697,6 +1697,24 @@ int SystemDictionary::calculate_systemdictionary_size(int classcount) {
return newsize;
}
#ifdef ASSERT
class VerifySDReachableAndLiveClosure : public OopClosure {
private:
BoolObjectClosure* _is_alive;
template <class T> void do_oop_work(T* p) {
oop obj = oopDesc::load_decode_heap_oop(p);
guarantee(_is_alive->do_object_b(obj), "Oop in system dictionary must be live");
}
public:
VerifySDReachableAndLiveClosure(BoolObjectClosure* is_alive) : OopClosure(), _is_alive(is_alive) { }
virtual void do_oop(oop* p) { do_oop_work(p); }
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};
#endif
// Assumes classes in the SystemDictionary are only unloaded at a safepoint
// Note: anonymous classes are not in the SD.
bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
@ -1707,7 +1725,15 @@ bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
unloading_occurred = dictionary()->do_unloading();
constraints()->purge_loader_constraints();
resolution_errors()->purge_resolution_errors();
}
}
// Oops referenced by the system dictionary may become unreachable independently
// of the class loader (e.g. cached protection domain oops), so we need to
// unlink them here explicitly instead of in Dictionary::do_unloading.
dictionary()->unlink(is_alive);
#ifdef ASSERT
VerifySDReachableAndLiveClosure cl(is_alive);
dictionary()->oops_do(&cl);
#endif
return unloading_occurred;
}

View file

@ -2442,10 +2442,16 @@ void ClassVerifier::verify_invoke_instructions(
bool subtype = ref_class_type.is_assignable_from(
current_type(), this, CHECK_VERIFY(this));
if (!subtype) {
verify_error(ErrorContext::bad_code(bci),
"Bad invokespecial instruction: "
"current class isn't assignable to reference class.");
return;
if (current_class()->is_anonymous()) {
subtype = ref_class_type.is_assignable_from(VerificationType::reference_type(
current_class()->host_klass()->name()), this, CHECK_VERIFY(this));
}
if (!subtype) {
verify_error(ErrorContext::bad_code(bci),
"Bad invokespecial instruction: "
"current class isn't assignable to reference class.");
return;
}
}
}
// Match method descriptor with operand stack
@ -2461,7 +2467,28 @@ void ClassVerifier::verify_invoke_instructions(
} else { // other methods
// Ensures that target class is assignable to method class.
if (opcode == Bytecodes::_invokespecial) {
current_frame->pop_stack(current_type(), CHECK_VERIFY(this));
if (!current_class()->is_anonymous()) {
current_frame->pop_stack(current_type(), CHECK_VERIFY(this));
} else {
// anonymous class invokespecial calls: either the
// operand stack/objectref is a subtype of the current class OR
// the objectref is a subtype of the host_klass of the current class
// to allow an anonymous class to reference methods in the host_klass
VerificationType top = current_frame->pop_stack(CHECK_VERIFY(this));
bool subtype = current_type().is_assignable_from(top, this, CHECK_VERIFY(this));
if (!subtype) {
VerificationType hosttype =
VerificationType::reference_type(current_class()->host_klass()->name());
subtype = hosttype.is_assignable_from(top, this, CHECK_VERIFY(this));
}
if (!subtype) {
verify_error( ErrorContext::bad_type(current_frame->offset(),
current_frame->stack_top_ctx(),
TypeOrigin::implicit(top)),
"Bad type on operand stack");
return;
}
}
} else if (opcode == Bytecodes::_invokevirtual) {
VerificationType stack_object_type =
current_frame->pop_stack(ref_class_type, CHECK_VERIFY(this));

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -812,8 +812,8 @@ class ClassHierarchyWalker {
Klass* k = ctxk;
Method* lm = k->lookup_method(m->name(), m->signature());
if (lm == NULL && k->oop_is_instance()) {
// It might be an abstract interface method, devoid of mirandas.
lm = ((InstanceKlass*)k)->lookup_method_in_all_interfaces(m->name(),
// It might be an interface method
lm = ((InstanceKlass*)k)->lookup_method_in_ordered_interfaces(m->name(),
m->signature());
}
if (lm == m)

View file

@ -6035,7 +6035,11 @@ void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
// is dirty.
G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
MemRegion mr(hr->bottom(), hr->pre_dummy_top());
ct_bs->verify_dirty_region(mr);
if (hr->is_young()) {
ct_bs->verify_g1_young_region(mr);
} else {
ct_bs->verify_dirty_region(mr);
}
}
void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {

View file

@ -29,6 +29,7 @@
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "utilities/taskqueue.hpp"
@ -134,7 +135,7 @@ G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
assert(containing_hr->is_in(end - 1), "it should also contain end - 1");
MemRegion mr(start, end);
g1_barrier_set()->dirty(mr);
g1_barrier_set()->g1_mark_as_young(mr);
}
inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {

View file

@ -319,10 +319,10 @@ G1CollectorPolicy::G1CollectorPolicy() :
}
void G1CollectorPolicy::initialize_flags() {
set_min_alignment(HeapRegion::GrainBytes);
_min_alignment = HeapRegion::GrainBytes;
size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name());
size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
set_max_alignment(MAX3(card_table_alignment, min_alignment(), page_size));
_max_alignment = MAX3(card_table_alignment, _min_alignment, page_size);
if (SurvivorRatio < 1) {
vm_exit_during_initialization("Invalid survivor ratio specified");
}

View file

@ -70,6 +70,12 @@ bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
return false;
}
if (val == g1_young_gen) {
// The card is for a young gen region; we don't need to keep track of all pointers into the young gen.
return false;
}
// Cached bit can be installed either on a clean card or on a claimed card.
jbyte new_val = val;
if (val == clean_card_val()) {
@ -85,6 +91,19 @@ bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
return true;
}
void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
jbyte *const first = byte_for(mr.start());
jbyte *const last = byte_after(mr.last());
memset(first, g1_young_gen, last - first);
}
#ifndef PRODUCT
void G1SATBCardTableModRefBS::verify_g1_young_region(MemRegion mr) {
verify_region(mr, g1_young_gen, true);
}
#endif
G1SATBCardTableLoggingModRefBS::
G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
int max_covered_regions) :
@ -97,7 +116,11 @@ G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
void
G1SATBCardTableLoggingModRefBS::write_ref_field_work(void* field,
oop new_val) {
jbyte* byte = byte_for(field);
volatile jbyte* byte = byte_for(field);
if (*byte == g1_young_gen) {
return;
}
OrderAccess::storeload();
if (*byte != dirty_card) {
*byte = dirty_card;
Thread* thr = Thread::current();
@ -129,7 +152,7 @@ G1SATBCardTableLoggingModRefBS::write_ref_field_static(void* field,
void
G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr, bool whole_heap) {
jbyte* byte = byte_for(mr.start());
volatile jbyte* byte = byte_for(mr.start());
jbyte* last_byte = byte_for(mr.last());
Thread* thr = Thread::current();
if (whole_heap) {
@ -138,25 +161,35 @@ G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr, bool whole_heap) {
byte++;
}
} else {
// Enqueue if necessary.
if (thr->is_Java_thread()) {
JavaThread* jt = (JavaThread*)thr;
while (byte <= last_byte) {
if (*byte != dirty_card) {
*byte = dirty_card;
jt->dirty_card_queue().enqueue(byte);
// skip all consecutive young cards
for (; byte <= last_byte && *byte == g1_young_gen; byte++);
if (byte <= last_byte) {
OrderAccess::storeload();
// Enqueue if necessary.
if (thr->is_Java_thread()) {
JavaThread* jt = (JavaThread*)thr;
for (; byte <= last_byte; byte++) {
if (*byte == g1_young_gen) {
continue;
}
if (*byte != dirty_card) {
*byte = dirty_card;
jt->dirty_card_queue().enqueue(byte);
}
}
byte++;
}
} else {
MutexLockerEx x(Shared_DirtyCardQ_lock,
Mutex::_no_safepoint_check_flag);
while (byte <= last_byte) {
if (*byte != dirty_card) {
*byte = dirty_card;
_dcqs.shared_dirty_card_queue()->enqueue(byte);
} else {
MutexLockerEx x(Shared_DirtyCardQ_lock,
Mutex::_no_safepoint_check_flag);
for (; byte <= last_byte; byte++) {
if (*byte == g1_young_gen) {
continue;
}
if (*byte != dirty_card) {
*byte = dirty_card;
_dcqs.shared_dirty_card_queue()->enqueue(byte);
}
}
byte++;
}
}
}

View file

@ -38,7 +38,14 @@ class DirtyCardQueueSet;
// snapshot-at-the-beginning marking.
class G1SATBCardTableModRefBS: public CardTableModRefBSForCTRS {
protected:
enum G1CardValues {
g1_young_gen = CT_MR_BS_last_reserved << 1
};
public:
static int g1_young_card_val() { return g1_young_gen; }
// Add "pre_val" to a set of objects that may have been disconnected from the
// pre-marking object graph.
static void enqueue(oop pre_val);
@ -118,6 +125,9 @@ public:
_byte_map[card_index] = val;
}
void verify_g1_young_region(MemRegion mr) PRODUCT_RETURN;
void g1_mark_as_young(const MemRegion& mr);
bool mark_card_deferred(size_t card_index);
bool is_card_deferred(size_t card_index) {

View file

@ -80,6 +80,10 @@ public:
void reset() { if (_buf != NULL) _index = _sz; }
void enqueue(volatile void* ptr) {
enqueue((void*)(ptr));
}
// Enqueues the given "obj".
void enqueue(void* ptr) {
if (!_active) return;

View file

@ -214,9 +214,6 @@ class VM_CollectForMetadataAllocation: public VM_GC_Operation {
: VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
_loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
}
~VM_CollectForMetadataAllocation() {
MetaspaceGC::set_expand_after_GC(false);
}
virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; }
virtual void doit();
MetaWord* result() const { return _result; }

View file

@ -202,12 +202,6 @@ void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
ShouldNotReachHere(); // Unexpected use of this function
}
}
MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(
ClassLoaderData* loader_data,
size_t size, Metaspace::MetadataType mdtype) {
return collector_policy()->satisfy_failed_metadata_allocation(loader_data, size, mdtype);
}
void CollectedHeap::pre_initialize() {
// Used for ReduceInitialCardMarks (when COMPILER2 is used);

View file

@ -475,11 +475,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
// the context of the vm thread.
virtual void collect_as_vm_thread(GCCause::Cause cause);
// Callback from VM_CollectForMetadataAllocation operation.
MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
size_t size,
Metaspace::MetadataType mdtype);
// Returns the barrier set for this heap
BarrierSet* barrier_set() { return _barrier_set; }

View file

@ -1,4 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -221,8 +222,17 @@ void LinkResolver::resolve_klass(KlassHandle& result, constantPoolHandle pool, i
//
// According to JVM spec. $5.4.3c & $5.4.3d
// Look up method in klasses, including static methods
// Then look up local default methods
void LinkResolver::lookup_method_in_klasses(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) {
Method* result_oop = klass->uncached_lookup_method(name, signature);
if (result_oop == NULL) {
Array<Method*>* default_methods = InstanceKlass::cast(klass())->default_methods();
if (default_methods != NULL) {
result_oop = InstanceKlass::find_method(default_methods, name, signature);
}
}
if (EnableInvokeDynamic && result_oop != NULL) {
vmIntrinsics::ID iid = result_oop->intrinsic_id();
if (MethodHandles::is_signature_polymorphic(iid)) {
@ -234,6 +244,7 @@ void LinkResolver::lookup_method_in_klasses(methodHandle& result, KlassHandle kl
}
// returns first instance method
// Looks up method in classes, then looks up local default methods
void LinkResolver::lookup_instance_method_in_klasses(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) {
Method* result_oop = klass->uncached_lookup_method(name, signature);
result = methodHandle(THREAD, result_oop);
@ -241,13 +252,38 @@ void LinkResolver::lookup_instance_method_in_klasses(methodHandle& result, Klass
klass = KlassHandle(THREAD, result->method_holder()->super());
result = methodHandle(THREAD, klass->uncached_lookup_method(name, signature));
}
if (result.is_null()) {
Array<Method*>* default_methods = InstanceKlass::cast(klass())->default_methods();
if (default_methods != NULL) {
result = methodHandle(InstanceKlass::find_method(default_methods, name, signature));
assert(result.is_null() || !result->is_static(), "static defaults not allowed");
}
}
}
int LinkResolver::vtable_index_of_interface_method(KlassHandle klass,
methodHandle resolved_method, TRAPS) {
int LinkResolver::vtable_index_of_miranda_method(KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) {
ResourceMark rm(THREAD);
klassVtable *vt = InstanceKlass::cast(klass())->vtable();
return vt->index_of_miranda(name, signature);
int vtable_index = Method::invalid_vtable_index;
Symbol* name = resolved_method->name();
Symbol* signature = resolved_method->signature();
// First check in default method array
if (!resolved_method->is_abstract() &&
(InstanceKlass::cast(klass())->default_methods() != NULL)) {
int index = InstanceKlass::find_method_index(InstanceKlass::cast(klass())->default_methods(), name, signature);
if (index >= 0 ) {
vtable_index = InstanceKlass::cast(klass())->default_vtable_indices()->at(index);
}
}
if (vtable_index == Method::invalid_vtable_index) {
// get vtable_index for miranda methods
ResourceMark rm(THREAD);
klassVtable *vt = InstanceKlass::cast(klass())->vtable();
vtable_index = vt->index_of_miranda(name, signature);
}
return vtable_index;
}
void LinkResolver::lookup_method_in_interfaces(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) {
@ -625,6 +661,12 @@ void LinkResolver::resolve_interface_method(methodHandle& resolved_method,
resolved_method->method_holder()->internal_name()
);
resolved_method->access_flags().print_on(tty);
if (resolved_method->is_default_method()) {
tty->print("default");
}
if (resolved_method->is_overpass()) {
tty->print("overpass");
}
tty->cr();
}
}
@ -853,6 +895,7 @@ void LinkResolver::linktime_resolve_special_method(methodHandle& resolved_method
resolved_method->signature()));
THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
}
if (TraceItables && Verbose) {
ResourceMark rm(THREAD);
tty->print("invokespecial resolved method: caller-class:%s, compile-time-class:%s, method:%s, method_holder:%s, access_flags: ",
@ -864,8 +907,7 @@ void LinkResolver::linktime_resolve_special_method(methodHandle& resolved_method
resolved_method->method_holder()->internal_name()
);
resolved_method->access_flags().print_on(tty);
if (resolved_method->method_holder()->is_interface() &&
!resolved_method->is_abstract()) {
if (resolved_method->is_default_method()) {
tty->print("default");
}
if (resolved_method->is_overpass()) {
@ -945,10 +987,12 @@ void LinkResolver::runtime_resolve_special_method(CallInfo& result, methodHandle
sel_method->method_holder()->internal_name()
);
sel_method->access_flags().print_on(tty);
if (sel_method->method_holder()->is_interface() &&
!sel_method->is_abstract()) {
if (sel_method->is_default_method()) {
tty->print("default");
}
if (sel_method->is_overpass()) {
tty->print("overpass");
}
tty->cr();
}
@ -996,26 +1040,25 @@ void LinkResolver::linktime_resolve_virtual_method(methodHandle &resolved_method
THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
}
if (PrintVtables && Verbose) {
ResourceMark rm(THREAD);
tty->print("invokevirtual resolved method: caller-class:%s, compile-time-class:%s, method:%s, method_holder:%s, access_flags: ",
(current_klass.is_null() ? "<NULL>" : current_klass->internal_name()),
(resolved_klass.is_null() ? "<NULL>" : resolved_klass->internal_name()),
Method::name_and_sig_as_C_string(resolved_klass(),
resolved_method->name(),
resolved_method->signature()),
resolved_method->method_holder()->internal_name()
);
resolved_method->access_flags().print_on(tty);
if (resolved_method->method_holder()->is_interface() &&
!resolved_method->is_abstract()) {
tty->print("default");
}
if (resolved_method->is_overpass()) {
tty->print("overpass");
}
tty->cr();
}
if (PrintVtables && Verbose) {
ResourceMark rm(THREAD);
tty->print("invokevirtual resolved method: caller-class:%s, compile-time-class:%s, method:%s, method_holder:%s, access_flags: ",
(current_klass.is_null() ? "<NULL>" : current_klass->internal_name()),
(resolved_klass.is_null() ? "<NULL>" : resolved_klass->internal_name()),
Method::name_and_sig_as_C_string(resolved_klass(),
resolved_method->name(),
resolved_method->signature()),
resolved_method->method_holder()->internal_name()
);
resolved_method->access_flags().print_on(tty);
if (resolved_method->is_default_method()) {
tty->print("default");
}
if (resolved_method->is_overpass()) {
tty->print("overpass");
}
tty->cr();
}
}
// throws runtime exceptions
@ -1045,10 +1088,8 @@ void LinkResolver::runtime_resolve_virtual_method(CallInfo& result,
// do lookup based on receiver klass using the vtable index
if (resolved_method->method_holder()->is_interface()) { // miranda method
vtable_index = vtable_index_of_miranda_method(resolved_klass,
resolved_method->name(),
resolved_method->signature(), CHECK);
vtable_index = vtable_index_of_interface_method(resolved_klass,
resolved_method, CHECK);
assert(vtable_index >= 0 , "we should have valid vtable index at this point");
InstanceKlass* inst = InstanceKlass::cast(recv_klass());
@ -1104,11 +1145,10 @@ void LinkResolver::runtime_resolve_virtual_method(CallInfo& result,
vtable_index
);
selected_method->access_flags().print_on(tty);
if (selected_method->method_holder()->is_interface() &&
!selected_method->is_abstract()) {
if (selected_method->is_default_method()) {
tty->print("default");
}
if (resolved_method->is_overpass()) {
if (selected_method->is_overpass()) {
tty->print("overpass");
}
tty->cr();
@ -1191,7 +1231,6 @@ void LinkResolver::runtime_resolve_interface_method(CallInfo& result, methodHand
sel_method->name(),
sel_method->signature()));
}
// check if abstract
if (check_null_and_abstract && sel_method->is_abstract()) {
ResourceMark rm(THREAD);
@ -1220,11 +1259,10 @@ void LinkResolver::runtime_resolve_interface_method(CallInfo& result, methodHand
sel_method->method_holder()->internal_name()
);
sel_method->access_flags().print_on(tty);
if (sel_method->method_holder()->is_interface() &&
!sel_method->is_abstract()) {
if (sel_method->is_default_method()) {
tty->print("default");
}
if (resolved_method->is_overpass()) {
if (sel_method->is_overpass()) {
tty->print("overpass");
}
tty->cr();

View file

@ -130,8 +130,7 @@ class LinkResolver: AllStatic {
static void lookup_polymorphic_method (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature,
KlassHandle current_klass, Handle *appendix_result_or_null, Handle *method_type_result, TRAPS);
static int vtable_index_of_miranda_method(KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
static int vtable_index_of_interface_method(KlassHandle klass, methodHandle resolved_method, TRAPS);
static void resolve_klass (KlassHandle& result, constantPoolHandle pool, int index, TRAPS);
static void resolve_pool (KlassHandle& resolved_klass, Symbol*& method_name, Symbol*& method_signature, KlassHandle& current_klass, constantPoolHandle pool, int index, TRAPS);

View file

@ -47,85 +47,53 @@
// CollectorPolicy methods.
// Align down. If aligning results in 0, return 'alignment'.
static size_t restricted_align_down(size_t size, size_t alignment) {
return MAX2(alignment, align_size_down_(size, alignment));
}
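A couple of hand-worked values for this helper (illustration only):
// restricted_align_down(1000, 256): align_size_down_(1000, 256) = 768, MAX2(256, 768) = 768
// restricted_align_down( 100, 256): align_size_down_( 100, 256) =   0, MAX2(256,   0) = 256
// i.e. the result never drops below one alignment unit.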
void CollectorPolicy::initialize_flags() {
assert(max_alignment() >= min_alignment(),
err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
max_alignment(), min_alignment()));
assert(max_alignment() % min_alignment() == 0,
err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT,
max_alignment(), min_alignment()));
assert(_max_alignment >= _min_alignment,
err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
_max_alignment, _min_alignment));
assert(_max_alignment % _min_alignment == 0,
err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT,
_max_alignment, _min_alignment));
if (MaxHeapSize < InitialHeapSize) {
vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
}
// Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
// override if MaxMetaspaceSize was set on the command line or not.
// This information is needed later to conform to the specification of the
// java.lang.management.MemoryUsage API.
//
// Ideally, we would be able to set the default value of MaxMetaspaceSize in
// globals.hpp to the aligned value, but this is not possible, since the
// alignment depends on other flags being parsed.
MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, max_alignment());
if (MetaspaceSize > MaxMetaspaceSize) {
MetaspaceSize = MaxMetaspaceSize;
}
MetaspaceSize = restricted_align_down(MetaspaceSize, min_alignment());
assert(MetaspaceSize <= MaxMetaspaceSize, "Must be");
MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, min_alignment());
MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, min_alignment());
MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());
assert(MetaspaceSize % min_alignment() == 0, "metaspace alignment");
assert(MaxMetaspaceSize % max_alignment() == 0, "maximum metaspace alignment");
if (MetaspaceSize < 256*K) {
vm_exit_during_initialization("Too small initial Metaspace size");
}
MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, _min_alignment);
}
void CollectorPolicy::initialize_size_info() {
// User inputs from -Xmx and -Xms must be aligned
set_min_heap_byte_size(align_size_up(Arguments::min_heap_size(), min_alignment()));
set_initial_heap_byte_size(align_size_up(InitialHeapSize, min_alignment()));
set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment()));
_min_heap_byte_size = align_size_up(Arguments::min_heap_size(), _min_alignment);
_initial_heap_byte_size = align_size_up(InitialHeapSize, _min_alignment);
_max_heap_byte_size = align_size_up(MaxHeapSize, _max_alignment);
// Check heap parameter properties
if (initial_heap_byte_size() < M) {
if (_initial_heap_byte_size < M) {
vm_exit_during_initialization("Too small initial heap");
}
// Check heap parameter properties
if (min_heap_byte_size() < M) {
if (_min_heap_byte_size < M) {
vm_exit_during_initialization("Too small minimum heap");
}
if (initial_heap_byte_size() <= NewSize) {
if (_initial_heap_byte_size <= NewSize) {
// make sure there is at least some room in old space
vm_exit_during_initialization("Too small initial heap for new size specified");
}
if (max_heap_byte_size() < min_heap_byte_size()) {
if (_max_heap_byte_size < _min_heap_byte_size) {
vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
}
if (initial_heap_byte_size() < min_heap_byte_size()) {
if (_initial_heap_byte_size < _min_heap_byte_size) {
vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
}
if (max_heap_byte_size() < initial_heap_byte_size()) {
if (_max_heap_byte_size < _initial_heap_byte_size) {
vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
}
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT " Initial heap "
SIZE_FORMAT " Maximum heap " SIZE_FORMAT,
min_heap_byte_size(), initial_heap_byte_size(), max_heap_byte_size());
_min_heap_byte_size, _initial_heap_byte_size, _max_heap_byte_size);
}
}
@ -180,15 +148,15 @@ size_t CollectorPolicy::compute_max_alignment() {
size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
size_t x = base_size / (NewRatio+1);
size_t new_gen_size = x > min_alignment() ?
align_size_down(x, min_alignment()) :
min_alignment();
size_t new_gen_size = x > _min_alignment ?
align_size_down(x, _min_alignment) :
_min_alignment;
return new_gen_size;
}
size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
size_t maximum_size) {
size_t alignment = min_alignment();
size_t alignment = _min_alignment;
size_t max_minus = maximum_size - alignment;
return desired_size < max_minus ? desired_size : max_minus;
}
@ -207,8 +175,8 @@ void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
void GenCollectorPolicy::initialize_flags() {
// All sizes must be multiples of the generation granularity.
set_min_alignment((uintx) Generation::GenGrain);
set_max_alignment(compute_max_alignment());
_min_alignment = (uintx) Generation::GenGrain;
_max_alignment = compute_max_alignment();
CollectorPolicy::initialize_flags();
@ -218,26 +186,26 @@ void GenCollectorPolicy::initialize_flags() {
if (NewSize > MaxNewSize) {
MaxNewSize = NewSize;
}
NewSize = align_size_down(NewSize, min_alignment());
MaxNewSize = align_size_down(MaxNewSize, min_alignment());
NewSize = align_size_down(NewSize, _min_alignment);
MaxNewSize = align_size_down(MaxNewSize, _min_alignment);
// Check validity of heap flags
assert(NewSize % min_alignment() == 0, "eden space alignment");
assert(MaxNewSize % min_alignment() == 0, "survivor space alignment");
assert(NewSize % _min_alignment == 0, "eden space alignment");
assert(MaxNewSize % _min_alignment == 0, "survivor space alignment");
if (NewSize < 3*min_alignment()) {
if (NewSize < 3 * _min_alignment) {
// make sure there is room for eden and two survivor spaces
vm_exit_during_initialization("Too small new size specified");
}
if (SurvivorRatio < 1 || NewRatio < 1) {
vm_exit_during_initialization("Invalid heap ratio specified");
vm_exit_during_initialization("Invalid young gen ratio specified");
}
}
void TwoGenerationCollectorPolicy::initialize_flags() {
GenCollectorPolicy::initialize_flags();
OldSize = align_size_down(OldSize, min_alignment());
OldSize = align_size_down(OldSize, _min_alignment);
if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(NewSize)) {
// NewRatio will be used later to set the young generation size so we use
@ -246,11 +214,11 @@ void TwoGenerationCollectorPolicy::initialize_flags() {
assert(NewRatio > 0, "NewRatio should have been set up earlier");
size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);
calculated_heapsize = align_size_up(calculated_heapsize, max_alignment());
calculated_heapsize = align_size_up(calculated_heapsize, _max_alignment);
MaxHeapSize = calculated_heapsize;
InitialHeapSize = calculated_heapsize;
}
MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment);
// adjust max heap size if necessary
if (NewSize + OldSize > MaxHeapSize) {
@ -260,18 +228,18 @@ void TwoGenerationCollectorPolicy::initialize_flags() {
uintx calculated_size = NewSize + OldSize;
double shrink_factor = (double) MaxHeapSize / calculated_size;
// align
NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment());
NewSize = align_size_down((uintx) (NewSize * shrink_factor), _min_alignment);
// OldSize is already aligned because above we aligned MaxHeapSize to
// max_alignment(), and we just made sure that NewSize is aligned to
// min_alignment(). In initialize_flags() we verified that max_alignment()
// is a multiple of min_alignment().
// _max_alignment, and we just made sure that NewSize is aligned to
// _min_alignment. In initialize_flags() we verified that _max_alignment
// is a multiple of _min_alignment.
OldSize = MaxHeapSize - NewSize;
} else {
MaxHeapSize = NewSize + OldSize;
}
}
// need to do this again
MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment);
// adjust max heap size if necessary
if (NewSize + OldSize > MaxHeapSize) {
@ -281,24 +249,24 @@ void TwoGenerationCollectorPolicy::initialize_flags() {
uintx calculated_size = NewSize + OldSize;
double shrink_factor = (double) MaxHeapSize / calculated_size;
// align
NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment());
NewSize = align_size_down((uintx) (NewSize * shrink_factor), _min_alignment);
// OldSize is already aligned because above we aligned MaxHeapSize to
// max_alignment(), and we just made sure that NewSize is aligned to
// min_alignment(). In initialize_flags() we verified that max_alignment()
// is a multiple of min_alignment().
// _max_alignment, and we just made sure that NewSize is aligned to
// _min_alignment. In initialize_flags() we verified that _max_alignment
// is a multiple of _min_alignment.
OldSize = MaxHeapSize - NewSize;
} else {
MaxHeapSize = NewSize + OldSize;
}
}
// need to do this again
MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment);
always_do_update_barrier = UseConcMarkSweepGC;
// Check validity of heap flags
assert(OldSize % min_alignment() == 0, "old space alignment");
assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment");
assert(OldSize % _min_alignment == 0, "old space alignment");
assert(MaxHeapSize % _max_alignment == 0, "maximum heap alignment");
}
// Values set on the command line win over any ergonomically
@ -313,7 +281,7 @@ void TwoGenerationCollectorPolicy::initialize_flags() {
void GenCollectorPolicy::initialize_size_info() {
CollectorPolicy::initialize_size_info();
// min_alignment() is used for alignment within a generation.
// _min_alignment is used for alignment within a generation.
// There is additional alignment done downstream for some
// collectors that sometimes causes unwanted rounding up of
// generation sizes.
@ -322,18 +290,18 @@ void GenCollectorPolicy::initialize_size_info() {
size_t max_new_size = 0;
if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) {
if (MaxNewSize < min_alignment()) {
max_new_size = min_alignment();
if (MaxNewSize < _min_alignment) {
max_new_size = _min_alignment;
}
if (MaxNewSize >= max_heap_byte_size()) {
max_new_size = align_size_down(max_heap_byte_size() - min_alignment(),
min_alignment());
if (MaxNewSize >= _max_heap_byte_size) {
max_new_size = align_size_down(_max_heap_byte_size - _min_alignment,
_min_alignment);
warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or "
"greater than the entire heap (" SIZE_FORMAT "k). A "
"new generation size of " SIZE_FORMAT "k will be used.",
MaxNewSize/K, max_heap_byte_size()/K, max_new_size/K);
MaxNewSize/K, _max_heap_byte_size/K, max_new_size/K);
} else {
max_new_size = align_size_down(MaxNewSize, min_alignment());
max_new_size = align_size_down(MaxNewSize, _min_alignment);
}
// The case for FLAG_IS_ERGO(MaxNewSize) could be treated
@ -351,7 +319,7 @@ void GenCollectorPolicy::initialize_size_info() {
// just accept those choices. The choices currently made are
// not always "wise".
} else {
max_new_size = scale_by_NewRatio_aligned(max_heap_byte_size());
max_new_size = scale_by_NewRatio_aligned(_max_heap_byte_size);
// Bound the maximum size by NewSize below (since it historically
// would have been NewSize and because the NewRatio calculation could
// yield a size that is too small) and bound it by MaxNewSize above.
@ -364,13 +332,13 @@ void GenCollectorPolicy::initialize_size_info() {
// Given the maximum gen0 size, determine the initial and
// minimum gen0 sizes.
if (max_heap_byte_size() == min_heap_byte_size()) {
if (_max_heap_byte_size == _min_heap_byte_size) {
// The maximum and minimum heap sizes are the same so
// the generations minimum and initial must be the
// same as its maximum.
set_min_gen0_size(max_new_size);
set_initial_gen0_size(max_new_size);
set_max_gen0_size(max_new_size);
_min_gen0_size = max_new_size;
_initial_gen0_size = max_new_size;
_max_gen0_size = max_new_size;
} else {
size_t desired_new_size = 0;
if (!FLAG_IS_DEFAULT(NewSize)) {
@ -391,43 +359,37 @@ void GenCollectorPolicy::initialize_size_info() {
// Use the default NewSize as the floor for these values. If
// NewRatio is overly large, the resulting sizes can be too
// small.
_min_gen0_size = MAX2(scale_by_NewRatio_aligned(min_heap_byte_size()),
NewSize);
_min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size), NewSize);
desired_new_size =
MAX2(scale_by_NewRatio_aligned(initial_heap_byte_size()),
NewSize);
MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
}
assert(_min_gen0_size > 0, "Sanity check");
set_initial_gen0_size(desired_new_size);
set_max_gen0_size(max_new_size);
_initial_gen0_size = desired_new_size;
_max_gen0_size = max_new_size;
// At this point the desirable initial and minimum sizes have been
// determined without regard to the maximum sizes.
// Bound the sizes by the corresponding overall heap sizes.
set_min_gen0_size(
bound_minus_alignment(_min_gen0_size, min_heap_byte_size()));
set_initial_gen0_size(
bound_minus_alignment(_initial_gen0_size, initial_heap_byte_size()));
set_max_gen0_size(
bound_minus_alignment(_max_gen0_size, max_heap_byte_size()));
_min_gen0_size = bound_minus_alignment(_min_gen0_size, _min_heap_byte_size);
_initial_gen0_size = bound_minus_alignment(_initial_gen0_size, _initial_heap_byte_size);
_max_gen0_size = bound_minus_alignment(_max_gen0_size, _max_heap_byte_size);
// At this point all three sizes have been checked against the
// maximum sizes but have not been checked for consistency
// among the three.
// Final check min <= initial <= max
set_min_gen0_size(MIN2(_min_gen0_size, _max_gen0_size));
set_initial_gen0_size(
MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size));
set_min_gen0_size(MIN2(_min_gen0_size, _initial_gen0_size));
_min_gen0_size = MIN2(_min_gen0_size, _max_gen0_size);
_initial_gen0_size = MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size);
_min_gen0_size = MIN2(_min_gen0_size, _initial_gen0_size);
}
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
min_gen0_size(), initial_gen0_size(), max_gen0_size());
_min_gen0_size, _initial_gen0_size, _max_gen0_size);
}
}
@ -447,19 +409,17 @@ bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) {
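// If even gen1's minimum does not fit next to the requested gen0, shrink gen0
// to make room for min_gen1_size; otherwise give gen1 whatever remains after gen0.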
if ((heap_size < (*gen0_size_ptr + min_gen1_size)) &&
(heap_size >= min_gen1_size + min_alignment())) {
(heap_size >= min_gen1_size + _min_alignment)) {
// Adjust gen0 down to accommodate min_gen1_size
*gen0_size_ptr = heap_size - min_gen1_size;
*gen0_size_ptr =
MAX2((uintx)align_size_down(*gen0_size_ptr, min_alignment()),
min_alignment());
MAX2((uintx)align_size_down(*gen0_size_ptr, _min_alignment), _min_alignment);
assert(*gen0_size_ptr > 0, "Min gen0 is too large");
result = true;
} else {
*gen1_size_ptr = heap_size - *gen0_size_ptr;
*gen1_size_ptr =
MAX2((uintx)align_size_down(*gen1_size_ptr, min_alignment()),
min_alignment());
MAX2((uintx)align_size_down(*gen1_size_ptr, _min_alignment), _min_alignment);
}
}
return result;
@ -480,10 +440,9 @@ void TwoGenerationCollectorPolicy::initialize_size_info() {
// The maximum gen1 size can be determined from the maximum gen0
// and maximum heap size since no explicit flags exist
// for setting the gen1 maximum.
_max_gen1_size = max_heap_byte_size() - _max_gen0_size;
_max_gen1_size = _max_heap_byte_size - _max_gen0_size;
_max_gen1_size =
MAX2((uintx)align_size_down(_max_gen1_size, min_alignment()),
min_alignment());
MAX2((uintx)align_size_down(_max_gen1_size, _min_alignment), _min_alignment);
// If no explicit command line flag has been set for the
// gen1 size, use what is left for gen1.
if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) {
@ -492,70 +451,66 @@ void TwoGenerationCollectorPolicy::initialize_size_info() {
// with the overall heap size). In either case make
// the minimum, maximum and initial sizes consistent
// with the gen0 sizes and the overall heap sizes.
assert(min_heap_byte_size() > _min_gen0_size,
assert(_min_heap_byte_size > _min_gen0_size,
"gen0 has an unexpected minimum size");
set_min_gen1_size(min_heap_byte_size() - min_gen0_size());
set_min_gen1_size(
MAX2((uintx)align_size_down(_min_gen1_size, min_alignment()),
min_alignment()));
set_initial_gen1_size(initial_heap_byte_size() - initial_gen0_size());
set_initial_gen1_size(
MAX2((uintx)align_size_down(_initial_gen1_size, min_alignment()),
min_alignment()));
_min_gen1_size = _min_heap_byte_size - _min_gen0_size;
_min_gen1_size =
MAX2((uintx)align_size_down(_min_gen1_size, _min_alignment), _min_alignment);
_initial_gen1_size = _initial_heap_byte_size - _initial_gen0_size;
_initial_gen1_size =
MAX2((uintx)align_size_down(_initial_gen1_size, _min_alignment), _min_alignment);
} else {
// It's been explicitly set on the command line. Use the
// OldSize and then determine the consequences.
set_min_gen1_size(OldSize);
set_initial_gen1_size(OldSize);
_min_gen1_size = OldSize;
_initial_gen1_size = OldSize;
// If the user has explicitly set an OldSize that is inconsistent
// with other command line flags, issue a warning.
// The generation minimums and the overall heap minimum should
// be within one heap alignment.
if ((_min_gen1_size + _min_gen0_size + min_alignment()) <
min_heap_byte_size()) {
if ((_min_gen1_size + _min_gen0_size + _min_alignment) < _min_heap_byte_size) {
warning("Inconsistency between minimum heap size and minimum "
"generation sizes: using minimum heap = " SIZE_FORMAT,
min_heap_byte_size());
"generation sizes: using minimum heap = " SIZE_FORMAT,
_min_heap_byte_size);
}
if ((OldSize > _max_gen1_size)) {
warning("Inconsistency between maximum heap size and maximum "
"generation sizes: using maximum heap = " SIZE_FORMAT
" -XX:OldSize flag is being ignored",
max_heap_byte_size());
"generation sizes: using maximum heap = " SIZE_FORMAT
" -XX:OldSize flag is being ignored",
_max_heap_byte_size);
}
// If there is an inconsistency between the OldSize and the minimum and/or
// initial size of gen0, since OldSize was explicitly set, OldSize wins.
if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size,
min_heap_byte_size(), OldSize)) {
_min_heap_byte_size, OldSize)) {
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
min_gen0_size(), initial_gen0_size(), max_gen0_size());
_min_gen0_size, _initial_gen0_size, _max_gen0_size);
}
}
// Initial size
if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
initial_heap_byte_size(), OldSize)) {
_initial_heap_byte_size, OldSize)) {
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
min_gen0_size(), initial_gen0_size(), max_gen0_size());
_min_gen0_size, _initial_gen0_size, _max_gen0_size);
}
}
}
// Enforce the maximum gen1 size.
set_min_gen1_size(MIN2(_min_gen1_size, _max_gen1_size));
_min_gen1_size = MIN2(_min_gen1_size, _max_gen1_size);
// Check that min gen1 <= initial gen1 <= max gen1
set_initial_gen1_size(MAX2(_initial_gen1_size, _min_gen1_size));
set_initial_gen1_size(MIN2(_initial_gen1_size, _max_gen1_size));
_initial_gen1_size = MAX2(_initial_gen1_size, _min_gen1_size);
_initial_gen1_size = MIN2(_initial_gen1_size, _max_gen1_size);
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT " Initial gen1 "
SIZE_FORMAT " Maximum gen1 " SIZE_FORMAT,
min_gen1_size(), initial_gen1_size(), max_gen1_size());
_min_gen1_size, _initial_gen1_size, _max_gen1_size);
}
}

View file

@ -101,17 +101,12 @@ class CollectorPolicy : public CHeapObj<mtGC> {
// Return maximum heap alignment that may be imposed by the policy
static size_t compute_max_alignment();
void set_min_alignment(size_t align) { _min_alignment = align; }
size_t min_alignment() { return _min_alignment; }
void set_max_alignment(size_t align) { _max_alignment = align; }
size_t max_alignment() { return _max_alignment; }
size_t initial_heap_byte_size() { return _initial_heap_byte_size; }
void set_initial_heap_byte_size(size_t v) { _initial_heap_byte_size = v; }
size_t max_heap_byte_size() { return _max_heap_byte_size; }
void set_max_heap_byte_size(size_t v) { _max_heap_byte_size = v; }
size_t min_heap_byte_size() { return _min_heap_byte_size; }
void set_min_heap_byte_size(size_t v) { _min_heap_byte_size = v; }
enum Name {
CollectorPolicyKind,
@ -248,12 +243,9 @@ class GenCollectorPolicy : public CollectorPolicy {
public:
// Accessors
size_t min_gen0_size() { return _min_gen0_size; }
void set_min_gen0_size(size_t v) { _min_gen0_size = v; }
size_t min_gen0_size() { return _min_gen0_size; }
size_t initial_gen0_size() { return _initial_gen0_size; }
void set_initial_gen0_size(size_t v) { _initial_gen0_size = v; }
size_t max_gen0_size() { return _max_gen0_size; }
void set_max_gen0_size(size_t v) { _max_gen0_size = v; }
size_t max_gen0_size() { return _max_gen0_size; }
virtual int number_of_generations() = 0;
@ -302,12 +294,9 @@ class TwoGenerationCollectorPolicy : public GenCollectorPolicy {
public:
// Accessors
size_t min_gen1_size() { return _min_gen1_size; }
void set_min_gen1_size(size_t v) { _min_gen1_size = v; }
size_t min_gen1_size() { return _min_gen1_size; }
size_t initial_gen1_size() { return _initial_gen1_size; }
void set_initial_gen1_size(size_t v) { _initial_gen1_size = v; }
size_t max_gen1_size() { return _max_gen1_size; }
void set_max_gen1_size(size_t v) { _max_gen1_size = v; }
size_t max_gen1_size() { return _max_gen1_size; }
// Inherited methods
TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; }

View file

@ -26,6 +26,7 @@
#define SHARE_VM_MEMORY_FILEMAP_HPP
#include "memory/metaspaceShared.hpp"
#include "memory/metaspace.hpp"
// Layout of the file:
// header: dump of archive instance plus versioning info, datestamp, etc.

View file

@ -73,6 +73,10 @@
"Number of bytes used by the InstanceKlass::methods() array") \
f(method_ordering_bytes, IK_method_ordering, \
"Number of bytes used by the InstanceKlass::method_ordering() array") \
f(default_methods_array_bytes, IK_default_methods, \
"Number of bytes used by the InstanceKlass::default_methods() array") \
f(default_vtable_indices_bytes, IK_default_vtable_indices, \
"Number of bytes used by the InstanceKlass::default_vtable_indices() array") \
f(local_interfaces_bytes, IK_local_interfaces, \
"Number of bytes used by the InstanceKlass::local_interfaces() array") \
f(transitive_interfaces_bytes, IK_transitive_interfaces, \

File diff suppressed because it is too large

View file

@ -87,9 +87,10 @@ class Metaspace : public CHeapObj<mtClass> {
friend class MetaspaceAux;
public:
enum MetadataType {ClassType = 0,
NonClassType = ClassType + 1,
MetadataTypeCount = ClassType + 2
enum MetadataType {
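// Default enum numbering: ClassType = 0, NonClassType = 1, MetadataTypeCount = 2.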
ClassType,
NonClassType,
MetadataTypeCount
};
enum MetaspaceType {
StandardMetaspaceType,
@ -103,6 +104,9 @@ class Metaspace : public CHeapObj<mtClass> {
private:
void initialize(Mutex* lock, MetaspaceType type);
// Get the first chunk for a Metaspace. Used for
// special cases such as the boot class loader, reflection
// class loader and anonymous class loader.
Metachunk* get_initialization_chunk(MetadataType mdtype,
size_t chunk_word_size,
size_t chunk_bunch);
@ -123,6 +127,9 @@ class Metaspace : public CHeapObj<mtClass> {
static size_t _first_chunk_word_size;
static size_t _first_class_chunk_word_size;
static size_t _commit_alignment;
static size_t _reserve_alignment;
SpaceManager* _vsm;
SpaceManager* vsm() const { return _vsm; }
@ -191,12 +198,17 @@ class Metaspace : public CHeapObj<mtClass> {
Metaspace(Mutex* lock, MetaspaceType type);
~Metaspace();
// Initialize globals for Metaspace
static void ergo_initialize();
static void global_initialize();
static size_t first_chunk_word_size() { return _first_chunk_word_size; }
static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; }
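// _reserve_alignment and _commit_alignment are byte values; the *_words() accessors
// below convert them to word counts.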
static size_t reserve_alignment() { return _reserve_alignment; }
static size_t reserve_alignment_words() { return _reserve_alignment / BytesPerWord; }
static size_t commit_alignment() { return _commit_alignment; }
static size_t commit_alignment_words() { return _commit_alignment / BytesPerWord; }
char* bottom() const;
size_t used_words_slow(MetadataType mdtype) const;
size_t free_words_slow(MetadataType mdtype) const;
@ -219,6 +231,9 @@ class Metaspace : public CHeapObj<mtClass> {
static void purge(MetadataType mdtype);
static void purge();
static void report_metadata_oome(ClassLoaderData* loader_data, size_t word_size,
MetadataType mdtype, TRAPS);
void print_on(outputStream* st) const;
// Debugging support
void verify();
@ -352,17 +367,10 @@ class MetaspaceAux : AllStatic {
class MetaspaceGC : AllStatic {
// The current high-water-mark for inducing a GC. When
// the capacity of all space in the virtual lists reaches this value,
// a GC is induced and the value is increased. This should be changed
// to the space actually used for allocations to avoid affects of
// fragmentation losses to partially used chunks. Size is in words.
static size_t _capacity_until_GC;
// After a GC is done any allocation that fails should try to expand
// the capacity of the Metaspaces. This flag is set during attempts
// to allocate in the VMGCOperation that does the GC.
static bool _expand_after_GC;
// The current high-water-mark for inducing a GC.
// When committed memory of all metaspaces reaches this value,
// a GC is induced and the value is increased. Size is in bytes.
static volatile intptr_t _capacity_until_GC;
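// volatile: the high-water mark is read and adjusted by multiple threads;
// see the inc/dec helpers declared below.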
// For a CMS collection, signal that a concurrent collection should
// be started.
@ -370,20 +378,16 @@ class MetaspaceGC : AllStatic {
static uint _shrink_factor;
static void set_capacity_until_GC(size_t v) { _capacity_until_GC = v; }
static size_t shrink_factor() { return _shrink_factor; }
void set_shrink_factor(uint v) { _shrink_factor = v; }
public:
static size_t capacity_until_GC() { return _capacity_until_GC; }
static void inc_capacity_until_GC(size_t v) { _capacity_until_GC += v; }
static void dec_capacity_until_GC(size_t v) {
_capacity_until_GC = _capacity_until_GC > v ? _capacity_until_GC - v : 0;
}
static bool expand_after_GC() { return _expand_after_GC; }
static void set_expand_after_GC(bool v) { _expand_after_GC = v; }
static void initialize() { _capacity_until_GC = MetaspaceSize; }
static size_t capacity_until_GC();
static size_t inc_capacity_until_GC(size_t v);
static size_t dec_capacity_until_GC(size_t v);
static bool should_concurrent_collect() { return _should_concurrent_collect; }
static void set_should_concurrent_collect(bool v) {
@ -391,11 +395,14 @@ class MetaspaceGC : AllStatic {
}
// The amount to increase the high-water-mark (_capacity_until_GC)
static size_t delta_capacity_until_GC(size_t word_size);
static size_t delta_capacity_until_GC(size_t bytes);
// It is expected that this will be called when the current capacity
// has been used and a GC should be considered.
static bool should_expand(VirtualSpaceList* vsl, size_t word_size);
// Tells whether we can expand metaspace without hitting set limits.
static bool can_expand(size_t words, bool is_class);
// Returns amount that we can expand without hitting a GC,
// measured in words.
static size_t allowed_expansion();
// Calculate the new high-water mark at which to induce
// a GC.

View file

@ -238,6 +238,13 @@ void InstanceKlass::copy_method_ordering(intArray* m, TRAPS) {
}
}
// create a new array of vtable_indices for default methods
Array<int>* InstanceKlass::create_new_default_vtable_indices(int len, TRAPS) {
Array<int>* vtable_indices = MetadataFactory::new_array<int>(class_loader_data(), len, CHECK_NULL);
assert(default_vtable_indices() == NULL, "only create once");
set_default_vtable_indices(vtable_indices);
return vtable_indices;
}
InstanceKlass::InstanceKlass(int vtable_len,
int itable_len,
@ -263,6 +270,8 @@ InstanceKlass::InstanceKlass(int vtable_len,
set_array_klasses(NULL);
set_methods(NULL);
set_method_ordering(NULL);
set_default_methods(NULL);
set_default_vtable_indices(NULL);
set_local_interfaces(NULL);
set_transitive_interfaces(NULL);
init_implementor();
@ -376,6 +385,21 @@ void InstanceKlass::deallocate_contents(ClassLoaderData* loader_data) {
}
set_method_ordering(NULL);
// default methods can be empty
if (default_methods() != NULL &&
default_methods() != Universe::the_empty_method_array()) {
MetadataFactory::free_array<Method*>(loader_data, default_methods());
}
// Do NOT deallocate the default methods, they are owned by superinterfaces.
set_default_methods(NULL);
// default methods vtable indices can be empty
if (default_vtable_indices() != NULL) {
MetadataFactory::free_array<int>(loader_data, default_vtable_indices());
}
set_default_vtable_indices(NULL);
// This array is in Klass, but remove it with the InstanceKlass since
// this place would be the only caller and it can share memory with transitive
// interfaces.
@ -456,14 +480,14 @@ objArrayOop InstanceKlass::signers() const {
return java_lang_Class::signers(java_mirror());
}
volatile oop InstanceKlass::init_lock() const {
oop InstanceKlass::init_lock() const {
// return the init lock from the mirror
return java_lang_Class::init_lock(java_mirror());
}
void InstanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
EXCEPTION_MARK;
volatile oop init_lock = this_oop->init_lock();
oop init_lock = this_oop->init_lock();
ObjectLocker ol(init_lock, THREAD);
// abort if someone beat us to the initialization
@ -608,7 +632,7 @@ bool InstanceKlass::link_class_impl(
// verification & rewriting
{
volatile oop init_lock = this_oop->init_lock();
oop init_lock = this_oop->init_lock();
ObjectLocker ol(init_lock, THREAD);
// rewritten will have been set if loader constraint error found
// on an earlier link attempt
@ -731,7 +755,7 @@ void InstanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
// refer to the JVM book page 47 for description of steps
// Step 1
{
volatile oop init_lock = this_oop->init_lock();
oop init_lock = this_oop->init_lock();
ObjectLocker ol(init_lock, THREAD);
Thread *self = THREAD; // it's passed the current thread
@ -879,7 +903,7 @@ void InstanceKlass::set_initialization_state_and_notify(ClassState state, TRAPS)
}
void InstanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) {
volatile oop init_lock = this_oop->init_lock();
oop init_lock = this_oop->init_lock();
ObjectLocker ol(init_lock, THREAD);
this_oop->set_init_state(state);
ol.notify_all(CHECK);
@ -1354,32 +1378,44 @@ static int binary_search(Array<Method*>* methods, Symbol* name) {
return -1;
}
// find_method looks up the name/signature in the local methods array
Method* InstanceKlass::find_method(Symbol* name, Symbol* signature) const {
return InstanceKlass::find_method(methods(), name, signature);
}
// find_method looks up the name/signature in the local methods array
Method* InstanceKlass::find_method(
Array<Method*>* methods, Symbol* name, Symbol* signature) {
int hit = find_method_index(methods, name, signature);
return hit >= 0 ? methods->at(hit): NULL;
}
// Used directly for default_methods to find the index into the
// default_vtable_indices, and indirectly by find_method
// find_method_index looks in the local methods array to return the index
// of the matching name/signature
int InstanceKlass::find_method_index(
Array<Method*>* methods, Symbol* name, Symbol* signature) {
int hit = binary_search(methods, name);
if (hit != -1) {
Method* m = methods->at(hit);
// Do linear search to find matching signature. First, quick check
// for common case
if (m->signature() == signature) return m;
if (m->signature() == signature) return hit;
// search downwards through overloaded methods
int i;
for (i = hit - 1; i >= 0; --i) {
Method* m = methods->at(i);
assert(m->is_method(), "must be method");
if (m->name() != name) break;
if (m->signature() == signature) return m;
if (m->signature() == signature) return i;
}
// search upwards
for (i = hit + 1; i < methods->length(); ++i) {
Method* m = methods->at(i);
assert(m->is_method(), "must be method");
if (m->name() != name) break;
if (m->signature() == signature) return m;
if (m->signature() == signature) return i;
}
// not found
#ifdef ASSERT
@ -1387,9 +1423,8 @@ Method* InstanceKlass::find_method(
assert(index == -1, err_msg("binary search should have found entry %d", index));
#endif
}
return NULL;
return -1;
}
int InstanceKlass::find_method_by_name(Symbol* name, int* end) {
return find_method_by_name(methods(), name, end);
}
@ -1408,6 +1443,7 @@ int InstanceKlass::find_method_by_name(
return -1;
}
// lookup_method searches both the local methods array and all superclasses methods arrays
Method* InstanceKlass::uncached_lookup_method(Symbol* name, Symbol* signature) const {
Klass* klass = const_cast<InstanceKlass*>(this);
while (klass != NULL) {
@ -1418,6 +1454,21 @@ Method* InstanceKlass::uncached_lookup_method(Symbol* name, Symbol* signature) c
return NULL;
}
// lookup a method in the default methods list then in all transitive interfaces
// Do NOT return private or static methods
Method* InstanceKlass::lookup_method_in_ordered_interfaces(Symbol* name,
Symbol* signature) const {
Method* m = NULL;
if (default_methods() != NULL) {
m = find_method(default_methods(), name, signature);
}
// Look up interfaces
if (m == NULL) {
m = lookup_method_in_all_interfaces(name, signature);
}
return m;
}
// lookup a method in all the interfaces that this class implements
// Do NOT return private or static methods, new in JDK8 which are not externally visible
// They should only be found in the initial InterfaceMethodRef
@ -2548,6 +2599,42 @@ Method* InstanceKlass::method_at_itable(Klass* holder, int index, TRAPS) {
return m;
}
#if INCLUDE_JVMTI
// update default_methods for redefineclasses for methods that are
// not yet in the vtable due to concurrent subclass define and superinterface
// redefinition
// Note: those already in the vtable should have been updated via adjust_method_entries
void InstanceKlass::adjust_default_methods(Method** old_methods, Method** new_methods,
int methods_length, bool* trace_name_printed) {
// search the default_methods for uses of either obsolete or EMCP methods
if (default_methods() != NULL) {
for (int j = 0; j < methods_length; j++) {
Method* old_method = old_methods[j];
Method* new_method = new_methods[j];
for (int index = 0; index < default_methods()->length(); index ++) {
if (default_methods()->at(index) == old_method) {
default_methods()->at_put(index, new_method);
if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
if (!(*trace_name_printed)) {
// RC_TRACE_MESG macro has an embedded ResourceMark
RC_TRACE_MESG(("adjust: klassname=%s default methods from name=%s",
external_name(),
old_method->method_holder()->external_name()));
*trace_name_printed = true;
}
RC_TRACE(0x00100000, ("default method update: %s(%s) ",
new_method->name()->as_C_string(),
new_method->signature()->as_C_string()));
}
}
}
}
}
}
#endif // INCLUDE_JVMTI
// On-stack replacement stuff
void InstanceKlass::add_osr_nmethod(nmethod* n) {
// only one compilation can be active
@ -2742,11 +2829,21 @@ void InstanceKlass::print_on(outputStream* st) const {
st->print(BULLET"methods: "); methods()->print_value_on(st); st->cr();
if (Verbose || WizardMode) {
Array<Method*>* method_array = methods();
for(int i = 0; i < method_array->length(); i++) {
for (int i = 0; i < method_array->length(); i++) {
st->print("%d : ", i); method_array->at(i)->print_value(); st->cr();
}
}
st->print(BULLET"method ordering: "); method_ordering()->print_value_on(st); st->cr();
st->print(BULLET"method ordering: "); method_ordering()->print_value_on(st); st->cr();
st->print(BULLET"default_methods: "); default_methods()->print_value_on(st); st->cr();
if (Verbose && default_methods() != NULL) {
Array<Method*>* method_array = default_methods();
for (int i = 0; i < method_array->length(); i++) {
st->print("%d : ", i); method_array->at(i)->print_value(); st->cr();
}
}
if (default_vtable_indices() != NULL) {
st->print(BULLET"default vtable indices: "); default_vtable_indices()->print_value_on(st); st->cr();
}
st->print(BULLET"local interfaces: "); local_interfaces()->print_value_on(st); st->cr();
st->print(BULLET"trans. interfaces: "); transitive_interfaces()->print_value_on(st); st->cr();
st->print(BULLET"constants: "); constants()->print_value_on(st); st->cr();
@ -3099,6 +3196,19 @@ void InstanceKlass::verify_on(outputStream* st, bool check_dictionary) {
}
}
// Verify default methods
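// (default_methods is kept sorted by name (see Method::sort_methods) so that
// find_method can binary-search it; the loops below check that every entry is
// a Method* and that the name ordering holds.)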
if (default_methods() != NULL) {
Array<Method*>* methods = this->default_methods();
for (int j = 0; j < methods->length(); j++) {
guarantee(methods->at(j)->is_method(), "non-method in methods array");
}
for (int j = 0; j < methods->length() - 1; j++) {
Method* m1 = methods->at(j);
Method* m2 = methods->at(j + 1);
guarantee(m1->name()->fast_compare(m2->name()) <= 0, "methods not sorted correctly");
}
}
// Verify JNI static field identifiers
if (jni_ids() != NULL) {
jni_ids()->verify(this);

View file

@ -269,12 +269,18 @@ class InstanceKlass: public Klass {
// Method array.
Array<Method*>* _methods;
// Default Method Array, concrete methods inherited from interfaces
Array<Method*>* _default_methods;
// Interface (Klass*s) this class declares locally to implement.
Array<Klass*>* _local_interfaces;
// Interface (Klass*s) this class implements transitively.
Array<Klass*>* _transitive_interfaces;
// Int array containing the original order of methods in the class file (for JVMTI).
Array<int>* _method_ordering;
// Int array containing the vtable_indices for default_methods
// offset matches _default_methods offset
Array<int>* _default_vtable_indices;
// Instance and static variable information, starts with 6-tuples of shorts
// [access, name index, sig index, initval index, low_offset, high_offset]
// for all fields, followed by the generic signature data at the end of
@ -356,6 +362,15 @@ class InstanceKlass: public Klass {
void set_method_ordering(Array<int>* m) { _method_ordering = m; }
void copy_method_ordering(intArray* m, TRAPS);
// default_methods
Array<Method*>* default_methods() const { return _default_methods; }
void set_default_methods(Array<Method*>* a) { _default_methods = a; }
// default method vtable_indices
Array<int>* default_vtable_indices() const { return _default_vtable_indices; }
void set_default_vtable_indices(Array<int>* v) { _default_vtable_indices = v; }
Array<int>* create_new_default_vtable_indices(int len, TRAPS);
// interfaces
Array<Klass*>* local_interfaces() const { return _local_interfaces; }
void set_local_interfaces(Array<Klass*>* a) {
@ -501,12 +516,18 @@ class InstanceKlass: public Klass {
Method* find_method(Symbol* name, Symbol* signature) const;
static Method* find_method(Array<Method*>* methods, Symbol* name, Symbol* signature);
// find a local method index in default_methods (returns -1 if not found)
static int find_method_index(Array<Method*>* methods, Symbol* name, Symbol* signature);
// lookup operation (returns NULL if not found)
Method* uncached_lookup_method(Symbol* name, Symbol* signature) const;
// lookup a method in all the interfaces that this class implements
// (returns NULL if not found)
Method* lookup_method_in_all_interfaces(Symbol* name, Symbol* signature) const;
// lookup a method in local defaults then in all interfaces
// (returns NULL if not found)
Method* lookup_method_in_ordered_interfaces(Symbol* name, Symbol* signature) const;
// Find method indices by name. If a method with the specified name is
// found the index to the first method is returned, and 'end' is filled in
@ -910,6 +931,11 @@ class InstanceKlass: public Klass {
klassItable* itable() const; // return new klassItable wrapper
Method* method_at_itable(Klass* holder, int index, TRAPS);
#if INCLUDE_JVMTI
void adjust_default_methods(Method** old_methods, Method** new_methods,
int methods_length, bool* trace_name_printed);
#endif // INCLUDE_JVMTI
// Garbage collection
void oop_follow_contents(oop obj);
int oop_adjust_pointers(oop obj);
@ -995,7 +1021,7 @@ public:
// Must be one per class and it has to be a VM internal object so java code
// cannot lock it (like the mirror).
// It has to be an object not a Mutex because it's held through java calls.
volatile oop init_lock() const;
oop init_lock() const;
private:
// Static methods that are used to implement member methods where an exposed this pointer

View file

@ -83,7 +83,7 @@ void klassVtable::compute_vtable_size_and_num_mirandas(
GrowableArray<Method*> new_mirandas(20);
// compute the number of mirandas methods that must be added to the end
get_mirandas(&new_mirandas, all_mirandas, super, methods, local_interfaces);
get_mirandas(&new_mirandas, all_mirandas, super, methods, NULL, local_interfaces);
*num_new_mirandas = new_mirandas.length();
vtable_length += *num_new_mirandas * vtableEntry::size();
@ -186,7 +186,7 @@ void klassVtable::initialize_vtable(bool checkconstraints, TRAPS) {
assert(methods->at(i)->is_method(), "must be a Method*");
methodHandle mh(THREAD, methods->at(i));
bool needs_new_entry = update_inherited_vtable(ik(), mh, super_vtable_len, checkconstraints, CHECK);
bool needs_new_entry = update_inherited_vtable(ik(), mh, super_vtable_len, -1, checkconstraints, CHECK);
if (needs_new_entry) {
put_method_at(mh(), initialized);
@ -195,7 +195,35 @@ void klassVtable::initialize_vtable(bool checkconstraints, TRAPS) {
}
}
// add miranda methods to end of vtable.
// update vtable with default_methods
Array<Method*>* default_methods = ik()->default_methods();
if (default_methods != NULL) {
len = default_methods->length();
if (len > 0) {
Array<int>* def_vtable_indices = NULL;
if ((def_vtable_indices = ik()->default_vtable_indices()) == NULL) {
def_vtable_indices = ik()->create_new_default_vtable_indices(len, CHECK);
} else {
assert(def_vtable_indices->length() == len, "reinit vtable len?");
}
for (int i = 0; i < len; i++) {
HandleMark hm(THREAD);
assert(default_methods->at(i)->is_method(), "must be a Method*");
methodHandle mh(THREAD, default_methods->at(i));
bool needs_new_entry = update_inherited_vtable(ik(), mh, super_vtable_len, i, checkconstraints, CHECK);
// needs new entry
if (needs_new_entry) {
put_method_at(mh(), initialized);
def_vtable_indices->at_put(i, initialized); //set vtable index
initialized++;
}
}
}
}
// add miranda methods; it will also return the updated initialized
initialized = fill_in_mirandas(initialized);
// In class hierarchies where the accessibility is not increasing (i.e., going from private ->
@ -230,14 +258,19 @@ InstanceKlass* klassVtable::find_transitive_override(InstanceKlass* initialsuper
#ifndef PRODUCT
if (PrintVtables && Verbose) {
ResourceMark rm(THREAD);
char* sig = target_method()->name_and_sig_as_C_string();
tty->print("transitive overriding superclass %s with %s::%s index %d, original flags: ",
supersuperklass->internal_name(),
_klass->internal_name(), (target_method() != NULL) ?
target_method()->name()->as_C_string() : "<NULL>", vtable_index);
_klass->internal_name(), sig, vtable_index);
super_method->access_flags().print_on(tty);
if (super_method->is_default_method()) {
tty->print("default");
}
tty->print("overriders flags: ");
target_method->access_flags().print_on(tty);
tty->cr();
if (target_method->is_default_method()) {
tty->print("default");
}
}
#endif /*PRODUCT*/
break; // return found superk
@ -258,16 +291,31 @@ InstanceKlass* klassVtable::find_transitive_override(InstanceKlass* initialsuper
// OR return true if a new vtable entry is required.
// Only called for InstanceKlass's, i.e. not for arrays
// If that changed, could not use _klass as handle for klass
bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle target_method, int super_vtable_len,
bool checkconstraints, TRAPS) {
bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle target_method,
int super_vtable_len, int default_index,
bool checkconstraints, TRAPS) {
ResourceMark rm;
bool allocate_new = true;
assert(klass->oop_is_instance(), "must be InstanceKlass");
assert(klass == target_method()->method_holder(), "caller resp.");
// Initialize the method's vtable index to "nonvirtual".
// If we allocate a vtable entry, we will update it to a non-negative number.
target_method()->set_vtable_index(Method::nonvirtual_vtable_index);
Array<int>* def_vtable_indices = NULL;
bool is_default = false;
// default methods are concrete methods in superinterfaces which are added to the vtable
// with their real method_holder
// Since vtable and itable indices share the same storage, don't touch
// the default method's real vtable/itable index
// default_vtable_indices stores the vtable value relative to this inheritor
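// (Illustrative example: if class C inherits a concrete method m() from interface I,
// m() keeps the itable index assigned in I, while the vtable slot chosen for m() in C
// is recorded at m()'s position in C's default_methods / default_vtable_indices arrays.)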
if (default_index >= 0 ) {
is_default = true;
def_vtable_indices = klass->default_vtable_indices();
assert(def_vtable_indices != NULL, "def vtable alloc?");
assert(default_index <= def_vtable_indices->length(), "def vtable len?");
} else {
assert(klass == target_method()->method_holder(), "caller resp.");
// Initialize the method's vtable index to "nonvirtual".
// If we allocate a vtable entry, we will update it to a non-negative number.
target_method()->set_vtable_index(Method::nonvirtual_vtable_index);
}
// Static and <init> methods are never in the vtable.
if (target_method()->is_static() || target_method()->name() == vmSymbols::object_initializer_name()) {
@ -284,6 +332,8 @@ bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle tar
// An interface never allocates new vtable slots, only inherits old ones.
// This method will either be assigned its own itable index later,
// or be assigned an inherited vtable index in the loop below.
// default methods store their vtable indices in the inheritor's default_vtable_indices
assert (default_index == -1, "interfaces don't store resolved default methods");
target_method()->set_vtable_index(Method::pending_itable_index);
}
@ -307,8 +357,15 @@ bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle tar
Symbol* name = target_method()->name();
Symbol* signature = target_method()->signature();
Handle target_loader(THREAD, _klass()->class_loader());
Symbol* target_classname = _klass->name();
KlassHandle target_klass(THREAD, target_method()->method_holder());
if (target_klass == NULL) {
target_klass = _klass;
}
Handle target_loader(THREAD, target_klass->class_loader());
Symbol* target_classname = target_klass->name();
for(int i = 0; i < super_vtable_len; i++) {
Method* super_method = method_at(i);
// Check if method name matches
@ -317,10 +374,14 @@ bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle tar
// get super_klass for method_holder for the found method
InstanceKlass* super_klass = super_method->method_holder();
if ((super_klass->is_override(super_method, target_loader, target_classname, THREAD)) ||
((klass->major_version() >= VTABLE_TRANSITIVE_OVERRIDE_VERSION)
&& ((super_klass = find_transitive_override(super_klass, target_method, i, target_loader,
target_classname, THREAD)) != (InstanceKlass*)NULL))) {
if (is_default
|| ((super_klass->is_override(super_method, target_loader, target_classname, THREAD))
|| ((klass->major_version() >= VTABLE_TRANSITIVE_OVERRIDE_VERSION)
&& ((super_klass = find_transitive_override(super_klass,
target_method, i, target_loader,
target_classname, THREAD))
!= (InstanceKlass*)NULL))))
{
// overriding, so no new entry
allocate_new = false;
@ -347,7 +408,7 @@ bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle tar
"%s used in the signature";
char* sig = target_method()->name_and_sig_as_C_string();
const char* loader1 = SystemDictionary::loader_name(target_loader());
char* current = _klass->name()->as_C_string();
char* current = target_klass->name()->as_C_string();
const char* loader2 = SystemDictionary::loader_name(super_loader());
char* failed_type_name = failed_type_symbol->as_C_string();
size_t buflen = strlen(msg) + strlen(sig) + strlen(loader1) +
@ -360,16 +421,39 @@ bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle tar
}
}
put_method_at(target_method(), i);
target_method()->set_vtable_index(i);
put_method_at(target_method(), i);
if (!is_default) {
target_method()->set_vtable_index(i);
} else {
if (def_vtable_indices != NULL) {
def_vtable_indices->at_put(default_index, i);
}
assert(super_method->is_default_method() || super_method->is_overpass()
|| super_method->is_abstract(), "default override error");
}
#ifndef PRODUCT
if (PrintVtables && Verbose) {
ResourceMark rm(THREAD);
char* sig = target_method()->name_and_sig_as_C_string();
tty->print("overriding with %s::%s index %d, original flags: ",
_klass->internal_name(), (target_method() != NULL) ?
target_method()->name()->as_C_string() : "<NULL>", i);
target_klass->internal_name(), sig, i);
super_method->access_flags().print_on(tty);
if (super_method->is_default_method()) {
tty->print("default");
}
if (super_method->is_overpass()) {
tty->print("overpass");
}
tty->print("overriders flags: ");
target_method->access_flags().print_on(tty);
if (target_method->is_default_method()) {
tty->print("default");
}
if (target_method->is_overpass()) {
tty->print("overpass");
}
tty->cr();
}
#endif /*PRODUCT*/
@ -378,12 +462,25 @@ bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle tar
// but not override another. Once we override one, we do not need a new entry.
#ifndef PRODUCT
if (PrintVtables && Verbose) {
ResourceMark rm(THREAD);
char* sig = target_method()->name_and_sig_as_C_string();
tty->print("NOT overriding with %s::%s index %d, original flags: ",
_klass->internal_name(), (target_method() != NULL) ?
target_method()->name()->as_C_string() : "<NULL>", i);
target_klass->internal_name(), sig,i);
super_method->access_flags().print_on(tty);
if (super_method->is_default_method()) {
tty->print("default");
}
if (super_method->is_overpass()) {
tty->print("overpass");
}
tty->print("overriders flags: ");
target_method->access_flags().print_on(tty);
if (target_method->is_default_method()) {
tty->print("default");
}
if (target_method->is_overpass()) {
tty->print("overpass");
}
tty->cr();
}
#endif /*PRODUCT*/
@ -438,6 +535,14 @@ bool klassVtable::needs_new_vtable_entry(methodHandle target_method,
return false;
}
// Concrete interface methods do not need new entries, they override
// abstract method entries using default inheritance rules
if (target_method()->method_holder() != NULL &&
target_method()->method_holder()->is_interface() &&
!target_method()->is_abstract() ) {
return false;
}
// we need a new entry if there is no superclass
if (super == NULL) {
return true;
@ -446,7 +551,7 @@ bool klassVtable::needs_new_vtable_entry(methodHandle target_method,
// private methods in classes always have a new entry in the vtable:
// by long-standing specification interpretation (dating back to the classic VM),
// private methods do not override.
// JDK8 adds private methods in interfaces which require invokespecial
// JDK8 adds private methods in interfaces which require invokespecial
if (target_method()->is_private()) {
return true;
}
@ -526,35 +631,40 @@ bool klassVtable::is_miranda_entry_at(int i) {
if (mhk->is_interface()) {
assert(m->is_public(), "should be public");
assert(ik()->implements_interface(method_holder) , "this class should implement the interface");
assert(is_miranda(m, ik()->methods(), ik()->super()), "should be a miranda_method");
assert(is_miranda(m, ik()->methods(), ik()->default_methods(), ik()->super()), "should be a miranda_method");
return true;
}
return false;
}
// check if a method is a miranda method, given a class's methods table and its super
// "miranda" means not static, not defined by this class, and not defined
// in super unless it is private and therefore inaccessible to this class.
// check if a method is a miranda method, given a class's methods table,
// its default_method table and its super
// "miranda" means not static, not defined by this class.
// private methods in interfaces do not belong in the miranda list.
// the caller must make sure that the method belongs to an interface implemented by the class
// Miranda methods only include public interface instance methods
// Not private methods, not static methods, not default = concrete abstract
bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods, Klass* super) {
if (m->is_static()) {
// Not private methods, not static methods, not default == concrete abstract
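// (Illustrative example: interface I declares an abstract m() and class C implements I
// without defining m() anywhere in its class hierarchy and without a default
// implementation; m() is then a miranda for C and still needs its own vtable slot.)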
bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods,
Array<Method*>* default_methods, Klass* super) {
if (m->is_static() || m->is_private()) {
return false;
}
Symbol* name = m->name();
Symbol* signature = m->signature();
if (InstanceKlass::find_method(class_methods, name, signature) == NULL) {
// did not find it in the method table of the current class
if (super == NULL) {
// super doesn't exist
return true;
}
if ((default_methods == NULL) ||
InstanceKlass::find_method(default_methods, name, signature) == NULL) {
if (super == NULL) {
// super doesn't exist
return true;
}
Method* mo = InstanceKlass::cast(super)->lookup_method(name, signature);
if (mo == NULL || mo->access_flags().is_private() ) {
// super class hierarchy does not implement it or protection is different
return true;
Method* mo = InstanceKlass::cast(super)->lookup_method(name, signature);
if (mo == NULL || mo->access_flags().is_private() ) {
// super class hierarchy does not implement it or protection is different
return true;
}
}
}
@ -562,7 +672,7 @@ bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods, Klass* su
}
// Scans current_interface_methods for miranda methods that do not
// already appear in new_mirandas and are also not defined-and-non-private
// already appear in new_mirandas, or default methods, and are also not defined-and-non-private
// in super (superclass). These mirandas are added to all_mirandas if it is
// not null; in addition, those that are not duplicates of miranda methods
// inherited by super from its interfaces are added to new_mirandas.
@ -572,7 +682,8 @@ bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods, Klass* su
void klassVtable::add_new_mirandas_to_lists(
GrowableArray<Method*>* new_mirandas, GrowableArray<Method*>* all_mirandas,
Array<Method*>* current_interface_methods, Array<Method*>* class_methods,
Klass* super) {
Array<Method*>* default_methods, Klass* super) {
// iterate thru the current interface's methods to see if each is a miranda
int num_methods = current_interface_methods->length();
for (int i = 0; i < num_methods; i++) {
@ -590,7 +701,7 @@ void klassVtable::add_new_mirandas_to_lists(
}
if (!is_duplicate) { // we don't want duplicate miranda entries in the vtable
if (is_miranda(im, class_methods, super)) { // is it a miranda at all?
if (is_miranda(im, class_methods, default_methods, super)) { // is it a miranda at all?
InstanceKlass *sk = InstanceKlass::cast(super);
// check if it is a duplicate of a super's miranda
if (sk->lookup_method_in_all_interfaces(im->name(), im->signature()) == NULL) {
@ -607,6 +718,7 @@ void klassVtable::add_new_mirandas_to_lists(
void klassVtable::get_mirandas(GrowableArray<Method*>* new_mirandas,
GrowableArray<Method*>* all_mirandas,
Klass* super, Array<Method*>* class_methods,
Array<Method*>* default_methods,
Array<Klass*>* local_interfaces) {
assert((new_mirandas->length() == 0) , "current mirandas must be 0");
@ -615,14 +727,16 @@ void klassVtable::get_mirandas(GrowableArray<Method*>* new_mirandas,
for (int i = 0; i < num_local_ifs; i++) {
InstanceKlass *ik = InstanceKlass::cast(local_interfaces->at(i));
add_new_mirandas_to_lists(new_mirandas, all_mirandas,
ik->methods(), class_methods, super);
ik->methods(), class_methods,
default_methods, super);
// iterate thru each local's super interfaces
Array<Klass*>* super_ifs = ik->transitive_interfaces();
int num_super_ifs = super_ifs->length();
for (int j = 0; j < num_super_ifs; j++) {
InstanceKlass *sik = InstanceKlass::cast(super_ifs->at(j));
add_new_mirandas_to_lists(new_mirandas, all_mirandas,
sik->methods(), class_methods, super);
sik->methods(), class_methods,
default_methods, super);
}
}
}
@ -633,8 +747,22 @@ void klassVtable::get_mirandas(GrowableArray<Method*>* new_mirandas,
int klassVtable::fill_in_mirandas(int initialized) {
GrowableArray<Method*> mirandas(20);
get_mirandas(&mirandas, NULL, ik()->super(), ik()->methods(),
ik()->local_interfaces());
ik()->default_methods(), ik()->local_interfaces());
for (int i = 0; i < mirandas.length(); i++) {
if (PrintVtables && Verbose) {
Method* meth = mirandas.at(i);
ResourceMark rm(Thread::current());
if (meth != NULL) {
char* sig = meth->name_and_sig_as_C_string();
tty->print("fill in mirandas with %s index %d, flags: ",
sig, initialized);
meth->access_flags().print_on(tty);
if (meth->is_default_method()) {
tty->print("default");
}
tty->cr();
}
}
put_method_at(mirandas.at(i), initialized);
++initialized;
}
@ -648,6 +776,26 @@ void klassVtable::copy_vtable_to(vtableEntry* start) {
}
#if INCLUDE_JVMTI
bool klassVtable::adjust_default_method(int vtable_index, Method* old_method, Method* new_method) {
// If old_method is default, find this vtable index in default_vtable_indices
// and replace that method in the _default_methods list
bool updated = false;
Array<Method*>* default_methods = ik()->default_methods();
if (default_methods != NULL) {
int len = default_methods->length();
for (int idx = 0; idx < len; idx++) {
if (vtable_index == ik()->default_vtable_indices()->at(idx)) {
if (default_methods->at(idx) == old_method) {
default_methods->at_put(idx, new_method);
updated = true;
}
break;
}
}
}
return updated;
}
void klassVtable::adjust_method_entries(Method** old_methods, Method** new_methods,
int methods_length, bool * trace_name_printed) {
// search the vtable for uses of either obsolete or EMCP methods
@ -663,18 +811,26 @@ void klassVtable::adjust_method_entries(Method** old_methods, Method** new_metho
for (int index = 0; index < length(); index++) {
if (unchecked_method_at(index) == old_method) {
put_method_at(new_method, index);
// For default methods, need to update the _default_methods array
// which can only have one method entry for a given signature
bool updated_default = false;
if (old_method->is_default_method()) {
updated_default = adjust_default_method(index, old_method, new_method);
}
if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
if (!(*trace_name_printed)) {
// RC_TRACE_MESG macro has an embedded ResourceMark
RC_TRACE_MESG(("adjust: name=%s",
RC_TRACE_MESG(("adjust: klassname=%s for methods from name=%s",
klass()->external_name(),
old_method->method_holder()->external_name()));
*trace_name_printed = true;
}
// RC_TRACE macro has an embedded ResourceMark
RC_TRACE(0x00100000, ("vtable method update: %s(%s)",
RC_TRACE(0x00100000, ("vtable method update: %s(%s), updated default = %s",
new_method->name()->as_C_string(),
new_method->signature()->as_C_string()));
new_method->signature()->as_C_string(),
updated_default ? "true" : "false"));
}
// cannot 'break' here; see for-loop comment above.
}
@ -701,6 +857,12 @@ void klassVtable::dump_vtable() {
if (m != NULL) {
tty->print(" (%5d) ", i);
m->access_flags().print_on(tty);
if (m->is_default_method()) {
tty->print("default");
}
if (m->is_overpass()) {
tty->print("overpass");
}
tty->print(" -- ");
m->print_name(tty);
tty->cr();
@ -757,9 +919,9 @@ static int initialize_count = 0;
// Initialization
void klassItable::initialize_itable(bool checkconstraints, TRAPS) {
if (_klass->is_interface()) {
// This needs to go after vtable indexes are assigned but
// before implementors need to know the number of itable indexes.
assign_itable_indexes_for_interface(_klass());
// This needs to go after vtable indices are assigned but
// before implementors need to know the number of itable indices.
assign_itable_indices_for_interface(_klass());
}
// Cannot be setup doing bootstrapping, interfaces don't have
@ -803,7 +965,7 @@ inline bool interface_method_needs_itable_index(Method* m) {
return true;
}
int klassItable::assign_itable_indexes_for_interface(Klass* klass) {
int klassItable::assign_itable_indices_for_interface(Klass* klass) {
// an interface does not have an itable, but its methods need to be numbered
if (TraceItables) tty->print_cr("%3d: Initializing itable for interface %s", ++initialize_count,
klass->name()->as_C_string());
@ -846,7 +1008,7 @@ int klassItable::method_count_for_interface(Klass* interf) {
}
nof_methods -= 1;
}
// no methods have itable indexes
// no methods have itable indices
return 0;
}
@ -907,6 +1069,21 @@ void klassItable::initialize_itable_for_interface(int method_table_offset, Klass
int ime_num = m->itable_index();
assert(ime_num < ime_count, "oob");
itableOffsetEntry::method_entry(_klass(), method_table_offset)[ime_num].initialize(target());
if (TraceItables && Verbose) {
ResourceMark rm(THREAD);
if (target() != NULL) {
char* sig = target()->name_and_sig_as_C_string();
tty->print("interface: %s, ime_num: %d, target: %s, method_holder: %s ",
interf_h()->internal_name(), ime_num, sig,
target()->method_holder()->internal_name());
tty->print("target_method flags: ");
target()->access_flags().print_on(tty);
if (target()->is_default_method()) {
tty->print("default");
}
tty->cr();
}
}
}
}
}
@ -980,6 +1157,9 @@ void klassItable::dump_itable() {
if (m != NULL) {
tty->print(" (%5d) ", i);
m->access_flags().print_on(tty);
if (m->is_default_method()) {
tty->print("default");
}
tty->print(" -- ");
m->print_name(tty);
tty->cr();
@ -1116,7 +1296,7 @@ Method* klassItable::method_for_itable_index(Klass* intf, int itable_index) {
Array<Method*>* methods = InstanceKlass::cast(intf)->methods();
if (itable_index < 0 || itable_index >= method_count_for_interface(intf))
return NULL; // help caller defend against bad indexes
return NULL; // help caller defend against bad indices
int index = itable_index;
Method* m = methods->at(index);

View file

@ -97,6 +97,7 @@ class klassVtable : public ResourceObj {
// trace_name_printed is set to true if the current call has
// printed the klass name so that other routines in the adjust_*
// group don't print the klass name.
bool adjust_default_method(int vtable_index, Method* old_method, Method* new_method);
void adjust_method_entries(Method** old_methods, Method** new_methods,
int methods_length, bool * trace_name_printed);
bool check_no_old_or_obsolete_entries();
@ -118,24 +119,28 @@ class klassVtable : public ResourceObj {
void put_method_at(Method* m, int index);
static bool needs_new_vtable_entry(methodHandle m, Klass* super, Handle classloader, Symbol* classname, AccessFlags access_flags, TRAPS);
bool update_inherited_vtable(InstanceKlass* klass, methodHandle target_method, int super_vtable_len, bool checkconstraints, TRAPS);
bool update_inherited_vtable(InstanceKlass* klass, methodHandle target_method, int super_vtable_len, int default_index, bool checkconstraints, TRAPS);
InstanceKlass* find_transitive_override(InstanceKlass* initialsuper, methodHandle target_method, int vtable_index,
Handle target_loader, Symbol* target_classname, Thread* THREAD);
// support for miranda methods
bool is_miranda_entry_at(int i);
int fill_in_mirandas(int initialized);
static bool is_miranda(Method* m, Array<Method*>* class_methods, Klass* super);
static bool is_miranda(Method* m, Array<Method*>* class_methods,
Array<Method*>* default_methods, Klass* super);
static void add_new_mirandas_to_lists(
GrowableArray<Method*>* new_mirandas,
GrowableArray<Method*>* all_mirandas,
Array<Method*>* current_interface_methods, Array<Method*>* class_methods,
Array<Method*>* current_interface_methods,
Array<Method*>* class_methods,
Array<Method*>* default_methods,
Klass* super);
static void get_mirandas(
GrowableArray<Method*>* new_mirandas,
GrowableArray<Method*>* all_mirandas, Klass* super,
Array<Method*>* class_methods, Array<Klass*>* local_interfaces);
Array<Method*>* class_methods,
Array<Method*>* default_methods,
Array<Klass*>* local_interfaces);
void verify_against(outputStream* st, klassVtable* vt, int index);
inline InstanceKlass* ik() const;
};
@ -290,7 +295,7 @@ class klassItable : public ResourceObj {
#endif // INCLUDE_JVMTI
// Setup of itable
static int assign_itable_indexes_for_interface(Klass* klass);
static int assign_itable_indices_for_interface(Klass* klass);
static int method_count_for_interface(Klass* klass);
static int compute_itable_size(Array<Klass*>* transitive_interfaces);
static void setup_itable_offset_table(instanceKlassHandle klass);

View file

@ -511,9 +511,9 @@ bool Method::compute_has_loops_flag() {
bool Method::is_final_method(AccessFlags class_access_flags) const {
// or "does_not_require_vtable_entry"
// overpass can occur, is not final (reuses vtable entry)
// default method or overpass can occur, is not final (reuses vtable entry)
// private methods get vtable entries for backward class compatibility.
if (is_overpass()) return false;
if (is_overpass() || is_default_method()) return false;
return is_final() || class_access_flags.is_final();
}
@ -521,11 +521,24 @@ bool Method::is_final_method() const {
return is_final_method(method_holder()->access_flags());
}
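// A method is considered a default method if its holder is an interface and the
// method itself is concrete (not abstract); abstract interface methods and all
// class methods return false.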
bool Method::is_default_method() const {
if (method_holder() != NULL &&
method_holder()->is_interface() &&
!is_abstract()) {
return true;
} else {
return false;
}
}
bool Method::can_be_statically_bound(AccessFlags class_access_flags) const {
if (is_final_method(class_access_flags)) return true;
#ifdef ASSERT
ResourceMark rm;
bool is_nonv = (vtable_index() == nonvirtual_vtable_index);
if (class_access_flags.is_interface()) assert(is_nonv == is_static(), err_msg("is_nonv=%s", is_nonv));
if (class_access_flags.is_interface()) {
assert(is_nonv == is_static(), err_msg("is_nonv=%s", name_and_sig_as_C_string()));
}
#endif
assert(valid_vtable_index() || valid_itable_index(), "method must be linked before we ask this question");
return vtable_index() == nonvirtual_vtable_index;
@ -1371,7 +1384,8 @@ static int method_comparator(Method* a, Method* b) {
}
// This is only done during class loading, so it is OK to assume method_idnum matches the methods() array
void Method::sort_methods(Array<Method*>* methods, bool idempotent) {
// default_methods also uses this without the ordering for fast find_method
void Method::sort_methods(Array<Method*>* methods, bool idempotent, bool set_idnums) {
int length = methods->length();
if (length > 1) {
{
@ -1379,14 +1393,15 @@ void Method::sort_methods(Array<Method*>* methods, bool idempotent) {
QuickSort::sort<Method*>(methods->data(), length, method_comparator, idempotent);
}
// Reset method ordering
for (int i = 0; i < length; i++) {
Method* m = methods->at(i);
m->set_method_idnum(i);
if (set_idnums) {
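// method_idnum is only reset when requested; callers sorting default_methods pass
// set_idnums == false so the Method*s (owned by the declaring interfaces) keep
// their original idnums.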
for (int i = 0; i < length; i++) {
Method* m = methods->at(i);
m->set_method_idnum(i);
}
}
}
}
//-----------------------------------------------------------------------------------
// Non-product code unless JVM/TI needs it

View file

@ -567,6 +567,7 @@ class Method : public Metadata {
// checks method and its method holder
bool is_final_method() const;
bool is_final_method(AccessFlags class_access_flags) const;
bool is_default_method() const;
// true if method needs no dynamic dispatch (final and/or no vtable entry)
bool can_be_statically_bound() const;
@ -846,7 +847,7 @@ class Method : public Metadata {
#endif
// Helper routine used for method sorting
static void sort_methods(Array<Method*>* methods, bool idempotent = false);
static void sort_methods(Array<Method*>* methods, bool idempotent = false, bool set_idnums = true);
// Deallocation function for redefine classes or if an error occurs
void deallocate_contents(ClassLoaderData* loader_data);

View file

@ -3713,7 +3713,8 @@ void GraphKit::g1_write_barrier_post(Node* oop_store,
Node* no_base = __ top();
float likely = PROB_LIKELY(0.999);
float unlikely = PROB_UNLIKELY(0.999);
Node* zero = __ ConI(0);
Node* young_card = __ ConI((jint)G1SATBCardTableModRefBS::g1_young_card_val());
Node* dirty_card = __ ConI((jint)CardTableModRefBS::dirty_card_val());
Node* zeroX = __ ConX(0);
// Get the alias_index for raw card-mark memory
@ -3769,8 +3770,16 @@ void GraphKit::g1_write_barrier_post(Node* oop_store,
// load the original value of the card
Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
__ if_then(card_val, BoolTest::ne, zero); {
g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
__ if_then(card_val, BoolTest::ne, young_card); {
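// Non-young card: emit a StoreLoad barrier, then re-read the card and only
// mark it dirty if it is not already dirty.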
sync_kit(ideal);
// Use Op_MemBarVolatile to achieve the effect of a StoreLoad barrier.
insert_mem_bar(Op_MemBarVolatile, oop_store);
__ sync_kit(this);
Node* card_val_reload = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
__ if_then(card_val_reload, BoolTest::ne, dirty_card); {
g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
} __ end_if();
} __ end_if();
} __ end_if();
} __ end_if();

View file

@ -1591,10 +1591,8 @@ static jmethodID get_method_id(JNIEnv *env, jclass clazz, const char *name_str,
}
} else {
m = klass->lookup_method(name, signature);
// Look up interfaces
if (m == NULL && klass->oop_is_instance()) {
m = InstanceKlass::cast(klass())->lookup_method_in_all_interfaces(name,
signature);
if (m == NULL && klass->oop_is_instance()) {
m = InstanceKlass::cast(klass())->lookup_method_in_ordered_interfaces(name, signature);
}
}
if (m == NULL || (m->is_static() != is_static)) {
@@ -3210,7 +3208,11 @@ JNI_QUICK_ENTRY(jsize, jni_GetStringLength(JNIEnv *env, jstring string))
HOTSPOT_JNI_GETSTRINGLENGTH_ENTRY(
env, string);
#endif /* USDT2 */
jsize ret = java_lang_String::length(JNIHandles::resolve_non_null(string));
jsize ret = 0;
oop s = JNIHandles::resolve_non_null(string);
if (java_lang_String::value(s) != NULL) {
ret = java_lang_String::length(s);
}
#ifndef USDT2
DTRACE_PROBE1(hotspot_jni, GetStringLength__return, ret);
#else /* USDT2 */
@@ -3230,20 +3232,23 @@ JNI_QUICK_ENTRY(const jchar*, jni_GetStringChars(
HOTSPOT_JNI_GETSTRINGCHARS_ENTRY(
env, string, (uintptr_t *) isCopy);
#endif /* USDT2 */
jchar* buf = NULL;
oop s = JNIHandles::resolve_non_null(string);
int s_len = java_lang_String::length(s);
typeArrayOop s_value = java_lang_String::value(s);
int s_offset = java_lang_String::offset(s);
jchar* buf = NEW_C_HEAP_ARRAY_RETURN_NULL(jchar, s_len + 1, mtInternal); // add one for zero termination
/* JNI Specification states return NULL on OOM */
if (buf != NULL) {
if (s_len > 0) {
memcpy(buf, s_value->char_at_addr(s_offset), sizeof(jchar)*s_len);
}
buf[s_len] = 0;
//%note jni_5
if (isCopy != NULL) {
*isCopy = JNI_TRUE;
if (s_value != NULL) {
int s_len = java_lang_String::length(s);
int s_offset = java_lang_String::offset(s);
buf = NEW_C_HEAP_ARRAY_RETURN_NULL(jchar, s_len + 1, mtInternal); // add one for zero termination
/* JNI Specification states return NULL on OOM */
if (buf != NULL) {
if (s_len > 0) {
memcpy(buf, s_value->char_at_addr(s_offset), sizeof(jchar)*s_len);
}
buf[s_len] = 0;
//%note jni_5
if (isCopy != NULL) {
*isCopy = JNI_TRUE;
}
}
}
#ifndef USDT2
@@ -3313,7 +3318,11 @@ JNI_ENTRY(jsize, jni_GetStringUTFLength(JNIEnv *env, jstring string))
HOTSPOT_JNI_GETSTRINGUTFLENGTH_ENTRY(
env, string);
#endif /* USDT2 */
jsize ret = java_lang_String::utf8_length(JNIHandles::resolve_non_null(string));
jsize ret = 0;
oop java_string = JNIHandles::resolve_non_null(string);
if (java_lang_String::value(java_string) != NULL) {
ret = java_lang_String::utf8_length(java_string);
}
#ifndef USDT2
DTRACE_PROBE1(hotspot_jni, GetStringUTFLength__return, ret);
#else /* USDT2 */
@@ -3332,14 +3341,17 @@ JNI_ENTRY(const char*, jni_GetStringUTFChars(JNIEnv *env, jstring string, jboole
HOTSPOT_JNI_GETSTRINGUTFCHARS_ENTRY(
env, string, (uintptr_t *) isCopy);
#endif /* USDT2 */
char* result = NULL;
oop java_string = JNIHandles::resolve_non_null(string);
size_t length = java_lang_String::utf8_length(java_string);
/* JNI Specification states return NULL on OOM */
char* result = AllocateHeap(length + 1, mtInternal, 0, AllocFailStrategy::RETURN_NULL);
if (result != NULL) {
java_lang_String::as_utf8_string(java_string, result, (int) length + 1);
if (isCopy != NULL) {
*isCopy = JNI_TRUE;
if (java_lang_String::value(java_string) != NULL) {
size_t length = java_lang_String::utf8_length(java_string);
/* JNI Specification states return NULL on OOM */
result = AllocateHeap(length + 1, mtInternal, 0, AllocFailStrategy::RETURN_NULL);
if (result != NULL) {
java_lang_String::as_utf8_string(java_string, result, (int) length + 1);
if (isCopy != NULL) {
*isCopy = JNI_TRUE;
}
}
}
#ifndef USDT2
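With these guards, the string entry points tolerate a java.lang.String whose value field has not been set yet, and GetStringChars / GetStringUTFChars return NULL on failure, as the JNI specification already allows. A hedged caller-side example in C++ (class and method names are hypothetical):

#include <jni.h>
#include <cstdio>

// Hypothetical native method on class demo.Printer: print a Java string defensively.
extern "C" JNIEXPORT void JNICALL
Java_demo_Printer_print(JNIEnv* env, jclass, jstring s) {
  const char* utf = env->GetStringUTFChars(s, NULL);
  if (utf == NULL) {
    return;  // allocation failed (an OutOfMemoryError may be pending); do not touch the buffer
  }
  std::printf("%s\n", utf);
  env->ReleaseStringUTFChars(s, utf);
}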

View file

@@ -1324,18 +1324,19 @@ JNI_ENTRY_CHECKED(const jchar *,
IN_VM(
checkString(thr, str);
)
jchar* newResult = NULL;
const jchar *result = UNCHECKED()->GetStringChars(env,str,isCopy);
assert (isCopy == NULL || *isCopy == JNI_TRUE, "GetStringChars didn't return a copy as expected");
size_t len = UNCHECKED()->GetStringLength(env,str) + 1; // + 1 for NULL termination
jint* tagLocation = (jint*) AllocateHeap(len * sizeof(jchar) + sizeof(jint), mtInternal);
*tagLocation = STRING_TAG;
jchar* newResult = (jchar*) (tagLocation + 1);
memcpy(newResult, result, len * sizeof(jchar));
// Avoiding call to UNCHECKED()->ReleaseStringChars() since that will fire unexpected dtrace probes
// Note that the dtrace arguments for the allocated memory will not match up with this solution.
FreeHeap((char*)result);
if (result != NULL) {
size_t len = UNCHECKED()->GetStringLength(env,str) + 1; // + 1 for NULL termination
jint* tagLocation = (jint*) AllocateHeap(len * sizeof(jchar) + sizeof(jint), mtInternal);
*tagLocation = STRING_TAG;
newResult = (jchar*) (tagLocation + 1);
memcpy(newResult, result, len * sizeof(jchar));
// Avoiding call to UNCHECKED()->ReleaseStringChars() since that will fire unexpected dtrace probes
// Note that the dtrace arguments for the allocated memory will not match up with this solution.
FreeHeap((char*)result);
}
functionExit(env);
return newResult;
JNI_END
@@ -1394,18 +1395,19 @@ JNI_ENTRY_CHECKED(const char *,
IN_VM(
checkString(thr, str);
)
char* newResult = NULL;
const char *result = UNCHECKED()->GetStringUTFChars(env,str,isCopy);
assert (isCopy == NULL || *isCopy == JNI_TRUE, "GetStringUTFChars didn't return a copy as expected");
size_t len = strlen(result) + 1; // + 1 for NULL termination
jint* tagLocation = (jint*) AllocateHeap(len + sizeof(jint), mtInternal);
*tagLocation = STRING_UTF_TAG;
char* newResult = (char*) (tagLocation + 1);
strcpy(newResult, result);
// Avoiding call to UNCHECKED()->ReleaseStringUTFChars() since that will fire unexpected dtrace probes
// Note that the dtrace arguments for the allocated memory will not match up with this solution.
FreeHeap((char*)result, mtInternal);
if (result != NULL) {
size_t len = strlen(result) + 1; // + 1 for NULL termination
jint* tagLocation = (jint*) AllocateHeap(len + sizeof(jint), mtInternal);
*tagLocation = STRING_UTF_TAG;
newResult = (char*) (tagLocation + 1);
strcpy(newResult, result);
// Avoiding call to UNCHECKED()->ReleaseStringUTFChars() since that will fire unexpected dtrace probes
// Note that the dtrace arguments for the allocated memory will not match up with this solution.
FreeHeap((char*)result, mtInternal);
}
functionExit(env);
return newResult;
JNI_END
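The checked wrappers copy the VM's buffer into a fresh allocation prefixed by a tag word, and now skip all of that when the underlying call returned NULL. A standalone sketch of the tag-prefix idiom, with a hypothetical tag value and plain malloc standing in for AllocateHeap:

#include <cstdlib>
#include <cstring>

static const int kStringUtfTag = 0x10;   // hypothetical stand-in for STRING_UTF_TAG

// Copy src into a buffer whose header word carries a tag; return NULL if src is NULL.
static char* tagged_copy(const char* src) {
  if (src == NULL) {
    return NULL;                          // mirrors the new NULL guard around the copy
  }
  size_t len = std::strlen(src) + 1;      // + 1 for NUL termination
  int* tag = static_cast<int*>(std::malloc(sizeof(int) + len));
  if (tag == NULL) {
    return NULL;
  }
  *tag = kStringUtfTag;
  char* out = reinterpret_cast<char*>(tag + 1);
  std::memcpy(out, src, len);
  return out;                             // the matching Release* call checks and strips the tag
}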

View file

@@ -668,13 +668,12 @@ JVM_END
JVM_ENTRY(jclass, JVM_GetCallerClass(JNIEnv* env, int depth))
JVMWrapper("JVM_GetCallerClass");
// Pre-JDK 8 and early builds of JDK 8 don't have a CallerSensitive annotation.
if (SystemDictionary::reflect_CallerSensitive_klass() == NULL) {
// Pre-JDK 8 and early builds of JDK 8 don't have a CallerSensitive annotation; or
// sun.reflect.Reflection.getCallerClass with a depth parameter is provided
// temporarily for existing code to use until a replacement API is defined.
if (SystemDictionary::reflect_CallerSensitive_klass() == NULL || depth != JVM_CALLER_DEPTH) {
Klass* k = thread->security_get_caller_class(depth);
return (k == NULL) ? NULL : (jclass) JNIHandles::make_local(env, k->java_mirror());
} else {
// Basic handshaking with Java_sun_reflect_Reflection_getCallerClass
assert(depth == -1, "wrong handshake depth");
}
// Getting the class of the caller frame.
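The rewritten entry point keeps the old depth-based stack walk as a temporary path: only a depth equal to JVM_CALLER_DEPTH (-1) takes the @CallerSensitive handshake. A self-contained sketch of that dispatch; the helper names below are stand-ins, not HotSpot functions:

static const int kJvmCallerDepth = -1;   // mirrors JVM_CALLER_DEPTH in jvm.h

static const char* legacy_depth_walk(int depth) {
  (void) depth;
  return "class found by depth-based walk";
}
static const char* caller_sensitive_walk() {
  return "class found past @CallerSensitive frames";
}

static const char* get_caller_class(int depth, bool has_caller_sensitive_klass) {
  if (!has_caller_sensitive_klass || depth != kJvmCallerDepth) {
    // Pre-JDK 8 class files, or existing callers that still pass an explicit depth.
    return legacy_depth_walk(depth);
  }
  // depth == -1: handshake with sun.reflect.Reflection.getCallerClass().
  return caller_sensitive_walk();
}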
@@ -3954,248 +3953,6 @@ void initialize_converter_functions() {
}
// Serialization
JVM_ENTRY(void, JVM_SetPrimitiveFieldValues(JNIEnv *env, jclass cb, jobject obj,
jlongArray fieldIDs, jcharArray typecodes, jbyteArray data))
assert(!JDK_Version::is_gte_jdk14x_version(), "should only be used in 1.3.1 and earlier");
typeArrayOop tcodes = typeArrayOop(JNIHandles::resolve(typecodes));
typeArrayOop dbuf = typeArrayOop(JNIHandles::resolve(data));
typeArrayOop fids = typeArrayOop(JNIHandles::resolve(fieldIDs));
oop o = JNIHandles::resolve(obj);
if (o == NULL || fids == NULL || dbuf == NULL || tcodes == NULL) {
THROW(vmSymbols::java_lang_NullPointerException());
}
jsize nfids = fids->length();
if (nfids == 0) return;
if (tcodes->length() < nfids) {
THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException());
}
jsize off = 0;
/* loop through fields, setting values */
for (jsize i = 0; i < nfids; i++) {
jfieldID fid = (jfieldID)(intptr_t) fids->long_at(i);
int field_offset;
if (fid != NULL) {
// NULL is a legal value for fid, but retrieving the field offset
// would trigger an assertion in that case
field_offset = jfieldIDWorkaround::from_instance_jfieldID(o->klass(), fid);
}
switch (tcodes->char_at(i)) {
case 'Z':
if (fid != NULL) {
jboolean val = (dbuf->byte_at(off) != 0) ? JNI_TRUE : JNI_FALSE;
o->bool_field_put(field_offset, val);
}
off++;
break;
case 'B':
if (fid != NULL) {
o->byte_field_put(field_offset, dbuf->byte_at(off));
}
off++;
break;
case 'C':
if (fid != NULL) {
jchar val = ((dbuf->byte_at(off + 0) & 0xFF) << 8)
+ ((dbuf->byte_at(off + 1) & 0xFF) << 0);
o->char_field_put(field_offset, val);
}
off += 2;
break;
case 'S':
if (fid != NULL) {
jshort val = ((dbuf->byte_at(off + 0) & 0xFF) << 8)
+ ((dbuf->byte_at(off + 1) & 0xFF) << 0);
o->short_field_put(field_offset, val);
}
off += 2;
break;
case 'I':
if (fid != NULL) {
jint ival = ((dbuf->byte_at(off + 0) & 0xFF) << 24)
+ ((dbuf->byte_at(off + 1) & 0xFF) << 16)
+ ((dbuf->byte_at(off + 2) & 0xFF) << 8)
+ ((dbuf->byte_at(off + 3) & 0xFF) << 0);
o->int_field_put(field_offset, ival);
}
off += 4;
break;
case 'F':
if (fid != NULL) {
jint ival = ((dbuf->byte_at(off + 0) & 0xFF) << 24)
+ ((dbuf->byte_at(off + 1) & 0xFF) << 16)
+ ((dbuf->byte_at(off + 2) & 0xFF) << 8)
+ ((dbuf->byte_at(off + 3) & 0xFF) << 0);
jfloat fval = (*int_bits_to_float_fn)(env, NULL, ival);
o->float_field_put(field_offset, fval);
}
off += 4;
break;
case 'J':
if (fid != NULL) {
jlong lval = (((jlong) dbuf->byte_at(off + 0) & 0xFF) << 56)
+ (((jlong) dbuf->byte_at(off + 1) & 0xFF) << 48)
+ (((jlong) dbuf->byte_at(off + 2) & 0xFF) << 40)
+ (((jlong) dbuf->byte_at(off + 3) & 0xFF) << 32)
+ (((jlong) dbuf->byte_at(off + 4) & 0xFF) << 24)
+ (((jlong) dbuf->byte_at(off + 5) & 0xFF) << 16)
+ (((jlong) dbuf->byte_at(off + 6) & 0xFF) << 8)
+ (((jlong) dbuf->byte_at(off + 7) & 0xFF) << 0);
o->long_field_put(field_offset, lval);
}
off += 8;
break;
case 'D':
if (fid != NULL) {
jlong lval = (((jlong) dbuf->byte_at(off + 0) & 0xFF) << 56)
+ (((jlong) dbuf->byte_at(off + 1) & 0xFF) << 48)
+ (((jlong) dbuf->byte_at(off + 2) & 0xFF) << 40)
+ (((jlong) dbuf->byte_at(off + 3) & 0xFF) << 32)
+ (((jlong) dbuf->byte_at(off + 4) & 0xFF) << 24)
+ (((jlong) dbuf->byte_at(off + 5) & 0xFF) << 16)
+ (((jlong) dbuf->byte_at(off + 6) & 0xFF) << 8)
+ (((jlong) dbuf->byte_at(off + 7) & 0xFF) << 0);
jdouble dval = (*long_bits_to_double_fn)(env, NULL, lval);
o->double_field_put(field_offset, dval);
}
off += 8;
break;
default:
// Illegal typecode
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "illegal typecode");
}
}
JVM_END
JVM_ENTRY(void, JVM_GetPrimitiveFieldValues(JNIEnv *env, jclass cb, jobject obj,
jlongArray fieldIDs, jcharArray typecodes, jbyteArray data))
assert(!JDK_Version::is_gte_jdk14x_version(), "should only be used in 1.3.1 and earlier");
typeArrayOop tcodes = typeArrayOop(JNIHandles::resolve(typecodes));
typeArrayOop dbuf = typeArrayOop(JNIHandles::resolve(data));
typeArrayOop fids = typeArrayOop(JNIHandles::resolve(fieldIDs));
oop o = JNIHandles::resolve(obj);
if (o == NULL || fids == NULL || dbuf == NULL || tcodes == NULL) {
THROW(vmSymbols::java_lang_NullPointerException());
}
jsize nfids = fids->length();
if (nfids == 0) return;
if (tcodes->length() < nfids) {
THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException());
}
/* loop through fields, fetching values */
jsize off = 0;
for (jsize i = 0; i < nfids; i++) {
jfieldID fid = (jfieldID)(intptr_t) fids->long_at(i);
if (fid == NULL) {
THROW(vmSymbols::java_lang_NullPointerException());
}
int field_offset = jfieldIDWorkaround::from_instance_jfieldID(o->klass(), fid);
switch (tcodes->char_at(i)) {
case 'Z':
{
jboolean val = o->bool_field(field_offset);
dbuf->byte_at_put(off++, (val != 0) ? 1 : 0);
}
break;
case 'B':
dbuf->byte_at_put(off++, o->byte_field(field_offset));
break;
case 'C':
{
jchar val = o->char_field(field_offset);
dbuf->byte_at_put(off++, (val >> 8) & 0xFF);
dbuf->byte_at_put(off++, (val >> 0) & 0xFF);
}
break;
case 'S':
{
jshort val = o->short_field(field_offset);
dbuf->byte_at_put(off++, (val >> 8) & 0xFF);
dbuf->byte_at_put(off++, (val >> 0) & 0xFF);
}
break;
case 'I':
{
jint val = o->int_field(field_offset);
dbuf->byte_at_put(off++, (val >> 24) & 0xFF);
dbuf->byte_at_put(off++, (val >> 16) & 0xFF);
dbuf->byte_at_put(off++, (val >> 8) & 0xFF);
dbuf->byte_at_put(off++, (val >> 0) & 0xFF);
}
break;
case 'F':
{
jfloat fval = o->float_field(field_offset);
jint ival = (*float_to_int_bits_fn)(env, NULL, fval);
dbuf->byte_at_put(off++, (ival >> 24) & 0xFF);
dbuf->byte_at_put(off++, (ival >> 16) & 0xFF);
dbuf->byte_at_put(off++, (ival >> 8) & 0xFF);
dbuf->byte_at_put(off++, (ival >> 0) & 0xFF);
}
break;
case 'J':
{
jlong val = o->long_field(field_offset);
dbuf->byte_at_put(off++, (val >> 56) & 0xFF);
dbuf->byte_at_put(off++, (val >> 48) & 0xFF);
dbuf->byte_at_put(off++, (val >> 40) & 0xFF);
dbuf->byte_at_put(off++, (val >> 32) & 0xFF);
dbuf->byte_at_put(off++, (val >> 24) & 0xFF);
dbuf->byte_at_put(off++, (val >> 16) & 0xFF);
dbuf->byte_at_put(off++, (val >> 8) & 0xFF);
dbuf->byte_at_put(off++, (val >> 0) & 0xFF);
}
break;
case 'D':
{
jdouble dval = o->double_field(field_offset);
jlong lval = (*double_to_long_bits_fn)(env, NULL, dval);
dbuf->byte_at_put(off++, (lval >> 56) & 0xFF);
dbuf->byte_at_put(off++, (lval >> 48) & 0xFF);
dbuf->byte_at_put(off++, (lval >> 40) & 0xFF);
dbuf->byte_at_put(off++, (lval >> 32) & 0xFF);
dbuf->byte_at_put(off++, (lval >> 24) & 0xFF);
dbuf->byte_at_put(off++, (lval >> 16) & 0xFF);
dbuf->byte_at_put(off++, (lval >> 8) & 0xFF);
dbuf->byte_at_put(off++, (lval >> 0) & 0xFF);
}
break;
default:
// Illegal typecode
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "illegal typecode");
}
}
JVM_END
// Shared JNI/JVM entry points //////////////////////////////////////////////////////////////

View file

@@ -374,6 +374,9 @@ JVM_NewMultiArray(JNIEnv *env, jclass eltClass, jintArray dim);
/*
* java.lang.Class and java.lang.ClassLoader
*/
#define JVM_CALLER_DEPTH -1
/*
* Returns the class in which the code invoking the native method
* belongs.

View file

@@ -35,22 +35,6 @@ jclass find_class_from_class_loader(JNIEnv* env, Symbol* name, jboolean init, Ha
void trace_class_resolution(Klass* to_class);
/*
* Support for Serialization and RMI. Currently used by HotSpot only.
*/
extern "C" {
void JNICALL
JVM_SetPrimitiveFieldValues(JNIEnv *env, jclass cb, jobject obj,
jlongArray fieldIDs, jcharArray typecodes, jbyteArray data);
void JNICALL
JVM_GetPrimitiveFieldValues(JNIEnv *env, jclass cb, jobject obj,
jlongArray fieldIDs, jcharArray typecodes, jbyteArray data);
}
/*
* Support for -Xcheck:jni
*/

View file

@@ -2755,13 +2755,26 @@ void VM_RedefineClasses::AdjustCpoolCacheAndVtable::do_klass(Klass* k) {
// InstanceKlass around to hold obsolete methods so we don't have
// any other InstanceKlass embedded vtables to update. The vtable
// holds the Method*s for virtual (but not final) methods.
if (ik->vtable_length() > 0 && ik->is_subtype_of(_the_class_oop)) {
// Default methods, or concrete methods in interfaces are stored
// in the vtable, so if an interface changes we need to check
// adjust_method_entries() for every InstanceKlass, which will also
// adjust the default method vtable indices.
// We also need to adjust any default method entries that are
// not yet in the vtable, because the vtable setup is in progress.
// This must be done after we adjust the default_methods and
// default_vtable_indices for methods already in the vtable.
if (ik->vtable_length() > 0 && (_the_class_oop->is_interface()
|| ik->is_subtype_of(_the_class_oop))) {
// ik->vtable() creates a wrapper object; rm cleans it up
ResourceMark rm(_thread);
ik->vtable()->adjust_method_entries(_matching_old_methods,
_matching_new_methods,
_matching_methods_length,
&trace_name_printed);
ik->adjust_default_methods(_matching_old_methods,
_matching_new_methods,
_matching_methods_length,
&trace_name_printed);
}
// If the current class has an itable and we are either redefining an
@@ -2931,7 +2944,8 @@ void VM_RedefineClasses::check_methods_and_mark_as_obsolete(
old_method->set_is_obsolete();
obsolete_count++;
// obsolete methods need a unique idnum
// obsolete methods need a unique idnum so they become new entries in
// the jmethodID cache in InstanceKlass
u2 num = InstanceKlass::cast(_the_class_oop)->next_method_idnum();
if (num != ConstMethod::UNSET_IDNUM) {
old_method->set_method_idnum(num);
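When an interface is redefined, the first hunk above now walks every InstanceKlass and rewrites both vtable entries and default-method entries that still refer to obsolete methods. A standalone sketch of that "adjust entries" pattern; the types are stand-ins, not HotSpot classes:

#include <cstddef>
#include <vector>

struct Method { int id; };   // stand-in for HotSpot's Method*

// Rewrite every slot that still refers to an old (obsolete) method so it refers
// to the matching new method; the same walk now also covers default_methods().
static void adjust_entries(std::vector<Method*>& table,
                           const std::vector<Method*>& old_methods,
                           const std::vector<Method*>& new_methods) {
  for (size_t i = 0; i < table.size(); ++i) {
    for (size_t j = 0; j < old_methods.size(); ++j) {
      if (table[i] == old_methods[j]) {
        table[i] = new_methods[j];
        break;
      }
    }
  }
}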

View file

@@ -187,12 +187,34 @@ oop MethodHandles::init_method_MemberName(Handle mname, CallInfo& info) {
receiver_limit = m->method_holder();
assert(receiver_limit->verify_itable_index(vmindex), "");
flags |= IS_METHOD | (JVM_REF_invokeInterface << REFERENCE_KIND_SHIFT);
if (TraceInvokeDynamic) {
ResourceMark rm;
tty->print_cr("memberName: invokeinterface method_holder::method: %s, receiver: %s, itableindex: %d, access_flags:",
Method::name_and_sig_as_C_string(receiver_limit(), m->name(), m->signature()),
receiver_limit()->internal_name(), vmindex);
m->access_flags().print_on(tty);
if (!m->is_abstract()) {
tty->print("default");
}
tty->cr();
}
break;
case CallInfo::vtable_call:
vmindex = info.vtable_index();
flags |= IS_METHOD | (JVM_REF_invokeVirtual << REFERENCE_KIND_SHIFT);
assert(receiver_limit->is_subtype_of(m->method_holder()), "virtual call must be type-safe");
if (TraceInvokeDynamic) {
ResourceMark rm;
tty->print_cr("memberName: invokevirtual method_holder::method: %s, receiver: %s, vtableindex: %d, access_flags:",
Method::name_and_sig_as_C_string(receiver_limit(), m->name(), m->signature()),
receiver_limit()->internal_name(), vmindex);
m->access_flags().print_on(tty);
if (m->is_default_method()) {
tty->print("default");
}
tty->cr();
}
break;
case CallInfo::direct_call:

View file

@@ -129,10 +129,6 @@ extern "C" {
#define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f)
static JNINativeMethod lookup_special_native_methods[] = {
// Next two functions only exist for compatibility with 1.3.1 and earlier.
{ CC"Java_java_io_ObjectOutputStream_getPrimitiveFieldValues", NULL, FN_PTR(JVM_GetPrimitiveFieldValues) }, // intercept ObjectOutputStream getPrimitiveFieldValues for faster serialization
{ CC"Java_java_io_ObjectInputStream_setPrimitiveFieldValues", NULL, FN_PTR(JVM_SetPrimitiveFieldValues) }, // intercept ObjectInputStream setPrimitiveFieldValues for faster serialization
{ CC"Java_sun_misc_Unsafe_registerNatives", NULL, FN_PTR(JVM_RegisterUnsafeMethods) },
{ CC"Java_java_lang_invoke_MethodHandleNatives_registerNatives", NULL, FN_PTR(JVM_RegisterMethodHandleMethods) },
{ CC"Java_sun_misc_Perf_registerNatives", NULL, FN_PTR(JVM_RegisterPerfMethods) },
@@ -140,9 +136,8 @@ static JNINativeMethod lookup_special_native_methods[] = {
};
static address lookup_special_native(char* jni_name) {
int i = !JDK_Version::is_gte_jdk14x_version() ? 0 : 2; // see comment in lookup_special_native_methods
int count = sizeof(lookup_special_native_methods) / sizeof(JNINativeMethod);
for (; i < count; i++) {
for (int i = 0; i < count; i++) {
// NB: To ignore the jni prefix and jni postfix, strstr is used for matching.
if (strstr(jni_name, lookup_special_native_methods[i].name) != NULL) {
return CAST_FROM_FN_PTR(address, lookup_special_native_methods[i].fnPtr);
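With the 1.3.1-only serialization intercepts removed, the lookup no longer needs a version-dependent starting index and simply scans the whole table. A standalone sketch of the strstr-based lookup; the entry type and helper name are stand-ins:

#include <cstring>

struct NativeEntry {
  const char* name;   // canonical JNI name, without OS-specific prefix/suffix
  void*       fn;
};

// Return the first table entry whose canonical name occurs inside jni_name;
// strstr makes the match ignore any platform prefix or postfix decoration.
static void* lookup_special(const NativeEntry* table, int count, const char* jni_name) {
  for (int i = 0; i < count; ++i) {
    if (std::strstr(jni_name, table[i].name) != NULL) {
      return table[i].fn;
    }
  }
  return NULL;
}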
