Merge

commit c217bdda86
614 changed files with 24480 additions and 7049 deletions
.hgtags

@@ -280,3 +280,5 @@ f0c5e4b732da823bdaa4184133675f384e7cd68d jdk9-b33
 a137992d750c72f6f944f341aa19b0d0d96afe0c jdk9-b35
 41df50e7303daf73c0d661ef601c4fe250915de5 jdk9-b36
 b409bc51bc23cfd51f2bd04ea919ec83535af9d0 jdk9-b37
+948cceef81ba4cb34bc233e7cc5952951ff04e88 jdk9-b38
+4e7c4d692e934cb9023af8201e7c2b510e9c4ee1 jdk9-b39
.hgtags

@@ -280,3 +280,5 @@ e4ba01b726e263953ae129be37c94de6ed145b1d jdk9-b33
 c173ba994245380fb11ef077d1e59823386840eb jdk9-b35
 201d4e235d597a25a2d3ee1404394789ba386119 jdk9-b36
 723a67b0c442391447b1d8aad8b249d06d1032e8 jdk9-b37
+d42c0a90afc3c66ca87543076ec9aafd4b4680de jdk9-b38
+512dbbeb1730edcebfec873fc3f1455660b32000 jdk9-b39
@@ -684,8 +684,6 @@ AC_DEFUN_ONCE([BASIC_SETUP_OUTPUT_DIR],
   AC_SUBST(CONF_NAME, $CONF_NAME)
   AC_SUBST(OUTPUT_ROOT, $OUTPUT_ROOT)
 
-  # Most of the probed defines are put into config.h
-  AC_CONFIG_HEADERS([$OUTPUT_ROOT/config.h:$AUTOCONF_DIR/config.h.in])
   # The spec.gmk file contains all variables for the make system.
   AC_CONFIG_FILES([$OUTPUT_ROOT/spec.gmk:$AUTOCONF_DIR/spec.gmk.in])
   # The hotspot-spec.gmk file contains legacy variables for the hotspot make system.
@@ -694,8 +692,6 @@ AC_DEFUN_ONCE([BASIC_SETUP_OUTPUT_DIR],
   AC_CONFIG_FILES([$OUTPUT_ROOT/bootcycle-spec.gmk:$AUTOCONF_DIR/bootcycle-spec.gmk.in])
   # The compare.sh is used to compare the build output to other builds.
   AC_CONFIG_FILES([$OUTPUT_ROOT/compare.sh:$AUTOCONF_DIR/compare.sh.in])
-  # Spec.sh is currently used by compare-objects.sh
-  AC_CONFIG_FILES([$OUTPUT_ROOT/spec.sh:$AUTOCONF_DIR/spec.sh.in])
   # The generated Makefile knows where the spec.gmk is and where the source is.
   # You can run make from the OUTPUT_ROOT, or from the top-level Makefile
   # which will look for generated configurations
File diff suppressed because it is too large.
@@ -131,7 +131,7 @@ yum_help() {
     pulse)
       PKGHANDLER_COMMAND="sudo yum install pulseaudio-libs-devel" ;;
     x11)
-      PKGHANDLER_COMMAND="sudo yum install libXtst-devel libXt-devel libXrender-devel" ;;
+      PKGHANDLER_COMMAND="sudo yum install libXtst-devel libXt-devel libXrender-devel libXi-devel" ;;
     ccache)
       PKGHANDLER_COMMAND="sudo yum install ccache" ;;
   esac
@@ -91,85 +91,93 @@ AC_DEFUN_ONCE([LIB_SETUP_X11],
   # Check for X Windows
   #
 
-  # Check if the user has specified sysroot, but not --x-includes or --x-libraries.
-  # Make a simple check for the libraries at the sysroot, and setup --x-includes and
-  # --x-libraries for the sysroot, if that seems to be correct.
-  if test "x$OPENJDK_TARGET_OS" = "xlinux"; then
-    if test "x$SYSROOT" != "x"; then
-      if test "x$x_includes" = xNONE; then
-        if test -f "$SYSROOT/usr/X11R6/include/X11/Xlib.h"; then
-          x_includes="$SYSROOT/usr/X11R6/include"
-        elif test -f "$SYSROOT/usr/include/X11/Xlib.h"; then
-          x_includes="$SYSROOT/usr/include"
+  if test "x$X11_NOT_NEEDED" = xyes; then
+    if test "x${with_x}" != x; then
+      AC_MSG_WARN([X11 is not used, so --with-x is ignored])
+    fi
+    X_CFLAGS=
+    X_LIBS=
+  else
+    # Check if the user has specified sysroot, but not --x-includes or --x-libraries.
+    # Make a simple check for the libraries at the sysroot, and setup --x-includes and
+    # --x-libraries for the sysroot, if that seems to be correct.
+    if test "x$OPENJDK_TARGET_OS" = "xlinux"; then
+      if test "x$SYSROOT" != "x"; then
+        if test "x$x_includes" = xNONE; then
+          if test -f "$SYSROOT/usr/X11R6/include/X11/Xlib.h"; then
+            x_includes="$SYSROOT/usr/X11R6/include"
+          elif test -f "$SYSROOT/usr/include/X11/Xlib.h"; then
+            x_includes="$SYSROOT/usr/include"
+          fi
         fi
-      fi
-      if test "x$x_libraries" = xNONE; then
-        if test -f "$SYSROOT/usr/X11R6/lib/libX11.so"; then
-          x_libraries="$SYSROOT/usr/X11R6/lib"
-        elif test "$SYSROOT/usr/lib64/libX11.so" && test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
-          x_libraries="$SYSROOT/usr/lib64"
-        elif test -f "$SYSROOT/usr/lib/libX11.so"; then
-          x_libraries="$SYSROOT/usr/lib"
+        if test "x$x_libraries" = xNONE; then
+          if test -f "$SYSROOT/usr/X11R6/lib/libX11.so"; then
+            x_libraries="$SYSROOT/usr/X11R6/lib"
+          elif test "$SYSROOT/usr/lib64/libX11.so" && test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
+            x_libraries="$SYSROOT/usr/lib64"
+          elif test -f "$SYSROOT/usr/lib/libX11.so"; then
+            x_libraries="$SYSROOT/usr/lib"
+          fi
         fi
       fi
     fi
-  fi
 
   # Now let autoconf do it's magic
   AC_PATH_X
   AC_PATH_XTRA
 
   # AC_PATH_XTRA creates X_LIBS and sometimes adds -R flags. When cross compiling
   # this doesn't make sense so we remove it.
   if test "x$COMPILE_TYPE" = xcross; then
     X_LIBS=`$ECHO $X_LIBS | $SED 's/-R \{0,1\}[[^ ]]*//g'`
   fi
 
-  if test "x$no_x" = xyes && test "x$X11_NOT_NEEDED" != xyes; then
+  if test "x$no_x" = xyes; then
     HELP_MSG_MISSING_DEPENDENCY([x11])
     AC_MSG_ERROR([Could not find X11 libraries. $HELP_MSG])
   fi
 
   if test "x$OPENJDK_TARGET_OS" = xsolaris; then
     OPENWIN_HOME="/usr/openwin"
     X_CFLAGS="-I$SYSROOT$OPENWIN_HOME/include -I$SYSROOT$OPENWIN_HOME/include/X11/extensions"
     X_LIBS="-L$SYSROOT$OPENWIN_HOME/sfw/lib$OPENJDK_TARGET_CPU_ISADIR \
         -L$SYSROOT$OPENWIN_HOME/lib$OPENJDK_TARGET_CPU_ISADIR \
         -R$OPENWIN_HOME/sfw/lib$OPENJDK_TARGET_CPU_ISADIR \
         -R$OPENWIN_HOME/lib$OPENJDK_TARGET_CPU_ISADIR"
   fi
 
   AC_LANG_PUSH(C)
   OLD_CFLAGS="$CFLAGS"
   CFLAGS="$CFLAGS $SYSROOT_CFLAGS $X_CFLAGS"
 
   # Need to include Xlib.h and Xutil.h to avoid "present but cannot be compiled" warnings on Solaris 10
   AC_CHECK_HEADERS([X11/extensions/shape.h X11/extensions/Xrender.h X11/extensions/XTest.h X11/Intrinsic.h],
-      [X11_A_OK=yes],
-      [X11_A_OK=no; break],
+      [X11_HEADERS_OK=yes],
+      [X11_HEADERS_OK=no; break],
       [
         # include <X11/Xlib.h>
         # include <X11/Xutil.h>
       ]
   )
 
+  if test "x$X11_HEADERS_OK" = xno; then
+    HELP_MSG_MISSING_DEPENDENCY([x11])
+    AC_MSG_ERROR([Could not find all X11 headers (shape.h Xrender.h XTest.h Intrinsic.h). $HELP_MSG])
+  fi
+
   # If XLinearGradient isn't available in Xrender.h, signal that it needs to be
   # defined in libawt_xawt.
   AC_MSG_CHECKING([if XlinearGradient is defined in Xrender.h])
   AC_COMPILE_IFELSE(
       [AC_LANG_PROGRAM([[#include <X11/extensions/Xrender.h>]],
          [[XLinearGradient x;]])],
       [AC_MSG_RESULT([yes])],
       [AC_MSG_RESULT([no])
        X_CFLAGS="$X_CFLAGS -DSOLARIS10_NO_XRENDER_STRUCTS"])
 
   CFLAGS="$OLD_CFLAGS"
   AC_LANG_POP(C)
-
-  if test "x$X11_A_OK" = xno && test "x$X11_NOT_NEEDED" != xyes; then
-    HELP_MSG_MISSING_DEPENDENCY([x11])
-    AC_MSG_ERROR([Could not find all X11 headers (shape.h Xrender.h XTest.h Intrinsic.h). $HELP_MSG])
-  fi
+  fi # X11_NOT_NEEDED
 
   AC_SUBST(X_CFLAGS)
   AC_SUBST(X_LIBS)
@@ -264,7 +272,7 @@ AC_DEFUN([LIB_BUILD_FREETYPE],
     fi
     # Now check if configure found a version of 'msbuild.exe'
     if test "x$BUILD_FREETYPE" = xyes && test "x$MSBUILD" == x ; then
-      AC_MSG_WARN([Can't find an msbuild.exe executable (you may try to install .NET 4.0) - ignoring --with-freetype-src])
+      AC_MSG_WARN([Can not find an msbuild.exe executable (you may try to install .NET 4.0) - ignoring --with-freetype-src])
       BUILD_FREETYPE=no
     fi
 
@@ -335,27 +343,50 @@ AC_DEFUN([LIB_CHECK_POTENTIAL_FREETYPE],
   POTENTIAL_FREETYPE_LIB_PATH="$2"
   METHOD="$3"
 
-  # First check if the files exists.
-  if test -s "$POTENTIAL_FREETYPE_INCLUDE_PATH/ft2build.h"; then
-    # We found an arbitrary include file. That's a good sign.
-    AC_MSG_NOTICE([Found freetype include files at $POTENTIAL_FREETYPE_INCLUDE_PATH using $METHOD])
-    FOUND_FREETYPE=yes
+  # Let's start with an optimistic view of the world :-)
+  FOUND_FREETYPE=yes
 
-    FREETYPE_LIB_NAME="${LIBRARY_PREFIX}freetype${SHARED_LIBRARY_SUFFIX}"
-    if ! test -s "$POTENTIAL_FREETYPE_LIB_PATH/$FREETYPE_LIB_NAME"; then
-      AC_MSG_NOTICE([Could not find $POTENTIAL_FREETYPE_LIB_PATH/$FREETYPE_LIB_NAME. Ignoring location.])
+  # First look for the canonical freetype main include file ft2build.h.
+  if ! test -s "$POTENTIAL_FREETYPE_INCLUDE_PATH/ft2build.h"; then
+    # Oh no! Let's try in the freetype2 directory. This is needed at least at Mac OS X Yosemite.
+    POTENTIAL_FREETYPE_INCLUDE_PATH="$POTENTIAL_FREETYPE_INCLUDE_PATH/freetype2"
+    if ! test -s "$POTENTIAL_FREETYPE_INCLUDE_PATH/ft2build.h"; then
+      # Fail.
       FOUND_FREETYPE=no
+    fi
+  fi
+
+  if test "x$FOUND_FREETYPE" = xyes; then
+    # Include file found, let's continue the sanity check.
+    AC_MSG_NOTICE([Found freetype include files at $POTENTIAL_FREETYPE_INCLUDE_PATH using $METHOD])
+
+    # Reset to default value
+    FREETYPE_BASE_NAME=freetype
+    FREETYPE_LIB_NAME="${LIBRARY_PREFIX}${FREETYPE_BASE_NAME}${SHARED_LIBRARY_SUFFIX}"
+    if ! test -s "$POTENTIAL_FREETYPE_LIB_PATH/$FREETYPE_LIB_NAME"; then
+      if test "x$OPENJDK_TARGET_OS" = xmacosx \
+          && test -s "$POTENTIAL_FREETYPE_LIB_PATH/${LIBRARY_PREFIX}freetype.6${SHARED_LIBRARY_SUFFIX}"; then
+        # On Mac OS X Yosemite, the symlink from libfreetype.dylib to libfreetype.6.dylib disappeared. Check
+        # for the .6 version explicitly.
+        FREETYPE_BASE_NAME=freetype.6
+        FREETYPE_LIB_NAME="${LIBRARY_PREFIX}${FREETYPE_BASE_NAME}${SHARED_LIBRARY_SUFFIX}"
+        AC_MSG_NOTICE([Compensating for missing symlink by using version 6 explicitly])
+      else
+        AC_MSG_NOTICE([Could not find $POTENTIAL_FREETYPE_LIB_PATH/$FREETYPE_LIB_NAME. Ignoring location.])
+        FOUND_FREETYPE=no
+      fi
     else
       if test "x$OPENJDK_TARGET_OS" = xwindows; then
         # On Windows, we will need both .lib and .dll file.
-        if ! test -s "$POTENTIAL_FREETYPE_LIB_PATH/freetype.lib"; then
-          AC_MSG_NOTICE([Could not find $POTENTIAL_FREETYPE_LIB_PATH/freetype.lib. Ignoring location.])
+        if ! test -s "$POTENTIAL_FREETYPE_LIB_PATH/${FREETYPE_BASE_NAME}.lib"; then
+          AC_MSG_NOTICE([Could not find $POTENTIAL_FREETYPE_LIB_PATH/${FREETYPE_BASE_NAME}.lib. Ignoring location.])
           FOUND_FREETYPE=no
         fi
       elif test "x$OPENJDK_TARGET_OS" = xsolaris \
          && test -s "$POTENTIAL_FREETYPE_LIB_PATH$OPENJDK_TARGET_CPU_ISADIR/$FREETYPE_LIB_NAME"; then
        # Found lib in isa dir, use that instead.
        POTENTIAL_FREETYPE_LIB_PATH="$POTENTIAL_FREETYPE_LIB_PATH$OPENJDK_TARGET_CPU_ISADIR"
+        AC_MSG_NOTICE([Rewriting to use $POTENTIAL_FREETYPE_LIB_PATH instead])
       fi
     fi
   fi
@@ -392,6 +423,8 @@ AC_DEFUN_ONCE([LIB_SETUP_FREETYPE],
   AC_ARG_ENABLE(freetype-bundling, [AS_HELP_STRING([--disable-freetype-bundling],
       [disable bundling of the freetype library with the build result @<:@enabled on Windows or when using --with-freetype, disabled otherwise@:>@])])
 
+  # Need to specify explicitly since it needs to be overridden on some versions of macosx
+  FREETYPE_BASE_NAME=freetype
   FREETYPE_CFLAGS=
   FREETYPE_LIBS=
   FREETYPE_BUNDLE_LIB_PATH=
@@ -575,9 +608,9 @@ AC_DEFUN_ONCE([LIB_SETUP_FREETYPE],
     if test "x$FREETYPE_LIBS" = x; then
       BASIC_FIXUP_PATH(FREETYPE_LIB_PATH)
       if test "x$OPENJDK_TARGET_OS" = xwindows; then
-        FREETYPE_LIBS="$FREETYPE_LIB_PATH/freetype.lib"
+        FREETYPE_LIBS="$FREETYPE_LIB_PATH/$FREETYPE_BASE_NAME.lib"
       else
-        FREETYPE_LIBS="-L$FREETYPE_LIB_PATH -lfreetype"
+        FREETYPE_LIBS="-L$FREETYPE_LIB_PATH -l$FREETYPE_BASE_NAME"
       fi
     fi
 
common/bin/boot_cycle.sh (deleted)

@@ -1,77 +0,0 @@
-#!/bin/bash
-#
-# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-# The boot_cycle.sh script performs two complete image builds (no javadoc though....)
-# where the second build uses the first build as the boot jdk.
-#
-# This is useful to verify that the build is self hoisting and assists
-# in flushing out bugs. You can follow up with compare_objects.sh to check
-# that the two boot_cycle_?/images/j2sdk are identical. They should be.
-#
-# Usage:
-# Specify the configure arguments to boot_cycle.sh, for example:
-#
-# sh common/bin/boot_cycle.sh --enable-debug --with-jvm-variants=server
-#
-# The same arguments will be used for both builds, except of course --with-boot-jdk
-# that will be adjusted to boot_cycle_1 for the second build.
-
-SCRIPT_DIR=`pwd`/`dirname $0`
-ROOT_DIR=`(cd $SCRIPT_DIR/../.. ; pwd)`
-BUILD_DIR=$ROOT_DIR/build
-mkdir -p $BUILD_DIR
-AUTOCONF_DIR=`(cd $SCRIPT_DIR/../autoconf ; pwd)`
-BOOT_CYCLE_1_DIR=$BUILD_DIR/boot_cycle_1
-BOOT_CYCLE_2_DIR=$BUILD_DIR/boot_cycle_2
-
-# Create the boot cycle dirs in the build directory.
-mkdir -p $BOOT_CYCLE_1_DIR
-mkdir -p $BOOT_CYCLE_2_DIR
-
-cd $BOOT_CYCLE_1_DIR
-# Configure!
-sh $AUTOCONF_DIR/configure "$@"
-# Now build!
-make images
-
-if ! test -x $BOOT_CYCLE_1_DIR/images/j2sdk-image/bin/java ; then
-  echo Failed to build the executable $BOOT_CYCLE_1_DIR/images/j2sdk-image/bin/java
-  exit 1
-fi
-
-cd $BOOT_CYCLE_2_DIR
-# Pickup the configure arguments, but drop any --with-boot-jdk=....
-# and add the correct --with-boot-jdk=...boot_cycle_1... at the end.
-ARGUMENTS="`cat $BOOT_CYCLE_1_DIR/configure-arguments|sed 's/--with-boot-jdk=[^ ]*//'` --with-boot-jdk=$BOOT_CYCLE_1_DIR/images/j2sdk-image"
-# Configure using these adjusted arguments.
-sh $AUTOCONF_DIR/configure $ARGUMENTS
-# Now build!
-make images
-
-if ! test -x $BOOT_CYCLE_2_DIR/images/j2sdk-image/bin/java ; then
-  echo Failed to build the final executable $BOOT_CYCLE_2_DIR/images/j2sdk-image/bin/java
-  exit 1
-fi
common/bin/compare-objects.sh (deleted)

@@ -1,235 +0,0 @@
-#!/bin/bash
-#
-# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-# MANUAL
-#
-# ./common/bin/compare-objects.sh old_jdk_build_dir new_jdk_build_dir
-#
-# Compares object files
-#
-
-if [ "x$1" = "x-h" ] || [ "x$1" = "x--help" ] || [ "x$1" == "x" ]; then
-  echo "bash ./common/bin/compare-objects.sh old_jdk_build_dir new_jdk_build_dir <pattern>"
-  echo ""
-  echo "Compare object files"
-  echo ""
-  exit 10
-fi
-
-#######
-#
-# List of files (grep patterns) that are ignored
-#
-# 1) hotspot object files
-IGNORE="-e hotspot"
-
-# 2) various build artifacts: sizer.32.o sizer.64.o dummyodbc.o
-# these are produced during build and then e.g run to produce other data
-# i.e not directly put into build => safe to ignore
-IGNORE="${IGNORE} -e sizer.32.o -e sizer.64.o"
-IGNORE="${IGNORE} -e dummyodbc.o"
-IGNORE="${IGNORE} -e genSolarisConstants.o"
-IGNORE="${IGNORE} -e genUnixConstants.o"
-
-OLD="$1"
-NEW="$2"
-shift; shift
-PATTERN="$*"
-
-if [ -f $NEW/spec.sh ]; then
-  . $NEW/spec.sh
-elif [ -f $NEW/../../spec.sh ]; then
-  . $NEW/../../spec.sh
-elif [ -f $OLD/spec.sh ]; then
-  . $OLD/spec.sh
-elif [ -f $OLD/../../spec.sh ]; then
-  . $OLD/../../spec.sh
-else
-  echo "Unable to find spec.sh"
-  echo "Giving up"
-  exit 1
-fi
-
-export COMPARE_ROOT=/tmp/cimages.$USER/objects
-mkdir -p $COMPARE_ROOT
-
-(${CD} $OLD && ${FIND} . -name '*.o') > $COMPARE_ROOT/list.old
-(${CD} $NEW && ${FIND} . -name '*.o') > $COMPARE_ROOT/list.new
-
-# On macosx JobjC is build in both i386 and x86_64 variant (universial binary)
-# but new build only builds the x86_64
-# Remove the 386 variants from comparison...to avoid "false" positives
-${GREP} -v 'JObjC.dst/Objects-normal/i386' $COMPARE_ROOT/list.old > $COMPARE_ROOT/list.old.new
-${CP} $COMPARE_ROOT/list.old $COMPARE_ROOT/list.old.full
-${CP} $COMPARE_ROOT/list.old.new $COMPARE_ROOT/list.old
-
-findnew() {
-  arg_1=$1
-  arg_2=$2
-
-  # special case 1 unpack-cmd => unpackexe
-  arg_1=`${ECHO} $arg_1 | ${SED} 's!unpack-cmd!unpackexe!g'`
-  arg_2=`${ECHO} $arg_2 | ${SED} 's!unpack-cmd!unpackexe!g'`
-
-  # special case 2 /JObjC.dst/ => /libjobjc/
-  arg_1=`${ECHO} $arg_1 | ${SED} 's!/JObjC.dst/!/libjobjc/!g'`
-  arg_2=`${ECHO} $arg_2 | ${SED} 's!/JObjC.dst/!/libjobjc/!g'`
-
-  full=`${ECHO} $arg_1 | ${SED} 's!\.!\\\.!g'`
-  medium=`${ECHO} $arg_1 | ${SED} 's!.*/\([^/]*/[^/]*\)!\1!'`
-  short=`${ECHO} $arg_2 | ${SED} 's!\.!\\\.!g'`
-
-  if [ "`${GREP} -c "/$full" $COMPARE_ROOT/list.new`" -eq 1 ]
-  then
-    ${ECHO} $NEW/$arg_1
-    return
-  fi
-
-  if [ "`${GREP} -c "$medium" $COMPARE_ROOT/list.new`" -eq 1 ]
-  then
-    ${GREP} "$medium" $COMPARE_ROOT/list.new
-    return
-  fi
-
-  if [ "`${GREP} -c "/$short" $COMPARE_ROOT/list.new`" -eq 1 ]
-  then
-    ${GREP} "/$short" $COMPARE_ROOT/list.new
-    return
-  fi
-
-  # old style has "dir" before obj{64}
-  dir=`${ECHO} $arg_1 | ${SED} 's!.*/\([^/]*\)/obj[64]*.*!\1!g'`
-  if [ -n "$dir" -a "$dir" != "$arg_1" ]
-  then
-    if [ "`${GREP} $dir $COMPARE_ROOT/list.new | ${GREP} -c "/$short"`" -eq 1 ]
-    then
-      ${GREP} $dir $COMPARE_ROOT/list.new | ${GREP} "/$short"
-      return
-    fi
-
-    # Try with lib$dir/
-    if [ "`${GREP} "lib$dir/" $COMPARE_ROOT/list.new | ${GREP} -c "/$short"`" -eq 1 ]
-    then
-      ${GREP} "lib$dir/" $COMPARE_ROOT/list.new | ${GREP} "/$short"
-      return
-    fi
-
-    # Try with $dir_objs
-    if [ "`${GREP} "${dir}_objs" $COMPARE_ROOT/list.new | ${GREP} -c "/$short"`" -eq 1 ]
-    then
-      ${GREP} "${dir}_objs" $COMPARE_ROOT/list.new | ${GREP} "/$short"
-      return
-    fi
-  fi
-
-  # check for some specifics...
-  for i in demo hotspot jobjc
-  do
-    if [ "`${ECHO} $full | ${GREP} -c $i`" -gt 0 ]
-    then
-      if [ "`${GREP} $i $COMPARE_ROOT/list.new | ${GREP} -c "/$short"`" -eq 1 ]
-      then
-        ${GREP} $i $COMPARE_ROOT/list.new | ${GREP} "/$short"
-        return
-      fi
-    fi
-  done
-
-  # check for specific demo
-  demo=`${ECHO} $arg_1 | ${SED} 's!.*/demo/jvmti/\([^/]*\)/.*!\1!g'`
-  if [ -n "$demo" -a "$dir" != "$demo" ]
-  then
-    if [ "`${GREP} $demo $COMPARE_ROOT/list.new | ${GREP} -c "/$short"`" -eq 1 ]
-    then
-      ${GREP} $demo $COMPARE_ROOT/list.new | ${GREP} "/$short"
-      return
-    fi
-  fi
-
-  return
-}
-
-compare() {
-  old=$1
-  new=$2
-  ${DIFF} $old $new > /dev/null
-  res=$?
-  if [ $res -eq 0 ]
-  then
-    ${ECHO} 0
-    return
-  fi
-
-  # check if stripped objects gives equality
-  ${CP} $old $COMPARE_ROOT/`basename $old`.old
-  ${CP} $new $COMPARE_ROOT/`basename $old`.new
-  ${POST_STRIP_CMD} $COMPARE_ROOT/`basename $old`.old $COMPARE_ROOT/`basename $old`.new > /dev/null 2>&1
-  ${DIFF} $COMPARE_ROOT/`basename $old`.old $COMPARE_ROOT/`basename $old`.new > /dev/null
-  res=$?
-  ${RM} $COMPARE_ROOT/`basename $old`.old $COMPARE_ROOT/`basename $old`.new
-  if [ $res -eq 0 ]
-  then
-    ${ECHO} S
-    return
-  fi
-
-  name=`basename $1 | ${SED} 's!\.o!!'`
-  cntold=`strings $old | ${GREP} -c $name`
-  cntnew=`strings $new | ${GREP} -c $name`
-
-  if [ $cntold -gt 0 -a $cntnew -gt 0 ]
-  then
-    ${ECHO} F
-    return
-  fi
-
-  ${ECHO} 1
-}
-
-for F in `${CAT} $COMPARE_ROOT/list.old`
-do
-  if [ "${IGNORE}" ] && [ "`${ECHO} $F | ${GREP} ${IGNORE}`" ]
-  then
-    #
-    # skip ignored files
-    #
-    continue;
-  fi
-
-  if [ "$PATTERN" ] && [ `${ECHO} $F | ${GREP} -c $PATTERN` -eq 0 ]
-  then
-    continue;
-  fi
-
-  f=`basename $F`
-  o=$OLD/$F
-  n=`findnew $F $f`
-
-  if [ "$n" ]
-  then
-    n="$NEW/$n"
-    ${ECHO} `compare $o $n` : $f : $o : $n
-  else
-    ${ECHO} "- : $f : $o "
-  fi
-done
.hgtags

@@ -280,3 +280,5 @@ cfdac5887952c2dd73c73a1d8d9aa880d0539bbf jdk9-b33
 9bc2dbd3dfb8c9fa88e00056b8b93a81ee6d306e jdk9-b35
 ffd90c81d4ef9d94d880fc852e2fc482ecd9b374 jdk9-b36
 7e9add74ad50841fb39dae75db56374aefa1de4c jdk9-b37
+8acf056126e819cf536eef02aee0f61f207a6b52 jdk9-b38
+53bf36cb722db50815712258a77cb6bbe25a2f5f jdk9-b39
@@ -166,16 +166,15 @@ import sun.reflect.misc.ReflectUtil;
 *
 *
 * @implNote
- * As described above it is possible to specify, at runtime, an alternative ORBSingleton class and
- * an alternative ORB implementation class, via the system properties {@code org.omg.CORBA.ORBSingletonClass}
- * and {@code org.omg.CORBA.ORBClass} respectively.
- * The class loading strategy is organized, such that, in the case of the ORBSingleton
- * the system class loader is used to load the alternative singleton ORB.
- * Thus, it is necessary that an application's CLASSPATH
- * includes the classes for this alternative ORBSingleton, when specified.
+ * When a singleton ORB is configured via the system property,
+ * or orb.properties, it will be
+ * located, and loaded via the system class loader.
+ * Thus, where appropriate, it is necessary that
+ * the classes for this alternative ORBSingleton are available on the application's class path.
+ * It should be noted that the singleton ORB is system wide.
 *
- * In the case of specifying an alternative ORB implementation class, the loading
- * strategy will use the thread context class loader, as appropriate.
+ * When a per-application ORB is created via the 2-arg init methods,
+ * then it will be located using the thread context class loader.
 *
 * @since JDK1.2
 */
@@ -295,6 +294,11 @@ abstract public class ORB {
     * creating <code>TypeCode</code> objects are invoked.
     *
     * @return the singleton ORB
+     *
+     * @implNote
+     * When configured via the system property, or orb.properties,
+     * the system-wide singleton ORB is located via the
+     * system class loader.
     */
    public static synchronized ORB init() {
        if (singleton == null) {
@@ -354,6 +358,10 @@ abstract public class ORB {
     * method; may be <code>null</code>
     * @param props application-specific properties; may be <code>null</code>
     * @return the newly-created ORB instance
+     *
+     * @implNote
+     * When configured via the system property, or orb.properties,
+     * the ORB is located via the thread context class loader.
     */
    public static ORB init(String[] args, Properties props) {
        //
|
||||||
* @param app the applet; may be <code>null</code>
|
* @param app the applet; may be <code>null</code>
|
||||||
* @param props applet-specific properties; may be <code>null</code>
|
* @param props applet-specific properties; may be <code>null</code>
|
||||||
* @return the newly-created ORB instance
|
* @return the newly-created ORB instance
|
||||||
|
*
|
||||||
|
* @implNote
|
||||||
|
* When configured via the system property, or orb.properties,
|
||||||
|
* the ORB is located via the thread context class loader.
|
||||||
*/
|
*/
|
||||||
public static ORB init(Applet app, Properties props) {
|
public static ORB init(Applet app, Properties props) {
|
||||||
String className;
|
String className;
|
||||||
|
|
|
.hgtags

@@ -440,3 +440,5 @@ af46576a8d7cb4003028b8ee8bf408cfe227315b jdk9-b32
 438cb613151c4bd290bb732697517cba1cafcb04 jdk9-b35
 464ab653fbb17eb518d8ef60f8df301de7ef00d0 jdk9-b36
 b1c2dd843f247a1db19e1e85eb62ca405f72dc26 jdk9-b37
+c363a8b87e477ee45d6d3cb2a36cb365141bc596 jdk9-b38
+9cb75e5e394827ccbaf2e15524108a412dc4ddc5 jdk9-b39
@@ -64,8 +64,8 @@ public class DefNewGeneration extends Generation {
  }

  // Accessing spaces
-  public EdenSpace eden() {
-    return (EdenSpace) VMObjectFactory.newObject(EdenSpace.class, edenSpaceField.getValue(addr));
+  public ContiguousSpace eden() {
+    return (ContiguousSpace) VMObjectFactory.newObject(ContiguousSpace.class, edenSpaceField.getValue(addr));
  }

  public ContiguousSpace from() {
@@ -3513,7 +3513,7 @@ void TemplateTable::_new() {
        Rtags = R3_ARG1,
        Rindex = R5_ARG3;

-  const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
+  const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc();

  // --------------------------------------------------------------------------
  // Check if fast case is possible.
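The same "&& !CMSIncrementalMode" term is dropped from every platform's allow_shared_alloc computation in the hunks that follow: with incremental CMS gone, the only remaining question is whether the heap supports inline contiguous allocation. Below is a hedged sketch of the decision this flag gates; it is not HotSpot's actual code, and Heap, tlab_allocate, shared_eden_allocate, and slow_path_allocate are hypothetical stand-ins.

  #include <cstddef>

  // Hypothetical stand-in for the heap capability queried in the diff.
  struct Heap {
    bool supports_inline_contig_alloc() const { return true; }
  };

  // Hypothetical allocation primitives; each returns nullptr on failure.
  inline void* tlab_allocate(size_t)        { return nullptr; }  // bump a thread-local buffer
  inline void* shared_eden_allocate(size_t) { return nullptr; }  // CAS on the shared eden top
  inline void* slow_path_allocate(size_t)   { return nullptr; }  // call into the runtime

  void* allocate_object(const Heap* heap, size_t size, bool use_tlab) {
    // After this merge the gate is only the heap capability; the old
    // "&& !CMSIncrementalMode" term is gone along with incremental CMS.
    const bool allow_shared_alloc = heap->supports_inline_contig_alloc();

    if (use_tlab) {
      if (void* obj = tlab_allocate(size)) return obj;
    }
    if (allow_shared_alloc) {
      if (void* obj = shared_eden_allocate(size)) return obj;
    }
    return slow_path_allocate(size);
  }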
@@ -3196,7 +3196,7 @@ void MacroAssembler::eden_allocate(
  assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size");
  assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");

-  if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+  if (!Universe::heap()->supports_inline_contig_alloc()) {
    // No allocation in the shared eden.
    ba(slow_case);
    delayed()->nop();
@@ -3331,7 +3331,7 @@ void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case
  assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */);
  Label do_refill, discard_tlab;

-  if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+  if (!Universe::heap()->supports_inline_contig_alloc()) {
    // No allocation in the shared eden.
    ba(slow_case);
    delayed()->nop();
@@ -3309,7 +3309,7 @@ void TemplateTable::_new() {
  // (creates a new TLAB, etc.)

  const bool allow_shared_alloc =
-    Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
+    Universe::heap()->supports_inline_contig_alloc();

  if(UseTLAB) {
    Register RoldTopValue = RallocatedObject;
@@ -1767,7 +1767,7 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
  // at [FETCH], below, will never observe a biased encoding (*101b).
  // If this invariant is not held we risk exclusion (safety) failure.
  if (UseBiasedLocking && !UseOptoBiasInlining) {
-    biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, counters);
+    biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, counters);
  }

#if INCLUDE_RTM_OPT
@@ -2946,7 +2946,7 @@ void MacroAssembler::eden_allocate(Register obj,
                                   Label& slow_case) {
  assert(obj == rax, "obj must be in rax, for cmpxchg");
  assert_different_registers(obj, var_size_in_bytes, t1);
-  if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+  if (!Universe::heap()->supports_inline_contig_alloc()) {
    jmp(slow_case);
  } else {
    Register end = t1;
@@ -4419,7 +4419,7 @@ Register MacroAssembler::tlab_refill(Label& retry,
  assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx);
  Label do_refill, discard_tlab;

-  if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+  if (!Universe::heap()->supports_inline_contig_alloc()) {
    // No allocation in the shared eden.
    jmp(slow_case);
  }
@@ -3214,7 +3214,7 @@ void TemplateTable::_new() {
  // (creates a new TLAB, etc.)

  const bool allow_shared_alloc =
-    Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
+    Universe::heap()->supports_inline_contig_alloc();

  const Register thread = rcx;
  if (UseTLAB || allow_shared_alloc) {
@@ -3269,7 +3269,7 @@ void TemplateTable::_new() {
  // (creates a new TLAB, etc.)

  const bool allow_shared_alloc =
-    Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
+    Universe::heap()->supports_inline_contig_alloc();

  if (UseTLAB) {
    __ movptr(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
@@ -2608,7 +2608,10 @@ void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
  assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
  assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
  if (UseLargePages) {
-    Solaris::setup_large_pages(addr, bytes, alignment_hint);
+    size_t page_size = Solaris::page_size_for_alignment(alignment_hint);
+    if (page_size > (size_t) vm_page_size()) {
+      Solaris::setup_large_pages(addr, bytes, page_size);
+    }
  }
}

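The Solaris change stops passing the raw alignment hint straight to setup_large_pages and instead derives a concrete page size first, only realigning when that size actually exceeds the base page. A minimal sketch of the guard follows, with hypothetical stand-ins (vm_page_size, page_size_for_alignment, setup_large_pages) and made-up sizes.

  #include <cstddef>

  static size_t vm_page_size() { return 4096; }  // hypothetical base page size

  // Hypothetical: map an alignment hint to the largest page size it permits.
  static size_t page_size_for_alignment(size_t alignment) {
    const size_t large = 2 * 1024 * 1024;  // assumed 2M large page
    return (alignment % large == 0) ? large : vm_page_size();
  }

  static void setup_large_pages(char*, size_t, size_t) { /* e.g. an madvise/memcntl call */ }

  void pd_realign_memory(char* addr, size_t bytes, size_t alignment_hint) {
    size_t page_size = page_size_for_alignment(alignment_hint);
    // Only ask the OS for large pages when the hint maps to a page that is
    // genuinely larger than the default; previously the hint itself was passed.
    if (page_size > vm_page_size()) {
      setup_large_pages(addr, bytes, page_size);
    }
  }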
@@ -31,6 +31,11 @@

// Implementation of class OrderAccess.

+// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
+static inline void compiler_barrier() {
+  __asm__ volatile ("" : : : "memory");
+}
+
inline void OrderAccess::loadload() { acquire(); }
inline void OrderAccess::storestore() { release(); }
inline void OrderAccess::loadstore() { acquire(); }
@@ -46,9 +51,7 @@ inline void OrderAccess::acquire() {
}

inline void OrderAccess::release() {
-  // Avoid hitting the same cache-line from
-  // different threads.
-  volatile jint local_dummy = 0;
+  compiler_barrier();
}

inline void OrderAccess::fence() {
@@ -62,34 +65,34 @@ inline void OrderAccess::fence() {
  }
}

-inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { return *p; }
-inline jshort OrderAccess::load_acquire(volatile jshort* p) { return *p; }
-inline jint OrderAccess::load_acquire(volatile jint* p) { return *p; }
-inline jlong OrderAccess::load_acquire(volatile jlong* p) { return Atomic::load(p); }
-inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { return *p; }
-inline jushort OrderAccess::load_acquire(volatile jushort* p) { return *p; }
-inline juint OrderAccess::load_acquire(volatile juint* p) { return *p; }
-inline julong OrderAccess::load_acquire(volatile julong* p) { return Atomic::load((volatile jlong*)p); }
-inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { return *p; }
-inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { return jdouble_cast(Atomic::load((volatile jlong*)p)); }
+inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { jbyte v = *p; compiler_barrier(); return v; }
+inline jshort OrderAccess::load_acquire(volatile jshort* p) { jshort v = *p; compiler_barrier(); return v; }
+inline jint OrderAccess::load_acquire(volatile jint* p) { jint v = *p; compiler_barrier(); return v; }
+inline jlong OrderAccess::load_acquire(volatile jlong* p) { jlong v = Atomic::load(p); compiler_barrier(); return v; }
+inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { jubyte v = *p; compiler_barrier(); return v; }
+inline jushort OrderAccess::load_acquire(volatile jushort* p) { jushort v = *p; compiler_barrier(); return v; }
+inline juint OrderAccess::load_acquire(volatile juint* p) { juint v = *p; compiler_barrier(); return v; }
+inline julong OrderAccess::load_acquire(volatile julong* p) { julong v = Atomic::load((volatile jlong*)p); compiler_barrier(); return v; }
+inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { jfloat v = *p; compiler_barrier(); return v; }
+inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { jdouble v = jdouble_cast(Atomic::load((volatile jlong*)p)); compiler_barrier(); return v; }

-inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) { return *p; }
-inline void* OrderAccess::load_ptr_acquire(volatile void* p) { return *(void* volatile *)p; }
-inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; }
+inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) { intptr_t v = *p; compiler_barrier(); return v; }
+inline void* OrderAccess::load_ptr_acquire(volatile void* p) { void* v = *(void* volatile *)p; compiler_barrier(); return v; }
+inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { void* v = *(void* const volatile *)p; compiler_barrier(); return v; }

-inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { *p = v; }
-inline void OrderAccess::release_store(volatile jshort* p, jshort v) { *p = v; }
-inline void OrderAccess::release_store(volatile jint* p, jint v) { *p = v; }
-inline void OrderAccess::release_store(volatile jlong* p, jlong v) { Atomic::store(v, p); }
-inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { *p = v; }
-inline void OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
-inline void OrderAccess::release_store(volatile juint* p, juint v) { *p = v; }
-inline void OrderAccess::release_store(volatile julong* p, julong v) { Atomic::store((jlong)v, (volatile jlong*)p); }
-inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { *p = v; }
+inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { compiler_barrier(); *p = v; }
+inline void OrderAccess::release_store(volatile jshort* p, jshort v) { compiler_barrier(); *p = v; }
+inline void OrderAccess::release_store(volatile jint* p, jint v) { compiler_barrier(); *p = v; }
+inline void OrderAccess::release_store(volatile jlong* p, jlong v) { compiler_barrier(); Atomic::store(v, p); }
+inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { compiler_barrier(); *p = v; }
+inline void OrderAccess::release_store(volatile jushort* p, jushort v) { compiler_barrier(); *p = v; }
+inline void OrderAccess::release_store(volatile juint* p, juint v) { compiler_barrier(); *p = v; }
+inline void OrderAccess::release_store(volatile julong* p, julong v) { compiler_barrier(); Atomic::store((jlong)v, (volatile jlong*)p); }
+inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { compiler_barrier(); *p = v; }
 inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { release_store((volatile jlong *)p, jlong_cast(v)); }

-inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; }
-inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { *(void* volatile *)p = v; }
+inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { compiler_barrier(); *p = v; }
+inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { compiler_barrier(); *(void* volatile *)p = v; }

 inline void OrderAccess::store_fence(jbyte* p, jbyte v) {
   __asm__ volatile ( "xchgb (%2),%0"
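The net effect of the three OrderAccess hunks: release() no longer writes a dummy volatile, and every load_acquire/release_store now brackets the access with a compiler-only fence. On x86 the hardware already orders ordinary loads and stores, so the fence only has to restrain the compiler. A minimal single-threaded sketch of the pattern (GCC/Clang inline-asm syntax; publisher, consumer, and the variables are illustrative, not JVM code):

  #include <cstdio>

  // Compiler-only fence: emits no CPU instruction; the "memory" clobber just
  // stops the compiler from moving memory accesses across this point.
  static inline void compiler_barrier() {
    __asm__ volatile ("" : : : "memory");
  }

  static int payload = 0;
  static volatile int ready = 0;

  void publisher() {
    payload = 42;
    compiler_barrier();  // release_store: writes above may not sink below
    ready = 1;
  }

  void consumer() {
    int r = ready;
    compiler_barrier();  // load_acquire: reads below may not hoist above
    if (r) {
      std::printf("payload = %d\n", payload);
    }
  }

  int main() {
    publisher();
    consumer();
    return 0;
  }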
@@ -7,7 +7,6 @@
    -Xbootclasspath/p:<directories and zip/jar files separated by ;>
                      prepend in front of bootstrap class path
    -Xnoclassgc       disable class garbage collection
-    -Xincgc           enable incremental garbage collection
    -Xloggc:<file>    log GC status to a file with time stamps
    -Xbatch           disable background compilation
    -Xms<size>        set initial Java heap size
@@ -68,7 +68,10 @@
// ciMethod::ciMethod
//
// Loaded method.
-ciMethod::ciMethod(methodHandle h_m) : ciMetadata(h_m()) {
+ciMethod::ciMethod(methodHandle h_m, ciInstanceKlass* holder) :
+  ciMetadata(h_m()),
+  _holder(holder)
+{
  assert(h_m() != NULL, "no null method");

  // These fields are always filled in in loaded methods.
@@ -124,7 +127,6 @@ ciMethod::ciMethod(methodHandle h_m) : ciMetadata(h_m()) {
  // generating _signature may allow GC and therefore move m.
  // These fields are always filled in.
  _name = env->get_symbol(h_m()->name());
-  _holder = env->get_instance_klass(h_m()->method_holder());
  ciSymbol* sig_symbol = env->get_symbol(h_m()->signature());
  constantPoolHandle cpool = h_m()->constants();
  _signature = new (env->arena()) ciSignature(_holder, cpool, sig_symbol);
@@ -91,7 +91,7 @@ class ciMethod : public ciMetadata {
  BCEscapeAnalyzer* _bcea;
#endif

-  ciMethod(methodHandle h_m);
+  ciMethod(methodHandle h_m, ciInstanceKlass* holder);
  ciMethod(ciInstanceKlass* holder, ciSymbol* name, ciSymbol* signature, ciInstanceKlass* accessor);

  Method* get_Method() const {
@@ -239,7 +239,7 @@ void ciObjectFactory::remove_symbols() {
ciObject* ciObjectFactory::get(oop key) {
  ASSERT_IN_VM;

-  assert(key == NULL || Universe::heap()->is_in_reserved(key), "must be");
+  assert(Universe::heap()->is_in_reserved(key), "must be");

  NonPermObject* &bucket = find_non_perm(key);
  if (bucket != NULL) {
@@ -260,10 +260,10 @@ ciObject* ciObjectFactory::get(oop key) {
}

// ------------------------------------------------------------------
-// ciObjectFactory::get
+// ciObjectFactory::get_metadata
//
-// Get the ciObject corresponding to some oop. If the ciObject has
-// already been created, it is returned. Otherwise, a new ciObject
+// Get the ciMetadata corresponding to some Metadata. If the ciMetadata has
+// already been created, it is returned. Otherwise, a new ciMetadata
// is created.
ciMetadata* ciObjectFactory::get_metadata(Metadata* key) {
  ASSERT_IN_VM;
@@ -290,9 +290,9 @@ ciMetadata* ciObjectFactory::get_metadata(Metadata* key) {
  }
#endif
  if (!is_found_at(index, key, _ci_metadata)) {
-    // The ciObject does not yet exist. Create it and insert it
+    // The ciMetadata does not yet exist. Create it and insert it
    // into the cache.
-    ciMetadata* new_object = create_new_object(key);
+    ciMetadata* new_object = create_new_metadata(key);
    init_ident_of(new_object);
    assert(new_object->is_metadata(), "must be");

@@ -344,15 +344,28 @@ ciObject* ciObjectFactory::create_new_object(oop o) {
}

// ------------------------------------------------------------------
-// ciObjectFactory::create_new_object
+// ciObjectFactory::create_new_metadata
//
-// Create a new ciObject from a Metadata*.
+// Create a new ciMetadata from a Metadata*.
//
-// Implementation note: this functionality could be virtual behavior
-// of the oop itself. For now, we explicitly marshal the object.
-ciMetadata* ciObjectFactory::create_new_object(Metadata* o) {
+// Implementation note: in order to keep Metadata live, an auxiliary ciObject
+// is used, which points to it's holder.
+ciMetadata* ciObjectFactory::create_new_metadata(Metadata* o) {
  EXCEPTION_CONTEXT;

+  // Hold metadata from unloading by keeping it's holder alive.
+  if (_initialized && o->is_klass()) {
+    Klass* holder = ((Klass*)o);
+    if (holder->oop_is_instance() && InstanceKlass::cast(holder)->is_anonymous()) {
+      // Though ciInstanceKlass records class loader oop, it's not enough to keep
+      // VM anonymous classes alive (loader == NULL). Klass holder should be used instead.
+      // It is enough to record a ciObject, since cached elements are never removed
+      // during ciObjectFactory lifetime. ciObjectFactory itself is created for
+      // every compilation and lives for the whole duration of the compilation.
+      ciObject* h = get(holder->klass_holder());
+    }
+  }
+
  if (o->is_klass()) {
    KlassHandle h_k(THREAD, (Klass*)o);
    Klass* k = (Klass*)o;
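The create_new_metadata change pins otherwise-unreachable metadata by caching a ciObject for the klass holder, relying on the factory's cache never being pruned during a compilation. As a loose analogy in plain C++ (a hedged sketch, not HotSpot code; ClassLoaderDataAnalogy, KlassAnalogy, and FactoryAnalogy are invented for illustration):

  #include <memory>
  #include <vector>

  struct ClassLoaderDataAnalogy { /* owns the class metadata */ };

  struct KlassAnalogy {
    std::shared_ptr<ClassLoaderDataAnalogy> holder;  // cf. klass_holder()
  };

  class FactoryAnalogy {
    // Entries are only ever appended while the factory lives, mirroring the
    // comment in the diff: cached elements are never removed, and the factory
    // itself lives for the whole duration of one compilation.
    std::vector<std::shared_ptr<ClassLoaderDataAnalogy>> _cache;
   public:
    void record_holder(const KlassAnalogy& k) {
      _cache.push_back(k.holder);  // one strong reference keeps the holder alive
    }
  };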
@@ -365,14 +378,16 @@ ciMetadata* ciObjectFactory::create_new_object(Metadata* o) {
    }
  } else if (o->is_method()) {
    methodHandle h_m(THREAD, (Method*)o);
-    return new (arena()) ciMethod(h_m);
+    ciEnv *env = CURRENT_THREAD_ENV;
+    ciInstanceKlass* holder = env->get_instance_klass(h_m()->method_holder());
+    return new (arena()) ciMethod(h_m, holder);
  } else if (o->is_methodData()) {
    // Hold methodHandle alive - might not be necessary ???
    methodHandle h_m(THREAD, ((MethodData*)o)->method());
    return new (arena()) ciMethodData((MethodData*)o);
  }

-  // The oop is of some type not supported by the compiler interface.
+  // The Metadata* is of some type not supported by the compiler interface.
  ShouldNotReachHere();
  return NULL;
}
@@ -701,7 +716,7 @@ static ciObjectFactory::NonPermObject* emptyBucket = NULL;
// If there is no entry in the cache corresponding to this oop, return
// the null tail of the bucket into which the oop should be inserted.
ciObjectFactory::NonPermObject* &ciObjectFactory::find_non_perm(oop key) {
-  assert(Universe::heap()->is_in_reserved_or_null(key), "must be");
+  assert(Universe::heap()->is_in_reserved(key), "must be");
  ciMetadata* klass = get_metadata(key->klass());
  NonPermObject* *bp = &_non_perm_bucket[(unsigned) klass->hash() % NON_PERM_BUCKETS];
  for (NonPermObject* p; (p = (*bp)) != NULL; bp = &p->next()) {
@@ -73,7 +73,7 @@ private:
  void insert(int index, ciMetadata* obj, GrowableArray<ciMetadata*>* objects);

  ciObject* create_new_object(oop o);
-  ciMetadata* create_new_object(Metadata* o);
+  ciMetadata* create_new_metadata(Metadata* o);

  void ensure_metadata_alive(ciMetadata* m);

codeBlob.cpp:

@@ -43,7 +43,7 @@
 #include "c1/c1_Runtime1.hpp"
 #endif
 
-unsigned int align_code_offset(int offset) {
+unsigned int CodeBlob::align_code_offset(int offset) {
   // align the size to CodeEntryAlignment
   return
     ((offset + (int)CodeHeap::header_size() + (CodeEntryAlignment-1)) & ~(CodeEntryAlignment-1))
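The mask arithmetic in align_code_offset() rounds (offset + header) up to the next multiple of the alignment, which must be a power of two. A standalone restatement with made-up values (header size 8, alignment 32):

#include <cstdio>

unsigned int align_up(int offset, int header_size, int alignment) {
  // (x + (A-1)) & ~(A-1) rounds x up to a multiple of A when A is a power of 2.
  return (offset + header_size + (alignment - 1)) & ~(alignment - 1);
}

int main() {
  // 0 -> 32 and 25 -> 64: add the 8-byte header, then round up to 32.
  std::printf("%u %u\n", align_up(0, 8, 32), align_up(25, 8, 32));
  return 0;
}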
codeBlob.hpp:

@@ -83,6 +83,7 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
  public:
   // Returns the space needed for CodeBlob
   static unsigned int allocation_size(CodeBuffer* cb, int header_size);
+  static unsigned int align_code_offset(int offset);
 
   // Creation
   // a) simple CodeBlob
@@ -207,7 +208,7 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
   }
 };
 
+class WhiteBox;
 //----------------------------------------------------------------------------------------------------
 // BufferBlob: used to hold non-relocatable machine code such as the interpreter, stubroutines, etc.
 
@@ -215,6 +216,7 @@ class BufferBlob: public CodeBlob {
   friend class VMStructs;
   friend class AdapterBlob;
   friend class MethodHandlesAdapterBlob;
+  friend class WhiteBox;
 
  private:
   // Creation support
codeCache.cpp:

@@ -305,7 +305,7 @@ void CodeCache::add_heap(ReservedSpace rs, const char* name, size_t size_initial
   MemoryService::add_code_heap_memory_pool(heap, name);
 }
 
-CodeHeap* CodeCache::get_code_heap(CodeBlob* cb) {
+CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
   assert(cb != NULL, "CodeBlob is null");
   FOR_ALL_HEAPS(heap) {
     if ((*heap)->contains(cb)) {
codeCache.hpp:

@@ -77,6 +77,7 @@ class DepChange;
 class CodeCache : AllStatic {
   friend class VMStructs;
   friend class NMethodIterator;
+  friend class WhiteBox;
  private:
   // CodeHeaps of the cache
   static GrowableArray<CodeHeap*>* _heaps;
@@ -98,7 +99,7 @@ class CodeCache : AllStatic {
   static void initialize_heaps();                      // Initializes the CodeHeaps
   // Creates a new heap with the given name and size, containing CodeBlobs of the given type
   static void add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type);
-  static CodeHeap* get_code_heap(CodeBlob* cb);        // Returns the CodeHeap for the given CodeBlob
+  static CodeHeap* get_code_heap(const CodeBlob* cb);  // Returns the CodeHeap for the given CodeBlob
   static CodeHeap* get_code_heap(int code_blob_type);  // Returns the CodeHeap for the given CodeBlobType
   // Returns the name of the VM option to set the size of the corresponding CodeHeap
   static const char* get_code_heap_flag_name(int code_blob_type);
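The get_code_heap() signature change is plain const-correctness: a containment lookup never mutates the blob, so it should accept a read-only pointer. A minimal sketch with hypothetical names:

#include <cassert>

struct Blob { int id; };

struct Heap {
  // The containment test does not mutate the blob, so take `const Blob*`.
  bool contains(const Blob* b) const { return b != nullptr && b->id == 1; }
};

const Heap* find_heap(const Heap* heaps, int n, const Blob* b) {
  for (int i = 0; i < n; i++) {
    if (heaps[i].contains(b)) return &heaps[i];
  }
  return nullptr;
}

int main() {
  Heap heaps[1];
  const Blob b{1};           // callers holding only const pointers still work
  assert(find_heap(heaps, 1, &b) == &heaps[0]);
  return 0;
}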
compileBroker.cpp:

@@ -35,6 +35,7 @@
 #include "oops/method.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/nativeLookup.hpp"
+#include "prims/whitebox.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/atomic.inline.hpp"
 #include "runtime/compilationPolicy.hpp"
@@ -1963,6 +1964,12 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
     if (comp == NULL) {
       ci_env.record_method_not_compilable("no compiler", !TieredCompilation);
     } else {
+      if (WhiteBoxAPI && WhiteBox::compilation_locked) {
+        MonitorLockerEx locker(Compilation_lock, Mutex::_no_safepoint_check_flag);
+        while (WhiteBox::compilation_locked) {
+          locker.wait(Mutex::_no_safepoint_check_flag);
+        }
+      }
       comp->compile_method(&ci_env, target, osr_bci);
     }
 
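The added WhiteBox gate above uses the classic monitor idiom: re-test the predicate in a loop after every wakeup, because a wait can return spuriously or the flag can be set again. The same shape with standard C++ primitives, as a minimal sketch rather than HotSpot code:

#include <condition_variable>
#include <mutex>

std::mutex m;
std::condition_variable cv;
bool compilation_locked = false;   // stands in for WhiteBox::compilation_locked

void wait_until_unlocked() {
  std::unique_lock<std::mutex> lock(m);
  while (compilation_locked) {     // always a loop, never a single wait
    cv.wait(lock);
  }
}

void unlock_compilation() {
  { std::lock_guard<std::mutex> lock(m); compilation_locked = false; }
  cv.notify_all();                 // wake every thread blocked on the flag
}

int main() {
  unlock_compilation();
  wait_until_unlocked();           // returns immediately: flag is clear
  return 0;
}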
cmsCollectorPolicy.cpp:

@@ -89,9 +89,3 @@ void ConcurrentMarkSweepPolicy::initialize_gc_policy_counters() {
     _gc_policy_counters = new GCPolicyCounters("Copy:CMS", 2, 3);
   }
 }
-
-// Returns true if the incremental mode is enabled.
-bool ConcurrentMarkSweepPolicy::has_soft_ended_eden()
-{
-  return CMSIncrementalMode;
-}
cmsCollectorPolicy.hpp:

@@ -42,9 +42,6 @@ class ConcurrentMarkSweepPolicy : public GenCollectorPolicy {
   virtual void initialize_size_policy(size_t init_eden_size,
                                       size_t init_promo_size,
                                       size_t init_survivor_size);
-
-  // Returns true if the incremental mode is enabled.
-  virtual bool has_soft_ended_eden();
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSCOLLECTORPOLICY_HPP
compactibleFreeListSpace.cpp:

@@ -2083,17 +2083,13 @@ bool CompactibleFreeListSpace::should_concurrent_collect() const {
 }
 
 // Support for compaction
 
 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
-  SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
+  scan_and_forward(this, cp);
   // Prepare_for_compaction() uses the space between live objects
   // so that later phase can skip dead space quickly. So verification
   // of the free lists doesn't work after.
 }
 
-#define obj_size(q) adjustObjectSize(oop(q)->size())
-#define adjust_obj_size(s) adjustObjectSize(s)
-
 void CompactibleFreeListSpace::adjust_pointers() {
   // In other versions of adjust_pointers(), a bail out
   // based on the amount of live data in the generation
@@ -2101,12 +2097,12 @@ void CompactibleFreeListSpace::adjust_pointers() {
   // Cannot test used() == 0 here because the free lists have already
   // been mangled by the compaction.
 
-  SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
+  scan_and_adjust_pointers(this);
   // See note about verification in prepare_for_compaction().
 }
 
 void CompactibleFreeListSpace::compact() {
-  SCAN_AND_COMPACT(obj_size);
+  scan_and_compact(this);
 }
 
 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
@@ -2629,7 +2625,7 @@ void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>*
   // Get the #blocks we want to claim
   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
   assert(n_blks > 0, "Error");
-  assert(ResizePLAB || n_blks == OldPLABSize, "Error");
+  assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
   // In some cases, when the application has a phase change,
   // there may be a sudden and sharp shift in the object survival
   // profile, and updating the counts at the end of a scavenge
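The SCAN_AND_* macros give way to function templates parameterized on the space type, so each space still gets statically specialized scan code without textual macro expansion. A self-contained sketch of the idiom with illustrative names:

#include <cstddef>
#include <cstdio>

template <typename SpaceType>
size_t scan_blocks(SpaceType* space) {
  size_t count = 0;
  // Calls resolve at compile time against the concrete SpaceType's helpers.
  for (size_t cur = 0; cur < space->scan_limit(); cur += space->scanned_block_size(cur)) {
    count++;                        // visit one block per iteration
  }
  return count;
}

struct FixedBlockSpace {
  size_t scan_limit() const { return 64; }               // 64 words total
  size_t scanned_block_size(size_t) const { return 8; }  // 8-word blocks
};

int main() {
  FixedBlockSpace s;
  std::printf("%zu blocks\n", scan_blocks(&s));          // prints "8 blocks"
  return 0;
}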
compactibleFreeListSpace.hpp:

@@ -73,6 +73,13 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   friend class CMSCollector;
   // Local alloc buffer for promotion into this space.
   friend class CFLS_LAB;
+  // Allow scan_and_* functions to call (private) overrides of the auxiliary functions on this class
+  template <typename SpaceType>
+  friend void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space);
+  template <typename SpaceType>
+  friend void CompactibleSpace::scan_and_compact(SpaceType* space);
+  template <typename SpaceType>
+  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
 
   // "Size" of chunks of work (executed during parallel remark phases
   // of CMS collection); this probably belongs in CMSCollector, although
@@ -288,6 +295,28 @@ class CompactibleFreeListSpace: public CompactibleSpace {
     _bt.freed(start, size);
   }
 
+  // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
+  // See comments for CompactibleSpace for more information.
+  inline HeapWord* scan_limit() const {
+    return end();
+  }
+
+  inline bool scanned_block_is_obj(const HeapWord* addr) const {
+    return CompactibleFreeListSpace::block_is_obj(addr); // Avoid virtual call
+  }
+
+  inline size_t scanned_block_size(const HeapWord* addr) const {
+    return CompactibleFreeListSpace::block_size(addr); // Avoid virtual call
+  }
+
+  inline size_t adjust_obj_size(size_t size) const {
+    return adjustObjectSize(size);
+  }
+
+  inline size_t obj_size(const HeapWord* addr) const {
+    return adjustObjectSize(oop(addr)->size());
+  }
+
  protected:
   // Reset the indexed free list to its initial empty condition.
   void resetIndexedFreeListArray();
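The "Avoid virtual call" comments above rely on the qualified-call rule: naming the class in a member call binds it statically, bypassing the vtable even through `this`. A minimal demonstration:

#include <cstdio>

struct Space {
  virtual bool block_is_obj(int addr) const { return false; }
  virtual ~Space() {}
};

struct FreeListSpace : Space {
  bool block_is_obj(int addr) const override { return addr % 2 == 0; }

  bool scanned_block_is_obj(int addr) const {
    // The qualified name forces direct (non-virtual) dispatch.
    return FreeListSpace::block_is_obj(addr);
  }
};

int main() {
  FreeListSpace s;
  std::printf("%d\n", s.scanned_block_is_obj(4));  // prints 1
  return 0;
}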
concurrentMarkSweepGeneration.cpp:

@@ -167,16 +167,6 @@ class CMSTokenSyncWithLocks: public CMSTokenSync {
 };
 
 
-// Wrapper class to temporarily disable icms during a foreground cms collection.
-class ICMSDisabler: public StackObj {
- public:
-  // The ctor disables icms and wakes up the thread so it notices the change;
-  // the dtor re-enables icms. Note that the CMSCollector methods will check
-  // CMSIncrementalMode.
-  ICMSDisabler()  { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
-  ~ICMSDisabler() { CMSCollector::enable_icms(); }
-};
-
 //////////////////////////////////////////////////////////////////
 // Concurrent Mark-Sweep Generation /////////////////////////////
 //////////////////////////////////////////////////////////////////
@@ -363,7 +353,6 @@ CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
   _cms_used_at_gc0_end = 0;
   _allow_duty_cycle_reduction = false;
   _valid_bits = 0;
-  _icms_duty_cycle = CMSIncrementalDutyCycle;
 }
 
 double CMSStats::cms_free_adjustment_factor(size_t free) const {
@@ -442,86 +431,17 @@ double CMSStats::time_until_cms_start() const {
   return work - deadline;
 }
 
-// Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
-// amount of change to prevent wild oscillation.
-unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
-                                              unsigned int new_duty_cycle) {
-  assert(old_duty_cycle <= 100, "bad input value");
-  assert(new_duty_cycle <= 100, "bad input value");
-
-  // Note: use subtraction with caution since it may underflow (values are
-  // unsigned).  Addition is safe since we're in the range 0-100.
-  unsigned int damped_duty_cycle = new_duty_cycle;
-  if (new_duty_cycle < old_duty_cycle) {
-    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
-    if (new_duty_cycle + largest_delta < old_duty_cycle) {
-      damped_duty_cycle = old_duty_cycle - largest_delta;
-    }
-  } else if (new_duty_cycle > old_duty_cycle) {
-    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
-    if (new_duty_cycle > old_duty_cycle + largest_delta) {
-      damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
-    }
-  }
-  assert(damped_duty_cycle <= 100, "invalid duty cycle computed");
-
-  if (CMSTraceIncrementalPacing) {
-    gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
-                        old_duty_cycle, new_duty_cycle, damped_duty_cycle);
-  }
-  return damped_duty_cycle;
-}
-
-unsigned int CMSStats::icms_update_duty_cycle_impl() {
-  assert(CMSIncrementalPacing && valid(),
-         "should be handled in icms_update_duty_cycle()");
-
-  double cms_time_so_far = cms_timer().seconds();
-  double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
-  double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);
-
-  // Avoid division by 0.
-  double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
-  double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;
-
-  unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
-  if (new_duty_cycle > _icms_duty_cycle) {
-    // Avoid very small duty cycles (1 or 2); 0 is allowed.
-    if (new_duty_cycle > 2) {
-      _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
-                                                new_duty_cycle);
-    }
-  } else if (_allow_duty_cycle_reduction) {
-    // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
-    new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
-    // Respect the minimum duty cycle.
-    unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
-    _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
-  }
-
-  if (PrintGCDetails || CMSTraceIncrementalPacing) {
-    gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
-  }
-
-  _allow_duty_cycle_reduction = false;
-  return _icms_duty_cycle;
-}
 
 #ifndef PRODUCT
 void CMSStats::print_on(outputStream *st) const {
   st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
   st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
             gc0_duration(), gc0_period(), gc0_promoted());
-  st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
-            cms_duration(), cms_duration_per_mb(),
-            cms_period(), cms_allocated());
+  st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
+            cms_duration(), cms_period(), cms_allocated());
   st->print(",cms_since_beg=%g,cms_since_end=%g",
             cms_time_since_begin(), cms_time_since_end());
   st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
             _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
-  if (CMSIncrementalMode) {
-    st->print(",dc=%d", icms_duty_cycle());
-  }
 
   if (valid()) {
     st->print(",promo_rate=%g,cms_alloc_rate=%g",
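The deleted icms_damped_duty_cycle() above clamps each update of the duty cycle: at most max(old/4, 5) points down or max(old/4, 15) points up per step. A standalone restatement of that rule, with two worked values:

#include <algorithm>
#include <cstdio>

unsigned damped_duty_cycle(unsigned old_dc, unsigned new_dc) {
  if (new_dc < old_dc) {
    unsigned largest_delta = std::max(old_dc / 4, 5u);
    if (new_dc + largest_delta < old_dc) return old_dc - largest_delta;
  } else if (new_dc > old_dc) {
    unsigned largest_delta = std::max(old_dc / 4, 15u);
    if (new_dc > old_dc + largest_delta) return std::min(old_dc + largest_delta, 100u);
  }
  return new_dc;
}

int main() {
  std::printf("%u\n", damped_duty_cycle(80, 10)); // 60: fall capped at 20 points
  std::printf("%u\n", damped_duty_cycle(10, 90)); // 25: rise capped at 15 points
  return 0;
}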
@@ -579,8 +499,6 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
 #endif
   _collection_count_start(0),
   _verifying(false),
-  _icms_start_limit(NULL),
-  _icms_stop_limit(NULL),
   _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
   _completed_initialization(false),
   _collector_policy(cp),
@@ -1116,137 +1034,6 @@ void CMSCollector::promoted(bool par, HeapWord* start,
   }
 }
 
-static inline size_t percent_of_space(Space* space, HeapWord* addr)
-{
-  size_t delta = pointer_delta(addr, space->bottom());
-  return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
-}
-
-void CMSCollector::icms_update_allocation_limits()
-{
-  Generation* young = GenCollectedHeap::heap()->get_gen(0);
-  EdenSpace* eden = young->as_DefNewGeneration()->eden();
-
-  const unsigned int duty_cycle = stats().icms_update_duty_cycle();
-  if (CMSTraceIncrementalPacing) {
-    stats().print();
-  }
-
-  assert(duty_cycle <= 100, "invalid duty cycle");
-  if (duty_cycle != 0) {
-    // The duty_cycle is a percentage between 0 and 100; convert to words and
-    // then compute the offset from the endpoints of the space.
-    size_t free_words = eden->free() / HeapWordSize;
-    double free_words_dbl = (double)free_words;
-    size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
-    size_t offset_words = (free_words - duty_cycle_words) / 2;
-
-    _icms_start_limit = eden->top() + offset_words;
-    _icms_stop_limit = eden->end() - offset_words;
-
-    // The limits may be adjusted (shifted to the right) by
-    // CMSIncrementalOffset, to allow the application more mutator time after a
-    // young gen gc (when all mutators were stopped) and before CMS starts and
-    // takes away one or more cpus.
-    if (CMSIncrementalOffset != 0) {
-      double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
-      size_t adjustment = (size_t)adjustment_dbl;
-      HeapWord* tmp_stop = _icms_stop_limit + adjustment;
-      if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
-        _icms_start_limit += adjustment;
-        _icms_stop_limit = tmp_stop;
-      }
-    }
-  }
-  if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
-    _icms_start_limit = _icms_stop_limit = eden->end();
-  }
-
-  // Install the new start limit.
-  eden->set_soft_end(_icms_start_limit);
-
-  if (CMSTraceIncrementalMode) {
-    gclog_or_tty->print(" icms alloc limits: "
-                        PTR_FORMAT "," PTR_FORMAT
-                        " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
-                        p2i(_icms_start_limit), p2i(_icms_stop_limit),
-                        percent_of_space(eden, _icms_start_limit),
-                        percent_of_space(eden, _icms_stop_limit));
-    if (Verbose) {
-      gclog_or_tty->print("eden: ");
-      eden->print_on(gclog_or_tty);
-    }
-  }
-}
-
-// Any changes here should try to maintain the invariant
-// that if this method is called with _icms_start_limit
-// and _icms_stop_limit both NULL, then it should return NULL
-// and not notify the icms thread.
-HeapWord*
-CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
-                                       size_t word_size)
-{
-  // A start_limit equal to end() means the duty cycle is 0, so treat that as a
-  // nop.
-  if (CMSIncrementalMode && _icms_start_limit != space->end()) {
-    if (top <= _icms_start_limit) {
-      if (CMSTraceIncrementalMode) {
-        space->print_on(gclog_or_tty);
-        gclog_or_tty->stamp();
-        gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
-                               ", new limit=" PTR_FORMAT
-                               " (" SIZE_FORMAT "%%)",
-                               p2i(top), p2i(_icms_stop_limit),
-                               percent_of_space(space, _icms_stop_limit));
-      }
-      ConcurrentMarkSweepThread::start_icms();
-      assert(top < _icms_stop_limit, "Tautology");
-      if (word_size < pointer_delta(_icms_stop_limit, top)) {
-        return _icms_stop_limit;
-      }
-
-      // The allocation will cross both the _start and _stop limits, so do the
-      // stop notification also and return end().
-      if (CMSTraceIncrementalMode) {
-        space->print_on(gclog_or_tty);
-        gclog_or_tty->stamp();
-        gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
-                               ", new limit=" PTR_FORMAT
-                               " (" SIZE_FORMAT "%%)",
-                               p2i(top), p2i(space->end()),
-                               percent_of_space(space, space->end()));
-      }
-      ConcurrentMarkSweepThread::stop_icms();
-      return space->end();
-    }
-
-    if (top <= _icms_stop_limit) {
-      if (CMSTraceIncrementalMode) {
-        space->print_on(gclog_or_tty);
-        gclog_or_tty->stamp();
-        gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
-                               ", new limit=" PTR_FORMAT
-                               " (" SIZE_FORMAT "%%)",
-                               top, space->end(),
-                               percent_of_space(space, space->end()));
-      }
-      ConcurrentMarkSweepThread::stop_icms();
-      return space->end();
-    }
-
-    if (CMSTraceIncrementalMode) {
-      space->print_on(gclog_or_tty);
-      gclog_or_tty->stamp();
-      gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
-                             ", new limit=" PTR_FORMAT,
-                             top, NULL);
-    }
-  }
-
-  return NULL;
-}
 
 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
   // allocate, copy and if necessary update promoinfo --
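The removed limit computation converts a duty cycle of d% into a window covering d% of eden's free words, centered between top() and end(). A worked restatement with made-up numbers (1000 free words, 40% duty cycle):

#include <cstddef>
#include <cstdio>

int main() {
  const size_t free_words = 1000;          // eden->free() / HeapWordSize
  const unsigned duty_cycle = 40;          // percent of free space to pace in
  size_t duty_cycle_words = free_words * duty_cycle / 100;   // 400
  size_t offset_words = (free_words - duty_cycle_words) / 2; // 300
  // start limit = top + 300, stop limit = end - 300: the incremental collector
  // ran only while eden allocation was inside the middle 400-word window.
  std::printf("window = [top+%zu, end-%zu]\n", offset_words, offset_words);
  return 0;
}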
@@ -1289,14 +1076,6 @@ oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
 }
 
 
-HeapWord*
-ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
-                                                        HeapWord* top,
-                                                        size_t word_sz)
-{
-  return collector()->allocation_limit_reached(space, top, word_sz);
-}
-
 // IMPORTANT: Notes on object size recognition in CMS.
 // ---------------------------------------------------
 // A block of storage in the CMS generation is always in
@@ -1809,9 +1588,6 @@ void CMSCollector::acquire_control_and_collect(bool full,
   // we want to do a foreground collection.
   _foregroundGCIsActive = true;
 
-  // Disable incremental mode during a foreground collection.
-  ICMSDisabler icms_disabler;
-
   // release locks and wait for a notify from the background collector
   // releasing the locks in only necessary for phases which
   // do yields to improve the granularity of the collection.
@@ -2135,7 +1911,7 @@ void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
 
 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
   DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
-  EdenSpace* eden_space = dng->eden();
+  ContiguousSpace* eden_space = dng->eden();
   ContiguousSpace* from_space = dng->from();
   ContiguousSpace* to_space = dng->to();
   // Eden
@@ -2783,10 +2559,6 @@ void CMSCollector::gc_epilogue(bool full) {
   //
   _cmsGen->update_counters(cms_used);
 
-  if (CMSIncrementalMode) {
-    icms_update_allocation_limits();
-  }
-
   bitMapLock()->unlock();
   releaseFreelistLocks();
 
@@ -4272,12 +4044,10 @@ void CMSConcMarkingTask::coordinator_yield() {
   assert_lock_strong(_bit_map_lock);
   _bit_map_lock->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
-  ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
-  _collector->icms_wait();
 
   // It is possible for whichever thread initiated the yield request
   // not to get a chance to wake up and take the bitmap lock between
@@ -4307,7 +4077,6 @@ void CMSConcMarkingTask::coordinator_yield() {
                    ConcurrentMarkSweepThread::should_yield() &&
                    !CMSCollector::foregroundGCIsActive(); ++i) {
     os::sleep(Thread::current(), 1, false);
-    ConcurrentMarkSweepThread::acknowledge_yield_request();
   }
 
   ConcurrentMarkSweepThread::synchronize(true);
@@ -5238,7 +5007,7 @@ class RemarkKlassClosure : public KlassClosure {
 
 void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
   DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
-  EdenSpace* eden_space = dng->eden();
+  ContiguousSpace* eden_space = dng->eden();
   ContiguousSpace* from_space = dng->from();
   ContiguousSpace* to_space = dng->to();
 
@@ -5410,7 +5179,7 @@ CMSParMarkTask::do_young_space_rescan(uint worker_id,
   while (!pst->is_task_claimed(/* reference */ nth_task)) {
     // We claimed task # nth_task; compute its boundaries.
     if (chunk_top == 0) {  // no samples were taken
-      assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
+      assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task");
       start = space->bottom();
       end = space->top();
     } else if (nth_task == 0) {
@@ -5788,7 +5557,7 @@ void CMSCollector::do_remark_parallel() {
   // process_roots (which currently doesn't know how to
   // parallelize such a scan), but rather will be broken up into
   // a set of parallel tasks (via the sampling that the [abortable]
-  // preclean phase did of EdenSpace, plus the [two] tasks of
+  // preclean phase did of eden, plus the [two] tasks of
   // scanning the [two] survivor spaces. Further fine-grain
   // parallelization of the scanning of the survivor spaces
   // themselves, and of precleaning of the younger gen itself
@@ -6474,19 +6243,16 @@ void CMSCollector::reset(bool asynch) {
       assert_lock_strong(bitMapLock());
       bitMapLock()->unlock();
       ConcurrentMarkSweepThread::desynchronize(true);
-      ConcurrentMarkSweepThread::acknowledge_yield_request();
      stopTimer();
      if (PrintCMSStatistics != 0) {
        incrementYields();
      }
-      icms_wait();
 
      // See the comment in coordinator_yield()
      for (unsigned i = 0; i < CMSYieldSleepCount &&
                       ConcurrentMarkSweepThread::should_yield() &&
                       !CMSCollector::foregroundGCIsActive(); ++i) {
        os::sleep(Thread::current(), 1, false);
-        ConcurrentMarkSweepThread::acknowledge_yield_request();
      }
 
      ConcurrentMarkSweepThread::synchronize(true);
@@ -6509,10 +6275,6 @@ void CMSCollector::reset(bool asynch) {
     _collectorState = Idling;
   }
 
-  // Stop incremental mode after a cycle completes, so that any future cycles
-  // are triggered by allocation.
-  stop_icms();
-
   NOT_PRODUCT(
     if (RotateCMSCollectionTypes) {
       _cmsGen->rotate_debug_collection_type();
@@ -6964,12 +6726,10 @@ void MarkRefsIntoAndScanClosure::do_yield_work() {
   _bit_map->lock()->unlock();
   _freelistLock->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
-  ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
-  _collector->icms_wait();
 
   // See the comment in coordinator_yield()
   for (unsigned i = 0;
@@ -6978,7 +6738,6 @@ void MarkRefsIntoAndScanClosure::do_yield_work() {
        !CMSCollector::foregroundGCIsActive();
        ++i) {
     os::sleep(Thread::current(), 1, false);
-    ConcurrentMarkSweepThread::acknowledge_yield_request();
   }
 
   ConcurrentMarkSweepThread::synchronize(true);
@@ -7124,19 +6883,16 @@ void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
   _bitMap->lock()->unlock();
   _freelistLock->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
-  ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
-  _collector->icms_wait();
 
   // See the comment in coordinator_yield()
   for (unsigned i = 0; i < CMSYieldSleepCount &&
                        ConcurrentMarkSweepThread::should_yield() &&
                        !CMSCollector::foregroundGCIsActive(); ++i) {
     os::sleep(Thread::current(), 1, false);
-    ConcurrentMarkSweepThread::acknowledge_yield_request();
   }
 
   ConcurrentMarkSweepThread::synchronize(true);
@@ -7196,19 +6952,16 @@ void SurvivorSpacePrecleanClosure::do_yield_work() {
   // Relinquish the bit map lock
   _bit_map->lock()->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
-  ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
-  _collector->icms_wait();
 
   // See the comment in coordinator_yield()
   for (unsigned i = 0; i < CMSYieldSleepCount &&
                        ConcurrentMarkSweepThread::should_yield() &&
                        !CMSCollector::foregroundGCIsActive(); ++i) {
     os::sleep(Thread::current(), 1, false);
-    ConcurrentMarkSweepThread::acknowledge_yield_request();
   }
 
   ConcurrentMarkSweepThread::synchronize(true);
@@ -7354,19 +7107,16 @@ void MarkFromRootsClosure::do_yield_work() {
   assert_lock_strong(_bitMap->lock());
   _bitMap->lock()->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
-  ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
-  _collector->icms_wait();
 
   // See the comment in coordinator_yield()
   for (unsigned i = 0; i < CMSYieldSleepCount &&
                        ConcurrentMarkSweepThread::should_yield() &&
                        !CMSCollector::foregroundGCIsActive(); ++i) {
     os::sleep(Thread::current(), 1, false);
-    ConcurrentMarkSweepThread::acknowledge_yield_request();
   }
 
   ConcurrentMarkSweepThread::synchronize(true);
@@ -7388,7 +7138,7 @@ void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
   _finger = ptr + obj->size();
   assert(_finger > ptr, "we just incremented it above");
   // On large heaps, it may take us some time to get through
-  // the marking phase (especially if running iCMS). During
+  // the marking phase. During
   // this time it's possible that a lot of mutations have
   // accumulated in the card table and the mod union table --
   // these mutation records are redundant until we have
@@ -7505,7 +7255,7 @@ void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
   _finger = ptr + obj->size();
   assert(_finger > ptr, "we just incremented it above");
   // On large heaps, it may take us some time to get through
-  // the marking phase (especially if running iCMS). During
+  // the marking phase. During
   // this time it's possible that a lot of mutations have
   // accumulated in the card table and the mod union table --
   // these mutation records are redundant until we have
@@ -7994,20 +7744,16 @@ void CMSPrecleanRefsYieldClosure::do_yield_work() {
   bml->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
 
-  ConcurrentMarkSweepThread::acknowledge_yield_request();
-
   _collector->stopTimer();
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
-  _collector->icms_wait();
 
   // See the comment in coordinator_yield()
   for (unsigned i = 0; i < CMSYieldSleepCount &&
                        ConcurrentMarkSweepThread::should_yield() &&
                        !CMSCollector::foregroundGCIsActive(); ++i) {
     os::sleep(Thread::current(), 1, false);
-    ConcurrentMarkSweepThread::acknowledge_yield_request();
   }
 
   ConcurrentMarkSweepThread::synchronize(true);
@@ -8675,19 +8421,16 @@ void SweepClosure::do_yield_work(HeapWord* addr) {
   _bitMap->lock()->unlock();
   _freelistLock->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
-  ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
-  _collector->icms_wait();
 
   // See the comment in coordinator_yield()
   for (unsigned i = 0; i < CMSYieldSleepCount &&
                        ConcurrentMarkSweepThread::should_yield() &&
                        !CMSCollector::foregroundGCIsActive(); ++i) {
     os::sleep(Thread::current(), 1, false);
-    ConcurrentMarkSweepThread::acknowledge_yield_request();
   }
 
   ConcurrentMarkSweepThread::synchronize(true);
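Every do_yield_work() hunk above keeps the same cooperative-yield shape once the iCMS steps are gone: drop the contended locks, step out of the CMS/VM rendezvous, nap briefly while a yield is still requested, then step back in and relock. A schematic sketch only; the lock and rendezvous functions here are stand-in stubs, not HotSpot APIs:

#include <chrono>
#include <mutex>
#include <thread>

std::mutex bitmap_lock;
bool yield_requested = true;   // assumed: set elsewhere by the requesting thread

bool should_yield()  { return yield_requested; }
void desynchronize() { /* leave the rendezvous (stub) */ }
void synchronize()   { /* re-enter the rendezvous (stub) */ }

void do_yield_work() {
  bitmap_lock.unlock();        // release what the requester is waiting on
  desynchronize();
  for (int i = 0; i < 10 && should_yield(); i++) {
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }
  synchronize();
  bitmap_lock.lock();          // reacquire before resuming concurrent work
}

int main() {
  bitmap_lock.lock();          // caller owns the lock around the yield point
  do_yield_work();
  bitmap_lock.unlock();
  return 0;
}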
concurrentMarkSweepGeneration.hpp:

@@ -356,7 +356,6 @@ class CMSStats VALUE_OBJ_CLASS_SPEC {
   size_t _gc0_promoted;     // bytes promoted per gc0
   double _cms_duration;
   double _cms_duration_pre_sweep; // time from initiation to start of sweep
-  double _cms_duration_per_mb;
   double _cms_period;
   size_t _cms_allocated;    // bytes of direct allocation per gc0 period
 
@@ -383,17 +382,7 @@ class CMSStats VALUE_OBJ_CLASS_SPEC {
 
   unsigned int _valid_bits;
 
-  unsigned int _icms_duty_cycle;  // icms duty cycle (0-100).
-
  protected:
-  // Return a duty cycle that avoids wild oscillations, by limiting the amount
-  // of change between old_duty_cycle and new_duty_cycle (the latter is treated
-  // as a recommended value).
-  static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
-                                             unsigned int new_duty_cycle);
-  unsigned int icms_update_duty_cycle_impl();
-
   // In support of adjusting of cms trigger ratios based on history
   // of concurrent mode failure.
   double cms_free_adjustment_factor(size_t free) const;
@@ -426,7 +415,6 @@ class CMSStats VALUE_OBJ_CLASS_SPEC {
   size_t gc0_promoted() const     { return _gc0_promoted; }
   double cms_period() const       { return _cms_period; }
   double cms_duration() const     { return _cms_duration; }
-  double cms_duration_per_mb() const { return _cms_duration_per_mb; }
   size_t cms_allocated() const    { return _cms_allocated; }
 
   size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;}
@@ -458,12 +446,6 @@ class CMSStats VALUE_OBJ_CLASS_SPEC {
 
   // End of higher level statistics.
 
-  // Returns the cms incremental mode duty cycle, as a percentage (0-100).
-  unsigned int icms_duty_cycle() const { return _icms_duty_cycle; }
-
-  // Update the duty cycle and return the new value.
-  unsigned int icms_update_duty_cycle();
-
   // Debugging.
   void print_on(outputStream* st) const PRODUCT_RETURN;
   void print() const { print_on(gclog_or_tty); }
@@ -725,13 +707,6 @@ class CMSCollector: public CHeapObj<mtGC> {
   // Timing, allocation and promotion statistics, used for scheduling.
   CMSStats _stats;
 
-  // Allocation limits installed in the young gen, used only in
-  // CMSIncrementalMode. When an allocation in the young gen would cross one of
-  // these limits, the cms generation is notified and the cms thread is started
-  // or stopped, respectively.
-  HeapWord* _icms_start_limit;
-  HeapWord* _icms_stop_limit;
-
   enum CMS_op_type {
     CMS_op_checkpointRootsInitial,
     CMS_op_checkpointRootsFinal
@@ -867,10 +842,6 @@ class CMSCollector: public CHeapObj<mtGC> {
   // collector.
   bool waitForForegroundGC();
 
-  // Incremental mode triggering: recompute the icms duty cycle and set the
-  // allocation limits in the young gen.
-  void icms_update_allocation_limits();
-
   size_t block_size_using_printezis_bits(HeapWord* addr) const;
   size_t block_size_if_printezis_bits(HeapWord* addr) const;
   HeapWord* next_card_start_after_block(HeapWord* addr) const;
@@ -928,9 +899,6 @@ class CMSCollector: public CHeapObj<mtGC> {
   void promoted(bool par, HeapWord* start,
                 bool is_obj_array, size_t obj_size);
 
-  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
-                                     size_t word_size);
-
   void getFreelistLocks() const;
   void releaseFreelistLocks() const;
   bool haveFreelistLocks() const;
@@ -1001,14 +969,6 @@ class CMSCollector: public CHeapObj<mtGC> {
   // Timers/stats for gc scheduling and incremental mode pacing.
   CMSStats& stats() { return _stats; }
 
-  // Convenience methods that check whether CMSIncrementalMode is enabled and
-  // forward to the corresponding methods in ConcurrentMarkSweepThread.
-  static void start_icms();
-  static void stop_icms();    // Called at the end of the cms cycle.
-  static void disable_icms(); // Called before a foreground collection.
-  static void enable_icms();  // Called after a foreground collection.
-  void icms_wait();           // Called at yield points.
-
   // Adaptive size policy
   AdaptiveSizePolicy* size_policy();
 
@@ -1211,9 +1171,6 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
     return allocate(size, tlab);
   }
 
-  // Incremental mode triggering.
-  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
-                                     size_t word_size);
-
   // Used by CMSStats to track direct allocation. The value is sampled and
   // reset after each young gen collection.
concurrentMarkSweepGeneration.inline.hpp:

@@ -234,36 +234,6 @@ inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
   }
 }
 
-inline void CMSCollector::start_icms() {
-  if (CMSIncrementalMode) {
-    ConcurrentMarkSweepThread::start_icms();
-  }
-}
-
-inline void CMSCollector::stop_icms() {
-  if (CMSIncrementalMode) {
-    ConcurrentMarkSweepThread::stop_icms();
-  }
-}
-
-inline void CMSCollector::disable_icms() {
-  if (CMSIncrementalMode) {
-    ConcurrentMarkSweepThread::disable_icms();
-  }
-}
-
-inline void CMSCollector::enable_icms() {
-  if (CMSIncrementalMode) {
-    ConcurrentMarkSweepThread::enable_icms();
-  }
-}
-
-inline void CMSCollector::icms_wait() {
-  if (CMSIncrementalMode) {
-    cmsThread()->icms_wait();
-  }
-}
-
 inline void CMSCollector::save_sweep_limits() {
   _cmsGen->save_sweep_limit();
 }
@@ -363,12 +333,6 @@ inline void CMSStats::record_cms_end() {
   _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
                                                    cur_duration, _cms_alpha);
 
-  // Avoid division by 0.
-  const size_t cms_used_mb = MAX2(_cms_used_at_cms_begin / M, (size_t)1);
-  _cms_duration_per_mb = AdaptiveWeightedAverage::exp_avg(_cms_duration_per_mb,
-                                                          cur_duration / cms_used_mb,
-                                                          _cms_alpha);
-
   _cms_end_time.update();
   _cms_alpha = _saved_alpha;
   _allow_duty_cycle_reduction = true;
@@ -400,15 +364,6 @@ inline double CMSStats::cms_consumption_rate() const {
   return (gc0_promoted() + cms_allocated()) / gc0_period();
 }
 
-inline unsigned int CMSStats::icms_update_duty_cycle() {
-  // Update the duty cycle only if pacing is enabled and the stats are valid
-  // (after at least one young gen gc and one cms cycle have completed).
-  if (CMSIncrementalPacing && valid()) {
-    return icms_update_duty_cycle_impl();
-  }
-  return _icms_duty_cycle;
-}
-
 inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
   cmsSpace()->save_sweep_limit();
 }
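The CMSStats fields above are maintained with exponentially decaying averages via AdaptiveWeightedAverage::exp_avg. A sketch of the recurrence, with alpha expressed as a percentage weight on the old value; the exact weighting convention in HotSpot is an assumption here:

#include <cstdio>

double exp_avg(double avg, double sample, unsigned alpha /* 0..100 */) {
  // Keep alpha% of the history, blend in (100 - alpha)% of the new sample.
  return (alpha * avg + (100 - alpha) * sample) / 100.0;
}

int main() {
  double d = 0.0;
  const double samples[] = {2.0, 2.0, 8.0};
  for (double s : samples) {
    d = exp_avg(d, s, 70);     // new samples move the average gradually
    std::printf("%.3f\n", d);  // 0.600, 1.020, 3.114
  }
  return 0;
}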
@ -49,13 +49,6 @@ bool ConcurrentMarkSweepThread::_should_terminate = false;
|
||||||
int ConcurrentMarkSweepThread::_CMS_flag = CMS_nil;
|
int ConcurrentMarkSweepThread::_CMS_flag = CMS_nil;
|
||||||
|
|
||||||
volatile jint ConcurrentMarkSweepThread::_pending_yields = 0;
|
volatile jint ConcurrentMarkSweepThread::_pending_yields = 0;
|
||||||
volatile jint ConcurrentMarkSweepThread::_pending_decrements = 0;
|
|
||||||
|
|
||||||
volatile jint ConcurrentMarkSweepThread::_icms_disabled = 0;
|
|
||||||
volatile bool ConcurrentMarkSweepThread::_should_run = false;
|
|
||||||
// When icms is enabled, the icms thread is stopped until explicitly
|
|
||||||
// started.
|
|
||||||
volatile bool ConcurrentMarkSweepThread::_should_stop = true;
|
|
||||||
|
|
||||||
SurrogateLockerThread*
|
SurrogateLockerThread*
|
||||||
ConcurrentMarkSweepThread::_slt = NULL;
|
ConcurrentMarkSweepThread::_slt = NULL;
|
||||||
|
@ -99,7 +92,6 @@ ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
_sltMonitor = SLT_lock;
|
_sltMonitor = SLT_lock;
|
||||||
assert(!CMSIncrementalMode || icms_is_enabled(), "Error");
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void ConcurrentMarkSweepThread::run() {
|
void ConcurrentMarkSweepThread::run() {
|
||||||
|
@ -184,11 +176,6 @@ ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::start(CMSCollector* collec
|
||||||
}
|
}
|
||||||
|
|
||||||
void ConcurrentMarkSweepThread::stop() {
|
void ConcurrentMarkSweepThread::stop() {
|
||||||
if (CMSIncrementalMode) {
|
|
||||||
// Disable incremental mode and wake up the thread so it notices the change.
|
|
||||||
disable_icms();
|
|
||||||
start_icms();
|
|
||||||
}
|
|
||||||
// it is ok to take late safepoints here, if needed
|
// it is ok to take late safepoints here, if needed
|
||||||
{
|
{
|
||||||
MutexLockerEx x(Terminator_lock);
|
MutexLockerEx x(Terminator_lock);
|
||||||
|
@ -387,23 +374,13 @@ void ConcurrentMarkSweepThread::wait_on_cms_lock_for_scavenge(long t_millis) {
|
||||||
|
|
||||||
void ConcurrentMarkSweepThread::sleepBeforeNextCycle() {
|
void ConcurrentMarkSweepThread::sleepBeforeNextCycle() {
|
||||||
while (!_should_terminate) {
|
while (!_should_terminate) {
|
||||||
if (CMSIncrementalMode) {
|
if(CMSWaitDuration >= 0) {
|
||||||
icms_wait();
|
// Wait until the next synchronous GC, a concurrent full gc
|
||||||
if(CMSWaitDuration >= 0) {
|
// request or a timeout, whichever is earlier.
|
||||||
// Wait until the next synchronous GC, a concurrent full gc
|
wait_on_cms_lock_for_scavenge(CMSWaitDuration);
|
||||||
// request or a timeout, whichever is earlier.
|
|
||||||
wait_on_cms_lock_for_scavenge(CMSWaitDuration);
|
|
||||||
}
|
|
||||||
return;
|
|
||||||
} else {
|
} else {
|
||||||
if(CMSWaitDuration >= 0) {
|
// Wait until any cms_lock event or check interval not to call shouldConcurrentCollect permanently
|
||||||
// Wait until the next synchronous GC, a concurrent full gc
|
wait_on_cms_lock(CMSCheckInterval);
|
||||||
// request or a timeout, whichever is earlier.
|
|
||||||
wait_on_cms_lock_for_scavenge(CMSWaitDuration);
|
|
||||||
} else {
|
|
||||||
// Wait until any cms_lock event or check interval not to call shouldConcurrentCollect permanently
|
|
||||||
wait_on_cms_lock(CMSCheckInterval);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
// Check if we should start a CMS collection cycle
|
// Check if we should start a CMS collection cycle
|
||||||
if (_collector->shouldConcurrentCollect()) {
|
if (_collector->shouldConcurrentCollect()) {
|
||||||
|
@@ -414,42 +391,6 @@ void ConcurrentMarkSweepThread::sleepBeforeNextCycle() {
   }
 }
 
-// Incremental CMS
-void ConcurrentMarkSweepThread::start_icms() {
-  assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking");
-  MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag);
-  trace_state("start_icms");
-  _should_run = true;
-  iCMS_lock->notify_all();
-}
-
-void ConcurrentMarkSweepThread::stop_icms() {
-  assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking");
-  MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag);
-  if (!_should_stop) {
-    trace_state("stop_icms");
-    _should_stop = true;
-    _should_run = false;
-    asynchronous_yield_request();
-    iCMS_lock->notify_all();
-  }
-}
-
-void ConcurrentMarkSweepThread::icms_wait() {
-  assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking");
-  if (_should_stop && icms_is_enabled()) {
-    MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag);
-    trace_state("pause_icms");
-    _collector->stats().stop_cms_timer();
-    while(!_should_run && icms_is_enabled()) {
-      iCMS_lock->wait(Mutex::_no_safepoint_check_flag);
-    }
-    _collector->stats().start_cms_timer();
-    _should_stop = false;
-    trace_state("pause_icms end");
-  }
-}
-
 // Note: this method, although exported by the ConcurrentMarkSweepThread,
 // which is a non-JavaThread, can only be called by a JavaThread.
 // Currently this is done at vm creation time (post-vm-init) by the
@@ -64,20 +64,11 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
   static bool clear_CMS_flag(int b) { return (_CMS_flag &= ~b) != 0; }
   void sleepBeforeNextCycle();
 
-  // CMS thread should yield for a young gen collection, direct allocation,
-  // and iCMS activity.
+  // CMS thread should yield for a young gen collection and direct allocations
   static char _pad_1[64 - sizeof(jint)];   // prevent cache-line sharing
   static volatile jint _pending_yields;
-  static volatile jint _pending_decrements; // decrements to _pending_yields
   static char _pad_2[64 - sizeof(jint)];   // prevent cache-line sharing
 
-  // Tracing messages, enabled by CMSTraceThreadState.
-  static inline void trace_state(const char* desc);
-
-  static volatile int _icms_disabled;   // a counter to track #iCMS disable & enable
-  static volatile bool _should_run;     // iCMS may run
-  static volatile bool _should_stop;    // iCMS should stop
-
   // debugging
   void verify_ok_to_terminate() const PRODUCT_RETURN;
 
@@ -135,44 +126,13 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
   void wait_on_cms_lock_for_scavenge(long t_millis);
 
   // The CMS thread will yield during the work portion of its cycle
-  // only when requested to.  Both synchronous and asychronous requests
-  // are provided:
-  // (1) A synchronous request is used for young gen collections and
-  //     for direct allocations.  The requesting thread increments
-  //     _pending_yields at the beginning of an operation, and decrements
-  //     _pending_yields when that operation is completed.
-  //     In turn, the CMS thread yields when _pending_yields is positive,
-  //     and continues to yield until the value reverts to 0.
-  // (2) An asynchronous request, on the other hand, is used by iCMS
-  //     for the stop_icms() operation. A single yield satisfies all of
-  //     the outstanding asynch yield requests, of which there may
-  //     occasionally be several in close succession. To accomplish
-  //     this, an asynch-requesting thread atomically increments both
-  //     _pending_yields and _pending_decrements. An asynchr requesting
-  //     thread does not wait and "acknowledge" completion of an operation
-  //     and deregister the request, like the synchronous version described
-  //     above does. In turn, after yielding, the CMS thread decrements both
-  //     _pending_yields and _pending_decrements by the value seen in
-  //     _pending_decrements before the decrement.
-  // NOTE: The above scheme is isomorphic to having two request counters,
-  // one for async requests and one for sync requests, and for the CMS thread
-  // to check the sum of the two counters to decide whether it should yield
-  // and to clear only the async counter when it yields. However, it turns out
-  // to be more efficient for CMS code to just check a single counter
-  // _pending_yields that holds the sum (of both sync and async requests), and
-  // a second counter _pending_decrements that only holds the async requests,
-  // for greater efficiency, since in a typical CMS run, there are many more
-  // potential (i.e. static) yield points than there are actual
-  // (i.e. dynamic) yields because of requests, which are few and far between.
-  //
-  // Note that, while "_pending_yields >= _pending_decrements" is an invariant,
-  // we cannot easily test that invariant, since the counters are manipulated via
-  // atomic instructions without explicit locking and we cannot read
-  // the two counters atomically together: one suggestion is to
-  // use (for example) 16-bit counters so as to be able to read the
-  // two counters atomically even on 32-bit platforms. Notice that
-  // the second assert in acknowledge_yield_request() below does indeed
-  // check a form of the above invariant, albeit indirectly.
+  // only when requested to.
+  // A synchronous request is used for young gen collections and
+  // for direct allocations.  The requesting thread increments
+  // _pending_yields at the beginning of an operation, and decrements
+  // _pending_yields when that operation is completed.
+  // In turn, the CMS thread yields when _pending_yields is positive,
+  // and continues to yield until the value reverts to 0.
 
   static void increment_pending_yields() {
     Atomic::inc(&_pending_yields);
@@ -182,67 +142,9 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
     Atomic::dec(&_pending_yields);
     assert(_pending_yields >= 0, "can't be negative");
   }
-  static void asynchronous_yield_request() {
-    assert(CMSIncrementalMode, "Currently only used w/iCMS");
-    increment_pending_yields();
-    Atomic::inc(&_pending_decrements);
-    assert(_pending_decrements >= 0, "can't be negative");
-  }
-  static void acknowledge_yield_request() {
-    jint decrement = _pending_decrements;
-    if (decrement > 0) {
-      assert(CMSIncrementalMode, "Currently only used w/iCMS");
-      // Order important to preserve: _pending_yields >= _pending_decrements
-      Atomic::add(-decrement, &_pending_decrements);
-      Atomic::add(-decrement, &_pending_yields);
-      assert(_pending_decrements >= 0, "can't be negative");
-      assert(_pending_yields >= 0, "can't be negative");
-    }
-  }
   static bool should_yield() { return _pending_yields > 0; }
-
-  // CMS incremental mode.
-  static void start_icms(); // notify thread to start a quantum of work
-  static void stop_icms();  // request thread to stop working
-  void icms_wait();         // if asked to stop, wait until notified to start
-
-  // Incremental mode is enabled globally by the flag CMSIncrementalMode.  It
-  // must also be enabled/disabled dynamically to allow foreground collections.
-#define ICMS_ENABLING_ASSERT                                      \
-          assert((CMSIncrementalMode  && _icms_disabled >= 0) ||  \
-                 (!CMSIncrementalMode && _icms_disabled <= 0), "Error")
-
-  static inline void enable_icms() {
-    ICMS_ENABLING_ASSERT;
-    Atomic::dec(&_icms_disabled);
-  }
-  static inline void disable_icms() {
-    ICMS_ENABLING_ASSERT;
-    Atomic::inc(&_icms_disabled);
-  }
-  static inline bool icms_is_disabled() {
-    ICMS_ENABLING_ASSERT;
-    return _icms_disabled > 0;
-  }
-  static inline bool icms_is_enabled() {
-    return !icms_is_disabled();
-  }
 };
 
-inline void ConcurrentMarkSweepThread::trace_state(const char* desc) {
-  if (CMSTraceThreadState) {
-    char buf[128];
-    TimeStamp& ts = gclog_or_tty->time_stamp();
-    if (!ts.is_updated()) {
-      ts.update();
-    }
-    jio_snprintf(buf, sizeof(buf), " [%.3f: CMSThread %s] ",
-                 ts.seconds(), desc);
-    buf[sizeof(buf) - 1] = '\0';
-    gclog_or_tty->print("%s", buf);
-  }
-}
-
 // For scoped increment/decrement of (synchronous) yield requests
 class CMSSynchronousYieldRequest: public StackObj {
  public:
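The comment block kept by this hunk describes the synchronous yield-request protocol that survives the iCMS removal: a requesting thread bumps _pending_yields for the duration of its operation, and the CMS thread yields at its safe points while the counter is positive. As a rough illustration only, the same protocol can be sketched with standard C++11 atomics; the names below are hypothetical, and HotSpot itself uses Atomic::inc/dec on a padded jint rather than std::atomic.

#include <atomic>

// Counter analogous to _pending_yields; the real field is padded against
// false sharing, as the hunk above shows.
static std::atomic<int> g_pending_yields(0);

// Scoped helper in the spirit of CMSSynchronousYieldRequest: the requesting
// thread holds the request for the whole operation, and the collector polls
// should_yield() at its yield points, pausing while the counter is positive.
struct ScopedYieldRequest {
  ScopedYieldRequest()  { g_pending_yields.fetch_add(1, std::memory_order_relaxed); }
  ~ScopedYieldRequest() { g_pending_yields.fetch_sub(1, std::memory_order_relaxed); }
};

static bool should_yield() {
  return g_pending_yields.load(std::memory_order_relaxed) > 0;
}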
@@ -207,12 +207,6 @@ void VM_GenCollectFullConcurrent::doit() {
     MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
     assert(_full_gc_count_before <= gch->total_full_collections(), "Error");
     if (gch->total_full_collections() == _full_gc_count_before) {
-      // Disable iCMS until the full collection is done, and
-      // remember that we did so.
-      CMSCollector::disable_icms();
-      _disabled_icms = true;
-      // In case CMS thread was in icms_wait(), wake it up.
-      CMSCollector::start_icms();
       // Nudge the CMS thread to start a concurrent collection.
       CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause);
     } else {
@@ -276,8 +270,4 @@ void VM_GenCollectFullConcurrent::doit_epilogue() {
       FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
     }
   }
-  // Enable iCMS back if we disabled it earlier.
-  if (_disabled_icms) {
-    CMSCollector::enable_icms();
-  }
 }
@@ -128,13 +128,11 @@ class VM_CMS_Final_Remark: public VM_CMS_Operation {
 // VM operation to invoke a concurrent collection of the heap as a
 // GenCollectedHeap heap.
 class VM_GenCollectFullConcurrent: public VM_GC_Operation {
-  bool _disabled_icms;
  public:
   VM_GenCollectFullConcurrent(unsigned int gc_count_before,
                               unsigned int full_gc_count_before,
                               GCCause::Cause gc_cause)
-    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */),
-      _disabled_icms(false)
+    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */)
   {
     assert(FullGCCount_lock != NULL, "Error");
     assert(UseAsyncConcMarkSweepGC, "Else will hang caller");
@@ -1248,7 +1248,7 @@ public:
   // The same as above but assume that the caller holds the Heap_lock.
   void collect_locked(GCCause::Cause cause);
 
-  virtual void copy_allocation_context_stats(const jint* contexts,
+  virtual bool copy_allocation_context_stats(const jint* contexts,
                                              jlong* totals,
                                              jbyte* accuracy,
                                              jint len);
@@ -25,8 +25,9 @@
 #include "precompiled.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.hpp"
 
-void G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
+bool G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
                                                     jlong* totals,
                                                     jbyte* accuracy,
                                                     jint len) {
+  return false;
 }
@@ -1585,34 +1585,22 @@ public:
   }
 };
 
+uint G1CollectorPolicy::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) {
+  assert(n_workers > 0, "Active gc workers should be greater than 0");
+  const uint overpartition_factor = 4;
+  const uint min_chunk_size = MAX2(n_regions / n_workers, 1U);
+  return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
+}
+
 void
-G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
+G1CollectorPolicy::record_concurrent_mark_cleanup_end(uint n_workers) {
   _collectionSetChooser->clear();
 
-  uint region_num = _g1->num_regions();
-  const uint OverpartitionFactor = 4;
-  uint WorkUnit;
-  // The use of MinChunkSize = 8 in the original code
-  // causes some assertion failures when the total number of
-  // region is less than 8.  The code here tries to fix that.
-  // Should the original code also be fixed?
-  if (no_of_gc_threads > 0) {
-    const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U);
-    WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor),
-                    MinWorkUnit);
-  } else {
-    assert(no_of_gc_threads > 0,
-           "The active gc workers should be greater than 0");
-    // In a product build do something reasonable to avoid a crash.
-    const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U);
-    WorkUnit =
-      MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
-           MinWorkUnit);
-  }
-  _collectionSetChooser->prepare_for_par_region_addition(_g1->num_regions(),
-                                                         WorkUnit);
-  ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser, WorkUnit, (uint) no_of_gc_threads);
-  _g1->workers()->run_task(&parKnownGarbageTask);
+  uint n_regions = _g1->num_regions();
+  uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
+  _collectionSetChooser->prepare_for_par_region_addition(n_regions, chunk_size);
+  ParKnownGarbageTask par_known_garbage_task(_collectionSetChooser, chunk_size, n_workers);
+  _g1->workers()->run_task(&par_known_garbage_task);
 
   _collectionSetChooser->sort_regions();
 
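To make the new chunk-size arithmetic concrete, the following standalone sketch (illustrative, not HotSpot code) restates calculate_parallel_work_chunk_size() with std::max, which behaves like MAX2 in the patch, and checks two sample inputs:

#include <algorithm>
#include <cstdio>

static unsigned chunk_size(unsigned n_workers, unsigned n_regions) {
  const unsigned overpartition_factor = 4;                        // as in the patch
  const unsigned min_chunk = std::max(n_regions / n_workers, 1u); // even split, floored at 1
  return std::max(n_regions / (n_workers * overpartition_factor), min_chunk);
}

int main() {
  // 2048 regions, 8 workers: the even split (256) dominates the
  // overpartitioned share (2048 / 32 = 64), so each claim is 256 regions.
  std::printf("%u\n", chunk_size(8, 2048));  // prints 256
  // Degenerate heap: 3 regions, 8 workers; the 1u floor keeps chunks nonzero,
  // which is exactly the small-heap assertion failure the old comment mentions.
  std::printf("%u\n", chunk_size(8, 3));     // prints 1
  return 0;
}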
@@ -612,6 +612,10 @@ private:
                                      uint desired_min_length,
                                      uint desired_max_length);
 
+  // Calculate and return chunk size (in number of regions) for parallel
+  // concurrent mark cleanup.
+  uint calculate_parallel_work_chunk_size(uint n_workers, uint n_regions);
+
   // Check whether a given young length (young_length) fits into the
   // given target pause time and whether the prediction for the amount
   // of objects to be copied for the given length will fit into the
@@ -687,7 +691,7 @@ public:
 
   // Record start, end, and completion of cleanup.
   void record_concurrent_mark_cleanup_start();
-  void record_concurrent_mark_cleanup_end(int no_of_gc_threads);
+  void record_concurrent_mark_cleanup_end(uint n_workers);
   void record_concurrent_mark_cleanup_completed();
 
   // Records the information about the heap size for reporting in
@@ -97,13 +97,6 @@ G1RemSet::~G1RemSet() {
   FREE_C_HEAP_ARRAY(OopsInHeapRegionClosure*, _cset_rs_update_cl, mtGC);
 }
 
-void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
-  if (_g1->is_in_g1_reserved(mr.start())) {
-    _n += (int) ((mr.byte_size() / CardTableModRefBS::card_size));
-    if (_start_first == NULL) _start_first = mr.start();
-  }
-}
-
 class ScanRSClosure : public HeapRegionClosure {
   size_t _cards_done, _cards;
   G1CollectedHeap* _g1h;
@@ -303,15 +296,6 @@ void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i) {
 
   _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i);
 
-  // Now there should be no dirty cards.
-  if (G1RSLogCheckCardTable) {
-    CountNonCleanMemRegionClosure cl(_g1);
-    _ct_bs->mod_card_iterate(&cl);
-    // XXX This isn't true any more: keeping cards of young regions
-    // marked dirty broke it.  Need some reasonable fix.
-    guarantee(cl.n() == 0, "Card table should be clean.");
-  }
-
   _g1p->phase_times()->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0);
 }
 
@@ -151,19 +151,6 @@ public:
   }
 };
 
-class CountNonCleanMemRegionClosure: public MemRegionClosure {
-  G1CollectedHeap* _g1;
-  int _n;
-  HeapWord* _start_first;
-public:
-  CountNonCleanMemRegionClosure(G1CollectedHeap* g1) :
-    _g1(g1), _n(0), _start_first(NULL)
-  {}
-  void do_MemRegion(MemRegion mr);
-  int n() { return _n; };
-  HeapWord* start_first() { return _start_first; }
-};
-
 class UpdateRSOopClosure: public ExtendedOopClosure {
   HeapRegion* _from;
   G1RemSet* _rs;
@@ -108,10 +108,6 @@
   develop(bool, G1RSBarrierRegionFilter, true,                              \
           "If true, generate region filtering code in RS barrier")         \
                                                                             \
-  develop(bool, G1RSLogCheckCardTable, false,                               \
-          "If true, verify that no dirty cards remain after RS log "       \
-          "processing.")                                                    \
-                                                                            \
   diagnostic(bool, G1PrintRegionLivenessInfo, false,                        \
              "Prints the liveness information for all regions in the heap " \
              "at the end of a marking cycle.")                              \
@@ -960,6 +960,10 @@ void HeapRegion::verify() const {
   verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
 }
 
+void HeapRegion::prepare_for_compaction(CompactPoint* cp) {
+  scan_and_forward(this, cp);
+}
+
 // G1OffsetTableContigSpace code; copied from space.cpp.  Hope this can go
 // away eventually.
 
@@ -1043,12 +1047,6 @@ void G1OffsetTableContigSpace::object_iterate(ObjectClosure* blk) {
   }
 }
 
-#define block_is_always_obj(q) true
-void G1OffsetTableContigSpace::prepare_for_compaction(CompactPoint* cp) {
-  SCAN_AND_FORWARD(cp, top, block_is_always_obj, block_size);
-}
-#undef block_is_always_obj
-
 G1OffsetTableContigSpace::
 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                          MemRegion mr) :
@@ -187,8 +187,6 @@ class G1OffsetTableContigSpace: public CompactibleSpace {
   HeapWord* block_start(const void* p);
   HeapWord* block_start_const(const void* p) const;
 
-  void prepare_for_compaction(CompactPoint* cp);
-
   // Add offset table update.
   virtual HeapWord* allocate(size_t word_size);
   HeapWord* par_allocate(size_t word_size);
@@ -210,6 +208,9 @@ class G1OffsetTableContigSpace: public CompactibleSpace {
 
 class HeapRegion: public G1OffsetTableContigSpace {
   friend class VMStructs;
+  // Allow scan_and_forward to call (private) overrides for auxiliary functions on this class
+  template <typename SpaceType>
+  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
  private:
 
   // The remembered set for this region.
@@ -219,6 +220,20 @@ class HeapRegion: public G1OffsetTableContigSpace {
 
   G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }
 
+  // Auxiliary functions for scan_and_forward support.
+  // See comments for CompactibleSpace for more information.
+  inline HeapWord* scan_limit() const {
+    return top();
+  }
+
+  inline bool scanned_block_is_obj(const HeapWord* addr) const {
+    return true; // Always true, since scan_limit is top
+  }
+
+  inline size_t scanned_block_size(const HeapWord* addr) const {
+    return HeapRegion::block_size(addr); // Avoid virtual call
+  }
+
  protected:
   // The index of this region in the heap region sequence.
   uint _hrm_index;
@@ -340,6 +355,9 @@ class HeapRegion: public G1OffsetTableContigSpace {
   // and the amount of unallocated words if called on top()
   size_t block_size(const HeapWord* p) const;
 
+  // Override for scan_and_forward support.
+  void prepare_for_compaction(CompactPoint* cp);
+
   inline HeapWord* par_allocate_no_bot_updates(size_t word_size);
   inline HeapWord* allocate_no_bot_updates(size_t word_size);
 
@@ -426,11 +426,19 @@ void FreeRegionList_test() {
                                             mtGC);
   G1BlockOffsetSharedArray oa(heap, bot_storage);
   bot_storage->commit_regions(0, num_regions_in_test);
-  HeapRegion hr0(0, &oa, heap);
-  HeapRegion hr1(1, &oa, heap);
-  HeapRegion hr2(2, &oa, heap);
-  HeapRegion hr3(3, &oa, heap);
-  HeapRegion hr4(4, &oa, heap);
+
+  // Set up memory regions for the heap regions.
+  MemRegion mr0(heap.start(), HeapRegion::GrainWords);
+  MemRegion mr1(mr0.end(), HeapRegion::GrainWords);
+  MemRegion mr2(mr1.end(), HeapRegion::GrainWords);
+  MemRegion mr3(mr2.end(), HeapRegion::GrainWords);
+  MemRegion mr4(mr3.end(), HeapRegion::GrainWords);
+
+  HeapRegion hr0(0, &oa, mr0);
+  HeapRegion hr1(1, &oa, mr1);
+  HeapRegion hr2(2, &oa, mr2);
+  HeapRegion hr3(3, &oa, mr3);
+  HeapRegion hr4(4, &oa, mr4);
   l.add_ordered(&hr1);
   l.add_ordered(&hr0);
   l.add_ordered(&hr3);
@@ -27,6 +27,7 @@
 
 #include "gc_implementation/shared/markSweep.hpp"
 #include "gc_interface/collectedHeap.hpp"
+#include "oops/markOop.inline.hpp"
 #include "utilities/stack.inline.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
@@ -644,10 +644,13 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   // For each context in contexts, set the corresponding entries in the totals
   // and accuracy arrays to the current values held by the statistics.  Each
   // array should be of length len.
-  virtual void copy_allocation_context_stats(const jint* contexts,
+  // Returns true if there are more stats available.
+  virtual bool copy_allocation_context_stats(const jint* contexts,
                                              jlong* totals,
                                              jbyte* accuracy,
-                                             jint len) { }
+                                             jint len) {
+    return false;
+  }
 
   /////////////// Unit tests ///////////////
 
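The changed default makes the bool contract explicit: an implementation fills totals and accuracy for the given contexts and answers whether more statistics are pending. A hypothetical consumer could therefore drain the statistics in batches; the sketch below is not part of this patch, and the stub interface and fixed array length are assumptions made so it stands alone.

typedef int         jint;
typedef long long   jlong;
typedef signed char jbyte;

// Stand-in for the CollectedHeap interface above (sketch only).
struct Heap {
  virtual bool copy_allocation_context_stats(const jint* contexts,
                                             jlong* totals,
                                             jbyte* accuracy,
                                             jint len) = 0;
};

// Drain the statistics in batches until the heap reports nothing pending.
void drain_stats(Heap* heap, const jint* contexts, jint len) {
  jlong totals[16];
  jbyte accuracy[16];  // len is assumed to be <= 16 in this sketch
  bool more;
  do {
    // Each call fills totals/accuracy for the given contexts; a true result
    // means another batch is pending.
    more = heap->copy_allocation_context_stats(contexts, totals, accuracy, len);
    // ... publish or log this batch ...
  } while (more);
}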
@@ -401,8 +401,10 @@ class Bytecodes: AllStatic {
   static bool is_astore    (Code code)    { return (code == _astore || code == _astore_0 || code == _astore_1
                                                                     || code == _astore_2 || code == _astore_3); }
 
+  static bool is_const     (Code code)    { return (_aconst_null <= code && code <= _ldc2_w); }
   static bool is_zero_const(Code code)    { return (code == _aconst_null || code == _iconst_0
                                                                     || code == _fconst_0 || code == _dconst_0); }
+  static bool is_return    (Code code)    { return (_ireturn <= code && code <= _return); }
   static bool is_invoke    (Code code)    { return (_invokevirtual <= code && code <= _invokedynamic); }
   static bool has_receiver (Code code)    { assert(is_invoke(code), ""); return code == _invokevirtual ||
                                                                                 code == _invokespecial ||
@@ -189,11 +189,6 @@ class CollectorPolicy : public CHeapObj<mtGC> {
     return CollectorPolicy::CollectorPolicyKind;
   }
 
-  // Returns true if a collector has eden space with soft end.
-  virtual bool has_soft_ended_eden() {
-    return false;
-  }
-
   // Do any updates required to global flags that are due to heap initialization
   // changes
   virtual void post_heap_initialize() = 0;
@@ -194,11 +194,7 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   (HeapWord*)_virtual_space.high());
   Universe::heap()->barrier_set()->resize_covered_region(cmr);
 
-  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
-    _eden_space = new ConcEdenSpace(this);
-  } else {
-    _eden_space = new EdenSpace(this);
-  }
+  _eden_space = new ContiguousSpace();
   _from_space = new ContiguousSpace();
   _to_space   = new ContiguousSpace();
 
@@ -1038,38 +1034,12 @@ HeapWord* DefNewGeneration::allocate(size_t word_size,
     if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
       _next_gen->sample_eden_chunk();
     }
-    return result;
-  }
-  do {
-    HeapWord* old_limit = eden()->soft_end();
-    if (old_limit < eden()->end()) {
-      // Tell the next generation we reached a limit.
-      HeapWord* new_limit =
-        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
-      if (new_limit != NULL) {
-        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
-      } else {
-        assert(eden()->soft_end() == eden()->end(),
-               "invalid state after allocation_limit_reached returned null");
-      }
-    } else {
-      // The allocation failed and the soft limit is equal to the hard limit,
-      // there are no reasons to do an attempt to allocate
-      assert(old_limit == eden()->end(), "sanity check");
-      break;
-    }
-    // Try to allocate until succeeded or the soft limit can't be adjusted
-    result = eden()->par_allocate(word_size);
-  } while (result == NULL);
-
-  // If the eden is full and the last collection bailed out, we are running
-  // out of heap space, and we try to allocate the from-space, too.
-  // allocate_from_space can't be inlined because that would introduce a
-  // circular dependency at compile time.
-  if (result == NULL) {
+  } else {
+    // If the eden is full and the last collection bailed out, we are running
+    // out of heap space, and we try to allocate the from-space, too.
+    // allocate_from_space can't be inlined because that would introduce a
+    // circular dependency at compile time.
     result = allocate_from_space(word_size);
-  } else if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
-    _next_gen->sample_eden_chunk();
   }
   return result;
 }
@@ -1083,11 +1053,6 @@ HeapWord* DefNewGeneration::par_allocate(size_t word_size,
   return res;
 }
 
-void DefNewGeneration::gc_prologue(bool full) {
-  // Ensure that _end and _soft_end are the same in eden space.
-  eden()->set_soft_end(eden()->end());
-}
-
 size_t DefNewGeneration::tlab_capacity() const {
   return eden()->capacity();
 }
@@ -32,7 +32,6 @@
 #include "memory/generation.inline.hpp"
 #include "utilities/stack.hpp"
 
-class EdenSpace;
 class ContiguousSpace;
 class ScanClosure;
 class STWGCTimer;
@@ -132,7 +131,7 @@ protected:
   void adjust_desired_tenuring_threshold();
 
   // Spaces
-  EdenSpace*       _eden_space;
+  ContiguousSpace* _eden_space;
   ContiguousSpace* _from_space;
   ContiguousSpace* _to_space;
 
@@ -214,9 +213,9 @@ protected:
   virtual Generation::Name kind() { return Generation::DefNew; }
 
   // Accessing spaces
-  EdenSpace*       eden() const { return _eden_space; }
+  ContiguousSpace* eden() const { return _eden_space; }
   ContiguousSpace* from() const { return _from_space; }
   ContiguousSpace* to()   const { return _to_space;   }
 
   virtual CompactibleSpace* first_compaction_space() const;
 
@@ -282,8 +281,6 @@ protected:
 
   HeapWord* par_allocate(size_t word_size, bool is_tlab);
 
-  // Prologue & Epilogue
-  virtual void gc_prologue(bool full);
   virtual void gc_epilogue(bool full);
 
   // Save the tops for eden, from, and to
@@ -265,14 +265,6 @@ class Generation: public CHeapObj<mtGC> {
   // Like "allocate", but performs any necessary locking internally.
   virtual HeapWord* par_allocate(size_t word_size, bool is_tlab) = 0;
 
-  // A 'younger' gen has reached an allocation limit, and uses this to notify
-  // the next older gen.  The return value is a new limit, or NULL if none.  The
-  // caller must do the necessary locking.
-  virtual HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
-                                             size_t word_size) {
-    return NULL;
-  }
-
   // Some generation may offer a region for shared, contiguous allocation,
   // via inlined code (by exporting the address of the top and end fields
   // defining the extent of the contiguous allocation region.)
@@ -438,52 +438,8 @@ bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
   }
 }
 
-#define block_is_always_obj(q) true
-#define obj_size(q) oop(q)->size()
-#define adjust_obj_size(s) s
-
-void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) {
-  SCAN_AND_FORWARD(cp, end, block_is_obj, block_size);
-}
-
-// Faster object search.
 void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
-  SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
-}
-
-void Space::adjust_pointers() {
-  // adjust all the interior pointers to point at the new locations of objects
-  // Used by MarkSweep::mark_sweep_phase3()
-
-  // First check to see if there is any work to be done.
-  if (used() == 0) {
-    return;   // Nothing to do.
-  }
-
-  // Otherwise...
-  HeapWord* q = bottom();
-  HeapWord* t = end();
-
-  debug_only(HeapWord* prev_q = NULL);
-  while (q < t) {
-    if (oop(q)->is_gc_marked()) {
-      // q is alive
-
-      // point all the oops to the new location
-      size_t size = oop(q)->adjust_pointers();
-
-      debug_only(prev_q = q);
-
-      q += size;
-    } else {
-      // q is not a live object.  But we're not in a compactible space,
-      // So we don't have live ranges.
-      debug_only(prev_q = q);
-      q += block_size(q);
-      assert(q > prev_q, "we should be moving forward through memory");
-    }
-  }
-  assert(q == t, "just checking");
+  scan_and_forward(this, cp);
 }
 
 void CompactibleSpace::adjust_pointers() {
@@ -492,11 +448,11 @@ void CompactibleSpace::adjust_pointers() {
     return;   // Nothing to do.
   }
 
-  SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
+  scan_and_adjust_pointers(this);
 }
 
 void CompactibleSpace::compact() {
-  SCAN_AND_COMPACT(obj_size);
+  scan_and_compact(this);
 }
 
 void Space::print_short() const { print_short_on(tty); }
@@ -684,13 +640,12 @@ size_t ContiguousSpace::block_size(const HeapWord* p) const {
 }
 
 // This version requires locking.
-inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
-                                                HeapWord* const end_value) {
+inline HeapWord* ContiguousSpace::allocate_impl(size_t size) {
   assert(Heap_lock->owned_by_self() ||
          (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
          "not locked");
   HeapWord* obj = top();
-  if (pointer_delta(end_value, obj) >= size) {
+  if (pointer_delta(end(), obj) >= size) {
     HeapWord* new_top = obj + size;
     set_top(new_top);
     assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
@@ -701,11 +656,10 @@ inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
 }
 
 // This version is lock-free.
-inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size,
-                                                    HeapWord* const end_value) {
+inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size) {
   do {
     HeapWord* obj = top();
-    if (pointer_delta(end_value, obj) >= size) {
+    if (pointer_delta(end(), obj) >= size) {
       HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
       // result can be one of two:
@@ -744,12 +698,12 @@ HeapWord* ContiguousSpace::allocate_aligned(size_t size) {
 
 // Requires locking.
 HeapWord* ContiguousSpace::allocate(size_t size) {
-  return allocate_impl(size, end());
+  return allocate_impl(size);
 }
 
 // Lock-free.
 HeapWord* ContiguousSpace::par_allocate(size_t size) {
-  return par_allocate_impl(size, end());
+  return par_allocate_impl(size);
 }
 
 void ContiguousSpace::allocate_temporary_filler(int factor) {
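par_allocate_impl() above is a classic lock-free bump-pointer allocator: read top, check the remaining capacity against end(), and publish the new top with a compare-and-swap, retrying when another thread won the race. A self-contained sketch of the same scheme with std::atomic (an illustration under that assumption, not HotSpot's Atomic::cmpxchg_ptr API) looks like this:

#include <atomic>
#include <cstddef>

struct BumpSpace {
  std::atomic<char*> top;
  char* end;

  char* par_allocate(std::size_t bytes) {
    char* obj = top.load(std::memory_order_relaxed);
    do {
      if (static_cast<std::size_t>(end - obj) < bytes) {
        return nullptr;  // space exhausted
      }
      // compare_exchange_weak either installs obj + bytes, or refreshes obj
      // with the competing thread's value of top so we recheck and retry;
      // this mirrors the "result can be one of two" comment in the original.
    } while (!top.compare_exchange_weak(obj, obj + bytes,
                                        std::memory_order_relaxed));
    return obj;
  }
};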
@@ -784,49 +738,6 @@ void ContiguousSpace::allocate_temporary_filler(int factor) {
   }
 }
 
-void EdenSpace::clear(bool mangle_space) {
-  ContiguousSpace::clear(mangle_space);
-  set_soft_end(end());
-}
-
-// Requires locking.
-HeapWord* EdenSpace::allocate(size_t size) {
-  return allocate_impl(size, soft_end());
-}
-
-// Lock-free.
-HeapWord* EdenSpace::par_allocate(size_t size) {
-  return par_allocate_impl(size, soft_end());
-}
-
-HeapWord* ConcEdenSpace::par_allocate(size_t size)
-{
-  do {
-    // The invariant is top() should be read before end() because
-    // top() can't be greater than end(), so if an update of _soft_end
-    // occurs between 'end_val = end();' and 'top_val = top();' top()
-    // also can grow up to the new end() and the condition
-    // 'top_val > end_val' is true. To ensure the loading order
-    // OrderAccess::loadload() is required after top() read.
-    HeapWord* obj = top();
-    OrderAccess::loadload();
-    if (pointer_delta(*soft_end_addr(), obj) >= size) {
-      HeapWord* new_top = obj + size;
-      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
-      // result can be one of two:
-      //  the old top value: the exchange succeeded
-      //  otherwise: the new value of the top is returned.
-      if (result == obj) {
-        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
-        return obj;
-      }
-    } else {
-      return NULL;
-    }
-  } while (true);
-}
-
-
 HeapWord* OffsetTableContigSpace::initialize_threshold() {
   return _offsets.initialize_threshold();
 }
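The deleted ConcEdenSpace::par_allocate() documents a subtle invariant worth keeping in mind: top() must be read before the concurrently adjustable soft end, with a load-load barrier in between, or the capacity check may pair a fresh top with a stale limit. A minimal sketch of that ordering with C++11 fences follows; HotSpot spells the barrier OrderAccess::loadload(), and the globals here are assumptions made for the sketch.

#include <atomic>
#include <cstddef>

static std::atomic<char*> g_top(nullptr);       // bump pointer (assumed)
static std::atomic<char*> g_soft_end(nullptr);  // concurrently raised limit (assumed)

// Returns whether 'bytes' fit below the soft limit, reading top first.
static bool fits(std::size_t bytes) {
  char* obj = g_top.load(std::memory_order_relaxed);
  // Load-load barrier: keeps the top read ordered before the limit read,
  // so a limit raised in between can only make the check more permissive.
  std::atomic_thread_fence(std::memory_order_acquire);
  char* limit = g_soft_end.load(std::memory_order_relaxed);
  return limit >= obj && static_cast<std::size_t>(limit - obj) >= bytes;
}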
@@ -41,19 +41,6 @@
 // implementations for keeping track of free and used space,
 // for iterating over objects and free blocks, etc.
 
-// Here's the Space hierarchy:
-//
-// - Space               -- an abstract base class describing a heap area
-//   - CompactibleSpace  -- a space supporting compaction
-//     - CompactibleFreeListSpace -- (used for CMS generation)
-//     - ContiguousSpace -- a compactible space in which all free space
-//                          is contiguous
-//       - EdenSpace     -- contiguous space used as nursery
-//         - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
-//       - OffsetTableContigSpace -- contiguous space with a block offset array
-//                          that allows "fast" block_start calls
-//         - TenuredSpace -- (used for TenuredGeneration)
-
 // Forward decls.
 class Space;
 class BlockOffsetArray;
@@ -238,7 +225,7 @@ class Space: public CHeapObj<mtGC> {
 
   // Mark-sweep-compact support: all spaces can update pointers to objects
   // moving as a part of compaction.
-  virtual void adjust_pointers();
+  virtual void adjust_pointers() = 0;
 
   // PrintHeapAtGC support
   virtual void print() const;
@@ -339,7 +326,36 @@ public:
 // necessarily, a space that is normally contiguous.  But, for example, a
 // free-list-based space whose normal collection is a mark-sweep without
 // compaction could still support compaction in full GC's.
+//
+// The compaction operations are implemented by the
+// scan_and_{adjust_pointers,compact,forward} function templates.
+// The following are, non-virtual, auxiliary functions used by these function templates:
+// - scan_limit()
+// - scanned_block_is_obj()
+// - scanned_block_size()
+// - adjust_obj_size()
+// - obj_size()
+// These functions are to be used exclusively by the scan_and_* function templates,
+// and must be defined for all (non-abstract) subclasses of CompactibleSpace.
+//
+// NOTE: Any subclasses to CompactibleSpace wanting to change/define the behavior
+// in any of the auxiliary functions must also override the corresponding
+// prepare_for_compaction/adjust_pointers/compact functions using them.
+// If not, such changes will not be used or have no effect on the compaction operations.
+//
+// This translates to the following dependencies:
+// Overrides/definitions of
+//  - scan_limit
+//  - scanned_block_is_obj
+//  - scanned_block_size
+// require override/definition of prepare_for_compaction().
+// Similar dependencies exist between
+//  - adjust_obj_size  and adjust_pointers()
+//  - obj_size  and compact().
+//
+// Additionally, this also means that changes to block_size() or block_is_obj() that
+// should be effective during the compaction operations must provide a corresponding
+// definition of scanned_block_size/scanned_block_is_obj respectively.
 class CompactibleSpace: public Space {
   friend class VMStructs;
   friend class CompactibleFreeListSpace;
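The comment block added here is the heart of the refactoring: the SCAN_AND_* macros are replaced by function templates instantiated per concrete space type, so the auxiliary calls bind statically and can be inlined instead of going through a virtual dispatch per object. A minimal sketch of the pattern follows; it is illustrative only, and count_live_blocks and DemoSpace are invented for the example.

#include <cstddef>

// A non-virtual function template takes the concrete space type, so calls to
// the auxiliary functions resolve at compile time, the way scan_and_forward
// uses scan_limit()/scanned_block_is_obj()/scanned_block_size().
template <class SpaceType>
static inline std::size_t count_live_blocks(SpaceType* space) {
  std::size_t n = 0;
  for (char* q = space->bottom(); q < space->scan_limit();
       q += space->scanned_block_size(q)) {  // statically bound, inlinable
    if (space->scanned_block_is_obj(q)) {
      n++;
    }
  }
  return n;
}

// A concrete space only needs to supply the auxiliary functions; no vtable
// dispatch is involved when count_live_blocks<DemoSpace>() is instantiated.
struct DemoSpace {
  char* _bottom;
  char* _top;
  char*       bottom() const                          { return _bottom; }
  char*       scan_limit() const                      { return _top; }
  bool        scanned_block_is_obj(const char*) const { return true; }
  std::size_t scanned_block_size(const char*) const   { return 16; } // fixed-size demo
};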
@ -347,6 +363,15 @@ private:
|
||||||
HeapWord* _compaction_top;
|
HeapWord* _compaction_top;
|
||||||
CompactibleSpace* _next_compaction_space;
|
CompactibleSpace* _next_compaction_space;
|
||||||
|
|
||||||
|
// Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
|
||||||
|
inline size_t adjust_obj_size(size_t size) const {
|
||||||
|
return size;
|
||||||
|
}
|
||||||
|
|
||||||
|
inline size_t obj_size(const HeapWord* addr) const {
|
||||||
|
return oop(addr)->size();
|
||||||
|
}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
CompactibleSpace() :
|
CompactibleSpace() :
|
||||||
_compaction_top(NULL), _next_compaction_space(NULL) {}
|
_compaction_top(NULL), _next_compaction_space(NULL) {}
|
||||||
|
@ -390,7 +415,7 @@ public:
|
||||||
// "cp->compaction_space" up-to-date. Offset tables may be updated in
|
// "cp->compaction_space" up-to-date. Offset tables may be updated in
|
||||||
// this phase as if the final copy had occurred; if so, "cp->threshold"
|
// this phase as if the final copy had occurred; if so, "cp->threshold"
|
||||||
// indicates when the next such action should be taken.
|
// indicates when the next such action should be taken.
|
||||||
virtual void prepare_for_compaction(CompactPoint* cp);
|
virtual void prepare_for_compaction(CompactPoint* cp) = 0;
|
||||||
// MarkSweep support phase3
|
// MarkSweep support phase3
|
||||||
virtual void adjust_pointers();
|
virtual void adjust_pointers();
|
||||||
// MarkSweep support phase4
|
// MarkSweep support phase4
|
||||||
|
@ -449,6 +474,25 @@ protected:
|
||||||
// words remaining after this operation.
|
// words remaining after this operation.
|
||||||
bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
|
bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
|
||||||
size_t word_len);
|
size_t word_len);
|
||||||
|
|
||||||
|
// Below are template functions for scan_and_* algorithms (avoiding virtual calls).
|
||||||
|
// The space argument should be a subclass of CompactibleSpace, implementing
|
||||||
|
// scan_limit(), scanned_block_is_obj(), and scanned_block_size(),
|
||||||
|
// and possibly also overriding obj_size(), and adjust_obj_size().
|
||||||
|
// These functions should avoid virtual calls whenever possible.
|
||||||
|
|
||||||
|
// Frequently calls adjust_obj_size().
|
||||||
|
template <class SpaceType>
|
||||||
|
static inline void scan_and_adjust_pointers(SpaceType* space);
|
||||||
|
|
||||||
|
// Frequently calls obj_size().
|
||||||
|
template <class SpaceType>
|
||||||
|
static inline void scan_and_compact(SpaceType* space);
|
||||||
|
|
||||||
|
// Frequently calls scanned_block_is_obj() and scanned_block_size().
|
||||||
|
// Requires the scan_limit() function.
|
||||||
|
template <class SpaceType>
|
||||||
|
static inline void scan_and_forward(SpaceType* space, CompactPoint* cp);
|
||||||
};
|
};
|
||||||
|
|
||||||
class GenSpaceMangler;
|
class GenSpaceMangler;
|
||||||
|
@ -458,6 +502,25 @@ class GenSpaceMangler;
|
||||||
class ContiguousSpace: public CompactibleSpace {
|
class ContiguousSpace: public CompactibleSpace {
|
||||||
friend class OneContigSpaceCardGeneration;
|
friend class OneContigSpaceCardGeneration;
|
||||||
friend class VMStructs;
|
friend class VMStructs;
|
||||||
|
// Allow scan_and_forward function to call (private) overrides for auxiliary functions on this class
|
||||||
|
template <typename SpaceType>
|
||||||
|
friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
|
||||||
|
|
||||||
|
private:
|
||||||
|
// Auxiliary functions for scan_and_forward support.
|
||||||
|
   // See comments for CompactibleSpace for more information.
+  inline HeapWord* scan_limit() const {
+    return top();
+  }
+
+  inline bool scanned_block_is_obj(const HeapWord* addr) const {
+    return true; // Always true, since scan_limit is top
+  }
+
+  inline size_t scanned_block_size(const HeapWord* addr) const {
+    return oop(addr)->size();
+  }
+
 protected:
   HeapWord* _top;
   HeapWord* _concurrent_iteration_safe_limit;
@@ -467,8 +530,8 @@ class ContiguousSpace: public CompactibleSpace {
   GenSpaceMangler* mangler() { return _mangler; }

   // Allocation helpers (return NULL if full).
-  inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
-  inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
+  inline HeapWord* allocate_impl(size_t word_size);
+  inline HeapWord* par_allocate_impl(size_t word_size);

 public:
   ContiguousSpace();
@@ -622,7 +685,6 @@ class ContiguousSpace: public CompactibleSpace {
   // Used to increase collection frequency. "factor" of 0 means entire
   // space.
   void allocate_temporary_filler(int factor);

 };

-
@@ -685,56 +747,6 @@ public:
     {}
 };

-
-// Class EdenSpace describes eden-space in new generation.
-
-class DefNewGeneration;
-
-class EdenSpace : public ContiguousSpace {
-  friend class VMStructs;
- private:
-  DefNewGeneration* _gen;
-
-  // _soft_end is used as a soft limit on allocation. As soft limits are
-  // reached, the slow-path allocation code can invoke other actions and then
-  // adjust _soft_end up to a new soft limit or to end().
-  HeapWord* _soft_end;
-
- public:
-  EdenSpace(DefNewGeneration* gen) :
-    _gen(gen), _soft_end(NULL) {}
-
-  // Get/set just the 'soft' limit.
-  HeapWord* soft_end() { return _soft_end; }
-  HeapWord** soft_end_addr() { return &_soft_end; }
-  void set_soft_end(HeapWord* value) { _soft_end = value; }
-
-  // Override.
-  void clear(bool mangle_space);
-
-  // Set both the 'hard' and 'soft' limits (_end and _soft_end).
-  void set_end(HeapWord* value) {
-    set_soft_end(value);
-    ContiguousSpace::set_end(value);
-  }
-
-  // Allocation (return NULL if full)
-  HeapWord* allocate(size_t word_size);
-  HeapWord* par_allocate(size_t word_size);
-};
-
-// Class ConcEdenSpace extends EdenSpace for the sake of safe
-// allocation while soft-end is being modified concurrently
-
-class ConcEdenSpace : public EdenSpace {
- public:
-  ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { }
-
-  // Allocation (return NULL if full)
-  HeapWord* par_allocate(size_t word_size);
-};
-
-
 // A ContigSpace that Supports an efficient "block_start" operation via
 // a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
 // other spaces.) This is the abstract base class for old generation
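Note: the three inline queries added above (scan_limit, scanned_block_is_obj, scanned_block_size) form the per-space interface that the scan_and_forward / scan_and_adjust_pointers / scan_and_compact templates later in this diff compile against. A minimal sketch of what another space type would have to provide (hypothetical MySpace, not part of this changeset):

    // Hypothetical space type; only the three queries the templates need.
    class MySpace : public CompactibleSpace {
     public:
      HeapWord* scan_limit() const { return end(); } // scan the whole space
      bool scanned_block_is_obj(const HeapWord* addr) const { return block_is_obj(addr); }
      size_t scanned_block_size(const HeapWord* addr) const { return oop(addr)->size(); }
    };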
@@ -25,6 +25,9 @@
 #ifndef SHARE_VM_MEMORY_SPACE_INLINE_HPP
 #define SHARE_VM_MEMORY_SPACE_INLINE_HPP

+#include "gc_implementation/shared/liveRange.hpp"
+#include "gc_implementation/shared/markSweep.inline.hpp"
+#include "gc_implementation/shared/spaceDecorator.hpp"
 #include "gc_interface/collectedHeap.hpp"
 #include "memory/space.hpp"
 #include "memory/universe.hpp"
@@ -35,272 +38,6 @@ inline HeapWord* Space::block_start(const void* p) {
   return block_start_const(p);
 }

-#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \
-  /* Compute the new addresses for the live objects and store it in the mark \
-   * Used by universe::mark_sweep_phase2() \
-   */ \
-  HeapWord* compact_top; /* This is where we are currently compacting to. */ \
- \
-  /* We're sure to be here before any objects are compacted into this \
-   * space, so this is a good time to initialize this: \
-   */ \
-  set_compaction_top(bottom()); \
- \
-  if (cp->space == NULL) { \
-    assert(cp->gen != NULL, "need a generation"); \
-    assert(cp->threshold == NULL, "just checking"); \
-    assert(cp->gen->first_compaction_space() == this, "just checking"); \
-    cp->space = cp->gen->first_compaction_space(); \
-    compact_top = cp->space->bottom(); \
-    cp->space->set_compaction_top(compact_top); \
-    cp->threshold = cp->space->initialize_threshold(); \
-  } else { \
-    compact_top = cp->space->compaction_top(); \
-  } \
- \
-  /* We allow some amount of garbage towards the bottom of the space, so \
-   * we don't start compacting before there is a significant gain to be made.\
-   * Occasionally, we want to ensure a full compaction, which is determined \
-   * by the MarkSweepAlwaysCompactCount parameter. \
-   */ \
-  uint invocations = MarkSweep::total_invocations(); \
-  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0); \
- \
-  size_t allowed_deadspace = 0; \
-  if (skip_dead) { \
-    const size_t ratio = allowed_dead_ratio(); \
-    allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize; \
-  } \
- \
-  HeapWord* q = bottom(); \
-  HeapWord* t = scan_limit(); \
- \
-  HeapWord* end_of_live= q; /* One byte beyond the last byte of the last \
-                               live object. */ \
-  HeapWord* first_dead = end();/* The first dead object. */ \
-  LiveRange* liveRange = NULL; /* The current live range, recorded in the \
-                                  first header of preceding free area. */ \
-  _first_dead = first_dead; \
- \
-  const intx interval = PrefetchScanIntervalInBytes; \
- \
-  while (q < t) { \
-    assert(!block_is_obj(q) || \
-           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() || \
-           oop(q)->mark()->has_bias_pattern(), \
-           "these are the only valid states during a mark sweep"); \
-    if (block_is_obj(q) && oop(q)->is_gc_marked()) { \
-      /* prefetch beyond q */ \
-      Prefetch::write(q, interval); \
-      size_t size = block_size(q); \
-      compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
-      q += size; \
-      end_of_live = q; \
-    } else { \
-      /* run over all the contiguous dead objects */ \
-      HeapWord* end = q; \
-      do { \
-        /* prefetch beyond end */ \
-        Prefetch::write(end, interval); \
-        end += block_size(end); \
-      } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\
- \
-      /* see if we might want to pretend this object is alive so that \
-       * we don't have to compact quite as often. \
-       */ \
-      if (allowed_deadspace > 0 && q == compact_top) { \
-        size_t sz = pointer_delta(end, q); \
-        if (insert_deadspace(allowed_deadspace, q, sz)) { \
-          compact_top = cp->space->forward(oop(q), sz, cp, compact_top); \
-          q = end; \
-          end_of_live = end; \
-          continue; \
-        } \
-      } \
- \
-      /* otherwise, it really is a free region. */ \
- \
-      /* for the previous LiveRange, record the end of the live objects. */ \
-      if (liveRange) { \
-        liveRange->set_end(q); \
-      } \
- \
-      /* record the current LiveRange object. \
-       * liveRange->start() is overlaid on the mark word. \
-       */ \
-      liveRange = (LiveRange*)q; \
-      liveRange->set_start(end); \
-      liveRange->set_end(end); \
- \
-      /* see if this is the first dead region. */ \
-      if (q < first_dead) { \
-        first_dead = q; \
-      } \
- \
-      /* move on to the next object */ \
-      q = end; \
-    } \
-  } \
- \
-  assert(q == t, "just checking"); \
-  if (liveRange != NULL) { \
-    liveRange->set_end(q); \
-  } \
-  _end_of_live = end_of_live; \
-  if (end_of_live < first_dead) { \
-    first_dead = end_of_live; \
-  } \
-  _first_dead = first_dead; \
- \
-  /* save the compaction_top of the compaction space. */ \
-  cp->space->set_compaction_top(compact_top); \
-}
-
-#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \
-  /* adjust all the interior pointers to point at the new locations of objects \
-   * Used by MarkSweep::mark_sweep_phase3() */ \
- \
-  HeapWord* q = bottom(); \
-  HeapWord* t = _end_of_live; /* Established by "prepare_for_compaction". */ \
- \
-  assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \
- \
-  if (q < t && _first_dead > q && \
-      !oop(q)->is_gc_marked()) { \
-    /* we have a chunk of the space which hasn't moved and we've \
-     * reinitialized the mark word during the previous pass, so we can't \
-     * use is_gc_marked for the traversal. */ \
-    HeapWord* end = _first_dead; \
- \
-    while (q < end) { \
-      /* I originally tried to conjoin "block_start(q) == q" to the \
-       * assertion below, but that doesn't work, because you can't \
-       * accurately traverse previous objects to get to the current one \
-       * after their pointers have been \
-       * updated, until the actual compaction is done. dld, 4/00 */ \
-      assert(block_is_obj(q), \
-             "should be at block boundaries, and should be looking at objs"); \
- \
-      /* point all the oops to the new location */ \
-      size_t size = oop(q)->adjust_pointers(); \
-      size = adjust_obj_size(size); \
- \
-      q += size; \
-    } \
- \
-    if (_first_dead == t) { \
-      q = t; \
-    } else { \
-      /* $$$ This is funky. Using this to read the previously written \
-       * LiveRange. See also use below. */ \
-      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \
-    } \
-  } \
- \
-  const intx interval = PrefetchScanIntervalInBytes; \
- \
-  debug_only(HeapWord* prev_q = NULL); \
-  while (q < t) { \
-    /* prefetch beyond q */ \
-    Prefetch::write(q, interval); \
-    if (oop(q)->is_gc_marked()) { \
-      /* q is alive */ \
-      /* point all the oops to the new location */ \
-      size_t size = oop(q)->adjust_pointers(); \
-      size = adjust_obj_size(size); \
-      debug_only(prev_q = q); \
-      q += size; \
-    } else { \
-      /* q is not a live object, so its mark should point at the next \
-       * live object */ \
-      debug_only(prev_q = q); \
-      q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
-      assert(q > prev_q, "we should be moving forward through memory"); \
-    } \
-  } \
- \
-  assert(q == t, "just checking"); \
-}
-
-#define SCAN_AND_COMPACT(obj_size) { \
-  /* Copy all live objects to their new location \
-   * Used by MarkSweep::mark_sweep_phase4() */ \
- \
-  HeapWord* q = bottom(); \
-  HeapWord* const t = _end_of_live; \
-  debug_only(HeapWord* prev_q = NULL); \
- \
-  if (q < t && _first_dead > q && \
-      !oop(q)->is_gc_marked()) { \
-    debug_only( \
-    /* we have a chunk of the space which hasn't moved and we've reinitialized \
-     * the mark word during the previous pass, so we can't use is_gc_marked for \
-     * the traversal. */ \
-    HeapWord* const end = _first_dead; \
- \
-    while (q < end) { \
-      size_t size = obj_size(q); \
-      assert(!oop(q)->is_gc_marked(), \
-             "should be unmarked (special dense prefix handling)"); \
-      debug_only(prev_q = q); \
-      q += size; \
-    } \
-    ) /* debug_only */ \
- \
-    if (_first_dead == t) { \
-      q = t; \
-    } else { \
-      /* $$$ Funky */ \
-      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \
-    } \
-  } \
- \
-  const intx scan_interval = PrefetchScanIntervalInBytes; \
-  const intx copy_interval = PrefetchCopyIntervalInBytes; \
-  while (q < t) { \
-    if (!oop(q)->is_gc_marked()) { \
-      /* mark is pointer to next marked oop */ \
-      debug_only(prev_q = q); \
-      q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
-      assert(q > prev_q, "we should be moving forward through memory"); \
-    } else { \
-      /* prefetch beyond q */ \
-      Prefetch::read(q, scan_interval); \
- \
-      /* size and destination */ \
-      size_t size = obj_size(q); \
-      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \
- \
-      /* prefetch beyond compaction_top */ \
-      Prefetch::write(compaction_top, copy_interval); \
- \
-      /* copy object and reinit its mark */ \
-      assert(q != compaction_top, "everything in this pass should be moving"); \
-      Copy::aligned_conjoint_words(q, compaction_top, size); \
-      oop(compaction_top)->init_mark(); \
-      assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
- \
-      debug_only(prev_q = q); \
-      q += size; \
-    } \
-  } \
- \
-  /* Let's remember if we were empty before we did the compaction. */ \
-  bool was_empty = used_region().is_empty(); \
-  /* Reset space after compaction is complete */ \
-  reset_after_compaction(); \
-  /* We do this clear, below, since it has overloaded meanings for some */ \
-  /* space subtypes. For example, OffsetTableContigSpace's that were */ \
-  /* compacted into will have had their offset table thresholds updated */ \
-  /* continuously, but those that weren't need to have their thresholds */ \
-  /* re-initialized. Also mangles unused area for debugging. */ \
-  if (used_region().is_empty()) { \
-    if (!was_empty) clear(SpaceDecorator::Mangle); \
-  } else { \
-    if (ZapUnusedHeapArea) mangle_unused_area(); \
-  } \
-}
-
 inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
   HeapWord* res = ContiguousSpace::allocate(size);
   if (res != NULL) {
@@ -334,4 +71,263 @@ OffsetTableContigSpace::block_start_const(const void* p) const {
   return _offsets.block_start(p);
 }

+template <class SpaceType>
+inline void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp) {
+  // Compute the new addresses for the live objects and store it in the mark
+  // Used by universe::mark_sweep_phase2()
+  HeapWord* compact_top; // This is where we are currently compacting to.
+
+  // We're sure to be here before any objects are compacted into this
+  // space, so this is a good time to initialize this:
+  space->set_compaction_top(space->bottom());
+
+  if (cp->space == NULL) {
+    assert(cp->gen != NULL, "need a generation");
+    assert(cp->threshold == NULL, "just checking");
+    assert(cp->gen->first_compaction_space() == space, "just checking");
+    cp->space = cp->gen->first_compaction_space();
+    compact_top = cp->space->bottom();
+    cp->space->set_compaction_top(compact_top);
+    cp->threshold = cp->space->initialize_threshold();
+  } else {
+    compact_top = cp->space->compaction_top();
+  }
+
+  // We allow some amount of garbage towards the bottom of the space, so
+  // we don't start compacting before there is a significant gain to be made.
+  // Occasionally, we want to ensure a full compaction, which is determined
+  // by the MarkSweepAlwaysCompactCount parameter.
+  uint invocations = MarkSweep::total_invocations();
+  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0);
+
+  size_t allowed_deadspace = 0;
+  if (skip_dead) {
+    const size_t ratio = space->allowed_dead_ratio();
+    allowed_deadspace = (space->capacity() * ratio / 100) / HeapWordSize;
+  }
+
+  HeapWord* q = space->bottom();
+  HeapWord* t = space->scan_limit();
+
+  HeapWord* end_of_live= q; // One byte beyond the last byte of the last
+                            // live object.
+  HeapWord* first_dead = space->end(); // The first dead object.
+  LiveRange* liveRange = NULL; // The current live range, recorded in the
+                               // first header of preceding free area.
+  space->_first_dead = first_dead;
+
+  const intx interval = PrefetchScanIntervalInBytes;
+
+  while (q < t) {
+    assert(!space->scanned_block_is_obj(q) ||
+           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||
+           oop(q)->mark()->has_bias_pattern(),
+           "these are the only valid states during a mark sweep");
+    if (space->scanned_block_is_obj(q) && oop(q)->is_gc_marked()) {
+      // prefetch beyond q
+      Prefetch::write(q, interval);
+      size_t size = space->scanned_block_size(q);
+      compact_top = cp->space->forward(oop(q), size, cp, compact_top);
+      q += size;
+      end_of_live = q;
+    } else {
+      // run over all the contiguous dead objects
+      HeapWord* end = q;
+      do {
+        // prefetch beyond end
+        Prefetch::write(end, interval);
+        end += space->scanned_block_size(end);
+      } while (end < t && (!space->scanned_block_is_obj(end) || !oop(end)->is_gc_marked()));
+
+      // see if we might want to pretend this object is alive so that
+      // we don't have to compact quite as often.
+      if (allowed_deadspace > 0 && q == compact_top) {
+        size_t sz = pointer_delta(end, q);
+        if (space->insert_deadspace(allowed_deadspace, q, sz)) {
+          compact_top = cp->space->forward(oop(q), sz, cp, compact_top);
+          q = end;
+          end_of_live = end;
+          continue;
+        }
+      }
+
+      // otherwise, it really is a free region.
+
+      // for the previous LiveRange, record the end of the live objects.
+      if (liveRange) {
+        liveRange->set_end(q);
+      }
+
+      // record the current LiveRange object.
+      // liveRange->start() is overlaid on the mark word.
+      liveRange = (LiveRange*)q;
+      liveRange->set_start(end);
+      liveRange->set_end(end);
+
+      // see if this is the first dead region.
+      if (q < first_dead) {
+        first_dead = q;
+      }
+
+      // move on to the next object
+      q = end;
+    }
+  }
+
+  assert(q == t, "just checking");
+  if (liveRange != NULL) {
+    liveRange->set_end(q);
+  }
+  space->_end_of_live = end_of_live;
+  if (end_of_live < first_dead) {
+    first_dead = end_of_live;
+  }
+  space->_first_dead = first_dead;
+
+  // save the compaction_top of the compaction space.
+  cp->space->set_compaction_top(compact_top);
+}
+
+template <class SpaceType>
+inline void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space) {
+  // adjust all the interior pointers to point at the new locations of objects
+  // Used by MarkSweep::mark_sweep_phase3()
+
+  HeapWord* q = space->bottom();
+  HeapWord* t = space->_end_of_live; // Established by "prepare_for_compaction".
+
+  assert(space->_first_dead <= space->_end_of_live, "Stands to reason, no?");
+
+  if (q < t && space->_first_dead > q && !oop(q)->is_gc_marked()) {
+    // we have a chunk of the space which hasn't moved and we've
+    // reinitialized the mark word during the previous pass, so we can't
+    // use is_gc_marked for the traversal.
+    HeapWord* end = space->_first_dead;
+
+    while (q < end) {
+      // I originally tried to conjoin "block_start(q) == q" to the
+      // assertion below, but that doesn't work, because you can't
+      // accurately traverse previous objects to get to the current one
+      // after their pointers have been
+      // updated, until the actual compaction is done. dld, 4/00
+      assert(space->block_is_obj(q), "should be at block boundaries, and should be looking at objs");
+
+      // point all the oops to the new location
+      size_t size = oop(q)->adjust_pointers();
+      size = space->adjust_obj_size(size);
+
+      q += size;
+    }
+
+    if (space->_first_dead == t) {
+      q = t;
+    } else {
+      // $$$ This is funky. Using this to read the previously written
+      // LiveRange. See also use below.
+      q = (HeapWord*)oop(space->_first_dead)->mark()->decode_pointer();
+    }
+  }
+
+  const intx interval = PrefetchScanIntervalInBytes;
+
+  debug_only(HeapWord* prev_q = NULL);
+  while (q < t) {
+    // prefetch beyond q
+    Prefetch::write(q, interval);
+    if (oop(q)->is_gc_marked()) {
+      // q is alive
+      // point all the oops to the new location
+      size_t size = oop(q)->adjust_pointers();
+      size = space->adjust_obj_size(size);
+      debug_only(prev_q = q);
+      q += size;
+    } else {
+      // q is not a live object, so its mark should point at the next
+      // live object
+      debug_only(prev_q = q);
+      q = (HeapWord*) oop(q)->mark()->decode_pointer();
+      assert(q > prev_q, "we should be moving forward through memory");
+    }
+  }
+
+  assert(q == t, "just checking");
+}
+
+template <class SpaceType>
+inline void CompactibleSpace::scan_and_compact(SpaceType* space) {
+  // Copy all live objects to their new location
+  // Used by MarkSweep::mark_sweep_phase4()
+
+  HeapWord* q = space->bottom();
+  HeapWord* const t = space->_end_of_live;
+  debug_only(HeapWord* prev_q = NULL);
+
+  if (q < t && space->_first_dead > q && !oop(q)->is_gc_marked()) {
+#ifdef ASSERT // Debug only
+    // we have a chunk of the space which hasn't moved and we've reinitialized
+    // the mark word during the previous pass, so we can't use is_gc_marked for
+    // the traversal.
+    HeapWord* const end = space->_first_dead;
+
+    while (q < end) {
+      size_t size = space->obj_size(q);
+      assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
+      prev_q = q;
+      q += size;
+    }
+#endif
+
+    if (space->_first_dead == t) {
+      q = t;
+    } else {
+      // $$$ Funky
+      q = (HeapWord*) oop(space->_first_dead)->mark()->decode_pointer();
+    }
+  }
+
+  const intx scan_interval = PrefetchScanIntervalInBytes;
+  const intx copy_interval = PrefetchCopyIntervalInBytes;
+  while (q < t) {
+    if (!oop(q)->is_gc_marked()) {
+      // mark is pointer to next marked oop
+      debug_only(prev_q = q);
+      q = (HeapWord*) oop(q)->mark()->decode_pointer();
+      assert(q > prev_q, "we should be moving forward through memory");
+    } else {
+      // prefetch beyond q
+      Prefetch::read(q, scan_interval);
+
+      // size and destination
+      size_t size = space->obj_size(q);
+      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();
+
+      // prefetch beyond compaction_top
+      Prefetch::write(compaction_top, copy_interval);
+
+      // copy object and reinit its mark
+      assert(q != compaction_top, "everything in this pass should be moving");
+      Copy::aligned_conjoint_words(q, compaction_top, size);
+      oop(compaction_top)->init_mark();
+      assert(oop(compaction_top)->klass() != NULL, "should have a class");
+
+      debug_only(prev_q = q);
+      q += size;
+    }
+  }
+
+  // Let's remember if we were empty before we did the compaction.
+  bool was_empty = space->used_region().is_empty();
+  // Reset space after compaction is complete
+  space->reset_after_compaction();
+  // We do this clear, below, since it has overloaded meanings for some
+  // space subtypes. For example, OffsetTableContigSpace's that were
+  // compacted into will have had their offset table thresholds updated
+  // continuously, but those that weren't need to have their thresholds
+  // re-initialized. Also mangles unused area for debugging.
+  if (space->used_region().is_empty()) {
+    if (!was_empty) space->clear(SpaceDecorator::Mangle);
+  } else {
+    if (ZapUnusedHeapArea) space->mangle_unused_area();
+  }
+}
 #endif // SHARE_VM_MEMORY_SPACE_INLINE_HPP
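Note: the templates above replace the SCAN_AND_FORWARD / SCAN_AND_ADJUST_POINTERS / SCAN_AND_COMPACT macros removed earlier in this diff; each space type now instantiates the shared code with its own static type, so the scanned_block_* queries inline away without virtual dispatch. A sketch of the expected call shape (the actual call sites are outside the hunks shown here, so treat the caller below as an assumption):

    // Assumed caller: a compactible space invokes the shared template on itself
    // instead of expanding a macro in place.
    void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
      scan_and_forward(this, cp); // was: SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size)
    }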
@@ -588,6 +588,15 @@ bool Method::is_accessor() const {
   return true;
 }

+bool Method::is_constant_getter() const {
+  int last_index = code_size() - 1;
+  // Check if the first 1-3 bytecodes are a constant push
+  // and the last bytecode is a return.
+  return (2 <= code_size() && code_size() <= 4 &&
+          Bytecodes::is_const(java_code_at(0)) &&
+          Bytecodes::length_for(java_code_at(0)) == last_index &&
+          Bytecodes::is_return(java_code_at(last_index)));
+}
+
 bool Method::is_initializer() const {
   return name() == vmSymbols::object_initializer_name() || is_static_initializer();
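Note: a worked example for the predicate above (hypothetical Java method, not from this changeset):

    // static int answer() { return 42; }   compiles to:
    //   0: bipush 42    // a constant push, instruction length 2
    //   2: ireturn      // a return
    // code_size() == 3, last_index == 2, length_for(bipush) == 2 == last_index,
    // and the bytecode at last_index is a return, so is_constant_getter() is true.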
@@ -595,6 +595,9 @@ class Method : public Metadata {
   // returns true if the method is an accessor function (setter/getter).
   bool is_accessor() const;

+  // returns true if the method does nothing but return a constant of primitive type
+  bool is_constant_getter() const;
+
   // returns true if the method is an initializer (<init> or <clinit>).
   bool is_initializer() const;
@@ -1134,7 +1134,7 @@ void MethodData::init() {
   _tenure_traps = 0;
   _num_loops = 0;
   _num_blocks = 0;
-  _would_profile = true;
+  _would_profile = unknown;

 #if INCLUDE_RTM_OPT
   _rtm_state = NoRTM; // No RTM lock eliding by default
@@ -2096,7 +2096,8 @@ private:
   short _num_loops;
   short _num_blocks;
   // Does this method contain anything worth profiling?
-  bool _would_profile;
+  enum WouldProfile {unknown, no_profile, profile};
+  WouldProfile _would_profile;

   // Size of _data array in bytes. (Excludes header and extra_data fields.)
   int _data_size;
@@ -2270,8 +2271,8 @@ public:
   }
 #endif

-  void set_would_profile(bool p) { _would_profile = p; }
-  bool would_profile() const { return _would_profile; }
+  void set_would_profile(bool p) { _would_profile = p ? profile : no_profile; }
+  bool would_profile() const { return _would_profile != no_profile; }

   int num_loops() const { return _num_loops; }
   void set_num_loops(int n) { _num_loops = n; }
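Note: condensed view of the tri-state introduced above (a sketch derived from the hunks, not an excerpt): _would_profile now distinguishes "never decided" from "decided no", and a freshly initialized MDO still reports true.

    enum WouldProfile { unknown, no_profile, profile };
    // MethodData::init():    _would_profile = unknown;               // question not answered yet
    // set_would_profile(p):  _would_profile = p ? profile : no_profile;
    // would_profile():       return _would_profile != no_profile;    // unknown stays conservative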
@@ -794,7 +794,7 @@ void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
   Node* ex_klass_node = NULL;
   if (has_ex_handler() && !ex_type->klass_is_exact()) {
     Node* p = basic_plus_adr( ex_node, ex_node, oopDesc::klass_offset_in_bytes());
-    ex_klass_node = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
+    ex_klass_node = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT));

     // Compute the exception klass a little more cleverly.
     // Obvious solution is to simple do a LoadKlass from the 'ex_node'.
@@ -812,7 +812,7 @@ void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
         continue;
       }
       Node* p = basic_plus_adr(ex_in, ex_in, oopDesc::klass_offset_in_bytes());
-      Node* k = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
+      Node* k = _gvn.transform( LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT));
       ex_klass_node->init_req( i, k );
     }
     _gvn.set_type(ex_klass_node, TypeKlassPtr::OBJECT);
@@ -1154,7 +1154,7 @@ Node* GraphKit::load_object_klass(Node* obj) {
   Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
   if (akls != NULL) return akls;
   Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
-  return _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), k_adr, TypeInstPtr::KLASS) );
+  return _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS));
 }

 //-------------------------load_array_length-----------------------------------
@@ -2615,7 +2615,7 @@ Node* Phase::gen_subtype_check(Node* subklass, Node* superklass, Node** ctrl, Me
   // types load from the super-class display table which is immutable.
   m = mem->memory_at(C->get_alias_index(gvn->type(p2)->is_ptr()));
   Node *kmem = might_be_cache ? m : C->immutable_memory();
-  Node *nkls = gvn->transform(LoadKlassNode::make(*gvn, kmem, p2, gvn->type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL));
+  Node *nkls = gvn->transform(LoadKlassNode::make(*gvn, NULL, kmem, p2, gvn->type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL));

   // Compile speed common case: ARE a subtype and we canNOT fail
   if( superklass == nkls )
@@ -3345,7 +3345,7 @@ Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
   if (region == NULL) never_see_null = true;
   Node* p = basic_plus_adr(mirror, offset);
   const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
-  Node* kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
+  Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
   Node* null_ctl = top();
   kls = null_check_oop(kls, &null_ctl, never_see_null);
   if (region != NULL) {
@@ -3517,7 +3517,7 @@ bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
     phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror())));
     // If we fall through, it's a plain class. Get its _super.
     p = basic_plus_adr(kls, in_bytes(Klass::super_offset()));
-    kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL));
+    kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL));
     null_ctl = top();
     kls = null_check_oop(kls, &null_ctl);
     if (null_ctl != top()) {
@@ -3671,7 +3671,7 @@ bool LibraryCallKit::inline_native_subtype_check() {
     args[which_arg] = arg;

     Node* p = basic_plus_adr(arg, class_klass_offset);
-    Node* kls = LoadKlassNode::make(_gvn, immutable_memory(), p, adr_type, kls_type);
+    Node* kls = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, adr_type, kls_type);
     klasses[which_arg] = _gvn.transform(kls);
   }
@@ -1197,8 +1197,7 @@ void PhaseMacroExpand::expand_allocate_common(
   }

   if (C->env()->dtrace_alloc_probes() ||
-      !UseTLAB && (!Universe::heap()->supports_inline_contig_alloc() ||
-      (UseConcMarkSweepGC && CMSIncrementalMode))) {
+      !UseTLAB && (!Universe::heap()->supports_inline_contig_alloc())) {
     // Force slow-path allocation
     always_slow = true;
     initial_slow_test = NULL;
@@ -2202,7 +2201,7 @@ void PhaseMacroExpand::expand_lock_node(LockNode *lock) {
   Node* klass_node = AllocateNode::Ideal_klass(obj, &_igvn);
   if (klass_node == NULL) {
     Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
-    klass_node = transform_later( LoadKlassNode::make(_igvn, mem, k_adr, _igvn.type(k_adr)->is_ptr()) );
+    klass_node = transform_later(LoadKlassNode::make(_igvn, NULL, mem, k_adr, _igvn.type(k_adr)->is_ptr()));
 #ifdef _LP64
     if (UseCompressedClassPointers && klass_node->is_DecodeNKlass()) {
       assert(klass_node->in(1)->Opcode() == Op_LoadNKlass, "sanity");
@@ -529,7 +529,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode*
         // (At this point we can assume disjoint_bases, since types differ.)
         int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
         Node* p1 = basic_plus_adr(dest_klass, ek_offset);
-        Node* n1 = LoadKlassNode::make(_igvn, C->immutable_memory(), p1, TypeRawPtr::BOTTOM);
+        Node* n1 = LoadKlassNode::make(_igvn, NULL, C->immutable_memory(), p1, TypeRawPtr::BOTTOM);
         Node* dest_elem_klass = transform_later(n1);
         Node* cv = generate_checkcast_arraycopy(&local_ctrl, &local_mem,
                                                 adr_type,
@@ -861,6 +861,10 @@ Node *MemNode::Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr ) {


 //=============================================================================
+// Should LoadNode::Ideal() attempt to remove control edges?
+bool LoadNode::can_remove_control() const {
+  return true;
+}
 uint LoadNode::size_of() const { return sizeof(*this); }
 uint LoadNode::cmp( const Node &n ) const
 { return !Type::cmp( _type, ((LoadNode&)n)._type ); }
@@ -1471,7 +1475,7 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) {
 }

 //------------------------------Ideal------------------------------------------
-// If the load is from Field memory and the pointer is non-null, we can
+// If the load is from Field memory and the pointer is non-null, it might be possible to
 // zero out the control input.
 // If the offset is constant and the base is an object allocation,
 // try to hook me up to the exact initializing store.
@@ -1498,6 +1502,7 @@ Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) {
       && phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw) {
     // Check for useless control edge in some common special cases
     if (in(MemNode::Control) != NULL
+        && can_remove_control()
         && phase->type(base)->higher_equal(TypePtr::NOTNULL)
         && all_controls_dominate(base, phase->C->start())) {
       // A method-invariant, non-null address (constant or 'this' argument).
@@ -2019,8 +2024,7 @@ const Type* LoadSNode::Value(PhaseTransform *phase) const {
 //=============================================================================
 //----------------------------LoadKlassNode::make------------------------------
 // Polymorphic factory method:
-Node *LoadKlassNode::make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at, const TypeKlassPtr *tk ) {
-  Node *ctl = NULL;
+Node* LoadKlassNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk) {
   // sanity check the alias category against the created node type
   const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
   assert(adr_type != NULL, "expecting TypeKlassPtr");
@@ -2040,6 +2044,12 @@ const Type *LoadKlassNode::Value( PhaseTransform *phase ) const {
   return klass_value_common(phase);
 }

+// In most cases, LoadKlassNode does not have the control input set. If the control
+// input is set, it must not be removed (by LoadNode::Ideal()).
+bool LoadKlassNode::can_remove_control() const {
+  return false;
+}
+
 const Type *LoadNode::klass_value_common( PhaseTransform *phase ) const {
   // Either input is TOP ==> the result is TOP
   const Type *t1 = phase->type( in(MemNode::Memory) );
@@ -148,6 +148,8 @@ private:
 protected:
   virtual uint cmp(const Node &n) const;
   virtual uint size_of() const; // Size is bigger
+  // Should LoadNode::Ideal() attempt to remove control edges?
+  virtual bool can_remove_control() const;
   const Type* const _type; // What kind of value is loaded?
 public:

@@ -171,8 +173,10 @@ public:
   // we are equivalent to. We look for Load of a Store.
   virtual Node *Identity( PhaseTransform *phase );

-  // If the load is from Field memory and the pointer is non-null, we can
+  // If the load is from Field memory and the pointer is non-null, it might be possible to
   // zero out the control input.
+  // If the offset is constant and the base is an object allocation,
+  // try to hook me up to the exact initializing store.
   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

   // Split instance field load through Phi.
@@ -431,6 +435,10 @@ public:
 //------------------------------LoadKlassNode----------------------------------
 // Load a Klass from an object
 class LoadKlassNode : public LoadPNode {
+protected:
+  // In most cases, LoadKlassNode does not have the control input set. If the control
+  // input is set, it must not be removed (by LoadNode::Ideal()).
+  virtual bool can_remove_control() const;
 public:
   LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, MemOrd mo)
     : LoadPNode(c, mem, adr, at, tk, mo) {}
@@ -440,8 +448,8 @@ public:
   virtual bool depends_only_on_test() const { return true; }

   // Polymorphic factory method:
-  static Node* make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at,
-                     const TypeKlassPtr *tk = TypeKlassPtr::OBJECT );
+  static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
+                    const TypeKlassPtr* tk = TypeKlassPtr::OBJECT);
 };

 //------------------------------LoadNKlassNode---------------------------------
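Note: net effect of the can_remove_control() hook added in the two files above, condensed into one place (a sketch, not an excerpt):

    // LoadNode::Ideal() may now drop a seemingly useless control edge only if
    // the concrete node type agrees:
    bool LoadNode::can_remove_control()      const { return true;  } // default behavior
    bool LoadKlassNode::can_remove_control() const { return false; } // a klass load with control set stays pinned

This is what allows the array-store-check change further below to attach an uncommon-trap control edge to a LoadKlassNode without the optimizer stripping it again.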
@@ -1987,7 +1987,7 @@ void Parse::call_register_finalizer() {
   // finalization. In general this will fold up since the concrete
   // class is often visible so the access flags are constant.
   Node* klass_addr = basic_plus_adr( receiver, receiver, oopDesc::klass_offset_in_bytes() );
-  Node* klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), klass_addr, TypeInstPtr::KLASS) );
+  Node* klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), klass_addr, TypeInstPtr::KLASS));

   Node* access_flags_addr = basic_plus_adr(klass, klass, in_bytes(Klass::access_flags_offset()));
   Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT, MemNode::unordered);
@@ -156,22 +156,43 @@ void Parse::array_store_check() {
   int klass_offset = oopDesc::klass_offset_in_bytes();
   Node* p = basic_plus_adr( ary, ary, klass_offset );
   // p's type is array-of-OOPS plus klass_offset
-  Node* array_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS) );
+  Node* array_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS));
   // Get the array klass
   const TypeKlassPtr *tak = _gvn.type(array_klass)->is_klassptr();

-  // array_klass's type is generally INexact array-of-oop. Heroically
-  // cast the array klass to EXACT array and uncommon-trap if the cast
-  // fails.
+  // The type of array_klass is usually INexact array-of-oop. Heroically
+  // cast array_klass to EXACT array and uncommon-trap if the cast fails.
+  // Make constant out of the inexact array klass, but use it only if the cast
+  // succeeds.
   bool always_see_exact_class = false;
   if (MonomorphicArrayCheck
-      && !too_many_traps(Deoptimization::Reason_array_check)) {
+      && !too_many_traps(Deoptimization::Reason_array_check)
+      && !tak->klass_is_exact()
+      && tak != TypeKlassPtr::OBJECT) {
+    // Regarding the fourth condition in the if-statement from above:
+    //
+    // If the compiler has determined that the type of array 'ary' (represented
+    // by 'array_klass') is java/lang/Object, the compiler must not assume that
+    // the array 'ary' is monomorphic.
+    //
+    // If 'ary' were of type java/lang/Object, this arraystore would have to fail,
+    // because it is not possible to perform a arraystore into an object that is not
+    // a "proper" array.
+    //
+    // Therefore, let's obtain at runtime the type of 'ary' and check if we can still
+    // successfully perform the store.
+    //
+    // The implementation reasons for the condition are the following:
+    //
+    // java/lang/Object is the superclass of all arrays, but it is represented by the VM
+    // as an InstanceKlass. The checks generated by gen_checkcast() (see below) expect
+    // 'array_klass' to be ObjArrayKlass, which can result in invalid memory accesses.
+    //
+    // See issue JDK-8057622 for details.
+
     always_see_exact_class = true;
     // (If no MDO at all, hope for the best, until a trap actually occurs.)
-  }

-  // Is the array klass is exactly its defined type?
-  if (always_see_exact_class && !tak->klass_is_exact()) {
     // Make a constant out of the inexact array klass
     const TypeKlassPtr *extak = tak->cast_to_exactness(true)->is_klassptr();
     Node* con = makecon(extak);
@@ -202,11 +223,15 @@ void Parse::array_store_check() {
   // Extract the array element class
   int element_klass_offset = in_bytes(ObjArrayKlass::element_klass_offset());
   Node *p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset);
-  Node *a_e_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p2, tak) );
+  // We are allowed to use the constant type only if cast succeeded. If always_see_exact_class is true,
+  // we must set a control edge from the IfTrue node created by the uncommon_trap above to the
+  // LoadKlassNode.
+  Node* a_e_klass = _gvn.transform(LoadKlassNode::make(_gvn, always_see_exact_class ? control() : NULL,
+                                                       immutable_memory(), p2, tak));

   // Check (the hard way) and throw if not a subklass.
   // Result is ignored, we just need the CFG effects.
-  gen_checkcast( obj, a_e_klass );
+  gen_checkcast(obj, a_e_klass);
 }
@ -24,6 +24,8 @@
|
||||||
|
|
||||||
#include "precompiled.hpp"
|
#include "precompiled.hpp"
|
||||||
|
|
||||||
|
#include <new>
|
||||||
|
|
||||||
#include "code/codeCache.hpp"
|
#include "code/codeCache.hpp"
|
||||||
#include "memory/metadataFactory.hpp"
|
#include "memory/metadataFactory.hpp"
|
||||||
#include "memory/universe.hpp"
|
#include "memory/universe.hpp"
|
||||||
|
@ -37,9 +39,11 @@
|
||||||
|
|
||||||
#include "runtime/thread.hpp"
|
#include "runtime/thread.hpp"
|
||||||
#include "runtime/arguments.hpp"
|
#include "runtime/arguments.hpp"
|
||||||
|
#include "runtime/deoptimization.hpp"
|
||||||
#include "runtime/interfaceSupport.hpp"
|
#include "runtime/interfaceSupport.hpp"
|
||||||
#include "runtime/os.hpp"
|
#include "runtime/os.hpp"
|
||||||
#include "runtime/vm_version.hpp"
|
#include "runtime/vm_version.hpp"
|
||||||
|
#include "runtime/sweeper.hpp"
|
||||||
|
|
||||||
#include "utilities/array.hpp"
|
#include "utilities/array.hpp"
|
||||||
#include "utilities/debug.hpp"
|
#include "utilities/debug.hpp"
|
||||||
|
@ -67,6 +71,7 @@ PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
|
||||||
#define SIZE_T_MAX_VALUE ((size_t) -1)
|
#define SIZE_T_MAX_VALUE ((size_t) -1)
|
||||||
|
|
||||||
bool WhiteBox::_used = false;
|
bool WhiteBox::_used = false;
|
||||||
|
volatile bool WhiteBox::compilation_locked = false;
|
||||||
|
|
||||||
WB_ENTRY(jlong, WB_GetObjectAddress(JNIEnv* env, jobject o, jobject obj))
|
WB_ENTRY(jlong, WB_GetObjectAddress(JNIEnv* env, jobject o, jobject obj))
|
||||||
return (jlong)(void*)JNIHandles::resolve(obj);
|
return (jlong)(void*)JNIHandles::resolve(obj);
|
||||||
|
@ -302,13 +307,12 @@ WB_END
|
||||||
WB_ENTRY(jlong, WB_NMTReserveMemory(JNIEnv* env, jobject o, jlong size))
|
WB_ENTRY(jlong, WB_NMTReserveMemory(JNIEnv* env, jobject o, jlong size))
|
||||||
jlong addr = 0;
|
jlong addr = 0;
|
||||||
|
|
||||||
addr = (jlong)(uintptr_t)os::reserve_memory(size);
|
addr = (jlong)(uintptr_t)os::reserve_memory(size);
|
||||||
MemTracker::record_virtual_memory_type((address)addr, mtTest);
|
MemTracker::record_virtual_memory_type((address)addr, mtTest);
|
||||||
|
|
||||||
return addr;
|
return addr;
|
||||||
WB_END
|
WB_END
|
||||||
|
|
||||||
|
|
||||||
WB_ENTRY(void, WB_NMTCommitMemory(JNIEnv* env, jobject o, jlong addr, jlong size))
|
WB_ENTRY(void, WB_NMTCommitMemory(JNIEnv* env, jobject o, jlong addr, jlong size))
|
||||||
os::commit_memory((char *)(uintptr_t)addr, size, !ExecMem);
|
os::commit_memory((char *)(uintptr_t)addr, size, !ExecMem);
|
||||||
MemTracker::record_virtual_memory_type((address)(uintptr_t)addr, mtTest);
|
MemTracker::record_virtual_memory_type((address)(uintptr_t)addr, mtTest);
|
||||||
|
@ -728,6 +732,29 @@ WB_ENTRY(void, WB_SetStringVMFlag(JNIEnv* env, jobject o, jstring name, jstring
|
||||||
WB_END
|
WB_END
|
||||||
|
|
||||||
|
|
||||||
|
WB_ENTRY(void, WB_LockCompilation(JNIEnv* env, jobject o, jlong timeout))
|
||||||
|
WhiteBox::compilation_locked = true;
|
||||||
|
WB_END
|
||||||
|
|
||||||
|
WB_ENTRY(void, WB_UnlockCompilation(JNIEnv* env, jobject o))
|
||||||
|
MonitorLockerEx mo(Compilation_lock, Mutex::_no_safepoint_check_flag);
|
||||||
|
WhiteBox::compilation_locked = false;
|
||||||
|
mo.notify_all();
|
||||||
|
WB_END
|
||||||
|
|
||||||
|
void WhiteBox::force_sweep() {
|
||||||
|
guarantee(WhiteBoxAPI, "internal testing API :: WhiteBox has to enabled");
|
||||||
|
{
|
||||||
|
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
|
||||||
|
NMethodSweeper::_should_sweep = true;
|
||||||
|
}
|
||||||
|
NMethodSweeper::possibly_sweep();
|
||||||
|
}
|
||||||
|
|
||||||
|
WB_ENTRY(void, WB_ForceNMethodSweep(JNIEnv* env, jobject o))
|
||||||
|
WhiteBox::force_sweep();
|
||||||
|
WB_END
|
||||||
|
|
||||||
WB_ENTRY(jboolean, WB_IsInStringTable(JNIEnv* env, jobject o, jstring javaString))
|
WB_ENTRY(jboolean, WB_IsInStringTable(JNIEnv* env, jobject o, jstring javaString))
|
||||||
ResourceMark rm(THREAD);
|
ResourceMark rm(THREAD);
|
||||||
int len;
|
int len;
|
||||||
|
@ -774,6 +801,46 @@ WB_ENTRY(jstring, WB_GetCPUFeatures(JNIEnv* env, jobject o))
|
||||||
return features_string;
|
return features_string;
|
||||||
WB_END
|
WB_END
|
||||||
|
|
||||||
|
int WhiteBox::get_blob_type(const CodeBlob* code) {
|
||||||
|
guarantee(WhiteBoxAPI, "internal testing API :: WhiteBox has to enabled");
|
||||||
|
return CodeCache::get_code_heap(code)->code_blob_type();
|
||||||
|
}
|
||||||
|
|
||||||
|
CodeHeap* WhiteBox::get_code_heap(int blob_type) {
|
||||||
|
guarantee(WhiteBoxAPI, "internal testing API :: WhiteBox has to enabled");
|
||||||
|
return CodeCache::get_code_heap(blob_type);
|
||||||
|
}
|
||||||
|
|
||||||
|
struct CodeBlobStub {
|
||||||
|
CodeBlobStub(const CodeBlob* blob) :
|
||||||
|
name(os::strdup(blob->name())),
|
||||||
|
size(blob->size()),
|
||||||
|
blob_type(WhiteBox::get_blob_type(blob)) { }
|
||||||
|
~CodeBlobStub() { os::free((void*) name); }
|
||||||
|
const char* const name;
|
||||||
|
const int size;
|
||||||
|
const int blob_type;
|
||||||
|
};
|
||||||
|
|
||||||
|
static jobjectArray codeBlob2objectArray(JavaThread* thread, JNIEnv* env, CodeBlobStub* cb) {
|
||||||
|
jclass clazz = env->FindClass(vmSymbols::java_lang_Object()->as_C_string());
|
||||||
|
CHECK_JNI_EXCEPTION_(env, NULL);
|
||||||
|
jobjectArray result = env->NewObjectArray(3, clazz, NULL);
|
||||||
|
|
||||||
|
jstring name = env->NewStringUTF(cb->name);
|
||||||
|
CHECK_JNI_EXCEPTION_(env, NULL);
|
||||||
|
env->SetObjectArrayElement(result, 0, name);
|
||||||
|
|
||||||
|
jobject obj = integerBox(thread, env, cb->size);
|
||||||
|
CHECK_JNI_EXCEPTION_(env, NULL);
|
||||||
|
env->SetObjectArrayElement(result, 1, obj);
|
||||||
|
|
||||||
|
obj = integerBox(thread, env, cb->blob_type);
|
||||||
|
CHECK_JNI_EXCEPTION_(env, NULL);
|
||||||
|
env->SetObjectArrayElement(result, 2, obj);
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
WB_ENTRY(jobjectArray, WB_GetNMethod(JNIEnv* env, jobject o, jobject method, jboolean is_osr))
|
WB_ENTRY(jobjectArray, WB_GetNMethod(JNIEnv* env, jobject o, jobject method, jboolean is_osr))
|
||||||
ResourceMark rm(THREAD);
|
ResourceMark rm(THREAD);
|
||||||
|
@@ -790,27 +857,93 @@ WB_ENTRY(jobjectArray, WB_GetNMethod(JNIEnv* env, jobject o, jobject method, jboolean is_osr))
   ThreadToNativeFromVM ttn(thread);
   jclass clazz = env->FindClass(vmSymbols::java_lang_Object()->as_C_string());
   CHECK_JNI_EXCEPTION_(env, NULL);
-  result = env->NewObjectArray(3, clazz, NULL);
+  result = env->NewObjectArray(4, clazz, NULL);
   if (result == NULL) {
     return result;
   }
 
+  CodeBlobStub stub(code);
+  jobjectArray codeBlob = codeBlob2objectArray(thread, env, &stub);
+  env->SetObjectArrayElement(result, 0, codeBlob);
+
   jobject level = integerBox(thread, env, code->comp_level());
   CHECK_JNI_EXCEPTION_(env, NULL);
-  env->SetObjectArrayElement(result, 0, level);
+  env->SetObjectArrayElement(result, 1, level);
 
   jbyteArray insts = env->NewByteArray(insts_size);
   CHECK_JNI_EXCEPTION_(env, NULL);
   env->SetByteArrayRegion(insts, 0, insts_size, (jbyte*) code->insts_begin());
-  env->SetObjectArrayElement(result, 1, insts);
+  env->SetObjectArrayElement(result, 2, insts);
 
   jobject id = integerBox(thread, env, code->compile_id());
   CHECK_JNI_EXCEPTION_(env, NULL);
-  env->SetObjectArrayElement(result, 2, id);
+  env->SetObjectArrayElement(result, 3, id);
 
   return result;
 WB_END
 
+CodeBlob* WhiteBox::allocate_code_blob(int size, int blob_type) {
+  guarantee(WhiteBoxAPI, "internal testing API :: WhiteBox has to enabled");
+  BufferBlob* blob;
+  int full_size = CodeBlob::align_code_offset(sizeof(BufferBlob));
+  if (full_size < size) {
+    full_size += round_to(size - full_size, oopSize);
+  }
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    blob = (BufferBlob*) CodeCache::allocate(full_size, blob_type);
+  }
+  // Track memory usage statistic after releasing CodeCache_lock
+  MemoryService::track_code_cache_memory_usage();
+  ::new (blob) BufferBlob("WB::DummyBlob", full_size);
+  return blob;
+}
+
+WB_ENTRY(jlong, WB_AllocateCodeBlob(JNIEnv* env, jobject o, jint size, jint blob_type))
+  return (jlong) WhiteBox::allocate_code_blob(size, blob_type);
+WB_END
+
+WB_ENTRY(void, WB_FreeCodeBlob(JNIEnv* env, jobject o, jlong addr))
+  BufferBlob::free((BufferBlob*) addr);
+WB_END
+
+WB_ENTRY(jobjectArray, WB_GetCodeHeapEntries(JNIEnv* env, jobject o, jint blob_type))
+  ResourceMark rm;
+  GrowableArray<CodeBlobStub*> blobs;
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    CodeHeap* heap = WhiteBox::get_code_heap(blob_type);
+    if (heap == NULL) {
+      return NULL;
+    }
+    for (CodeBlob* cb = (CodeBlob*) heap->first();
+         cb != NULL; cb = (CodeBlob*) heap->next(cb)) {
+      CodeBlobStub* stub = NEW_RESOURCE_OBJ(CodeBlobStub);
+      new (stub) CodeBlobStub(cb);
+      blobs.append(stub);
+    }
+  }
+  if (blobs.length() == 0) {
+    return NULL;
+  }
+  ThreadToNativeFromVM ttn(thread);
+  jobjectArray result = NULL;
+  jclass clazz = env->FindClass(vmSymbols::java_lang_Object()->as_C_string());
+  CHECK_JNI_EXCEPTION_(env, NULL);
+  result = env->NewObjectArray(blobs.length(), clazz, NULL);
+  if (result == NULL) {
+    return result;
+  }
+  int i = 0;
+  for (GrowableArrayIterator<CodeBlobStub*> it = blobs.begin();
+       it != blobs.end(); ++it) {
+    jobjectArray obj = codeBlob2objectArray(thread, env, *it);
+    env->SetObjectArrayElement(result, i, obj);
+    ++i;
+  }
+  return result;
+WB_END
+
 WB_ENTRY(jlong, WB_GetThreadStackSize(JNIEnv* env, jobject o))
   return (jlong) Thread::current()->stack_size();
 WB_END
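The sizing rule in WhiteBox::allocate_code_blob above is easy to misread: the blob never shrinks below the aligned BufferBlob header, and only the excess over the header is rounded up to word size. A minimal Java sketch of the same arithmetic, with the header size and both alignments passed in as assumed parameters, since the real values (sizeof(BufferBlob), the code-offset alignment, oopSize) are platform dependent:

    // Sketch of the sizing arithmetic only; headerSize, codeAlign and oopSize
    // stand in for platform-dependent HotSpot constants.
    final class BlobSizing {
        static int roundTo(int x, int align) {            // round x up to a multiple of align
            return ((x + align - 1) / align) * align;
        }
        static int fullSize(int requested, int headerSize, int codeAlign, int oopSize) {
            int full = roundTo(headerSize, codeAlign);    // align_code_offset(sizeof(BufferBlob))
            if (full < requested) {
                full += roundTo(requested - full, oopSize); // round_to(size - full_size, oopSize)
            }
            return full;
        }
        public static void main(String[] args) {
            // with assumed values headerSize=104, codeAlign=32, oopSize=8:
            System.out.println(fullSize(1, 104, 32, 8));    // 128: a 1-byte request still pays for the aligned header
            System.out.println(fullSize(1024, 104, 32, 8)); // 1024: 128 + roundTo(896, 8)
        }
    }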
@@ -1018,6 +1151,8 @@ static JNINativeMethod methods[] = {
       CC"(Ljava/lang/reflect/Executable;II)Z", (void*)&WB_EnqueueMethodForCompilation},
   {CC"clearMethodState",
       CC"(Ljava/lang/reflect/Executable;)V",   (void*)&WB_ClearMethodState},
+  {CC"lockCompilation",    CC"()V",                   (void*)&WB_LockCompilation},
+  {CC"unlockCompilation",  CC"()V",                   (void*)&WB_UnlockCompilation},
   {CC"isConstantVMFlag",   CC"(Ljava/lang/String;)Z", (void*)&WB_IsConstantVMFlag},
   {CC"isLockedVMFlag",     CC"(Ljava/lang/String;)Z", (void*)&WB_IsLockedVMFlag},
   {CC"setBooleanVMFlag",   CC"(Ljava/lang/String;Z)V",(void*)&WB_SetBooleanVMFlag},
@@ -1055,6 +1190,10 @@ static JNINativeMethod methods[] = {
   {CC"getCPUFeatures",     CC"()Ljava/lang/String;",  (void*)&WB_GetCPUFeatures     },
   {CC"getNMethod",         CC"(Ljava/lang/reflect/Executable;Z)[Ljava/lang/Object;",
                                                       (void*)&WB_GetNMethod         },
+  {CC"forceNMethodSweep",  CC"()V",                   (void*)&WB_ForceNMethodSweep  },
+  {CC"allocateCodeBlob",   CC"(II)J",                 (void*)&WB_AllocateCodeBlob   },
+  {CC"freeCodeBlob",       CC"(J)V",                  (void*)&WB_FreeCodeBlob       },
+  {CC"getCodeHeapEntries", CC"(I)[Ljava/lang/Object;",(void*)&WB_GetCodeHeapEntries },
   {CC"getThreadStackSize", CC"()J",                   (void*)&WB_GetThreadStackSize },
   {CC"getThreadRemainingStackSize", CC"()J",          (void*)&WB_GetThreadRemainingStackSize },
 };
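For orientation, the Java mirror of these registrations lives in the test library's sun.hotspot.WhiteBox class, which is the authoritative source for the signatures. A hedged sketch of declarations that would match the descriptors registered above:

    // Sketch only; the real declarations live in sun.hotspot.WhiteBox.
    public class WhiteBoxSketch {
        public native void     lockCompilation();                // ()V
        public native void     unlockCompilation();              // ()V
        public native void     forceNMethodSweep();              // ()V
        public native long     allocateCodeBlob(int size, int type); // (II)J
        public native void     freeCodeBlob(long addr);          // (J)V
        public native Object[] getCodeHeapEntries(int type);     // (I)[Ljava/lang/Object;
    }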
@@ -54,17 +54,24 @@
   } \
 } while (0)
 
+class CodeBlob;
+class CodeHeap;
 
 class WhiteBox : public AllStatic {
  private:
   static bool _used;
  public:
+  static volatile bool compilation_locked;
   static bool used()     { return _used; }
   static void set_used() { _used = true; }
   static int offset_for_field(const char* field_name, oop object,
                               Symbol* signature_symbol);
   static const char* lookup_jstring(const char* field_name, oop object);
   static bool lookup_bool(const char* field_name, oop object);
+  static void force_sweep();
+  static int get_blob_type(const CodeBlob* code);
+  static CodeHeap* get_code_heap(int blob_type);
+  static CodeBlob* allocate_code_blob(int blob_type, int size);
   static int array_bytes_to_length(size_t bytes);
   static void register_methods(JNIEnv* env, jclass wbclass, JavaThread* thread,
                                JNINativeMethod* method_array, int method_count);
@@ -317,8 +317,8 @@ void AdvancedThresholdPolicy::create_mdo(methodHandle mh, JavaThread* THREAD) {
  * c. 0 -> (3->2) -> 4.
  *    In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough
  *    to enable the profiling to fully occur at level 0. In this case we change the compilation level
- *    of the method to 2, because it'll allow it to run much faster without full profiling while c2
- *    is compiling.
+ *    of the method to 2 while the request is still in-queue, because it'll allow it to run much faster
+ *    without full profiling while c2 is compiling.
  *
  * d. 0 -> 3 -> 1 or 0 -> 2 -> 1.
  *    After a method was once compiled with C1 it can be identified as trivial and be compiled to
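The transition notation in the comment above (0 -> 3 -> 4 and so on) refers to HotSpot's tiered compilation levels. A brief reference for the reader, written as a Java enum for readability; this is a summary, not part of the patch:

    // Summary of the tiered compilation levels referenced by the comment above.
    enum CompLevel {
        NONE(0),                 // interpreter only
        SIMPLE(1),               // C1 without profiling
        LIMITED_PROFILE(2),      // C1 with invocation and backedge counters
        FULL_PROFILE(3),         // C1 with full MDO profiling
        FULL_OPTIMIZATION(4);    // C2
        final int level;
        CompLevel(int level) { this.level = level; }
    }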
@@ -1789,7 +1789,7 @@ void Arguments::set_g1_gc_flags() {
 #ifdef ASSERT
 static bool verify_serial_gc_flags() {
   return (UseSerialGC &&
-        !(UseParNewGC || (UseConcMarkSweepGC || CMSIncrementalMode) || UseG1GC ||
+        !(UseParNewGC || (UseConcMarkSweepGC) || UseG1GC ||
           UseParallelGC || UseParallelOldGC));
 }
 #endif // ASSERT
@@ -2203,10 +2203,6 @@ void Arguments::check_deprecated_gcs() {
     warning("Using the ParNew young collector with the Serial old collector is deprecated "
         "and will likely be removed in a future release");
   }
-
-  if (CMSIncrementalMode) {
-    warning("Using incremental CMS is deprecated and will likely be removed in a future release");
-  }
 }
 
 void Arguments::check_deprecated_gc_flags() {
@@ -2328,31 +2324,8 @@ bool Arguments::check_vm_args_consistency() {
   status = status && ArgumentsExt::check_gc_consistency_user();
   status = status && check_stack_pages();
 
-  if (CMSIncrementalMode) {
-    if (!UseConcMarkSweepGC) {
-      jio_fprintf(defaultStream::error_stream(),
-                  "error: invalid argument combination.\n"
-                  "The CMS collector (-XX:+UseConcMarkSweepGC) must be "
-                  "selected in order\nto use CMSIncrementalMode.\n");
-      status = false;
-    } else {
-      status = status && verify_percentage(CMSIncrementalDutyCycle,
-                                           "CMSIncrementalDutyCycle");
-      status = status && verify_percentage(CMSIncrementalDutyCycleMin,
-                                           "CMSIncrementalDutyCycleMin");
-      status = status && verify_percentage(CMSIncrementalSafetyFactor,
-                                           "CMSIncrementalSafetyFactor");
-      status = status && verify_percentage(CMSIncrementalOffset,
-                                           "CMSIncrementalOffset");
-      status = status && verify_percentage(CMSExpAvgFactor,
-                                           "CMSExpAvgFactor");
-      // If it was not set on the command line, set
-      // CMSInitiatingOccupancyFraction to 1 so icms can initiate cycles early.
-      if (CMSInitiatingOccupancyFraction < 0) {
-        FLAG_SET_DEFAULT(CMSInitiatingOccupancyFraction, 1);
-      }
-    }
-  }
+  status = status && verify_percentage(CMSIncrementalSafetyFactor,
+                                       "CMSIncrementalSafetyFactor");
 
   // CMS space iteration, which FLSVerifyAllHeapreferences entails,
   // insists that we hold the requisite locks so that the iteration is
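The verify_percentage call retained above is the only CMSIncremental* consistency check left after this change. Its contract, as the flag descriptions ("Percentage (0-100)") imply, is a simple inclusive range check. A hedged Java sketch of that implied logic; the real verify_percentage is defined elsewhere in arguments.cpp and reports errors through jio_fprintf:

    // Sketch of the implied check only, under the assumption that the
    // accepted range is 0..100 inclusive.
    final class PercentageCheck {
        static boolean verifyPercentage(long value, String name) {
            if (value >= 0 && value <= 100) {
                return true;
            }
            System.err.printf("%s of %d is invalid; must be between 0 and 100%n", name, value);
            return false;
        }
    }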
@@ -2886,14 +2859,6 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
     // -Xnoclassgc
     } else if (match_option(option, "-Xnoclassgc", &tail)) {
       FLAG_SET_CMDLINE(bool, ClassUnloading, false);
-    // -Xincgc: i-CMS
-    } else if (match_option(option, "-Xincgc", &tail)) {
-      FLAG_SET_CMDLINE(bool, UseConcMarkSweepGC, true);
-      FLAG_SET_CMDLINE(bool, CMSIncrementalMode, true);
-    // -Xnoincgc: no i-CMS
-    } else if (match_option(option, "-Xnoincgc", &tail)) {
-      FLAG_SET_CMDLINE(bool, UseConcMarkSweepGC, false);
-      FLAG_SET_CMDLINE(bool, CMSIncrementalMode, false);
     // -Xconcgc
     } else if (match_option(option, "-Xconcgc", &tail)) {
       FLAG_SET_CMDLINE(bool, UseConcMarkSweepGC, true);
@@ -3723,7 +3688,6 @@ void Arguments::set_shared_spaces_flags() {
 #if !INCLUDE_ALL_GCS
 static void force_serial_gc() {
   FLAG_SET_DEFAULT(UseSerialGC, true);
-  FLAG_SET_DEFAULT(CMSIncrementalMode, false); // special CMS suboption
   UNSUPPORTED_GC_OPTION(UseG1GC);
   UNSUPPORTED_GC_OPTION(UseParallelGC);
   UNSUPPORTED_GC_OPTION(UseParallelOldGC);
@@ -1632,30 +1632,10 @@ class CommandLineFlags {
           "The maximum size of young gen chosen by default per GC worker "  \
           "thread available")                                               \
                                                                             \
-  product(bool, CMSIncrementalMode, false,                                  \
-          "Whether CMS GC should operate in \"incremental\" mode")          \
-                                                                            \
-  product(uintx, CMSIncrementalDutyCycle, 10,                               \
-          "Percentage (0-100) of CMS incremental mode duty cycle. If "      \
-          "CMSIncrementalPacing is enabled, then this is just the initial " \
-          "value.")                                                         \
-                                                                            \
-  product(bool, CMSIncrementalPacing, true,                                 \
-          "Whether the CMS incremental mode duty cycle should be "          \
-          "automatically adjusted")                                         \
-                                                                            \
-  product(uintx, CMSIncrementalDutyCycleMin, 0,                             \
-          "Minimum percentage (0-100) of the CMS incremental duty cycle "   \
-          "used when CMSIncrementalPacing is enabled")                      \
-                                                                            \
   product(uintx, CMSIncrementalSafetyFactor, 10,                            \
           "Percentage (0-100) used to add conservatism when computing the " \
           "duty cycle")                                                     \
                                                                             \
-  product(uintx, CMSIncrementalOffset, 0,                                   \
-          "Percentage (0-100) by which the CMS incremental mode duty cycle "\
-          "is shifted to the right within the period between young GCs")    \
-                                                                            \
   product(uintx, CMSExpAvgFactor, 50,                                       \
           "Percentage (0-100) used to weight the current sample when "      \
           "computing exponential averages for CMS statistics")              \
@@ -1714,15 +1694,6 @@ class CommandLineFlags {
           "Skip block flux-rate sampling for an epoch unless inter-sweep "  \
           "duration exceeds this threshold in milliseconds")                \
                                                                             \
-  develop(bool, CMSTraceIncrementalMode, false,                             \
-          "Trace CMS incremental mode")                                     \
-                                                                            \
-  develop(bool, CMSTraceIncrementalPacing, false,                           \
-          "Trace CMS incremental mode pacing computation")                  \
-                                                                            \
-  develop(bool, CMSTraceThreadState, false,                                 \
-          "Trace the CMS thread state (enable the trace_state() method)")   \
-                                                                            \
   product(bool, CMSClassUnloadingEnabled, true,                             \
           "Whether class unloading enabled when using CMS GC")              \
                                                                             \
@@ -72,7 +72,6 @@ Monitor* Threads_lock = NULL;
 Monitor* CGC_lock                    = NULL;
 Monitor* STS_lock                    = NULL;
 Monitor* SLT_lock                    = NULL;
-Monitor* iCMS_lock                   = NULL;
 Monitor* FullGCCount_lock            = NULL;
 Monitor* CMark_lock                  = NULL;
 Mutex*   CMRegionStack_lock          = NULL;
@@ -88,6 +87,7 @@ Mutex* DerivedPointerTableGC_lock = NULL;
 Mutex*   Compile_lock                = NULL;
 Monitor* MethodCompileQueue_lock     = NULL;
 Monitor* CompileThread_lock          = NULL;
+Monitor* Compilation_lock            = NULL;
 Mutex*   CompileTaskAlloc_lock       = NULL;
 Mutex*   CompileStatistics_lock      = NULL;
 Mutex*   MultiArray_lock             = NULL;
@@ -175,9 +175,6 @@ void mutex_init() {
 
   def(CGC_lock                   , Monitor, special,     true ); // coordinate between fore- and background GC
   def(STS_lock                   , Monitor, leaf,        true );
-  if (UseConcMarkSweepGC) {
-    def(iCMS_lock                , Monitor, special,     true ); // CMS incremental mode start/stop notification
-  }
   if (UseConcMarkSweepGC || UseG1GC) {
     def(FullGCCount_lock         , Monitor, leaf,        true ); // in support of ExplicitGCInvokesConcurrent
   }
@@ -278,7 +275,9 @@ void mutex_init() {
   def(ProfileVM_lock             , Monitor, special,     false); // used for profiling of the VMThread
   def(CompileThread_lock         , Monitor, nonleaf+5,   false );
   def(PeriodicTask_lock          , Monitor, nonleaf+5,   true);
+  if (WhiteBoxAPI) {
+    def(Compilation_lock         , Monitor, leaf,        false );
+  }
 #ifdef INCLUDE_TRACE
   def(JfrMsg_lock                , Monitor, leaf,        true);
   def(JfrBuffer_lock             , Mutex,   leaf,        true);
@@ -66,7 +66,6 @@ extern Monitor* CGC_lock; // used for coordination between
                                                  // fore- & background GC threads.
 extern Monitor* STS_lock;                        // used for joining/leaving SuspendibleThreadSet.
 extern Monitor* SLT_lock;                        // used in CMS GC for acquiring PLL
-extern Monitor* iCMS_lock;                       // CMS incremental mode start/stop notification
 extern Monitor* FullGCCount_lock;                // in support of "concurrent" full gc
 extern Monitor* CMark_lock;                      // used for concurrent mark thread coordination
 extern Mutex*   CMRegionStack_lock;              // used for protecting accesses to the CM region stack
@@ -91,6 +90,7 @@ extern Mutex* EvacFailureStack_lock; // guards the evac failure scan queue
 extern Mutex*   Compile_lock;                    // a lock held when Compilation is updating code (used to block CodeCache traversal, CHA updates, etc)
 extern Monitor* MethodCompileQueue_lock;         // a lock held when method compilations are enqueued, dequeued
 extern Monitor* CompileThread_lock;              // a lock held by compile threads during compilation system initialization
+extern Monitor* Compilation_lock;                // a lock used to pause compilation
 extern Mutex*   CompileTaskAlloc_lock;           // a lock held when CompileTasks are allocated
 extern Mutex*   CompileStatistics_lock;          // a lock held when updating compilation statistics
 extern Mutex*   MultiArray_lock;                 // a lock used to guard allocation of multi-dim arrays
@@ -54,13 +54,17 @@ bool SimpleThresholdPolicy::loop_predicate_helper(int i, int b, double scale) {
 // Simple methods are as good being compiled with C1 as C2.
 // Determine if a given method is such a case.
 bool SimpleThresholdPolicy::is_trivial(Method* method) {
-  if (method->is_accessor()) return true;
-  if (method->code() != NULL) {
-    MethodData* mdo = method->method_data();
-    if (mdo != NULL && mdo->num_loops() == 0 &&
-        (method->code_size() < 5 || (mdo->num_blocks() < 4) && (method->code_size() < 15))) {
-      return !mdo->would_profile();
-    }
+  if (method->is_accessor() ||
+      method->is_constant_getter()) {
+    return true;
+  }
+  if (method->has_loops() || method->code_size() >= 15) {
+    return false;
+  }
+  MethodData* mdo = method->method_data();
+  if (mdo != NULL && !mdo->would_profile() &&
+      (method->code_size() < 5 || (mdo->num_blocks() < 4))) {
+    return true;
   }
   return false;
 }
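The restructured is_trivial above now bails out early on loops or larger methods before consulting the MethodData. A hedged Java restatement of the new predicate, with the C++ Method/MethodData queries flattened into parameters so the short-circuit order is visible at a glance:

    // Restatement of the new SimpleThresholdPolicy::is_trivial; the boolean
    // and int parameters stand in for the corresponding C++ queries.
    final class TrivialityCheck {
        static boolean isTrivial(boolean isAccessor, boolean isConstantGetter,
                                 boolean hasLoops, int codeSize,
                                 boolean hasMdo, boolean wouldProfile, int numBlocks) {
            if (isAccessor || isConstantGetter) {
                return true;                            // trivially compiled getters
            }
            if (hasLoops || codeSize >= 15) {
                return false;                           // early bail-out added by this change
            }
            return hasMdo && !wouldProfile
                    && (codeSize < 5 || numBlocks < 4); // tiny straight-line methods
        }
    }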
@@ -25,6 +25,8 @@
 #ifndef SHARE_VM_RUNTIME_SWEEPER_HPP
 #define SHARE_VM_RUNTIME_SWEEPER_HPP
 
+class WhiteBox;
+
 #include "utilities/ticks.hpp"
 // An NmethodSweeper is an incremental cleaner for:
 //    - cleanup inline caches
@@ -52,6 +54,8 @@
 //     nmethod's space is freed.
 
 class NMethodSweeper : public AllStatic {
+  friend class WhiteBox;
+ private:
   static long _traversals;                      // Stack scan count, also sweep ID.
   static long _total_nof_code_cache_sweeps;     // Total number of full sweeps of the code cache
   static long _time_counter;                    // Virtual time used to periodically invoke sweeper
@@ -88,7 +92,6 @@ class NMethodSweeper : public AllStatic {
   static void handle_safepoint_request();
   static void do_stack_scanning();
   static void possibly_sweep();
-
  public:
   static long traversal_count()             { return _traversals; }
   static int  total_nof_methods_reclaimed() { return _total_nof_methods_reclaimed; }
@@ -527,12 +527,10 @@ typedef TwoOopHashtable<Symbol*, mtClass> SymbolTwoOopHashtable;
   nonstatic_field(DefNewGeneration,  _next_gen,           Generation*)       \
   nonstatic_field(DefNewGeneration,  _tenuring_threshold, uint)              \
   nonstatic_field(DefNewGeneration,  _age_table,          ageTable)          \
-  nonstatic_field(DefNewGeneration,  _eden_space,         EdenSpace*)        \
+  nonstatic_field(DefNewGeneration,  _eden_space,         ContiguousSpace*)  \
   nonstatic_field(DefNewGeneration,  _from_space,         ContiguousSpace*)  \
   nonstatic_field(DefNewGeneration,  _to_space,           ContiguousSpace*)  \
                                                                              \
-  nonstatic_field(EdenSpace,         _gen,                DefNewGeneration*) \
-                                                                             \
   nonstatic_field(Generation,        _reserved,           MemRegion)         \
   nonstatic_field(Generation,        _virtual_space,      VirtualSpace)      \
   nonstatic_field(Generation,        _level,              int)               \
@@ -1490,7 +1488,6 @@ typedef TwoOopHashtable<Symbol*, mtClass> SymbolTwoOopHashtable;
   declare_toplevel_type(BitMap)                                     \
    declare_type(CompactibleSpace,       Space)                      \
    declare_type(ContiguousSpace,        CompactibleSpace)           \
-   declare_type(EdenSpace,              ContiguousSpace)            \
    declare_type(OffsetTableContigSpace, ContiguousSpace)            \
    declare_type(TenuredSpace,           OffsetTableContigSpace)     \
   declare_toplevel_type(BarrierSet)                                 \
@@ -1532,7 +1529,6 @@ typedef TwoOopHashtable<Symbol*, mtClass> SymbolTwoOopHashtable;
   declare_toplevel_type(CollectedHeap*)                             \
   declare_toplevel_type(ContiguousSpace*)                           \
   declare_toplevel_type(DefNewGeneration*)                          \
-  declare_toplevel_type(EdenSpace*)                                 \
   declare_toplevel_type(GenCollectedHeap*)                          \
   declare_toplevel_type(Generation*)                                \
   declare_toplevel_type(GenerationSpec**)                           \
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="utf-8"?>
 <!--
- Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
  This code is free software; you can redistribute it and/or modify it
@@ -59,6 +59,7 @@ public:
   void set_starttime(const Ticks& time) {}
   void set_endtime(const Ticks& time) {}
   bool should_commit() const { return false; }
+  static bool is_enabled() { return false; }
   void commit() const {}
 };
@@ -174,11 +174,8 @@ needs_full_vm_compact1 = \
   gc/g1/TestShrinkToOneRegion.java \
   gc/metaspace/G1AddMetaspaceDependency.java \
   gc/startup_warnings/TestCMS.java \
-  gc/startup_warnings/TestCMSIncrementalMode.java \
-  gc/startup_warnings/TestCMSNoIncrementalMode.java \
   gc/startup_warnings/TestDefaultMaxRAMFraction.java \
   gc/startup_warnings/TestDefNewCMS.java \
-  gc/startup_warnings/TestIncGC.java \
   gc/startup_warnings/TestParallelGC.java \
   gc/startup_warnings/TestParallelScavengeSerialOld.java \
   gc/startup_warnings/TestParNewCMS.java \
@@ -273,8 +270,6 @@ needs_cmsgc = \
   gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java \
   gc/concurrentMarkSweep/ \
   gc/startup_warnings/TestCMS.java \
-  gc/startup_warnings/TestCMSIncrementalMode.java \
-  gc/startup_warnings/TestCMSNoIncrementalMode.java \
   gc/startup_warnings/TestDefNewCMS.java \
   gc/startup_warnings/TestParNewCMS.java
 
@@ -431,7 +426,8 @@ hotspot_compiler_2 = \
   compiler/8005033/Test8005033.java \
   compiler/8005419/Test8005419.java \
   compiler/8005956/PolynomialRoot.java \
-  compiler/8007294/Test8007294.java
+  compiler/8007294/Test8007294.java \
+  compiler/EliminateAutoBox/UnsignedLoads.java
 
 hotspot_compiler_3 = \
   compiler/8007722/Test8007722.java \
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,57 +26,116 @@
  * @test
  * @bug 7068051
  * @summary SIGSEGV in PhaseIdealLoop::build_loop_late_post on T5440
+ * @library /testlibrary
  *
- * @run shell/timeout=300 Test7068051.sh
+ * @run main/othervm -showversion -Xbatch Test7068051
  */
 
-import java.io.*;
-import java.nio.*;
-import java.util.*;
-import java.util.zip.*;
+import com.oracle.java.testlibrary.JDKToolLauncher;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardCopyOption;
+import java.util.ArrayList;
+import java.util.Enumeration;
+import java.util.List;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipFile;
 
 public class Test7068051 {
+    private static final String SELF_NAME = Test7068051.class.getSimpleName();
+    private static final String SELF_FILE_NAME = SELF_NAME + ".java";
+    private static final String JAR_NAME = "foo.jar";
+    private static final String TEST_PATH = System.getProperty("test.src");
+    private static final Path CURRENT_DIR = Paths.get(".");
+    private static final Path TEST_SOURCE_PATH = Paths.get(TEST_PATH, SELF_FILE_NAME);
 
-    public static void main (String[] args) throws Throwable {
-
-        ZipFile zf = new ZipFile(args[0]);
+    public static void main (String[] args) throws IOException {
+        createTestJarFile();
+        System.out.println("Running test...");
 
+        try (ZipFile zf = new ZipFile(JAR_NAME)) {
 
             Enumeration<? extends ZipEntry> entries = zf.entries();
             ArrayList<String> names = new ArrayList<String>();
             while (entries.hasMoreElements()) {
                 names.add(entries.nextElement().getName());
            }
 
            byte[] bytes = new byte[16];
            for (String name : names) {
                ZipEntry e = zf.getEntry(name);
 
-                if (e.isDirectory())
+                if (e.isDirectory()) {
                     continue;
+                }
 
-                final InputStream is = zf.getInputStream(e);
-
-                try {
-                    while (is.read(bytes) >= 0) {
-                    }
-                    is.close();
-
-                } catch (IOException x) {
-                    System.out.println("..................................");
-                    System.out.println(" --> is   :" + is);
-                    System.out.println(" is.hash  :" + is.hashCode());
-                    System.out.println();
-                    System.out.println(" e.name   :" + e.getName());
-                    System.out.println(" e.hash   :" + e.hashCode());
-                    System.out.println(" e.method :" + e.getMethod());
-                    System.out.println(" e.size   :" + e.getSize());
-                    System.out.println(" e.csize  :" + e.getCompressedSize());
-
-                    x.printStackTrace();
-                    System.out.println("..................................");
-                    System.exit(97);
+                try (final InputStream is = zf.getInputStream(e)) {
+                    try {
+                        while (is.read(bytes) >= 0) {
+                        }
+                    } catch (IOException x) {
+                        System.out.println("..................................");
+                        System.out.println(" --> is   :" + is);
+                        System.out.println(" is.hash  :" + is.hashCode());
+                        System.out.println();
+                        System.out.println(" e.name   :" + e.getName());
+                        System.out.println(" e.hash   :" + e.hashCode());
+                        System.out.println(" e.method :" + e.getMethod());
+                        System.out.println(" e.size   :" + e.getSize());
+                        System.out.println(" e.csize  :" + e.getCompressedSize());
+                        System.out.println("..................................");
+
+                        throw new AssertionError("IOException was throwing while read the archive. Test failed.", x);
+                    }
                 }
             }
+        }
-        zf.close();
+        System.out.println("Test passed.");
     }
+
+    private static void createTestJarFile() {
+        ArrayList<String> jarOptions = new ArrayList<>();
+
+        // jar cf foo.jar *
+        System.out.println("Creating jar file..");
+        jarOptions.add("cf");
+        jarOptions.add(JAR_NAME);
+        try {
+            for (int i = 0; i < 100; ++i) {
+                Path temp = Files.createTempFile(CURRENT_DIR, SELF_NAME, ".java");
+                Files.copy(TEST_SOURCE_PATH, temp, StandardCopyOption.REPLACE_EXISTING);
+                jarOptions.add(temp.toString());
+            }
+        } catch (IOException ex) {
+            throw new AssertionError("TESTBUG: Creating temp files failed.", ex);
+        }
+        runJar(jarOptions);
+
+        // jar -uf0 foo.jar Test7068051.java
+        System.out.println("Adding unpacked file...");
+        jarOptions.clear();
+        jarOptions.add("-uf0");
+        jarOptions.add(JAR_NAME);
+        jarOptions.add(TEST_SOURCE_PATH.toString());
+        runJar(jarOptions);
+    }
+
+    private static void runJar(List<String> params) {
+        JDKToolLauncher jar = JDKToolLauncher.create("jar");
+        for (String p : params) {
+            jar.addToolArg(p);
+        }
+        ProcessBuilder pb = new ProcessBuilder(jar.getCommand());
+        try {
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldHaveExitValue(0);
+        } catch (IOException ex) {
+            throw new AssertionError("TESTBUG: jar failed.", ex);
+        }
     }
 }
@@ -1,45 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-#
-## some tests require path to find test source dir
-if [ "${TESTSRC}" = "" ]
-then
-  TESTSRC=${PWD}
-  echo "TESTSRC not set.  Using "${TESTSRC}" as default"
-fi
-echo "TESTSRC=${TESTSRC}"
-## Adding common setup Variables for running shell tests.
-. ${TESTSRC}/../../test_env.sh
-
-set -x
-
-${COMPILEJAVA}/bin/jar xf ${COMPILEJAVA}/jre/lib/javaws.jar
-${COMPILEJAVA}/bin/jar cf foo.jar *
-cp ${TESTSRC}/Test7068051.java ./
-${COMPILEJAVA}/bin/jar -uf0 foo.jar Test7068051.java
-
-${COMPILEJAVA}/bin/javac ${TESTJAVACOPTS} -d . Test7068051.java
-
-${TESTJAVA}/bin/java ${TESTOPTS} -showversion -Xbatch Test7068051 foo.jar
@@ -26,7 +26,7 @@
 /*
  * @test
  * @library /testlibrary
- * @run main/othervm -Xbatch -XX:+EliminateAutoBox
+ * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:+EliminateAutoBox
  *      -XX:CompileOnly=::valueOf,::byteValue,::shortValue,::testUnsignedByte,::testUnsignedShort
  *      UnsignedLoads
  */
hotspot/test/compiler/whitebox/AllocationCodeBlobTest.java (new file, 128 lines)
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+import java.lang.management.MemoryPoolMXBean;
+import java.util.EnumSet;
+import java.util.ArrayList;
+
+import sun.hotspot.WhiteBox;
+import sun.hotspot.code.BlobType;
+import com.oracle.java.testlibrary.Asserts;
+
+/*
+ * @test AllocationCodeBlobTest
+ * @bug 8059624
+ * @library /testlibrary /testlibrary/whitebox
+ * @build AllocationCodeBlobTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,null::*
+ *                   -XX:-SegmentedCodeCache AllocationCodeBlobTest
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,null::*
+ *                   -XX:+SegmentedCodeCache AllocationCodeBlobTest
+ * @summary testing of WB::allocate/freeCodeBlob()
+ */
+public class AllocationCodeBlobTest {
+    private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+    private static final long CODE_CACHE_SIZE
+            = WHITE_BOX.getUintxVMFlag("ReservedCodeCacheSize");
+    private static final int SIZE = 1;
+
+    public static void main(String[] args) {
+        // check that Sweeper handles dummy blobs correctly
+        new ForcedSweeper(500).start();
+        EnumSet<BlobType> blobTypes = BlobType.getAvailable();
+        for (BlobType type : blobTypes) {
+            new AllocationCodeBlobTest(type).test();
+        }
+    }
+
+    private final BlobType type;
+    private final MemoryPoolMXBean bean;
+    private AllocationCodeBlobTest(BlobType type) {
+        this.type = type;
+        bean = type.getMemoryPool();
+    }
+
+    private void test() {
+        System.out.printf("type %s%n", type);
+        long start = getUsage();
+        long addr = WHITE_BOX.allocateCodeBlob(SIZE, type.id);
+        Asserts.assertNE(0, addr, "allocation failed");
+
+        long firstAllocation = getUsage();
+        Asserts.assertLTE(start + SIZE, firstAllocation,
+                "allocation should increase memory usage: "
+                + start + " + " + SIZE + " <= " + firstAllocation);
+
+        WHITE_BOX.freeCodeBlob(addr);
+        long firstFree = getUsage();
+        Asserts.assertLTE(firstFree, firstAllocation,
+                "free shouldn't increase memory usage: "
+                + firstFree + " <= " + firstAllocation);
+
+        addr = WHITE_BOX.allocateCodeBlob(SIZE, type.id);
+        Asserts.assertNE(0, addr, "allocation failed");
+
+        long secondAllocation = getUsage();
+        Asserts.assertEQ(firstAllocation, secondAllocation);
+
+        WHITE_BOX.freeCodeBlob(addr);
+        System.out.println("allocating till possible...");
+        ArrayList<Long> blobs = new ArrayList<>();
+        int size = (int) (CODE_CACHE_SIZE >> 7);
+        while ((addr = WHITE_BOX.allocateCodeBlob(size, type.id)) != 0) {
+            blobs.add(addr);
+        }
+        for (Long blob : blobs) {
+            WHITE_BOX.freeCodeBlob(blob);
+        }
+    }
+
+    private long getUsage() {
+        return bean.getUsage().getUsed();
+    }
+
+    private static class ForcedSweeper extends Thread {
+        private final int millis;
+        public ForcedSweeper(int millis) {
+            super("ForcedSweeper");
+            setDaemon(true);
+            this.millis = millis;
+        }
+        public void run() {
+            try {
+                while (true) {
+                    WHITE_BOX.forceNMethodSweep();
+                    Thread.sleep(millis);
+                }
+            } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+                throw new Error(e);
+            }
+        }
+    }
+}
hotspot/test/compiler/whitebox/GetCodeHeapEntriesTest.java (new file, 95 lines)
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+import java.util.Arrays;
+import java.util.EnumSet;
+
+import sun.hotspot.WhiteBox;
+import sun.hotspot.code.CodeBlob;
+import sun.hotspot.code.BlobType;
+import com.oracle.java.testlibrary.Asserts;
+
+/*
+ * @test GetCodeHeapEntriesTest
+ * @bug 8059624
+ * @library /testlibrary /testlibrary/whitebox
+ * @build GetCodeHeapEntriesTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:-SegmentedCodeCache
+ *                   GetCodeHeapEntriesTest
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:+SegmentedCodeCache
+ *                   GetCodeHeapEntriesTest
+ * @summary testing of WB::getCodeHeapEntries()
+ */
+public class GetCodeHeapEntriesTest {
+    private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+    private static final int SIZE = 1024;
+    private static final String DUMMY_NAME = "WB::DummyBlob";
+    private static EnumSet<BlobType> SEGMENTED_TYPES
+            = EnumSet.complementOf(EnumSet.of(BlobType.All));
+
+    public static void main(String[] args) {
+        EnumSet<BlobType> blobTypes = BlobType.getAvailable();
+        for (BlobType type : blobTypes) {
+            new GetCodeHeapEntriesTest(type).test();
+        }
+    }
+
+    private final BlobType type;
+    private GetCodeHeapEntriesTest(BlobType type) {
+        this.type = type;
+    }
+
+    private void test() {
+        System.out.printf("type %s%n", type);
+        long addr = WHITE_BOX.allocateCodeBlob(SIZE, type.id);
+        Asserts.assertNE(0, addr, "allocation failed");
+        CodeBlob[] blobs = CodeBlob.getCodeBlobs(type);
+        Asserts.assertNotNull(blobs);
+        CodeBlob blob = Arrays.stream(blobs)
+                              .filter(GetCodeHeapEntriesTest::filter)
+                              .findAny()
+                              .get();
+        Asserts.assertNotNull(blob);
+        Asserts.assertEQ(blob.code_blob_type, type);
+        Asserts.assertGTE(blob.size, SIZE);
+
+        WHITE_BOX.freeCodeBlob(addr);
+        blobs = CodeBlob.getCodeBlobs(type);
+        long count = Arrays.stream(blobs)
+                           .filter(GetCodeHeapEntriesTest::filter)
+                           .count();
+        Asserts.assertEQ(0L, count);
+    }
+
+    private static boolean filter(CodeBlob blob) {
+        if (blob == null) {
+            return false;
+        }
+        return DUMMY_NAME.equals(blob.name);
+    }
+}
@@ -22,7 +22,9 @@
  *
  */
 
+import sun.hotspot.code.BlobType;
 import sun.hotspot.code.NMethod;
+import com.oracle.java.testlibrary.Asserts;
 
 /*
  * @test GetNMethodTest
@@ -52,21 +54,46 @@ public class GetNMethodTest extends CompilerWhiteBoxTest {
 
         compile();
         checkCompiled();
 
         NMethod nmethod = NMethod.get(method, testCase.isOsr());
         if (IS_VERBOSE) {
             System.out.println("nmethod = " + nmethod);
         }
-        if (nmethod == null) {
-            throw new RuntimeException("nmethod of compiled method is null");
-        }
-        if (nmethod.insts.length == 0) {
-            throw new RuntimeException("compiled method's instructions is empty");
+        Asserts.assertNotNull(nmethod,
+                "nmethod of compiled method is null");
+        Asserts.assertNotNull(nmethod.insts,
+                "nmethod.insts of compiled method is null");
+        Asserts.assertGT(nmethod.insts.length, 0,
+                "compiled method's instructions is empty");
+        Asserts.assertNotNull(nmethod.code_blob_type, "blob type is null");
+        if (WHITE_BOX.getBooleanVMFlag("SegmentedCodeCache")) {
+            Asserts.assertNE(nmethod.code_blob_type, BlobType.All);
+            switch (nmethod.comp_level) {
+            case 1:
+            case 4:
+                checkBlockType(nmethod, BlobType.MethodNonProfiled);
+                break;
+            case 2:
+            case 3:
+                checkBlockType(nmethod, BlobType.MethodNonProfiled);
+                break;
+            default:
+                throw new Error("unexpected comp level " + nmethod);
+            }
+        } else {
+            Asserts.assertEQ(nmethod.code_blob_type, BlobType.All);
         }
 
         deoptimize();
         checkNotCompiled();
         nmethod = NMethod.get(method, testCase.isOsr());
-        if (nmethod != null) {
-            throw new RuntimeException("nmethod of non-compiled method isn't null");
-        }
+        Asserts.assertNull(nmethod,
+                "nmethod of non-compiled method isn't null");
+    }
+
+    private void checkBlockType(NMethod nmethod, BlobType expectedType) {
+        Asserts.assertEQ(nmethod.code_blob_type, expectedType,
+                String.format("blob_type[%s] for %d level isn't %s",
+                        nmethod.code_blob_type, nmethod.comp_level, expectedType));
     }
 }
@@ -29,7 +29,7 @@
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main ClassFileInstaller com.oracle.java.testlibrary.Platform
- * @run main/othervm/timeout=2400 -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:PerMethodRecompilationCutoff=3 -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* IsMethodCompilableTest
+ * @run main/othervm/timeout=2400 -Xbootclasspath/a:. -Xmixed -XX:-TieredCompilation -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:PerMethodRecompilationCutoff=3 -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* IsMethodCompilableTest
  * @summary testing of WB::isMethodCompilable()
  * @author igor.ignatyev@oracle.com
  */
hotspot/test/compiler/whitebox/LockCompilationTest.java (new file, 91 lines)
@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test LockCompilationTest
+ * @bug 8059624
+ * @library /testlibrary /testlibrary/whitebox
+ * @build LockCompilationTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm/timeout=600 -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* LockCompilationTest
+ * @summary testing of WB::lock/unlockCompilation()
+ */
+
+import java.io.OutputStream;
+import java.io.PrintWriter;
+import java.util.concurrent.BrokenBarrierException;
+import java.util.concurrent.CyclicBarrier;
+
+import com.oracle.java.testlibrary.Asserts;
+
+public class LockCompilationTest extends CompilerWhiteBoxTest {
+    public static void main(String[] args) throws Exception {
+        CompilerWhiteBoxTest.main(LockCompilationTest::new, args);
+    }
+
+    private LockCompilationTest(TestCase testCase) {
+        super(testCase);
+        // to prevent inlining of #method
+        WHITE_BOX.testSetDontInlineMethod(method, true);
+    }
+
+    protected void test() throws Exception {
+        checkNotCompiled();
+
+        System.out.println("locking compilation");
+        WHITE_BOX.lockCompilation();
+
+        try {
+            System.out.println("trying to compile");
+            compile();
+            // to check if it works correctly w/ safepoints
+            System.out.println("going to safepoint");
+            WHITE_BOX.fullGC();
+            waitBackgroundCompilation();
+            Asserts.assertTrue(
+                    WHITE_BOX.isMethodQueuedForCompilation(method),
+                    method + " must be in queue");
+            Asserts.assertFalse(
+                    WHITE_BOX.isMethodCompiled(method, false),
+                    method + " must be not compiled");
+            Asserts.assertEQ(
+                    WHITE_BOX.getMethodCompilationLevel(method, false), 0,
+                    method + " comp_level must be == 0");
+            Asserts.assertFalse(
+                    WHITE_BOX.isMethodCompiled(method, true),
+                    method + " must be not osr_compiled");
+            Asserts.assertEQ(
+                    WHITE_BOX.getMethodCompilationLevel(method, true), 0,
+                    method + " osr_comp_level must be == 0");
+        } finally {
+            System.out.println("unlocking compilation");
+            WHITE_BOX.unlockCompilation();
+        }
+        waitBackgroundCompilation();
+        Asserts.assertFalse(
+                WHITE_BOX.isMethodQueuedForCompilation(method),
+                method + " must not be in queue");
+    }
+}
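For orientation, the lock/enqueue/unlock pattern the new test exercises can be sketched outside the CompilerWhiteBoxTest harness. The sketch below is illustrative only: the class name, the compile level 4 (C2), and the entry bci -1 (a normal, non-OSR compilation) are assumptions, and the VM must be started with the WhiteBox classes on the boot class path plus -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI.

import java.lang.reflect.Method;

import sun.hotspot.WhiteBox;

public class LockCompilationSketch {
    public static void main(String[] args) throws Exception {
        WhiteBox wb = WhiteBox.getWhiteBox();
        Method m = LockCompilationSketch.class.getMethod("payload");

        wb.lockCompilation();       // the compile broker stops draining its queue
        try {
            // compile level 4 (C2) and entry bci -1 (non-OSR) are assumed values
            wb.enqueueMethodForCompilation(m, 4, -1);
            // while compilation is locked, the method stays queued and uncompiled
            System.out.println("queued: " + wb.isMethodQueuedForCompilation(m));
        } finally {
            wb.unlockCompilation(); // queued compilations may now proceed
        }
    }

    public static void payload() {}
}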
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -19,22 +19,19 @@
  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  * or visit www.oracle.com if you need additional information or have any
  * questions.
- *
  */

-package sun.jvm.hotspot.memory;
-
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-
-/** <P> Class EdenSpace describes eden-space in new
-    generation. (Currently it does not add any significant
-    functionality beyond ContiguousSpace.) */
-
-public class EdenSpace extends ContiguousSpace {
-  public EdenSpace(Address addr) {
-    super(addr);
-  }
-}
+/**
+ * @test TestNUMAPageSize
+ * @summary Make sure that start up with NUMA support does not cause problems.
+ * @bug 8061467
+ * @key gc
+ * @key regression
+ * @run main/othervm -Xmx8M -XX:+UseNUMA TestNUMAPageSize
+ */
+
+public class TestNUMAPageSize {
+    public static void main(String args[]) throws Exception {
+        // nothing to do
+    }
+}
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,25 +22,23 @@
  */

 /*
- * @test TestIncGC
+ * @test DisableResizePLAB
  * @key gc
- * @bug 8006398
- * @summary Test that the deprecated -Xincgc print a warning message
- * @library /testlibrary
+ * @bug 8060467
+ * @author filipp.zhinkin@oracle.com, john.coomes@oracle.com
+ * @summary Run CMS with PLAB resizing disabled and a small OldPLABSize
+ * @run main/othervm -XX:+UseConcMarkSweepGC -XX:-ResizePLAB -XX:OldPLABSize=1k -Xmx256m -XX:+PrintGCDetails DisableResizePLAB
  */

-import com.oracle.java.testlibrary.OutputAnalyzer;
-import com.oracle.java.testlibrary.ProcessTools;
-
-public class TestIncGC {
-
-  public static void main(String args[]) throws Exception {
-    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xincgc", "-version");
-    OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldContain("warning: Using incremental CMS is deprecated and will likely be removed in a future release");
-    output.shouldNotContain("error");
-    output.shouldHaveExitValue(0);
-  }
-}
+public class DisableResizePLAB {
+    public static void main(String args[]) throws Exception {
+        Object garbage[] = new Object[1_000];
+        for (int i = 0; i < garbage.length; i++) {
+            garbage[i] = new byte[0];
+        }
+        long startTime = System.currentTimeMillis();
+        while (System.currentTimeMillis() - startTime < 10_000) {
+            Object o = new byte[1024];
+        }
+    }
+}
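If one wanted to drive this new test the way the removed tests drove the VM, the testlibrary API already visible elsewhere in this diff would suffice. The launcher class below is a hypothetical sketch, not part of the changeset, and assumes DisableResizePLAB is on the class path of the spawned VM.

import com.oracle.java.testlibrary.OutputAnalyzer;
import com.oracle.java.testlibrary.ProcessTools;

public class DisableResizePLABLauncher {
    public static void main(String[] args) throws Exception {
        // spawn a child VM with the same flags as the test's @run line
        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
                "-XX:+UseConcMarkSweepGC", "-XX:-ResizePLAB", "-XX:OldPLABSize=1k",
                "-Xmx256m", "-XX:+PrintGCDetails", "DisableResizePLAB");
        OutputAnalyzer output = new OutputAnalyzer(pb.start());
        output.shouldNotContain("error");   // same sanity checks as the removed tests
        output.shouldHaveExitValue(0);
    }
}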
@@ -279,8 +279,7 @@ public class TestShrinkAuxiliaryData {
             "-XX:\\+UseConcMarkSweepGC",
             "-XX:\\+UseParallelOldGC",
             "-XX:\\+UseParNewGC",
-            "-Xconcgc",
-            "-Xincgc"
+            "-Xconcgc"
         };
     }
 }
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test TestCMSIncrementalMode
- * @key gc
- * @bug 8006398
- * @summary Test that the deprecated CMSIncrementalMode print a warning message
- * @library /testlibrary
- */
-
-import com.oracle.java.testlibrary.OutputAnalyzer;
-import com.oracle.java.testlibrary.ProcessTools;
-
-public class TestCMSIncrementalMode {
-
-  public static void main(String args[]) throws Exception {
-    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseConcMarkSweepGC", "-XX:+CMSIncrementalMode", "-version");
-    OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldContain("warning: Using incremental CMS is deprecated and will likely be removed in a future release");
-    output.shouldNotContain("error");
-    output.shouldHaveExitValue(0);
-  }
-}
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test TestCMSNoIncrementalMode
- * @key gc
- * @bug 8006398
- * @summary Test that CMS with incremental mode turned off does not print a warning message
- * @library /testlibrary
- */
-
-import com.oracle.java.testlibrary.OutputAnalyzer;
-import com.oracle.java.testlibrary.ProcessTools;
-
-public class TestCMSNoIncrementalMode {
-
-  public static void main(String args[]) throws Exception {
-    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseConcMarkSweepGC", "-XX:-CMSIncrementalMode", "-version");
-    OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldNotContain("deprecated");
-    output.shouldNotContain("error");
-    output.shouldHaveExitValue(0);
-  }
-}
@@ -143,8 +143,14 @@ public class WhiteBox {
     }
     public native boolean enqueueMethodForCompilation(Executable method, int compLevel, int entry_bci);
     public native void clearMethodState(Executable method);
+    public native void lockCompilation();
+    public native void unlockCompilation();
     public native int getMethodEntryBci(Executable method);
     public native Object[] getNMethod(Executable method, boolean isOsr);
+    public native long allocateCodeBlob(int size, int type);
+    public native void freeCodeBlob(long addr);
+    public native void forceNMethodSweep();
+    public native Object[] getCodeHeapEntries(int type);

     // Interned strings
     public native boolean isInStringTable(String str);
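The code-blob hooks added here are not exercised by any test shown in this diff. A hypothetical sketch of the allocateCodeBlob/freeCodeBlob pair follows; the semantics of the int type argument (presumably selecting a code heap) are not visible in this diff, so 0 is purely a placeholder, and the same WhiteBox launch flags as above are assumed.

import sun.hotspot.WhiteBox;

public class CodeBlobSketch {
    public static void main(String[] args) {
        WhiteBox wb = WhiteBox.getWhiteBox();
        long addr = wb.allocateCodeBlob(128, 0);   // 0 is a placeholder type
        try {
            // enumerate whatever lives in code heap 0, including our blob
            Object[] entries = wb.getCodeHeapEntries(0);
            System.out.println("entries in code heap 0: " + entries.length);
        } finally {
            wb.freeCodeBlob(addr);                 // always release the raw blob
        }
    }
}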
Some files were not shown because too many files have changed in this diff.