commit 70bbf43808
Author: Serguei Spitsyn
Date:   2017-10-09 07:08:53 +00:00

385 changed files with 10653 additions and 3929 deletions

View file

@@ -449,3 +449,4 @@ a85884d55ce32799f5c7382b7ea4839052b362a2 jdk-10+21
 e5357aa85dadacc6562175ff74714fecfb4470cf jdk-10+22
 22850b3a55240253841b9a425ad60a7fcdb22d47 jdk-10+23
 3b201865d5c1f244f555cad58da599c9261286d8 jdk-10+24
+8eb5e3ccee560c28ac9b1df2670adac2b3d36fad jdk-10+25

View file

@@ -28,8 +28,8 @@
 mydir="$(dirname "${BASH_SOURCE[0]}")"
 myname="$(basename "${BASH_SOURCE[0]}")"
-installed_jib_script=${mydir}/../../.jib/jib
-install_data=${mydir}/../../.jib/.data
+installed_jib_script=${mydir}/../.jib/jib
+install_data=${mydir}/../.jib/.data
 setup_url() {
     if [ -f ~/.config/jib/jib.conf ]; then
@@ -42,7 +42,7 @@ setup_url() {
     jib_revision="2.0-SNAPSHOT"
     jib_ext="jib.sh.gz"
-    closed_script="${mydir}/../../../closed/conf/jib-install.conf"
+    closed_script="${mydir}/../../closed/make/conf/jib-install.conf"
     if [ -f "${closed_script}" ]; then
         source "${closed_script}"
     fi

View file

@@ -127,7 +127,7 @@ scripting language.</span></p>
 <hr>
 <span><a name="package" id="package"></a></span>
 <h2><span>Scripting Package</span></h2>
-<p><span>The Java Scripting functionality is in the <code><a href="http://docs.oracle.com/javase/6/docs/api/javax/script/package-summary.html">javax.script</a></code>
+<p><span>The Java Scripting functionality is in the <code><a href="http://docs.oracle.com/javase/9/docs/api/javax/script/package-summary.html">javax.script</a></code>
 package. This is a relatively small, simple API. The starting point
 of the scripting API is the <code>ScriptEngineManager</code> class.
 A ScriptEngineManager object can discover script engines through
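For context, a minimal usage sketch of the API this doc hunk describes (standard javax.script; using "nashorn" as the engine name is an assumption based on the engine bundled with JDK 9/10):

    import javax.script.ScriptEngine;
    import javax.script.ScriptEngineManager;
    import javax.script.ScriptException;

    public class HelloScript {
        public static void main(String[] args) throws ScriptException {
            // The ScriptEngineManager is the starting point: it discovers
            // engine factories registered on the class/module path.
            ScriptEngineManager manager = new ScriptEngineManager();
            // Look up an engine by its registered short name.
            ScriptEngine engine = manager.getEngineByName("nashorn");
            // Evaluate a script string on the selected engine.
            engine.eval("print('hello from javax.script')");
        }
    }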

View file

@@ -41,7 +41,7 @@ JDK_CLASSES := $(call PathList, $(strip $(addprefix $(JDK_OUTPUTDIR)/modules/, \
 $(eval $(call SetupJavaCompiler, GENERATE_NEWBYTECODE_DEBUG, \
     JVM := $(JAVA_JAVAC), \
     JAVAC := $(NEW_JAVAC), \
-    FLAGS := -g -source 9 -target 9 --upgrade-module-path "$(JDK_OUTPUTDIR)/modules/" \
+    FLAGS := -g -source 10 -target 10 --upgrade-module-path "$(JDK_OUTPUTDIR)/modules/" \
         --system none --module-source-path $(call GetModuleSrcPath), \
     SERVER_DIR := $(SJAVAC_SERVER_DIR), \
     SERVER_JVM := $(SJAVAC_SERVER_JAVA)))

View file

@@ -36,7 +36,7 @@ ifeq ($(HAS_SPEC),)
   # Include the corresponding closed file, if present.
   # Normal hook mechanism cannot be used since we have no SPEC.
-  -include $(topdir)/closed/make/InitSupport.gmk
+  -include $(topdir)/../closed/make/InitSupport.gmk
 ##############################################################################
 # Helper functions for the initial part of Init.gmk, before the spec file is

View file

@@ -1311,6 +1311,7 @@ AC_DEFUN([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK_HELPER],
   $2LDFLAGS_JDKLIB="${$2LDFLAGS_JDK}"
   $2LDFLAGS_JDKLIB="${$2LDFLAGS_JDKLIB} ${SHARED_LIBRARY_FLAGS}"
+  $2LDFLAGS_JDKLIB="${$2LDFLAGS_JDKLIB} ${LDFLAGS_NO_EXEC_STACK}"
   if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
     $2JAVA_BASE_LDFLAGS="${$2JAVA_BASE_LDFLAGS} \
         -libpath:${OUTPUTDIR}/support/modules_libs/java.base"
@@ -1388,6 +1389,7 @@ $2LDFLAGS_JDKLIB="${$2LDFLAGS_JDKLIB} ${$2JAVA_BASE_LDFLAGS}"
   AC_SUBST($2JDKEXE_LIBS)
   AC_SUBST($2LDFLAGS_CXX_JDK)
   AC_SUBST($2LDFLAGS_HASH_STYLE)
+  AC_SUBST($2LDFLAGS_NO_EXEC_STACK)
   AC_SUBST($2JVM_CFLAGS)
   AC_SUBST($2JVM_LDFLAGS)

View file

@@ -723,6 +723,7 @@ OPENJDK_BUILD_JVM_LIBS
 OPENJDK_BUILD_JVM_ASFLAGS
 OPENJDK_BUILD_JVM_LDFLAGS
 OPENJDK_BUILD_JVM_CFLAGS
+OPENJDK_BUILD_LDFLAGS_NO_EXEC_STACK
 OPENJDK_BUILD_LDFLAGS_HASH_STYLE
 OPENJDK_BUILD_LDFLAGS_CXX_JDK
 OPENJDK_BUILD_JDKEXE_LIBS
@@ -738,6 +739,7 @@ JVM_LIBS
 JVM_ASFLAGS
 JVM_LDFLAGS
 JVM_CFLAGS
+LDFLAGS_NO_EXEC_STACK
 LDFLAGS_HASH_STYLE
 LDFLAGS_CXX_JDK
 JDKEXE_LIBS
@@ -5115,7 +5117,7 @@ VS_SDK_PLATFORM_NAME_2013=
 #CUSTOM_AUTOCONF_INCLUDE
 # Do not change or remove the following line, it is needed for consistency checks:
-DATE_WHEN_GENERATED=1506333008
+DATE_WHEN_GENERATED=1506397140
 ###############################################################################
 #
@@ -52024,6 +52026,7 @@ fi
   LDFLAGS_JDKLIB="${LDFLAGS_JDK}"
   LDFLAGS_JDKLIB="${LDFLAGS_JDKLIB} ${SHARED_LIBRARY_FLAGS}"
+  LDFLAGS_JDKLIB="${LDFLAGS_JDKLIB} ${LDFLAGS_NO_EXEC_STACK}"
   if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
     JAVA_BASE_LDFLAGS="${JAVA_BASE_LDFLAGS} \
         -libpath:${OUTPUTDIR}/support/modules_libs/java.base"
@@ -52109,6 +52112,7 @@ LDFLAGS_JDKLIB="${LDFLAGS_JDKLIB} ${JAVA_BASE_LDFLAGS}"
 # Special extras...
 if test "x$TOOLCHAIN_TYPE" = xsolstudio; then
   if test "x$OPENJDK_BUILD_CPU_ARCH" = "xsparc"; then
@@ -52903,6 +52907,7 @@ fi
   OPENJDK_BUILD_LDFLAGS_JDKLIB="${OPENJDK_BUILD_LDFLAGS_JDK}"
   OPENJDK_BUILD_LDFLAGS_JDKLIB="${OPENJDK_BUILD_LDFLAGS_JDKLIB} ${SHARED_LIBRARY_FLAGS}"
+  OPENJDK_BUILD_LDFLAGS_JDKLIB="${OPENJDK_BUILD_LDFLAGS_JDKLIB} ${LDFLAGS_NO_EXEC_STACK}"
   if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
     OPENJDK_BUILD_JAVA_BASE_LDFLAGS="${OPENJDK_BUILD_JAVA_BASE_LDFLAGS} \
         -libpath:${OUTPUTDIR}/support/modules_libs/java.base"
@@ -52988,6 +52993,7 @@ OPENJDK_BUILD_LDFLAGS_JDKLIB="${OPENJDK_BUILD_LDFLAGS_JDKLIB} ${OPENJDK_BUILD_JA
 # Tests are only ever compiled for TARGET
 # Flags for compiling test libraries
 CFLAGS_TESTLIB="$COMMON_CCXXFLAGS_JDK $CFLAGS_JDK $PICFLAG $CFLAGS_JDKLIB_EXTRA"

View file

@@ -387,6 +387,7 @@ CFLAGS_JDKEXE:=@CFLAGS_JDKEXE@
 CXXFLAGS_JDKEXE:=@CXXFLAGS_JDKEXE@
 LDFLAGS_HASH_STYLE := @LDFLAGS_HASH_STYLE@
+LDFLAGS_NO_EXEC_STACK := @LDFLAGS_NO_EXEC_STACK@
 JVM_CFLAGS := @JVM_CFLAGS@
 JVM_CFLAGS_SYMBOLS := @JVM_CFLAGS_SYMBOLS@

View file

@@ -58,7 +58,6 @@ BOOT_MODULES += \
     java.rmi \
     java.security.sasl \
     java.xml \
-    jdk.httpserver \
    jdk.internal.vm.ci \
     jdk.management \
     jdk.management.agent \
@@ -112,6 +111,7 @@ PLATFORM_MODULES += \
     jdk.crypto.cryptoki \
     jdk.crypto.ec \
     jdk.dynalink \
+    jdk.httpserver \
     jdk.incubator.httpclient \
     jdk.internal.vm.compiler.management \
     jdk.jsobject \

View file

@@ -900,6 +900,45 @@ var getJibProfilesProfiles = function (input, common, data) {
             }
         },
+        "windows-x64-open": {
+            artifacts: {
+                jdk: {
+                    local: "bundles/\\(jdk.*bin.tar.gz\\)",
+                    remote: [
+                        "bundles/openjdk/GPL/windows-x64/jdk-" + data.version
+                            + "_windows-x64_bin.tar.gz",
+                        "bundles/openjdk/GPL/windows-x64/\\1"
+                    ],
+                    subdir: "jdk-" + data.version
+                },
+                jre: {
+                    local: "bundles/\\(jre.*bin.tar.gz\\)",
+                    remote: "bundles/openjdk/GPL/windows-x64/\\1"
+                },
+                test: {
+                    local: "bundles/\\(jdk.*bin-tests.tar.gz\\)",
+                    remote: [
+                        "bundles/openjdk/GPL/windows-x64/jdk-" + data.version
+                            + "_windows-x64_bin-tests.tar.gz",
+                        "bundles/openjdk/GPL/windows-x64/\\1"
+                    ]
+                },
+                jdk_symbols: {
+                    local: "bundles/\\(jdk.*bin-symbols.tar.gz\\)",
+                    remote: [
+                        "bundles/openjdk/GPL/windows-x64/jdk-" + data.version
+                            + "_windows-x64_bin-symbols.tar.gz",
+                        "bundles/openjdk/GPL/windows-x64/\\1"
+                    ],
+                    subdir: "jdk-" + data.version
+                },
+                jre_symbols: {
+                    local: "bundles/\\(jre.*bin-symbols.tar.gz\\)",
+                    remote: "bundles/openjdk/GPL/windows-x64/\\1",
+                }
+            }
+        },
+
         "linux-x86-open-debug": {
             artifacts: {
                 jdk: {
@@ -929,9 +968,10 @@ var getJibProfilesProfiles = function (input, common, data) {
     profiles["linux-x86-ri-debug"] = clone(profiles["linux-x86-open-debug"]);
     profiles["macosx-x64-ri"] = clone(profiles["macosx-x64-open"]);
     profiles["windows-x86-ri"] = clone(profiles["windows-x86-open"]);
+    profiles["windows-x64-ri"] = clone(profiles["windows-x64-open"]);
     // Generate artifacts for ri profiles
-    [ "linux-x64-ri", "linux-x86-ri", "linux-x86-ri-debug", "macosx-x64-ri", "windows-x86-ri" ]
+    [ "linux-x64-ri", "linux-x86-ri", "linux-x86-ri-debug", "macosx-x64-ri", "windows-x86-ri", "windows-x64-ri" ]
         .forEach(function (name) {
             // Rewrite all remote dirs to "bundles/openjdk/BCL/..."
             for (artifactName in profiles[name].artifacts) {
@@ -947,6 +987,11 @@ var getJibProfilesProfiles = function (input, common, data) {
             configure_args: "--with-freetype-license="
                 + input.get("freetype", "install_path")
                 + "/freetype-2.7.1-v120-x86/freetype.md"
+        },
+        "windows-x64-ri": {
+            configure_args: "--with-freetype-license="
+                + input.get("freetype", "install_path")
+                + "/freetype-2.7.1-v120-x64/freetype.md"
         }
     };
     profiles = concatObjects(profiles, profilesRiFreetype);

View file

@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -36,7 +36,7 @@ ifneq ($(OPENJDK_TARGET_OS), windows)
   ifeq ($(STATIC_BUILD), false)
     ifeq ($(OPENJDK_TARGET_OS), linux)
       LIBJSIG_CFLAGS := -fPIC -D_GNU_SOURCE -D_REENTRANT $(EXTRA_CFLAGS)
-      LIBJSIG_LDFLAGS := $(LDFLAGS_HASH_STYLE) $(EXTRA_CFLAGS)
+      LIBJSIG_LDFLAGS := $(LDFLAGS_HASH_STYLE) ${LDFLAGS_NO_EXEC_STACK} $(EXTRA_CFLAGS)
       LIBJSIG_LIBS := $(LIBDL)
       # NOTE: The old build compiled this library without -soname.

View file

@@ -57,8 +57,8 @@ public class TransitiveDependencies {
         }
         JavaCompiler compiler = ToolProvider.getSystemJavaCompiler();
-        List<String> options = Arrays.asList("-source", "9",
-                                             "-target", "9",
+        List<String> options = Arrays.asList("-source", "10",
+                                             "-target", "10",
                                              "-proc:only",
                                              "--system", "none",
                                              "--module-source-path", args[0],

View file

@@ -174,8 +174,6 @@
   <target name="compile" depends="prepare" description="Compiles nashorn">
     <javac srcdir="${dynalink.module.src.dir}"
            destdir="${dynalink.module.classes.dir}"
-           source="${javac.source}"
-           target="${javac.target}"
            debug="${javac.debug}"
            encoding="${javac.encoding}"
            includeantruntime="false" fork="true">
@@ -190,8 +188,6 @@
     </delete>
     <javac srcdir="${nashorn.module.src.dir}"
            destdir="${nashorn.module.classes.dir}"
-           source="${javac.source}"
-           target="${javac.target}"
            debug="${javac.debug}"
            encoding="${javac.encoding}"
            includeantruntime="false" fork="true">
@@ -207,8 +203,6 @@
     </delete>
     <javac srcdir="${nashorn.shell.module.src.dir}"
            destdir="${nashorn.shell.module.classes.dir}"
-           source="${javac.source}"
-           target="${javac.target}"
            debug="${javac.debug}"
            encoding="${javac.encoding}"
            includeantruntime="false" fork="true">
@@ -342,8 +336,6 @@
     <javac srcdir="${test.src.dir}"
            destdir="${build.test.classes.dir}"
            classpath="${javac.test.classpath}"
-           source="${javac.source}"
-           target="${javac.target}"
            debug="${javac.debug}"
            encoding="${javac.encoding}"
            includeantruntime="false" fork="true">
@@ -351,7 +343,7 @@
       <compilerarg value="-Xlint:unchecked"/>
       <compilerarg value="-Xlint:deprecation"/>
       <compilerarg value="-Xdiags:verbose"/>
-      <compilerarg line="${test.module.imports}"/>
+      <compilerarg line="${test.module.imports.compile.time}"/>
     </javac>
     <copy todir="${build.test.classes.dir}/META-INF/services">

View file

@@ -24,8 +24,6 @@ application.title=nasgen
 # source and target levels
 build.compiler=modern
-javac.source=1.7
-javac.target=1.7
 # This directory is removed when the project is cleaned:
 nasgen.build.dir=../../../../build/nashorn/nasgen

View file

@@ -24,8 +24,6 @@ application.title=nashorntask
 # source and target levels
 build.compiler=modern
-javac.source=1.8
-javac.target=1.8
 # This directory is removed when the project is cleaned:
 nashorntask.build.dir=../../../../build/nashorn/nashorntask

View file

@@ -32,8 +32,6 @@ jdk.jline.src.dir=src/jdk.internal.le/share/classes
 # source and target levels
 build.compiler=modern
-javac.source=1.9
-javac.target=1.9
 javadoc.option=\
     -tag "implSpec:a:Implementation Requirements:" \
@@ -146,7 +144,7 @@ javac.test.classpath=\
     ${file.reference.bsh.jar}${path.separator}\
     ${file.reference.snakeyaml.jar}
-test.module.imports=\
+test.module.imports.compile.time=\
     --add-exports jdk.scripting.nashorn/jdk.nashorn.internal.ir=ALL-UNNAMED \
     --add-exports jdk.scripting.nashorn/jdk.nashorn.internal.codegen=ALL-UNNAMED \
     --add-exports jdk.scripting.nashorn/jdk.nashorn.internal.parser=ALL-UNNAMED \
@@ -159,7 +157,10 @@ test.module.imports=\
     --add-exports jdk.scripting.nashorn/jdk.nashorn.internal.runtime.regexp=ALL-UNNAMED \
     --add-exports jdk.scripting.nashorn/jdk.nashorn.internal.runtime.regexp.joni=ALL-UNNAMED \
     --add-exports jdk.scripting.nashorn/jdk.nashorn.tools=ALL-UNNAMED \
-    --add-exports java.base/jdk.internal.org.objectweb.asm=ALL-UNNAMED \
+    --add-exports java.base/jdk.internal.org.objectweb.asm=ALL-UNNAMED
+
+test.module.imports.runtime=\
+    ${test.module.imports.compile.time} \
     --add-opens jdk.scripting.nashorn/jdk.nashorn.internal.runtime=ALL-UNNAMED \
     --add-opens jdk.scripting.nashorn/jdk.nashorn.internal.runtime.doubleconv=ALL-UNNAMED
@@ -359,7 +360,7 @@ run.test.user.country=TR
 run.test.jvmargs.common=\
   -server \
-  ${test.module.imports} \
+  ${test.module.imports.runtime} \
  ${run.test.jvmargs.external} \
   --add-modules jdk.scripting.nashorn.shell \
   ${nashorn.override.option} \

View file

@@ -59,6 +59,7 @@ BUILD_HOTSPOT_JTREG_NATIVE_SRC += \
     $(TOPDIR)/test/hotspot/jtreg/runtime/SameObject \
     $(TOPDIR)/test/hotspot/jtreg/runtime/BoolReturn \
     $(TOPDIR)/test/hotspot/jtreg/runtime/noClassDefFoundMsg \
+    $(TOPDIR)/test/hotspot/jtreg/runtime/RedefineTests \
     $(TOPDIR)/test/hotspot/jtreg/compiler/floatingpoint/ \
     $(TOPDIR)/test/hotspot/jtreg/compiler/calls \
     $(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/GetOwnedMonitorInfo \
@@ -103,6 +104,7 @@ ifeq ($(TOOLCHAIN_TYPE), solstudio)
     BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libMAAClassLoadPrepare := -lc
     BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libMAAThreadStart := -lc
     BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libAllowedFunctions := -lc
+    BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libRedefineDoubleDelete := -lc
 endif
 ifeq ($(OPENJDK_TARGET_OS), linux)

View file

@@ -2840,6 +2840,44 @@ void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Regi
   bind(L_done);
 }
+// Code for BigInteger::mulAdd intrinsic
+// out    = r0
+// in     = r1
+// offset = r2  (already out.length-offset)
+// len    = r3
+// k      = r4
+//
+// pseudo code from java implementation:
+// carry = 0;
+// offset = out.length-offset - 1;
+// for (int j=len-1; j >= 0; j--) {
+//     product = (in[j] & LONG_MASK) * kLong + (out[offset] & LONG_MASK) + carry;
+//     out[offset--] = (int)product;
+//     carry = product >>> 32;
+// }
+// return (int)carry;
+void MacroAssembler::mul_add(Register out, Register in, Register offset,
+                             Register len, Register k) {
+  Label LOOP, END;
+  // pre-loop
+  cmp(len, zr); // cmp, not cbz/cbnz: to use condition twice => less branches
+  csel(out, zr, out, Assembler::EQ);
+  br(Assembler::EQ, END);
+  add(in, in, len, LSL, 2);         // in[j+1] address
+  add(offset, out, offset, LSL, 2); // out[offset + 1] address
+  mov(out, zr);                     // used to keep carry now
+  BIND(LOOP);
+  ldrw(rscratch1, Address(pre(in, -4)));
+  madd(rscratch1, rscratch1, k, out);
+  ldrw(rscratch2, Address(pre(offset, -4)));
+  add(rscratch1, rscratch1, rscratch2);
+  strw(rscratch1, Address(offset));
+  lsr(out, rscratch1, 32);
+  subs(len, len, 1);
+  br(Assembler::NE, LOOP);
+  BIND(END);
+}
 /**
  * Emits code to update CRC-32 with a byte value according to constants in table
  *
@@ -3291,6 +3329,7 @@ void MacroAssembler::load_mirror(Register dst, Register method) {
   ldr(dst, Address(dst, ConstMethod::constants_offset()));
   ldr(dst, Address(dst, ConstantPool::pool_holder_offset_in_bytes()));
   ldr(dst, Address(dst, mirror_offset));
+  resolve_oop_handle(dst);
 }
 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
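For reference, a plain-Java sketch of the kernel the mul_add stub above accelerates, transcribed from the pseudocode comment in the hunk (it mirrors java.math.BigInteger.mulAdd semantics, with LONG_MASK = 0xffffffffL):

    // Multiply in[0..len-1] by k and accumulate the 32-bit words into out,
    // walking both arrays from their least significant ends.
    static int mulAdd(int[] out, int[] in, int offset, int len, int k) {
        final long LONG_MASK = 0xffffffffL;
        long kLong = k & LONG_MASK;
        long carry = 0;
        offset = out.length - offset - 1;
        for (int j = len - 1; j >= 0; j--) {
            long product = (in[j] & LONG_MASK) * kLong
                         + (out[offset] & LONG_MASK) + carry;
            out[offset--] = (int) product;  // low 32 bits stored back
            carry = product >>> 32;         // high 32 bits carried forward
        }
        return (int) carry;
    }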

View file

@@ -1265,6 +1265,7 @@ public:
   void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z,
                        Register zlen, Register tmp1, Register tmp2, Register tmp3,
                        Register tmp4, Register tmp5, Register tmp6, Register tmp7);
+  void mul_add(Register out, Register in, Register offs, Register len, Register k);
   // ISB may be needed because of a safepoint
   void maybe_isb() { isb(); }

View file

@@ -3607,6 +3607,63 @@ class StubGenerator: public StubCodeGenerator {
     return start;
   }
+  address generate_squareToLen() {
+    // squareToLen algorithm for sizes 1..127 described in java code works
+    // faster than multiply_to_len on some CPUs and slower on others, but
+    // multiply_to_len shows a bit better overall results
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "squareToLen");
+    address start = __ pc();
+
+    const Register x     = r0;
+    const Register xlen  = r1;
+    const Register z     = r2;
+    const Register zlen  = r3;
+    const Register y     = r4; // == x
+    const Register ylen  = r5; // == xlen
+
+    const Register tmp1  = r10;
+    const Register tmp2  = r11;
+    const Register tmp3  = r12;
+    const Register tmp4  = r13;
+    const Register tmp5  = r14;
+    const Register tmp6  = r15;
+    const Register tmp7  = r16;
+
+    RegSet spilled_regs = RegSet::of(y, ylen);
+    BLOCK_COMMENT("Entry:");
+    __ enter();
+    __ push(spilled_regs, sp);
+    __ mov(y, x);
+    __ mov(ylen, xlen);
+    __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
+    __ pop(spilled_regs, sp);
+    __ leave();
+    __ ret(lr);
+    return start;
+  }
+
+  address generate_mulAdd() {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "mulAdd");
+    address start = __ pc();
+
+    const Register out    = r0;
+    const Register in     = r1;
+    const Register offset = r2;
+    const Register len    = r3;
+    const Register k      = r4;
+
+    BLOCK_COMMENT("Entry:");
+    __ enter();
+    __ mul_add(out, in, offset, len, k);
+    __ leave();
+    __ ret(lr);
+    return start;
+  }
+
   void ghash_multiply(FloatRegister result_lo, FloatRegister result_hi,
                       FloatRegister a, FloatRegister b, FloatRegister a1_xor_a0,
                       FloatRegister tmp1, FloatRegister tmp2, FloatRegister tmp3, FloatRegister tmp4) {
@@ -4913,6 +4970,14 @@ class StubGenerator: public StubCodeGenerator {
       StubRoutines::_multiplyToLen = generate_multiplyToLen();
     }
+    if (UseSquareToLenIntrinsic) {
+      StubRoutines::_squareToLen = generate_squareToLen();
+    }
+
+    if (UseMulAddIntrinsic) {
+      StubRoutines::_mulAdd = generate_mulAdd();
+    }
+
     if (UseMontgomeryMultiplyIntrinsic) {
       StubCodeMark mark(this, "StubRoutines", "montgomeryMultiply");
       MontgomeryMultiplyGenerator g(_masm, /*squaring*/false);

View file

@@ -2297,6 +2297,7 @@ void TemplateTable::load_field_cp_cache_entry(Register obj,
                                  ConstantPoolCacheEntry::f1_offset())));
     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
     __ ldr(obj, Address(obj, mirror_offset));
+    __ resolve_oop_handle(obj);
   }
 }

View file

@@ -340,6 +340,14 @@ void VM_Version::get_processor_features() {
     UseMultiplyToLenIntrinsic = true;
   }
+  if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
+    UseSquareToLenIntrinsic = true;
+  }
+
+  if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
+    UseMulAddIntrinsic = true;
+  }
+
   if (FLAG_IS_DEFAULT(UseBarriersForVolatile)) {
     UseBarriersForVolatile = (_features & CPU_DMB_ATOMICS) != 0;
   }

View file

@@ -2899,6 +2899,7 @@ void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp)
   ldr(tmp, Address(tmp, ConstMethod::constants_offset()));
   ldr(tmp, Address(tmp, ConstantPool::pool_holder_offset_in_bytes()));
   ldr(mirror, Address(tmp, mirror_offset));
+  resolve_oop_handle(mirror);
 }

View file

@@ -2963,6 +2963,7 @@ void TemplateTable::load_field_cp_cache_entry(Register Rcache,
                                cp_base_offset + ConstantPoolCacheEntry::f1_offset()));
     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
     __ ldr(Robj, Address(Robj, mirror_offset));
+    __ resolve_oop_handle(Robj);
   }
 }

View file

@@ -517,6 +517,9 @@ class Assembler : public AbstractAssembler {
     XXPERMDI_OPCODE= (60u << OPCODE_SHIFT |  10u << 3),
     XXMRGHW_OPCODE = (60u << OPCODE_SHIFT |  18u << 3),
     XXMRGLW_OPCODE = (60u << OPCODE_SHIFT |  50u << 3),
+    XXSPLTW_OPCODE = (60u << OPCODE_SHIFT | 164u << 2),
+    XXLXOR_OPCODE  = (60u << OPCODE_SHIFT | 154u << 3),
+    XXLEQV_OPCODE  = (60u << OPCODE_SHIFT | 186u << 3),
     // Vector Permute and Formatting
     VPKPX_OPCODE   = (4u  << OPCODE_SHIFT | 782u     ),
@@ -1125,6 +1128,7 @@ class Assembler : public AbstractAssembler {
   static int vsplti_sim(int x) { return opp_u_field(x, 15, 11); } // for vsplti* instructions
   static int vsldoi_shb(int x) { return opp_u_field(x, 25, 22); } // for vsldoi instruction
   static int vcmp_rc(   int x) { return opp_u_field(x, 21, 21); } // for vcmp* instructions
+  static int xxsplt_uim(int x) { return opp_u_field(x, 15, 14); } // for xxsplt* instructions
   //static int xo1( int x) { return opp_u_field(x, 29, 21); }// is contained in our opcodes
   //static int xo2( int x) { return opp_u_field(x, 30, 21); }// is contained in our opcodes
@@ -2155,6 +2159,11 @@ class Assembler : public AbstractAssembler {
   inline void xxpermdi( VectorSRegister d, VectorSRegister a, VectorSRegister b, int dm);
   inline void xxmrghw(  VectorSRegister d, VectorSRegister a, VectorSRegister b);
   inline void xxmrglw(  VectorSRegister d, VectorSRegister a, VectorSRegister b);
+  inline void mtvsrd(   VectorSRegister d, Register a);
+  inline void mtvsrwz(  VectorSRegister d, Register a);
+  inline void xxspltw(  VectorSRegister d, VectorSRegister b, int ui2);
+  inline void xxlxor(   VectorSRegister d, VectorSRegister a, VectorSRegister b);
+  inline void xxleqv(   VectorSRegister d, VectorSRegister a, VectorSRegister b);
   // VSX Extended Mnemonics
   inline void xxspltd(  VectorSRegister d, VectorSRegister a, int x);

View file

@@ -761,8 +761,13 @@ inline void Assembler::lvsr( VectorRegister d, Register s1, Register s2) { emit
 // Vector-Scalar (VSX) instructions.
 inline void Assembler::lxvd2x(  VectorSRegister d, Register s1)              { emit_int32( LXVD2X_OPCODE  | vsrt(d) | ra(0) | rb(s1)); }
 inline void Assembler::lxvd2x(  VectorSRegister d, Register s1, Register s2) { emit_int32( LXVD2X_OPCODE  | vsrt(d) | ra0mem(s1) | rb(s2)); }
-inline void Assembler::stxvd2x( VectorSRegister d, Register s1)              { emit_int32( STXVD2X_OPCODE | vsrt(d) | ra(0) | rb(s1)); }
-inline void Assembler::stxvd2x( VectorSRegister d, Register s1, Register s2) { emit_int32( STXVD2X_OPCODE | vsrt(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::stxvd2x( VectorSRegister d, Register s1)              { emit_int32( STXVD2X_OPCODE | vsrs(d) | ra(0) | rb(s1)); }
+inline void Assembler::stxvd2x( VectorSRegister d, Register s1, Register s2) { emit_int32( STXVD2X_OPCODE | vsrs(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::mtvsrd(  VectorSRegister d, Register a)               { emit_int32( MTVSRD_OPCODE  | vsrt(d) | ra(a)); }
+inline void Assembler::mtvsrwz( VectorSRegister d, Register a)               { emit_int32( MTVSRWZ_OPCODE | vsrt(d) | ra(a)); }
+inline void Assembler::xxspltw( VectorSRegister d, VectorSRegister b, int ui2) { emit_int32( XXSPLTW_OPCODE | vsrt(d) | vsrb(b) | xxsplt_uim(uimm(ui2,2))); }
+inline void Assembler::xxlxor(  VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XXLXOR_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
+inline void Assembler::xxleqv(  VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XXLEQV_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
 inline void Assembler::mtvrd(   VectorRegister d, Register a)                { emit_int32( MTVSRD_OPCODE  | vsrt(d->to_vsr()) | ra(a)); }
 inline void Assembler::mfvrd(   Register a, VectorRegister d)                { emit_int32( MFVSRD_OPCODE  | vsrt(d->to_vsr()) | ra(a)); }
 inline void Assembler::mtvrwz(  VectorRegister d, Register a)                { emit_int32( MTVSRWZ_OPCODE | vsrt(d->to_vsr()) | ra(a)); }

View file

@@ -32,7 +32,7 @@
 // Sets the default values for platform dependent flags used by the runtime system.
 // (see globals.hpp)
-define_pd_global(bool, ShareVtableStubs,      false); // Improves performance markedly for mtrt and compress.
+define_pd_global(bool, ShareVtableStubs,      true);
 define_pd_global(bool, NeedsDeoptSuspend,     false); // Only register window machines need this.
@@ -103,6 +103,9 @@ define_pd_global(intx, InitArrayShortSize, 9*BytesPerLong);
          "CPU Version: x for PowerX. Currently recognizes Power5 to "      \
          "Power8. Default is 0. Newer CPUs will be recognized as Power8.") \
                                                                            \
+  product(bool, SuperwordUseVSX, false,                                    \
+          "Use Power8 VSX instructions for superword optimization.")       \
+                                                                           \
   /* Reoptimize code-sequences of calls at runtime, e.g. replace an */     \
   /* indirect call by a direct call. */                                    \
   product(bool, ReoptimizeCallSequences, true,                             \

View file

@@ -3382,6 +3382,7 @@ void MacroAssembler::load_mirror_from_const_method(Register mirror, Register con
   ld(mirror, in_bytes(ConstMethod::constants_offset()), const_method);
   ld(mirror, ConstantPool::pool_holder_offset_in_bytes(), mirror);
   ld(mirror, in_bytes(Klass::java_mirror_offset()), mirror);
+  resolve_oop_handle(mirror);
 }
 // Clear Array

View file

@@ -254,6 +254,73 @@ register %{
   reg_def SR_SPEFSCR(SOC, SOC, Op_RegP, 4, SR_SPEFSCR->as_VMReg()); // v
   reg_def SR_PPR(    SOC, SOC, Op_RegP, 5, SR_PPR->as_VMReg());     // v
+// ----------------------------
+// Vector-Scalar Registers
+// ----------------------------
+  reg_def VSR0 ( SOC, SOC, Op_VecX, 0, NULL);
+  reg_def VSR1 ( SOC, SOC, Op_VecX, 1, NULL);
+  reg_def VSR2 ( SOC, SOC, Op_VecX, 2, NULL);
+  reg_def VSR3 ( SOC, SOC, Op_VecX, 3, NULL);
+  reg_def VSR4 ( SOC, SOC, Op_VecX, 4, NULL);
+  reg_def VSR5 ( SOC, SOC, Op_VecX, 5, NULL);
+  reg_def VSR6 ( SOC, SOC, Op_VecX, 6, NULL);
+  reg_def VSR7 ( SOC, SOC, Op_VecX, 7, NULL);
+  reg_def VSR8 ( SOC, SOC, Op_VecX, 8, NULL);
+  reg_def VSR9 ( SOC, SOC, Op_VecX, 9, NULL);
+  reg_def VSR10 ( SOC, SOC, Op_VecX, 10, NULL);
+  reg_def VSR11 ( SOC, SOC, Op_VecX, 11, NULL);
+  reg_def VSR12 ( SOC, SOC, Op_VecX, 12, NULL);
+  reg_def VSR13 ( SOC, SOC, Op_VecX, 13, NULL);
+  reg_def VSR14 ( SOC, SOC, Op_VecX, 14, NULL);
+  reg_def VSR15 ( SOC, SOC, Op_VecX, 15, NULL);
+  reg_def VSR16 ( SOC, SOC, Op_VecX, 16, NULL);
+  reg_def VSR17 ( SOC, SOC, Op_VecX, 17, NULL);
+  reg_def VSR18 ( SOC, SOC, Op_VecX, 18, NULL);
+  reg_def VSR19 ( SOC, SOC, Op_VecX, 19, NULL);
+  reg_def VSR20 ( SOC, SOC, Op_VecX, 20, NULL);
+  reg_def VSR21 ( SOC, SOC, Op_VecX, 21, NULL);
+  reg_def VSR22 ( SOC, SOC, Op_VecX, 22, NULL);
+  reg_def VSR23 ( SOC, SOC, Op_VecX, 23, NULL);
+  reg_def VSR24 ( SOC, SOC, Op_VecX, 24, NULL);
+  reg_def VSR25 ( SOC, SOC, Op_VecX, 25, NULL);
+  reg_def VSR26 ( SOC, SOC, Op_VecX, 26, NULL);
+  reg_def VSR27 ( SOC, SOC, Op_VecX, 27, NULL);
+  reg_def VSR28 ( SOC, SOC, Op_VecX, 28, NULL);
+  reg_def VSR29 ( SOC, SOC, Op_VecX, 29, NULL);
+  reg_def VSR30 ( SOC, SOC, Op_VecX, 30, NULL);
+  reg_def VSR31 ( SOC, SOC, Op_VecX, 31, NULL);
+  reg_def VSR32 ( SOC, SOC, Op_VecX, 32, NULL);
+  reg_def VSR33 ( SOC, SOC, Op_VecX, 33, NULL);
+  reg_def VSR34 ( SOC, SOC, Op_VecX, 34, NULL);
+  reg_def VSR35 ( SOC, SOC, Op_VecX, 35, NULL);
+  reg_def VSR36 ( SOC, SOC, Op_VecX, 36, NULL);
+  reg_def VSR37 ( SOC, SOC, Op_VecX, 37, NULL);
+  reg_def VSR38 ( SOC, SOC, Op_VecX, 38, NULL);
+  reg_def VSR39 ( SOC, SOC, Op_VecX, 39, NULL);
+  reg_def VSR40 ( SOC, SOC, Op_VecX, 40, NULL);
+  reg_def VSR41 ( SOC, SOC, Op_VecX, 41, NULL);
+  reg_def VSR42 ( SOC, SOC, Op_VecX, 42, NULL);
+  reg_def VSR43 ( SOC, SOC, Op_VecX, 43, NULL);
+  reg_def VSR44 ( SOC, SOC, Op_VecX, 44, NULL);
+  reg_def VSR45 ( SOC, SOC, Op_VecX, 45, NULL);
+  reg_def VSR46 ( SOC, SOC, Op_VecX, 46, NULL);
+  reg_def VSR47 ( SOC, SOC, Op_VecX, 47, NULL);
+  reg_def VSR48 ( SOC, SOC, Op_VecX, 48, NULL);
+  reg_def VSR49 ( SOC, SOC, Op_VecX, 49, NULL);
+  reg_def VSR50 ( SOC, SOC, Op_VecX, 50, NULL);
+  reg_def VSR51 ( SOC, SOC, Op_VecX, 51, NULL);
+  reg_def VSR52 ( SOC, SOC, Op_VecX, 52, NULL);
+  reg_def VSR53 ( SOC, SOC, Op_VecX, 53, NULL);
+  reg_def VSR54 ( SOC, SOC, Op_VecX, 54, NULL);
+  reg_def VSR55 ( SOC, SOC, Op_VecX, 55, NULL);
+  reg_def VSR56 ( SOC, SOC, Op_VecX, 56, NULL);
+  reg_def VSR57 ( SOC, SOC, Op_VecX, 57, NULL);
+  reg_def VSR58 ( SOC, SOC, Op_VecX, 58, NULL);
+  reg_def VSR59 ( SOC, SOC, Op_VecX, 59, NULL);
+  reg_def VSR60 ( SOC, SOC, Op_VecX, 60, NULL);
+  reg_def VSR61 ( SOC, SOC, Op_VecX, 61, NULL);
+  reg_def VSR62 ( SOC, SOC, Op_VecX, 62, NULL);
+  reg_def VSR63 ( SOC, SOC, Op_VecX, 63, NULL);
+
 // ----------------------------
 // Specify priority of register selection within phases of register
@@ -385,6 +452,73 @@ alloc_class chunk2 (
 );
 alloc_class chunk3 (
+  VSR0,
+  VSR1,
+  VSR2,
+  VSR3,
+  VSR4,
+  VSR5,
+  VSR6,
+  VSR7,
+  VSR8,
+  VSR9,
+  VSR10,
+  VSR11,
+  VSR12,
+  VSR13,
+  VSR14,
+  VSR15,
+  VSR16,
+  VSR17,
+  VSR18,
+  VSR19,
+  VSR20,
+  VSR21,
+  VSR22,
+  VSR23,
+  VSR24,
+  VSR25,
+  VSR26,
+  VSR27,
+  VSR28,
+  VSR29,
+  VSR30,
+  VSR31,
+  VSR32,
+  VSR33,
+  VSR34,
+  VSR35,
+  VSR36,
+  VSR37,
+  VSR38,
+  VSR39,
+  VSR40,
+  VSR41,
+  VSR42,
+  VSR43,
+  VSR44,
+  VSR45,
+  VSR46,
+  VSR47,
+  VSR48,
+  VSR49,
+  VSR50,
+  VSR51,
+  VSR52,
+  VSR53,
+  VSR54,
+  VSR55,
+  VSR56,
+  VSR57,
+  VSR58,
+  VSR59,
+  VSR60,
+  VSR61,
+  VSR62,
+  VSR63
+);
+
+alloc_class chunk4 (
   // special registers
   // These registers are not allocated, but used for nodes generated by postalloc expand.
   SR_XER,
@@ -769,6 +903,45 @@ reg_class dbl_reg(
   F31, F31_H // nv!
 );
+// ----------------------------
+// Vector-Scalar Register Class
+// ----------------------------
+
+reg_class vs_reg(
+  VSR32,
+  VSR33,
+  VSR34,
+  VSR35,
+  VSR36,
+  VSR37,
+  VSR38,
+  VSR39,
+  VSR40,
+  VSR41,
+  VSR42,
+  VSR43,
+  VSR44,
+  VSR45,
+  VSR46,
+  VSR47,
+  VSR48,
+  VSR49,
+  VSR50,
+  VSR51
+  // VSR52, // nv!
+  // VSR53, // nv!
+  // VSR54, // nv!
+  // VSR55, // nv!
+  // VSR56, // nv!
+  // VSR57, // nv!
+  // VSR58, // nv!
+  // VSR59, // nv!
+  // VSR60, // nv!
+  // VSR61, // nv!
+  // VSR62, // nv!
+  // VSR63  // nv!
+);
+
 %}

 //----------DEFINITION BLOCK---------------------------------------------------
@@ -1502,7 +1675,7 @@ static enum RC rc_class(OptoReg::Name reg) {
   if (reg < 64+64) return rc_float;
   // Between float regs & stack are the flags regs.
-  assert(OptoReg::is_stack(reg), "blow up if spilling flags");
+  assert(OptoReg::is_stack(reg) || reg < 64+64+64, "blow up if spilling flags");
   return rc_stack;
 }
@@ -2048,15 +2221,25 @@ const bool Matcher::convL2FSupported(void) {
 // Vector width in bytes.
 const int Matcher::vector_width_in_bytes(BasicType bt) {
+  if (SuperwordUseVSX) {
+    assert(MaxVectorSize == 16, "");
+    return 16;
+  } else {
     assert(MaxVectorSize == 8, "");
     return 8;
   }
+}
 // Vector ideal reg.
 const uint Matcher::vector_ideal_reg(int size) {
+  if (SuperwordUseVSX) {
+    assert(MaxVectorSize == 16 && size == 16, "");
+    return Op_VecX;
+  } else {
     assert(MaxVectorSize == 8 && size == 8, "");
     return Op_RegL;
   }
+}
 const uint Matcher::vector_shift_count_ideal_reg(int size) {
   fatal("vector shift is not supported");
@@ -2075,7 +2258,7 @@ const int Matcher::min_vector_size(const BasicType bt) {
 // PPC doesn't support misaligned vectors store/load.
 const bool Matcher::misaligned_vectors_ok() {
-  return false;
+  return !AlignVector; // can be changed by flag
 }
 // PPC AES support not yet implemented
@@ -2217,10 +2400,31 @@ const MachRegisterNumbers farg_reg[13] = {
   F13_num
 };
+const MachRegisterNumbers vsarg_reg[64] = {
+  VSR0_num, VSR1_num, VSR2_num, VSR3_num,
+  VSR4_num, VSR5_num, VSR6_num, VSR7_num,
+  VSR8_num, VSR9_num, VSR10_num, VSR11_num,
+  VSR12_num, VSR13_num, VSR14_num, VSR15_num,
+  VSR16_num, VSR17_num, VSR18_num, VSR19_num,
+  VSR20_num, VSR21_num, VSR22_num, VSR23_num,
+  VSR24_num, VSR25_num, VSR26_num, VSR27_num,
+  VSR28_num, VSR29_num, VSR30_num, VSR31_num,
+  VSR32_num, VSR33_num, VSR34_num, VSR35_num,
+  VSR36_num, VSR37_num, VSR38_num, VSR39_num,
+  VSR40_num, VSR41_num, VSR42_num, VSR43_num,
+  VSR44_num, VSR45_num, VSR46_num, VSR47_num,
+  VSR48_num, VSR49_num, VSR50_num, VSR51_num,
+  VSR52_num, VSR53_num, VSR54_num, VSR55_num,
+  VSR56_num, VSR57_num, VSR58_num, VSR59_num,
+  VSR60_num, VSR61_num, VSR62_num, VSR63_num
+};
+
 const int num_iarg_registers = sizeof(iarg_reg) / sizeof(iarg_reg[0]);
 const int num_farg_registers = sizeof(farg_reg) / sizeof(farg_reg[0]);
+const int num_vsarg_registers = sizeof(vsarg_reg) / sizeof(vsarg_reg[0]);
 // Return whether or not this register is ever used as an argument. This
 // function is used on startup to build the trampoline stubs in generateOptoStub.
 // Registers not mentioned will be killed by the VM call in the trampoline, and
@@ -2552,6 +2756,115 @@ loadConLNodesTuple loadConLNodesTuple_create(PhaseRegAlloc *ra_, Node *toc, immL
   return nodes;
 }
+typedef struct {
+  loadConL_hiNode *_large_hi;
+  loadConL_loNode *_large_lo;
+  mtvsrdNode *_moved;
+  xxspltdNode *_replicated;
+  loadConLNode *_small;
+  MachNode *_last;
+} loadConLReplicatedNodesTuple;
+
+loadConLReplicatedNodesTuple loadConLReplicatedNodesTuple_create(Compile *C, PhaseRegAlloc *ra_, Node *toc, immLOper *immSrc,
+                                                                 vecXOper *dst, immI_0Oper *zero,
+                                                                 OptoReg::Name reg_second, OptoReg::Name reg_first,
+                                                                 OptoReg::Name reg_vec_second, OptoReg::Name reg_vec_first) {
+  loadConLReplicatedNodesTuple nodes;
+
+  const bool large_constant_pool = true; // TODO: PPC port C->cfg()->_consts_size > 4000;
+  if (large_constant_pool) {
+    // Create new nodes.
+    loadConL_hiNode *m1 = new loadConL_hiNode();
+    loadConL_loNode *m2 = new loadConL_loNode();
+    mtvsrdNode *m3 = new mtvsrdNode();
+    xxspltdNode *m4 = new xxspltdNode();
+
+    // inputs for new nodes
+    m1->add_req(NULL, toc);
+    m2->add_req(NULL, m1);
+    m3->add_req(NULL, m2);
+    m4->add_req(NULL, m3);
+
+    // operands for new nodes
+    m1->_opnds[0] = new iRegLdstOper(); // dst
+    m1->_opnds[1] = immSrc;             // src
+    m1->_opnds[2] = new iRegPdstOper(); // toc
+
+    m2->_opnds[0] = new iRegLdstOper(); // dst
+    m2->_opnds[1] = immSrc;             // src
+    m2->_opnds[2] = new iRegLdstOper(); // base
+
+    m3->_opnds[0] = new vecXOper();     // dst
+    m3->_opnds[1] = new iRegLdstOper(); // src
+
+    m4->_opnds[0] = new vecXOper();     // dst
+    m4->_opnds[1] = new vecXOper();     // src
+    m4->_opnds[2] = zero;
+
+    // Initialize ins_attrib TOC fields.
+    m1->_const_toc_offset = -1;
+    m2->_const_toc_offset_hi_node = m1;
+
+    // Initialize ins_attrib instruction offset.
+    m1->_cbuf_insts_offset = -1;
+
+    // register allocation for new nodes
+    ra_->set_pair(m1->_idx, reg_second, reg_first);
+    ra_->set_pair(m2->_idx, reg_second, reg_first);
+    ra_->set1(m3->_idx, reg_second);
+    ra_->set2(m3->_idx, reg_vec_first);
+    ra_->set_pair(m4->_idx, reg_vec_second, reg_vec_first);
+
+    // Create result.
+    nodes._large_hi = m1;
+    nodes._large_lo = m2;
+    nodes._moved = m3;
+    nodes._replicated = m4;
+    nodes._small = NULL;
+    nodes._last = nodes._replicated;
+    assert(m2->bottom_type()->isa_long(), "must be long");
+  } else {
+    loadConLNode *m2 = new loadConLNode();
+    mtvsrdNode *m3 = new mtvsrdNode();
+    xxspltdNode *m4 = new xxspltdNode();
+
+    // inputs for new nodes
+    m2->add_req(NULL, toc);
+
+    // operands for new nodes
+    m2->_opnds[0] = new iRegLdstOper(); // dst
+    m2->_opnds[1] = immSrc;             // src
+    m2->_opnds[2] = new iRegPdstOper(); // toc
+
+    m3->_opnds[0] = new vecXOper();     // dst
+    m3->_opnds[1] = new iRegLdstOper(); // src
+
+    m4->_opnds[0] = new vecXOper();     // dst
+    m4->_opnds[1] = new vecXOper();     // src
+    m4->_opnds[2] = zero;
+
+    // Initialize ins_attrib instruction offset.
+    m2->_cbuf_insts_offset = -1;
+    ra_->set1(m3->_idx, reg_second);
+    ra_->set2(m3->_idx, reg_vec_first);
+    ra_->set_pair(m4->_idx, reg_vec_second, reg_vec_first);
+
+    // register allocation for new nodes
+    ra_->set_pair(m2->_idx, reg_second, reg_first);
+
+    // Create result.
+    nodes._large_hi = NULL;
+    nodes._large_lo = NULL;
+    nodes._small = m2;
+    nodes._moved = m3;
+    nodes._replicated = m4;
+    nodes._last = nodes._replicated;
+    assert(m2->bottom_type()->isa_long(), "must be long");
+  }
+
+  return nodes;
+}
+
 %} // source

 encode %{
@@ -3212,6 +3525,27 @@ encode %{
     assert(loadConLNodes._last->bottom_type()->isa_long(), "must be long");
   %}
+  enc_class postalloc_expand_load_replF_constant_vsx(vecX dst, immF src, iRegLdst toc) %{
+    // Create new nodes.
+
+    // Make an operand with the bit pattern to load as float.
+    immLOper *op_repl = new immLOper((jlong)replicate_immF(op_src->constantF()));
+    immI_0Oper *op_zero = new immI_0Oper(0);
+
+    loadConLReplicatedNodesTuple loadConLNodes =
+      loadConLReplicatedNodesTuple_create(C, ra_, n_toc, op_repl, op_dst, op_zero,
+                                          OptoReg::Name(R20_H_num), OptoReg::Name(R20_num),
+                                          OptoReg::Name(VSR11_num), OptoReg::Name(VSR10_num));
+
+    // Push new nodes.
+    if (loadConLNodes._large_hi) { nodes->push(loadConLNodes._large_hi); }
+    if (loadConLNodes._large_lo) { nodes->push(loadConLNodes._large_lo); }
+    if (loadConLNodes._moved)    { nodes->push(loadConLNodes._moved); }
+    if (loadConLNodes._last)     { nodes->push(loadConLNodes._last); }
+
+    assert(nodes->length() >= 1, "must have created at least 1 node");
+  %}
+
   // This enc_class is needed so that scheduler gets proper
   // input mapping for latency computation.
   enc_class enc_poll(immI dst, iRegLdst poll) %{
@@ -3840,6 +4174,14 @@ ins_attrib ins_field_load_ic_node(0);
 //
 // Formats are generated automatically for constants and base registers.
+operand vecX() %{
+  constraint(ALLOC_IN_RC(vs_reg));
+  match(VecX);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
 //----------Simple Operands----------------------------------------------------
 // Immediate Operands
@@ -5372,6 +5714,20 @@ instruct loadV8(iRegLdst dst, memoryAlg4 mem) %{
   ins_pipe(pipe_class_memory);
 %}
+// Load Aligned Packed Byte
+instruct loadV16(vecX dst, indirect mem) %{
+  predicate(n->as_LoadVector()->memory_size() == 16);
+  match(Set dst (LoadVector mem));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LXVD2X      $dst, $mem \t// load 16-byte Vector" %}
+  size(4);
+  ins_encode %{
+    __ lxvd2x($dst$$VectorSRegister, $mem$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 // Load Range, range = array length (=jint)
 instruct loadRange(iRegIdst dst, memory mem) %{
   match(Set dst (LoadRange mem));
@@ -6368,6 +6724,20 @@ instruct storeA8B(memoryAlg4 mem, iRegLsrc src) %{
   ins_pipe(pipe_class_memory);
 %}
+// Store Packed Byte long register to memory
+instruct storeV16(indirect mem, vecX src) %{
+  predicate(n->as_StoreVector()->memory_size() == 16);
+  match(Set mem (StoreVector mem src));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "STXVD2X     $mem, $src \t// store 16-byte Vector" %}
+  size(4);
+  ins_encode %{
+    __ stxvd2x($src$$VectorSRegister, $mem$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 // Store Compressed Oop
 instruct storeN(memory dst, iRegN_P2N src) %{
   match(Set dst (StoreN dst src));
@@ -13239,6 +13609,26 @@ instruct storeS_reversed(iRegIsrc src, indirect mem) %{
   ins_pipe(pipe_class_default);
 %}
+instruct mtvsrwz(vecX temp1, iRegIsrc src) %{
+  effect(DEF temp1, USE src);
+
+  size(4);
+  ins_encode %{
+    __ mtvsrwz($temp1$$VectorSRegister, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct xxspltw(vecX dst, vecX src, immI8 imm1) %{
+  effect(DEF dst, USE src, USE imm1);
+
+  size(4);
+  ins_encode %{
+    __ xxspltw($dst$$VectorSRegister, $src$$VectorSRegister, $imm1$$constant);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 //---------- Replicate Vector Instructions ------------------------------------

 // Insrdi does replicate if src == dst.
@@ -13318,6 +13708,46 @@ instruct repl8B_immIminus1(iRegLdst dst, immI_minus1 src) %{
   ins_pipe(pipe_class_default);
 %}
+instruct repl16B_reg_Ex(vecX dst, iRegIsrc src) %{
+  match(Set dst (ReplicateB src));
+  predicate(n->as_Vector()->length() == 16);
+
+  expand %{
+    iRegLdst tmpL;
+    vecX tmpV;
+    immI8 imm1 %{ (int) 1 %}
+    moveReg(tmpL, src);
+    repl56(tmpL);
+    repl48(tmpL);
+    mtvsrwz(tmpV, tmpL);
+    xxspltw(dst, tmpV, imm1);
+  %}
+%}
+
+instruct repl16B_immI0(vecX dst, immI_0 zero) %{
+  match(Set dst (ReplicateB zero));
+  predicate(n->as_Vector()->length() == 16);
+
+  format %{ "XXLXOR      $dst, $zero \t// replicate16B" %}
+  size(4);
+  ins_encode %{
+    __ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct repl16B_immIminus1(vecX dst, immI_minus1 src) %{
+  match(Set dst (ReplicateB src));
+  predicate(n->as_Vector()->length() == 16);
+
+  format %{ "XXLEQV      $dst, $src \t// replicate16B" %}
+  size(4);
+  ins_encode %{
+    __ xxleqv($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct repl4S_reg_Ex(iRegLdst dst, iRegIsrc src) %{
   match(Set dst (ReplicateS src));
   predicate(n->as_Vector()->length() == 4);
@@ -13352,6 +13782,46 @@ instruct repl4S_immIminus1(iRegLdst dst, immI_minus1 src) %{
   ins_pipe(pipe_class_default);
 %}
+instruct repl8S_reg_Ex(vecX dst, iRegIsrc src) %{
+  match(Set dst (ReplicateS src));
+  predicate(n->as_Vector()->length() == 8);
+
+  expand %{
+    iRegLdst tmpL;
+    vecX tmpV;
+    immI8 zero %{ (int) 0 %}
+    moveReg(tmpL, src);
+    repl48(tmpL);
+    repl32(tmpL);
+    mtvsrd(tmpV, tmpL);
+    xxpermdi(dst, tmpV, tmpV, zero);
+  %}
+%}
+
+instruct repl8S_immI0(vecX dst, immI_0 zero) %{
+  match(Set dst (ReplicateS zero));
+  predicate(n->as_Vector()->length() == 8);
+
+  format %{ "XXLXOR      $dst, $zero \t// replicate8S" %}
+  size(4);
+  ins_encode %{
+    __ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct repl8S_immIminus1(vecX dst, immI_minus1 src) %{
+  match(Set dst (ReplicateS src));
+  predicate(n->as_Vector()->length() == 8);
+
+  format %{ "XXLEQV      $dst, $src \t// replicate8S" %}
+  size(4);
+  ins_encode %{
+    __ xxleqv($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct repl2I_reg_Ex(iRegLdst dst, iRegIsrc src) %{
   match(Set dst (ReplicateI src));
   predicate(n->as_Vector()->length() == 2);
@@ -13386,6 +13856,46 @@ instruct repl2I_immIminus1(iRegLdst dst, immI_minus1 src) %{
   ins_pipe(pipe_class_default);
 %}
+instruct repl4I_reg_Ex(vecX dst, iRegIsrc src) %{
+  match(Set dst (ReplicateI src));
+  predicate(n->as_Vector()->length() == 4);
+  ins_cost(2 * DEFAULT_COST);
+
+  expand %{
+    iRegLdst tmpL;
+    vecX tmpV;
+    immI8 zero %{ (int) 0 %}
+    moveReg(tmpL, src);
+    repl32(tmpL);
+    mtvsrd(tmpV, tmpL);
+    xxpermdi(dst, tmpV, tmpV, zero);
+  %}
+%}
+
+instruct repl4I_immI0(vecX dst, immI_0 zero) %{
+  match(Set dst (ReplicateI zero));
+  predicate(n->as_Vector()->length() == 4);
+
+  format %{ "XXLXOR      $dst, $zero \t// replicate4I" %}
+  size(4);
+  ins_encode %{
+    __ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct repl4I_immIminus1(vecX dst, immI_minus1 src) %{
+  match(Set dst (ReplicateI src));
+  predicate(n->as_Vector()->length() == 4);
+
+  format %{ "XXLEQV      $dst, $dst, $dst \t// replicate4I" %}
+  size(4);
+  ins_encode %{
+    __ xxleqv($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 // Move float to int register via stack, replicate.
 instruct repl2F_reg_Ex(iRegLdst dst, regF src) %{
   match(Set dst (ReplicateF src));
@ -13484,6 +13994,154 @@ instruct overflowMulL_reg_reg(flagsRegCR0 cr0, iRegLsrc op1, iRegLsrc op2) %{
%} %}
instruct repl4F_reg_Ex(vecX dst, regF src) %{
match(Set dst (ReplicateF src));
predicate(n->as_Vector()->length() == 4);
ins_cost(2 * MEMORY_REF_COST + DEFAULT_COST);
expand %{
stackSlotL tmpS;
iRegIdst tmpI;
iRegLdst tmpL;
vecX tmpV;
immI8 zero %{ (int) 0 %}
moveF2I_reg_stack(tmpS, src); // Move float to stack.
moveF2I_stack_reg(tmpI, tmpS); // Move stack to int reg.
moveReg(tmpL, tmpI); // Move int to long reg.
repl32(tmpL); // Replicate bitpattern.
mtvsrd(tmpV, tmpL);
xxpermdi(dst, tmpV, tmpV, zero);
%}
%}
instruct repl4F_immF_Ex(vecX dst, immF src) %{
match(Set dst (ReplicateF src));
predicate(n->as_Vector()->length() == 4);
ins_cost(10 * DEFAULT_COST);
postalloc_expand( postalloc_expand_load_replF_constant_vsx(dst, src, constanttablebase) );
%}
instruct repl4F_immF0(vecX dst, immF_0 zero) %{
match(Set dst (ReplicateF zero));
predicate(n->as_Vector()->length() == 4);
format %{ "XXLXOR $dst, $zero \t// replicate4F" %}
ins_encode %{
__ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
%}
ins_pipe(pipe_class_default);
%}
instruct repl2D_reg_Ex(vecX dst, regD src) %{
match(Set dst (ReplicateD src));
predicate(n->as_Vector()->length() == 2);
expand %{
stackSlotL tmpS;
iRegLdst tmpL;
iRegLdst tmp;
vecX tmpV;
immI8 zero %{ (int) 0 %}
moveD2L_reg_stack(tmpS, src);
moveD2L_stack_reg(tmpL, tmpS);
mtvsrd(tmpV, tmpL);
xxpermdi(dst, tmpV, tmpV, zero);
%}
%}
instruct repl2D_immI0(vecX dst, immI_0 zero) %{
match(Set dst (ReplicateD zero));
predicate(n->as_Vector()->length() == 2);
format %{ "XXLXOR $dst, $zero \t// replicate2D" %}
size(4);
ins_encode %{
__ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
%}
ins_pipe(pipe_class_default);
%}
instruct repl2D_immIminus1(vecX dst, immI_minus1 src) %{
match(Set dst (ReplicateD src));
predicate(n->as_Vector()->length() == 2);
format %{ "XXLEQV $dst, $src \t// replicate16B" %}
size(4);
ins_encode %{
__ xxleqv($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
%}
ins_pipe(pipe_class_default);
%}
instruct mtvsrd(vecX dst, iRegLsrc src) %{
predicate(false);
effect(DEF dst, USE src);
format %{ "MTVSRD $dst, $src \t// Move to 16-byte register"%}
size(4);
ins_encode %{
__ mtvsrd($dst$$VectorSRegister, $src$$Register);
%}
ins_pipe(pipe_class_default);
%}
instruct xxspltd(vecX dst, vecX src, immI8 zero) %{
effect(DEF dst, USE src, USE zero);
format %{ "XXSPLATD $dst, $src, $zero \t// Permute 16-byte register"%}
size(4);
ins_encode %{
__ xxpermdi($dst$$VectorSRegister, $src$$VectorSRegister, $src$$VectorSRegister, $zero$$constant);
%}
ins_pipe(pipe_class_default);
%}
instruct xxpermdi(vecX dst, vecX src1, vecX src2, immI8 zero) %{
effect(DEF dst, USE src1, USE src2, USE zero);
format %{ "XXPERMDI $dst, $src1, $src2, $zero \t// Permute 16-byte register"%}
size(4);
ins_encode %{
__ xxpermdi($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister, $zero$$constant);
%}
ins_pipe(pipe_class_default);
%}
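The xxpermdi node above wraps the XXPERMDI doubleword permute; a rough C++ model of the commonly documented selection semantics, given here as an assumption to show why an immediate of 0 produces a splat (consult the ISA manual for the authoritative definition):
struct Vsr { uint64_t dw[2]; };   // illustrative VSR model
Vsr xxpermdi_model(Vsr a, Vsr b, unsigned dm) {
  Vsr r;
  r.dw[0] = a.dw[(dm >> 1) & 1];  // high DM bit selects the doubleword from src1
  r.dw[1] = b.dw[dm & 1];         // low DM bit selects the doubleword from src2
  return r;                       // dm == 0 duplicates doubleword 0 of both sources
}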
instruct repl2L_reg_Ex(vecX dst, iRegLsrc src) %{
match(Set dst (ReplicateL src));
predicate(n->as_Vector()->length() == 2);
expand %{
vecX tmpV;
immI8 zero %{ (int) 0 %}
mtvsrd(tmpV, src);
xxpermdi(dst, tmpV, tmpV, zero);
%}
%}
instruct repl2L_immI0(vecX dst, immI_0 zero) %{
match(Set dst (ReplicateL zero));
predicate(n->as_Vector()->length() == 2);
format %{ "XXLXOR $dst, $zero \t// replicate2L" %}
size(4);
ins_encode %{
__ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
%}
ins_pipe(pipe_class_default);
%}
instruct repl2L_immIminus1(vecX dst, immI_minus1 src) %{
match(Set dst (ReplicateL src));
predicate(n->as_Vector()->length() == 2);
format %{ "XXLEQV $dst, $src \t// replicate16B" %}
size(4);
ins_encode %{
__ xxleqv($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
%}
ins_pipe(pipe_class_default);
%}
// ============================================================================ // ============================================================================
// Safepoint Instruction // Safepoint Instruction
View file
@ -31,3 +31,5 @@
REGISTER_DEFINITION(Register, noreg); REGISTER_DEFINITION(Register, noreg);
REGISTER_DEFINITION(FloatRegister, fnoreg); REGISTER_DEFINITION(FloatRegister, fnoreg);
REGISTER_DEFINITION(VectorSRegister, vsnoreg);
View file
@ -677,7 +677,7 @@ class ConcreteRegisterImpl : public AbstractRegisterImpl {
* 2 // register halves * 2 // register halves
+ ConditionRegisterImpl::number_of_registers // condition code registers + ConditionRegisterImpl::number_of_registers // condition code registers
+ SpecialRegisterImpl::number_of_registers // special registers + SpecialRegisterImpl::number_of_registers // special registers
+ VectorRegisterImpl::number_of_registers // VSX registers + VectorSRegisterImpl::number_of_registers // VSX registers
}; };
static const int max_gpr; static const int max_gpr;
View file
@ -479,8 +479,8 @@ void RegisterSaver::restore_result_registers(MacroAssembler* masm, int frame_siz
// Is vector's size (in bytes) bigger than a size saved by default? // Is vector's size (in bytes) bigger than a size saved by default?
bool SharedRuntime::is_wide_vector(int size) { bool SharedRuntime::is_wide_vector(int size) {
// Note, MaxVectorSize == 8 on PPC64. // Note, MaxVectorSize == 8/16 on PPC64.
assert(size <= 8, "%d bytes vectors are not supported", size); assert(size <= (SuperwordUseVSX ? 16 : 8), "%d bytes vectors are not supported", size);
return size > 8; return size > 8;
} }
@ -2234,9 +2234,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ release(); __ release();
// TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
__ stw(R0, thread_(thread_state)); __ stw(R0, thread_(thread_state));
if (UseMembar) {
__ fence();
}
// The JNI call // The JNI call
@ -2393,9 +2390,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ release(); __ release();
// TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
__ stw(R0, thread_(thread_state)); __ stw(R0, thread_(thread_state));
if (UseMembar) {
__ fence();
}
__ bind(after_transition); __ bind(after_transition);
// Reguard any pages if necessary. // Reguard any pages if necessary.
View file
@ -1470,10 +1470,6 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// TODO PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size"); // TODO PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
__ stw(R0, thread_(thread_state)); __ stw(R0, thread_(thread_state));
if (UseMembar) {
__ fence();
}
//============================================================================= //=============================================================================
// Call the native method. Argument registers must not have been // Call the native method. Argument registers must not have been
// overwritten since "__ call_stub(signature_handler);" (except for // overwritten since "__ call_stub(signature_handler);" (except for
@ -1594,9 +1590,6 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
__ li(R0/*thread_state*/, _thread_in_Java); __ li(R0/*thread_state*/, _thread_in_Java);
__ release(); __ release();
__ stw(R0/*thread_state*/, thread_(thread_state)); __ stw(R0/*thread_state*/, thread_(thread_state));
if (UseMembar) {
__ fence();
}
if (CheckJNICalls) { if (CheckJNICalls) {
// clear_pending_jni_exception_check // clear_pending_jni_exception_check
View file
@ -2224,6 +2224,7 @@ void TemplateTable::load_field_cp_cache_entry(Register Robj,
if (is_static) { if (is_static) {
__ ld(Robj, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f1_offset()), Rcache); __ ld(Robj, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f1_offset()), Rcache);
__ ld(Robj, in_bytes(Klass::java_mirror_offset()), Robj); __ ld(Robj, in_bytes(Klass::java_mirror_offset()), Robj);
__ resolve_oop_handle(Robj);
// Acquire not needed here. Following access has an address dependency on this value. // Acquire not needed here. Following access has an address dependency on this value.
} }
} }
View file
@ -107,7 +107,17 @@ void VM_Version::initialize() {
// TODO: PPC port PdScheduling::power6SectorSize = 0x20; // TODO: PPC port PdScheduling::power6SectorSize = 0x20;
} }
MaxVectorSize = 8; if (PowerArchitecturePPC64 >= 8) {
if (FLAG_IS_DEFAULT(SuperwordUseVSX)) {
FLAG_SET_ERGO(bool, SuperwordUseVSX, true);
}
} else {
if (SuperwordUseVSX) {
warning("SuperwordUseVSX specified, but needs at least Power8.");
FLAG_SET_DEFAULT(SuperwordUseVSX, false);
}
}
MaxVectorSize = SuperwordUseVSX ? 16 : 8;
#endif #endif
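In practice this means a Power8 (or newer) linux-ppc64 build now runs with SuperwordUseVSX ergonomically enabled and MaxVectorSize 16 by default, while explicitly passing -XX:+SuperwordUseVSX on older hardware prints the warning above and falls back to 8.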
// Create and print feature-string. // Create and print feature-string.
View file
@ -1276,6 +1276,13 @@ class Assembler : public AbstractAssembler {
// Test under Mask // Test under Mask
#define VTM_ZOPC (unsigned long)(0xe7L << 40 | 0xd8L << 0) // Like TM, set CC according to state of selected bits. #define VTM_ZOPC (unsigned long)(0xe7L << 40 | 0xd8L << 0) // Like TM, set CC according to state of selected bits.
//---< Vector String Instructions >---
#define VFAE_ZOPC (unsigned long)(0xe7L << 40 | 0x82L << 0) // Find any element
#define VFEE_ZOPC (unsigned long)(0xe7L << 40 | 0x80L << 0) // Find element equal
#define VFENE_ZOPC (unsigned long)(0xe7L << 40 | 0x81L << 0) // Find element not equal
#define VSTRC_ZOPC (unsigned long)(0xe7L << 40 | 0x8aL << 0) // String range compare
#define VISTR_ZOPC (unsigned long)(0xe7L << 40 | 0x5cL << 0) // Isolate String
//-------------------------------- //--------------------------------
//-- Miscellaneous Operations -- //-- Miscellaneous Operations --
@ -1475,10 +1482,18 @@ class Assembler : public AbstractAssembler {
VRET_QW = 4 VRET_QW = 4
}; };
// Vector Operation Condition Code Control. // Vector Operation Result Control.
enum VOpCCC { // This is a set of flags used in some vector instructions to control
VOP_CCIGN = 0, // ignore, don't set CC // the result (side) effects of instruction execution.
VOP_CCSET = 1 // set the CC enum VOpRC {
VOPRC_CCSET = 0b0001, // set the CC.
VOPRC_CCIGN = 0b0000, // ignore, don't set CC.
VOPRC_ZS = 0b0010, // Zero Search. Additional, elementwise, comparison against zero.
VOPRC_NOZS = 0b0000, // No Zero Search.
VOPRC_RTBYTEIX = 0b0100, // generate byte index to lowest element with true comparison.
VOPRC_RTBITVEC = 0b0000, // generate bit vector, all 1s for true, all 0s for false element comparisons.
VOPRC_INVERT = 0b1000, // invert comparison results.
VOPRC_NOINVERT = 0b0000 // use comparison results as is, do not invert.
}; };
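Each flag occupies its own bit and the *_NO* / VOPRC_CCIGN variants are zero, so callers compose a result-control operand with bitwise OR; a one-line illustration assuming this enum is in scope:
const int64_t rc = VOPRC_RTBYTEIX | VOPRC_ZS | VOPRC_CCSET;  // 0b0111: byte index, zero search, set CC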
// Inverse condition code, i.e. determine "15 - cc" for a given condition code cc. // Inverse condition code, i.e. determine "15 - cc" for a given condition code cc.
@ -1625,10 +1640,15 @@ class Assembler : public AbstractAssembler {
return uimm4(ix, pos, 48); return uimm4(ix, pos, 48);
} }
// Vector Operation Condition Code Control. 4-bit field, one bit of which indicates if the condition code is to be set by the operation. // Vector Operation Result Control. 4-bit field.
static int64_t vccc_mask(int64_t flag, int pos) { static int64_t voprc_any(int64_t flags, int pos, int64_t allowed_flags = 0b1111) {
assert((flag == VOP_CCIGN) || (flag == VOP_CCSET), "VCCC flag value out of range"); assert((flags & allowed_flags) == flags, "Invalid VOPRC_* flag combination: %d", (int)flags);
return uimm4(flag, pos, 48); return uimm4(flags, pos, 48);
}
// Vector Operation Result Control. Condition code setting.
static int64_t voprc_ccmask(int64_t flags, int pos) {
return voprc_any(flags, pos, VOPRC_CCIGN | VOPRC_CCSET);
} }
public: public:
@ -2772,6 +2792,31 @@ class Assembler : public AbstractAssembler {
// Test under Mask // Test under Mask
inline void z_vtm( VectorRegister v1, VectorRegister v2); inline void z_vtm( VectorRegister v1, VectorRegister v2);
//---< Vector String Instructions >---
inline void z_vfae( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t imm4, int64_t cc5); // Find any element
inline void z_vfaeb( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t cc5);
inline void z_vfaeh( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t cc5);
inline void z_vfaef( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t cc5);
inline void z_vfee( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t imm4, int64_t cc5); // Find element equal
inline void z_vfeeb( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t cc5);
inline void z_vfeeh( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t cc5);
inline void z_vfeef( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t cc5);
inline void z_vfene( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t imm4, int64_t cc5); // Find element not equal
inline void z_vfeneb( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t cc5);
inline void z_vfeneh( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t cc5);
inline void z_vfenef( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t cc5);
inline void z_vstrc( VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t imm5, int64_t cc6); // String range compare
inline void z_vstrcb( VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t cc6);
inline void z_vstrch( VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t cc6);
inline void z_vstrcf( VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t cc6);
inline void z_vistr( VectorRegister v1, VectorRegister v2, int64_t imm3, int64_t cc5); // Isolate String
inline void z_vistrb( VectorRegister v1, VectorRegister v2, int64_t cc5);
inline void z_vistrh( VectorRegister v1, VectorRegister v2, int64_t cc5);
inline void z_vistrf( VectorRegister v1, VectorRegister v2, int64_t cc5);
inline void z_vistrbs(VectorRegister v1, VectorRegister v2);
inline void z_vistrhs(VectorRegister v1, VectorRegister v2);
inline void z_vistrfs(VectorRegister v1, VectorRegister v2);
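As a hedged usage sketch (not part of this change; the Z_V register names and the surrounding MacroAssembler context are assumed), a strlen-style zero scan would combine find-element-not-equal with the zero-search flag:
// Compare the chunk in Z_V2 against itself: no element ever mismatches,
// so with VOPRC_ZS the reported hit is the first zero byte, and
// VOPRC_CCSET makes the outcome testable via the condition code.
__ z_vfeneb(Z_V0, Z_V2, Z_V2, VOPRC_ZS | VOPRC_CCSET);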
// Floatingpoint instructions // Floatingpoint instructions
// ========================== // ==========================
View file
@ -762,21 +762,21 @@ inline void Assembler::z_vpkh( VectorRegister v1, VectorRegister v2, VectorReg
inline void Assembler::z_vpkf( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpk(v1, v2, v3, VRET_FW); } // vector element type 'F' inline void Assembler::z_vpkf( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpk(v1, v2, v3, VRET_FW); } // vector element type 'F'
inline void Assembler::z_vpkg( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpk(v1, v2, v3, VRET_DW); } // vector element type 'G' inline void Assembler::z_vpkg( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpk(v1, v2, v3, VRET_DW); } // vector element type 'G'
inline void Assembler::z_vpks( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4, int64_t cc5) {emit_48(VPKS_ZOPC | vreg(v1, 8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_HW, VRET_DW, 32) | vccc_mask(cc5, 24)); } inline void Assembler::z_vpks( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4, int64_t cc5) {emit_48(VPKS_ZOPC | vreg(v1, 8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_HW, VRET_DW, 32) | voprc_ccmask(cc5, 24)); }
inline void Assembler::z_vpksh( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpks(v1, v2, v3, VRET_HW, VOP_CCIGN); } // vector element type 'H', don't set CC inline void Assembler::z_vpksh( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpks(v1, v2, v3, VRET_HW, VOPRC_CCIGN); } // vector element type 'H', don't set CC
inline void Assembler::z_vpksf( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpks(v1, v2, v3, VRET_FW, VOP_CCIGN); } // vector element type 'F', don't set CC inline void Assembler::z_vpksf( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpks(v1, v2, v3, VRET_FW, VOPRC_CCIGN); } // vector element type 'F', don't set CC
inline void Assembler::z_vpksg( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpks(v1, v2, v3, VRET_DW, VOP_CCIGN); } // vector element type 'G', don't set CC inline void Assembler::z_vpksg( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpks(v1, v2, v3, VRET_DW, VOPRC_CCIGN); } // vector element type 'G', don't set CC
inline void Assembler::z_vpkshs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpks(v1, v2, v3, VRET_HW, VOP_CCSET); } // vector element type 'H', set CC inline void Assembler::z_vpkshs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpks(v1, v2, v3, VRET_HW, VOPRC_CCSET); } // vector element type 'H', set CC
inline void Assembler::z_vpksfs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpks(v1, v2, v3, VRET_FW, VOP_CCSET); } // vector element type 'F', set CC inline void Assembler::z_vpksfs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpks(v1, v2, v3, VRET_FW, VOPRC_CCSET); } // vector element type 'F', set CC
inline void Assembler::z_vpksgs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpks(v1, v2, v3, VRET_DW, VOP_CCSET); } // vector element type 'G', set CC inline void Assembler::z_vpksgs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpks(v1, v2, v3, VRET_DW, VOPRC_CCSET); } // vector element type 'G', set CC
inline void Assembler::z_vpkls( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4, int64_t cc5) {emit_48(VPKLS_ZOPC | vreg(v1, 8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_HW, VRET_DW, 32) | vccc_mask(cc5, 24)); } inline void Assembler::z_vpkls( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4, int64_t cc5) {emit_48(VPKLS_ZOPC | vreg(v1, 8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_HW, VRET_DW, 32) | voprc_ccmask(cc5, 24)); }
inline void Assembler::z_vpklsh( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpkls(v1, v2, v3, VRET_HW, VOP_CCIGN); } // vector element type 'H', don't set CC inline void Assembler::z_vpklsh( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpkls(v1, v2, v3, VRET_HW, VOPRC_CCIGN); } // vector element type 'H', don't set CC
inline void Assembler::z_vpklsf( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpkls(v1, v2, v3, VRET_FW, VOP_CCIGN); } // vector element type 'F', don't set CC inline void Assembler::z_vpklsf( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpkls(v1, v2, v3, VRET_FW, VOPRC_CCIGN); } // vector element type 'F', don't set CC
inline void Assembler::z_vpklsg( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpkls(v1, v2, v3, VRET_DW, VOP_CCIGN); } // vector element type 'G', don't set CC inline void Assembler::z_vpklsg( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpkls(v1, v2, v3, VRET_DW, VOPRC_CCIGN); } // vector element type 'G', don't set CC
inline void Assembler::z_vpklshs(VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpkls(v1, v2, v3, VRET_HW, VOP_CCSET); } // vector element type 'H', set CC inline void Assembler::z_vpklshs(VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpkls(v1, v2, v3, VRET_HW, VOPRC_CCSET); } // vector element type 'H', set CC
inline void Assembler::z_vpklsfs(VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpkls(v1, v2, v3, VRET_FW, VOP_CCSET); } // vector element type 'F', set CC inline void Assembler::z_vpklsfs(VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpkls(v1, v2, v3, VRET_FW, VOPRC_CCSET); } // vector element type 'F', set CC
inline void Assembler::z_vpklsgs(VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpkls(v1, v2, v3, VRET_DW, VOP_CCSET); } // vector element type 'G', set CC inline void Assembler::z_vpklsgs(VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vpkls(v1, v2, v3, VRET_DW, VOPRC_CCSET); } // vector element type 'G', set CC
// vector register unpack (sign-extended) // vector register unpack (sign-extended)
inline void Assembler::z_vuph( VectorRegister v1, VectorRegister v2, int64_t m3) {emit_48(VUPH_ZOPC | vreg(v1, 8) | vreg(v2, 12) | vesc_mask(m3, VRET_BYTE, VRET_FW, 32)); } inline void Assembler::z_vuph( VectorRegister v1, VectorRegister v2, int64_t m3) {emit_48(VUPH_ZOPC | vreg(v1, 8) | vreg(v2, 12) | vesc_mask(m3, VRET_BYTE, VRET_FW, 32)); }
@ -967,33 +967,33 @@ inline void Assembler::z_vno( VectorRegister v1, VectorRegister v2, VectorReg
inline void Assembler::z_vo( VectorRegister v1, VectorRegister v2, VectorRegister v3) {emit_48(VO_ZOPC | vreg(v1, 8) | vreg(v2, 12) | vreg(v3, 16)); } inline void Assembler::z_vo( VectorRegister v1, VectorRegister v2, VectorRegister v3) {emit_48(VO_ZOPC | vreg(v1, 8) | vreg(v2, 12) | vreg(v3, 16)); }
// Comparison (element-wise) // Comparison (element-wise)
inline void Assembler::z_vceq( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4, int64_t cc5) {emit_48(VCEQ_ZOPC | vreg(v1, 8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_DW, 32) | vccc_mask(cc5, 24)); } inline void Assembler::z_vceq( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4, int64_t cc5) {emit_48(VCEQ_ZOPC | vreg(v1, 8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_DW, 32) | voprc_ccmask(cc5, 24)); }
inline void Assembler::z_vceqb( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vceq(v1, v2, v3, VRET_BYTE, VOP_CCIGN); } // vector element type 'B', don't set CC inline void Assembler::z_vceqb( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vceq(v1, v2, v3, VRET_BYTE, VOPRC_CCIGN); } // vector element type 'B', don't set CC
inline void Assembler::z_vceqh( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vceq(v1, v2, v3, VRET_HW, VOP_CCIGN); } // vector element type 'H', don't set CC inline void Assembler::z_vceqh( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vceq(v1, v2, v3, VRET_HW, VOPRC_CCIGN); } // vector element type 'H', don't set CC
inline void Assembler::z_vceqf( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vceq(v1, v2, v3, VRET_FW, VOP_CCIGN); } // vector element type 'F', don't set CC inline void Assembler::z_vceqf( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vceq(v1, v2, v3, VRET_FW, VOPRC_CCIGN); } // vector element type 'F', don't set CC
inline void Assembler::z_vceqg( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vceq(v1, v2, v3, VRET_DW, VOP_CCIGN); } // vector element type 'G', don't set CC inline void Assembler::z_vceqg( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vceq(v1, v2, v3, VRET_DW, VOPRC_CCIGN); } // vector element type 'G', don't set CC
inline void Assembler::z_vceqbs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vceq(v1, v2, v3, VRET_BYTE, VOP_CCSET); } // vector element type 'B', set CC inline void Assembler::z_vceqbs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vceq(v1, v2, v3, VRET_BYTE, VOPRC_CCSET); } // vector element type 'B', set CC
inline void Assembler::z_vceqhs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vceq(v1, v2, v3, VRET_HW, VOP_CCSET); } // vector element type 'H', set CC inline void Assembler::z_vceqhs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vceq(v1, v2, v3, VRET_HW, VOPRC_CCSET); } // vector element type 'H', set CC
inline void Assembler::z_vceqfs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vceq(v1, v2, v3, VRET_FW, VOP_CCSET); } // vector element type 'F', set CC inline void Assembler::z_vceqfs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vceq(v1, v2, v3, VRET_FW, VOPRC_CCSET); } // vector element type 'F', set CC
inline void Assembler::z_vceqgs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vceq(v1, v2, v3, VRET_DW, VOP_CCSET); } // vector element type 'G', set CC inline void Assembler::z_vceqgs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vceq(v1, v2, v3, VRET_DW, VOPRC_CCSET); } // vector element type 'G', set CC
inline void Assembler::z_vch( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4, int64_t cc5) {emit_48(VCH_ZOPC | vreg(v1, 8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_DW, 32) | vccc_mask(cc5, 24)); } inline void Assembler::z_vch( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4, int64_t cc5) {emit_48(VCH_ZOPC | vreg(v1, 8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_DW, 32) | voprc_ccmask(cc5, 24)); }
inline void Assembler::z_vchb( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vch(v1, v2, v3, VRET_BYTE, VOP_CCIGN); } // vector element type 'B', don't set CC inline void Assembler::z_vchb( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vch(v1, v2, v3, VRET_BYTE, VOPRC_CCIGN); } // vector element type 'B', don't set CC
inline void Assembler::z_vchh( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vch(v1, v2, v3, VRET_HW, VOP_CCIGN); } // vector element type 'H', don't set CC inline void Assembler::z_vchh( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vch(v1, v2, v3, VRET_HW, VOPRC_CCIGN); } // vector element type 'H', don't set CC
inline void Assembler::z_vchf( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vch(v1, v2, v3, VRET_FW, VOP_CCIGN); } // vector element type 'F', don't set CC inline void Assembler::z_vchf( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vch(v1, v2, v3, VRET_FW, VOPRC_CCIGN); } // vector element type 'F', don't set CC
inline void Assembler::z_vchg( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vch(v1, v2, v3, VRET_DW, VOP_CCIGN); } // vector element type 'G', don't set CC inline void Assembler::z_vchg( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vch(v1, v2, v3, VRET_DW, VOPRC_CCIGN); } // vector element type 'G', don't set CC
inline void Assembler::z_vchbs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vch(v1, v2, v3, VRET_BYTE, VOP_CCSET); } // vector element type 'B', set CC inline void Assembler::z_vchbs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vch(v1, v2, v3, VRET_BYTE, VOPRC_CCSET); } // vector element type 'B', set CC
inline void Assembler::z_vchhs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vch(v1, v2, v3, VRET_HW, VOP_CCSET); } // vector element type 'H', set CC inline void Assembler::z_vchhs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vch(v1, v2, v3, VRET_HW, VOPRC_CCSET); } // vector element type 'H', set CC
inline void Assembler::z_vchfs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vch(v1, v2, v3, VRET_FW, VOP_CCSET); } // vector element type 'F', set CC inline void Assembler::z_vchfs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vch(v1, v2, v3, VRET_FW, VOPRC_CCSET); } // vector element type 'F', set CC
inline void Assembler::z_vchgs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vch(v1, v2, v3, VRET_DW, VOP_CCSET); } // vector element type 'G', set CC inline void Assembler::z_vchgs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vch(v1, v2, v3, VRET_DW, VOPRC_CCSET); } // vector element type 'G', set CC
inline void Assembler::z_vchl( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4, int64_t cc5) {emit_48(VCHL_ZOPC | vreg(v1, 8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_DW, 32) | vccc_mask(cc5, 24)); } inline void Assembler::z_vchl( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4, int64_t cc5) {emit_48(VCHL_ZOPC | vreg(v1, 8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_DW, 32) | voprc_ccmask(cc5, 24)); }
inline void Assembler::z_vchlb( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vchl(v1, v2, v3, VRET_BYTE, VOP_CCIGN); } // vector element type 'B', don't set CC inline void Assembler::z_vchlb( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vchl(v1, v2, v3, VRET_BYTE, VOPRC_CCIGN); } // vector element type 'B', don't set CC
inline void Assembler::z_vchlh( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vchl(v1, v2, v3, VRET_HW, VOP_CCIGN); } // vector element type 'H', don't set CC inline void Assembler::z_vchlh( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vchl(v1, v2, v3, VRET_HW, VOPRC_CCIGN); } // vector element type 'H', don't set CC
inline void Assembler::z_vchlf( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vchl(v1, v2, v3, VRET_FW, VOP_CCIGN); } // vector element type 'F', don't set CC inline void Assembler::z_vchlf( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vchl(v1, v2, v3, VRET_FW, VOPRC_CCIGN); } // vector element type 'F', don't set CC
inline void Assembler::z_vchlg( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vchl(v1, v2, v3, VRET_DW, VOP_CCIGN); } // vector element type 'G', don't set CC inline void Assembler::z_vchlg( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vchl(v1, v2, v3, VRET_DW, VOPRC_CCIGN); } // vector element type 'G', don't set CC
inline void Assembler::z_vchlbs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vchl(v1, v2, v3, VRET_BYTE, VOP_CCSET); } // vector element type 'B', set CC inline void Assembler::z_vchlbs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vchl(v1, v2, v3, VRET_BYTE, VOPRC_CCSET); } // vector element type 'B', set CC
inline void Assembler::z_vchlhs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vchl(v1, v2, v3, VRET_HW, VOP_CCSET); } // vector element type 'H', set CC inline void Assembler::z_vchlhs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vchl(v1, v2, v3, VRET_HW, VOPRC_CCSET); } // vector element type 'H', set CC
inline void Assembler::z_vchlfs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vchl(v1, v2, v3, VRET_FW, VOP_CCSET); } // vector element type 'F', set CC inline void Assembler::z_vchlfs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vchl(v1, v2, v3, VRET_FW, VOPRC_CCSET); } // vector element type 'F', set CC
inline void Assembler::z_vchlgs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vchl(v1, v2, v3, VRET_DW, VOP_CCSET); } // vector element type 'G', set CC inline void Assembler::z_vchlgs( VectorRegister v1, VectorRegister v2, VectorRegister v3) {z_vchl(v1, v2, v3, VRET_DW, VOPRC_CCSET); } // vector element type 'G', set CC
// Max/Min (element-wise) // Max/Min (element-wise)
inline void Assembler::z_vmx( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4) {emit_48(VMX_ZOPC | vreg(v1, 8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_DW, 32)); } inline void Assembler::z_vmx( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t m4) {emit_48(VMX_ZOPC | vreg(v1, 8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(m4, VRET_BYTE, VRET_DW, 32)); }
@ -1091,6 +1091,31 @@ inline void Assembler::z_vsrlb( VectorRegister v1, VectorRegister v2, VectorReg
// Test under Mask // Test under Mask
inline void Assembler::z_vtm( VectorRegister v1, VectorRegister v2) {emit_48(VTM_ZOPC | vreg(v1, 8) | vreg(v2, 12)); } inline void Assembler::z_vtm( VectorRegister v1, VectorRegister v2) {emit_48(VTM_ZOPC | vreg(v1, 8) | vreg(v2, 12)); }
//---< Vector String Instructions >---
inline void Assembler::z_vfae( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t imm4, int64_t cc5) {emit_48(VFAE_ZOPC | vreg(v1, 8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(imm4, VRET_BYTE, VRET_FW, 32) | voprc_any(cc5, 24) ); } // Find any element
inline void Assembler::z_vfaeb( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t cc5) {z_vfae(v1, v2, v3, VRET_BYTE, cc5); }
inline void Assembler::z_vfaeh( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t cc5) {z_vfae(v1, v2, v3, VRET_HW, cc5); }
inline void Assembler::z_vfaef( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t cc5) {z_vfae(v1, v2, v3, VRET_FW, cc5); }
inline void Assembler::z_vfee( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t imm4, int64_t cc5) {emit_48(VFEE_ZOPC | vreg(v1, 8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(imm4, VRET_BYTE, VRET_FW, 32) | voprc_any(cc5, 24) ); } // Find element equal
inline void Assembler::z_vfeeb( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t cc5) {z_vfee(v1, v2, v3, VRET_BYTE, cc5); }
inline void Assembler::z_vfeeh( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t cc5) {z_vfee(v1, v2, v3, VRET_HW, cc5); }
inline void Assembler::z_vfeef( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t cc5) {z_vfee(v1, v2, v3, VRET_FW, cc5); }
inline void Assembler::z_vfene( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t imm4, int64_t cc5) {emit_48(VFENE_ZOPC | vreg(v1, 8) | vreg(v2, 12) | vreg(v3, 16) | vesc_mask(imm4, VRET_BYTE, VRET_FW, 32) | voprc_any(cc5, 24) ); } // Find element not equal
inline void Assembler::z_vfeneb( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t cc5) {z_vfene(v1, v2, v3, VRET_BYTE, cc5); }
inline void Assembler::z_vfeneh( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t cc5) {z_vfene(v1, v2, v3, VRET_HW, cc5); }
inline void Assembler::z_vfenef( VectorRegister v1, VectorRegister v2, VectorRegister v3, int64_t cc5) {z_vfene(v1, v2, v3, VRET_FW, cc5); }
inline void Assembler::z_vstrc( VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t imm5, int64_t cc6) {emit_48(VSTRC_ZOPC | vreg(v1, 8) | vreg(v2, 12) | vreg(v3, 16) | vreg(v4, 32) | vesc_mask(imm5, VRET_BYTE, VRET_FW, 20) | voprc_any(cc6, 24) ); } // String range compare
inline void Assembler::z_vstrcb( VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t cc6) {z_vstrc(v1, v2, v3, v4, VRET_BYTE, cc6); }
inline void Assembler::z_vstrch( VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t cc6) {z_vstrc(v1, v2, v3, v4, VRET_HW, cc6); }
inline void Assembler::z_vstrcf( VectorRegister v1, VectorRegister v2, VectorRegister v3, VectorRegister v4, int64_t cc6) {z_vstrc(v1, v2, v3, v4, VRET_FW, cc6); }
inline void Assembler::z_vistr( VectorRegister v1, VectorRegister v2, int64_t imm3, int64_t cc5) {emit_48(VISTR_ZOPC | vreg(v1, 8) | vreg(v2, 12) | vesc_mask(imm3, VRET_BYTE, VRET_FW, 32) | voprc_any(cc5, 24) ); } // isolate string
inline void Assembler::z_vistrb( VectorRegister v1, VectorRegister v2, int64_t cc5) {z_vistr(v1, v2, VRET_BYTE, cc5); }
inline void Assembler::z_vistrh( VectorRegister v1, VectorRegister v2, int64_t cc5) {z_vistr(v1, v2, VRET_HW, cc5); }
inline void Assembler::z_vistrf( VectorRegister v1, VectorRegister v2, int64_t cc5) {z_vistr(v1, v2, VRET_FW, cc5); }
inline void Assembler::z_vistrbs(VectorRegister v1, VectorRegister v2) {z_vistr(v1, v2, VRET_BYTE, VOPRC_CCSET); }
inline void Assembler::z_vistrhs(VectorRegister v1, VectorRegister v2) {z_vistr(v1, v2, VRET_HW, VOPRC_CCSET); }
inline void Assembler::z_vistrfs(VectorRegister v1, VectorRegister v2) {z_vistr(v1, v2, VRET_FW, VOPRC_CCSET); }
//------------------------------- //-------------------------------
// FLOAT INSTRUCTIONS // FLOAT INSTRUCTIONS
View file
@ -1,6 +1,6 @@
/* /*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2017 Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved. * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -34,7 +34,7 @@
// Sorted according to sparc. // Sorted according to sparc.
// z/Architecture remembers branch targets, so don't share vtables. // z/Architecture remembers branch targets, so don't share vtables.
define_pd_global(bool, ShareVtableStubs, false); define_pd_global(bool, ShareVtableStubs, true);
define_pd_global(bool, NeedsDeoptSuspend, false); // Only register window machines need this. define_pd_global(bool, NeedsDeoptSuspend, false); // Only register window machines need this.
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks. define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks.
View file
@ -4671,6 +4671,7 @@ void MacroAssembler::load_mirror(Register mirror, Register method) {
mem2reg_opt(mirror, Address(mirror, ConstMethod::constants_offset())); mem2reg_opt(mirror, Address(mirror, ConstMethod::constants_offset()));
mem2reg_opt(mirror, Address(mirror, ConstantPool::pool_holder_offset_in_bytes())); mem2reg_opt(mirror, Address(mirror, ConstantPool::pool_holder_offset_in_bytes()));
mem2reg_opt(mirror, Address(mirror, Klass::java_mirror_offset())); mem2reg_opt(mirror, Address(mirror, Klass::java_mirror_offset()));
resolve_oop_handle(mirror);
} }
//--------------------------------------------------------------- //---------------------------------------------------------------
View file
@ -2382,6 +2382,7 @@ void TemplateTable::load_field_cp_cache_entry(Register obj,
if (is_static) { if (is_static) {
__ mem2reg_opt(obj, Address(cache, index, cp_base_offset + ConstantPoolCacheEntry::f1_offset())); __ mem2reg_opt(obj, Address(cache, index, cp_base_offset + ConstantPoolCacheEntry::f1_offset()));
__ mem2reg_opt(obj, Address(obj, Klass::java_mirror_offset())); __ mem2reg_opt(obj, Address(obj, Klass::java_mirror_offset()));
__ resolve_oop_handle(obj);
} }
} }
View file
@ -3844,6 +3844,7 @@ void MacroAssembler::load_mirror(Register mirror, Register method) {
ld_ptr(mirror, in_bytes(ConstMethod::constants_offset()), mirror); ld_ptr(mirror, in_bytes(ConstMethod::constants_offset()), mirror);
ld_ptr(mirror, ConstantPool::pool_holder_offset_in_bytes(), mirror); ld_ptr(mirror, ConstantPool::pool_holder_offset_in_bytes(), mirror);
ld_ptr(mirror, mirror_offset, mirror); ld_ptr(mirror, mirror_offset, mirror);
resolve_oop_handle(mirror);
} }
void MacroAssembler::load_klass(Register src_oop, Register klass) { void MacroAssembler::load_klass(Register src_oop, Register klass) {
View file
@ -2049,6 +2049,7 @@ void TemplateTable::load_field_cp_cache_entry(Register Robj,
__ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f1_offset(), Robj); __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f1_offset(), Robj);
const int mirror_offset = in_bytes(Klass::java_mirror_offset()); const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ ld_ptr( Robj, mirror_offset, Robj); __ ld_ptr( Robj, mirror_offset, Robj);
__ resolve_oop_handle(Robj);
} }
} }
View file
@ -6617,6 +6617,7 @@ void MacroAssembler::load_mirror(Register mirror, Register method) {
movptr(mirror, Address(mirror, ConstMethod::constants_offset())); movptr(mirror, Address(mirror, ConstMethod::constants_offset()));
movptr(mirror, Address(mirror, ConstantPool::pool_holder_offset_in_bytes())); movptr(mirror, Address(mirror, ConstantPool::pool_holder_offset_in_bytes()));
movptr(mirror, Address(mirror, mirror_offset)); movptr(mirror, Address(mirror, mirror_offset));
resolve_oop_handle(mirror);
} }
void MacroAssembler::load_klass(Register dst, Register src) { void MacroAssembler::load_klass(Register dst, Register src) {
View file
@ -2665,6 +2665,7 @@ void TemplateTable::load_field_cp_cache_entry(Register obj,
ConstantPoolCacheEntry::f1_offset()))); ConstantPoolCacheEntry::f1_offset())));
const int mirror_offset = in_bytes(Klass::java_mirror_offset()); const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ movptr(obj, Address(obj, mirror_offset)); __ movptr(obj, Address(obj, mirror_offset));
__ resolve_oop_handle(obj);
} }
} }
View file
@ -46,7 +46,7 @@ address VM_Version::_cpuinfo_segv_addr = 0;
address VM_Version::_cpuinfo_cont_addr = 0; address VM_Version::_cpuinfo_cont_addr = 0;
static BufferBlob* stub_blob; static BufferBlob* stub_blob;
static const int stub_size = 1000; static const int stub_size = 1100;
extern "C" { extern "C" {
typedef void (*get_cpu_info_stub_t)(void*); typedef void (*get_cpu_info_stub_t)(void*);
@ -70,7 +70,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
bool use_evex = FLAG_IS_DEFAULT(UseAVX) || (UseAVX > 2); bool use_evex = FLAG_IS_DEFAULT(UseAVX) || (UseAVX > 2);
Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4; Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, done, wrapup; Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, ext_cpuid8, done, wrapup;
Label legacy_setup, save_restore_except, legacy_save_restore, start_simd_check; Label legacy_setup, save_restore_except, legacy_save_restore, start_simd_check;
StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub"); StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
@ -267,14 +267,30 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ cmpl(rax, 0x80000000); // Is cpuid(0x80000001) supported? __ cmpl(rax, 0x80000000); // Is cpuid(0x80000001) supported?
__ jcc(Assembler::belowEqual, done); __ jcc(Assembler::belowEqual, done);
__ cmpl(rax, 0x80000004); // Is cpuid(0x80000005) supported? __ cmpl(rax, 0x80000004); // Is cpuid(0x80000005) supported?
__ jccb(Assembler::belowEqual, ext_cpuid1); __ jcc(Assembler::belowEqual, ext_cpuid1);
__ cmpl(rax, 0x80000006); // Is cpuid(0x80000007) supported? __ cmpl(rax, 0x80000006); // Is cpuid(0x80000007) supported?
__ jccb(Assembler::belowEqual, ext_cpuid5); __ jccb(Assembler::belowEqual, ext_cpuid5);
__ cmpl(rax, 0x80000007); // Is cpuid(0x80000008) supported? __ cmpl(rax, 0x80000007); // Is cpuid(0x80000008) supported?
__ jccb(Assembler::belowEqual, ext_cpuid7); __ jccb(Assembler::belowEqual, ext_cpuid7);
__ cmpl(rax, 0x80000008); // Is cpuid(0x80000009 and above) supported?
__ jccb(Assembler::belowEqual, ext_cpuid8);
__ cmpl(rax, 0x8000001E); // Is cpuid(0x8000001E) supported?
__ jccb(Assembler::below, ext_cpuid8);
//
// Extended cpuid(0x8000001E)
//
__ movl(rax, 0x8000001E);
__ cpuid();
__ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1E_offset())));
__ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx);
__ movl(Address(rsi,12), rdx);
// //
// Extended cpuid(0x80000008) // Extended cpuid(0x80000008)
// //
__ bind(ext_cpuid8);
__ movl(rax, 0x80000008); __ movl(rax, 0x80000008);
__ cpuid(); __ cpuid();
__ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset()))); __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset())));
@ -1109,11 +1125,27 @@ void VM_Version::get_processor_features() {
} }
#ifdef COMPILER2 #ifdef COMPILER2
if (MaxVectorSize > 16) { if (cpu_family() < 0x17 && MaxVectorSize > 16) {
// Limit vector size to 16 bytes on current AMD cpus. // Limit vector size to 16 bytes on AMD cpus < 17h.
FLAG_SET_DEFAULT(MaxVectorSize, 16); FLAG_SET_DEFAULT(MaxVectorSize, 16);
} }
#endif // COMPILER2 #endif // COMPILER2
// Some defaults for AMD family 17h
if ( cpu_family() == 0x17 ) {
// On family 17h processors use XMM and UnalignedLoadStores for Array Copy
if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
FLAG_SET_DEFAULT(UseXMMForArrayCopy, true);
}
if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
FLAG_SET_DEFAULT(UseUnalignedLoadStores, true);
}
#ifdef COMPILER2
if (supports_sse4_2() && FLAG_IS_DEFAULT(UseFPUForSpilling)) {
FLAG_SET_DEFAULT(UseFPUForSpilling, true);
}
#endif
}
} }
if( is_intel() ) { // Intel cpus specific settings if( is_intel() ) { // Intel cpus specific settings

View file
} bits; } bits;
}; };
union ExtCpuid1EEbx {
uint32_t value;
struct {
uint32_t : 8,
threads_per_core : 8,
: 16;
} bits;
};
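The union maps EBX of CPUID leaf 0x8000001E; the same extraction without the bitfield, as a freestanding sketch mirroring the layout above:
#include <cstdint>
inline uint32_t threads_per_core_from_ebx(uint32_t ebx) {
  return (ebx >> 8) & 0xFF;  // bits 8..15; the hardware stores "threads minus one"
}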
union XemXcr0Eax { union XemXcr0Eax {
uint32_t value; uint32_t value;
struct { struct {
@ -398,6 +407,12 @@ protected:
ExtCpuid8Ecx ext_cpuid8_ecx; ExtCpuid8Ecx ext_cpuid8_ecx;
uint32_t ext_cpuid8_edx; // reserved uint32_t ext_cpuid8_edx; // reserved
// cpuid function 0x8000001E // AMD 17h
uint32_t ext_cpuid1E_eax;
ExtCpuid1EEbx ext_cpuid1E_ebx; // threads per core (AMD17h)
uint32_t ext_cpuid1E_ecx;
uint32_t ext_cpuid1E_edx; // unused currently
// extended control register XCR0 (the XFEATURE_ENABLED_MASK register) // extended control register XCR0 (the XFEATURE_ENABLED_MASK register)
XemXcr0Eax xem_xcr0_eax; XemXcr0Eax xem_xcr0_eax;
uint32_t xem_xcr0_edx; // reserved uint32_t xem_xcr0_edx; // reserved
@ -505,6 +520,14 @@ protected:
result |= CPU_CLMUL; result |= CPU_CLMUL;
if (_cpuid_info.sef_cpuid7_ebx.bits.rtm != 0) if (_cpuid_info.sef_cpuid7_ebx.bits.rtm != 0)
result |= CPU_RTM; result |= CPU_RTM;
if(_cpuid_info.sef_cpuid7_ebx.bits.adx != 0)
result |= CPU_ADX;
if(_cpuid_info.sef_cpuid7_ebx.bits.bmi2 != 0)
result |= CPU_BMI2;
if (_cpuid_info.sef_cpuid7_ebx.bits.sha != 0)
result |= CPU_SHA;
if (_cpuid_info.std_cpuid1_ecx.bits.fma != 0)
result |= CPU_FMA;
// AMD features. // AMD features.
if (is_amd()) { if (is_amd()) {
@ -518,16 +541,8 @@ protected:
} }
// Intel features. // Intel features.
if(is_intel()) { if(is_intel()) {
if(_cpuid_info.sef_cpuid7_ebx.bits.adx != 0)
result |= CPU_ADX;
if(_cpuid_info.sef_cpuid7_ebx.bits.bmi2 != 0)
result |= CPU_BMI2;
if (_cpuid_info.sef_cpuid7_ebx.bits.sha != 0)
result |= CPU_SHA;
if(_cpuid_info.ext_cpuid1_ecx.bits.lzcnt_intel != 0) if(_cpuid_info.ext_cpuid1_ecx.bits.lzcnt_intel != 0)
result |= CPU_LZCNT; result |= CPU_LZCNT;
if (_cpuid_info.std_cpuid1_ecx.bits.fma != 0)
result |= CPU_FMA;
// for Intel, ecx.bits.misalignsse bit (bit 8) indicates support for prefetchw // for Intel, ecx.bits.misalignsse bit (bit 8) indicates support for prefetchw
if (_cpuid_info.ext_cpuid1_ecx.bits.misalignsse != 0) { if (_cpuid_info.ext_cpuid1_ecx.bits.misalignsse != 0) {
result |= CPU_3DNOW_PREFETCH; result |= CPU_3DNOW_PREFETCH;
@ -590,6 +605,7 @@ public:
static ByteSize ext_cpuid5_offset() { return byte_offset_of(CpuidInfo, ext_cpuid5_eax); } static ByteSize ext_cpuid5_offset() { return byte_offset_of(CpuidInfo, ext_cpuid5_eax); }
static ByteSize ext_cpuid7_offset() { return byte_offset_of(CpuidInfo, ext_cpuid7_eax); } static ByteSize ext_cpuid7_offset() { return byte_offset_of(CpuidInfo, ext_cpuid7_eax); }
static ByteSize ext_cpuid8_offset() { return byte_offset_of(CpuidInfo, ext_cpuid8_eax); } static ByteSize ext_cpuid8_offset() { return byte_offset_of(CpuidInfo, ext_cpuid8_eax); }
static ByteSize ext_cpuid1E_offset() { return byte_offset_of(CpuidInfo, ext_cpuid1E_eax); }
static ByteSize tpl_cpuidB0_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB0_eax); } static ByteSize tpl_cpuidB0_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB0_eax); }
static ByteSize tpl_cpuidB1_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB1_eax); } static ByteSize tpl_cpuidB1_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB1_eax); }
static ByteSize tpl_cpuidB2_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB2_eax); } static ByteSize tpl_cpuidB2_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB2_eax); }
@ -673,9 +689,13 @@ public:
if (is_intel() && supports_processor_topology()) { if (is_intel() && supports_processor_topology()) {
result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus; result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
} else if (_cpuid_info.std_cpuid1_edx.bits.ht != 0) { } else if (_cpuid_info.std_cpuid1_edx.bits.ht != 0) {
if (cpu_family() >= 0x17) {
result = _cpuid_info.ext_cpuid1E_ebx.bits.threads_per_core + 1;
} else {
result = _cpuid_info.std_cpuid1_ebx.bits.threads_per_cpu / result = _cpuid_info.std_cpuid1_ebx.bits.threads_per_cpu /
cores_per_cpu(); cores_per_cpu();
} }
}
return (result == 0 ? 1 : result); return (result == 0 ? 1 : result);
} }
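Read concretely: on a family-17h part with SMT enabled the threads_per_core field reads 1, so the new branch reports 1 + 1 = 2 threads per core, and 0 + 1 = 1 with SMT off; older families keep the previous ratio of threads_per_cpu to cores_per_cpu.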
View file
@ -34,22 +34,6 @@
// Implementation of class atomic // Implementation of class atomic
inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; }
inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }
inline jlong Atomic::load(const volatile jlong* src) { return *src; }
// //
// machine barrier instructions: // machine barrier instructions:
// //
@ -148,13 +132,15 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
return result; return result;
} }
template<>
inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) { template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
// Note that xchg_ptr doesn't necessarily do an acquire // Note that xchg_ptr doesn't necessarily do an acquire
// (see synchronizer.cpp). // (see synchronizer.cpp).
unsigned int old_value; T old_value;
const uint64_t zero = 0; const uint64_t zero = 0;
__asm__ __volatile__ ( __asm__ __volatile__ (
@ -182,15 +168,18 @@ inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
"memory" "memory"
); );
return (jint) old_value; return old_value;
} }
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) { template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
// Note that xchg_ptr doesn't necessarily do an acquire // Note that xchg_ptr doesn't necessarily do an acquire
// (see synchronizer.cpp). // (see synchronizer.cpp).
long old_value; T old_value;
const uint64_t zero = 0; const uint64_t zero = 0;
__asm__ __volatile__ ( __asm__ __volatile__ (
@ -218,11 +207,7 @@ inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* des
"memory" "memory"
); );
return (intptr_t) old_value; return old_value;
}
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
} }
inline void cmpxchg_pre_membar(cmpxchg_memory_order order) { inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {

View file
// Implementation of class atomic // Implementation of class atomic
inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; }
inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }
template<size_t byte_size> template<size_t byte_size>
struct Atomic::PlatformAdd struct Atomic::PlatformAdd
: Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> > : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
@ -61,7 +48,11 @@ inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) co
return old_value; return old_value;
} }
inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) { template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
__asm__ volatile ( "xchgl (%2),%0" __asm__ volatile ( "xchgl (%2),%0"
: "=r" (exchange_value) : "=r" (exchange_value)
: "0" (exchange_value), "r" (dest) : "0" (exchange_value), "r" (dest)
@ -69,10 +60,6 @@ inline jint Atomic::xchg (jint exchange_value, volatile jint* des
return exchange_value; return exchange_value;
} }
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
}
template<> template<>
template<typename T> template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value, inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
@ -102,9 +89,6 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
} }
#ifdef AMD64 #ifdef AMD64
inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
template<> template<>
template<typename I, typename D> template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) const { inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) const {
@ -118,7 +102,11 @@ inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) co
return old_value; return old_value;
} }
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) { template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
__asm__ __volatile__ ("xchgq (%2),%0" __asm__ __volatile__ ("xchgq (%2),%0"
: "=r" (exchange_value) : "=r" (exchange_value)
: "0" (exchange_value), "r" (dest) : "0" (exchange_value), "r" (dest)
@ -140,14 +128,8 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
return exchange_value; return exchange_value;
} }
inline jlong Atomic::load(const volatile jlong* src) { return *src; }
#else // !AMD64 #else // !AMD64
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
}
extern "C" { extern "C" {
// defined in bsd_x86.s // defined in bsd_x86.s
jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool); jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
@ -164,18 +146,21 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
return cmpxchg_using_helper<jlong>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value); return cmpxchg_using_helper<jlong>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
} }
inline jlong Atomic::load(const volatile jlong* src) { template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
STATIC_ASSERT(8 == sizeof(T));
volatile jlong dest; volatile jlong dest;
_Atomic_move_long(src, &dest); _Atomic_move_long(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
return dest; return PrimitiveConversions::cast<T>(dest);
} }
inline void Atomic::store(jlong store_value, jlong* dest) { template<>
_Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest); template<typename T>
} inline void Atomic::PlatformStore<8>::operator()(T store_value,
T volatile* dest) const {
inline void Atomic::store(jlong store_value, volatile jlong* dest) { STATIC_ASSERT(8 == sizeof(T));
_Atomic_move_long((volatile jlong*)&store_value, dest); _Atomic_move_long(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest));
} }
#endif // AMD64 #endif // AMD64
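With these specializations, 64-bit accesses on 32-bit BSD x86 route through _Atomic_move_long instead of plain (tearable) loads and stores; a short usage sketch, assuming the templated Atomic::load/Atomic::store entry points in runtime/atomic.hpp:
volatile jlong counter = 0;
jlong snapshot = Atomic::load(&counter);  // dispatches to PlatformLoad<8> above
Atomic::store(snapshot + 1, &counter);    // dispatches to PlatformStore<8> above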
View file
@ -87,7 +87,7 @@ static inline int m68k_add_and_fetch(int add_value, volatile int *ptr) {
/* Atomically write VALUE into `*PTR' and return the previous /* Atomically write VALUE into `*PTR' and return the previous
contents of `*PTR'. */ contents of `*PTR'. */
static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) { static inline int m68k_lock_test_and_set(int newval, volatile int *ptr) {
for (;;) { for (;;) {
// Loop until success. // Loop until success.
int prev = *ptr; int prev = *ptr;
@ -148,7 +148,7 @@ static inline int arm_add_and_fetch(int add_value, volatile int *ptr) {
/* Atomically write VALUE into `*PTR' and return the previous /* Atomically write VALUE into `*PTR' and return the previous
contents of `*PTR'. */ contents of `*PTR'. */
static inline int arm_lock_test_and_set(volatile int *ptr, int newval) { static inline int arm_lock_test_and_set(int newval, volatile int *ptr) {
for (;;) { for (;;) {
// Loop until a __kernel_cmpxchg succeeds. // Loop until a __kernel_cmpxchg succeeds.
int prev = *ptr; int prev = *ptr;
@ -159,20 +159,6 @@ static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
} }
#endif // ARM #endif // ARM
inline void Atomic::store(jint store_value, volatile jint* dest) {
#if !defined(ARM) && !defined(M68K)
__sync_synchronize();
#endif
*dest = store_value;
}
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
#if !defined(ARM) && !defined(M68K)
__sync_synchronize();
#endif
*dest = store_value;
}
template<size_t byte_size> template<size_t byte_size>
struct Atomic::PlatformAdd struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> > : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
@ -207,18 +193,22 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
return __sync_add_and_fetch(dest, add_value); return __sync_add_and_fetch(dest, add_value);
} }
inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) { template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
#ifdef ARM #ifdef ARM
return arm_lock_test_and_set(dest, exchange_value); return xchg_using_helper<int>(arm_lock_test_and_set, exchange_value, dest);
#else #else
#ifdef M68K #ifdef M68K
return m68k_lock_test_and_set(dest, exchange_value); return xchg_using_helper<int>(m68k_lock_test_and_set, exchange_value, dest);
#else #else
// __sync_lock_test_and_set is a bizarrely named atomic exchange // __sync_lock_test_and_set is a bizarrely named atomic exchange
// operation. Note that some platforms only support this with the // operation. Note that some platforms only support this with the
// limitation that the only valid value to store is the immediate // limitation that the only valid value to store is the immediate
// constant 1. There is a test for this in JNI_CreateJavaVM(). // constant 1. There is a test for this in JNI_CreateJavaVM().
jint result = __sync_lock_test_and_set (dest, exchange_value); T result = __sync_lock_test_and_set (dest, exchange_value);
// All atomic operations are expected to be full memory barriers // All atomic operations are expected to be full memory barriers
// (see atomic.hpp). However, __sync_lock_test_and_set is not // (see atomic.hpp). However, __sync_lock_test_and_set is not
// a full memory barrier, but an acquire barrier. Hence, this added // a full memory barrier, but an acquire barrier. Hence, this added
@ -229,24 +219,14 @@ inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
#endif // ARM #endif // ARM
} }
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, template<>
volatile intptr_t* dest) { template<typename T>
#ifdef ARM inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
return arm_lock_test_and_set(dest, exchange_value); T volatile* dest) const {
#else STATIC_ASSERT(8 == sizeof(T));
#ifdef M68K T result = __sync_lock_test_and_set (dest, exchange_value);
return m68k_lock_test_and_set(dest, exchange_value);
#else
intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
__sync_synchronize(); __sync_synchronize();
return result; return result;
#endif // M68K
#endif // ARM
}
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void *) xchg_ptr((intptr_t) exchange_value,
(volatile intptr_t*) dest);
} }
// No direct support for cmpxchg of bytes; emulate using int. // No direct support for cmpxchg of bytes; emulate using int.
@ -281,18 +261,21 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
return __sync_val_compare_and_swap(dest, compare_value, exchange_value); return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
} }
inline jlong Atomic::load(const volatile jlong* src) { template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
STATIC_ASSERT(8 == sizeof(T));
volatile jlong dest; volatile jlong dest;
os::atomic_copy64(src, &dest); os::atomic_copy64(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
return dest; return PrimitiveConversions::cast<T>(dest);
} }
inline void Atomic::store(jlong store_value, jlong* dest) { template<>
os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest); template<typename T>
} inline void Atomic::PlatformStore<8>::operator()(T store_value,
T volatile* dest) const {
inline void Atomic::store(jlong store_value, volatile jlong* dest) { STATIC_ASSERT(8 == sizeof(T));
os::atomic_copy64((volatile jlong*)&store_value, dest); os::atomic_copy64(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest));
} }
#endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP #endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP
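
The comment in this hunk is the key to all the Zero variants: __sync_lock_test_and_set is only an acquire-barrier exchange, so the trailing __sync_synchronize() upgrades it to the full two-way barrier atomic.hpp demands. A runnable illustration of that pairing with the same GCC/Clang builtins (illustration only, not HotSpot code):

    #include <cstdio>

    int full_barrier_exchange(int new_value, volatile int* dest) {
      int prev = __sync_lock_test_and_set(dest, new_value);  // acquire barrier only
      __sync_synchronize();                                  // upgrade to a full barrier
      return prev;
    }

    int main() {
      volatile int v = 1;
      int prev = full_barrier_exchange(2, &v);
      printf("previous=%d now=%d\n", prev, v);
      return 0;
    }
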

View file

@ -34,19 +34,6 @@
#define READ_MEM_BARRIER __atomic_thread_fence(__ATOMIC_ACQUIRE); #define READ_MEM_BARRIER __atomic_thread_fence(__ATOMIC_ACQUIRE);
#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE); #define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; }
inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }
template<size_t byte_size> template<size_t byte_size>
struct Atomic::PlatformAdd struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> > : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
@ -57,19 +44,16 @@ struct Atomic::PlatformAdd
} }
}; };
inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) template<size_t byte_size>
{ template<typename T>
jint res = __sync_lock_test_and_set (dest, exchange_value); inline T Atomic::PlatformXchg<byte_size>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(byte_size == sizeof(T));
T res = __sync_lock_test_and_set(dest, exchange_value);
FULL_MEM_BARRIER; FULL_MEM_BARRIER;
return res; return res;
} }
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest)
{
return (void *) xchg_ptr((intptr_t) exchange_value,
(volatile intptr_t*) dest);
}
template<size_t byte_size> template<size_t byte_size>
template<typename T> template<typename T>
inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T exchange_value, inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T exchange_value,
@ -87,16 +71,4 @@ inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T exchange_value,
} }
} }
inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
{
intptr_t res = __sync_lock_test_and_set (dest, exchange_value);
FULL_MEM_BARRIER;
return res;
}
inline jlong Atomic::load(const volatile jlong* src) { return *src; }
#endif // OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_HPP #endif // OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_HPP
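
For comparison, the pattern above (builtin exchange followed by FULL_MEM_BARRIER) corresponds to what a sequentially consistent std::atomic exchange provides in one call, assuming FULL_MEM_BARRIER is a full two-way fence as its name suggests (a sketch, not a drop-in replacement for the HotSpot code):

    #include <atomic>

    template <typename T>
    T seq_cst_exchange(std::atomic<T>& dest, T exchange_value) {
      // memory_order_seq_cst already implies the two-way barrier that the
      // hand-written builtin/FULL_MEM_BARRIER pairing establishes above.
      return dest.exchange(exchange_value, std::memory_order_seq_cst);
    }

    int main() {
      std::atomic<int> v{1};
      return (seq_cst_exchange(v, 2) == 1 && v.load() == 2) ? 0 : 1;
    }
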

View file

@ -44,39 +44,24 @@
* kernel source or kernel_user_helpers.txt in Linux Doc. * kernel source or kernel_user_helpers.txt in Linux Doc.
*/ */
inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; } #ifndef AARCH64
inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; } template<>
inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; } template<typename T>
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; } inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; } STATIC_ASSERT(8 == sizeof(T));
return PrimitiveConversions::cast<T>(
(*os::atomic_load_long_func)(reinterpret_cast<const volatile jlong*>(src)));
}
inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; } template<>
inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; } template<typename T>
inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; } inline void Atomic::PlatformStore<8>::operator()(T store_value,
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; } T volatile* dest) const {
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; } STATIC_ASSERT(8 == sizeof(T));
(*os::atomic_store_long_func)(
inline jlong Atomic::load (const volatile jlong* src) { PrimitiveConversions::cast<jlong>(store_value), reinterpret_cast<volatile jlong*>(dest));
assert(((intx)src & (sizeof(jlong)-1)) == 0, "Atomic load jlong mis-aligned"); }
#ifdef AARCH64
return *src;
#else
return (*os::atomic_load_long_func)(src);
#endif #endif
}
inline void Atomic::store (jlong value, volatile jlong* dest) {
assert(((intx)dest & (sizeof(jlong)-1)) == 0, "Atomic store jlong mis-aligned");
#ifdef AARCH64
*dest = value;
#else
(*os::atomic_store_long_func)(value, dest);
#endif
}
inline void Atomic::store (jlong value, jlong* dest) {
store(value, (volatile jlong*)dest);
}
// As per atomic.hpp all read-modify-write operations have to provide two-way // As per atomic.hpp all read-modify-write operations have to provide two-way
// barriers semantics. For AARCH64 we are using load-acquire-with-reservation and // barriers semantics. For AARCH64 we are using load-acquire-with-reservation and
@ -141,11 +126,15 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
: "memory"); : "memory");
return val; return val;
} }
#endif // AARCH64 #endif
inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) { template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
#ifdef AARCH64 #ifdef AARCH64
jint old_val; T old_val;
int tmp; int tmp;
__asm__ volatile( __asm__ volatile(
"1:\n\t" "1:\n\t"
@ -157,13 +146,17 @@ inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
: "memory"); : "memory");
return old_val; return old_val;
#else #else
return (*os::atomic_xchg_func)(exchange_value, dest); return xchg_using_helper<jint>(os::atomic_xchg_func, exchange_value, dest);
#endif #endif
} }
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
#ifdef AARCH64 #ifdef AARCH64
intptr_t old_val; template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
T old_val;
int tmp; int tmp;
__asm__ volatile( __asm__ volatile(
"1:\n\t" "1:\n\t"
@ -174,14 +167,8 @@ inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* des
: [new_val] "r" (exchange_value), [dest] "r" (dest) : [new_val] "r" (exchange_value), [dest] "r" (dest)
: "memory"); : "memory");
return old_val; return old_val;
#else
return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
#endif
}
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
} }
#endif // AARCH64
// The memory_order parameter is ignored - we always provide the strongest/most-conservative ordering // The memory_order parameter is ignored - we always provide the strongest/most-conservative ordering
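
The 32-bit path above goes through xchg_using_helper<jint>(os::atomic_xchg_func, ...): an OS-supplied routine with a fixed jint signature does the atomic work, and the helper adapts the caller's T to it. A sketch of that adapter under simplified assumptions (fake_atomic_xchg and the plain casts stand in for the real stub and for PrimitiveConversions):

    #include <cstdint>

    typedef int32_t jint;

    // Stand-in for the kernel-assisted routine os::atomic_xchg_func points at.
    static jint fake_atomic_xchg(jint exchange_value, volatile jint* dest) {
      return __atomic_exchange_n(dest, exchange_value, __ATOMIC_SEQ_CST);
    }

    // Reinterpret T as the stub's fixed-width type and back, so one
    // jint-based routine can serve any 4-byte T.
    template <typename StubType, typename T>
    T xchg_using_helper_sketch(StubType (*stub)(StubType, volatile StubType*),
                               T exchange_value, T volatile* dest) {
      static_assert(sizeof(StubType) == sizeof(T), "stub width must match T");
      return (T)stub((StubType)exchange_value,
                     reinterpret_cast<volatile StubType*>(dest));
    }

    int main() {
      int v = 1;
      int prev = xchg_using_helper_sketch<jint>(fake_atomic_xchg, 2,
                                                static_cast<int volatile*>(&v));
      return (prev == 1 && v == 2) ? 0 : 1;
    }
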

View file

@ -32,22 +32,6 @@
// Implementation of class atomic // Implementation of class atomic
inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; }
inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }
inline jlong Atomic::load(const volatile jlong* src) { return *src; }
// //
// machine barrier instructions: // machine barrier instructions:
// //
@ -146,12 +130,14 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
return result; return result;
} }
inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) { template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
// Note that xchg_ptr doesn't necessarily do an acquire // Note that xchg doesn't necessarily do an acquire
// (see synchronizer.cpp). // (see synchronizer.cpp).
unsigned int old_value; T old_value;
const uint64_t zero = 0; const uint64_t zero = 0;
__asm__ __volatile__ ( __asm__ __volatile__ (
@ -179,15 +165,18 @@ inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
"memory" "memory"
); );
return (jint) old_value; return old_value;
} }
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) { template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
// Note that xchg_ptr doesn't necessarily do an acquire // Note that xchg doesn't necessarily do an acquire

// (see synchronizer.cpp). // (see synchronizer.cpp).
long old_value; T old_value;
const uint64_t zero = 0; const uint64_t zero = 0;
__asm__ __volatile__ ( __asm__ __volatile__ (
@ -215,11 +204,7 @@ inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* des
"memory" "memory"
); );
return (intptr_t) old_value; return old_value;
}
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
} }
inline void cmpxchg_pre_membar(cmpxchg_memory_order order) { inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {

View file

@ -53,20 +53,6 @@
// is an integer multiple of the data length. Furthermore, all stores are ordered: // is an integer multiple of the data length. Furthermore, all stores are ordered:
// a store which occurs conceptually before another store becomes visible to other CPUs // a store which occurs conceptually before another store becomes visible to other CPUs
// before the other store becomes visible. // before the other store becomes visible.
inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; }
inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }
//------------ //------------
// Atomic::add // Atomic::add
@ -208,8 +194,12 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I inc, D volatile* dest) const {
// //
// The return value is the (unchanged) value from memory as it was when the // The return value is the (unchanged) value from memory as it was when the
// replacement succeeded. // replacement succeeded.
inline jint Atomic::xchg (jint xchg_val, volatile jint* dest) { template<>
unsigned int old; template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
T old;
__asm__ __volatile__ ( __asm__ __volatile__ (
" LLGF %[old],%[mem] \n\t" // get old value " LLGF %[old],%[mem] \n\t" // get old value
@ -219,16 +209,20 @@ inline jint Atomic::xchg (jint xchg_val, volatile jint* dest) {
: [old] "=&d" (old) // write-only, prev value irrelevant : [old] "=&d" (old) // write-only, prev value irrelevant
, [mem] "+Q" (*dest) // read/write, memory to be updated atomically , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
//---< inputs >--- //---< inputs >---
: [upd] "d" (xchg_val) // read-only, value to be written to memory : [upd] "d" (exchange_value) // read-only, value to be written to memory
//---< clobbered >--- //---< clobbered >---
: "cc", "memory" : "cc", "memory"
); );
return (jint)old; return old;
} }
inline intptr_t Atomic::xchg_ptr(intptr_t xchg_val, volatile intptr_t* dest) { template<>
unsigned long old; template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
T old;
__asm__ __volatile__ ( __asm__ __volatile__ (
" LG %[old],%[mem] \n\t" // get old value " LG %[old],%[mem] \n\t" // get old value
@ -238,16 +232,12 @@ inline intptr_t Atomic::xchg_ptr(intptr_t xchg_val, volatile intptr_t* dest) {
: [old] "=&d" (old) // write-only, init from memory : [old] "=&d" (old) // write-only, init from memory
, [mem] "+Q" (*dest) // read/write, memory to be updated atomically , [mem] "+Q" (*dest) // read/write, memory to be updated atomically
//---< inputs >--- //---< inputs >---
: [upd] "d" (xchg_val) // read-only, value to be written to memory : [upd] "d" (exchange_value) // read-only, value to be written to memory
//---< clobbered >--- //---< clobbered >---
: "cc", "memory" : "cc", "memory"
); );
return (intptr_t)old; return old;
}
inline void *Atomic::xchg_ptr(void *exchange_value, volatile void *dest) {
return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
} }
//---------------- //----------------
@ -331,6 +321,4 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T xchg_val,
return old; return old;
} }
inline jlong Atomic::load(const volatile jlong* src) { return *src; }
#endif // OS_CPU_LINUX_S390_VM_ATOMIC_LINUX_S390_INLINE_HPP #endif // OS_CPU_LINUX_S390_VM_ATOMIC_LINUX_S390_INLINE_HPP
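
The s390 hunks implement exchange as a load (LLGF/LG) followed by a compare-and-swap retry loop, returning the value that was finally replaced. The same loop written against std::atomic, for readers who don't speak the mnemonics (illustrative sketch, not HotSpot code):

    #include <atomic>

    template <typename T>
    T exchange_via_cas(T new_value, std::atomic<T>* dest) {
      T old_value = dest->load(std::memory_order_relaxed);  // the initial load
      // compare_exchange fails and refreshes old_value whenever another CPU
      // changed *dest in between; retry until our swap lands.
      while (!dest->compare_exchange_weak(old_value, new_value)) { }
      return old_value;  // the (unchanged) value the replacement displaced
    }

    int main() {
      std::atomic<long> v{5};
      return (exchange_via_cas(7L, &v) == 5 && v.load() == 7) ? 0 : 1;
    }
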

View file

@ -1,6 +1,6 @@
/* /*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved. * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -471,7 +471,7 @@ JVM_handle_linux_signal(int sig,
// Info->si_addr need not be the exact address, it is only // Info->si_addr need not be the exact address, it is only
// guaranteed to be on the same page as the address that caused // guaranteed to be on the same page as the address that caused
// the SIGSEGV. // the SIGSEGV.
if ((sig == SIGSEGV) && if ((sig == SIGSEGV) && !UseMembar &&
(os::get_memory_serialize_page() == (os::get_memory_serialize_page() ==
(address)((uintptr_t)info->si_addr & ~(os::vm_page_size()-1)))) { (address)((uintptr_t)info->si_addr & ~(os::vm_page_size()-1)))) {
return true; return true;

View file

@ -27,22 +27,6 @@
// Implementation of class atomic // Implementation of class atomic
inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; }
inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }
inline jlong Atomic::load(const volatile jlong* src) { return *src; }
template<size_t byte_size> template<size_t byte_size>
struct Atomic::PlatformAdd struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> > : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
@ -95,9 +79,12 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
return rv; return rv;
} }
template<>
inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) { template<typename T>
intptr_t rv = exchange_value; inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
T rv = exchange_value;
__asm__ volatile( __asm__ volatile(
" swap [%2],%1\n\t" " swap [%2],%1\n\t"
: "=r" (rv) : "=r" (rv)
@ -106,8 +93,12 @@ inline jint Atomic::xchg (jint exchange_value, volatile jint* des
return rv; return rv;
} }
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) { template<>
intptr_t rv = exchange_value; template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
T rv = exchange_value;
__asm__ volatile( __asm__ volatile(
"1:\n\t" "1:\n\t"
" mov %1, %%o3\n\t" " mov %1, %%o3\n\t"
@ -123,10 +114,6 @@ inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* des
return rv; return rv;
} }
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
}
// No direct support for cmpxchg of bytes; emulate using int. // No direct support for cmpxchg of bytes; emulate using int.
template<> template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {}; struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
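
PlatformCmpxchg<1> : CmpxchgByteUsingInt, per the comment above, emulates a byte-wide CAS with the platform's int-wide CAS: find the aligned word holding the byte, splice the new byte in, and let concurrent changes to the neighbouring bytes force a retry. The idea in compilable form (a sketch using std::atomic and assuming little-endian byte numbering; HotSpot's version works on raw memory instead):

    #include <atomic>
    #include <cstdint>

    uint8_t cmpxchg_byte(uint8_t exchange_value, volatile uint8_t* dest,
                         uint8_t compare_value) {
      uintptr_t addr = reinterpret_cast<uintptr_t>(dest);
      auto* word = reinterpret_cast<std::atomic<uint32_t>*>(addr & ~uintptr_t(3));
      unsigned shift = unsigned(addr & 3) * 8;   // little-endian byte position
      uint32_t mask = uint32_t(0xff) << shift;
      uint32_t expected = word->load(std::memory_order_relaxed);
      for (;;) {
        uint8_t cur = uint8_t((expected & mask) >> shift);
        if (cur != compare_value) return cur;    // our byte already differs: fail
        uint32_t desired = (expected & ~mask) | (uint32_t(exchange_value) << shift);
        if (word->compare_exchange_weak(expected, desired)) return compare_value;
        // expected now holds the refreshed word; loop and re-check our byte.
      }
    }

    int main() {
      alignas(4) uint8_t bytes[4] = {1, 2, 3, 4};
      uint8_t prev = cmpxchg_byte(9, &bytes[2], 3);  // succeeds: 3 -> 9
      return (prev == 3 && bytes[2] == 9) ? 0 : 1;
    }
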

View file

@ -27,19 +27,6 @@
// Implementation of class atomic // Implementation of class atomic
inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; }
inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }
template<size_t byte_size> template<size_t byte_size>
struct Atomic::PlatformAdd struct Atomic::PlatformAdd
: Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> > : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
@ -61,7 +48,11 @@ inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) co
return old_value; return old_value;
} }
inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) { template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
__asm__ volatile ( "xchgl (%2),%0" __asm__ volatile ( "xchgl (%2),%0"
: "=r" (exchange_value) : "=r" (exchange_value)
: "0" (exchange_value), "r" (dest) : "0" (exchange_value), "r" (dest)
@ -69,10 +60,6 @@ inline jint Atomic::xchg (jint exchange_value, volatile jint* des
return exchange_value; return exchange_value;
} }
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
}
template<> template<>
template<typename T> template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value, inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
@ -102,8 +89,6 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
} }
#ifdef AMD64 #ifdef AMD64
inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
template<> template<>
template<typename I, typename D> template<typename I, typename D>
@ -118,7 +103,11 @@ inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) co
return old_value; return old_value;
} }
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) { template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
__asm__ __volatile__ ("xchgq (%2),%0" __asm__ __volatile__ ("xchgq (%2),%0"
: "=r" (exchange_value) : "=r" (exchange_value)
: "0" (exchange_value), "r" (dest) : "0" (exchange_value), "r" (dest)
@ -140,14 +129,8 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
return exchange_value; return exchange_value;
} }
inline jlong Atomic::load(const volatile jlong* src) { return *src; }
#else // !AMD64 #else // !AMD64
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
}
extern "C" { extern "C" {
// defined in linux_x86.s // defined in linux_x86.s
jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong); jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong);
@ -164,18 +147,21 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
return cmpxchg_using_helper<jlong>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value); return cmpxchg_using_helper<jlong>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
} }
inline jlong Atomic::load(const volatile jlong* src) { template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
STATIC_ASSERT(8 == sizeof(T));
volatile jlong dest; volatile jlong dest;
_Atomic_move_long(src, &dest); _Atomic_move_long(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
return dest; return PrimitiveConversions::cast<T>(dest);
} }
inline void Atomic::store(jlong store_value, jlong* dest) { template<>
_Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest); template<typename T>
} inline void Atomic::PlatformStore<8>::operator()(T store_value,
T volatile* dest) const {
inline void Atomic::store(jlong store_value, volatile jlong* dest) { STATIC_ASSERT(8 == sizeof(T));
_Atomic_move_long((volatile jlong*)&store_value, dest); _Atomic_move_long(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest));
} }
#endif // AMD64 #endif // AMD64
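
Why the !AMD64 path keeps routing jlong loads and stores through _Atomic_move_long: on IA-32 a plain 64-bit access compiles to two 32-bit moves, so an unsynchronized reader can observe half of one store and half of another. A self-contained demonstration of the guarantee being bought here (std::atomic<int64_t> plays the role of the helper):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>
    #include <thread>

    std::atomic<int64_t> value{0};  // guarantees untorn 64-bit accesses

    int main() {
      std::thread writer([] {
        for (int i = 0; i < 1000000; i++) {
          value.store((i & 1) ? -1LL : 0LL);  // alternate all-ones / all-zeros
        }
      });
      for (int i = 0; i < 1000000; i++) {
        int64_t v = value.load();
        if (v != 0 && v != -1) {
          printf("torn read: %lld\n", (long long)v);  // never fires with atomics
        }
      }
      writer.join();
      return 0;
    }
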

View file

@ -87,7 +87,7 @@ static inline int m68k_add_and_fetch(int add_value, volatile int *ptr) {
/* Atomically write VALUE into `*PTR' and returns the previous /* Atomically write VALUE into `*PTR' and returns the previous
contents of `*PTR'. */ contents of `*PTR'. */
static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) { static inline int m68k_lock_test_and_set(int newval, volatile int *ptr) {
for (;;) { for (;;) {
// Loop until success. // Loop until success.
int prev = *ptr; int prev = *ptr;
@ -148,7 +148,7 @@ static inline int arm_add_and_fetch(int add_value, volatile int *ptr) {
/* Atomically write VALUE into `*PTR' and returns the previous /* Atomically write VALUE into `*PTR' and returns the previous
contents of `*PTR'. */ contents of `*PTR'. */
static inline int arm_lock_test_and_set(volatile int *ptr, int newval) { static inline int arm_lock_test_and_set(int newval, volatile int *ptr) {
for (;;) { for (;;) {
// Loop until a __kernel_cmpxchg succeeds. // Loop until a __kernel_cmpxchg succeeds.
int prev = *ptr; int prev = *ptr;
@ -159,14 +159,6 @@ static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
} }
#endif // ARM #endif // ARM
inline void Atomic::store(jint store_value, volatile jint* dest) {
*dest = store_value;
}
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
*dest = store_value;
}
template<size_t byte_size> template<size_t byte_size>
struct Atomic::PlatformAdd struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> > : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
@ -201,18 +193,22 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
return __sync_add_and_fetch(dest, add_value); return __sync_add_and_fetch(dest, add_value);
} }
inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) { template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
#ifdef ARM #ifdef ARM
return arm_lock_test_and_set(dest, exchange_value); return xchg_using_helper<int>(arm_lock_test_and_set, exchange_value, dest);
#else #else
#ifdef M68K #ifdef M68K
return m68k_lock_test_and_set(dest, exchange_value); return xchg_using_helper<int>(m68k_lock_test_and_set, exchange_value, dest);
#else #else
// __sync_lock_test_and_set is a bizarrely named atomic exchange // __sync_lock_test_and_set is a bizarrely named atomic exchange
// operation. Note that some platforms only support this with the // operation. Note that some platforms only support this with the
// limitation that the only valid value to store is the immediate // limitation that the only valid value to store is the immediate
// constant 1. There is a test for this in JNI_CreateJavaVM(). // constant 1. There is a test for this in JNI_CreateJavaVM().
jint result = __sync_lock_test_and_set (dest, exchange_value); T result = __sync_lock_test_and_set (dest, exchange_value);
// All atomic operations are expected to be full memory barriers // All atomic operations are expected to be full memory barriers
// (see atomic.hpp). However, __sync_lock_test_and_set is not // (see atomic.hpp). However, __sync_lock_test_and_set is not
// a full memory barrier, but an acquire barrier. Hence, this added // a full memory barrier, but an acquire barrier. Hence, this added
@ -223,24 +219,14 @@ inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
#endif // ARM #endif // ARM
} }
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, template<>
volatile intptr_t* dest) { template<typename T>
#ifdef ARM inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
return arm_lock_test_and_set(dest, exchange_value); T volatile* dest) const {
#else STATIC_ASSERT(8 == sizeof(T));
#ifdef M68K T result = __sync_lock_test_and_set (dest, exchange_value);
return m68k_lock_test_and_set(dest, exchange_value);
#else
intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
__sync_synchronize(); __sync_synchronize();
return result; return result;
#endif // M68K
#endif // ARM
}
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void *) xchg_ptr((intptr_t) exchange_value,
(volatile intptr_t*) dest);
} }
// No direct support for cmpxchg of bytes; emulate using int. // No direct support for cmpxchg of bytes; emulate using int.
@ -275,18 +261,21 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
return __sync_val_compare_and_swap(dest, compare_value, exchange_value); return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
} }
inline jlong Atomic::load(const volatile jlong* src) { template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
STATIC_ASSERT(8 == sizeof(T));
volatile jlong dest; volatile jlong dest;
os::atomic_copy64(src, &dest); os::atomic_copy64(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
return dest; return PrimitiveConversions::cast<T>(dest);
} }
inline void Atomic::store(jlong store_value, jlong* dest) { template<>
os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest); template<typename T>
} inline void Atomic::PlatformStore<8>::operator()(T store_value,
T volatile* dest) const {
inline void Atomic::store(jlong store_value, volatile jlong* dest) { STATIC_ASSERT(8 == sizeof(T));
os::atomic_copy64((volatile jlong*)&store_value, dest); os::atomic_copy64(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest));
} }
#endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP #endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP

View file

@ -27,32 +27,6 @@
// Implementation of class atomic // Implementation of class atomic
inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; }
inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }
inline void Atomic::store(jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store(jlong store_value, volatile jlong* dest) { *dest = store_value; }
inline jlong Atomic::load(const volatile jlong* src) { return *src; }
// This is the interface to the atomic instructions in solaris_sparc.il.
// It's very messy because we need to support v8 and these instructions
// are illegal there. When sparc v8 is dropped, we can drop out lots of
// this code. Also compiler2 does not support v8 so the conditional code
// omits the instruction set check.
extern "C" jint _Atomic_swap32(jint exchange_value, volatile jint* dest);
extern "C" intptr_t _Atomic_swap64(intptr_t exchange_value, volatile intptr_t* dest);
// Implement ADD using a CAS loop. // Implement ADD using a CAS loop.
template<size_t byte_size> template<size_t byte_size>
struct Atomic::PlatformAdd VALUE_OBJ_CLASS_SPEC { struct Atomic::PlatformAdd VALUE_OBJ_CLASS_SPEC {
@ -69,16 +43,30 @@ struct Atomic::PlatformAdd VALUE_OBJ_CLASS_SPEC {
} }
}; };
inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) { template<>
return _Atomic_swap32(exchange_value, dest); template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
__asm__ volatile ( "swap [%2],%0"
: "=r" (exchange_value)
: "0" (exchange_value), "r" (dest)
: "memory");
return exchange_value;
} }
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) { template<>
return _Atomic_swap64(exchange_value, dest); template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
T old_value = *dest;
while (true) {
T result = cmpxchg(exchange_value, dest, old_value);
if (result == old_value) break;
old_value = result;
} }
return old_value;
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
} }
// No direct support for cmpxchg of bytes; emulate using int. // No direct support for cmpxchg of bytes; emulate using int.

View file

@ -32,47 +32,6 @@
.end .end
// Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
//
// Arguments:
// exchange_value: O0
// dest: O1
//
// Results:
// O0: the value previously stored in dest
.inline _Atomic_swap32, 2
.volatile
swap [%o1],%o0
.nonvolatile
.end
// Support for intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t * dest).
//
// 64-bit
//
// Arguments:
// exchange_value: O0
// dest: O1
//
// Results:
// O0: the value previously stored in dest
.inline _Atomic_swap64, 2
.volatile
1:
mov %o0, %o3
ldx [%o1], %o2
casx [%o1], %o2, %o3
cmp %o2, %o3
bne %xcc, 1b
nop
mov %o2, %o0
.nonvolatile
.end
// Support for jlong Atomic::load and Atomic::store on v9. // Support for jlong Atomic::load and Atomic::store on v9.
// //
// void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst) // void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst)

View file

@ -25,20 +25,6 @@
#ifndef OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP #ifndef OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP
#define OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP #define OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP
inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; }
inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }
// For Sun Studio - implementation is in solaris_x86_64.il. // For Sun Studio - implementation is in solaris_x86_64.il.
extern "C" { extern "C" {
@ -84,8 +70,26 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
reinterpret_cast<jlong volatile*>(dest))); reinterpret_cast<jlong volatile*>(dest)));
} }
inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) { template<>
return _Atomic_xchg(exchange_value, dest); template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
return PrimitiveConversions::cast<T>(
_Atomic_xchg(PrimitiveConversions::cast<jint>(exchange_value),
reinterpret_cast<jint volatile*>(dest)));
}
extern "C" jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest);
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
return PrimitiveConversions::cast<T>(
_Atomic_xchg_long(PrimitiveConversions::cast<jlong>(exchange_value),
reinterpret_cast<jlong volatile*>(dest)));
} }
// Not using cmpxchg_using_helper here, because some configurations of // Not using cmpxchg_using_helper here, because some configurations of
@ -133,18 +137,4 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
PrimitiveConversions::cast<jlong>(compare_value))); PrimitiveConversions::cast<jlong>(compare_value)));
} }
inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
extern "C" jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest);
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
return (intptr_t)_Atomic_xchg_long((jlong)exchange_value, (volatile jlong*)dest);
}
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)_Atomic_xchg_long((jlong)exchange_value, (volatile jlong*)dest);
}
inline jlong Atomic::load(const volatile jlong* src) { return *src; }
#endif // OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP #endif // OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP
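
PrimitiveConversions::cast, used heavily in this hunk, behaves as a value- and bit-preserving conversion between same-sized types. A plausible minimal model (an assumption: the real HotSpot template is metaprogrammed and stricter about which type pairs it permits):

    #include <cstdint>
    #include <cstring>

    template <typename To, typename From>
    To cast_bits(From from) {
      static_assert(sizeof(To) == sizeof(From), "same-sized types only");
      To to;
      std::memcpy(&to, &from, sizeof(To));  // well-defined bit-for-bit pun
      return to;
    }

    int main() {
      // e.g. view a double's bit pattern as a jlong-like integer and back.
      int64_t bits = cast_bits<int64_t>(1.5);
      double back = cast_bits<double>(bits);
      return (back == 1.5) ? 0 : 1;
    }
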

View file

@ -42,21 +42,6 @@
#pragma warning(disable: 4035) // Disables warnings reporting missing return statement #pragma warning(disable: 4035) // Disables warnings reporting missing return statement
inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; }
inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }
template<size_t byte_size> template<size_t byte_size>
struct Atomic::PlatformAdd struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> > : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
@ -66,9 +51,6 @@ struct Atomic::PlatformAdd
}; };
#ifdef AMD64 #ifdef AMD64
inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
template<> template<>
template<typename I, typename D> template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const { inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
@ -81,17 +63,19 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
return add_using_helper<intptr_t>(os::atomic_add_ptr_func, add_value, dest); return add_using_helper<intptr_t>(os::atomic_add_ptr_func, add_value, dest);
} }
inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) { #define DEFINE_STUB_XCHG(ByteSize, StubType, StubName) \
return (jint)(*os::atomic_xchg_func)(exchange_value, dest); template<> \
template<typename T> \
inline T Atomic::PlatformXchg<ByteSize>::operator()(T exchange_value, \
T volatile* dest) const { \
STATIC_ASSERT(ByteSize == sizeof(T)); \
return xchg_using_helper<StubType>(StubName, exchange_value, dest); \
} }
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) { DEFINE_STUB_XCHG(4, jint, os::atomic_xchg_func)
return (intptr_t)(os::atomic_xchg_ptr_func)(exchange_value, dest); DEFINE_STUB_XCHG(8, jlong, os::atomic_xchg_ptr_func)
}
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) { #undef DEFINE_STUB_XCHG
return (void *)(os::atomic_xchg_ptr_func)((intptr_t)exchange_value, (volatile intptr_t*)dest);
}
#define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName) \ #define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName) \
template<> \ template<> \
@ -110,8 +94,6 @@ DEFINE_STUB_CMPXCHG(8, jlong, os::atomic_cmpxchg_long_func)
#undef DEFINE_STUB_CMPXCHG #undef DEFINE_STUB_CMPXCHG
inline jlong Atomic::load(const volatile jlong* src) { return *src; }
#else // !AMD64 #else // !AMD64
template<> template<>
@ -128,7 +110,11 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) co
} }
} }
inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) { template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
// alternative for InterlockedExchange // alternative for InterlockedExchange
__asm { __asm {
mov eax, exchange_value; mov eax, exchange_value;
@ -137,14 +123,6 @@ inline jint Atomic::xchg (jint exchange_value, volatile jint* des
} }
} }
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
}
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
}
template<> template<>
template<typename T> template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value, inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
@ -202,9 +180,12 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
} }
} }
inline jlong Atomic::load(const volatile jlong* src) { template<>
volatile jlong dest; template<typename T>
volatile jlong* pdest = &dest; inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
STATIC_ASSERT(8 == sizeof(T));
volatile T dest;
volatile T* pdest = &dest;
__asm { __asm {
mov eax, src mov eax, src
fild qword ptr [eax] fild qword ptr [eax]
@ -214,8 +195,12 @@ inline jlong Atomic::load(const volatile jlong* src) {
return dest; return dest;
} }
inline void Atomic::store(jlong store_value, volatile jlong* dest) { template<>
volatile jlong* src = &store_value; template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T store_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
volatile T* src = &store_value;
__asm { __asm {
mov eax, src mov eax, src
fild qword ptr [eax] fild qword ptr [eax]
@ -224,10 +209,6 @@ inline void Atomic::store(jlong store_value, volatile jlong* dest) {
} }
} }
inline void Atomic::store(jlong store_value, jlong* dest) {
Atomic::store(store_value, (volatile jlong*)dest);
}
#endif // AMD64 #endif // AMD64
#pragma warning(default: 4035) // Enables warnings reporting missing return statement #pragma warning(default: 4035) // Enables warnings reporting missing return statement
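
For reading convenience, DEFINE_STUB_XCHG(4, jint, os::atomic_xchg_func) in the AMD64 hunk above expands, by plain macro substitution, to:

    template<>
    template<typename T>
    inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                                 T volatile* dest) const {
      STATIC_ASSERT(4 == sizeof(T));
      return xchg_using_helper<jint>(os::atomic_xchg_func, exchange_value, dest);
    }

and likewise with 8/jlong/os::atomic_xchg_ptr_func for the wide case.
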

View file

@ -2276,6 +2276,10 @@ private:
if (strcmp(rep_var,"$XMMRegister") == 0) return "as_XMMRegister"; if (strcmp(rep_var,"$XMMRegister") == 0) return "as_XMMRegister";
#endif #endif
if (strcmp(rep_var,"$CondRegister") == 0) return "as_ConditionRegister"; if (strcmp(rep_var,"$CondRegister") == 0) return "as_ConditionRegister";
#if defined(PPC64)
if (strcmp(rep_var,"$VectorRegister") == 0) return "as_VectorRegister";
if (strcmp(rep_var,"$VectorSRegister") == 0) return "as_VectorSRegister";
#endif
return NULL; return NULL;
} }

View file

@ -1304,7 +1304,9 @@ void LIRGenerator::do_getClass(Intrinsic* x) {
// FIXME T_ADDRESS should actually be T_METADATA but it can't because the // FIXME T_ADDRESS should actually be T_METADATA but it can't because the
// meaning of these two is mixed up (see JDK-8026837). // meaning of these two is mixed up (see JDK-8026837).
__ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), temp, info); __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), temp, info);
__ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result); __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_ADDRESS), result);
// mirror = ((OopHandle)mirror)->resolve();
__ move_wide(new LIR_Address(result, T_OBJECT), result);
} }
// java.lang.Class::isPrimitive() // java.lang.Class::isPrimitive()
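
The extra move_wide here exists because the klass's mirror field now appears to hold an OopHandle (a pointer to an oop slot) rather than the oop itself, so fetching the mirror costs two dependent loads. A toy model of that indirection (all names invented for illustration):

    #include <cstdio>

    struct Mirror { int id; };

    struct OopHandleModel {
      Mirror** slot;                              // handle = address of an oop slot
      Mirror* resolve() const { return *slot; }   // the second load this patch adds
    };

    int main() {
      Mirror m{42};
      Mirror* oop_slot = &m;
      OopHandleModel h{&oop_slot};
      printf("%d\n", h.resolve()->id);  // load the handle, then load through it
      return 0;
    }
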

View file

@ -98,7 +98,8 @@ ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Depen
_keep_alive((is_anonymous || h_class_loader.is_null()) ? 1 : 0), _keep_alive((is_anonymous || h_class_loader.is_null()) ? 1 : 0),
_metaspace(NULL), _unloading(false), _klasses(NULL), _metaspace(NULL), _unloading(false), _klasses(NULL),
_modules(NULL), _packages(NULL), _modules(NULL), _packages(NULL),
_claimed(0), _jmethod_ids(NULL), _handles(), _deallocate_list(NULL), _claimed(0), _modified_oops(true), _accumulated_modified_oops(false),
_jmethod_ids(NULL), _handles(), _deallocate_list(NULL),
_next(NULL), _dependencies(dependencies), _next(NULL), _dependencies(dependencies),
_metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true, _metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true,
Monitor::_safepoint_check_never)) { Monitor::_safepoint_check_never)) {
@ -207,7 +208,7 @@ bool ClassLoaderData::ChunkedHandleList::contains(oop* p) {
oops_do(&cl); oops_do(&cl);
return cl.found(); return cl.found();
} }
#endif #endif // ASSERT
bool ClassLoaderData::claim() { bool ClassLoaderData::claim() {
if (_claimed == 1) { if (_claimed == 1) {
@ -236,19 +237,19 @@ void ClassLoaderData::dec_keep_alive() {
} }
} }
void ClassLoaderData::oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim) { void ClassLoaderData::oops_do(OopClosure* f, bool must_claim, bool clear_mod_oops) {
if (must_claim && !claim()) { if (must_claim && !claim()) {
return; return;
} }
// Only clear modified_oops after the ClassLoaderData is claimed.
if (clear_mod_oops) {
clear_modified_oops();
}
f->do_oop(&_class_loader); f->do_oop(&_class_loader);
_dependencies.oops_do(f); _dependencies.oops_do(f);
_handles.oops_do(f); _handles.oops_do(f);
if (klass_closure != NULL) {
classes_do(klass_closure);
}
} }
void ClassLoaderData::Dependencies::oops_do(OopClosure* f) { void ClassLoaderData::Dependencies::oops_do(OopClosure* f) {
@ -368,6 +369,9 @@ void ClassLoaderData::record_dependency(const Klass* k, TRAPS) {
// Must handle over GC point. // Must handle over GC point.
Handle dependency(THREAD, to); Handle dependency(THREAD, to);
from_cld->_dependencies.add(dependency, CHECK); from_cld->_dependencies.add(dependency, CHECK);
// Added a potentially young gen oop to the ClassLoaderData
record_modified_oops();
} }
@ -764,6 +768,7 @@ Metaspace* ClassLoaderData::metaspace_non_null() {
OopHandle ClassLoaderData::add_handle(Handle h) { OopHandle ClassLoaderData::add_handle(Handle h) {
MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag); MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
record_modified_oops();
return OopHandle(_handles.add(h())); return OopHandle(_handles.add(h()));
} }
@ -875,8 +880,7 @@ void ClassLoaderData::dump(outputStream * const out) {
if (Verbose) { if (Verbose) {
Klass* k = _klasses; Klass* k = _klasses;
while (k != NULL) { while (k != NULL) {
out->print_cr("klass " PTR_FORMAT ", %s, CT: %d, MUT: %d", k, k->name()->as_C_string(), out->print_cr("klass " PTR_FORMAT ", %s", p2i(k), k->name()->as_C_string());
k->has_modified_oops(), k->has_accumulated_modified_oops());
assert(k != k->next_link(), "no loops!"); assert(k != k->next_link(), "no loops!");
k = k->next_link(); k = k->next_link();
} }
@ -1003,25 +1007,25 @@ void ClassLoaderDataGraph::print_creation(outputStream* out, Handle loader, Clas
} }
void ClassLoaderDataGraph::oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim) { void ClassLoaderDataGraph::oops_do(OopClosure* f, bool must_claim) {
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) { for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
cld->oops_do(f, klass_closure, must_claim); cld->oops_do(f, must_claim);
} }
} }
void ClassLoaderDataGraph::keep_alive_oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim) { void ClassLoaderDataGraph::keep_alive_oops_do(OopClosure* f, bool must_claim) {
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) { for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
if (cld->keep_alive()) { if (cld->keep_alive()) {
cld->oops_do(f, klass_closure, must_claim); cld->oops_do(f, must_claim);
} }
} }
} }
void ClassLoaderDataGraph::always_strong_oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim) { void ClassLoaderDataGraph::always_strong_oops_do(OopClosure* f, bool must_claim) {
if (ClassUnloading) { if (ClassUnloading) {
keep_alive_oops_do(f, klass_closure, must_claim); keep_alive_oops_do(f, must_claim);
} else { } else {
oops_do(f, klass_closure, must_claim); oops_do(f, must_claim);
} }
} }
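
The new flags behave like a card table entry scoped to a whole CLD: record_modified_oops() dirties the CLD whenever a new oop (a fresh handle or dependency) is added, and a young-generation scan can skip CLDs whose bit is clear, clearing bits only after claiming. A toy model of that protocol (names invented; the real code is in ClassLoaderData above):

    #include <cstdio>
    #include <vector>

    struct CLDModel {
      bool modified_oops = true;                     // starts dirty, like a fresh card
      void record_modified_oops() { modified_oops = true; }
      void clear_modified_oops()  { modified_oops = false; }
      void add_handle() { record_modified_oops(); }  // mirrors ClassLoaderData::add_handle
    };

    void young_gen_scan(std::vector<CLDModel>& clds) {
      for (CLDModel& cld : clds) {
        if (!cld.modified_oops) continue;  // clean since the last scan: skip
        cld.clear_modified_oops();         // clear only after claiming, then scan
        printf("scanning dirty CLD\n");
      }
    }

    int main() {
      std::vector<CLDModel> clds(3);
      young_gen_scan(clds);   // all three scanned (initially dirty)
      clds[1].add_handle();   // one CLD becomes dirty again
      young_gen_scan(clds);   // only that one is rescanned
      return 0;
    }
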

View file

@ -87,9 +87,9 @@ class ClassLoaderDataGraph : public AllStatic {
static void purge(); static void purge();
static void clear_claimed_marks(); static void clear_claimed_marks();
// oops do // oops do
static void oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim); static void oops_do(OopClosure* f, bool must_claim);
static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim); static void keep_alive_oops_do(OopClosure* blk, bool must_claim);
static void always_strong_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim); static void always_strong_oops_do(OopClosure* blk, bool must_claim);
// cld do // cld do
static void cld_do(CLDClosure* cl); static void cld_do(CLDClosure* cl);
static void cld_unloading_do(CLDClosure* cl); static void cld_unloading_do(CLDClosure* cl);
@ -230,10 +230,16 @@ class ClassLoaderData : public CHeapObj<mtClass> {
Mutex* _metaspace_lock; // Locks the metaspace for allocations and setup. Mutex* _metaspace_lock; // Locks the metaspace for allocations and setup.
bool _unloading; // true if this class loader goes away bool _unloading; // true if this class loader goes away
bool _is_anonymous; // if this CLD is for an anonymous class bool _is_anonymous; // if this CLD is for an anonymous class
// Remembered sets support for the oops in the class loader data.
bool _modified_oops; // Card Table Equivalent (YC/CMS support)
bool _accumulated_modified_oops; // Mod Union Equivalent (CMS support)
s2 _keep_alive; // if this CLD is kept alive without a keep_alive_object(). s2 _keep_alive; // if this CLD is kept alive without a keep_alive_object().
// Used for anonymous classes and the boot class // Used for anonymous classes and the boot class
// loader. _keep_alive does not need to be volatile or // loader. _keep_alive does not need to be volatile or
// atomic since there is one unique CLD per anonymous class. // atomic since there is one unique CLD per anonymous class.
volatile int _claimed; // true if claimed, for example during GC traces. volatile int _claimed; // true if claimed, for example during GC traces.
// To avoid applying oop closure more than once. // To avoid applying oop closure more than once.
// Has to be an int because we cas it. // Has to be an int because we cas it.
@ -276,6 +282,19 @@ class ClassLoaderData : public CHeapObj<mtClass> {
bool claimed() const { return _claimed == 1; } bool claimed() const { return _claimed == 1; }
bool claim(); bool claim();
// CLDs are not placed in the heap, so the Card Table or
// the Mod Union Table can't be used to mark when a CLD has modified oops.
// The CT and MUT bits save this information for the whole class loader data.
void clear_modified_oops() { _modified_oops = false; }
public:
void record_modified_oops() { _modified_oops = true; }
bool has_modified_oops() { return _modified_oops; }
void accumulate_modified_oops() { if (has_modified_oops()) _accumulated_modified_oops = true; }
void clear_accumulated_modified_oops() { _accumulated_modified_oops = false; }
bool has_accumulated_modified_oops() { return _accumulated_modified_oops; }
private:
void unload(); void unload();
bool keep_alive() const { return _keep_alive > 0; } bool keep_alive() const { return _keep_alive > 0; }
void classes_do(void f(Klass*)); void classes_do(void f(Klass*));
@ -346,8 +365,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
inline unsigned int identity_hash() const { return (unsigned int)(((intptr_t)this) >> 3); } inline unsigned int identity_hash() const { return (unsigned int)(((intptr_t)this) >> 3); }
// Used when tracing from klasses. void oops_do(OopClosure* f, bool must_claim, bool clear_modified_oops = false);
void oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim);
void classes_do(KlassClosure* klass_closure); void classes_do(KlassClosure* klass_closure);
Klass* klasses() { return _klasses; } Klass* klasses() { return _klasses; }
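The hunks above give each ClassLoaderData a private, two-bit remembered set: _modified_oops acts like a dirty card for the CLD's oops, and _accumulated_modified_oops acts like a mod-union bit that survives young collections while a concurrent old-gen (CMS) cycle is running. A minimal stand-alone sketch of that scheme, with illustrative names only (this is not HotSpot code):

    #include <cstdio>

    struct CLDRememberedFlags {
      bool modified;     // "card table equivalent": set on every oop store into the CLD
      bool accumulated;  // "mod union equivalent": union of modified bits across
                         // young collections during a concurrent old-gen cycle

      void record_modified() { modified = true; }

      // A young collection consumes the modified bit; if a concurrent cycle is
      // active it first folds the bit into the accumulated state so the final
      // remark can still find this CLD.
      void young_gc_scan(bool concurrent_cycle_active) {
        if (!modified) return;                            // no references into young gen
        if (concurrent_cycle_active) accumulated = true;
        modified = false;                                 // about to rescan all metadata
      }
    };

    int main() {
      CLDRememberedFlags f = {};
      f.record_modified();
      f.young_gc_scan(/*concurrent_cycle_active=*/true);
      std::printf("modified=%d accumulated=%d\n", f.modified, f.accumulated);  // 0 1
    }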

View file

@ -889,7 +889,7 @@ void java_lang_Class::create_mirror(Klass* k, Handle class_loader,
// Setup indirection from klass->mirror // Setup indirection from klass->mirror
// after any exceptions can happen during allocations. // after any exceptions can happen during allocations.
k->set_java_mirror(mirror()); k->set_java_mirror(mirror);
// Set the module field in the java_lang_Class instance. This must be done // Set the module field in the java_lang_Class instance. This must be done
// after the mirror is set. // after the mirror is set.

View file

@ -781,6 +781,7 @@
do_name(decrementExact_name,"decrementExact") \ do_name(decrementExact_name,"decrementExact") \
do_name(incrementExact_name,"incrementExact") \ do_name(incrementExact_name,"incrementExact") \
do_name(multiplyExact_name,"multiplyExact") \ do_name(multiplyExact_name,"multiplyExact") \
do_name(multiplyHigh_name,"multiplyHigh") \
do_name(negateExact_name,"negateExact") \ do_name(negateExact_name,"negateExact") \
do_name(subtractExact_name,"subtractExact") \ do_name(subtractExact_name,"subtractExact") \
do_name(fma_name, "fma") \ do_name(fma_name, "fma") \
@ -805,6 +806,7 @@
do_intrinsic(_incrementExactL, java_lang_Math, incrementExact_name, long_long_signature, F_S) \ do_intrinsic(_incrementExactL, java_lang_Math, incrementExact_name, long_long_signature, F_S) \
do_intrinsic(_multiplyExactI, java_lang_Math, multiplyExact_name, int2_int_signature, F_S) \ do_intrinsic(_multiplyExactI, java_lang_Math, multiplyExact_name, int2_int_signature, F_S) \
do_intrinsic(_multiplyExactL, java_lang_Math, multiplyExact_name, long2_long_signature, F_S) \ do_intrinsic(_multiplyExactL, java_lang_Math, multiplyExact_name, long2_long_signature, F_S) \
do_intrinsic(_multiplyHigh, java_lang_Math, multiplyHigh_name, long2_long_signature, F_S) \
do_intrinsic(_negateExactI, java_lang_Math, negateExact_name, int_int_signature, F_S) \ do_intrinsic(_negateExactI, java_lang_Math, negateExact_name, int_int_signature, F_S) \
do_intrinsic(_negateExactL, java_lang_Math, negateExact_name, long_long_signature, F_S) \ do_intrinsic(_negateExactL, java_lang_Math, negateExact_name, long_long_signature, F_S) \
do_intrinsic(_subtractExactI, java_lang_Math, subtractExact_name, int2_int_signature, F_S) \ do_intrinsic(_subtractExactI, java_lang_Math, subtractExact_name, int2_int_signature, F_S) \
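The new _multiplyHigh intrinsic backs Math.multiplyHigh(long, long): the high 64 bits of the full 128-bit signed product. A short C++ sketch of the semantics, assuming the GCC/Clang __int128 extension (the VM itself would lower this to a wide-multiply instruction where available):

    #include <cstdint>
    #include <cstdio>

    int64_t multiply_high(int64_t a, int64_t b) {
      return (int64_t)(((__int128)a * (__int128)b) >> 64);  // arithmetic shift keeps the sign
    }

    int main() {
      std::printf("%lld\n", (long long)multiply_high(INT64_MAX, 2));  // 0
      std::printf("%lld\n", (long long)multiply_high(INT64_MIN, 2));  // -1
    }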

View file

@ -332,7 +332,7 @@ public:
static void disable_compilation_forever() { static void disable_compilation_forever() {
UseCompiler = false; UseCompiler = false;
AlwaysCompileLoopMethods = false; AlwaysCompileLoopMethods = false;
Atomic::xchg(shutdown_compilation, &_should_compile_new_jobs); Atomic::xchg(jint(shutdown_compilation), &_should_compile_new_jobs);
} }
static bool is_compilation_disabled_forever() { static bool is_compilation_disabled_forever() {

View file

@ -96,7 +96,7 @@ bool MethodMatcher::canonicalize(char * line, const char *& error_msg) {
bool have_colon = (colon != NULL); bool have_colon = (colon != NULL);
if (have_colon) { if (have_colon) {
// Don't allow multiple '::' // Don't allow multiple '::'
if (colon + 2 != '\0') { if (colon[2] != '\0') {
if (strstr(colon+2, "::")) { if (strstr(colon+2, "::")) {
error_msg = "Method pattern only allows one '::' allowed"; error_msg = "Method pattern only allows one '::' allowed";
return false; return false;
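The one-character fix above is easy to misread: `colon + 2 != '\0'` compares a pointer (advanced past the "::") against the null character, which is effectively always true, while `colon[2] != '\0'` tests the character after the "::" as intended. In isolation, with a hypothetical helper reproducing the fixed logic:

    #include <cstring>
    #include <cstdio>

    bool has_text_after_double_colon(const char* line) {
      const char* colon = std::strstr(line, "::");
      if (colon == NULL) return false;
      return colon[2] != '\0';   // not: (colon + 2 != '\0'), which compares a pointer
    }

    int main() {
      std::printf("%d\n", has_text_after_double_colon("Foo::bar"));  // 1
      std::printf("%d\n", has_text_after_double_colon("Foo::"));     // 0
    }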

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -48,12 +48,7 @@ class ParMarkFromRootsClosure;
// because some CMS OopClosures derive from OopsInGenClosure. It would be // because some CMS OopClosures derive from OopsInGenClosure. It would be
// good to get rid of them completely. // good to get rid of them completely.
class MetadataAwareOopsInGenClosure: public OopsInGenClosure { class MetadataAwareOopsInGenClosure: public OopsInGenClosure {
KlassToOopClosure _klass_closure;
public: public:
MetadataAwareOopsInGenClosure() {
_klass_closure.initialize(this);
}
virtual bool do_metadata() { return do_metadata_nv(); } virtual bool do_metadata() { return do_metadata_nv(); }
inline bool do_metadata_nv() { return true; } inline bool do_metadata_nv() { return true; }

View file

@ -40,10 +40,8 @@ inline void MetadataAwareOopsInGenClosure::do_klass_nv(Klass* k) {
inline void MetadataAwareOopsInGenClosure::do_klass(Klass* k) { do_klass_nv(k); } inline void MetadataAwareOopsInGenClosure::do_klass(Klass* k) { do_klass_nv(k); }
inline void MetadataAwareOopsInGenClosure::do_cld_nv(ClassLoaderData* cld) { inline void MetadataAwareOopsInGenClosure::do_cld_nv(ClassLoaderData* cld) {
assert(_klass_closure._oop_closure == this, "Must be");
bool claim = true; // Must claim the class loader data before processing. bool claim = true; // Must claim the class loader data before processing.
cld->oops_do(_klass_closure._oop_closure, &_klass_closure, claim); cld->oops_do(this, claim);
} }
// Decode the oop and call do_oop on it. // Decode the oop and call do_oop on it.
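The `claim` argument that do_cld_nv now passes straight to cld->oops_do() implements a visit-at-most-once protocol: each ClassLoaderData carries a claim word that workers CAS from 0 to 1 before scanning. A simplified stand-in for that protocol (not the real ClassLoaderData):

    #include <atomic>

    struct FakeCLD {
      std::atomic<int> claimed{0};

      bool try_claim() {
        int expected = 0;
        return claimed.compare_exchange_strong(expected, 1);
      }

      template <typename Closure>
      void oops_do(Closure* cl, bool must_claim) {
        if (must_claim && !try_claim()) {
          return;  // another GC worker already scanned this CLD in this traversal
        }
        // ... apply *cl to every oop owned by this CLD ...
        (void)cl;
      }
    };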

View file

@ -1553,9 +1553,10 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
assert(_collectorState != Idling || _modUnionTable.isAllClear(), assert(_collectorState != Idling || _modUnionTable.isAllClear(),
"_modUnionTable should be clear if the baton was not passed"); "_modUnionTable should be clear if the baton was not passed");
_modUnionTable.clear_all(); _modUnionTable.clear_all();
assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(), assert(_collectorState != Idling || _ct->cld_rem_set()->mod_union_is_clear(),
"mod union for klasses should be clear if the baton was passed"); "mod union for klasses should be clear if the baton was passed");
_ct->klass_rem_set()->clear_mod_union(); _ct->cld_rem_set()->clear_mod_union();
// We must adjust the allocation statistics being maintained // We must adjust the allocation statistics being maintained
// in the free list space. We do so by reading and clearing // in the free list space. We do so by reading and clearing
@ -2025,7 +2026,7 @@ void CMSCollector::gc_prologue(bool full) {
// that information. Tell the young collection to save the union of all // that information. Tell the young collection to save the union of all
// modified klasses. // modified klasses.
if (duringMarking) { if (duringMarking) {
_ct->klass_rem_set()->set_accumulate_modified_oops(true); _ct->cld_rem_set()->set_accumulate_modified_oops(true);
} }
bool registerClosure = duringMarking; bool registerClosure = duringMarking;
@ -2101,7 +2102,7 @@ void CMSCollector::gc_epilogue(bool full) {
assert(haveFreelistLocks(), "must have freelist locks"); assert(haveFreelistLocks(), "must have freelist locks");
assert_lock_strong(bitMapLock()); assert_lock_strong(bitMapLock());
_ct->klass_rem_set()->set_accumulate_modified_oops(false); _ct->cld_rem_set()->set_accumulate_modified_oops(false);
_cmsGen->gc_epilogue_work(full); _cmsGen->gc_epilogue_work(full);
@ -2380,18 +2381,18 @@ void CMSCollector::verify_after_remark_work_1() {
} }
} }
class VerifyKlassOopsKlassClosure : public KlassClosure { class VerifyCLDOopsCLDClosure : public CLDClosure {
class VerifyKlassOopsClosure : public OopClosure { class VerifyCLDOopsClosure : public OopClosure {
CMSBitMap* _bitmap; CMSBitMap* _bitmap;
public: public:
VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { } VerifyCLDOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
void do_oop(oop* p) { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); } void do_oop(oop* p) { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
void do_oop(narrowOop* p) { ShouldNotReachHere(); } void do_oop(narrowOop* p) { ShouldNotReachHere(); }
} _oop_closure; } _oop_closure;
public: public:
VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {} VerifyCLDOopsCLDClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
void do_klass(Klass* k) { void do_cld(ClassLoaderData* cld) {
k->oops_do(&_oop_closure); cld->oops_do(&_oop_closure, false, false);
} }
}; };
@ -2437,8 +2438,8 @@ void CMSCollector::verify_after_remark_work_2() {
assert(verification_mark_stack()->isEmpty(), "Should have been drained"); assert(verification_mark_stack()->isEmpty(), "Should have been drained");
verify_work_stacks_empty(); verify_work_stacks_empty();
VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm()); VerifyCLDOopsCLDClosure verify_cld_oops(verification_mark_bm());
ClassLoaderDataGraph::classes_do(&verify_klass_oops); ClassLoaderDataGraph::cld_do(&verify_cld_oops);
// Marking completed -- now verify that each bit marked in // Marking completed -- now verify that each bit marked in
// verification_mark_bm() is also marked in markBitMap(); flag all // verification_mark_bm() is also marked in markBitMap(); flag all
@ -2911,7 +2912,7 @@ void CMSCollector::checkpointRootsInitialWork() {
" or no bits are set in the gc_prologue before the start of the next " " or no bits are set in the gc_prologue before the start of the next "
"subsequent marking phase."); "subsequent marking phase.");
assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be"); assert(_ct->cld_rem_set()->mod_union_is_clear(), "Must be");
// Save the end of the used_region of the constituent generations // Save the end of the used_region of the constituent generations
// to be used to limit the extent of sweep in each generation. // to be used to limit the extent of sweep in each generation.
@ -3848,7 +3849,7 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
} }
} }
preclean_klasses(&mrias_cl, _cmsGen->freelistLock()); preclean_cld(&mrias_cl, _cmsGen->freelistLock());
curNumCards = preclean_card_table(_cmsGen, &smoac_cl); curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
cumNumCards += curNumCards; cumNumCards += curNumCards;
@ -4067,21 +4068,21 @@ size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
return cumNumDirtyCards; return cumNumDirtyCards;
} }
class PrecleanKlassClosure : public KlassClosure { class PrecleanCLDClosure : public CLDClosure {
KlassToOopClosure _cm_klass_closure; MetadataAwareOopsInGenClosure* _cm_closure;
public: public:
PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {} PrecleanCLDClosure(MetadataAwareOopsInGenClosure* oop_closure) : _cm_closure(oop_closure) {}
void do_klass(Klass* k) { void do_cld(ClassLoaderData* cld) {
if (k->has_accumulated_modified_oops()) { if (cld->has_accumulated_modified_oops()) {
k->clear_accumulated_modified_oops(); cld->clear_accumulated_modified_oops();
_cm_klass_closure.do_klass(k); _cm_closure->do_cld(cld);
} }
} }
}; };
// The freelist lock is needed to prevent asserts; is it really needed? // The freelist lock is needed to prevent asserts; is it really needed?
void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) { void CMSCollector::preclean_cld(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
cl->set_freelistLock(freelistLock); cl->set_freelistLock(freelistLock);
@ -4089,8 +4090,8 @@ void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freel
// SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean? // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
// SSS: We should probably check if precleaning should be aborted, at suitable intervals? // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
PrecleanKlassClosure preclean_klass_closure(cl); PrecleanCLDClosure preclean_closure(cl);
ClassLoaderDataGraph::classes_do(&preclean_klass_closure); ClassLoaderDataGraph::cld_do(&preclean_closure);
verify_work_stacks_empty(); verify_work_stacks_empty();
verify_overflow_empty(); verify_overflow_empty();
@ -4250,7 +4251,7 @@ void CMSCollector::checkpointRootsFinalWork() {
// Call isAllClear() under bitMapLock // Call isAllClear() under bitMapLock
assert(_modUnionTable.isAllClear(), assert(_modUnionTable.isAllClear(),
"Should be clear by end of the final marking"); "Should be clear by end of the final marking");
assert(_ct->klass_rem_set()->mod_union_is_clear(), assert(_ct->cld_rem_set()->mod_union_is_clear(),
"Should be clear by end of the final marking"); "Should be clear by end of the final marking");
} }
@ -4332,26 +4333,26 @@ class CMSParRemarkTask: public CMSParMarkTask {
void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl, int* seed); void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl, int* seed);
}; };
class RemarkKlassClosure : public KlassClosure { class RemarkCLDClosure : public CLDClosure {
KlassToOopClosure _cm_klass_closure; CLDToOopClosure _cm_closure;
public: public:
RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {} RemarkCLDClosure(OopClosure* oop_closure) : _cm_closure(oop_closure) {}
void do_klass(Klass* k) { void do_cld(ClassLoaderData* cld) {
// Check if we have modified any oops in the Klass during the concurrent marking. // Check if we have modified any oops in the CLD during the concurrent marking.
if (k->has_accumulated_modified_oops()) { if (cld->has_accumulated_modified_oops()) {
k->clear_accumulated_modified_oops(); cld->clear_accumulated_modified_oops();
// We could have transferred the current modified marks to the accumulated marks, // We could have transferred the current modified marks to the accumulated marks,
// like we do with the Card Table to Mod Union Table. But it's not really necessary. // like we do with the Card Table to Mod Union Table. But it's not really necessary.
} else if (k->has_modified_oops()) { } else if (cld->has_modified_oops()) {
// Don't clear anything, this info is needed by the next young collection. // Don't clear anything, this info is needed by the next young collection.
} else { } else {
// No modified oops in the Klass. // No modified oops in the ClassLoaderData.
return; return;
} }
// The klass has modified fields; we need to scan the klass. // The klass has modified fields; we need to scan the klass.
_cm_klass_closure.do_klass(k); _cm_closure.do_cld(cld);
} }
}; };
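The three-way branch in RemarkCLDClosure::do_cld above is the heart of the change; stripped of the surrounding machinery it classifies each CLD like this (a sketch, with invented names):

    enum class RemarkAction { ScanAndClear, ScanKeepDirty, Skip };

    inline RemarkAction classify(bool accumulated, bool modified) {
      if (accumulated) return RemarkAction::ScanAndClear;   // dirtied during an earlier young GC
      if (modified)    return RemarkAction::ScanKeepDirty;  // the next young GC still needs the bit
      return RemarkAction::Skip;                            // no young refs recorded at all
    }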
@ -4439,24 +4440,24 @@ void CMSParRemarkTask::work(uint worker_id) {
log_trace(gc, task)("Finished unhandled CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); log_trace(gc, task)("Finished unhandled CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
} }
// ---------- dirty klass scanning ---------- // We might have added oops to ClassLoaderData::_handles during the
// concurrent marking phase. These oops do not always point to newly allocated objects
// that are guaranteed to be kept alive. Hence,
// we do have to revisit the _handles block during the remark phase.
// ---------- dirty CLD scanning ----------
if (worker_id == 0) { // Single threaded at the moment. if (worker_id == 0) { // Single threaded at the moment.
_timer.reset(); _timer.reset();
_timer.start(); _timer.start();
// Scan all classes that were dirtied during the concurrent marking phase. // Scan all classes that were dirtied during the concurrent marking phase.
RemarkKlassClosure remark_klass_closure(&par_mrias_cl); RemarkCLDClosure remark_closure(&par_mrias_cl);
ClassLoaderDataGraph::classes_do(&remark_klass_closure); ClassLoaderDataGraph::cld_do(&remark_closure);
_timer.stop(); _timer.stop();
log_trace(gc, task)("Finished dirty klass scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); log_trace(gc, task)("Finished dirty CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
} }
// We might have added oops to ClassLoaderData::_handles during the
// concurrent marking phase. These oops point to newly allocated objects
// that are guaranteed to be kept alive. Either by the direct allocation
// code, or when the young collector processes the roots. Hence,
// we don't have to revisit the _handles block during the remark phase.
// ---------- rescan dirty cards ------------ // ---------- rescan dirty cards ------------
_timer.reset(); _timer.reset();
@ -4981,23 +4982,21 @@ void CMSCollector::do_remark_non_parallel() {
verify_work_stacks_empty(); verify_work_stacks_empty();
} }
// We might have added oops to ClassLoaderData::_handles during the
// concurrent marking phase. These oops do not point to newly allocated objects
// that are guaranteed to be kept alive. Hence,
// we do have to revisit the _handles block during the remark phase.
{ {
GCTraceTime(Trace, gc, phases) t("Dirty Klass Scan", _gc_timer_cm); GCTraceTime(Trace, gc, phases) t("Dirty CLD Scan", _gc_timer_cm);
verify_work_stacks_empty(); verify_work_stacks_empty();
RemarkKlassClosure remark_klass_closure(&mrias_cl); RemarkCLDClosure remark_closure(&mrias_cl);
ClassLoaderDataGraph::classes_do(&remark_klass_closure); ClassLoaderDataGraph::cld_do(&remark_closure);
verify_work_stacks_empty(); verify_work_stacks_empty();
} }
// We might have added oops to ClassLoaderData::_handles during the
// concurrent marking phase. These oops point to newly allocated objects
// that are guaranteed to be kept alive. Either by the direct allocation
// code, or when the young collector processes the roots. Hence,
// we don't have to revisit the _handles block during the remark phase.
verify_work_stacks_empty(); verify_work_stacks_empty();
// Restore evacuated mark words, if any, used for overflow list links // Restore evacuated mark words, if any, used for overflow list links
restore_preserved_marks_if_any(); restore_preserved_marks_if_any();

View file

@ -777,7 +777,7 @@ class CMSCollector: public CHeapObj<mtGC> {
// Does precleaning work, returning a quantity indicative of // Does precleaning work, returning a quantity indicative of
// the amount of "useful work" done. // the amount of "useful work" done.
size_t preclean_work(bool clean_refs, bool clean_survivors); size_t preclean_work(bool clean_refs, bool clean_survivors);
void preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock); void preclean_cld(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock);
void abortable_preclean(); // Preclean while looking for possible abort void abortable_preclean(); // Preclean while looking for possible abort
void initialize_sequential_subtasks_for_young_gen_rescan(int i); void initialize_sequential_subtasks_for_young_gen_rescan(int i);
// Helper function for above; merge-sorts the per-thread plab samples // Helper function for above; merge-sorts the per-thread plab samples

View file

@ -493,7 +493,7 @@ void ParScanThreadStateSet::flush() {
ParScanClosure::ParScanClosure(ParNewGeneration* g, ParScanClosure::ParScanClosure(ParNewGeneration* g,
ParScanThreadState* par_scan_state) : ParScanThreadState* par_scan_state) :
OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) { OopsInClassLoaderDataOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) {
_boundary = _g->reserved().end(); _boundary = _g->reserved().end();
} }
@ -601,11 +601,8 @@ void ParNewGenTask::work(uint worker_id) {
par_scan_state.set_young_old_boundary(_young_old_boundary); par_scan_state.set_young_old_boundary(_young_old_boundary);
KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(), CLDScanClosure cld_scan_closure(&par_scan_state.to_space_root_closure(),
gch->rem_set()->klass_rem_set()); gch->rem_set()->cld_rem_set()->accumulate_modified_oops());
CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
&par_scan_state.to_space_root_closure(),
false);
par_scan_state.start_strong_roots(); par_scan_state.start_strong_roots();
gch->young_process_roots(_strong_roots_scope, gch->young_process_roots(_strong_roots_scope,

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -37,7 +37,7 @@ typedef Padded<OopTaskQueue> ObjToScanQueue;
typedef GenericTaskQueueSet<ObjToScanQueue, mtGC> ObjToScanQueueSet; typedef GenericTaskQueueSet<ObjToScanQueue, mtGC> ObjToScanQueueSet;
class ParallelTaskTerminator; class ParallelTaskTerminator;
class ParScanClosure: public OopsInKlassOrGenClosure { class ParScanClosure: public OopsInClassLoaderDataOrGenClosure {
protected: protected:
ParScanThreadState* _par_scan_state; ParScanThreadState* _par_scan_state;
ParNewGeneration* _g; ParNewGeneration* _g;

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -126,8 +126,8 @@ inline void ParScanClosure::do_oop_work(T* p,
(void)_par_scan_state->trim_queues(10 * ParallelGCThreads); (void)_par_scan_state->trim_queues(10 * ParallelGCThreads);
} }
} }
if (is_scanning_a_klass()) { if (is_scanning_a_cld()) {
do_klass_barrier(); do_cld_barrier();
} else if (gc_barrier) { } else if (gc_barrier) {
// Now call parent closure // Now call parent closure
par_do_barrier(p); par_do_barrier(p);

View file

@ -63,7 +63,6 @@ class HRRSCleanupTask;
class GenerationSpec; class GenerationSpec;
class G1ParScanThreadState; class G1ParScanThreadState;
class G1ParScanThreadStateSet; class G1ParScanThreadStateSet;
class G1KlassScanClosure;
class G1ParScanThreadState; class G1ParScanThreadState;
class ObjectClosure; class ObjectClosure;
class SpaceClosure; class SpaceClosure;

View file

@ -161,18 +161,18 @@ class YoungRefCounterClosure : public OopClosure {
void reset_count() { _count = 0; }; void reset_count() { _count = 0; };
}; };
class VerifyKlassClosure: public KlassClosure { class VerifyCLDClosure: public CLDClosure {
YoungRefCounterClosure _young_ref_counter_closure; YoungRefCounterClosure _young_ref_counter_closure;
OopClosure *_oop_closure; OopClosure *_oop_closure;
public: public:
VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {} VerifyCLDClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
void do_klass(Klass* k) { void do_cld(ClassLoaderData* cld) {
k->oops_do(_oop_closure); cld->oops_do(_oop_closure, false);
_young_ref_counter_closure.reset_count(); _young_ref_counter_closure.reset_count();
k->oops_do(&_young_ref_counter_closure); cld->oops_do(&_young_ref_counter_closure, false);
if (_young_ref_counter_closure.count() > 0) { if (_young_ref_counter_closure.count() > 0) {
guarantee(k->has_modified_oops(), "Klass " PTR_FORMAT " has young refs but is not dirty.", p2i(k)); guarantee(cld->has_modified_oops(), "CLD " PTR_FORMAT " has %d young refs but is not dirty.", p2i(cld), _young_ref_counter_closure.count());
} }
} }
}; };
@ -390,8 +390,7 @@ void G1HeapVerifier::verify(VerifyOption vo) {
log_debug(gc, verify)("Roots"); log_debug(gc, verify)("Roots");
VerifyRootsClosure rootsCl(vo); VerifyRootsClosure rootsCl(vo);
VerifyKlassClosure klassCl(_g1h, &rootsCl); VerifyCLDClosure cldCl(_g1h, &rootsCl);
CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
// We apply the relevant closures to all the oops in the // We apply the relevant closures to all the oops in the
// system dictionary, class loader data graph, the string table // system dictionary, class loader data graph, the string table

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -34,7 +34,7 @@ G1ParCopyHelper::G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par
_g1(g1), _g1(g1),
_par_scan_state(par_scan_state), _par_scan_state(par_scan_state),
_worker_id(par_scan_state->worker_id()), _worker_id(par_scan_state->worker_id()),
_scanned_klass(NULL), _scanned_cld(NULL),
_cm(_g1->concurrent_mark()) _cm(_g1->concurrent_mark())
{ } { }
@ -42,20 +42,20 @@ G1ScanClosureBase::G1ScanClosureBase(G1CollectedHeap* g1, G1ParScanThreadState*
_g1(g1), _par_scan_state(par_scan_state), _from(NULL) _g1(g1), _par_scan_state(par_scan_state), _from(NULL)
{ } { }
void G1KlassScanClosure::do_klass(Klass* klass) { void G1CLDScanClosure::do_cld(ClassLoaderData* cld) {
// If the klass has not been dirtied we know that there are // If the class loader data has not been dirtied we know that there are
// no references into the young gen and we can skip it. // no references into the young gen and we can skip it.
if (!_process_only_dirty || klass->has_modified_oops()) { if (!_process_only_dirty || cld->has_modified_oops()) {
// Clean the klass since we're going to scavenge all the metadata.
klass->clear_modified_oops();
// Tell the closure that this klass is the Klass to scavenge // Tell the closure that this class loader data is the CLD to scavenge
// and is the one to dirty if oops are left pointing into the young gen. // and is the one to dirty if oops are left pointing into the young gen.
_closure->set_scanned_klass(klass); _closure->set_scanned_cld(cld);
klass->oops_do(_closure); // Clean the cld since we're going to scavenge all the metadata.
// Clear modified oops only if this cld is claimed.
cld->oops_do(_closure, _must_claim, /*clear_modified_oops*/true);
_closure->set_scanned_klass(NULL); _closure->set_scanned_cld(NULL);
} }
_count++; _count++;
} }
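The set_scanned_cld(cld) ... set_scanned_cld(NULL) bracket above tells the copy closure which CLD to redirty if a scavenged oop still points into the young gen. A scoped guard would express the same pairing; this is only an illustrative alternative, not something the patch introduces:

    #include <cstddef>

    // Stand-ins for the HotSpot types referenced above.
    struct ClassLoaderData;
    struct CopyHelper {
      ClassLoaderData* scanned_cld = nullptr;
      void set_scanned_cld(ClassLoaderData* cld) { scanned_cld = cld; }
    };

    class ScannedCLDMark {
      CopyHelper* _closure;
     public:
      ScannedCLDMark(CopyHelper* closure, ClassLoaderData* cld) : _closure(closure) {
        _closure->set_scanned_cld(cld);                            // mark on entry
      }
      ~ScannedCLDMark() { _closure->set_scanned_cld(nullptr); }    // unmark on every exit path
    };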

View file

@ -107,7 +107,7 @@ protected:
G1CollectedHeap* _g1; G1CollectedHeap* _g1;
G1ParScanThreadState* _par_scan_state; G1ParScanThreadState* _par_scan_state;
uint _worker_id; // Cache value from par_scan_state. uint _worker_id; // Cache value from par_scan_state.
Klass* _scanned_klass; ClassLoaderData* _scanned_cld;
G1ConcurrentMark* _cm; G1ConcurrentMark* _cm;
// Mark the object if it's not already marked. This is used to mark // Mark the object if it's not already marked. This is used to mark
@ -124,13 +124,13 @@ protected:
~G1ParCopyHelper() { } ~G1ParCopyHelper() { }
public: public:
void set_scanned_klass(Klass* k) { _scanned_klass = k; } void set_scanned_cld(ClassLoaderData* cld) { _scanned_cld = cld; }
template <class T> inline void do_klass_barrier(T* p, oop new_obj); inline void do_cld_barrier(oop new_obj);
}; };
enum G1Barrier { enum G1Barrier {
G1BarrierNone, G1BarrierNone,
G1BarrierKlass G1BarrierCLD
}; };
enum G1Mark { enum G1Mark {
@ -150,14 +150,16 @@ public:
virtual void do_oop(narrowOop* p) { do_oop_work(p); } virtual void do_oop(narrowOop* p) { do_oop_work(p); }
}; };
class G1KlassScanClosure : public KlassClosure { class G1CLDScanClosure : public CLDClosure {
G1ParCopyHelper* _closure; G1ParCopyHelper* _closure;
bool _process_only_dirty; bool _process_only_dirty;
bool _must_claim;
int _count; int _count;
public: public:
G1KlassScanClosure(G1ParCopyHelper* closure, bool process_only_dirty) G1CLDScanClosure(G1ParCopyHelper* closure,
: _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {} bool process_only_dirty, bool must_claim)
void do_klass(Klass* klass); : _process_only_dirty(process_only_dirty), _must_claim(must_claim), _closure(closure), _count(0) {}
void do_cld(ClassLoaderData* cld);
}; };
// Closure for iterating over object fields during concurrent marking // Closure for iterating over object fields during concurrent marking

View file

@ -195,10 +195,9 @@ inline void G1ScanObjsDuringScanRSClosure::do_oop_nv(T* p) {
} }
} }
template <class T> void G1ParCopyHelper::do_cld_barrier(oop new_obj) {
void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
if (_g1->heap_region_containing(new_obj)->is_young()) { if (_g1->heap_region_containing(new_obj)->is_young()) {
_scanned_klass->record_modified_oops(); _scanned_cld->record_modified_oops();
} }
} }
@ -249,8 +248,8 @@ void G1ParCopyClosure<barrier, do_mark_object, use_ext>::do_oop_work(T* p) {
mark_forwarded_object(obj, forwardee); mark_forwarded_object(obj, forwardee);
} }
if (barrier == G1BarrierKlass) { if (barrier == G1BarrierCLD) {
do_klass_barrier(p, forwardee); do_cld_barrier(forwardee);
} }
} else { } else {
if (state.is_humongous()) { if (state.is_humongous()) {
@ -267,5 +266,4 @@ void G1ParCopyClosure<barrier, do_mark_object, use_ext>::do_oop_work(T* p) {
} }
} }
} }
#endif // SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP #endif // SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -35,17 +35,16 @@ template <G1Mark Mark, bool use_ext = false>
class G1SharedClosures VALUE_OBJ_CLASS_SPEC { class G1SharedClosures VALUE_OBJ_CLASS_SPEC {
public: public:
G1ParCopyClosure<G1BarrierNone, Mark, use_ext> _oops; G1ParCopyClosure<G1BarrierNone, Mark, use_ext> _oops;
G1ParCopyClosure<G1BarrierKlass, Mark, use_ext> _oop_in_klass; G1ParCopyClosure<G1BarrierCLD, Mark, use_ext> _oops_in_cld;
G1KlassScanClosure _klass_in_cld_closure;
CLDToKlassAndOopClosure _clds; G1CLDScanClosure _clds;
G1CodeBlobClosure _codeblobs; G1CodeBlobClosure _codeblobs;
BufferingOopClosure _buffered_oops; BufferingOopClosure _buffered_oops;
G1SharedClosures(G1CollectedHeap* g1h, G1ParScanThreadState* pss, bool process_only_dirty_klasses, bool must_claim_cld) : G1SharedClosures(G1CollectedHeap* g1h, G1ParScanThreadState* pss, bool process_only_dirty, bool must_claim_cld) :
_oops(g1h, pss), _oops(g1h, pss),
_oop_in_klass(g1h, pss), _oops_in_cld(g1h, pss),
_klass_in_cld_closure(&_oop_in_klass, process_only_dirty_klasses), _clds(&_oops_in_cld, process_only_dirty, must_claim_cld),
_clds(&_klass_in_cld_closure, &_oops, must_claim_cld),
_codeblobs(&_oops), _codeblobs(&_oops),
_buffered_oops(&_oops) {} _buffered_oops(&_oops) {}
}; };

View file

@ -81,7 +81,6 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
ParCompactionManager* cm = ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which); ParCompactionManager::gc_thread_compaction_manager(which);
ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm); ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
ParCompactionManager::FollowKlassClosure follow_klass_closure(&mark_and_push_closure);
switch (_root_type) { switch (_root_type) {
case universe: case universe:
@ -117,7 +116,7 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
break; break;
case class_loader_data: case class_loader_data:
ClassLoaderDataGraph::always_strong_oops_do(&mark_and_push_closure, &follow_klass_closure, true); ClassLoaderDataGraph::always_strong_oops_do(&mark_and_push_closure, true);
break; break;
case code_cache: case code_cache:

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -196,17 +196,6 @@ private:
FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { } FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
virtual void do_void(); virtual void do_void();
}; };
// The one and only place to start following the classes.
// Should only be applied to the ClassLoaderData klasses list.
class FollowKlassClosure : public KlassClosure {
private:
MarkAndPushClosure* _mark_and_push_closure;
public:
FollowKlassClosure(MarkAndPushClosure* mark_and_push_closure) :
_mark_and_push_closure(mark_and_push_closure) { }
void do_klass(Klass* klass);
};
}; };
inline ParCompactionManager* ParCompactionManager::manager_array(uint index) { inline ParCompactionManager* ParCompactionManager::manager_array(uint index) {

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2010, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2010, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -98,15 +98,10 @@ inline void ParCompactionManager::FollowStackClosure::do_void() {
_compaction_manager->follow_marking_stacks(); _compaction_manager->follow_marking_stacks();
} }
inline void ParCompactionManager::FollowKlassClosure::do_klass(Klass* klass) {
klass->oops_do(_mark_and_push_closure);
}
inline void ParCompactionManager::follow_class_loader(ClassLoaderData* cld) { inline void ParCompactionManager::follow_class_loader(ClassLoaderData* cld) {
MarkAndPushClosure mark_and_push_closure(this); MarkAndPushClosure mark_and_push_closure(this);
FollowKlassClosure follow_klass_closure(&mark_and_push_closure);
cld->oops_do(&mark_and_push_closure, &follow_klass_closure, true); cld->oops_do(&mark_and_push_closure, true);
} }
inline void ParCompactionManager::follow_contents(oop obj) { inline void ParCompactionManager::follow_contents(oop obj) {

View file

@ -838,11 +838,6 @@ PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); } bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) {
PSParallelCompact::AdjustPointerClosure closure(_cm);
klass->oops_do(&closure);
}
void PSParallelCompact::post_initialize() { void PSParallelCompact::post_initialize() {
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
MemRegion mr = heap->reserved_region(); MemRegion mr = heap->reserved_region();
@ -2162,7 +2157,6 @@ void PSParallelCompact::adjust_roots(ParCompactionManager* cm) {
ClassLoaderDataGraph::clear_claimed_marks(); ClassLoaderDataGraph::clear_claimed_marks();
PSParallelCompact::AdjustPointerClosure oop_closure(cm); PSParallelCompact::AdjustPointerClosure oop_closure(cm);
PSParallelCompact::AdjustKlassClosure klass_closure(cm);
// General strong roots. // General strong roots.
Universe::oops_do(&oop_closure); Universe::oops_do(&oop_closure);
@ -2172,7 +2166,7 @@ void PSParallelCompact::adjust_roots(ParCompactionManager* cm) {
Management::oops_do(&oop_closure); Management::oops_do(&oop_closure);
JvmtiExport::oops_do(&oop_closure); JvmtiExport::oops_do(&oop_closure);
SystemDictionary::oops_do(&oop_closure); SystemDictionary::oops_do(&oop_closure);
ClassLoaderDataGraph::oops_do(&oop_closure, &klass_closure, true); ClassLoaderDataGraph::oops_do(&oop_closure, true);
// Now adjust pointers in remaining weak roots. (All of which should // Now adjust pointers in remaining weak roots. (All of which should
// have been cleared if they pointed to non-surviving objects.) // have been cleared if they pointed to non-surviving objects.)

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -85,15 +85,15 @@ class PSRootsClosure: public OopClosure {
typedef PSRootsClosure</*promote_immediately=*/false> PSScavengeRootsClosure; typedef PSRootsClosure</*promote_immediately=*/false> PSScavengeRootsClosure;
typedef PSRootsClosure</*promote_immediately=*/true> PSPromoteRootsClosure; typedef PSRootsClosure</*promote_immediately=*/true> PSPromoteRootsClosure;
// Scavenges a single oop in a Klass. // Scavenges a single oop in a ClassLoaderData.
class PSScavengeFromKlassClosure: public OopClosure { class PSScavengeFromCLDClosure: public OopClosure {
private: private:
PSPromotionManager* _pm; PSPromotionManager* _pm;
// Used to redirty a scanned klass if it has oops // Used to redirty a scanned cld if it has oops
// pointing to the young generation after being scanned. // pointing to the young generation after being scanned.
Klass* _scanned_klass; ClassLoaderData* _scanned_cld;
public: public:
PSScavengeFromKlassClosure(PSPromotionManager* pm) : _pm(pm), _scanned_klass(NULL) { } PSScavengeFromCLDClosure(PSPromotionManager* pm) : _pm(pm), _scanned_cld(NULL) { }
void do_oop(narrowOop* p) { ShouldNotReachHere(); } void do_oop(narrowOop* p) { ShouldNotReachHere(); }
void do_oop(oop* p) { void do_oop(oop* p) {
ParallelScavengeHeap* psh = ParallelScavengeHeap::heap(); ParallelScavengeHeap* psh = ParallelScavengeHeap::heap();
@ -111,48 +111,46 @@ class PSScavengeFromKlassClosure: public OopClosure {
oopDesc::encode_store_heap_oop_not_null(p, new_obj); oopDesc::encode_store_heap_oop_not_null(p, new_obj);
if (PSScavenge::is_obj_in_young(new_obj)) { if (PSScavenge::is_obj_in_young(new_obj)) {
do_klass_barrier(); do_cld_barrier();
} }
} }
} }
void set_scanned_klass(Klass* klass) { void set_scanned_cld(ClassLoaderData* cld) {
assert(_scanned_klass == NULL || klass == NULL, "Should only be handling one klass at a time"); assert(_scanned_cld == NULL || cld == NULL, "Should only be handling one cld at a time");
_scanned_klass = klass; _scanned_cld = cld;
} }
private: private:
void do_klass_barrier() { void do_cld_barrier() {
assert(_scanned_klass != NULL, "Should not be called without having a scanned klass"); assert(_scanned_cld != NULL, "Should not be called without having a scanned cld");
_scanned_klass->record_modified_oops(); _scanned_cld->record_modified_oops();
} }
}; };
// Scavenges the oop in a Klass. // Scavenges the oop in a ClassLoaderData.
class PSScavengeKlassClosure: public KlassClosure { class PSScavengeCLDClosure: public CLDClosure {
private: private:
PSScavengeFromKlassClosure _oop_closure; PSScavengeFromCLDClosure _oop_closure;
protected: protected:
public: public:
PSScavengeKlassClosure(PSPromotionManager* pm) : _oop_closure(pm) { } PSScavengeCLDClosure(PSPromotionManager* pm) : _oop_closure(pm) { }
void do_klass(Klass* klass) { void do_cld(ClassLoaderData* cld) {
// If the klass has not been dirtied we know that there are // If the cld has not been dirtied we know that there are
// no references into the young gen and we can skip it. // no references into the young gen and we can skip it.
if (klass->has_modified_oops()) { if (cld->has_modified_oops()) {
// Clean the klass since we're going to scavenge all the metadata. // Set up the promotion manager to redirty this cld
klass->clear_modified_oops();
// Setup the promotion manager to redirty this klass
// if references are left in the young gen. // if references are left in the young gen.
_oop_closure.set_scanned_klass(klass); _oop_closure.set_scanned_cld(cld);
klass->oops_do(&_oop_closure); // Clean the cld since we're going to scavenge all the metadata.
cld->oops_do(&_oop_closure, false, /*clear_modified_oops*/true);
_oop_closure.set_scanned_klass(NULL); _oop_closure.set_scanned_cld(NULL);
} }
} }
}; };
#endif // SHARE_VM_GC_PARALLEL_PSSCAVENGE_INLINE_HPP #endif // SHARE_VM_GC_PARALLEL_PSSCAVENGE_INLINE_HPP

View file

@ -79,8 +79,8 @@ void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) {
case class_loader_data: case class_loader_data:
{ {
PSScavengeKlassClosure klass_closure(pm); PSScavengeCLDClosure cld_closure(pm);
ClassLoaderDataGraph::oops_do(&roots_closure, &klass_closure, false); ClassLoaderDataGraph::cld_do(&cld_closure);
} }
break; break;

View file

@ -121,7 +121,7 @@ void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
} }
ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) : ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier) OopsInClassLoaderDataOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{ {
_boundary = _g->reserved().end(); _boundary = _g->reserved().end();
} }
@ -130,7 +130,7 @@ void ScanClosure::do_oop(oop* p) { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); } void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) : FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier) OopsInClassLoaderDataOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{ {
_boundary = _g->reserved().end(); _boundary = _g->reserved().end();
} }
@ -138,30 +138,28 @@ FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
void FastScanClosure::do_oop(oop* p) { FastScanClosure::do_oop_work(p); } void FastScanClosure::do_oop(oop* p) { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); } void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
void KlassScanClosure::do_klass(Klass* klass) { void CLDScanClosure::do_cld(ClassLoaderData* cld) {
NOT_PRODUCT(ResourceMark rm); NOT_PRODUCT(ResourceMark rm);
log_develop_trace(gc, scavenge)("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s", log_develop_trace(gc, scavenge)("CLDScanClosure::do_cld " PTR_FORMAT ", %s, dirty: %s",
p2i(klass), p2i(cld),
klass->external_name(), cld->loader_name(),
klass->has_modified_oops() ? "true" : "false"); cld->has_modified_oops() ? "true" : "false");
// If the klass has not been dirtied we know that there are // If the cld has not been dirtied we know that there are
// no references into the young gen and we can skip it. // no references into the young gen and we can skip it.
if (klass->has_modified_oops()) { if (cld->has_modified_oops()) {
if (_accumulate_modified_oops) { if (_accumulate_modified_oops) {
klass->accumulate_modified_oops(); cld->accumulate_modified_oops();
} }
// Clear this state since we're going to scavenge all the metadata. // Tell the closure which CLD is being scanned so that it can be dirtied
klass->clear_modified_oops();
// Tell the closure which Klass is being scanned so that it can be dirtied
// if oops are left pointing into the young gen. // if oops are left pointing into the young gen.
_scavenge_closure->set_scanned_klass(klass); _scavenge_closure->set_scanned_cld(cld);
klass->oops_do(_scavenge_closure); // Clean the cld since we're going to scavenge all the metadata.
cld->oops_do(_scavenge_closure, false, /*clear_modified_oops*/true);
_scavenge_closure->set_scanned_klass(NULL); _scavenge_closure->set_scanned_cld(NULL);
} }
} }
@ -177,12 +175,6 @@ void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(
void FilteringClosure::do_oop(oop* p) { FilteringClosure::do_oop_work(p); } void FilteringClosure::do_oop(oop* p) { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); } void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
KlassRemSet* klass_rem_set)
: _scavenge_closure(scavenge_closure),
_accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}
DefNewGeneration::DefNewGeneration(ReservedSpace rs, DefNewGeneration::DefNewGeneration(ReservedSpace rs,
size_t initial_size, size_t initial_size,
const char* policy) const char* policy)
@ -629,11 +621,8 @@ void DefNewGeneration::collect(bool full,
FastScanClosure fsc_with_no_gc_barrier(this, false); FastScanClosure fsc_with_no_gc_barrier(this, false);
FastScanClosure fsc_with_gc_barrier(this, true); FastScanClosure fsc_with_gc_barrier(this, true);
KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier, CLDScanClosure cld_scan_closure(&fsc_with_no_gc_barrier,
gch->rem_set()->klass_rem_set()); gch->rem_set()->cld_rem_set()->accumulate_modified_oops());
CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
&fsc_with_no_gc_barrier,
false);
set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier); set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
FastEvacuateFollowersClosure evacuate_followers(gch, FastEvacuateFollowersClosure evacuate_followers(gch,

View file

@ -34,16 +34,16 @@
#include "runtime/os.hpp" #include "runtime/os.hpp"
#include "utilities/macros.hpp" #include "utilities/macros.hpp"
class HasAccumulatedModifiedOopsClosure : public KlassClosure { class HasAccumulatedModifiedOopsClosure : public CLDClosure {
bool _found; bool _found;
public: public:
HasAccumulatedModifiedOopsClosure() : _found(false) {} HasAccumulatedModifiedOopsClosure() : _found(false) {}
void do_klass(Klass* klass) { void do_cld(ClassLoaderData* cld) {
if (_found) { if (_found) {
return; return;
} }
if (klass->has_accumulated_modified_oops()) { if (cld->has_accumulated_modified_oops()) {
_found = true; _found = true;
} }
} }
@ -52,28 +52,29 @@ class HasAccumulatedModifiedOopsClosure : public KlassClosure {
} }
}; };
bool KlassRemSet::mod_union_is_clear() { bool CLDRemSet::mod_union_is_clear() {
HasAccumulatedModifiedOopsClosure closure; HasAccumulatedModifiedOopsClosure closure;
ClassLoaderDataGraph::classes_do(&closure); ClassLoaderDataGraph::cld_do(&closure);
return !closure.found(); return !closure.found();
} }
class ClearKlassModUnionClosure : public KlassClosure { class ClearCLDModUnionClosure : public CLDClosure {
public: public:
void do_klass(Klass* klass) { void do_cld(ClassLoaderData* cld) {
if (klass->has_accumulated_modified_oops()) { if (cld->has_accumulated_modified_oops()) {
klass->clear_accumulated_modified_oops(); cld->clear_accumulated_modified_oops();
} }
} }
}; };
void KlassRemSet::clear_mod_union() { void CLDRemSet::clear_mod_union() {
ClearKlassModUnionClosure closure; ClearCLDModUnionClosure closure;
ClassLoaderDataGraph::classes_do(&closure); ClassLoaderDataGraph::cld_do(&closure);
} }
CardTableRS::CardTableRS(MemRegion whole_heap) : CardTableRS::CardTableRS(MemRegion whole_heap) :
_bs(NULL), _bs(NULL),
_cur_youngergen_card_val(youngergenP1_card) _cur_youngergen_card_val(youngergenP1_card)
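Both helpers above follow the same shape: derive from CLDClosure, then let ClassLoaderDataGraph::cld_do drive the visit. A minimal imitation of that pattern with simplified stand-in types:

    #include <vector>

    struct CLD { bool accumulated_modified = false; };

    struct CLDClosure {
      virtual void do_cld(CLD* cld) = 0;
      virtual ~CLDClosure() {}
    };

    struct Graph {
      std::vector<CLD*> clds;
      void cld_do(CLDClosure* cl) {
        for (CLD* cld : clds) cl->do_cld(cld);  // visit every loader's data once
      }
    };

    struct ClearAccumulated : CLDClosure {
      void do_cld(CLD* cld) override { cld->accumulated_modified = false; }
    };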

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -31,11 +31,11 @@
class Space; class Space;
class OopsInGenClosure; class OopsInGenClosure;
// Helper to remember modified oops in all klasses. // Helper to remember modified oops in all clds.
class KlassRemSet { class CLDRemSet {
bool _accumulate_modified_oops; bool _accumulate_modified_oops;
public: public:
KlassRemSet() : _accumulate_modified_oops(false) {} CLDRemSet() : _accumulate_modified_oops(false) {}
void set_accumulate_modified_oops(bool value) { _accumulate_modified_oops = value; } void set_accumulate_modified_oops(bool value) { _accumulate_modified_oops = value; }
bool accumulate_modified_oops() { return _accumulate_modified_oops; } bool accumulate_modified_oops() { return _accumulate_modified_oops; }
bool mod_union_is_clear(); bool mod_union_is_clear();
@ -64,7 +64,7 @@ class CardTableRS: public CHeapObj<mtGC> {
return CardTableModRefBSForCTRS::card_is_dirty_wrt_gen_iter(cv); return CardTableModRefBSForCTRS::card_is_dirty_wrt_gen_iter(cv);
} }
KlassRemSet _klass_rem_set; CLDRemSet _cld_rem_set;
BarrierSet* _bs; BarrierSet* _bs;
CardTableModRefBSForCTRS* _ct_bs; CardTableModRefBSForCTRS* _ct_bs;
@ -121,7 +121,7 @@ public:
// Set the barrier set. // Set the barrier set.
void set_bs(BarrierSet* bs) { _bs = bs; } void set_bs(BarrierSet* bs) { _bs = bs; }
KlassRemSet* klass_rem_set() { return &_klass_rem_set; } CLDRemSet* cld_rem_set() { return &_cld_rem_set; }
CardTableModRefBSForCTRS* ct_bs() { return _ct_bs; } CardTableModRefBSForCTRS* ct_bs() { return _ct_bs; }

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -81,24 +81,25 @@ class OopsInGenClosure : public ExtendedOopClosure {
}; };
// Super class for scan closures. It contains code to dirty scanned Klasses. // Super class for scan closures. It contains code to dirty scanned class loader data.
class OopsInKlassOrGenClosure: public OopsInGenClosure { class OopsInClassLoaderDataOrGenClosure: public OopsInGenClosure {
Klass* _scanned_klass; ClassLoaderData* _scanned_cld;
public: public:
OopsInKlassOrGenClosure(Generation* g) : OopsInGenClosure(g), _scanned_klass(NULL) {} OopsInClassLoaderDataOrGenClosure(Generation* g) : OopsInGenClosure(g), _scanned_cld(NULL) {}
void set_scanned_klass(Klass* k) { void set_scanned_cld(ClassLoaderData* cld) {
assert(k == NULL || _scanned_klass == NULL, "Must be"); assert(cld == NULL || _scanned_cld == NULL, "Must be");
_scanned_klass = k; _scanned_cld = cld;
} }
bool is_scanning_a_klass() { return _scanned_klass != NULL; } bool is_scanning_a_cld() { return _scanned_cld != NULL; }
void do_klass_barrier(); void do_cld_barrier();
}; };
// Closure for scanning DefNewGeneration. // Closure for scanning DefNewGeneration.
// //
// This closure will perform barrier store calls for ALL // This closure will perform barrier store calls for ALL
// pointers in scanned oops. // pointers in scanned oops.
class ScanClosure: public OopsInKlassOrGenClosure { class ScanClosure: public OopsInClassLoaderDataOrGenClosure {
protected: protected:
DefNewGeneration* _g; DefNewGeneration* _g;
HeapWord* _boundary; HeapWord* _boundary;
@ -117,7 +118,7 @@ class ScanClosure: public OopsInKlassOrGenClosure {
// This closure only performs barrier store calls on // This closure only performs barrier store calls on
// pointers into the DefNewGeneration. This is less // pointers into the DefNewGeneration. This is less
// precise, but faster, than a ScanClosure // precise, but faster, than a ScanClosure
class FastScanClosure: public OopsInKlassOrGenClosure { class FastScanClosure: public OopsInClassLoaderDataOrGenClosure {
protected: protected:
DefNewGeneration* _g; DefNewGeneration* _g;
HeapWord* _boundary; HeapWord* _boundary;
@ -131,14 +132,15 @@ class FastScanClosure: public OopsInKlassOrGenClosure {
inline void do_oop_nv(narrowOop* p); inline void do_oop_nv(narrowOop* p);
}; };
class KlassScanClosure: public KlassClosure { class CLDScanClosure: public CLDClosure {
OopsInKlassOrGenClosure* _scavenge_closure; OopsInClassLoaderDataOrGenClosure* _scavenge_closure;
// true if the modified oops state should be saved. // true if the modified oops state should be saved.
bool _accumulate_modified_oops; bool _accumulate_modified_oops;
public: public:
KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure, CLDScanClosure(OopsInClassLoaderDataOrGenClosure* scavenge_closure,
KlassRemSet* klass_rem_set_policy); bool accumulate_modified_oops) :
void do_klass(Klass* k); _scavenge_closure(scavenge_closure), _accumulate_modified_oops(accumulate_modified_oops) {}
void do_cld(ClassLoaderData* cld);
}; };
class FilteringClosure: public ExtendedOopClosure { class FilteringClosure: public ExtendedOopClosure {

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -68,9 +68,11 @@ template <class T> inline void OopsInGenClosure::par_do_barrier(T* p) {
} }
} }
inline void OopsInKlassOrGenClosure::do_klass_barrier() { inline void OopsInClassLoaderDataOrGenClosure::do_cld_barrier() {
assert(_scanned_klass != NULL, "Must be"); assert(_scanned_cld != NULL, "Must be");
_scanned_klass->record_modified_oops(); if (!_scanned_cld->has_modified_oops()) {
_scanned_cld->record_modified_oops();
}
} }
// NOTE! Any changes made here should also be made // NOTE! Any changes made here should also be made
@ -87,8 +89,8 @@ template <class T> inline void ScanClosure::do_oop_work(T* p) {
oopDesc::encode_store_heap_oop_not_null(p, new_obj); oopDesc::encode_store_heap_oop_not_null(p, new_obj);
} }
if (is_scanning_a_klass()) { if (is_scanning_a_cld()) {
do_klass_barrier(); do_cld_barrier();
} else if (_gc_barrier) { } else if (_gc_barrier) {
// Now call parent closure // Now call parent closure
do_barrier(p); do_barrier(p);
@ -111,8 +113,8 @@ template <class T> inline void FastScanClosure::do_oop_work(T* p) {
oop new_obj = obj->is_forwarded() ? obj->forwardee() oop new_obj = obj->is_forwarded() ? obj->forwardee()
: _g->copy_to_survivor_space(obj); : _g->copy_to_survivor_space(obj);
oopDesc::encode_store_heap_oop_not_null(p, new_obj); oopDesc::encode_store_heap_oop_not_null(p, new_obj);
if (is_scanning_a_klass()) { if (is_scanning_a_cld()) {
do_klass_barrier(); do_cld_barrier();
} else if (_gc_barrier) { } else if (_gc_barrier) {
// Now call parent closure // Now call parent closure
do_barrier(p); do_barrier(p);

View file

@ -412,6 +412,7 @@ C2V_VMENTRY(jobjectArray, readConfiguration, (JNIEnv *env))
} else if (strcmp(vmField.typeString, "address") == 0 || } else if (strcmp(vmField.typeString, "address") == 0 ||
strcmp(vmField.typeString, "intptr_t") == 0 || strcmp(vmField.typeString, "intptr_t") == 0 ||
strcmp(vmField.typeString, "uintptr_t") == 0 || strcmp(vmField.typeString, "uintptr_t") == 0 ||
strcmp(vmField.typeString, "OopHandle") == 0 ||
strcmp(vmField.typeString, "size_t") == 0 || strcmp(vmField.typeString, "size_t") == 0 ||
// All foo* types are addresses. // All foo* types are addresses.
vmField.typeString[strlen(vmField.typeString) - 1] == '*') { vmField.typeString[strlen(vmField.typeString) - 1] == '*') {

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
 #include "jvmci/vmStructs_compiler_runtime.hpp"
 #include "jvmci/vmStructs_jvmci.hpp"
 #include "oops/oop.hpp"
+#include "oops/oopHandle.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -192,7 +193,7 @@
   nonstatic_field(Klass, _name, Symbol*) \
   nonstatic_field(Klass, _prototype_header, markOop) \
   nonstatic_field(Klass, _next_sibling, Klass*) \
-  nonstatic_field(Klass, _java_mirror, oop) \
+  nonstatic_field(Klass, _java_mirror, OopHandle) \
   nonstatic_field(Klass, _modifier_flags, jint) \
   nonstatic_field(Klass, _access_flags, AccessFlags) \
   \
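Changing _java_mirror from oop to OopHandle inserts one level of indirection: the Klass now stores a pointer to an oop slot rather than the mirror oop itself, which is also why JVMCI above starts treating OopHandle as an address. An illustrative stand-in (the real class lives in oops/oopHandle.hpp; OopHandleSketch here is hypothetical):

    typedef class oopDesc* oop;   // opaque object pointer, as in HotSpot

    class OopHandleSketch {
      oop* _obj;                  // slot owned by a handle area; the GC can
                                  // update *_obj without touching the Klass
     public:
      explicit OopHandleSketch(oop* obj = nullptr) : _obj(obj) {}
      oop resolve() const { return _obj == nullptr ? nullptr : *_obj; }
    };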

View file

@@ -233,7 +233,6 @@ class MetaspaceObj {
   void print_address_on(outputStream* st) const;  // nonvirtual address printing
 #define METASPACE_OBJ_TYPES_DO(f) \
-  f(Unknown) \
   f(Class) \
   f(Symbol) \
   f(TypeArrayU1) \
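METASPACE_OBJ_TYPES_DO is an X-macro, so dropping f(Unknown) from the list removes the Unknown entry from every expansion site at once. A generic sketch of the pattern, with hypothetical macro and entry names rather than the real allocation.hpp expansions:

    // Each expansion site supplies its own f(...) and replays the list.
    #define TYPES_DO(f) \
      f(Class)          \
      f(Symbol)         \
      f(TypeArrayU1)

    #define DECLARE_TYPE(name) name##Type,
    enum Type { TYPES_DO(DECLARE_TYPE) TypeCount };   // ClassType, SymbolType, ...

    #define DECLARE_NAME(name) #name,
    static const char* type_names[] = { TYPES_DO(DECLARE_NAME) };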

View file

@@ -182,6 +182,7 @@ void FileMapInfo::FileMapHeader::populate(FileMapInfo* mapinfo, size_t alignment
   _obj_alignment = ObjectAlignmentInBytes;
   _compact_strings = CompactStrings;
   _narrow_oop_mode = Universe::narrow_oop_mode();
+  _narrow_oop_base = Universe::narrow_oop_base();
   _narrow_oop_shift = Universe::narrow_oop_shift();
   _max_heap_size = MaxHeapSize;
   _narrow_klass_base = Universe::narrow_klass_base();
@@ -687,8 +688,14 @@ static int num_open_archive_heap_ranges = 0;
 // open archive objects.
 void FileMapInfo::map_heap_regions() {
   if (MetaspaceShared::is_heap_object_archiving_allowed()) {
+    log_info(cds)("Archived narrow_oop_mode = %d, narrow_oop_base = " PTR_FORMAT ", narrow_oop_shift = %d",
+                  narrow_oop_mode(), p2i(narrow_oop_base()), narrow_oop_shift());
+    log_info(cds)("Archived narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
+                  p2i(narrow_klass_base()), narrow_klass_shift());
     // Check that all the narrow oop and klass encodings match the archive
     if (narrow_oop_mode() != Universe::narrow_oop_mode() ||
+        narrow_oop_base() != Universe::narrow_oop_base() ||
         narrow_oop_shift() != Universe::narrow_oop_shift() ||
         narrow_klass_base() != Universe::narrow_klass_base() ||
         narrow_klass_shift() != Universe::narrow_klass_shift()) {
@@ -697,6 +704,11 @@ void FileMapInfo::map_heap_regions() {
         "The current CompressedOops/CompressedClassPointers encoding differs from "
         "that archived due to heap size change. The archive was dumped using max heap "
         "size " UINTX_FORMAT "M.", max_heap_size()/M);
+      log_info(cds)("Current narrow_oop_mode = %d, narrow_oop_base = " PTR_FORMAT ", narrow_oop_shift = %d",
+                    Universe::narrow_oop_mode(), p2i(Universe::narrow_oop_base()),
+                    Universe::narrow_oop_shift());
+      log_info(cds)("Current narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
+                    p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
     }
   } else {
     // First, map string regions as closed archive heap regions.
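The new narrow_oop_base() comparison exists because archived heap regions contain compressed oops encoded against the dump-time base and shift: a narrow oop decodes to base + (narrow << shift), so either value differing at restore time would make every archived reference decode to the wrong address. A simplified sketch of the decode arithmetic (HotSpot's real version also special-cases null and the different NARROW_OOP_MODEs):

    #include <cstdint>

    // Decode a compressed (narrow) oop against an encoding (base, shift).
    static inline void* decode_narrow_oop(uint32_t narrow,
                                          uint8_t* base, int shift) {
      return base + (static_cast<uintptr_t>(narrow) << shift);
    }
    // Dumped with base=0, shift=3 but restored with base=0x800000000, shift=0,
    // the same narrow value lands on a different address -- exactly the
    // mismatch that map_heap_regions() now detects and logs.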

View file

@@ -112,6 +112,7 @@ public:
   int     _version;           // (from enum, above.)
   size_t  _alignment;         // how shared archive should be aligned
   int     _obj_alignment;     // value of ObjectAlignmentInBytes
+  address _narrow_oop_base;   // compressed oop encoding base
   int     _narrow_oop_shift;  // compressed oop encoding shift
   bool    _compact_strings;   // value of CompactStrings
   uintx   _max_heap_size;     // java max heap size during dumping
@@ -203,8 +204,9 @@ public:
   int     version()           { return _header->_version; }
   size_t  alignment()         { return _header->_alignment; }
   Universe::NARROW_OOP_MODE narrow_oop_mode() { return _header->_narrow_oop_mode; }
-  int     narrow_oop_shift()  { return _header->_narrow_oop_shift; }
-  uintx   max_heap_size()     { return _header->_max_heap_size; }
+  address narrow_oop_base()    const { return _header->_narrow_oop_base; }
+  int     narrow_oop_shift()   const { return _header->_narrow_oop_shift; }
+  uintx   max_heap_size()      const { return _header->_max_heap_size; }
   address narrow_klass_base()  const { return _header->_narrow_klass_base; }
   int     narrow_klass_shift() const { return _header->_narrow_klass_shift; }
   struct FileMapHeader* header()     { return _header; }

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,17 +29,8 @@
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
-void KlassToOopClosure::do_klass(Klass* k) {
-  assert(_oop_closure != NULL, "Not initialized?");
-  k->oops_do(_oop_closure);
-}
 void CLDToOopClosure::do_cld(ClassLoaderData* cld) {
-  cld->oops_do(_oop_closure, &_klass_closure, _must_claim_cld);
-}
-
-void CLDToKlassAndOopClosure::do_cld(ClassLoaderData* cld) {
-  cld->oops_do(_oop_closure, _klass_closure, _must_claim_cld);
+  cld->oops_do(_oop_closure, _must_claim_cld);
 }
 void ObjectToOopClosure::do_object(oop obj) {

View file

@@ -138,67 +138,27 @@ class CLDClosure : public Closure {
   virtual void do_cld(ClassLoaderData* cld) = 0;
 };
-class KlassToOopClosure : public KlassClosure {
-  friend class MetadataAwareOopClosure;
-  friend class MetadataAwareOopsInGenClosure;
-
-  OopClosure* _oop_closure;
-
-  // Used when _oop_closure couldn't be set in an initialization list.
-  void initialize(OopClosure* oop_closure) {
-    assert(_oop_closure == NULL, "Should only be called once");
-    _oop_closure = oop_closure;
-  }
-
- public:
-  KlassToOopClosure(OopClosure* oop_closure = NULL) : _oop_closure(oop_closure) {}
-
-  virtual void do_klass(Klass* k);
-};
-
 class CLDToOopClosure : public CLDClosure {
   OopClosure*       _oop_closure;
-  KlassToOopClosure _klass_closure;
   bool              _must_claim_cld;
 public:
   CLDToOopClosure(OopClosure* oop_closure, bool must_claim_cld = true) :
       _oop_closure(oop_closure),
-      _klass_closure(oop_closure),
       _must_claim_cld(must_claim_cld) {}
   void do_cld(ClassLoaderData* cld);
 };
-class CLDToKlassAndOopClosure : public CLDClosure {
-  friend class G1CollectedHeap;
- protected:
-  OopClosure*   _oop_closure;
-  KlassClosure* _klass_closure;
-  bool          _must_claim_cld;
- public:
-  CLDToKlassAndOopClosure(KlassClosure* klass_closure,
-                          OopClosure* oop_closure,
-                          bool must_claim_cld) :
-      _oop_closure(oop_closure),
-      _klass_closure(klass_closure),
-      _must_claim_cld(must_claim_cld) {}
-  void do_cld(ClassLoaderData* cld);
-};
-
 // The base class for all concurrent marking closures,
 // that participates in class unloading.
 // It's used to proxy through the metadata to the oops defined in them.
 class MetadataAwareOopClosure: public ExtendedOopClosure {
-  KlassToOopClosure _klass_closure;
 public:
-  MetadataAwareOopClosure() : ExtendedOopClosure() {
-    _klass_closure.initialize(this);
-  }
-  MetadataAwareOopClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) {
-    _klass_closure.initialize(this);
-  }
+  MetadataAwareOopClosure() : ExtendedOopClosure() { }
+  MetadataAwareOopClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) { }
   bool do_metadata_nv()      { return true; }
   virtual bool do_metadata() { return do_metadata_nv(); }
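With class mirrors reached through the CLD's handle area, the KlassToOopClosure proxy has no oops left to visit, so CLDToOopClosure can hand its OopClosure straight to ClassLoaderData::oops_do. A hypothetical miniature of the simplified flow (stand-in types, not the HotSpot classes):

    typedef class oopDesc* oop;

    struct OopClosureSketch { virtual void do_oop(oop* p) = 0; };

    struct CLDSketch {
      oop _handles[8];              // stand-in for the CLD handle area,
      int _count = 0;               // which now also holds class mirrors
      void oops_do(OopClosureSketch* cl) {
        for (int i = 0; i < _count; i++) {
          cl->do_oop(&_handles[i]); // one closure visits every root slot;
        }                           // no per-Klass proxy closure needed
      }
    };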

Some files were not shown because too many files have changed in this diff.