diff --git a/.hgtags b/.hgtags
index 80a05d1444a..dd49fb5ce19 100644
--- a/.hgtags
+++ b/.hgtags
@@ -487,3 +487,5 @@ e1e60f75cd39312a7f59d2a4f91d624e5aecc95e jdk-11+11
758deedaae8406ae60147486107a54e9864aa7b0 jdk-11+13
3595bd343b65f8c37818ebe6a4c343ddeb1a5f88 jdk-11+14
a11c1cb542bbd1671d25b85efe7d09b983c48525 jdk-11+15
+02934b0d661b82b7fe1052a04998d2091352e08d jdk-11+16
+64e4b1686141e57a681936a8283983341484676e jdk-11+17
diff --git a/doc/building.html b/doc/building.html
index 520c91a407f..3c982e9d4ca 100644
--- a/doc/building.html
+++ b/doc/building.html
@@ -505,7 +505,7 @@
 hotspot - Build all of hotspot (but only hotspot)
 hotspot-<variant> - Build just the specified jvm variant
-images or product-images - Build the JRE and JDK images
+images or product-images - Build the JDK image
 docs or docs-image - Build the documentation image
 test-image - Build the test image
 all or all-images - Build all images (product, docs and test)
@@ -639,7 +639,7 @@
 OpenJDK contains two different ports for the aarch64 platform: the original
 aarch64 port from the AArch64 Port Project, and a 64-bit version of the
 Oracle-contributed ARM port. When targeting aarch64, by default the original
 aarch64 port is used. To select the Oracle ARM 64 port, use
 --with-cpu-port=arm64. Also set the corresponding value (aarch64 or arm64)
 to --with-abi-profile, to ensure a consistent build.
 Verifying the Build
 The build will end up in a directory named like
 build/linux-arm-normal-server-release.
-Inside this build output directory, the images/jdk and images/jre will
-contain the newly built JDK and JRE, respectively, for your target system.
+Inside this build output directory, the images/jdk will contain the newly
+built JDK for your target system.
 Copy these folders to your target system. Then you can run e.g.
 images/jdk/bin/java -version.
 Building OpenJDK requires a lot of horsepower. Some of the build tools can be
 adjusted to utilize more or less of resources such as parallel threads and
 memory. The configure script analyzes your system and selects reasonable
 values for such options based on your hardware. If you encounter resource
 problems, such as out of memory conditions, you can modify the detected
 values with:
diff --git a/doc/building.md b/doc/building.md
index 6fff221da9c..8b9b81391c7 100644
--- a/doc/building.md
+++ b/doc/building.md
@@ -747,7 +747,7 @@ Apart from the default target, here are some common make targets:
* `hotspot` - Build all of hotspot (but only hotspot)
 * `hotspot-<variant>` - Build just the specified jvm variant
- * `images` or `product-images` - Build the JRE and JDK images
+ * `images` or `product-images` - Build the JDK image
* `docs` or `docs-image` - Build the documentation image
* `test-image` - Build the test image
* `all` or `all-images` - Build all images (product, docs and test)
@@ -1039,8 +1039,8 @@ original aarch64 port is used. To select the Oracle ARM 64 port, use
The build will end up in a directory named like
`build/linux-arm-normal-server-release`.
-Inside this build output directory, the `images/jdk` and `images/jre` will
-contain the newly built JDK and JRE, respectively, for your *target* system.
+Inside this build output directory, the `images/jdk` will contain the newly
+built JDK for your *target* system.
Copy these folders to your *target* system. Then you can run e.g.
`images/jdk/bin/java -version`.
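
Reviewer aside: with the JRE image dropped from the default product images, the documented smoke test reduces to the JDK image alone. A minimal sketch, assuming an already configured build tree (the build directory name varies with platform and configuration):

    make images
    ./build/*/images/jdk/bin/java -version   # images/jre is no longer produced by default
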
diff --git a/make/BuildNashorn.gmk b/make/BuildNashorn.gmk
deleted file mode 100644
index c9e31433cf2..00000000000
--- a/make/BuildNashorn.gmk
+++ /dev/null
@@ -1,115 +0,0 @@
-#
-# Copyright (c) 2010, 2017, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation. Oracle designates this
-# particular file as subject to the "Classpath" exception as provided
-# by Oracle in the LICENSE file that accompanied this code.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-# This must be the first rule
-default: all
-
-include $(SPEC)
-include MakeBase.gmk
-include JarArchive.gmk
-include JavaCompilation.gmk
-include SetupJavaCompilers.gmk
-include TextFileProcessing.gmk
-include Modules.gmk
-
-JDK_CLASSES := $(call PathList, $(strip $(addprefix $(JDK_OUTPUTDIR)/modules/, \
- java.base java.logging java.scripting jdk.dynalink)))
-
-# Need to use source and target 8 for nasgen to work.
-$(eval $(call SetupJavaCompiler, GENERATE_NEWBYTECODE_DEBUG, \
- JVM := $(JAVA_JAVAC), \
- JAVAC := $(NEW_JAVAC), \
- FLAGS := -g -source 10 -target 10 --upgrade-module-path "$(JDK_OUTPUTDIR)/modules/" \
- --system none --module-source-path $(call GetModuleSrcPath), \
- SERVER_DIR := $(SJAVAC_SERVER_DIR), \
- SERVER_JVM := $(SJAVAC_SERVER_JAVA)))
-
-# Build nashorn into intermediate directory
-# Name the compilation setup the same as the module, as is done in the global
-# CompileJavaModules.gmk, to make dependency checking with other modules work
-# seamlessly.
-$(eval $(call SetupJavaCompilation, jdk.scripting.nashorn, \
- SETUP := GENERATE_NEWBYTECODE_DEBUG, \
- MODULE := jdk.scripting.nashorn, \
- SRC := $(TOPDIR)/src/jdk.scripting.nashorn/share/classes, \
- COPY := .properties .js, \
- BIN := $(SUPPORT_OUTPUTDIR)/special_classes, \
- CREATE_API_DIGEST := true, \
-))
-
-# Declare dependencies between java compilations of different modules.
-# Since the other modules are declared in different invocations of this file,
-# use the macro to find the correct target file to depend on.
-# Only the javac compilation actually depends on other modules so limit
-# dependency declaration to that by using the *_COMPILE_TARGET variable.
-$(jdk.scripting.nashorn_COMPILE_TARGET): $(foreach d, $(call FindDepsForModule, jdk.scripting.nashorn), \
- $(call SetupJavaCompilationApiTarget, $d, \
- $(if $($d_BIN), $($d_BIN), $(JDK_OUTPUTDIR)/modules/$d)))
-
-NASGEN_SRC := $(TOPDIR)/make/nashorn/buildtools/nasgen/src
-ASM_SRC := $(TOPDIR)/src/java.base/share/classes/jdk/internal/org/objectweb/asm
-
-# Build nasgen
-$(eval $(call SetupJavaCompilation, BUILD_NASGEN, \
- SETUP := GENERATE_OLDBYTECODE, \
- SRC := $(NASGEN_SRC) $(ASM_SRC), \
- BIN := $(BUILDTOOLS_OUTPUTDIR)/nasgen_classes))
-
-NASHORN_CLASSES_DIR := $(JDK_OUTPUTDIR)/modules/jdk.scripting.nashorn
-NASGEN_RUN_FILE := $(NASHORN_CLASSES_DIR)/_the.nasgen.run
-
-NASGEN_OPTIONS := \
- -cp $(BUILDTOOLS_OUTPUTDIR)/nasgen_classes \
- --patch-module java.base=$(BUILDTOOLS_OUTPUTDIR)/nasgen_classes \
- --add-exports java.base/jdk.internal.org.objectweb.asm=ALL-UNNAMED \
- --add-exports java.base/jdk.internal.org.objectweb.asm.util=ALL-UNNAMED \
- #
-
-# Copy classes to final classes dir and run nasgen to modify classes in jdk.nashorn.internal.objects package
-$(NASGEN_RUN_FILE): $(BUILD_NASGEN) $(jdk.scripting.nashorn)
- $(ECHO) Running nasgen
- $(MKDIR) -p $(@D)
- $(RM) -rf $(@D)/jdk $(@D)/netscape
- $(CP) -R -p $(SUPPORT_OUTPUTDIR)/special_classes/jdk.scripting.nashorn/* $(@D)/
- $(JAVA_SMALL) $(NASGEN_OPTIONS) \
- jdk.nashorn.internal.tools.nasgen.Main $(@D) jdk.nashorn.internal.objects $(@D)
- $(TOUCH) $@
-
-# Version file needs to be processed with version numbers
-$(eval $(call SetupTextFileProcessing, BUILD_VERSION_FILE, \
- SOURCE_FILES := $(TOPDIR)/src/jdk.scripting.nashorn/share/classes/jdk/nashorn/internal/runtime/resources/version.properties.template, \
- OUTPUT_FILE := $(JDK_OUTPUTDIR)/modules/jdk.scripting.nashorn/jdk/nashorn/internal/runtime/resources/version.properties, \
- REPLACEMENTS := \
- @@VERSION_STRING@@ => $(VERSION_STRING) ; \
- @@VERSION_SHORT@@ => $(VERSION_SHORT) , \
-))
-
-# Version processing needs to happen after nasgen run since nasgen run deletes it
-$(BUILD_VERSION_FILE): $(NASGEN_RUN_FILE)
-
-compile: $(NASGEN_RUN_FILE) $(BUILD_VERSION_FILE)
-all: compile
-
-.PHONY: compile all
diff --git a/make/Bundles.gmk b/make/Bundles.gmk
index e447eb51c54..de9508b767c 100644
--- a/make/Bundles.gmk
+++ b/make/Bundles.gmk
@@ -146,19 +146,13 @@ endef
# correct base directories.
ifeq ($(OPENJDK_TARGET_OS)-$(DEBUG_LEVEL), macosx-release)
JDK_IMAGE_DIR := $(JDK_MACOSX_BUNDLE_DIR)
- JRE_IMAGE_DIR := $(JRE_MACOSX_BUNDLE_DIR)
JDK_IMAGE_HOMEDIR := $(JDK_MACOSX_CONTENTS_DIR)/Home
- JRE_IMAGE_HOMEDIR := $(JRE_MACOSX_CONTENTS_DIR)/Home
JDK_BUNDLE_SUBDIR :=
- JRE_BUNDLE_SUBDIR :=
else
JDK_IMAGE_HOMEDIR := $(JDK_IMAGE_DIR)
- JRE_IMAGE_HOMEDIR := $(JRE_IMAGE_DIR)
JDK_BUNDLE_SUBDIR := jdk-$(VERSION_NUMBER)
- JRE_BUNDLE_SUBDIR := jre-$(VERSION_NUMBER)
ifneq ($(DEBUG_LEVEL), release)
JDK_BUNDLE_SUBDIR := $(JDK_BUNDLE_SUBDIR)/$(DEBUG_LEVEL)
- JRE_BUNDLE_SUBDIR := $(JRE_BUNDLE_SUBDIR)/$(DEBUG_LEVEL)
endif
endif
@@ -204,26 +198,6 @@ ifneq ($(filter product-bundles, $(MAKECMDGOALS)), )
TEST_DEMOS_BUNDLE_FILES := $(filter $(JDK_IMAGE_HOMEDIR)/demo/%, $(ALL_JDK_FILES))
- ALL_JRE_FILES := $(call CacheFind, $(JRE_IMAGE_DIR))
-
- # Create special filter rules when dealing with unzipped .dSYM directories on
- # macosx
- ifeq ($(OPENJDK_TARGET_OS), macosx)
- ifeq ($(ZIP_EXTERNAL_DEBUG_SYMBOLS), false)
- JRE_SYMBOLS_EXCLUDE_PATTERN := $(addprefix %, \
- $(call containing, .dSYM/, $(patsubst $(JRE_IMAGE_DIR)/%, %, $(ALL_JRE_FILES))))
- endif
- endif
-
- JRE_BUNDLE_FILES := $(filter-out \
- $(JRE_SYMBOLS_EXCLUDE_PATTERN) \
- $(SYMBOLS_EXCLUDE_PATTERN), \
- $(ALL_JRE_FILES))
- JRE_SYMBOLS_BUNDLE_FILES := $(filter \
- $(JRE_SYMBOLS_EXCLUDE_PATTERN) \
- $(SYMBOLS_EXCLUDE_PATTERN), \
- $(ALL_JRE_FILES))
-
$(eval $(call SetupBundleFile, BUILD_JDK_BUNDLE, \
BUNDLE_NAME := $(JDK_BUNDLE_NAME), \
FILES := $(JDK_BUNDLE_FILES), \
@@ -234,15 +208,6 @@ ifneq ($(filter product-bundles, $(MAKECMDGOALS)), )
PRODUCT_TARGETS += $(BUILD_JDK_BUNDLE)
- $(eval $(call SetupBundleFile, BUILD_JRE_BUNDLE, \
- BUNDLE_NAME := $(JRE_BUNDLE_NAME), \
- FILES := $(JRE_BUNDLE_FILES), \
- BASE_DIRS := $(JRE_IMAGE_DIR), \
- SUBDIR := $(JRE_BUNDLE_SUBDIR), \
- ))
-
- PRODUCT_TARGETS += $(BUILD_JRE_BUNDLE)
-
$(eval $(call SetupBundleFile, BUILD_JDK_SYMBOLS_BUNDLE, \
BUNDLE_NAME := $(JDK_SYMBOLS_BUNDLE_NAME), \
FILES := $(JDK_SYMBOLS_BUNDLE_FILES), \
@@ -253,16 +218,6 @@ ifneq ($(filter product-bundles, $(MAKECMDGOALS)), )
PRODUCT_TARGETS += $(BUILD_JDK_SYMBOLS_BUNDLE)
- $(eval $(call SetupBundleFile, BUILD_JRE_SYMBOLS_BUNDLE, \
- BUNDLE_NAME := $(JRE_SYMBOLS_BUNDLE_NAME), \
- FILES := $(JRE_SYMBOLS_BUNDLE_FILES), \
- BASE_DIRS := $(JRE_IMAGE_DIR), \
- SUBDIR := $(JRE_BUNDLE_SUBDIR), \
- UNZIP_DEBUGINFO := true, \
- ))
-
- PRODUCT_TARGETS += $(BUILD_JRE_SYMBOLS_BUNDLE)
-
# The demo bundle is only created to support client tests. Ideally it should
# be built with the main test bundle, but since the prerequisites match
# better with the product build, it makes more sense to keep it there for now.
diff --git a/make/CompileJavaModules.gmk b/make/CompileJavaModules.gmk
index 72c7391d45d..058f9a58eb8 100644
--- a/make/CompileJavaModules.gmk
+++ b/make/CompileJavaModules.gmk
@@ -325,6 +325,10 @@ jdk.internal.le_COPY += .properties
################################################################################
+jdk.internal.opt_COPY += .properties
+
+################################################################################
+
jdk.jcmd_COPY += _options
################################################################################
@@ -341,6 +345,10 @@ jdk.jartool_ADD_JAVAC_FLAGS += -XDstringConcat=inline
################################################################################
+jdk.scripting.nashorn_COPY := .properties .js
+
+################################################################################
+
jdk.scripting.nashorn.shell_COPY += .js .properties
################################################################################
@@ -440,11 +448,13 @@ jdk.internal.vm.compiler_ADD_JAVAC_FLAGS += -parameters -XDstringConcat=inline \
jdk.internal.vm.compiler_EXCLUDES += \
jdk.internal.vm.compiler.collections.test \
+ org.graalvm.compiler.processor \
org.graalvm.compiler.core.match.processor \
org.graalvm.compiler.nodeinfo.processor \
org.graalvm.compiler.options.processor \
org.graalvm.compiler.serviceprovider.processor \
- org.graalvm.compiler.replacements.verifier \
+ org.graalvm.compiler.replacements.processor \
+ org.graalvm.compiler.replacements.jdk9.test \
org.graalvm.compiler.api.directives.test \
org.graalvm.compiler.api.test \
org.graalvm.compiler.asm.aarch64.test \
@@ -642,6 +652,12 @@ endif
################################################################################
+ifeq ($(MODULE), jdk.scripting.nashorn)
+ include CompileJavaModulesNashorn.gmk
+endif
+
+################################################################################
+
$(eval $(call IncludeCustomExtension, CompileJavaModules-post.gmk))
################################################################################
diff --git a/make/CompileJavaModulesNashorn.gmk b/make/CompileJavaModulesNashorn.gmk
new file mode 100644
index 00000000000..cc09a88b3bf
--- /dev/null
+++ b/make/CompileJavaModulesNashorn.gmk
@@ -0,0 +1,54 @@
+#
+# Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation. Oracle designates this
+# particular file as subject to the "Classpath" exception as provided
+# by Oracle in the LICENSE file that accompanied this code.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+NASGEN_SRC := $(TOPDIR)/make/nashorn/buildtools/nasgen/src
+ASM_SRC := $(TOPDIR)/src/java.base/share/classes/jdk/internal/org/objectweb/asm
+
+# Build nasgen
+$(eval $(call SetupJavaCompilation, BUILD_NASGEN, \
+ SETUP := GENERATE_OLDBYTECODE, \
+ SRC := $(NASGEN_SRC) $(ASM_SRC), \
+ BIN := $(BUILDTOOLS_OUTPUTDIR)/nasgen_classes, \
+))
+
+NASHORN_CLASSES_DIR := $(JDK_OUTPUTDIR)/modules/$(MODULE)
+NASGEN_RUN_FILE := $(NASHORN_CLASSES_DIR)/_the.nasgen.run
+
+NASGEN_OPTIONS := \
+ -cp $(BUILDTOOLS_OUTPUTDIR)/nasgen_classes \
+ --patch-module java.base=$(BUILDTOOLS_OUTPUTDIR)/nasgen_classes \
+ --add-exports java.base/jdk.internal.org.objectweb.asm=ALL-UNNAMED \
+ --add-exports java.base/jdk.internal.org.objectweb.asm.util=ALL-UNNAMED \
+ #
+
+# Run nasgen to modify classes in jdk.nashorn.internal.objects package
+$(NASGEN_RUN_FILE): $(BUILD_NASGEN) $($(MODULE))
+ $(ECHO) Running nasgen
+ $(JAVA_SMALL) $(NASGEN_OPTIONS) \
+ jdk.nashorn.internal.tools.nasgen.Main $(@D) \
+ jdk.nashorn.internal.objects $(@D)
+ $(TOUCH) $@
+
+TARGETS += $(NASGEN_RUN_FILE)
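
Reviewer aside: since nasgen is now hooked into the standard per-module compilation (see the MODULE check added to CompileJavaModules.gmk above), the module builds like any other. A hedged sketch of exercising just this step from the top level, using the per-module target that Main.gmk now declares for all modules:

    make jdk.scripting.nashorn-java
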
diff --git a/make/CompileToolsHotspot.gmk b/make/CompileToolsHotspot.gmk
index f0eb30cbd1d..7180658c2b3 100644
--- a/make/CompileToolsHotspot.gmk
+++ b/make/CompileToolsHotspot.gmk
@@ -47,34 +47,8 @@ ifeq ($(INCLUDE_GRAAL), true)
$(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_MATCH_PROCESSOR, \
SETUP := GENERATE_OLDBYTECODE, \
SRC := \
- $(SRC_DIR)/jdk.internal.vm.compiler.word/src \
- $(SRC_DIR)/jdk.internal.vm.compiler.collections/src \
- $(SRC_DIR)/org.graalvm.compiler.core/src \
- $(SRC_DIR)/org.graalvm.compiler.core.common/src \
+ $(SRC_DIR)/org.graalvm.compiler.processor/src \
$(SRC_DIR)/org.graalvm.compiler.core.match.processor/src \
- $(SRC_DIR)/org.graalvm.compiler.api.replacements/src \
- $(SRC_DIR)/org.graalvm.compiler.asm/src \
- $(SRC_DIR)/org.graalvm.compiler.bytecode/src \
- $(SRC_DIR)/org.graalvm.compiler.code/src \
- $(SRC_DIR)/org.graalvm.compiler.debug/src \
- $(SRC_DIR)/org.graalvm.compiler.graph/src \
- $(SRC_DIR)/org.graalvm.compiler.lir/src \
- $(SRC_DIR)/org.graalvm.compiler.loop/src \
- $(SRC_DIR)/org.graalvm.compiler.loop.phases/src \
- $(SRC_DIR)/org.graalvm.compiler.nodeinfo/src \
- $(SRC_DIR)/org.graalvm.compiler.nodes/src \
- $(SRC_DIR)/org.graalvm.compiler.options/src \
- $(SRC_DIR)/org.graalvm.compiler.phases/src \
- $(SRC_DIR)/org.graalvm.compiler.phases.common/src \
- $(SRC_DIR)/org.graalvm.compiler.serviceprovider/src \
- $(SRC_DIR)/org.graalvm.compiler.virtual/src \
- $(SRC_DIR)/org.graalvm.graphio/src \
- $(SRC_DIR)/org.graalvm.util/src \
- $(VM_CI_SRC_DIR)/jdk.vm.ci.code/src \
- $(VM_CI_SRC_DIR)/jdk.vm.ci.common/src \
- $(VM_CI_SRC_DIR)/jdk.vm.ci.meta/src \
- $(VM_CI_SRC_DIR)/jdk.vm.ci.runtime/src \
- $(VM_CI_SRC_DIR)/jdk.vm.ci.services/src \
, \
EXCLUDE_FILES := $(EXCLUDE_FILES), \
BIN := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.match.processor, \
@@ -88,7 +62,7 @@ ifeq ($(INCLUDE_GRAAL), true)
$(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_NODEINFO_PROCESSOR, \
SETUP := GENERATE_OLDBYTECODE, \
SRC := \
- $(SRC_DIR)/org.graalvm.compiler.nodeinfo/src \
+ $(SRC_DIR)/org.graalvm.compiler.processor/src \
$(SRC_DIR)/org.graalvm.compiler.nodeinfo.processor/src \
, \
BIN := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.nodeinfo.processor, \
@@ -102,10 +76,8 @@ ifeq ($(INCLUDE_GRAAL), true)
$(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_OPTIONS_PROCESSOR, \
SETUP := GENERATE_OLDBYTECODE, \
SRC := \
- $(SRC_DIR)/jdk.internal.vm.compiler.collections/src \
- $(SRC_DIR)/org.graalvm.compiler.options/src \
+ $(SRC_DIR)/org.graalvm.compiler.processor/src \
$(SRC_DIR)/org.graalvm.compiler.options.processor/src \
- $(SRC_DIR)/org.graalvm.util/src \
, \
BIN := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.options.processor, \
JAR := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.options.processor.jar, \
@@ -115,44 +87,26 @@ ifeq ($(INCLUDE_GRAAL), true)
##############################################################################
- $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_REPLACEMENTS_VERIFIER, \
+ $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_REPLACEMENTS_PROCESSOR, \
SETUP := GENERATE_OLDBYTECODE, \
SRC := \
- $(SRC_DIR)/jdk.internal.vm.compiler.word/src \
- $(SRC_DIR)/jdk.internal.vm.compiler.collections/src \
- $(SRC_DIR)/org.graalvm.compiler.bytecode/src \
- $(SRC_DIR)/org.graalvm.compiler.replacements.verifier/src \
- $(SRC_DIR)/org.graalvm.compiler.api.replacements/src \
- $(SRC_DIR)/org.graalvm.compiler.code/src \
- $(SRC_DIR)/org.graalvm.compiler.core.common/src \
- $(SRC_DIR)/org.graalvm.compiler.debug/src \
- $(SRC_DIR)/org.graalvm.compiler.graph/src \
- $(SRC_DIR)/org.graalvm.compiler.nodeinfo/src \
- $(SRC_DIR)/org.graalvm.compiler.options/src \
- $(SRC_DIR)/org.graalvm.compiler.serviceprovider/src \
- $(SRC_DIR)/org.graalvm.graphio/src \
- $(SRC_DIR)/org.graalvm.util/src \
- $(VM_CI_SRC_DIR)/jdk.vm.ci.code/src \
- $(VM_CI_SRC_DIR)/jdk.vm.ci.common/src \
- $(VM_CI_SRC_DIR)/jdk.vm.ci.meta/src \
- $(VM_CI_SRC_DIR)/jdk.vm.ci.runtime/src \
- $(VM_CI_SRC_DIR)/jdk.vm.ci.services/src \
+ $(SRC_DIR)/org.graalvm.compiler.processor/src \
+ $(SRC_DIR)/org.graalvm.compiler.replacements.processor/src \
, \
EXCLUDE_FILES := $(EXCLUDE_FILES), \
BIN := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.replacements.verifier, \
JAR := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.replacements.verifier.jar, \
))
- TARGETS += $(BUILD_VM_COMPILER_REPLACEMENTS_VERIFIER)
+ TARGETS += $(BUILD_VM_COMPILER_REPLACEMENTS_PROCESSOR)
##############################################################################
$(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_SERVICEPROVIDER_PROCESSOR, \
SETUP := GENERATE_OLDBYTECODE, \
SRC := \
- $(SRC_DIR)/org.graalvm.compiler.serviceprovider/src \
+ $(SRC_DIR)/org.graalvm.compiler.processor/src \
$(SRC_DIR)/org.graalvm.compiler.serviceprovider.processor/src \
- $(VM_CI_SRC_DIR)/jdk.vm.ci.services/src \
, \
EXCLUDE_FILES := $(EXCLUDE_FILES), \
BIN := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.serviceprovider.processor, \
diff --git a/make/Help.gmk b/make/Help.gmk
index e839fb81d42..461d3431a62 100644
--- a/make/Help.gmk
+++ b/make/Help.gmk
@@ -40,10 +40,10 @@ help:
$(info $(_) # image (alias for jdk or exploded-image))
$(info $(_) make all # Create all images: product, test, docs)
$(info $(_) # (alias for all-images))
- $(info $(_) make images # Create complete jdk and jre images)
+ $(info $(_) make images # Create a complete jdk image)
$(info $(_) # (alias for product-images))
 $(info $(_) make <name>-image # Build just the image for any of: )
- $(info $(_) # jdk, jre, test, docs, symbols)
+ $(info $(_) # jdk, test, docs, symbols, legacy-jre)
 $(info $(_) make <phase> # Build the specified phase and everything it depends on)
$(info $(_) # (gensrc, java, copy, libs, launchers, gendata, rmic))
$(info $(_) make *-only # Applies to most targets and disables building the)
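
For the record, the per-image targets the help text now advertises can be driven individually. A quick sketch, assuming a configured tree:

    make jdk-image          # the default product image
    make legacy-jre-image   # the renamed, now opt-in JRE image
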
diff --git a/make/Images.gmk b/make/Images.gmk
index 1f67472418d..83b692ee650 100644
--- a/make/Images.gmk
+++ b/make/Images.gmk
@@ -90,7 +90,7 @@ endif
$(JDK_IMAGE_DIR)/$(JIMAGE_TARGET_FILE): $(JMODS) \
$(call DependOnVariable, JDK_MODULES_LIST) $(BASE_RELEASE_FILE)
- $(ECHO) Creating jdk jimage
+ $(ECHO) Creating jdk image
$(RM) -r $(JDK_IMAGE_DIR)
$(call ExecuteWithLog, $(SUPPORT_OUTPUTDIR)/images/jdk, \
$(JLINK_TOOL) --add-modules $(JDK_MODULES_LIST) \
@@ -101,7 +101,7 @@ $(JDK_IMAGE_DIR)/$(JIMAGE_TARGET_FILE): $(JMODS) \
$(JRE_IMAGE_DIR)/$(JIMAGE_TARGET_FILE): $(JMODS) \
$(call DependOnVariable, JRE_MODULES_LIST) $(BASE_RELEASE_FILE)
- $(ECHO) Creating jre jimage
+ $(ECHO) Creating legacy jre image
$(RM) -r $(JRE_IMAGE_DIR)
$(call ExecuteWithLog, $(SUPPORT_OUTPUTDIR)/images/jre, \
$(JLINK_TOOL) --add-modules $(JRE_MODULES_LIST) \
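
Aside: both recipes reduce to the same jlink invocation shape. A rough hand-run equivalent, hedged: the configuration name, module list, and output path below are illustrative only, with --module-path pointed at the jmods the build produces:

    jlink --module-path build/linux-x64-normal-server-release/images/jmods \
        --add-modules java.base,java.logging \
        --output /tmp/jdk-minimal
    /tmp/jdk-minimal/bin/java -version
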
diff --git a/make/InitSupport.gmk b/make/InitSupport.gmk
index 37bb7ebdd2f..e885e0fb1a3 100644
--- a/make/InitSupport.gmk
+++ b/make/InitSupport.gmk
@@ -34,6 +34,9 @@ _INITSUPPORT_GMK := 1
ifeq ($(HAS_SPEC),)
+ # COMMA is defined in spec.gmk, but that is not included yet
+ COMMA := ,
+
# Include the corresponding closed file, if present.
ifneq ($(CUSTOM_MAKE_DIR), )
-include $(CUSTOM_MAKE_DIR)/InitSupport.gmk
@@ -531,8 +534,6 @@ endif # HAS_SPEC
define ParseLogOption
ifneq ($$(findstring $1, $$(LOG)),)
override $2 := true
- # COMMA is defined in spec.gmk, but that is not included yet
- COMMA := ,
# First try to remove "," if it exists, otherwise just remove " "
LOG_STRIPPED := $$(subst $1,, $$(subst $$(COMMA)$$(strip $1),, $$(LOG)))
# We might have ended up with a leading comma. Remove it. Need override
@@ -550,8 +551,6 @@ define ParseLogValue
# Make words of out comma-separated list and find the one with opt=val
value := $$(strip $$(subst $$(strip $1)=,, $$(filter $$(strip $1)=%, $$(subst $$(COMMA), , $$(LOG)))))
override $2 := $$(value)
- # COMMA is defined in spec.gmk, but that is not included yet
- COMMA := ,
# First try to remove ", " if it exists, otherwise just remove " "
LOG_STRIPPED := $$(subst $$(strip $1)=$$(value),, \
$$(subst $$(COMMA)$$(strip $1)=$$(value),, $$(LOG)))
diff --git a/make/Main.gmk b/make/Main.gmk
index dd5f3a1c9de..81626672c55 100644
--- a/make/Main.gmk
+++ b/make/Main.gmk
@@ -98,13 +98,10 @@ ALL_TARGETS += buildtools-langtools interim-langtools \
################################################################################
# Special targets for certain modules
-unpack-sec:
- +($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f UnpackSecurity.gmk)
-
generate-exported-symbols:
+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f BuildStatic.gmk)
-ALL_TARGETS += unpack-sec generate-exported-symbols
+ALL_TARGETS += generate-exported-symbols
################################################################################
# Gensrc targets, generating source before java compilation can be done
@@ -189,7 +186,7 @@ $(foreach m, $(IMPORT_COPY_MODULES), $(eval $(call DeclareImportCopyRecipe,$m)))
ALL_TARGETS += $(ALL_COPY_TARGETS)
################################################################################
-# Targets for compiling all java modules. Nashorn is treated separately.
+# Targets for compiling all java modules.
JAVA_MODULES := $(ALL_MODULES)
JAVA_TARGETS := $(addsuffix -java, $(JAVA_MODULES))
@@ -199,14 +196,7 @@ define DeclareCompileJavaRecipe
-f CompileJavaModules.gmk MODULE=$1)
endef
-$(foreach m, $(filter-out jdk.scripting.nashorn, $(JAVA_MODULES)), \
- $(eval $(call DeclareCompileJavaRecipe,$m)))
-
-# Build nashorn. Needs to be compiled separately from the rest of the modules
-# due to nasgen.
-jdk.scripting.nashorn-java:
- +($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) \
- -f BuildNashorn.gmk compile)
+$(foreach m, $(JAVA_MODULES), $(eval $(call DeclareCompileJavaRecipe,$m)))
ALL_TARGETS += $(JAVA_TARGETS)
@@ -338,14 +328,17 @@ jrtfs-jar:
jdk-image:
+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f Images.gmk jdk)
-jre-image:
+legacy-jre-image:
+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f Images.gmk jre)
symbols-image:
+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f Images.gmk symbols)
-mac-bundles-jdk:
- +($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f MacBundles.gmk)
+mac-jdk-bundle:
+ +($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f MacBundles.gmk jdk-bundle)
+
+mac-legacy-jre-bundle:
+ +($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f MacBundles.gmk jre-bundle)
release-file:
+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f ReleaseFile.gmk)
@@ -354,8 +347,8 @@ exploded-image-optimize:
+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f ExplodedImageOptimize.gmk)
ALL_TARGETS += store-source-revision create-source-revision-tracker bootcycle-images zip-security \
- zip-source jrtfs-jar jdk-image jre-image \
- symbols-image mac-bundles-jdk \
+ zip-source jrtfs-jar jdk-image legacy-jre-image \
+ symbols-image mac-jdk-bundle mac-legacy-jre-bundle \
release-file exploded-image-optimize
################################################################################
@@ -700,9 +693,6 @@ else
# file to be processed by the gensrc-moduleinfo target.
jdk.internal.vm.compiler-gensrc-moduleinfo: jdk.internal.vm.compiler-gensrc-src
- # Explicitly add dependencies for special targets
- java.base-java: unpack-sec
-
jdk.jdeps-gendata: java rmic
# The ct.sym generation uses all the moduleinfos as input
@@ -789,16 +779,17 @@ else
generate-link-opt-data: buildtools-jdk
# The generated classlist needs to go into java.base-jmod.
- java.base-jmod jdk.jlink-jmod jdk-image jre-image: generate-link-opt-data
+ java.base-jmod jdk.jlink-jmod jdk-image legacy-jre-image: generate-link-opt-data
endif
release-file: create-source-revision-tracker
jdk-image: jmods zip-source demos release-file
- jre-image: jmods release-file
+ legacy-jre-image: jmods release-file
symbols-image: $(LIBS_TARGETS) $(LAUNCHER_TARGETS)
- mac-bundles-jdk: jdk-image jre-image
+ mac-jdk-bundle: jdk-image
+ mac-legacy-jre-bundle: legacy-jre-image
# The optimize target can run as soon as the modules dir has been completely
# populated (java, copy and gendata targets) and the basic libs and launchers
@@ -969,14 +960,14 @@ docs-reference: docs-reference-api
# alias for backwards compatibility
docs-javadoc: docs-jdk-api
-mac-bundles: mac-bundles-jdk
+mac-bundles: mac-jdk-bundle
# The $(OUTPUTDIR)/images directory contain the resulting deliverables,
# and in line with this, our targets for creating these are named *-image[s].
-# This target builds the product images, e.g. the JRE and JDK image
+# This target builds the product images, e.g. the JDK image
# (and possibly other, more specific versions)
-product-images: jdk-image jre-image symbols-image exploded-image
+product-images: jdk-image symbols-image exploded-image
# zip-security is actually a bundle, but for now it needs to be considered
# an image until this can be cleaned up properly.
@@ -993,7 +984,7 @@ ifneq ($(CREATE_BUILDJDK), true)
endif
ifeq ($(OPENJDK_TARGET_OS), macosx)
- product-images: mac-bundles
+ product-images: mac-jdk-bundle
endif
# This target builds the documentation image
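
Reviewer aside: the macOS bundle split mirrors the image split, so each bundle target now pulls in only its own image. A sketch (macOS only):

    make mac-jdk-bundle          # depends on jdk-image
    make mac-legacy-jre-bundle   # depends on legacy-jre-image
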
diff --git a/make/UnpackSecurity.gmk b/make/UnpackSecurity.gmk
deleted file mode 100644
index 0a3b2d0e443..00000000000
--- a/make/UnpackSecurity.gmk
+++ /dev/null
@@ -1,88 +0,0 @@
-#
-# Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation. Oracle designates this
-# particular file as subject to the "Classpath" exception as provided
-# by Oracle in the LICENSE file that accompanied this code.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-default: all
-
-include $(SPEC)
-include MakeBase.gmk
-
-################################################################################
-# Unpack the binary distributions of the crypto classes if they exist.
-SEC_FILES_ZIP := $(TOPDIR)/make/tools/crypto/sec-bin.zip
-SEC_FILES_WIN_ZIP := $(TOPDIR)/make/tools/crypto/sec-windows-bin.zip
-JGSS_WIN32_FILES_ZIP := $(TOPDIR)/make/tools/crypto/jgss-windows-i586-bin.zip
-JGSS_WIN64_FILES_ZIP := $(TOPDIR)/make/tools/crypto/jgss-windows-x64-bin.zip
-
-define unzip-sec-file
-  $(ECHO) Unzipping $(<F)
-  $(MKDIR) -p $(@D)
-  ($(CD) $(JDK_OUTPUTDIR) && $(UNZIP) -q -o $< > $@.tmp)
-  $(MV) $@.tmp $@
-endef
-
-define unzip-native-sec-file
-  $(ECHO) Unzipping $(<F)
-  $(MKDIR) -p $(@D)
-  ($(CD) $(SUPPORT_OUTPUTDIR) && $(UNZIP) -q -o $< > $@.tmp)
-  $(MV) $@.tmp $@
-endef
-
-$(SUPPORT_OUTPUTDIR)/_the.sec-bin.unzipped: $(SEC_FILES_ZIP)
- $(call unzip-sec-file)
-
-# Trying to unzip both of the sec files at the same time may cause a race
-# when creating directories common to both files.
-$(SUPPORT_OUTPUTDIR)/_the.sec-windows-bin.unzipped: $(SEC_FILES_WIN_ZIP) \
- | $(SUPPORT_OUTPUTDIR)/_the.sec-bin.unzipped
- $(call unzip-sec-file)
-
-$(SUPPORT_OUTPUTDIR)/_the.jgss-windows-i586-bin.unzipped: $(JGSS_WIN32_FILES_ZIP)
- $(call unzip-native-sec-file)
-
-$(SUPPORT_OUTPUTDIR)/_the.jgss-windows-x64-bin.unzipped: $(JGSS_WIN64_FILES_ZIP)
- $(call unzip-native-sec-file)
-
-ifneq ($(wildcard $(SEC_FILES_ZIP)), )
- IMPORT_TARGET_FILES += $(SUPPORT_OUTPUTDIR)/_the.sec-bin.unzipped
- ifeq ($(OPENJDK_TARGET_OS), windows)
- IMPORT_TARGET_FILES += $(SUPPORT_OUTPUTDIR)/_the.sec-windows-bin.unzipped
- ifeq ($(OPENJDK_TARGET_CPU), x86)
- IMPORT_TARGET_FILES += $(SUPPORT_OUTPUTDIR)/_the.jgss-windows-i586-bin.unzipped
- endif
- ifeq ($(OPENJDK_TARGET_CPU), x86_64)
- IMPORT_TARGET_FILES += $(SUPPORT_OUTPUTDIR)/_the.jgss-windows-x64-bin.unzipped
- endif
- endif
-endif
-
-################################################################################
-
-sec: $(IMPORT_TARGET_FILES)
-
-all: sec
-
-.PHONY: sec all
diff --git a/make/autoconf/build-aux/config.guess b/make/autoconf/build-aux/config.guess
index 148a61a8ab8..ec17740edd4 100644
--- a/make/autoconf/build-aux/config.guess
+++ b/make/autoconf/build-aux/config.guess
@@ -86,6 +86,17 @@ if [ "x$OUT" = x ]; then
fi
fi
+# Test and fix little endian MIPS.
+if [ "x$OUT" = x ]; then
+ if [ `uname -s` = Linux ]; then
+ if [ `uname -m` = mipsel ]; then
+ OUT=mipsel-unknown-linux-gnu
+ elif [ `uname -m` = mips64el ]; then
+ OUT=mips64el-unknown-linux-gnu
+ fi
+ fi
+fi
+
# Test and fix cpu on Macosx when C preprocessor is not on the path
echo $OUT | grep i386-apple-darwin > /dev/null 2> /dev/null
if test $? = 0; then
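
A quick way to sanity-check the new branch on a little-endian MIPS box (expected output taken from the added code, not verified here):

    sh make/autoconf/build-aux/config.guess
    # expected on Linux/mipsel:   mipsel-unknown-linux-gnu
    # expected on Linux/mips64el: mips64el-unknown-linux-gnu
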
diff --git a/make/autoconf/hotspot.m4 b/make/autoconf/hotspot.m4
index 24a1de8bc40..c56b4d7e052 100644
--- a/make/autoconf/hotspot.m4
+++ b/make/autoconf/hotspot.m4
@@ -25,7 +25,7 @@
# All valid JVM features, regardless of platform
VALID_JVM_FEATURES="compiler1 compiler2 zero minimal dtrace jvmti jvmci \
- graal vm-structs jni-check services management cmsgc g1gc parallelgc serialgc nmt cds \
+ graal vm-structs jni-check services management cmsgc epsilongc g1gc parallelgc serialgc zgc nmt cds \
static-build link-time-opt aot jfr"
# Deprecated JVM features (these are ignored, but with a warning)
@@ -328,6 +328,19 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
fi
fi
+ # Only enable ZGC on Linux x86_64
+ AC_MSG_CHECKING([if zgc should be built])
+ if HOTSPOT_CHECK_JVM_FEATURE(zgc); then
+ if test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xx86_64"; then
+ AC_MSG_RESULT([yes])
+ else
+ DISABLED_JVM_FEATURES="$DISABLED_JVM_FEATURES zgc"
+ AC_MSG_RESULT([no, platform not supported])
+ fi
+ else
+ AC_MSG_RESULT([no])
+ fi
+
# Turn on additional features based on other parts of configure
if test "x$INCLUDE_DTRACE" = "xtrue"; then
JVM_FEATURES="$JVM_FEATURES dtrace"
@@ -410,7 +423,7 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
fi
# All variants but minimal (and custom) get these features
- NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES cmsgc g1gc parallelgc serialgc jni-check jvmti management nmt services vm-structs"
+ NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES cmsgc g1gc parallelgc serialgc epsilongc jni-check jvmti management nmt services vm-structs"
if test "x$ENABLE_CDS" = "xtrue"; then
NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES cds"
fi
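
For anyone trying the new features: zgc (like epsilongc) is a plain JVM feature, so it is requested through configure as usual, and the added check disables it on anything but linux-x86_64. A sketch:

    bash configure --with-jvm-features=zgc
    # configure reports "checking if zgc should be built... yes" on linux-x86_64,
    # or "no, platform not supported" elsewhere
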
diff --git a/make/autoconf/spec.gmk.in b/make/autoconf/spec.gmk.in
index 4e437265604..8de0abeee81 100644
--- a/make/autoconf/spec.gmk.in
+++ b/make/autoconf/spec.gmk.in
@@ -858,18 +858,19 @@ ifeq ($(DEBUG_LEVEL), fastdebug)
else ifneq ($(DEBUG_LEVEL), release)
DEBUG_PART := -$(DEBUG_LEVEL)
endif
-JDK_BUNDLE_NAME := jdk-$(BASE_NAME)_bin$(DEBUG_PART).tar.gz
-JRE_BUNDLE_NAME := jre-$(BASE_NAME)_bin$(DEBUG_PART).tar.gz
+ifeq ($(OPENJDK_TARGET_OS), windows)
+ JDK_BUNDLE_EXTENSION := zip
+else
+ JDK_BUNDLE_EXTENSION := tar.gz
+endif
+JDK_BUNDLE_NAME := jdk-$(BASE_NAME)_bin$(DEBUG_PART).$(JDK_BUNDLE_EXTENSION)
JDK_SYMBOLS_BUNDLE_NAME := jdk-$(BASE_NAME)_bin$(DEBUG_PART)-symbols.tar.gz
-JRE_SYMBOLS_BUNDLE_NAME := jre-$(BASE_NAME)_bin$(DEBUG_PART)-symbols.tar.gz
TEST_DEMOS_BUNDLE_NAME := jdk-$(BASE_NAME)_bin-tests-demos$(DEBUG_PART).tar.gz
TEST_BUNDLE_NAME := jdk-$(BASE_NAME)_bin-tests$(DEBUG_PART).tar.gz
DOCS_BUNDLE_NAME := jdk-$(BASE_NAME)_doc-api-spec$(DEBUG_PART).tar.gz
JDK_BUNDLE := $(BUNDLES_OUTPUTDIR)/$(JDK_BUNDLE_NAME)
-JRE_BUNDLE := $(BUNDLES_OUTPUTDIR)/$(JRE_BUNDLE_NAME)
JDK_SYMBOLS_BUNDLE := $(BUNDLES_OUTPUTDIR)/$(JDK_SYMBOLS_BUNDLE_NAME)
-JRE_SYMBOLS_BUNDLE := $(BUNDLES_OUTPUTDIR)/$(JRE_SYMBOLS_BUNDLE_NAME)
TEST_DEMOS_BUNDLE := $(BUNDLES_OUTPUTDIR)/$(TEST_DEMOS_BUNDLE_NAME)
TEST_BUNDLE := $(BUNDLES_OUTPUTDIR)/$(TEST_BUNDLE_NAME)
DOCS_BUNDLE := $(BUNDLES_OUTPUTDIR)/$(DOCS_BUNDLE_NAME)
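
Illustration of the resulting bundle names (the version part is hypothetical; the real BASE_NAME comes from configure):

    make product-bundles
    # Linux:   bundles/jdk-11_linux-x64_bin.tar.gz
    # Windows: bundles/jdk-11_windows-x64_bin.zip   (zip per the new JDK_BUNDLE_EXTENSION)
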
diff --git a/make/common/JdkNativeCompilation.gmk b/make/common/JdkNativeCompilation.gmk
index 49b0f3e795c..c1bd70350dd 100644
--- a/make/common/JdkNativeCompilation.gmk
+++ b/make/common/JdkNativeCompilation.gmk
@@ -32,6 +32,36 @@ endif
include NativeCompilation.gmk
+# Hook to include the corresponding custom file, if present.
+$(eval $(call IncludeCustomExtension, common/JdkNativeCompilation.gmk))
+
+FindSrcDirsForLib += \
+ $(call uniq, $(wildcard \
+ $(TOPDIR)/src/$(strip $1)/$(OPENJDK_TARGET_OS)/native/lib$(strip $2) \
+ $(TOPDIR)/src/$(strip $1)/$(OPENJDK_TARGET_OS_TYPE)/native/lib$(strip $2) \
+ $(TOPDIR)/src/$(strip $1)/share/native/lib$(strip $2)))
+
+FindSrcDirsForComponent += \
+ $(call uniq, $(wildcard \
+ $(TOPDIR)/src/$(strip $1)/$(OPENJDK_TARGET_OS)/native/$(strip $2) \
+ $(TOPDIR)/src/$(strip $1)/$(OPENJDK_TARGET_OS_TYPE)/native/$(strip $2) \
+ $(TOPDIR)/src/$(strip $1)/share/native/$(strip $2)))
+
+GetJavaHeaderDir = \
+ $(wildcard $(SUPPORT_OUTPUTDIR)/headers/$(strip $1))
+
+# Process a dir description such as "java.base:headers" into a set of proper absolute paths.
+ProcessDir = \
+ $(if $(findstring :, $1), \
+ $(call FindSrcDirsForComponent, $(firstword $(subst :, , $1)), $(lastword $(subst :, , $1))) \
+ , \
+ $(if $(filter /%, $1), \
+ $1 \
+ , \
+ $(call FindSrcDirsForComponent, $(MODULE), $1) \
+ ) \
+ )
+
# Setup make rules for creating a native shared library with suitable defaults
# for the OpenJDK project.
#
@@ -39,8 +69,16 @@ include NativeCompilation.gmk
# and the targets generated are listed in a variable by that name.
#
# Remaining parameters are named arguments. These are all passed on to
-# SetupNativeCompilation, except for
+# SetupNativeCompilation, except for
# EXTRA_RC_FLAGS -- additional RC_FLAGS to append.
+# EXTRA_HEADER_DIRS -- additional directories to look for headers in
+# EXTRA_SRC -- additional directories to look for source in
+# EXCLUDE_SRC_PATTERNS -- exclude source dirs matching these patterns from
+# appearing in SRC.
+# HEADERS_FROM_SRC -- if false, does not add source dirs automatically as
+# header include dirs. (Defaults to true.)
+# SRC -- this is passed on, but preprocessed to accept source dir designations
+# such as "java.base:headers".
SetupJdkLibrary = $(NamedParamsMacroTemplate)
define SetupJdkLibraryBody
ifeq ($$($1_OUTPUT_DIR), )
@@ -51,6 +89,20 @@ define SetupJdkLibraryBody
$1_OBJECT_DIR := $$(SUPPORT_OUTPUTDIR)/native/$$(MODULE)/lib$$($1_NAME)
endif
+ ifeq ($$($1_SRC), )
+ $1_SRC := $$(call FindSrcDirsForLib, $$(MODULE), $$($1_NAME))
+ else
+ $1_SRC := $$(foreach dir, $$($1_SRC), $$(call ProcessDir, $$(dir)))
+ endif
+ ifneq ($$($1_EXTRA_SRC), )
+ $1_SRC += $$(foreach dir, $$($1_EXTRA_SRC), $$(call ProcessDir, $$(dir)))
+ endif
+
+ ifneq ($$($1_EXCLUDE_SRC_PATTERNS), )
+ $1_EXCLUDE_SRC := $$(call containing, $$($1_EXCLUDE_SRC_PATTERNS), $$($1_SRC))
+ $1_SRC := $$(filter-out $$($1_EXCLUDE_SRC), $$($1_SRC))
+ endif
+
ifeq ($$($1_VERSIONINFO_RESOURCE), )
$1_VERSIONINFO_RESOURCE := $$(GLOBAL_VERSION_INFO_RESOURCE)
else ifeq ($$($1_VERSIONINFO_RESOURCE), DISABLE)
@@ -66,6 +118,25 @@ define SetupJdkLibraryBody
$1_RC_FLAGS :=
endif
+ ifneq ($$($1_HEADERS_FROM_SRC), false)
+ $1_SRC_HEADER_FLAGS := $$(foreach dir, $$(wildcard $$($1_SRC) \
+ $$(call GetJavaHeaderDir, $$(MODULE))), -I$$(dir))
+ endif
+ ifneq ($$($1_EXTRA_HEADER_DIRS), )
+ $1_PROCESSED_EXTRA_HEADER_DIRS := $$(foreach dir, $$($1_EXTRA_HEADER_DIRS), \
+ $$(call ProcessDir, $$(dir)))
+ $1_EXTRA_HEADER_FLAGS := $$(addprefix -I, $$($1_PROCESSED_EXTRA_HEADER_DIRS))
+ endif
+
+ ifneq ($$($1_CFLAGS), )
+ $1_CFLAGS += $$($1_SRC_HEADER_FLAGS) $$($1_EXTRA_HEADER_FLAGS)
+ endif
+ ifneq ($$($1_CXXFLAGS), )
+ $1_CXXFLAGS += $$($1_SRC_HEADER_FLAGS) $$($1_EXTRA_HEADER_FLAGS)
+ endif
+ ifeq ($$($1_CFLAGS)$$($1_CXXFLAGS), )
+ $1_CFLAGS += $$($1_SRC_HEADER_FLAGS) $$($1_EXTRA_HEADER_FLAGS)
+ endif
$1_RC_FLAGS += $$($1_EXTRA_RC_FLAGS)
# Since we reuse the rule name ($1), all our arguments will pass through.
@@ -80,7 +151,7 @@ endef
# and the targets generated are listed in a variable by that name.
#
# Remaining parameters are named arguments. These are all passed on to
-# SetupNativeCompilation, except for
+# SetupNativeCompilation, except for
# EXTRA_RC_FLAGS -- additional RC_FLAGS to append.
SetupJdkExecutable = $(NamedParamsMacroTemplate)
define SetupJdkExecutableBody
diff --git a/make/common/MakeBase.gmk b/make/common/MakeBase.gmk
index 960e9985206..b3a59e14aa1 100644
--- a/make/common/MakeBase.gmk
+++ b/make/common/MakeBase.gmk
@@ -634,7 +634,7 @@ endef
################################################################################
 # Filter out duplicate substrings while preserving order. Keeps the first occurrence.
uniq = \
- $(if $1,$(firstword $1) $(call uniq,$(filter-out $(firstword $1),$1)))
+ $(strip $(if $1,$(firstword $1) $(call uniq,$(filter-out $(firstword $1),$1))))
# Returns all whitespace-separated words in $2 where at least one of the
# whitespace-separated words in $1 is a substring.
diff --git a/make/conf/jib-profiles.js b/make/conf/jib-profiles.js
index 90bb74f470f..9324ccb6722 100644
--- a/make/conf/jib-profiles.js
+++ b/make/conf/jib-profiles.js
@@ -233,7 +233,7 @@ var getJibProfilesCommon = function (input, data) {
common.main_profile_names = [
"linux-x64", "linux-x86", "macosx-x64", "solaris-x64",
"solaris-sparcv9", "windows-x64", "windows-x86",
- "linux-aarch64", "linux-arm64", "linux-arm-vfp-hflt",
+ "linux-aarch64", "linux-arm32", "linux-arm64", "linux-arm-vfp-hflt",
"linux-arm-vfp-hflt-dyn"
];
@@ -272,28 +272,19 @@ var getJibProfilesCommon = function (input, data) {
*/
common.main_profile_artifacts = function (o) {
var jdk_subdir = (o.jdk_subdir != null ? o.jdk_subdir : "jdk-" + data.version);
- var jre_subdir = (o.jre_subdir != null ? o.jre_subdir : "jre-" + data.version);
+ var jdk_suffix = (o.jdk_suffix != null ? o.jdk_suffix : "tar.gz");
var pf = o.platform
return {
artifacts: {
jdk: {
- local: "bundles/\\(jdk.*bin.tar.gz\\)",
+ local: "bundles/\\(jdk.*bin." + jdk_suffix + "\\)",
remote: [
- "bundles/" + pf + "/jdk-" + data.version + "_" + pf + "_bin.tar.gz",
+ "bundles/" + pf + "/jdk-" + data.version + "_" + pf + "_bin." + jdk_suffix,
"bundles/" + pf + "/\\1"
],
subdir: jdk_subdir,
exploded: "images/jdk"
},
- jre: {
- local: "bundles/\\(jre.*bin.tar.gz\\)",
- remote: [
- "bundles/" + pf + "/jre-" + data.version + "_" + pf + "_bin.tar.gz",
- "bundles/" + pf + "/\\1"
- ],
- subdir: jre_subdir,
- exploded: "images/jre"
- },
test: {
local: "bundles/\\(jdk.*bin-tests.tar.gz\\)",
remote: [
@@ -319,15 +310,6 @@ var getJibProfilesCommon = function (input, data) {
subdir: jdk_subdir,
exploded: "images/jdk"
},
- jre_symbols: {
- local: "bundles/\\(jre.*bin-symbols.tar.gz\\)",
- remote: [
- "bundles/" + pf + "/jre-" + data.version + "_" + pf + "_bin-symbols.tar.gz",
- "bundles/" + pf + "/\\1"
- ],
- subdir: jre_subdir,
- exploded: "images/jre"
- }
}
};
};
@@ -339,28 +321,19 @@ var getJibProfilesCommon = function (input, data) {
*/
common.debug_profile_artifacts = function (o) {
var jdk_subdir = "jdk-" + data.version + "/fastdebug";
- var jre_subdir = "jre-" + data.version + "/fastdebug";
+ var jdk_suffix = (o.jdk_suffix != null ? o.jdk_suffix : "tar.gz");
var pf = o.platform
return {
artifacts: {
jdk: {
- local: "bundles/\\(jdk.*bin-debug.tar.gz\\)",
+ local: "bundles/\\(jdk.*bin-debug." + jdk_suffix + "\\)",
remote: [
- "bundles/" + pf + "/jdk-" + data.version + "_" + pf + "_bin-debug.tar.gz",
+ "bundles/" + pf + "/jdk-" + data.version + "_" + pf + "_bin-debug." + jdk_suffix,
"bundles/" + pf + "/\\1"
],
subdir: jdk_subdir,
exploded: "images/jdk"
},
- jre: {
- local: "bundles/\\(jre.*bin-debug.tar.gz\\)",
- remote: [
- "bundles/" + pf + "/jre-" + data.version + "_" + pf + "_bin-debug.tar.gz",
- "bundles/" + pf + "/\\1"
- ],
- subdir: jre_subdir,
- exploded: "images/jre"
- },
test: {
local: "bundles/\\(jdk.*bin-tests-debug.tar.gz\\)",
remote: [
@@ -378,15 +351,6 @@ var getJibProfilesCommon = function (input, data) {
subdir: jdk_subdir,
exploded: "images/jdk"
},
- jre_symbols: {
- local: "bundles/\\(jre.*bin-debug-symbols.tar.gz\\)",
- remote: [
- "bundles/" + pf + "/jre-" + data.version + "_" + pf + "_bin-debug-symbols.tar.gz",
- "bundles/" + pf + "/\\1"
- ],
- subdir: jre_subdir,
- exploded: "images/jre"
- }
}
};
};
@@ -490,6 +454,17 @@ var getJibProfilesProfiles = function (input, common, data) {
],
},
+ "linux-arm32": {
+ target_os: "linux",
+ target_cpu: "arm",
+ build_cpu: "x64",
+ dependencies: ["devkit", "autoconf", "build_devkit", "cups"],
+ configure_args: [
+ "--openjdk-target=arm-linux-gnueabihf", "--with-freetype=bundled",
+ "--with-abi-profile=arm-vfp-hflt", "--disable-warnings-as-errors"
+ ],
+ },
+
"linux-arm-vfp-hflt": {
target_os: "linux",
target_cpu: "arm",
@@ -608,7 +583,6 @@ var getJibProfilesProfiles = function (input, common, data) {
"macosx-x64": {
platform: "osx-x64",
jdk_subdir: "jdk-" + data.version + ".jdk/Contents/Home",
- jre_subdir: "jre-" + data.version + ".jre/Contents/Home"
},
"solaris-x64": {
platform: "solaris-x64",
@@ -618,13 +592,18 @@ var getJibProfilesProfiles = function (input, common, data) {
},
"windows-x64": {
platform: "windows-x64",
+ jdk_suffix: "zip",
},
"windows-x86": {
platform: "windows-x86",
+ jdk_suffix: "zip",
},
"linux-aarch64": {
platform: "linux-aarch64",
},
+ "linux-arm32": {
+ platform: "linux-arm32",
+ },
"linux-arm64": {
platform: "linux-arm64-vfp-hflt",
},
@@ -715,6 +694,14 @@ var getJibProfilesProfiles = function (input, common, data) {
profiles[openName].artifacts["jdk"].remote));
});
+ // Enable ZGC in linux-x64-open builds
+ [ "linux-x64-open" ].forEach(function (name) {
+ var configureArgs = { configure_args: [ "--with-jvm-features=zgc" ] };
+ var debugName = name + common.debug_suffix;
+ profiles[name] = concatObjects(profiles[name], configureArgs);
+ profiles[debugName] = concatObjects(profiles[debugName], configureArgs);
+ });
+
// Profiles used to run tests. Used in JPRT and Mach 5.
var testOnlyProfiles = {
"run-test-jprt": {
@@ -829,7 +816,11 @@ var getJibProfilesDependencies = function (input, common) {
: "gcc7.3.0-Fedora27+1.0"),
linux_arm: (input.profile != null && input.profile.indexOf("hflt") >= 0
? "gcc-linaro-arm-linux-gnueabihf-raspbian-2012.09-20120921_linux+1.0"
- : "arm-linaro-4.7+1.0")
+ : (input.profile.indexOf("arm32") >= 0
+ ? "gcc7.3.0-Fedora27+1.0"
+ : "arm-linaro-4.7+1.0"
+ )
+ )
};
var devkit_platform = (input.target_cpu == "x86"
diff --git a/make/data/currency/CurrencyData.properties b/make/data/currency/CurrencyData.properties
index 64b0bc3839d..60a2ae893f4 100644
--- a/make/data/currency/CurrencyData.properties
+++ b/make/data/currency/CurrencyData.properties
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@ formatVersion=3
# Version of the currency code information in this class.
# It is a serial number that accompanies with each amendment.
-dataVersion=164
+dataVersion=167
# List of all valid ISO 4217 currency codes.
# To ensure compatibility, do not remove codes.
@@ -47,7 +47,7 @@ all=ADP020-AED784-AFA004-AFN971-ALL008-AMD051-ANG532-AOA973-ARS032-ATS040-AUD036
HRK191-HTG332-HUF348-IDR360-IEP372-ILS376-INR356-IQD368-IRR364-ISK352-\
ITL380-JMD388-JOD400-JPY392-KES404-KGS417-KHR116-KMF174-KPW408-KRW410-\
KWD414-KYD136-KZT398-LAK418-LBP422-LKR144-LRD430-LSL426-LTL440-LUF442-\
- LVL428-LYD434-MAD504-MDL498-MGA969-MGF450-MKD807-MMK104-MNT496-MOP446-MRO478-\
+ LVL428-LYD434-MAD504-MDL498-MGA969-MGF450-MKD807-MMK104-MNT496-MOP446-MRO478-MRU929-\
MTL470-MUR480-MVR462-MWK454-MXN484-MXV979-MYR458-MZM508-MZN943-NAD516-NGN566-\
NIO558-NLG528-NOK578-NPR524-NZD554-OMR512-PAB590-PEN604-PGK598-PHP608-\
PKR586-PLN985-PTE620-PYG600-QAR634-ROL946-RON946-RSD941-RUB643-RUR810-RWF646-SAR682-\
@@ -324,7 +324,7 @@ KG=KGS
# LAO PEOPLE'S DEMOCRATIC REPUBLIC (THE)
LA=LAK
# LATVIA
-LV=LVL;2013-12-31-22-00-00;EUR
+LV=EUR
# LEBANON
LB=LBP
# LESOTHO
@@ -336,7 +336,7 @@ LY=LYD
# LIECHTENSTEIN
LI=CHF
# LITHUANIA
-LT=LTL;2014-12-31-22-00-00;EUR
+LT=EUR
# LUXEMBOURG
LU=EUR
# MACAU
@@ -360,7 +360,7 @@ MH=USD
# MARTINIQUE
MQ=EUR
# MAURITANIA
-MR=MRO
+MR=MRU
# MAURITIUS
MU=MUR
# MAYOTTE
diff --git a/make/data/fontconfig/macosx.fontconfig.properties b/make/data/fontconfig/macosx.fontconfig.properties
index 7e3c2fd1731..e1b8cdcc44a 100644
--- a/make/data/fontconfig/macosx.fontconfig.properties
+++ b/make/data/fontconfig/macosx.fontconfig.properties
@@ -34,9 +34,7 @@ allfonts.chinese-gb18030=SimSun-18030
allfonts.chinese-hkscs=MingLiU_HKSCS
allfonts.devanagari=Mangal
allfonts.dingbats=Wingdings
-allfonts.lucida=Lucida Sans Regular
allfonts.symbol=Symbol
-allfonts.thai=Lucida Sans Regular
serif.plain.alphabetic=Times New Roman
serif.plain.chinese-ms950=MingLiU
@@ -202,10 +200,9 @@ sequence.monospaced.x-windows-949=korean,alphabetic,dingbats,symbol
sequence.dialog.x-windows-949=alphabetic,korean,dingbats,symbol
sequence.dialoginput.x-windows-949=alphabetic,korean,dingbats,symbol
-sequence.allfonts.x-windows-874=alphabetic,thai,dingbats,symbol
+sequence.allfonts.x-windows-874=alphabetic,dingbats,symbol
-sequence.fallback=lucida,\
- chinese-ms950,chinese-hkscs,chinese-ms936,chinese-gb18030,\
+sequence.fallback=chinese-ms950,chinese-hkscs,chinese-ms936,chinese-gb18030,\
japanese,korean
# Exclusion Ranges
@@ -256,7 +253,6 @@ filename.Gulim=gulim.TTC
filename.Batang=batang.TTC
filename.GulimChe=gulim.TTC
-filename.Lucida_Sans_Regular=LucidaSansRegular.ttf
filename.Mangal=MANGAL.TTF
filename.Symbol=SYMBOL.TTF
filename.Wingdings=WINGDING.TTF
diff --git a/make/data/fontconfig/solaris.fontconfig.properties b/make/data/fontconfig/solaris.fontconfig.properties
index 03f61e7709d..4b3d48b56dd 100644
--- a/make/data/fontconfig/solaris.fontconfig.properties
+++ b/make/data/fontconfig/solaris.fontconfig.properties
@@ -43,7 +43,6 @@ allfonts.dingbats=-microsoft-wingdings-medium-r-normal--*-%d-*-*-p-*-adobe-fonts
allfonts.japanese-x0212=-misc-ipagothic-medium-r-normal--*-%d-*-*-m-*-iso10646-1
allfonts.korean=-hanyang-gothic-medium-r-normal--*-%d-*-*-m-*-iso10646-1
allfonts.korean-johab=-hanyang-gothic-medium-r-normal--*-%d-*-*-m-*-iso10646-1
-allfonts.lucida=-b&h-lucidasans-medium-r-normal-sans-*-%d-*-*-p-*-iso8859-1
allfonts.symbol=-monotype-symbol-medium-r-normal--*-%d-*-*-p-*-adobe-symbol
allfonts.bengali=-misc-lohit bengali-medium-r-normal--0-0-0-0-p-0-iso10646-1
allfonts.gujarati=-misc-lohit gujarati-medium-r-normal--0-0-0-0-p-0-iso10646-1
@@ -426,7 +425,7 @@ sequence.allfonts.UTF-8.zh.TW=latin-1,chinese-big5,chinese-hkscs,chinese-gb18030
# - japanese-x0212: same files as japanese-x0201
# - korean: same file as korean-johab
sequence.fallback=latin-1,latin-2,latin-7,cyrillic-iso8859-5,greek,latin-5,latin-9,\
- arabic,hebrew,thai,lucida,\
+ arabic,hebrew,thai,\
chinese-gb18030-0,\
japanese-x0201,korean-johab,\
hindi,bengali,telugu,marathi,tamil,gujarati,kannada,malayalam,\
@@ -466,7 +465,6 @@ filename.-hanyang-gothic-medium-r-normal--*-%d-*-*-m-*-iso10646-1=/usr/share/fon
filename.-arphic-uming-medium-r-normal--*-%d-*-*-m-*-iso10646-1=/usr/share/fonts/TrueType/arphic/uming.ttf
filename.-monotype-symbol-medium-r-normal--*-%d-*-*-p-*-adobe-symbol=/usr/share/fonts/TrueType/core/symbol.ttf
filename.-microsoft-wingdings-medium-r-normal--*-%d-*-*-p-*-adobe-fontspecific=/usr/share/fonts/TrueType/core/wingdings.ttf
-filename.-b&h-lucidasans-medium-r-normal-sans-*-%d-*-*-p-*-iso8859-1=$JRE_LIB_FONTS/LucidaSansRegular.ttf
filename.-misc-lohit_bengali-medium-r-normal--0-0-0-0-p-0-iso10646-1=/usr/share/fonts/TrueType/lohit/Lohit-Bengali.ttf
filename.-misc-lohit_gujarati-medium-r-normal--0-0-0-0-p-0-iso10646-1=/usr/share/fonts/TrueType/lohit/Lohit-Gujarati.ttf
filename.-misc-lohit_hindi-medium-r-normal--0-0-0-0-p-0-iso10646-1=/usr/share/fonts/TrueType/lohit/Lohit-Hindi.ttf
diff --git a/make/data/fontconfig/windows.fontconfig.properties b/make/data/fontconfig/windows.fontconfig.properties
index fcfcb8f5190..95c35b5526b 100644
--- a/make/data/fontconfig/windows.fontconfig.properties
+++ b/make/data/fontconfig/windows.fontconfig.properties
@@ -39,10 +39,9 @@ allfonts.chinese-ms950-extb=MingLiU-ExtB
allfonts.devanagari=Mangal
allfonts.kannada=Tunga
allfonts.dingbats=Wingdings
-allfonts.lucida=Lucida Sans Regular
allfonts.symbol=Symbol
allfonts.symbols=Segoe UI Symbol
-allfonts.thai=Lucida Sans Regular
+allfonts.thai=DokChampa
allfonts.georgian=Sylfaen
serif.plain.alphabetic=Times New Roman
@@ -238,7 +237,7 @@ sequence.dialoginput.x-windows-949=alphabetic,korean,dingbats,symbol
sequence.allfonts.x-windows-874=alphabetic,thai,dingbats,symbol
-sequence.fallback=lucida,symbols,\
+sequence.fallback=symbols,\
chinese-ms950,chinese-hkscs,chinese-ms936,chinese-gb18030,\
japanese,korean,chinese-ms950-extb,chinese-ms936-extb,georgian,kannada
@@ -294,7 +293,7 @@ filename.Gulim=gulim.TTC
filename.Batang=batang.TTC
filename.GulimChe=gulim.TTC
-filename.Lucida_Sans_Regular=LucidaSansRegular.ttf
+filename.DokChampa=dokchamp.ttf
filename.Mangal=MANGAL.TTF
filename.Tunga=TUNGA.TTF
filename.Symbol=SYMBOL.TTF
diff --git a/make/data/jdwp/jdwp.spec b/make/data/jdwp/jdwp.spec
index a33f31ed41c..e701a404398 100644
--- a/make/data/jdwp/jdwp.spec
+++ b/make/data/jdwp/jdwp.spec
@@ -1997,8 +1997,7 @@ JDWP "Java(tm) Debug Wire Protocol"
)
)
(Command Stop=10
- "Stops the thread with an asynchronous exception, as if done by "
- "java.lang.Thread.stop "
+ "Stops the thread with an asynchronous exception. "
(Out
(threadObject thread "The thread object ID. ")
(object throwable "Asynchronous exception. This object must "
diff --git a/make/data/lsrdata/language-subtag-registry.txt b/make/data/lsrdata/language-subtag-registry.txt
index b38d2c82319..dc532a20197 100644
--- a/make/data/lsrdata/language-subtag-registry.txt
+++ b/make/data/lsrdata/language-subtag-registry.txt
@@ -1,4 +1,4 @@
-File-Date: 2017-08-15
+File-Date: 2018-04-23
%%
Type: language
Subtag: aa
@@ -378,6 +378,7 @@ Subtag: hy
Description: Armenian
Added: 2005-10-16
Suppress-Script: Armn
+Comments: see also hyw
%%
Type: language
Subtag: hz
@@ -525,6 +526,7 @@ Suppress-Script: Latn
%%
Type: language
Subtag: km
+Description: Khmer
Description: Central Khmer
Added: 2005-10-16
Suppress-Script: Khmr
@@ -957,6 +959,7 @@ Subtag: sr
Description: Serbian
Added: 2005-10-16
Macrolanguage: sh
+Comments: see cnr for Montenegrin
%%
Type: language
Subtag: ss
@@ -1531,6 +1534,7 @@ Added: 2009-07-29
%%
Type: language
Subtag: add
+Description: Lidzonka
Description: Dzodinka
Added: 2009-07-29
%%
@@ -2114,7 +2118,7 @@ Added: 2009-07-29
%%
Type: language
Subtag: aja
-Description: Aja (Sudan)
+Description: Aja (South Sudan)
Added: 2009-07-29
%%
Type: language
@@ -3097,6 +3101,7 @@ Added: 2009-07-29
%%
Type: language
Subtag: asf
+Description: Auslan
Description: Australian Sign Language
Added: 2009-07-29
%%
@@ -4240,7 +4245,7 @@ Added: 2009-07-29
%%
Type: language
Subtag: bdh
-Description: Baka (Sudan)
+Description: Baka (South Sudan)
Added: 2009-07-29
%%
Type: language
@@ -4250,6 +4255,7 @@ Added: 2009-07-29
%%
Type: language
Subtag: bdj
+Description: Bai (South Sudan)
Description: Bai
Added: 2009-07-29
%%
@@ -5293,7 +5299,7 @@ Added: 2009-07-29
%%
Type: language
Subtag: blm
-Description: Beli (Sudan)
+Description: Beli (South Sudan)
Added: 2009-07-29
%%
Type: language
@@ -8104,6 +8110,13 @@ Description: Con
Added: 2009-07-29
%%
Type: language
+Subtag: cnr
+Description: Montenegrin
+Added: 2018-01-23
+Macrolanguage: sh
+Comments: see sr for Serbian
+%%
+Type: language
Subtag: cns
Description: Central Asmat
Added: 2009-07-29
@@ -8768,6 +8781,11 @@ Description: Tepeuxila Cuicatec
Added: 2009-07-29
%%
Type: language
+Subtag: cuy
+Description: Cuitlatec
+Added: 2018-03-08
+%%
+Type: language
Subtag: cvg
Description: Chug
Added: 2009-07-29
@@ -11089,7 +11107,7 @@ Added: 2005-10-16
%%
Type: language
Subtag: fap
-Description: Palor
+Description: Paloor
Added: 2009-07-29
%%
Type: language
@@ -12282,6 +12300,11 @@ Description: Guya
Added: 2009-07-29
%%
Type: language
+Subtag: gkd
+Description: Magɨ (Madang Province)
+Added: 2018-03-08
+%%
+Type: language
Subtag: gke
Description: Ndai
Added: 2009-07-29
@@ -12494,6 +12517,11 @@ Description: Gooniyandi
Added: 2009-07-29
%%
Type: language
+Subtag: gnj
+Description: Ngen
+Added: 2018-03-08
+%%
+Type: language
Subtag: gnk
Description: //Gana
Description: ǁGana
@@ -13224,6 +13252,11 @@ Description: Guyanese Creole English
Added: 2009-07-29
%%
Type: language
+Subtag: gyo
+Description: Gyalsumdo
+Added: 2018-03-08
+%%
+Type: language
Subtag: gyr
Description: Guarayu
Added: 2009-07-29
@@ -13584,6 +13617,11 @@ Description: Hunjara-Kaina Ke
Added: 2009-07-29
%%
Type: language
+Subtag: hkn
+Description: Mel-Khaonh
+Added: 2018-03-08
+%%
+Type: language
Subtag: hks
Description: Hong Kong Sign Language
Description: Heung Kong Sau Yue
@@ -14238,6 +14276,12 @@ Description: Hya
Added: 2009-07-29
%%
Type: language
+Subtag: hyw
+Description: Western Armenian
+Added: 2018-03-08
+Comments: see also hy
+%%
+Type: language
Subtag: hyx
Description: Armenian (family)
Added: 2009-07-29
@@ -14860,6 +14904,7 @@ Added: 2009-07-29
%%
Type: language
Subtag: iri
+Description: Rigwe
Description: Irigwe
Added: 2009-07-29
%%
@@ -20313,7 +20358,7 @@ Added: 2009-07-29
%%
Type: language
Subtag: lno
-Description: Lango (Sudan)
+Description: Lango (South Sudan)
Added: 2009-07-29
%%
Type: language
@@ -20579,6 +20624,7 @@ Type: language
Subtag: lsg
Description: Lyons Sign Language
Added: 2009-07-29
+Deprecated: 2018-03-08
%%
Type: language
Subtag: lsh
@@ -20850,6 +20896,11 @@ Description: Luwo
Added: 2009-07-29
%%
Type: language
+Subtag: lws
+Description: Malawian Sign Language
+Added: 2018-03-08
+%%
+Type: language
Subtag: lwt
Description: Lewotobi
Added: 2009-07-29
@@ -20904,6 +20955,7 @@ Type: language
Subtag: maa
Description: San Jerónimo Tecóatl Mazatec
Added: 2009-07-29
+Comments: see also pbm
%%
Type: language
Subtag: mab
@@ -23799,11 +23851,13 @@ Type: language
Subtag: mwx
Description: Mediak
Added: 2009-07-29
+Deprecated: 2018-03-08
%%
Type: language
Subtag: mwy
Description: Mosiro
Added: 2009-07-29
+Deprecated: 2018-03-08
%%
Type: language
Subtag: mwz
@@ -24527,6 +24581,8 @@ Type: language
Subtag: ncp
Description: Ndaktup
Added: 2009-07-29
+Deprecated: 2018-03-08
+Preferred-Value: kdz
%%
Type: language
Subtag: ncq
@@ -25458,6 +25514,11 @@ Description: Nihali
Added: 2009-07-29
%%
Type: language
+Subtag: nlm
+Description: Mankiyali
+Added: 2018-03-08
+%%
+Type: language
Subtag: nln
Description: Durango Nahuatl
Added: 2009-07-29
@@ -26693,6 +26754,11 @@ Description: Njebi
Added: 2009-07-29
%%
Type: language
+Subtag: nzd
+Description: Nzadi
+Added: 2018-03-08
+%%
+Type: language
Subtag: nzi
Description: Nzima
Added: 2005-10-16
@@ -27757,6 +27823,12 @@ Description: Mak (Nigeria)
Added: 2009-07-29
%%
Type: language
+Subtag: pbm
+Description: Puebla Mazatec
+Added: 2018-03-08
+Comments: see also maa
+%%
+Type: language
Subtag: pbn
Description: Kpasam
Added: 2009-07-29
@@ -30902,6 +30974,7 @@ Added: 2005-10-16
%%
Type: language
Subtag: scp
+Description: Hyolmo
Description: Helambu Sherpa
Added: 2009-07-29
%%
@@ -33049,6 +33122,7 @@ Added: 2009-07-29
%%
Type: language
Subtag: sxg
+Description: Shuhi
Description: Shixing
Added: 2009-07-29
%%
@@ -33835,6 +33909,11 @@ Description: Tulishi
Added: 2009-07-29
%%
Type: language
+Subtag: tez
+Description: Tetserret
+Added: 2018-03-08
+%%
+Type: language
Subtag: tfi
Description: Tofin Gbe
Added: 2009-07-29
@@ -34399,7 +34478,7 @@ Added: 2009-07-29
Type: language
Subtag: tlh
Description: Klingon
-Description: tlhIngan-Hol
+Description: tlhIngan Hol
Added: 2005-10-16
%%
Type: language
@@ -42199,6 +42278,7 @@ Prefix: sgn
%%
Type: extlang
Subtag: asf
+Description: Auslan
Description: Australian Sign Language
Added: 2009-07-29
Preferred-Value: asf
@@ -42927,7 +43007,7 @@ Type: extlang
Subtag: lsg
Description: Lyons Sign Language
Added: 2009-07-29
-Preferred-Value: lsg
+Deprecated: 2018-03-08
Prefix: sgn
%%
Type: extlang
@@ -42983,6 +43063,13 @@ Prefix: lv
Macrolanguage: lv
%%
Type: extlang
+Subtag: lws
+Description: Malawian Sign Language
+Added: 2018-03-08
+Preferred-Value: lws
+Prefix: sgn
+%%
+Type: extlang
Subtag: lzh
Description: Literary Chinese
Added: 2009-07-29
@@ -44493,6 +44580,11 @@ Description: Kaganga
Added: 2006-10-17
%%
Type: script
+Subtag: Rohg
+Description: Hanifi Rohingya
+Added: 2017-12-13
+%%
+Type: script
Subtag: Roro
Description: Rongorongo
Added: 2005-10-16
@@ -44563,6 +44655,16 @@ Description: Sinhala
Added: 2005-10-16
%%
Type: script
+Subtag: Sogd
+Description: Sogdian
+Added: 2017-12-13
+%%
+Type: script
+Subtag: Sogo
+Description: Old Sogdian
+Added: 2017-12-13
+%%
+Type: script
Subtag: Sora
Description: Sora Sompeng
Added: 2011-01-07
@@ -46412,15 +46514,26 @@ Comments: Portuguese orthography conventions established in 1990 but
not brought into effect until 2009
%%
Type: variant
+Subtag: aranes
+Description: Aranese
+Added: 2018-04-22
+Prefix: oc
+Comments: Occitan variant spoken in the Val d'Aran
+%%
+Type: variant
Subtag: arevela
Description: Eastern Armenian
Added: 2006-09-18
+Deprecated: 2018-03-24
+Preferred-Value: hy
Prefix: hy
%%
Type: variant
Subtag: arevmda
Description: Western Armenian
Added: 2006-09-18
+Deprecated: 2018-03-24
+Preferred-Value: hyw
Prefix: hy
%%
Type: variant
@@ -46431,6 +46544,13 @@ Added: 2017-06-05
Prefix: tw
%%
Type: variant
+Subtag: auvern
+Description: Auvergnat
+Added: 2018-04-22
+Prefix: oc
+Comments: Occitan variant spoken in Auvergne
+%%
+Type: variant
Subtag: baku1926
Description: Unified Turkic Latin Alphabet (Historical)
Added: 2007-04-18
@@ -46510,6 +46630,13 @@ Prefix: en
Comments: Jargon embedded in American English
%%
Type: variant
+Subtag: cisaup
+Description: Cisalpine
+Added: 2018-04-22
+Prefix: oc
+Comments: Occitan variant spoken in northwestern Italy
+%%
+Type: variant
Subtag: colb1945
Description: Portuguese-Brazilian Orthographic Convention of 1945
(Convenção Ortográfica Luso-Brasileira de 1945)
@@ -46528,6 +46655,12 @@ Added: 2015-12-07
Prefix: en
%%
Type: variant
+Subtag: creiss
+Description: Occitan variants of the Croissant area
+Added: 2018-04-22
+Prefix: oc
+%%
+Type: variant
Subtag: dajnko
Description: Slovene in Dajnko alphabet
Added: 2012-06-27
@@ -46556,6 +46689,11 @@ Description: International Phonetic Alphabet
Added: 2006-12-11
%%
Type: variant
+Subtag: fonkirsh
+Description: Kirshenbaum Phonetic Alphabet
+Added: 2018-04-22
+%%
+Type: variant
Subtag: fonnapa
Description: North American Phonetic Alphabet
Description: Americanist Phonetic Notation
@@ -46573,6 +46711,36 @@ Added: 2010-10-23
Comments: Indicates that the content is transcribed according to X-SAMPA
%%
Type: variant
+Subtag: gascon
+Description: Gascon
+Added: 2018-04-22
+Prefix: oc
+Comments: Occitan variant spoken in Gascony
+%%
+Type: variant
+Subtag: grclass
+Description: Classical Occitan orthography
+Added: 2018-04-22
+Prefix: oc
+Comments: Classical written standard for Occitan developed in 1935 by
+ Alibèrt
+%%
+Type: variant
+Subtag: grital
+Description: Italian-inspired Occitan orthography
+Added: 2018-04-22
+Prefix: oc
+%%
+Type: variant
+Subtag: grmistr
+Description: Mistralian or Mistralian-inspired Occitan orthography
+Added: 2018-04-22
+Prefix: oc
+Comments: Written standard developed by Romanilha in 1853 and used by
+ Mistral and the Félibres, including derived standards such as Escolo
+ dóu Po, Escolo Gaston Febus, and others
+%%
+Type: variant
Subtag: hepburn
Description: Hepburn romanization
Added: 2009-10-01
@@ -46617,6 +46785,13 @@ Added: 2010-07-28
Prefix: sa
%%
Type: variant
+Subtag: ivanchov
+Description: Bulgarian in 1899 orthography
+Added: 2017-12-13
+Prefix: bg
+Comments: Bulgarian orthography introduced by Todor Ivanchov in 1899
+%%
+Type: variant
Subtag: jauer
Description: Jauer dialect of Romansh
Added: 2010-06-29
@@ -46659,6 +46834,20 @@ Added: 2010-07-28
Prefix: sa
%%
Type: variant
+Subtag: lemosin
+Description: Limousin
+Added: 2018-04-22
+Prefix: oc
+Comments: Occitan variant spoken in Limousin
+%%
+Type: variant
+Subtag: lengadoc
+Description: Languedocien
+Added: 2018-04-22
+Prefix: oc
+Comments: Occitan variant spoken in Languedoc
+%%
+Type: variant
Subtag: lipaw
Description: The Lipovaz dialect of Resian
Description: The Lipovec dialect of Resian
@@ -46712,6 +46901,13 @@ Added: 2015-11-25
Prefix: en-CA
%%
Type: variant
+Subtag: nicard
+Description: Niçard
+Added: 2018-04-22
+Prefix: oc
+Comments: Occitan variant spoken in Nice
+%%
+Type: variant
Subtag: njiva
Description: The Gniva dialect of Resian
Description: The Njiva dialect of Resian
@@ -46798,6 +46994,13 @@ Added: 2006-12-11
Prefix: el
%%
Type: variant
+Subtag: provenc
+Description: Provençal
+Added: 2018-04-22
+Prefix: oc
+Comments: Occitan variant spoken in Provence
+%%
+Type: variant
Subtag: puter
Description: Puter idiom of Romansh
Added: 2010-06-29
@@ -46959,6 +47162,13 @@ Comments: Vallader is one of the five traditional written standards or
"idioms" of the Romansh language.
%%
Type: variant
+Subtag: vivaraup
+Description: Vivaro-Alpine
+Added: 2018-04-22
+Prefix: oc
+Comments: Occitan variant spoken in northeastern Occitania
+%%
+Type: variant
Subtag: wadegile
Description: Wade-Giles romanization
Added: 2008-10-03
diff --git a/make/devkit/Makefile b/make/devkit/Makefile
index 1bd37951fd3..d0464fbecd9 100644
--- a/make/devkit/Makefile
+++ b/make/devkit/Makefile
@@ -42,6 +42,8 @@
# line looking like this:
#
# make cross_compile_target="aarch64-linux-gnu" BASE_OS=Fedora27
+# or
+# make cross_compile_target="arm-linux-gnueabihf" BASE_OS=Fedora27
#
# This is the makefile which iterates over all host and target platforms.
#
diff --git a/make/devkit/Tools.gmk b/make/devkit/Tools.gmk
index 5bc63448111..5e87a848e68 100644
--- a/make/devkit/Tools.gmk
+++ b/make/devkit/Tools.gmk
@@ -44,13 +44,23 @@ $(info HOST=$(HOST))
$(info BUILD=$(BUILD))
ARCH := $(word 1,$(subst -, ,$(TARGET)))
+
+ifeq ($(TARGET), arm-linux-gnueabihf)
+ ARCH=armhfp
+endif
+
$(info ARCH=$(ARCH))
ifeq ($(BASE_OS), OEL6)
OEL_URL := http://yum.oracle.com/repo/OracleLinux/OL6/4/base/$(ARCH)/
LINUX_VERSION := OEL6.4
else ifeq ($(BASE_OS), Fedora27)
- OEL_URL := https://dl.fedoraproject.org/pub/fedora-secondary/releases/27/Everything/$(ARCH)/os/Packages/
+ ifeq ($(ARCH), aarch64)
+ FEDORA_TYPE=fedora-secondary
+ else
+ FEDORA_TYPE=fedora/linux
+ endif
+ OEL_URL := https://dl.fedoraproject.org/pub/$(FEDORA_TYPE)/releases/27/Everything/$(ARCH)/os/Packages/
LINUX_VERSION := Fedora 27
else
$(error Unknown base OS $(BASE_OS))
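For reference, the branch above resolves to these package URLs (a sketch derived
directly from the hunk; the mirror layout is the one the OEL_URL assignment assumes):

  # ARCH=aarch64 lives in the secondary tree:
  #   https://dl.fedoraproject.org/pub/fedora-secondary/releases/27/Everything/aarch64/os/Packages/
  # ARCH=armhfp (derived from TARGET arm-linux-gnueabihf) lives in the primary tree:
  #   https://dl.fedoraproject.org/pub/fedora/linux/releases/27/Everything/armhfp/os/Packages/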
@@ -189,6 +199,8 @@ ifeq ($(ARCH),x86_64)
endif
else ifeq ($(ARCH),i686)
RPM_ARCHS := i386 i686 noarch
+else ifeq ($(ARCH), armhfp)
+ RPM_ARCHS := $(ARCH) armv7hl noarch
else
RPM_ARCHS := $(ARCH) noarch
endif
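With ARCH=armhfp, the mapping above means the devkit download step accepts packages
built for any of armhfp, armv7hl, or noarch; a sketch (package names illustrative only):

  # RPM_ARCHS for ARCH=armhfp expands to: armhfp armv7hl noarch
  #   e.g. both glibc-devel-*.armv7hl.rpm and kernel-headers-*.noarch.rpm match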
@@ -410,6 +422,10 @@ ifneq (,$(findstring linux,$(TARGET)))
$(BUILDDIR)/$(gcc_ver)/Makefile : CONFIG += --enable-__cxa_atexit
endif
+ifeq ($(ARCH), armhfp)
+ $(BUILDDIR)/$(gcc_ver)/Makefile : CONFIG += --with-float=hard
+endif
+
# Want:
# c,c++
# shared libs
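The armhfp block above makes the devkit gcc itself hard-float, consistent with the
arm-linux-gnueabihf triple; a minimal sketch of the resulting configure fragment
(path and the other CONFIG options elided, not a verbatim command):

  # .../$(gcc_ver)/configure --target=arm-linux-gnueabihf ... --with-float=hard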
diff --git a/make/devkit/createWindowsDevkit2017.sh b/make/devkit/createWindowsDevkit2017.sh
index bd9993965cc..4f208dad8b9 100644
--- a/make/devkit/createWindowsDevkit2017.sh
+++ b/make/devkit/createWindowsDevkit2017.sh
@@ -108,8 +108,8 @@ if [ ! -d $DEVKIT_ROOT/VC ]; then
# The redist runtime libs are needed to run the compiler but may not be
# installed on the machine where the devkit will be used.
- cp $DEVKIT_ROOT/VC/redist/x64/$MSVCR_DLL $DEVKIT_ROOT/VC/bin/x86
- cp $DEVKIT_ROOT/VC/redist/x64/$MSVCP_DLL $DEVKIT_ROOT/VC/bin/x86
+ cp $DEVKIT_ROOT/VC/redist/x86/$MSVCR_DLL $DEVKIT_ROOT/VC/bin/x86
+ cp $DEVKIT_ROOT/VC/redist/x86/$MSVCP_DLL $DEVKIT_ROOT/VC/bin/x86
cp $DEVKIT_ROOT/VC/redist/x64/$MSVCR_DLL $DEVKIT_ROOT/VC/bin/x64
cp $DEVKIT_ROOT/VC/redist/x64/$MSVCP_DLL $DEVKIT_ROOT/VC/bin/x64
fi
diff --git a/make/gendata/GendataFontConfig.gmk b/make/gendata/GendataFontConfig.gmk
index 52ee37c1880..50a3bcd49d0 100644
--- a/make/gendata/GendataFontConfig.gmk
+++ b/make/gendata/GendataFontConfig.gmk
@@ -23,8 +23,6 @@
# questions.
#
-$(eval $(call IncludeCustomExtension, gendata/GendataFontConfig.gmk))
-
GENDATA_FONT_CONFIG_DST := $(SUPPORT_OUTPUTDIR)/modules_libs/$(MODULE)
GENDATA_FONT_CONFIG_DATA_DIR ?= $(TOPDIR)/make/data/fontconfig
diff --git a/make/gensrc/Gensrc-java.desktop.gmk b/make/gensrc/Gensrc-java.desktop.gmk
index 62197ad6b13..aca529c8134 100644
--- a/make/gensrc/Gensrc-java.desktop.gmk
+++ b/make/gensrc/Gensrc-java.desktop.gmk
@@ -78,13 +78,6 @@ $(eval $(call SetupCompileProperties, COMPILE_PROPERTIES, \
GENSRC_JAVA_DESKTOP += $(COMPILE_PROPERTIES)
-# Some resources bundles are already present as java files but still need to be
-# copied to zh_HK locale.
-$(eval $(call SetupCopy-zh_HK,COPY_ZH_HK, \
- $(TOPDIR)/src/java.desktop/share/classes/sun/applet/resources/MsgAppletViewer_zh_TW.java))
-
-GENSRC_JAVA_DESKTOP += $(COPY_ZH_HK)
-
################################################################################
java.desktop: $(GENSRC_JAVA_DESKTOP)
diff --git a/make/gensrc/Gensrc-jdk.compiler.gmk b/make/gensrc/Gensrc-jdk.compiler.gmk
index 24be68e89c1..e5a299f7970 100644
--- a/make/gensrc/Gensrc-jdk.compiler.gmk
+++ b/make/gensrc/Gensrc-jdk.compiler.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@ $(eval $(call SetupCompileProperties,COMPILE_PROPERTIES, \
$(JAVAC_VERSION)))
$(eval $(call SetupParseProperties,PARSE_PROPERTIES, \
- com/sun/tools/javac/resources/compiler.properties))
+ com/sun/tools/javac/resources/compiler.properties \
+ com/sun/tools/javac/resources/launcher.properties))
all: $(COMPILE_PROPERTIES) $(PARSE_PROPERTIES)
diff --git a/make/gensrc/Gensrc-jdk.internal.vm.compiler.gmk b/make/gensrc/Gensrc-jdk.internal.vm.compiler.gmk
index 7d90a974060..c88862ea721 100644
--- a/make/gensrc/Gensrc-jdk.internal.vm.compiler.gmk
+++ b/make/gensrc/Gensrc-jdk.internal.vm.compiler.gmk
@@ -103,7 +103,7 @@ ADD_EXPORTS := \
$(GENSRC_DIR)/_gensrc_proc_done: $(PROC_SRCS) $(PROCESSOR_JARS)
$(call MakeDir, $(@D))
$(eval $(call ListPathsSafely,PROC_SRCS,$(@D)/_gensrc_proc_files))
- $(JAVA_SMALL) $(NEW_JAVAC) \
+ $(JAVA) $(NEW_JAVAC) \
-XDignore.symbol.file \
--upgrade-module-path $(JDK_OUTPUTDIR)/modules --system none \
$(ADD_EXPORTS) \
diff --git a/make/gensrc/Gensrc-jdk.scripting.nashorn.gmk b/make/gensrc/Gensrc-jdk.scripting.nashorn.gmk
new file mode 100644
index 00000000000..9dec093eede
--- /dev/null
+++ b/make/gensrc/Gensrc-jdk.scripting.nashorn.gmk
@@ -0,0 +1,51 @@
+#
+# Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation. Oracle designates this
+# particular file as subject to the "Classpath" exception as provided
+# by Oracle in the LICENSE file that accompanied this code.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+default: all
+
+include $(SPEC)
+include MakeBase.gmk
+include TextFileProcessing.gmk
+
+################################################################################
+
+# Version file needs to be processed with version numbers
+VERSION_FILE := jdk/nashorn/internal/runtime/resources/version.properties
+
+$(eval $(call SetupTextFileProcessing, BUILD_VERSION_FILE, \
+ SOURCE_FILES := $(TOPDIR)/src/$(MODULE)/share/classes/$(VERSION_FILE).template, \
+ OUTPUT_FILE := $(SUPPORT_OUTPUTDIR)/gensrc/$(MODULE)/$(VERSION_FILE), \
+ REPLACEMENTS := \
+ @@VERSION_STRING@@ => $(VERSION_STRING) ; \
+ @@VERSION_SHORT@@ => $(VERSION_SHORT) , \
+))
+
+TARGETS += $(NASGEN_RUN_FILE) $(BUILD_VERSION_FILE)
+
+################################################################################
+
+all: $(TARGETS)
+
+.PHONY: all default
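As an illustration of the SetupTextFileProcessing call above, the template is assumed
to carry placeholder lines that the build stamps with the configured version (property
names and values here are illustrative, not taken from the actual template):

  # version.properties.template (assumed):
  #   version_string=@@VERSION_STRING@@
  #   version_short=@@VERSION_SHORT@@
  # processed output for, say, an 11+17 build:
  #   version_string=11+17
  #   version_short=11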
diff --git a/make/hotspot/lib/JvmFeatures.gmk b/make/hotspot/lib/JvmFeatures.gmk
index 428a1bbc737..d4f0891532e 100644
--- a/make/hotspot/lib/JvmFeatures.gmk
+++ b/make/hotspot/lib/JvmFeatures.gmk
@@ -155,6 +155,16 @@ ifneq ($(call check-jvm-feature, serialgc), true)
JVM_EXCLUDE_FILES += psMarkSweep.cpp psMarkSweepDecorator.cpp
endif
+ifneq ($(call check-jvm-feature, epsilongc), true)
+ JVM_CFLAGS_FEATURES += -DINCLUDE_EPSILONGC=0
+ JVM_EXCLUDE_PATTERNS += gc/epsilon
+endif
+
+ifneq ($(call check-jvm-feature, zgc), true)
+ JVM_CFLAGS_FEATURES += -DINCLUDE_ZGC=0
+ JVM_EXCLUDE_PATTERNS += gc/z
+endif
+
ifneq ($(call check-jvm-feature, jfr), true)
JVM_CFLAGS_FEATURES += -DINCLUDE_JFR=0
JVM_EXCLUDE_PATTERNS += jfr
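The two new gates follow the established check-jvm-feature pattern: a feature missing
from the configured set forces its INCLUDE_* define to 0 and prunes its sources. A
hedged usage sketch (flag spelling assumed from the existing JVM-features convention):

  # configure a build with both collectors enabled:
  #   bash configure --with-jvm-features="zgc epsilongc"
  # HotSpot sources then test the define as usual:
  #   #if INCLUDE_ZGC ... #endif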
diff --git a/make/langtools/build.properties b/make/langtools/build.properties
index 53260739538..30ca441558c 100644
--- a/make/langtools/build.properties
+++ b/make/langtools/build.properties
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,8 @@ module.names = java.compiler \
jdk.jshell
langtools.resource.includes = \
- com/sun/tools/javac/resources/compiler.properties
+ com/sun/tools/javac/resources/compiler.properties \
+ com/sun/tools/javac/resources/launcher.properties
# Version info -- override as needed
jdk.version = 9
diff --git a/make/lib/Awt2dLibraries.gmk b/make/lib/Awt2dLibraries.gmk
index b492120a36d..8a138b2e788 100644
--- a/make/lib/Awt2dLibraries.gmk
+++ b/make/lib/Awt2dLibraries.gmk
@@ -27,37 +27,38 @@ $(eval $(call IncludeCustomExtension, lib/Awt2dLibraries-pre.gmk))
WIN_AWT_LIB := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libawt/awt.lib
+LIBAWT_DEFAULT_HEADER_DIRS := \
+ libawt/awt/image \
+ libawt/awt/image/cvutils \
+ libawt/java2d \
+ libawt/java2d/loops \
+ libawt/java2d/pipe \
+ #
+
################################################################################
-BUILD_LIBMLIB_SRC := $(TOPDIR)/src/java.desktop/share/native/libmlib_image \
- $(TOPDIR)/src/java.desktop/share/native/common/awt/medialib
-BUILD_LIBMLIB_CFLAGS := -D__USE_J2D_NAMES -D__MEDIALIB_OLD_NAMES \
- $(addprefix -I, $(BUILD_LIBMLIB_SRC)) \
- -I$(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/libmlib_image
+# We must not include java.desktop/unix/native/libmlib_image, which is
+# used only by libmlib_image_v on solaris-sparc.
+BUILD_LIBMLIB_EXCLUDE_SRC_PATTERNS := unix
-BUILD_LIBMLIB_LDLIBS :=
-
-BUILD_LIBMLIB_CFLAGS += -DMLIB_NO_LIBSUNMATH
+BUILD_LIBMLIB_CFLAGS := -D__USE_J2D_NAMES -D__MEDIALIB_OLD_NAMES -DMLIB_NO_LIBSUNMATH
ifeq ($(OPENJDK_TARGET_CPU_BITS), 64)
BUILD_LIBMLIB_CFLAGS += -DMLIB_OS64BIT
endif
-ifneq ($(OPENJDK_TARGET_OS), windows)
- BUILD_LIBMLIB_LDLIBS += $(LIBM) $(LIBDL)
-endif
-
$(eval $(call SetupJdkLibrary, BUILD_LIBMLIB_IMAGE, \
NAME := mlib_image, \
- SRC := $(BUILD_LIBMLIB_SRC), \
+ EXTRA_SRC := common/awt/medialib, \
EXCLUDE_FILES := mlib_c_ImageBlendTable.c, \
+ EXCLUDE_SRC_PATTERNS := $(BUILD_LIBMLIB_EXCLUDE_SRC_PATTERNS), \
OPTIMIZATION := HIGHEST, \
CFLAGS := $(CFLAGS_JDKLIB) \
$(BUILD_LIBMLIB_CFLAGS), \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
- LIBS := $(BUILD_LIBMLIB_LDLIBS) \
- $(JDKLIB_LIBS), \
+ LIBS := $(JDKLIB_LIBS), \
+ LIBS_unix := $(LIBM) $(LIBDL), \
))
$(BUILD_LIBMLIB_IMAGE): $(call FindLib, java.base, java)
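This hunk, like the rest of the file, leans on SetupJdkLibrary deriving source
locations from NAME instead of explicit SRC lists. A sketch of the assumed convention
(the authoritative lookup lives in the build framework, not shown in this patch):

  # NAME := mlib_image implies default SRC directories of the form
  #   $(TOPDIR)/src/$(MODULE)/*/native/libmlib_image
  # EXTRA_SRC entries resolve against the same native roots, e.g.
  #   common/awt/medialib -> src/java.desktop/share/native/common/awt/medialib
  # EXCLUDE_SRC_PATTERNS prunes matching roots, e.g. the "unix" subtree.
  # HEADERS_FROM_SRC := false keeps the library's own SRC dirs off the
  #   include path, so system headers win for external-library builds.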
@@ -68,14 +69,19 @@ TARGETS += $(BUILD_LIBMLIB_IMAGE)
ifeq ($(OPENJDK_TARGET_OS)-$(OPENJDK_TARGET_CPU_ARCH), solaris-sparc)
- LIBMLIB_IMAGE_V_SRC := $(TOPDIR)/src/java.desktop/share/native/libmlib_image \
- $(TOPDIR)/src/java.desktop/unix/native/libmlib_image \
- $(TOPDIR)/src/java.desktop/share/native/common/awt/medialib \
- $(TOPDIR)/src/java.desktop/unix/native/common/awt/medialib \
- #
- LIBMLIB_IMAGE_V_CFLAGS := $(TOPDIR)/src/java.desktop/unix/native/libmlib_image/vis_$(OPENJDK_TARGET_CPU_BITS).il \
- $(addprefix -I, $(LIBMLIB_IMAGE_V_SRC)) \
- #
+ # libmlib_image_v is basically built from mlib_image sources, with some additions
+ # and some exclusions.
+ LIBMLIB_IMAGE_V_SRC := \
+ libmlib_image \
+ common/awt/medialib \
+ #
+
+ LIBMLIB_IMAGE_V_CFLAGS := -xarch=sparcvis -D__USE_J2D_NAMES -D__MEDIALIB_OLD_NAMES \
+ $(TOPDIR)/src/$(MODULE)/unix/native/libmlib_image/vis_$(OPENJDK_TARGET_CPU_BITS).il
+
+ ifeq ($(OPENJDK_TARGET_CPU_BITS), 64)
+ LIBMLIB_IMAGE_V_CFLAGS += -DMLIB_OS64BIT
+ endif
BUILD_LIBMLIB_IMAGE_V_EXFILES := \
awt_ImagingLib.c \
@@ -95,19 +101,16 @@ ifeq ($(OPENJDK_TARGET_OS)-$(OPENJDK_TARGET_CPU_ARCH), solaris-sparc)
mlib_c_ImageLookUp_f.c \
#
- LIBMLIB_IMAGE_V_CFLAGS += $(filter-out -DMLIB_NO_LIBSUNMATH, $(BUILD_LIBMLIB_CFLAGS))
-
$(eval $(call SetupJdkLibrary, BUILD_LIBMLIB_IMAGE_V, \
NAME := mlib_image_v, \
SRC := $(LIBMLIB_IMAGE_V_SRC), \
EXCLUDE_FILES := $(BUILD_LIBMLIB_IMAGE_V_EXFILES), \
OPTIMIZATION := HIGHEST, \
- CFLAGS := -xarch=sparcvis \
- $(LIBMLIB_IMAGE_V_CFLAGS) \
- $(CFLAGS_JDKLIB), \
+ CFLAGS := $(CFLAGS_JDKLIB) \
+ $(LIBMLIB_IMAGE_V_CFLAGS), \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
- LIBS := -ljava -ljvm $(BUILD_LIBMLIB_LDLIBS), \
+ LIBS := -ljava -ljvm $(LIBM) $(LIBDL), \
))
$(BUILD_LIBMLIB_IMAGE_V): $(call FindLib, java.base, java)
@@ -118,18 +121,22 @@ endif
################################################################################
-LIBAWT_DIRS := $(TOPDIR)/src/java.desktop/share/native/libawt \
- $(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/libawt \
- $(TOPDIR)/src/java.desktop/share/native/common/awt/debug \
- $(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/common/awt \
+LIBAWT_EXTRA_SRC := \
+ common/awt/debug \
+ $(TOPDIR)/src/$(MODULE)/$(OPENJDK_TARGET_OS_TYPE)/native/common/awt \
#
-ifeq ($(OPENJDK_TARGET_OS), aix)
- LIBAWT_DIRS += $(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS)/native/libawt
+ifeq ($(OPENJDK_TARGET_OS)-$(OPENJDK_TARGET_CPU_ARCH), solaris-sparc)
+ LIBAWT_EXTRA_SRC += $(TOPDIR)/src/$(MODULE)/share/native/common/awt/medialib
endif
ifeq ($(OPENJDK_TARGET_OS), windows)
- LIBAWT_DIRS += $(TOPDIR)/src/java.desktop/share/native/common/awt/utility
+ LIBAWT_EXTRA_SRC += \
+ $(TOPDIR)/src/$(MODULE)/share/native/common/awt/utility \
+ $(TOPDIR)/src/$(MODULE)/share/native/common/font \
+ $(TOPDIR)/src/$(MODULE)/share/native/common/java2d/opengl \
+ $(TOPDIR)/src/$(MODULE)/$(OPENJDK_TARGET_OS_TYPE)/native/common/awt/systemscale \
+ #
endif
ifneq ($(filter $(OPENJDK_TARGET_OS), solaris linux macosx aix), )
@@ -140,33 +147,45 @@ ifeq ($(OPENJDK_TARGET_OS), macosx)
LIBAWT_EXFILES += initIDs.c awt/image/cvutils/img_colors.c
endif
-LIBAWT_CFLAGS += -I$(SUPPORT_OUTPUTDIR)/headers/java.desktop \
- $(addprefix -I, $(shell find $(LIBAWT_DIRS) -type d)) \
- $(LIBJAVA_HEADER_FLAGS) \
- $(addprefix -I, $(BUILD_LIBMLIB_IMAGE_SRC)) \
+ifeq ($(OPENJDK_TARGET_OS), windows)
+ LIBAWT_EXFILES += \
+ java2d/d3d/D3DShaderGen.c \
+ awt/image/cvutils/img_colors.c \
+ #
+endif
+
+ifeq ($(OPENJDK_TARGET_OS)-$(OPENJDK_TARGET_CPU), solaris-sparcv9)
+ LIBAWT_EXFILES += java2d/loops/MapAccelFunc.c
+else
+ LIBAWT_EXCLUDES += \
+ $(TOPDIR)/src/$(MODULE)/unix/native/libawt/awt/medialib \
+ $(TOPDIR)/src/$(MODULE)/unix/native/libawt/java2d/loops \
+ $(TOPDIR)/src/$(MODULE)/unix/native/common/awt/medialib \
+ #
+endif
+
+LIBAWT_EXTRA_HEADER_DIRS := \
+ $(LIBAWT_DEFAULT_HEADER_DIRS) \
+ $(call GetJavaHeaderDir, java.base) \
+ libawt/awt/medialib \
+ libawt/java2d/d3d \
+ libawt/java2d/opengl \
+ libawt/java2d/windows \
+ libawt/windows \
+ common/awt/medialib \
+ libmlib_image \
+ include \
+ java.base:libjava \
+ java.base:include \
#
LIBAWT_CFLAGS += -D__MEDIALIB_OLD_NAMES -D__USE_J2D_NAMES $(X_CFLAGS)
-ifeq ($(OPENJDK_TARGET_OS)-$(OPENJDK_TARGET_CPU_ARCH), solaris-sparc)
- LIBAWT_CFLAGS += -DMLIB_ADD_SUFF
- LIBAWT_CFLAGS += -xarch=sparcvis
+ifeq ($(OPENJDK_TARGET_OS)-$(OPENJDK_TARGET_CPU), solaris-sparcv9)
+ LIBAWT_CFLAGS += -xarch=sparcvis -DMLIB_ADD_SUFF \
+ $(TOPDIR)/src/$(MODULE)/unix/native/libmlib_image/vis_$(OPENJDK_TARGET_CPU_BITS).il
- LIBAWT_CFLAGS += $(TOPDIR)/src/java.desktop/unix/native/libmlib_image/vis_$(OPENJDK_TARGET_CPU_BITS).il
- LIBAWT_DIRS += $(TOPDIR)/src/java.desktop/share/native/common/awt/medialib
- LIBAWT_EXFILES += java2d/loops/MapAccelFunc.c
-
- ifeq ($(OPENJDK_TARGET_CPU), sparcv9)
- LIBAWT_ASFLAGS = -P -xarch=v9a
- else
- LIBAWT_ASFLAGS = -P -xarch=v8plusa
- endif
-else
- LIBAWT_EXCLUDES += \
- $(TOPDIR)/src/java.desktop/unix/native/libawt/awt/medialib \
- $(TOPDIR)/src/java.desktop/unix/native/libawt/java2d/loops \
- $(TOPDIR)/src/java.desktop/unix/native/common/awt/medialib \
- #
+ LIBAWT_ASFLAGS = -P -xarch=v9a
endif
ifneq ($(OPENJDK_TARGET_OS), solaris)
@@ -174,29 +193,13 @@ ifneq ($(OPENJDK_TARGET_OS), solaris)
endif
ifeq ($(OPENJDK_TARGET_OS), windows)
- LIBAWT_DIRS += $(TOPDIR)/src/java.desktop/share/native/common/font \
- $(TOPDIR)/src/java.desktop/share/native/common/java2d/opengl \
- $(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/common/awt/systemscale \
- # Why does libawt need java.base headers?
- LIBAWT_CFLAGS += -I$(TOPDIR)/src/java.desktop/share/native/common/font \
- -I$(TOPDIR)/src/java.desktop/share/native/common/java2d/opengl \
- -I$(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/common/java2d/opengl \
- -I$(TOPDIR)/src/java.desktop/windows/native/include \
- -I$(TOPDIR)/src/java.desktop/share/native/include \
- -I$(SUPPORT_OUTPUTDIR)/headers/java.base \
- #
- LIBAWT_EXFILES += \
- java2d/d3d/D3DShaderGen.c \
- awt/image/cvutils/img_colors.c \
- #
-
LIBAWT_CFLAGS += -EHsc -DUNICODE -D_UNICODE
ifeq ($(OPENJDK_TARGET_CPU_BITS), 64)
LIBAWT_CFLAGS += -DMLIB_OS64BIT
endif
LIBAWT_RC_FLAGS ?= -I $(TOPDIR)/src/java.base/windows/native/launcher/icons
- LIBAWT_VERSIONINFO_RESOURCE := $(TOPDIR)/src/java.desktop/windows/native/libawt/windows/awt.rc
+ LIBAWT_VERSIONINFO_RESOURCE := $(TOPDIR)/src/$(MODULE)/windows/native/libawt/windows/awt.rc
endif
ifeq ($(OPENJDK_TARGET_OS), linux)
@@ -215,16 +218,17 @@ endif
$(eval $(call SetupJdkLibrary, BUILD_LIBAWT, \
NAME := awt, \
- SRC := $(LIBAWT_DIRS), \
+ EXTRA_SRC := $(LIBAWT_EXTRA_SRC), \
EXCLUDES := $(LIBAWT_EXCLUDES), \
EXCLUDE_FILES := $(LIBAWT_EXFILES), \
OPTIMIZATION := LOW, \
CFLAGS := $(CFLAGS_JDKLIB) $(LIBAWT_CFLAGS), \
+ EXTRA_HEADER_DIRS := $(LIBAWT_EXTRA_HEADER_DIRS), \
DISABLED_WARNINGS_gcc := sign-compare unused-result maybe-uninitialized \
format-nonliteral parentheses, \
DISABLED_WARNINGS_clang := logical-op-parentheses extern-initializer, \
DISABLED_WARNINGS_solstudio := E_DECLARATION_IN_CODE, \
- DISABLED_WARNINGS_microsoft := 4297 4244 4267 4291 4302 4311 4996, \
+ DISABLED_WARNINGS_microsoft := 4244 4267 4996, \
ASFLAGS := $(LIBAWT_ASFLAGS), \
LDFLAGS := $(LDFLAGS_JDKLIB) $(call SET_SHARED_LIBRARY_ORIGIN), \
LDFLAGS_macosx := -L$(INSTALL_LIBRARIES_HERE), \
@@ -265,38 +269,26 @@ TARGETS += $(BUILD_LIBAWT)
################################################################################
-ifeq ($(findstring $(OPENJDK_TARGET_OS),windows macosx),)
+ifeq ($(findstring $(OPENJDK_TARGET_OS), windows macosx), )
ifeq ($(ENABLE_HEADLESS_ONLY), false)
- LIBAWT_XAWT_DIRS := \
- $(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/libawt_xawt \
- $(TOPDIR)/src/java.desktop/share/native/common/awt/debug \
- $(TOPDIR)/src/java.desktop/share/native/common/awt/utility \
- $(TOPDIR)/src/java.desktop/share/native/common/font \
- $(TOPDIR)/src/java.desktop/share/native/common/java2d \
- $(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/common/java2d \
- $(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/common/awt \
+ LIBAWT_XAWT_EXTRA_SRC := \
+ common/awt \
+ common/java2d \
+ common/font \
#
- ifneq ($(filter $(OPENJDK_TARGET_OS),linux solaris aix), )
- LIBAWT_XAWT_DIRS += $(TOPDIR)/src/java.desktop/unix/native/common/awt/systemscale
- endif
-
LIBAWT_XAWT_EXCLUDES := medialib
- LIBAWT_XAWT_CFLAGS := $(addprefix -I, $(shell $(FIND) $(LIBAWT_XAWT_DIRS) -type d)) \
- -I$(SUPPORT_OUTPUTDIR)/headers/java.desktop \
- -I$(TOPDIR)/src/java.desktop/share/native/include \
- -I$(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS)/native/include \
- -I$(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/include \
- -I$(TOPDIR)/src/java.desktop/share/native/libawt/java2d \
- -I$(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/libawt/java2d \
- -I$(TOPDIR)/src/java.desktop/share/native/libawt/java2d/loops \
- -I$(TOPDIR)/src/java.desktop/share/native/libawt/java2d/pipe \
- -I$(TOPDIR)/src/java.desktop/share/native/libawt/awt/image/cvutils \
- -I$(TOPDIR)/src/java.desktop/share/native/libawt/awt/image \
- -I$(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/common/font \
- $(LIBJAVA_HEADER_FLAGS)
+ LIBAWT_XAWT_EXTRA_HEADER_DIRS := \
+ $(LIBAWT_DEFAULT_HEADER_DIRS) \
+ libawt_xawt/awt \
+ include \
+ common/awt/debug \
+ common/awt/systemscale \
+ common/font \
+ common/java2d/opengl \
+ common/java2d/x11 \
#
LIBAWT_XAWT_CFLAGS += -DXAWT -DXAWT_HACK \
@@ -329,11 +321,12 @@ ifeq ($(findstring $(OPENJDK_TARGET_OS),windows macosx),)
BUILD_LIBAWT_XAWT_awt_Font.c_CFLAGS := -w
# initializing a declared 'extern'
BUILD_LIBAWT_XAWT_debug_mem.c_CFLAGS := -w
- endif
+ endif
$(eval $(call SetupJdkLibrary, BUILD_LIBAWT_XAWT, \
NAME := awt_xawt, \
- SRC := $(LIBAWT_XAWT_DIRS), \
+ EXTRA_SRC := $(LIBAWT_XAWT_EXTRA_SRC), \
+ EXTRA_HEADER_DIRS := $(LIBAWT_XAWT_EXTRA_HEADER_DIRS), \
EXCLUDES := $(LIBAWT_XAWT_EXCLUDES), \
OPTIMIZATION := LOW, \
CFLAGS := $(CFLAGS_JDKLIB) $(LIBAWT_XAWT_CFLAGS) \
@@ -349,10 +342,6 @@ ifeq ($(findstring $(OPENJDK_TARGET_OS),windows macosx),)
$(call SET_SHARED_LIBRARY_ORIGIN) \
-L$(INSTALL_LIBRARIES_HERE), \
LIBS := $(X_LIBS) $(LIBAWT_XAWT_LIBS), \
- RC_FLAGS := $(RC_FLAGS) \
- -D "JDK_FNAME=xawt.dll" \
- -D "JDK_INTERNAL_NAME=xawt" \
- -D "JDK_FTYPE=0x2L", \
))
$(BUILD_LIBAWT_XAWT): $(call FindLib, java.base, java)
@@ -366,36 +355,34 @@ endif
################################################################################
-LIBLCMS_SRC := $(TOPDIR)/src/java.desktop/share/native/liblcms
-LIBLCMS_CPPFLAGS += -I$(SUPPORT_OUTPUTDIR)/headers/java.desktop \
- -I$(TOPDIR)/src/java.desktop/share/native/libawt/java2d \
- -I$(TOPDIR)/src/java.desktop/share/native/common/awt/debug \
- $(LIBJAVA_HEADER_FLAGS) \
- #
# The fast floor code loses precision.
LCMS_CFLAGS=-DCMS_DONT_USE_FAST_FLOOR
+LCMS_CFLAGS_JDKLIB := $(filter-out -xc99=%none, $(CFLAGS_JDKLIB))
+
ifeq ($(USE_EXTERNAL_LCMS), true)
# If we're using an external library, we'll just need the wrapper part.
# By including it explicitly, all other files will be excluded.
BUILD_LIBLCMS_INCLUDE_FILES := LCMS.c
+  # If we're using an external library, we can't add our own SRC path to
+  # the include path; the system headers should be used instead.
+ LIBLCMS_HEADERS_FROM_SRC := false
else
BUILD_LIBLCMS_INCLUDE_FILES :=
- # If we're using the bundled library, we'll need to include it in the
- # include path explicitly. Otherwise the system headers will be used.
- LIBLCMS_CPPFLAGS += $(addprefix -I, $(LIBLCMS_SRC))
endif
$(eval $(call SetupJdkLibrary, BUILD_LIBLCMS, \
NAME := lcms, \
- SRC := $(LIBLCMS_SRC), \
INCLUDE_FILES := $(BUILD_LIBLCMS_INCLUDE_FILES), \
OPTIMIZATION := HIGHEST, \
- CFLAGS := $(filter-out -xc99=%none, $(CFLAGS_JDKLIB)) \
- $(LIBLCMS_CPPFLAGS) \
+ CFLAGS := $(LCMS_CFLAGS_JDKLIB) \
$(LCMS_CFLAGS), \
CFLAGS_solaris := -xc99=no_lib, \
CFLAGS_windows := -DCMS_IS_WINDOWS_, \
+ EXTRA_HEADER_DIRS := \
+ common/awt/debug \
+ libawt/java2d, \
+ HEADERS_FROM_SRC := $(LIBLCMS_HEADERS_FROM_SRC), \
DISABLED_WARNINGS_gcc := format-nonliteral type-limits misleading-indentation, \
DISABLED_WARNINGS_clang := tautological-compare, \
DISABLED_WARNINGS_solstudio := E_STATEMENT_NOT_REACHED, \
@@ -413,8 +400,6 @@ $(BUILD_LIBLCMS): $(BUILD_LIBAWT)
################################################################################
-LIBJAVAJPEG_SRC += $(TOPDIR)/src/java.desktop/share/native/libjavajpeg
-
# "DISABLED_WARNINGS_gcc := clobbered" rationale:
# Suppress gcc warnings like "variable might be clobbered by 'longjmp'
# or 'vfork'": this warning indicates that some variable is placed to
@@ -428,21 +413,20 @@ ifeq ($(USE_EXTERNAL_LIBJPEG), true)
BUILD_LIBJAVAJPEG_INCLUDE_FILES := \
imageioJPEG.c \
jpegdecoder.c
- BUILD_LIBJAVAJPEG_HEADERS :=
+  # If we're using an external library, we can't add our own SRC path to
+  # the include path; the system headers should be used instead.
+ LIBJPEG_HEADERS_FROM_SRC := false
else
LIBJPEG_LIBS :=
BUILD_LIBJAVAJPEG_INCLUDE_FILES :=
- BUILD_LIBJAVAJPEG_HEADERS := $(addprefix -I, $(LIBJAVAJPEG_SRC))
endif
$(eval $(call SetupJdkLibrary, BUILD_LIBJAVAJPEG, \
NAME := javajpeg, \
- SRC := $(LIBJAVAJPEG_SRC), \
INCLUDE_FILES := $(BUILD_LIBJAVAJPEG_INCLUDE_FILES), \
OPTIMIZATION := HIGHEST, \
- CFLAGS := $(CFLAGS_JDKLIB) $(BUILD_LIBJAVAJPEG_HEADERS) \
- $(LIBJAVA_HEADER_FLAGS) \
- -I$(SUPPORT_OUTPUTDIR)/headers/java.desktop, \
+ CFLAGS := $(CFLAGS_JDKLIB), \
+ HEADERS_FROM_SRC := $(LIBJPEG_HEADERS_FROM_SRC), \
DISABLED_WARNINGS_gcc := clobbered implicit-fallthrough shift-negative-value, \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
@@ -459,42 +443,32 @@ TARGETS += $(BUILD_LIBJAVAJPEG)
# Mac and Windows only use the native AWT lib, do not build libawt_headless
ifeq ($(findstring $(OPENJDK_TARGET_OS), windows macosx),)
- LIBAWT_HEADLESS_DIRS := $(TOPDIR)/src/java.desktop/unix/native/libawt_headless/awt \
- $(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/common/awt \
- $(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/common/java2d \
- $(TOPDIR)/src/java.desktop/share/native/common/java2d \
- $(TOPDIR)/src/java.desktop/share/native/common/font \
+ LIBAWT_HEADLESS_EXTRA_SRC := \
+ common/font \
+ common/java2d \
+ $(TOPDIR)/src/$(MODULE)/$(OPENJDK_TARGET_OS_TYPE)/native/common/awt \
#
LIBAWT_HEADLESS_EXCLUDES := medialib
- LIBAWT_HEADLESS_CFLAGS := -I$(SUPPORT_OUTPUTDIR)/headers/java.desktop \
- $(addprefix -I, $(LIBAWT_HEADLESS_DIRS)) \
- -I$(TOPDIR)/src/java.desktop/share/native/libawt/awt/image \
- -I$(TOPDIR)/src/java.desktop/share/native/libawt/awt/image/cvutils \
- -I$(TOPDIR)/src/java.desktop/share/native/libawt/java2d \
- -I$(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/libawt/java2d \
- -I$(TOPDIR)/src/java.desktop/share/native/libawt/java2d/loops \
- -I$(TOPDIR)/src/java.desktop/share/native/libawt/java2d/pipe \
- -I$(TOPDIR)/src/java.desktop/share/native/common/awt/debug \
- -I$(TOPDIR)/src/java.desktop/share/native/common/font \
- -I$(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/common/font \
- -I$(TOPDIR)/src/java.desktop/share/native/common/java2d/opengl \
- -I$(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/common/java2d/opengl \
- $(LIBJAVA_HEADER_FLAGS) \
+
+ LIBAWT_HEADLESS_EXTRA_HEADER_DIRS := \
+ $(LIBAWT_DEFAULT_HEADER_DIRS) \
+ common/awt/debug \
+ common/font \
+ common/java2d/opengl \
#
+ LIBAWT_HEADLESS_CFLAGS := $(CUPS_CFLAGS) $(FONTCONFIG_CFLAGS) $(X_CFLAGS) \
+ -DHEADLESS=true -DPACKAGE_PATH=\"$(PACKAGE_PATH)\"
+
$(eval $(call SetupJdkLibrary, BUILD_LIBAWT_HEADLESS, \
NAME := awt_headless, \
- SRC := $(LIBAWT_HEADLESS_DIRS), \
+ EXTRA_SRC := $(LIBAWT_HEADLESS_EXTRA_SRC), \
EXCLUDES := $(LIBAWT_HEADLESS_EXCLUDES), \
OPTIMIZATION := LOW, \
CFLAGS := $(CFLAGS_JDKLIB) \
- -DHEADLESS=true \
- -DPACKAGE_PATH=\"$(PACKAGE_PATH)\" \
- $(CUPS_CFLAGS) \
- $(FONTCONFIG_CFLAGS) \
- $(X_CFLAGS) \
$(LIBAWT_HEADLESS_CFLAGS), \
+ EXTRA_HEADER_DIRS := $(LIBAWT_HEADLESS_EXTRA_HEADER_DIRS), \
DISABLED_WARNINGS_xlc := 1506-356, \
DISABLED_WARNINGS_solstudio := E_EMPTY_TRANSLATION_UNIT, \
LDFLAGS := $(LDFLAGS_JDKLIB) \
@@ -518,12 +492,15 @@ endif
################################################################################
ifeq ($(FREETYPE_TO_USE), system)
+ # For use by libfontmanager:
LIBFREETYPE_CFLAGS := $(FREETYPE_CFLAGS)
LIBFREETYPE_LIBS := $(FREETYPE_LIBS)
else
- LIBFREETYPE_SRC := $(TOPDIR)/src/java.desktop/share/native/libfreetype
- BUILD_LIBFREETYPE_HEADERS := $(addprefix -I, $(LIBFREETYPE_SRC)/include)
- LIBFREETYPE_CFLAGS := $(BUILD_LIBFREETYPE_HEADERS)
+ BUILD_LIBFREETYPE_HEADER_DIRS := $(TOPDIR)/src/$(MODULE)/share/native/libfreetype/include
+ BUILD_LIBFREETYPE_CFLAGS := -DFT2_BUILD_LIBRARY $(EXPORT_ALL_SYMBOLS)
+
+ # For use by libfontmanager:
+ LIBFREETYPE_CFLAGS := -I$(BUILD_LIBFREETYPE_HEADER_DIRS)
ifeq ($(OPENJDK_TARGET_OS), windows)
LIBFREETYPE_LIBS := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libfreetype/freetype.lib
else
@@ -532,14 +509,15 @@ else
$(eval $(call SetupJdkLibrary, BUILD_LIBFREETYPE, \
NAME := freetype, \
- SRC := $(LIBFREETYPE_SRC)/src, \
OPTIMIZATION := HIGHEST, \
- CFLAGS := $(CFLAGS_JDKLIB) $(BUILD_LIBFREETYPE_HEADERS) \
- -DFT2_BUILD_LIBRARY $(EXPORT_ALL_SYMBOLS), \
+ CFLAGS := $(CFLAGS_JDKLIB) \
+ $(BUILD_LIBFREETYPE_CFLAGS), \
+ EXTRA_HEADER_DIRS := $(BUILD_LIBFREETYPE_HEADER_DIRS), \
DISABLED_WARNINGS_solstudio := \
E_STATEMENT_NOT_REACHED \
E_END_OF_LOOP_CODE_NOT_REACHED, \
DISABLED_WARNINGS_microsoft := 4267 4244 4312, \
+ DISABLED_WARNINGS_gcc := implicit-fallthrough, \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
))
@@ -549,19 +527,6 @@ endif
###########################################################################
-LIBFONTMANAGER_SRC := $(TOPDIR)/src/java.desktop/share/native/libfontmanager \
- $(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/libfontmanager
-LIBFONTMANAGER_CFLAGS := \
- $(addprefix -I, $(shell $(FIND) \
- $(LIBFONTMANAGER_SRC) \
- $(TOPDIR)/src/java.desktop/share/native/libawt \
- $(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/libawt \
- $(TOPDIR)/src/java.desktop/share/native/common \
- $(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/common -type d)) \
- -I$(SUPPORT_OUTPUTDIR)/headers/java.desktop \
- $(LIBJAVA_HEADER_FLAGS) \
- #
-
#### Begin harfbuzz configuration
HARFBUZZ_CFLAGS := -DHAVE_OT -DHAVE_FALLBACK -DHAVE_UCDN
@@ -589,6 +554,16 @@ LIBFONTMANAGER_CFLAGS += $(HARFBUZZ_CFLAGS)
#### End harfbuzz configuration
+LIBFONTMANAGER_EXTRA_HEADER_DIRS := \
+ libfontmanager/harfbuzz \
+ libfontmanager/harfbuzz/hb-ucdn \
+ common/awt \
+ common/font \
+ libawt/java2d \
+ libawt/java2d/pipe \
+ libawt/java2d/loops \
+ #
+
LIBFONTMANAGER_CFLAGS += $(LIBFREETYPE_CFLAGS)
BUILD_LIBFONTMANAGER_FONTLIB += $(LIBFREETYPE_LIBS)
@@ -598,7 +573,6 @@ ifeq ($(OPENJDK_TARGET_OS), windows)
LIBFONTMANAGER_EXCLUDE_FILES += X11FontScaler.c \
X11TextRenderer.c
LIBFONTMANAGER_OPTIMIZATION := HIGHEST
- LIBFONTMANAGER_CFLAGS += -I$(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/libawt/windows
else ifeq ($(OPENJDK_TARGET_OS), macosx)
LIBFONTMANAGER_EXCLUDE_FILES += X11FontScaler.c \
X11TextRenderer.c \
@@ -609,7 +583,7 @@ else
lcdglyph.c
endif
-LIBFONTMANAGER_CFLAGS += $(FONT_HEADERS) $(X_CFLAGS) -DLE_STANDALONE -DHEADLESS
+LIBFONTMANAGER_CFLAGS += $(X_CFLAGS) -DLE_STANDALONE -DHEADLESS
ifeq ($(TOOLCHAIN_TYPE), gcc)
# Turn off all warnings for sunFont.c. This is needed because the specific warning
@@ -625,7 +599,6 @@ endif
# libawt_xawt). See JDK-8196516 for details.
$(eval $(call SetupJdkLibrary, BUILD_LIBFONTMANAGER, \
NAME := fontmanager, \
- SRC := $(LIBFONTMANAGER_SRC), \
EXCLUDE_FILES := $(LIBFONTMANAGER_EXCLUDE_FILES) \
AccelGlyphCache.c, \
TOOLCHAIN := TOOLCHAIN_LINK_CXX, \
@@ -633,6 +606,7 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBFONTMANAGER, \
CXXFLAGS := $(CXXFLAGS_JDKLIB) $(LIBFONTMANAGER_CFLAGS), \
OPTIMIZATION := $(LIBFONTMANAGER_OPTIMIZATION), \
CFLAGS_windows = -DCC_NOEX, \
+ EXTRA_HEADER_DIRS := $(LIBFONTMANAGER_EXTRA_HEADER_DIRS), \
WARNINGS_AS_ERRORS_xlc := false, \
DISABLED_WARNINGS_gcc := sign-compare int-to-pointer-cast \
type-limits missing-field-initializers implicit-fallthrough, \
@@ -664,7 +638,7 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBFONTMANAGER, \
$(BUILD_LIBFONTMANAGER): $(BUILD_LIBAWT)
ifeq ($(OPENJDK_TARGET_OS), macosx)
- $(BUILD_LIBFONTMANAGER): $(call FindLib, java.desktop, awt_lwawt)
+ $(BUILD_LIBFONTMANAGER): $(call FindLib, $(MODULE), awt_lwawt)
endif
ifeq ($(FREETYPE_TO_USE), bundled)
@@ -676,29 +650,30 @@ TARGETS += $(BUILD_LIBFONTMANAGER)
################################################################################
ifeq ($(OPENJDK_TARGET_OS), windows)
- LIBJAWT_SRC := $(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/libjawt
- LIBJAWT_CFLAGS := -I$(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/libawt/windows \
- -I$(TOPDIR)/src/java.desktop/share/native/common/awt/debug \
- -I$(TOPDIR)/src/java.desktop/share/native/libawt/java2d \
- -I$(TOPDIR)/src/java.desktop/share/native/libawt/awt/image/cvutils \
- -I$(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/libawt/java2d/windows \
- -I$(SUPPORT_OUTPUTDIR)/headers/java.desktop \
- -I$(TOPDIR)/src/java.desktop/windows/native/include \
- -I$(TOPDIR)/src/java.desktop/share/native/include \
- $(LIBJAVA_HEADER_FLAGS) \
+
+ LIBJAWT_CFLAGS := -EHsc -DUNICODE -D_UNICODE
+
+ LIBJAWT_EXTRA_HEADER_DIRS := \
+ include \
+ common/awt/debug \
+ libawt/awt/image/cvutils \
+ libawt/java2d \
+ libawt/java2d/windows \
+ libawt/windows \
+ java.base:include \
+ java.base:libjava \
#
ifeq ($(OPENJDK_TARGET_CPU), x86)
KERNEL32_LIB := kernel32.lib
endif
+
$(eval $(call SetupJdkLibrary, BUILD_LIBJAWT, \
NAME := jawt, \
- SRC := $(LIBJAWT_SRC), \
- INCLUDE_FILES := $(LIBJAWT_INCLUDE_FILES), \
OPTIMIZATION := LOW, \
CFLAGS := $(CXXFLAGS_JDKLIB) \
- -EHsc -DUNICODE -D_UNICODE \
$(LIBJAWT_CFLAGS), \
+ EXTRA_HEADER_DIRS := $(LIBJAWT_EXTRA_HEADER_DIRS), \
LDFLAGS := $(LDFLAGS_JDKLIB) $(LDFLAGS_CXX_JDK), \
LIBS := $(JDKLIB_LIBS) $(KERNEL32_LIB) advapi32.lib $(WIN_AWT_LIB), \
))
@@ -717,17 +692,9 @@ ifeq ($(OPENJDK_TARGET_OS), windows)
else # OPENJDK_TARGET_OS not windows
ifeq ($(OPENJDK_TARGET_OS), macosx)
- LIBJAWT_SRC := $(TOPDIR)/src/java.desktop/macosx/native/libjawt
- else
- LIBJAWT_SRC := $(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/libjawt
+    # libjawt on macosx does not use the unix code
+ LIBJAWT_EXCLUDE_SRC_PATTERNS := unix
endif
- LIBJAWT_CFLAGS := \
- -I$(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/common/awt \
- -I$(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS)/native/include \
- -I$(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/include \
- -I$(TOPDIR)/src/java.desktop/share/native/include \
- $(LIBJAVA_HEADER_FLAGS) \
- #
ifeq ($(OPENJDK_TARGET_OS), macosx)
JAWT_LIBS := -lawt_lwawt
@@ -740,19 +707,22 @@ else # OPENJDK_TARGET_OS not windows
JAWT_LIBS += -lawt_xawt
else
JAWT_LIBS += -lawt_headless
- HEADLESS_CFLAG += -DHEADLESS
+ ifeq ($(OPENJDK_TARGET_OS), linux)
+ JAWT_CFLAGS += -DHEADLESS
+ endif
endif
endif
$(eval $(call SetupJdkLibrary, BUILD_LIBJAWT, \
NAME := jawt, \
- SRC := $(LIBJAWT_SRC), \
+ EXCLUDE_SRC_PATTERNS := $(LIBJAWT_EXCLUDE_SRC_PATTERNS), \
INCLUDE_FILES := $(JAWT_FILES), \
OPTIMIZATION := LOW, \
CFLAGS := $(CFLAGS_JDKLIB) \
- $(LIBJAWT_CFLAGS), \
- CFLAGS_linux := $(HEADLESS_CFLAG), \
- CFLAGS_macosx := $(LIBJAWT_CFLAGS_macosx), \
+ $(JAWT_CFLAGS), \
+ EXTRA_HEADER_DIRS := \
+ include \
+ common/awt, \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
LDFLAGS_unix := -L$(INSTALL_LIBRARIES_HERE), \
@@ -780,52 +750,56 @@ TARGETS += $(BUILD_LIBJAWT)
ifeq ($(ENABLE_HEADLESS_ONLY), false)
- LIBSPLASHSCREEN_DIRS := \
- $(TOPDIR)/src/java.desktop/share/native/libjavajpeg \
- $(TOPDIR)/src/java.desktop/share/native/libsplashscreen \
+ LIBSPLASHSCREEN_EXTRA_SRC := \
+ common/awt/systemscale \
#
- ifeq ($(USE_EXTERNAL_LIBGIF), true)
- GIFLIB_LIBS := -lgif
- LIBSPLASHSCREEN_EXCLUDES := giflib
+ ifeq ($(USE_EXTERNAL_LIBGIF), false)
+ LIBSPLASHSCREEN_HEADER_DIRS += libsplashscreen/giflib
else
- LIBSPLASHSCREEN_CFLAGS += -I$(TOPDIR)/src/java.desktop/share/native/libsplashscreen/giflib
+ LIBSPLASHSCREEN_EXCLUDES := giflib
+ GIFLIB_LIBS := -lgif
endif
- ifeq ($(USE_EXTERNAL_LIBJPEG), true)
- LIBJPEG_LIBS := -ljpeg
+ ifeq ($(USE_EXTERNAL_LIBJPEG), false)
+ # While the following ought to work, it will currently pull in the closed
+ # additions to this library, and this was not done previously in the build.
+ # LIBSPLASHSCREEN_EXTRA_SRC += libjavajpeg
+ LIBSPLASHSCREEN_EXTRA_SRC += $(TOPDIR)/src/java.desktop/share/native/libjavajpeg
else
- LIBSPLASHSCREEN_DIRS += $(TOPDIR)/src/java.desktop/share/native/libjavajpeg
- LIBJPEG_CFLAGS := -I$(TOPDIR)/src/java.desktop/share/native/libjavajpeg
+ LIBJPEG_LIBS := -ljpeg
endif
ifeq ($(USE_EXTERNAL_LIBPNG), false)
- LIBSPLASHSCREEN_DIRS += $(TOPDIR)/src/java.desktop/share/native/libsplashscreen/libpng
+ LIBSPLASHSCREEN_HEADER_DIRS += libsplashscreen/libpng
+
+ ifeq ($(OPENJDK_TARGET_OS), macosx)
+ ifeq ($(USE_EXTERNAL_LIBZ), true)
+ # When building our own libpng and using an external libz, we need to
+ # inject our own libz.h to tweak the exported ZLIB_VERNUM macro. See
+ # $(TOPDIR)/src/java.desktop/macosx/native/libsplashscreen/libpng/zlibwrapper/zlib.h
+        # for details. This must be specified with -iquote, not -I, to avoid
+        # a circular include; see the sketch after this hunk.
+ LIBSPLASHSCREEN_CFLAGS += -iquote $(TOPDIR)/src/$(MODULE)/macosx/native/libsplashscreen/libpng/zlibwrapper
+ endif
+ endif
else
LIBSPLASHSCREEN_EXCLUDES += libpng
endif
- ifneq ($(OPENJDK_TARGET_OS), macosx)
- LIBSPLASHSCREEN_DIRS += $(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/libsplashscreen
- else
- LIBSPLASHSCREEN_DIRS += $(TOPDIR)/src/java.desktop/macosx/native/libsplashscreen
+ ifeq ($(USE_EXTERNAL_LIBZ), false)
+ LIBSPLASHSCREEN_EXTRA_SRC += java.base:libzip/zlib
endif
- ifneq ($(filter $(OPENJDK_TARGET_OS),linux solaris aix), )
- LIBSPLASHSCREEN_DIRS += $(TOPDIR)/src/java.desktop/unix/native/common/awt/systemscale
+ ifeq ($(OPENJDK_TARGET_OS), macosx)
+    # libsplashscreen on macosx does not use the unix code
+ LIBSPLASHSCREEN_EXCLUDE_SRC_PATTERNS := unix
endif
- ifeq ($(OPENJDK_TARGET_OS), windows)
- LIBSPLASHSCREEN_DIRS += $(TOPDIR)/src/java.desktop/windows/native/common/awt/systemscale
- endif
- LIBSPLASHSCREEN_CFLAGS += -DSPLASHSCREEN -DPNG_NO_MMX_CODE -DPNG_ARM_NEON_OPT=0 \
- $(addprefix -I, $(LIBSPLASHSCREEN_DIRS)) \
- $(LIBJAVA_HEADER_FLAGS) \
- #
+ LIBSPLASHSCREEN_CFLAGS += -DSPLASHSCREEN -DPNG_NO_MMX_CODE -DPNG_ARM_NEON_OPT=0
ifeq ($(OPENJDK_TARGET_OS), macosx)
LIBSPLASHSCREEN_CFLAGS += -DWITH_MACOSX
- LIBSPLASHSCREEN_CFLAGS += -I$(TOPDIR)/src/java.desktop/macosx/native/libosxapp
BUILD_LIBSPLASHSCREEN_java_awt_SplashScreen.c_CFLAGS := -x objective-c -O0
BUILD_LIBSPLASHSCREEN_splashscreen_gfx_impl.c_CFLAGS := -x objective-c -O0
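On the zlibwrapper note in this hunk: -iquote directories are searched only for
#include "..." directives, which is what makes the shadowing safe. A sketch of the
distinction (directory name illustrative):

  #   LIBSPLASHSCREEN_CFLAGS += -iquote <zlibwrapper dir>  # seen by #include "zlib.h" only
  # With -I instead, the wrapper's own '#include <zlib.h>' would resolve back
  # to the wrapper itself, producing the circular include the comment warns about.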
@@ -843,20 +817,6 @@ ifeq ($(ENABLE_HEADLESS_ONLY), false)
LIBSPLASHSCREEN_LIBS :=
- ifeq ($(USE_EXTERNAL_LIBZ), false)
- LIBSPLASHSCREEN_DIRS += $(TOPDIR)/src/java.base/share/native/libzip/zlib
- else
- ifeq ($(OPENJDK_TARGET_OS), macosx)
- ifeq ($(USE_EXTERNAL_LIBPNG), false)
- # When building our own libpng and using an external libz, we need to
- # inject our own libz.h to tweak the exported ZLIB_VERNUM macro. See
- # $(TOPDIR)/src/java.desktop/macosx/native/libsplashscreen/libpng/zlib.h
- # for details.
- LIBSPLASHSCREEN_CFLAGS += -iquote $(TOPDIR)/src/java.desktop/macosx/native/libsplashscreen/libpng
- endif
- endif
- endif
-
ifeq ($(OPENJDK_TARGET_OS), macosx)
LIBSPLASHSCREEN_LIBS += \
$(LIBM) -lpthread -liconv -losxapp \
@@ -870,14 +830,22 @@ ifeq ($(ENABLE_HEADLESS_ONLY), false)
LIBSPLASHSCREEN_LIBS += $(X_LIBS) -lX11 -lXext $(LIBM) -lpthread -ldl
endif
+ LIBSPLASHSCREEN_HEADER_DIRS += \
+ libosxapp \
+ java.base:include \
+ java.base:libjava \
+ #
+
$(eval $(call SetupJdkLibrary, BUILD_LIBSPLASHSCREEN, \
NAME := splashscreen, \
- SRC := $(LIBSPLASHSCREEN_DIRS), \
+ EXTRA_SRC := $(LIBSPLASHSCREEN_EXTRA_SRC), \
+ EXCLUDE_SRC_PATTERNS := $(LIBSPLASHSCREEN_EXCLUDE_SRC_PATTERNS), \
EXCLUDE_FILES := imageioJPEG.c jpegdecoder.c pngtest.c, \
EXCLUDES := $(LIBSPLASHSCREEN_EXCLUDES), \
OPTIMIZATION := LOW, \
- CFLAGS := $(LIBSPLASHSCREEN_CFLAGS) $(CFLAGS_JDKLIB) \
+ CFLAGS := $(CFLAGS_JDKLIB) $(LIBSPLASHSCREEN_CFLAGS) \
$(GIFLIB_CFLAGS) $(LIBJPEG_CFLAGS) $(PNG_CFLAGS) $(LIBZ_CFLAGS), \
+ EXTRA_HEADER_DIRS := $(LIBSPLASHSCREEN_HEADER_DIRS), \
DISABLED_WARNINGS_gcc := sign-compare type-limits unused-result \
maybe-uninitialized shift-negative-value implicit-fallthrough, \
DISABLED_WARNINGS_clang := incompatible-pointer-types, \
@@ -896,7 +864,7 @@ ifeq ($(ENABLE_HEADLESS_ONLY), false)
TARGETS += $(BUILD_LIBSPLASHSCREEN)
ifeq ($(OPENJDK_TARGET_OS), macosx)
- $(BUILD_LIBSPLASHSCREEN): $(call FindLib, java.desktop, osxapp)
+ $(BUILD_LIBSPLASHSCREEN): $(call FindLib, $(MODULE), osxapp)
endif
endif
@@ -905,49 +873,38 @@ endif
ifeq ($(OPENJDK_TARGET_OS), macosx)
- LIBAWT_LWAWT_DIRS := \
- $(TOPDIR)/src/java.desktop/macosx/native/libawt_lwawt \
- $(TOPDIR)/src/java.desktop/unix/native/common/awt \
- $(TOPDIR)/src/java.desktop/share/native/common/font \
- $(TOPDIR)/src/java.desktop/share/native/common/java2d \
+ LIBAWT_LWAWT_EXTRA_SRC := \
+ $(TOPDIR)/src/$(MODULE)/unix/native/common/awt \
+ $(TOPDIR)/src/$(MODULE)/share/native/common/font \
+ $(TOPDIR)/src/$(MODULE)/share/native/common/java2d \
#
- LIBAWT_LWAWT_CFLAGS := \
- $(addprefix -I, $(LIBAWT_LWAWT_DIRS)) \
- -I$(SUPPORT_OUTPUTDIR)/headers/java.desktop \
- -I$(TOPDIR)/src/java.desktop/macosx/native/libawt_lwawt/awt \
- -I$(TOPDIR)/src/java.desktop/unix/native/libawt_xawt/awt \
- -I$(TOPDIR)/src/java.desktop/macosx/native/libawt_lwawt/font \
- -I$(TOPDIR)/src/java.desktop/macosx/native/libawt_lwawt/java2d/opengl \
- -I$(TOPDIR)/src/java.desktop/share/native/common/awt/debug \
- -I$(TOPDIR)/src/java.desktop/share/native/common/java2d/opengl \
- -I$(TOPDIR)/src/java.desktop/macosx/native/include \
- -I$(TOPDIR)/src/java.desktop/share/native/include \
- -I$(TOPDIR)/src/java.desktop/share/native/libawt/awt/image \
- -I$(TOPDIR)/src/java.desktop/share/native/libawt/awt/image/cvutils \
- -I$(TOPDIR)/src/java.desktop/share/native/libawt/java2d \
- -I$(TOPDIR)/src/java.desktop/unix/native/libawt/java2d \
- -I$(TOPDIR)/src/java.desktop/share/native/libawt/java2d/loops \
- -I$(TOPDIR)/src/java.desktop/share/native/libawt/java2d/pipe \
- -I$(TOPDIR)/src/java.desktop/share/native/libmlib_image/ \
- -I$(TOPDIR)/src/java.desktop/macosx/native/libosxapp \
- $(LIBJAVA_HEADER_FLAGS) \
+ LIBAWT_LWAWT_EXTRA_HEADER_DIRS := \
+ $(LIBAWT_DEFAULT_HEADER_DIRS) \
+ libawt_lwawt/awt \
+ libawt_lwawt/font \
+ libawt_lwawt/java2d/opengl \
+ include \
+ common/awt/debug \
+ common/java2d/opengl \
+ libosxapp \
#
+ LIBAWT_LWAWT_CFLAGS := $(X_CFLAGS) $(X_LIBS)
+
LIBAWT_LWAWT_EXFILES := fontpath.c awt_Font.c X11Color.c
- LIBAWT_LWAWT_EXCLUDES := $(TOPDIR)/src/java.desktop/unix/native/common/awt/medialib
+ LIBAWT_LWAWT_EXCLUDES := $(TOPDIR)/src/$(MODULE)/unix/native/common/awt/medialib
$(eval $(call SetupJdkLibrary, BUILD_LIBAWT_LWAWT, \
NAME := awt_lwawt, \
- SRC := $(LIBAWT_LWAWT_DIRS), \
+ EXTRA_SRC := $(LIBAWT_LWAWT_EXTRA_SRC), \
INCLUDE_FILES := $(LIBAWT_LWAWT_FILES), \
EXCLUDE_FILES := $(LIBAWT_LWAWT_EXFILES), \
EXCLUDES := $(LIBAWT_LWAWT_EXCLUDES), \
OPTIMIZATION := LOW, \
CFLAGS := $(CFLAGS_JDKLIB) \
- $(X_CFLAGS) \
- $(X_LIBS) \
$(LIBAWT_LWAWT_CFLAGS), \
+ EXTRA_HEADER_DIRS := $(LIBAWT_LWAWT_EXTRA_HEADER_DIRS), \
DISABLED_WARNINGS_clang := incomplete-implementation enum-conversion \
deprecated-declarations objc-method-access bitwise-op-parentheses \
incompatible-pointer-types parentheses-equality extra-tokens, \
@@ -974,7 +931,7 @@ ifeq ($(OPENJDK_TARGET_OS), macosx)
$(BUILD_LIBAWT_LWAWT): $(BUILD_LIBMLIB_IMAGE)
- $(BUILD_LIBAWT_LWAWT): $(call FindLib, java.desktop, osxapp)
+ $(BUILD_LIBAWT_LWAWT): $(call FindLib, $(MODULE), osxapp)
$(BUILD_LIBAWT_LWAWT): $(call FindLib, java.base, java)
@@ -986,15 +943,11 @@ ifeq ($(OPENJDK_TARGET_OS), macosx)
$(eval $(call SetupJdkLibrary, BUILD_LIBOSXUI, \
NAME := osxui, \
- SRC := $(TOPDIR)/src/java.desktop/macosx/native/libosxui, \
OPTIMIZATION := LOW, \
- CFLAGS := $(CFLAGS_JDKLIB) \
- -I$(TOPDIR)/src/java.desktop/macosx/native/libosxui \
- -I$(TOPDIR)/src/java.desktop/macosx/native/libawt_lwawt/awt \
- -I$(TOPDIR)/src/java.desktop/macosx/native/libosxapp \
- -I$(TOPDIR)/src/java.base/share/native/libjava \
- -I$(TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS_TYPE)/native/libjava \
- -I$(SUPPORT_OUTPUTDIR)/headers/java.desktop, \
+ CFLAGS := $(CFLAGS_JDKLIB), \
+ EXTRA_HEADER_DIRS := \
+ libawt_lwawt/awt \
+ libosxapp, \
DISABLED_WARNINGS_clang := deprecated-declarations, \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN) \
@@ -1013,7 +966,7 @@ ifeq ($(OPENJDK_TARGET_OS), macosx)
$(BUILD_LIBOSXUI): $(BUILD_LIBAWT)
- $(BUILD_LIBOSXUI): $(call FindLib, java.desktop, osxapp)
+ $(BUILD_LIBOSXUI): $(call FindLib, $(MODULE), osxapp)
$(BUILD_LIBOSXUI): $(BUILD_LIBAWT_LWAWT)
diff --git a/make/lib/CoreLibraries.gmk b/make/lib/CoreLibraries.gmk
index 4a7dae1b7e6..df5c0191f61 100644
--- a/make/lib/CoreLibraries.gmk
+++ b/make/lib/CoreLibraries.gmk
@@ -104,7 +104,6 @@ endif
$(eval $(call SetupJdkLibrary, BUILD_LIBVERIFY, \
NAME := verify, \
- SRC := $(TOPDIR)/src/java.base/share/native/libverify, \
OPTIMIZATION := $(LIBVERIFY_OPTIMIZATION), \
CFLAGS := $(CFLAGS_JDKLIB), \
DISABLED_WARNINGS_gcc := implicit-fallthrough, \
@@ -119,13 +118,7 @@ TARGETS += $(BUILD_LIBVERIFY)
##########################################################################################
-# Allow a custom makefile to add extra src dirs
-LIBJAVA_SRC_DIRS += $(call FindSrcDirsForLib, java.base, java)
-
-LIBJAVA_CFLAGS := $(addprefix -I, $(LIBJAVA_SRC_DIRS)) \
- -I$(TOPDIR)/src/java.base/share/native/libfdlibm \
- -I$(SUPPORT_OUTPUTDIR)/headers/java.base \
- -DARCHPROPNAME='"$(OPENJDK_TARGET_CPU_OSARCH)"'
+LIBJAVA_CFLAGS := -DARCHPROPNAME='"$(OPENJDK_TARGET_CPU_OSARCH)"'
ifeq ($(OPENJDK_TARGET_OS), macosx)
BUILD_LIBJAVA_java_props_md.c_CFLAGS := -x objective-c
@@ -134,12 +127,12 @@ endif
$(eval $(call SetupJdkLibrary, BUILD_LIBJAVA, \
NAME := java, \
- SRC := $(LIBJAVA_SRC_DIRS), \
OPTIMIZATION := HIGH, \
CFLAGS := $(CFLAGS_JDKLIB) \
$(LIBJAVA_CFLAGS), \
System.c_CFLAGS := $(VERSION_CFLAGS), \
jdk_util.c_CFLAGS := $(VERSION_CFLAGS), \
+ EXTRA_HEADER_DIRS := libfdlibm, \
WARNINGS_AS_ERRORS_xlc := false, \
DISABLED_WARNINGS_gcc := unused-result, \
DISABLED_WARNINGS_solstudio := E_STATEMENT_NOT_REACHED, \
@@ -180,13 +173,9 @@ endif
$(eval $(call SetupJdkLibrary, BUILD_LIBZIP, \
NAME := zip, \
OPTIMIZATION := LOW, \
- SRC := $(TOPDIR)/src/java.base/share/native/libzip, \
EXCLUDES := $(LIBZIP_EXCLUDES), \
CFLAGS := $(CFLAGS_JDKLIB) \
- $(LIBZ_CFLAGS) \
- -I$(TOPDIR)/src/java.base/share/native/libjava \
- -I$(TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS_TYPE)/native/libjava \
- -I$(SUPPORT_OUTPUTDIR)/headers/java.base, \
+ $(LIBZ_CFLAGS), \
CFLAGS_unix := $(BUILD_LIBZIP_MMAP) -UDEBUG, \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
@@ -200,22 +189,12 @@ TARGETS += $(BUILD_LIBZIP)
##########################################################################################
-JIMAGELIB_CPPFLAGS := \
- -I$(TOPDIR)/src/java.base/share/native/libjava \
- -I$(TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS_TYPE)/native/libjava \
- -I$(TOPDIR)/src/java.base/share/native/libjimage \
- -I$(SUPPORT_OUTPUTDIR)/headers/java.base \
- #
-
$(eval $(call SetupJdkLibrary, BUILD_LIBJIMAGE, \
NAME := jimage, \
TOOLCHAIN := TOOLCHAIN_LINK_CXX, \
OPTIMIZATION := LOW, \
- SRC := $(TOPDIR)/src/java.base/share/native/libjimage \
- $(TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS_TYPE)/native/libjimage, \
- EXCLUDES := $(LIBJIMAGE_EXCLUDES), \
- CFLAGS := $(CFLAGS_JDKLIB) $(JIMAGELIB_CPPFLAGS), \
- CXXFLAGS := $(CXXFLAGS_JDKLIB) $(JIMAGELIB_CPPFLAGS), \
+ CFLAGS := $(CFLAGS_JDKLIB), \
+ CXXFLAGS := $(CXXFLAGS_JDKLIB), \
DISABLED_WARNINGS_gcc := implicit-fallthrough, \
CFLAGS_unix := -UDEBUG, \
LDFLAGS := $(LDFLAGS_JDKLIB) $(LDFLAGS_CXX_JDK) \
@@ -231,10 +210,6 @@ TARGETS += $(BUILD_LIBJIMAGE)
##########################################################################################
-LIBJLI_SRC_DIRS := $(call FindSrcDirsForLib, java.base, jli)
-
-LIBJLI_CFLAGS := $(CFLAGS_JDKLIB)
-
ifeq ($(call check-jvm-variant, zero), true)
ERGO_FAMILY := zero
else
@@ -263,7 +238,7 @@ endif
ifeq ($(OPENJDK_TARGET_OS), windows)
  # Statically link with the C runtime on windows.
- LIBJLI_CFLAGS := $(filter-out -MD, $(LIBJLI_CFLAGS))
+ LIBJLI_CFLAGS_JDKLIB := $(filter-out -MD, $(CFLAGS_JDKLIB))
LIBJLI_OUTPUT_DIR := $(INSTALL_LIBRARIES_HERE)
# Supply the name of the C runtime lib.
LIBJLI_CFLAGS += -DMSVCR_DLL_NAME='"$(notdir $(MSVCR_DLL))"'
@@ -271,11 +246,10 @@ ifeq ($(OPENJDK_TARGET_OS), windows)
LIBJLI_CFLAGS += -DMSVCP_DLL_NAME='"$(notdir $(MSVCP_DLL))"'
endif
else
+ LIBJLI_CFLAGS_JDKLIB := $(CFLAGS_JDKLIB)
LIBJLI_OUTPUT_DIR := $(INSTALL_LIBRARIES_HERE)/jli
endif
-LIBJLI_CFLAGS += $(addprefix -I, $(LIBJLI_SRC_DIRS))
-
LIBJLI_CFLAGS += $(LIBZ_CFLAGS)
ifneq ($(USE_EXTERNAL_LIBZ), true)
@@ -293,12 +267,10 @@ endif
$(eval $(call SetupJdkLibrary, BUILD_LIBJLI, \
NAME := jli, \
OUTPUT_DIR := $(LIBJLI_OUTPUT_DIR), \
- SRC := $(LIBJLI_SRC_DIRS), \
EXCLUDE_FILES := $(LIBJLI_EXCLUDE_FILES), \
EXTRA_FILES := $(LIBJLI_EXTRA_FILES), \
OPTIMIZATION := HIGH, \
- CFLAGS := $(LIBJLI_CFLAGS), \
- DISABLED_WARNINGS_gcc := maybe-uninitialized, \
+ CFLAGS := $(LIBJLI_CFLAGS_JDKLIB) $(LIBJLI_CFLAGS), \
DISABLED_WARNINGS_solstudio := \
E_ASM_DISABLES_OPTIMIZATION \
E_STATEMENT_NOT_REACHED, \
@@ -316,6 +288,8 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBJLI, \
TARGETS += $(BUILD_LIBJLI)
+LIBJLI_SRC_DIRS := $(call FindSrcDirsForComponent, java.base, libjli)
+
# On windows, the static library has the same suffix as the import library
# created along with the shared library, so the static library is given a
# different name. No harm in doing it for all platforms to reduce complexity.
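A minimal sketch of the naming clash the comment above describes, using hypothetical file names (illustration only, not part of the build logic):

  # On Windows, building foo.dll also produces an import library foo.lib,
  # so a static archive cannot also be named foo.lib:
  #   shared:  foo.dll + foo.lib (import library)
  #   static:  foo.lib           (collision!)
  # Giving the static variant a distinct basename avoids the clash:
  DEMO_STATIC_BASENAME := foo_static
  # -> foo_static.lib on Windows, libfoo_static.a on unix-like platforms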
@@ -328,7 +302,8 @@ ifeq ($(OPENJDK_TARGET_OS), windows)
EXCLUDE_FILES := $(LIBJLI_EXCLUDE_FILES), \
EXTRA_FILES := $(LIBJLI_EXTRA_FILES), \
OPTIMIZATION := HIGH, \
- CFLAGS := $(STATIC_LIBRARY_FLAGS) $(LIBJLI_CFLAGS), \
+ CFLAGS := $(STATIC_LIBRARY_FLAGS) $(LIBJLI_CFLAGS_JDKLIB) $(LIBJLI_CFLAGS) \
+ $(addprefix -I, $(LIBJLI_SRC_DIRS)), \
ARFLAGS := $(ARFLAGS), \
OBJECT_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libjli_static, \
))
@@ -347,7 +322,8 @@ else ifeq ($(OPENJDK_TARGET_OS), macosx)
EXCLUDE_FILES := $(LIBJLI_EXCLUDE_FILES), \
EXTRA_FILES := $(LIBJLI_EXTRA_FILES), \
OPTIMIZATION := HIGH, \
- CFLAGS := $(CFLAGS_JDKLIB) $(LIBJLI_CFLAGS), \
+ CFLAGS := $(LIBJLI_CFLAGS_JDKLIB) $(LIBJLI_CFLAGS) \
+ $(addprefix -I, $(LIBJLI_SRC_DIRS)), \
LDFLAGS := -nostdlib $(ARFLAGS), \
OBJECT_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libjli_static, \
))
@@ -371,7 +347,8 @@ else ifeq ($(OPENJDK_TARGET_OS), aix)
EXCLUDE_FILES := $(LIBJLI_EXCLUDE_FILES), \
EXTRA_FILES := $(LIBJLI_EXTRA_FILES), \
OPTIMIZATION := HIGH, \
- CFLAGS := $(STATIC_LIBRARY_FLAGS) $(LIBJLI_CFLAGS), \
+ CFLAGS := $(STATIC_LIBRARY_FLAGS) $(LIBJLI_CFLAGS_JDKLIB) $(LIBJLI_CFLAGS) \
+ $(addprefix -I, $(LIBJLI_SRC_DIRS)), \
ARFLAGS := $(ARFLAGS), \
OBJECT_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libjli_static))
diff --git a/make/lib/Lib-java.base.gmk b/make/lib/Lib-java.base.gmk
index 1d29fb9e820..66327271dd1 100644
--- a/make/lib/Lib-java.base.gmk
+++ b/make/lib/Lib-java.base.gmk
@@ -29,9 +29,7 @@ include LibCommon.gmk
$(eval $(call IncludeCustomExtension, lib/Lib-java.base.gmk))
# Prepare the find cache.
-LIB_java.base_SRC_DIRS += $(TOPDIR)/src/java.base/*/native
-
-$(eval $(call FillCacheFind, $(wildcard $(LIB_java.base_SRC_DIRS))))
+$(eval $(call FillCacheFind, $(wildcard $(TOPDIR)/src/java.base/*/native)))
################################################################################
# Create all the core libraries
@@ -41,14 +39,10 @@ include CoreLibraries.gmk
################################################################################
# Create the network library
-LIBNET_SRC_DIRS := $(call FindSrcDirsForLib, java.base, net)
-
$(eval $(call SetupJdkLibrary, BUILD_LIBNET, \
NAME := net, \
- SRC := $(LIBNET_SRC_DIRS), \
OPTIMIZATION := LOW, \
- CFLAGS := $(CFLAGS_JDKLIB) -I$(SUPPORT_OUTPUTDIR)/headers/java.base \
- $(LIBJAVA_HEADER_FLAGS) $(addprefix -I, $(LIBNET_SRC_DIRS)), \
+ CFLAGS := $(CFLAGS_JDKLIB), \
DISABLED_WARNINGS_gcc := format-nonliteral, \
DISABLED_WARNINGS_clang := parentheses-equality constant-logical-operand, \
DISABLED_WARNINGS_microsoft := 4244 4047 4133 4996, \
@@ -72,31 +66,15 @@ TARGETS += $(BUILD_LIBNET)
################################################################################
# Create the nio library
-BUILD_LIBNIO_SRC := \
- $(TOPDIR)/src/java.base/share/native/libnio \
- $(TOPDIR)/src/java.base/share/native/libnio/ch \
- $(TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS_TYPE)/native/libnio \
- $(sort $(wildcard \
- $(TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS_TYPE)/native/libnio/ch \
- $(TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS_TYPE)/native/libnio/fs \
- $(TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS)/native/libnio/ch \
- $(TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS)/native/libnio/fs)) \
- #
-
-BUILD_LIBNIO_CFLAGS := \
- $(addprefix -I, $(BUILD_LIBNIO_SRC)) \
- -I$(SUPPORT_OUTPUTDIR)/headers/java.base \
- $(LIBJAVA_HEADER_FLAGS) \
- $(addprefix -I, $(BUILD_LIBNET_SRC))
-
$(eval $(call SetupJdkLibrary, BUILD_LIBNIO, \
NAME := nio, \
- SRC := $(BUILD_LIBNIO_SRC), \
- EXCLUDE_FILES := $(BUILD_LIBNIO_EXFILES), \
OPTIMIZATION := HIGH, \
WARNINGS_AS_ERRORS_xlc := false, \
- CFLAGS := $(CFLAGS_JDKLIB) \
- $(BUILD_LIBNIO_CFLAGS), \
+ CFLAGS := $(CFLAGS_JDKLIB), \
+ EXTRA_HEADER_DIRS := \
+ libnio/ch \
+ libnio/fs \
+ libnet, \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
LIBS_unix := -ljava -lnet, \
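The EXTRA_HEADER_DIRS entries above replace the explicit -I paths that were removed. A minimal sketch of the assumed expansion follows; the helper name is hypothetical, and the real logic presumably also probes OS-specific directories the way the removed FindSrcDirsForLib did. A bare entry resolves inside the current module, while a module:dir entry, as used further down for java.base:libjli, points into another module.

  # Hypothetical expansion of an EXTRA_HEADER_DIRS entry (assumed semantics):
  DemoExpandHeaderDir = $(strip \
      $(if $(findstring :,$1), \
        $(TOPDIR)/src/$(firstword $(subst :, ,$1))/share/native/$(lastword $(subst :, ,$1)), \
        $(TOPDIR)/src/$(MODULE)/share/native/$(strip $1)))
  # $(call DemoExpandHeaderDir,libnet)
  #   -> $(TOPDIR)/src/$(MODULE)/share/native/libnet
  # $(call DemoExpandHeaderDir,java.base:libjli)
  #   -> $(TOPDIR)/src/java.base/share/native/libjli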
@@ -122,17 +100,10 @@ ifeq ($(OPENJDK_TARGET_OS), macosx)
# JavaNativeFoundation framework not supported in static builds
ifneq ($(STATIC_BUILD), true)
- LIBOSXSECURITY_DIRS := $(TOPDIR)/src/java.base/macosx/native/libosxsecurity
- LIBOSXSECURITY_CFLAGS := -I$(LIBOSXSECURITY_DIRS) \
- $(LIBJAVA_HEADER_FLAGS) \
- -I$(SUPPORT_OUTPUTDIR)/headers/java.base \
-
$(eval $(call SetupJdkLibrary, BUILD_LIBOSXSECURITY, \
NAME := osxsecurity, \
- SRC := $(LIBOSXSECURITY_DIRS), \
OPTIMIZATION := LOW, \
- CFLAGS := $(CFLAGS_JDKLIB) \
- $(LIBOSXSECURITY_CFLAGS), \
+ CFLAGS := $(CFLAGS_JDKLIB), \
DISABLED_WARNINGS_clang := deprecated-declarations, \
LDFLAGS := $(LDFLAGS_JDKLIB) \
-L$(SUPPORT_OUTPUTDIR)/modules_libs/java.base \
@@ -158,7 +129,6 @@ endif
ifeq ($(OPENJDK_TARGET_OS_TYPE), unix)
ifeq ($(STATIC_BUILD), false)
- LIBJSIG_SRC_DIR := $(TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS_TYPE)/native/libjsig
LIBJSIG_MAPFILE := $(wildcard $(TOPDIR)/make/mapfiles/libjsig/mapfile-vers-$(OPENJDK_TARGET_OS))
ifeq ($(OPENJDK_TARGET_OS), linux)
@@ -168,7 +138,6 @@ ifeq ($(OPENJDK_TARGET_OS_TYPE), unix)
$(eval $(call SetupJdkLibrary, BUILD_LIBJSIG, \
NAME := jsig, \
- SRC := $(LIBJSIG_SRC_DIR), \
CFLAGS := $(CFLAGS_JDKLIB) $(LIBJSIG_CFLAGS), \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
diff --git a/make/lib/Lib-java.desktop.gmk b/make/lib/Lib-java.desktop.gmk
index 5a2f77f878b..81f85fe28f0 100644
--- a/make/lib/Lib-java.desktop.gmk
+++ b/make/lib/Lib-java.desktop.gmk
@@ -29,9 +29,7 @@ include LibCommon.gmk
$(eval $(call IncludeCustomExtension, lib/Lib-java.desktop.gmk))
# Prepare the find cache.
-LIB_java.desktop_SRC_DIRS += $(TOPDIR)/src/java.desktop/*/native
-
-$(eval $(call FillCacheFind, $(wildcard $(LIB_java.desktop_SRC_DIRS))))
+$(eval $(call FillCacheFind, $(wildcard $(TOPDIR)/src/java.desktop/*/native)))
################################################################################
# Create the AWT/2D libraries
@@ -43,16 +41,8 @@ include Awt2dLibraries.gmk
ifneq ($(OPENJDK_TARGET_OS), aix)
- LIBJSOUND_SRC_DIRS := $(wildcard \
- $(TOPDIR)/src/java.desktop/share/native/libjsound \
- $(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS)/native/libjsound \
- )
-
LIBJSOUND_CFLAGS := \
- -I$(SUPPORT_OUTPUTDIR)/headers/java.desktop \
$(ALSA_CFLAGS) \
- $(LIBJAVA_HEADER_FLAGS) \
- $(foreach dir, $(LIBJSOUND_SRC_DIRS), -I$(dir)) \
-DX_PLATFORM=X_$(OPENJDK_TARGET_OS_UPPERCASE) \
-DUSE_PORTS=TRUE \
-DUSE_DAUDIO=TRUE \
@@ -71,7 +61,6 @@ ifneq ($(OPENJDK_TARGET_OS), aix)
$(eval $(call SetupJdkLibrary, BUILD_LIBJSOUND, \
NAME := jsound, \
- SRC := $(LIBJSOUND_SRC_DIRS), \
TOOLCHAIN := $(LIBJSOUND_TOOLCHAIN), \
OPTIMIZATION := LOW, \
CFLAGS := $(CFLAGS_JDKLIB) \
@@ -97,15 +86,11 @@ endif
# Create the macosx specific osxapp and osx libraries
ifeq ($(OPENJDK_TARGET_OS), macosx)
- LIBOSXAPP_SRC := $(TOPDIR)/src/java.desktop/macosx/native/libosxapp
$(eval $(call SetupJdkLibrary, BUILD_LIBOSXAPP, \
NAME := osxapp, \
- SRC := $(LIBOSXAPP_SRC), \
OPTIMIZATION := LOW, \
- CFLAGS := $(CFLAGS_JDKLIB) \
- $(addprefix -I, $(LIBOSXAPP_SRC)) \
- -I$(SUPPORT_OUTPUTDIR)/headers/java.desktop, \
+ CFLAGS := $(CFLAGS_JDKLIB), \
DISABLED_WARNINGS_clang := objc-method-access objc-root-class \
deprecated-declarations, \
LDFLAGS := $(LDFLAGS_JDKLIB) \
@@ -129,19 +114,11 @@ ifeq ($(OPENJDK_TARGET_OS), macosx)
##############################################################################
- LIBOSX_DIRS := $(TOPDIR)/src/java.desktop/macosx/native/libosx
- LIBOSX_CFLAGS := -I$(LIBOSX_DIRS) \
- -I$(TOPDIR)/src/java.desktop/macosx/native/libosxapp \
- $(LIBJAVA_HEADER_FLAGS) \
- -I$(SUPPORT_OUTPUTDIR)/headers/java.desktop \
- #
-
$(eval $(call SetupJdkLibrary, BUILD_LIBOSX, \
NAME := osx, \
- SRC := $(LIBOSX_DIRS), \
OPTIMIZATION := LOW, \
- CFLAGS := $(CFLAGS_JDKLIB) \
- $(LIBOSX_CFLAGS), \
+ CFLAGS := $(CFLAGS_JDKLIB), \
+ EXTRA_HEADER_DIRS := libosxapp, \
DISABLED_WARNINGS_clang := deprecated-declarations, \
LDFLAGS := $(LDFLAGS_JDKLIB) \
-L$(SUPPORT_OUTPUTDIR)/modules_libs/java.desktop \
diff --git a/make/lib/Lib-java.instrument.gmk b/make/lib/Lib-java.instrument.gmk
index 3ec153ef462..7e625e3a036 100644
--- a/make/lib/Lib-java.instrument.gmk
+++ b/make/lib/Lib-java.instrument.gmk
@@ -30,32 +30,24 @@ $(eval $(call IncludeCustomExtension, lib/Lib-java.instrument.gmk))
################################################################################
-LIBINSTRUMENT_SRC := $(TOPDIR)/src/java.instrument/share/native/libinstrument \
- $(TOPDIR)/src/java.instrument/$(OPENJDK_TARGET_OS_TYPE)/native/libinstrument \
- #
-LIBINSTRUMENT_CFLAGS := $(CFLAGS_JDKLIB) \
- $(addprefix -I, $(LIBINSTRUMENT_SRC)) \
- -I$(SUPPORT_OUTPUTDIR)/headers/java.instrument \
- -I$(TOPDIR)/src/java.base/share/native/libjli \
- -I$(TOPDIR)/src/java.base/share/native/libjava \
- #
-
ifeq ($(OPENJDK_TARGET_OS), windows)
# Statically link the C runtime so that there are no dependencies on modules
# not on the search path when invoked from the Windows system directory
# (or elsewhere).
- LIBINSTRUMENT_CFLAGS := $(filter-out -MD, $(LIBINSTRUMENT_CFLAGS))
+ LIBINSTRUMENT_CFLAGS_JDKLIB := $(filter-out -MD, $(CFLAGS_JDKLIB))
# equivalent of strcasecmp is stricmp on Windows
- LIBINSTRUMENT_CFLAGS += -Dstrcasecmp=stricmp
+ LIBINSTRUMENT_CFLAGS := -Dstrcasecmp=stricmp
+else
+ LIBINSTRUMENT_CFLAGS_JDKLIB := $(CFLAGS_JDKLIB)
endif
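A self-contained illustration of the $(filter-out) idiom used above, with hypothetical flag values: the dynamic-CRT flag -MD is removed from the shared flag set, after which a static-CRT flag can be supplied (as the windowsaccessbridge library elsewhere in this change does with -MT).

  # Hypothetical demo of filtering one flag out of a flag set:
  DEMO_CFLAGS := -O2 -MD -Zi
  DEMO_STATIC_CRT_CFLAGS := $(filter-out -MD, $(DEMO_CFLAGS)) -MT
  # DEMO_STATIC_CRT_CFLAGS is now "-O2 -Zi -MT"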
$(eval $(call SetupJdkLibrary, BUILD_LIBINSTRUMENT, \
NAME := instrument, \
- SRC := $(LIBINSTRUMENT_SRC), \
OPTIMIZATION := LOW, \
- CFLAGS := $(LIBINSTRUMENT_CFLAGS), \
+ CFLAGS := $(LIBINSTRUMENT_CFLAGS_JDKLIB) $(LIBINSTRUMENT_CFLAGS), \
CFLAGS_debug := -DJPLIS_LOGGING, \
CFLAGS_release := -DNO_JPLIS_LOGGING, \
+ EXTRA_HEADER_DIRS := java.base:libjli, \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN) \
$(LIBINSTRUMENT_LDFLAGS), \
diff --git a/make/lib/Lib-java.management.gmk b/make/lib/Lib-java.management.gmk
index 6444feaa2f5..a70a3ccf45a 100644
--- a/make/lib/Lib-java.management.gmk
+++ b/make/lib/Lib-java.management.gmk
@@ -30,13 +30,6 @@ $(eval $(call IncludeCustomExtension, lib/Lib-java.management.gmk))
################################################################################
-LIBMANAGEMENT_SRC += $(TOPDIR)/src/java.management/share/native/libmanagement
-LIBMANAGEMENT_CFLAGS := -I$(TOPDIR)/src/hotspot/share/include \
- $(addprefix -I,$(LIBMANAGEMENT_SRC)) \
- -I$(SUPPORT_OUTPUTDIR)/headers/java.management \
- $(LIBJAVA_HEADER_FLAGS) \
- #
-
LIBMANAGEMENT_OPTIMIZATION := HIGH
ifneq ($(findstring $(OPENJDK_TARGET_OS), solaris linux), )
ifeq ($(COMPILE_WITH_DEBUG_SYMBOLS), true)
@@ -46,9 +39,8 @@ endif
$(eval $(call SetupJdkLibrary, BUILD_LIBMANAGEMENT, \
NAME := management, \
- SRC := $(LIBMANAGEMENT_SRC), \
OPTIMIZATION := $(LIBMANAGEMENT_OPTIMIZATION), \
- CFLAGS := $(CFLAGS_JDKLIB) $(LIBMANAGEMENT_CFLAGS), \
+ CFLAGS := $(CFLAGS_JDKLIB), \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
LIBS := $(JDKLIB_LIBS), \
diff --git a/make/lib/Lib-java.prefs.gmk b/make/lib/Lib-java.prefs.gmk
index 6d72189d563..cac3b17e26a 100644
--- a/make/lib/Lib-java.prefs.gmk
+++ b/make/lib/Lib-java.prefs.gmk
@@ -27,18 +27,16 @@ include LibCommon.gmk
################################################################################
+# libprefs on macosx does not use the unix code
ifeq ($(OPENJDK_TARGET_OS), macosx)
- LIBPREF_SRC_DIRS := $(TOPDIR)/src/java.prefs/macosx/native/libprefs
-else
- LIBPREF_SRC_DIRS := $(TOPDIR)/src/java.prefs/$(OPENJDK_TARGET_OS_TYPE)/native/libprefs
+ LIBPREFS_EXCLUDE_SRC_PATTERNS := unix
endif
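A minimal sketch of what EXCLUDE_SRC_PATTERNS presumably does (demo names only; the real matching inside SetupJdkLibrary may differ): source directories whose path contains a listed pattern are dropped from the set the library is built from.

  # Hypothetical illustration of pattern-based source-dir exclusion:
  DEMO_SRC_DIRS := src/java.prefs/macosx/native/libprefs \
      src/java.prefs/unix/native/libprefs
  DEMO_EXCLUDE := unix
  DEMO_KEPT := $(foreach d, $(DEMO_SRC_DIRS), \
      $(if $(findstring $(DEMO_EXCLUDE),$d),,$d))
  # DEMO_KEPT -> src/java.prefs/macosx/native/libprefs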
$(eval $(call SetupJdkLibrary, BUILD_LIBPREFS, \
NAME := prefs, \
- SRC := $(LIBPREF_SRC_DIRS), \
+ EXCLUDE_SRC_PATTERNS := $(LIBPREFS_EXCLUDE_SRC_PATTERNS), \
OPTIMIZATION := HIGH, \
- CFLAGS := $(CFLAGS_JDKLIB) $(addprefix -I, $(LIBPREF_SRC_DIRS)) \
- $(LIBJAVA_HEADER_FLAGS), \
+ CFLAGS := $(CFLAGS_JDKLIB), \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
LIBS_unix := -ljvm, \
diff --git a/make/lib/Lib-java.rmi.gmk b/make/lib/Lib-java.rmi.gmk
index f2c09c01829..56b60905603 100644
--- a/make/lib/Lib-java.rmi.gmk
+++ b/make/lib/Lib-java.rmi.gmk
@@ -29,9 +29,8 @@ include LibCommon.gmk
$(eval $(call SetupJdkLibrary, BUILD_LIBRMI, \
NAME := rmi, \
- SRC := $(TOPDIR)/src/java.rmi/share/native/librmi, \
OPTIMIZATION := LOW, \
- CFLAGS := $(CFLAGS_JDKLIB) -I$(SUPPORT_OUTPUTDIR)/headers/java.rmi, \
+ CFLAGS := $(CFLAGS_JDKLIB), \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
LIBS_unix := -ljvm, \
diff --git a/make/lib/Lib-java.security.jgss.gmk b/make/lib/Lib-java.security.jgss.gmk
index 89752deef9f..b130671efd6 100644
--- a/make/lib/Lib-java.security.jgss.gmk
+++ b/make/lib/Lib-java.security.jgss.gmk
@@ -27,16 +27,10 @@ include LibCommon.gmk
################################################################################
-LIBJ2GSS_SRC := $(TOPDIR)/src/java.security.jgss/share/native/libj2gss \
- #
-
$(eval $(call SetupJdkLibrary, BUILD_LIBJ2GSS, \
NAME := j2gss, \
- SRC := $(LIBJ2GSS_SRC), \
OPTIMIZATION := LOW, \
- CFLAGS := $(CFLAGS_JDKLIB) $(addprefix -I, $(LIBJ2GSS_SRC)) \
- $(LIBJAVA_HEADER_FLAGS) \
- -I$(SUPPORT_OUTPUTDIR)/headers/java.security.jgss, \
+ CFLAGS := $(CFLAGS_JDKLIB), \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
LIBS := $(LIBDL), \
@@ -49,15 +43,10 @@ TARGETS += $(BUILD_LIBJ2GSS)
ifneq ($(BUILD_CRYPTO), false)
ifeq ($(OPENJDK_TARGET_OS), windows)
- BUILD_LIBW2K_LSA_AUTH_SRC := $(call FindSrcDirsForLib, $(MODULE), w2k_lsa_auth)
-
$(eval $(call SetupJdkLibrary, BUILD_LIBW2K_LSA_AUTH, \
NAME := w2k_lsa_auth, \
- SRC := $(BUILD_LIBW2K_LSA_AUTH_SRC), \
OPTIMIZATION := LOW, \
- CFLAGS := $(CFLAGS_JDKLIB) \
- $(addprefix -I, $(BUILD_LIBW2K_LSA_AUTH_SRC)) \
- -I$(SUPPORT_OUTPUTDIR)/headers/java.security.jgss, \
+ CFLAGS := $(CFLAGS_JDKLIB), \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
LIBS := advapi32.lib Secur32.lib netapi32.lib kernel32.lib user32.lib \
@@ -69,17 +58,12 @@ ifneq ($(BUILD_CRYPTO), false)
endif
ifeq ($(OPENJDK_TARGET_OS), macosx)
- BUILD_LIBOSXKRB5_SRC := $(call FindSrcDirsForLib, $(MODULE), osxkrb5)
-
# libosxkrb5 needs to call deprecated krb5 APIs so that java
# can use the native credentials cache.
$(eval $(call SetupJdkLibrary, BUILD_LIBOSXKRB5, \
NAME := osxkrb5, \
- SRC := $(BUILD_LIBOSXKRB5_SRC), \
OPTIMIZATION := LOW, \
- CFLAGS := $(CFLAGS_JDKLIB) \
- $(addprefix -I, $(BUILD_LIBOSXKRB5_SRC)) \
- -I$(SUPPORT_OUTPUTDIR)/headers/java.security.jgss, \
+ CFLAGS := $(CFLAGS_JDKLIB), \
DISABLED_WARNINGS_clang := deprecated-declarations, \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
diff --git a/make/lib/Lib-java.smartcardio.gmk b/make/lib/Lib-java.smartcardio.gmk
index 337daa48e1e..a8c65061521 100644
--- a/make/lib/Lib-java.smartcardio.gmk
+++ b/make/lib/Lib-java.smartcardio.gmk
@@ -27,18 +27,12 @@ include LibCommon.gmk
################################################################################
-LIBJ2PCSC_SRC := $(TOPDIR)/src/java.smartcardio/share/native/libj2pcsc \
- $(TOPDIR)/src/java.smartcardio/$(OPENJDK_TARGET_OS_TYPE)/native/libj2pcsc
-LIBJ2PCSC_CPPFLAGS := $(addprefix -I,$(LIBJ2PCSC_SRC)) \
- -I$(TOPDIR)/src/java.smartcardio/$(OPENJDK_TARGET_OS_TYPE)/native/libj2pcsc/MUSCLE \
- -I$(SUPPORT_OUTPUTDIR)/headers/java.smartcardio
-
$(eval $(call SetupJdkLibrary, BUILD_LIBJ2PCSC, \
NAME := j2pcsc, \
- SRC := $(LIBJ2PCSC_SRC), \
+ CFLAGS := $(CFLAGS_JDKLIB), \
CFLAGS_unix := -D__sun_jdk, \
+ EXTRA_HEADER_DIRS := libj2pcsc/MUSCLE, \
OPTIMIZATION := LOW, \
- CFLAGS := $(CFLAGS_JDKLIB) $(LIBJ2PCSC_CPPFLAGS), \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
LIBS_unix := $(LIBDL), \
diff --git a/make/lib/Lib-jdk.accessibility.gmk b/make/lib/Lib-jdk.accessibility.gmk
index ea35ab39293..b7f820b8ba3 100644
--- a/make/lib/Lib-jdk.accessibility.gmk
+++ b/make/lib/Lib-jdk.accessibility.gmk
@@ -30,12 +30,6 @@ include LibCommon.gmk
ifeq ($(OPENJDK_TARGET_OS), windows)
ROOT_SRCDIR := $(TOPDIR)/src/jdk.accessibility/windows/native
- JAVA_AB_SRCDIR := $(ROOT_SRCDIR)/libjavaaccessbridge $(ROOT_SRCDIR)/common
- WIN_AB_SRCDIR := $(ROOT_SRCDIR)/libwindowsaccessbridge $(ROOT_SRCDIR)/common
- SYSINFO_SRCDIR := $(ROOT_SRCDIR)/libjabsysinfo
- ACCESSBRIDGE_CFLAGS := -I$(SUPPORT_OUTPUTDIR)/headers/jdk.accessibility \
- -I$(TOPDIR)/src/java.desktop/windows/native/include \
- -I$(TOPDIR)/src/java.desktop/share/native/include
define SetupJavaDLL
# Parameter 1 Suffix
@@ -43,13 +37,16 @@ ifeq ($(OPENJDK_TARGET_OS), windows)
$(call SetupJdkLibrary, BUILD_JAVAACCESSBRIDGE$1, \
NAME := javaaccessbridge$1, \
- SRC := $(JAVA_AB_SRCDIR), \
+ SRC := libjavaaccessbridge, \
+ EXTRA_SRC := common, \
OPTIMIZATION := LOW, \
DISABLED_WARNINGS_microsoft := 4311 4302 4312, \
- CFLAGS := $(CFLAGS_JDKLIB) $(ACCESSBRIDGE_CFLAGS) \
- $(addprefix -I,$(JAVA_AB_SRCDIR)) \
- -I$(ROOT_SRCDIR)/include/bridge \
+ CFLAGS := $(CFLAGS_JDKLIB) \
-DACCESSBRIDGE_ARCH_$2, \
+ EXTRA_HEADER_DIRS := \
+ include/bridge \
+ java.base:include \
+ java.desktop:include, \
LDFLAGS := $(LDFLAGS_JDKLIB), \
LIBS := kernel32.lib user32.lib gdi32.lib \
winspool.lib comdlg32.lib advapi32.lib shell32.lib \
@@ -68,13 +65,15 @@ ifeq ($(OPENJDK_TARGET_OS), windows)
# Parameter 2 ACCESSBRIDGE_ARCH_ suffix
$(call SetupJdkLibrary, BUILD_WINDOWSACCESSBRIDGE$1, \
NAME := windowsaccessbridge$1, \
- SRC := $(WIN_AB_SRCDIR), \
+ SRC := libwindowsaccessbridge, \
+ EXTRA_SRC := common, \
OPTIMIZATION := LOW, \
DISABLED_WARNINGS_microsoft := 4311 4302 4312, \
- CFLAGS := $(filter-out -MD, $(CFLAGS_JDKLIB)) -MT $(ACCESSBRIDGE_CFLAGS) \
- $(addprefix -I,$(WIN_AB_SRCDIR)) \
- -I$(ROOT_SRCDIR)/include/bridge \
+ CFLAGS := $(filter-out -MD, $(CFLAGS_JDKLIB)) -MT \
-DACCESSBRIDGE_ARCH_$2, \
+ EXTRA_HEADER_DIRS := \
+ include/bridge \
+ java.base:include, \
LDFLAGS := $(LDFLAGS_JDKLIB) \
-def:$(ROOT_SRCDIR)/libwindowsaccessbridge/WinAccessBridge.DEF, \
LIBS := kernel32.lib user32.lib gdi32.lib \
@@ -91,9 +90,8 @@ ifeq ($(OPENJDK_TARGET_OS), windows)
$(call SetupJdkLibrary, BUILD_ACCESSBRIDGESYSINFO, \
NAME := jabsysinfo, \
- SRC := $(SYSINFO_SRCDIR), \
OPTIMIZATION := LOW, \
- CFLAGS := $(CFLAGS_JDKLIB) $(ACCESSBRIDGE_CFLAGS), \
+ CFLAGS := $(CFLAGS_JDKLIB), \
LDFLAGS := $(LDFLAGS_JDKLIB), \
VERSIONINFO_RESOURCE := $(ROOT_SRCDIR)/common/AccessBridgeStatusWindow.rc, \
)
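With the refactoring above, SRC and EXTRA_SRC take directory names relative to the module's native source root rather than absolute paths. A minimal sketch of the assumed resolution, grounded in the ROOT_SRCDIR shown earlier in this file (the helper name is hypothetical):

  # Hypothetical resolution of a relative SRC/EXTRA_SRC entry:
  DemoResolveSrcDir = $(TOPDIR)/src/$(MODULE)/$(OPENJDK_TARGET_OS)/native/$(strip $1)
  # With MODULE = jdk.accessibility on windows:
  # $(call DemoResolveSrcDir,libjavaaccessbridge)
  #   -> $(TOPDIR)/src/jdk.accessibility/windows/native/libjavaaccessbridge
  # $(call DemoResolveSrcDir,common)
  #   -> $(TOPDIR)/src/jdk.accessibility/windows/native/common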
diff --git a/make/lib/Lib-jdk.attach.gmk b/make/lib/Lib-jdk.attach.gmk
index 827f1bf723e..1c24e554037 100644
--- a/make/lib/Lib-jdk.attach.gmk
+++ b/make/lib/Lib-jdk.attach.gmk
@@ -36,11 +36,8 @@ endif
$(eval $(call SetupJdkLibrary, BUILD_LIBATTACH, \
NAME := attach, \
- SRC := $(call FindSrcDirsForLib, jdk.attach, attach), \
OPTIMIZATION := LOW, \
- CFLAGS := $(CFLAGS_JDKLIB) \
- -I$(SUPPORT_OUTPUTDIR)/headers/jdk.attach \
- $(LIBJAVA_HEADER_FLAGS) $(LIBATTACH_CFLAGS), \
+ CFLAGS := $(CFLAGS_JDKLIB) $(LIBATTACH_CFLAGS), \
CFLAGS_windows := /Gy, \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
diff --git a/make/lib/Lib-jdk.crypto.cryptoki.gmk b/make/lib/Lib-jdk.crypto.cryptoki.gmk
index 847d5b356be..3f25d7c04fd 100644
--- a/make/lib/Lib-jdk.crypto.cryptoki.gmk
+++ b/make/lib/Lib-jdk.crypto.cryptoki.gmk
@@ -27,16 +27,10 @@ include LibCommon.gmk
################################################################################
-LIBJ2PKCS11_SRC := $(TOPDIR)/src/jdk.crypto.cryptoki/share/native/libj2pkcs11 \
- $(TOPDIR)/src/jdk.crypto.cryptoki/$(OPENJDK_TARGET_OS_TYPE)/native/libj2pkcs11
-
$(eval $(call SetupJdkLibrary, BUILD_LIBJ2PKCS11, \
NAME := j2pkcs11, \
- SRC := $(LIBJ2PKCS11_SRC), \
OPTIMIZATION := LOW, \
- CFLAGS := $(CFLAGS_JDKLIB) $(addprefix -I, $(LIBJ2PKCS11_SRC)) \
- $(LIBJAVA_HEADER_FLAGS) \
- -I$(SUPPORT_OUTPUTDIR)/headers/jdk.crypto.cryptoki, \
+ CFLAGS := $(CFLAGS_JDKLIB), \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
LIBS_unix := $(LIBDL), \
diff --git a/make/lib/Lib-jdk.crypto.ec.gmk b/make/lib/Lib-jdk.crypto.ec.gmk
index e30b57d4341..dfecc1d3cbd 100644
--- a/make/lib/Lib-jdk.crypto.ec.gmk
+++ b/make/lib/Lib-jdk.crypto.ec.gmk
@@ -28,31 +28,23 @@ include LibCommon.gmk
################################################################################
ifeq ($(ENABLE_INTREE_EC), true)
-
- LIBSUNEC_SRC := $(TOPDIR)/src/jdk.crypto.ec/share/native/libsunec
- BUILD_LIBSUNEC_FLAGS := $(addprefix -I, $(SUNEC_SRC))
-
- #
  # On sol-sparc, all libraries are compiled with -xregs=no%appl
- # (set in CFLAGS_REQUIRED_sparc)
- #
- # except!!! libsunec.so
- #
- ECC_JNI_SOLSPARC_FILTER :=
+ # (set in CFLAGS_REQUIRED_sparc) except libsunec.so
ifeq ($(OPENJDK_TARGET_CPU_ARCH), sparc)
- ECC_JNI_SOLSPARC_FILTER := -xregs=no%appl
+ BUILD_LIBSUNEC_CFLAGS_JDKLIB := $(filter-out -xregs=no%appl, $(CFLAGS_JDKLIB))
+ BUILD_LIBSUNEC_CXXFLAGS_JDKLIB := $(filter-out -xregs=no%appl, $(CXXFLAGS_JDKLIB))
+ else
+ BUILD_LIBSUNEC_CFLAGS_JDKLIB := $(CFLAGS_JDKLIB)
+ BUILD_LIBSUNEC_CXXFLAGS_JDKLIB := $(CXXFLAGS_JDKLIB)
endif
$(eval $(call SetupJdkLibrary, BUILD_LIBSUNEC, \
NAME := sunec, \
- SRC := $(LIBSUNEC_SRC), \
TOOLCHAIN := TOOLCHAIN_LINK_CXX, \
OPTIMIZATION := LOW, \
- CFLAGS := $(filter-out $(ECC_JNI_SOLSPARC_FILTER), $(CFLAGS_JDKLIB)) \
- $(BUILD_LIBSUNEC_FLAGS) \
+ CFLAGS := $(BUILD_LIBSUNEC_CFLAGS_JDKLIB) \
-DMP_API_COMPATIBLE -DNSS_ECC_MORE_THAN_SUITE_B, \
- CXXFLAGS := $(filter-out $(ECC_JNI_SOLSPARC_FILTER), $(CXXFLAGS_JDKLIB)) \
- $(BUILD_LIBSUNEC_FLAGS), \
+ CXXFLAGS := $(BUILD_LIBSUNEC_CXXFLAGS_JDKLIB), \
DISABLED_WARNINGS_gcc := sign-compare implicit-fallthrough, \
DISABLED_WARNINGS_microsoft := 4101 4244 4146 4018, \
LDFLAGS := $(LDFLAGS_JDKLIB) $(LDFLAGS_CXX_JDK), \
diff --git a/make/lib/Lib-jdk.crypto.mscapi.gmk b/make/lib/Lib-jdk.crypto.mscapi.gmk
index 3bd4eb9e357..e28e428f49e 100644
--- a/make/lib/Lib-jdk.crypto.mscapi.gmk
+++ b/make/lib/Lib-jdk.crypto.mscapi.gmk
@@ -29,14 +29,10 @@ include LibCommon.gmk
ifeq ($(OPENJDK_TARGET_OS), windows)
- LIBSUNMSCAPI_SRC := $(TOPDIR)/src/jdk.crypto.mscapi/$(OPENJDK_TARGET_OS_TYPE)/native/libsunmscapi
-
$(eval $(call SetupJdkLibrary, BUILD_LIBSUNMSCAPI, \
NAME := sunmscapi, \
- SRC := $(LIBSUNMSCAPI_SRC), \
OPTIMIZATION := LOW, \
- CFLAGS := $(CFLAGS_JDKLIB) \
- -I$(LIBSUNMSCAPI_SRC), \
+ CFLAGS := $(CFLAGS_JDKLIB), \
LDFLAGS := $(LDFLAGS_JDKLIB) $(LDFLAGS_CXX_JDK) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
LIBS := crypt32.lib advapi32.lib, \
diff --git a/make/lib/Lib-jdk.crypto.ucrypto.gmk b/make/lib/Lib-jdk.crypto.ucrypto.gmk
index 758491007d3..c00e543737b 100644
--- a/make/lib/Lib-jdk.crypto.ucrypto.gmk
+++ b/make/lib/Lib-jdk.crypto.ucrypto.gmk
@@ -29,14 +29,10 @@ include LibCommon.gmk
ifeq ($(OPENJDK_TARGET_OS), solaris)
- LIBJ2UCRYPTO_SRC := $(TOPDIR)/src/jdk.crypto.ucrypto/solaris/native/libj2ucrypto
-
$(eval $(call SetupJdkLibrary, BUILD_LIBJ2UCRYPTO, \
NAME := j2ucrypto, \
- SRC := $(LIBJ2UCRYPTO_SRC), \
OPTIMIZATION := LOW, \
- CFLAGS := $(CFLAGS_JDKLIB) \
- $(addprefix -I, $(LIBJ2UCRYPTO_SRC)), \
+ CFLAGS := $(CFLAGS_JDKLIB), \
LDFLAGS := $(LDFLAGS_JDKLIB), \
LIBS := $(LIBDL), \
))
diff --git a/make/lib/Lib-jdk.hotspot.agent.gmk b/make/lib/Lib-jdk.hotspot.agent.gmk
index 3f023dbd772..2f805c438e8 100644
--- a/make/lib/Lib-jdk.hotspot.agent.gmk
+++ b/make/lib/Lib-jdk.hotspot.agent.gmk
@@ -29,19 +29,6 @@ $(eval $(call IncludeCustomExtension, hotspot/lib/Lib-jdk.hotspot.agent.gmk))
################################################################################
-SA_TOPDIR := $(TOPDIR)/src/jdk.hotspot.agent
-
-SA_SRC += \
- $(SA_TOPDIR)/share/native/libsaproc \
- $(SA_TOPDIR)/$(OPENJDK_TARGET_OS)/native/libsaproc \
- #
-
-SA_INCLUDES := \
- $(addprefix -I, $(SA_SRC)) \
- -I$(SUPPORT_OUTPUTDIR)/headers/jdk.hotspot.agent \
- -I$(TOPDIR)/src/hotspot/os/$(OPENJDK_TARGET_OS) \
- #
-
ifeq ($(OPENJDK_TARGET_OS), linux)
SA_CFLAGS := -D_FILE_OFFSET_BITS=64
@@ -68,9 +55,8 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBSA, \
DISABLED_WARNINGS_microsoft := 4267, \
DISABLED_WARNINGS_gcc := sign-compare, \
DISABLED_WARNINGS_CXX_solstudio := truncwarn unknownpragma, \
- SRC := $(SA_SRC), \
- CFLAGS := $(CFLAGS_JDKLIB) $(SA_INCLUDES) $(SA_CFLAGS) $(SA_CUSTOM_CFLAGS), \
- CXXFLAGS := $(CXXFLAGS_JDKLIB) $(SA_INCLUDES) $(SA_CFLAGS) $(SA_CXXFLAGS), \
+ CFLAGS := $(CFLAGS_JDKLIB) $(SA_CFLAGS), \
+ CXXFLAGS := $(CXXFLAGS_JDKLIB) $(SA_CFLAGS) $(SA_CXXFLAGS), \
LDFLAGS := $(LDFLAGS_JDKLIB) $(SA_LDFLAGS), \
LIBS_linux := -lthread_db $(LIBDL), \
LIBS_solaris := -ldl -ldemangle -lthread -lproc, \
diff --git a/make/lib/Lib-jdk.internal.le.gmk b/make/lib/Lib-jdk.internal.le.gmk
index 62f69bcadd3..0aa7bb273f5 100644
--- a/make/lib/Lib-jdk.internal.le.gmk
+++ b/make/lib/Lib-jdk.internal.le.gmk
@@ -29,19 +29,10 @@ include LibCommon.gmk
ifeq ($(OPENJDK_TARGET_OS), windows)
- LIBLE_SRC := $(TOPDIR)/src/jdk.internal.le/$(OPENJDK_TARGET_OS_TYPE)/native/lible \
- #
- LIBLE_CPPFLAGS := \
- $(addprefix -I, $(LIBLE_SRC)) \
- -I$(SUPPORT_OUTPUTDIR)/headers/jdk.internal.le \
- #
-
$(eval $(call SetupJdkLibrary, BUILD_LIBLE, \
NAME := le, \
- SRC := $(LIBLE_SRC), \
OPTIMIZATION := LOW, \
- CFLAGS := $(CFLAGS_JDKLIB) $(LIBJAVA_HEADER_FLAGS)\
- $(LIBLE_CPPFLAGS), \
+ CFLAGS := $(CFLAGS_JDKLIB), \
LDFLAGS := $(LDFLAGS_JDKLIB), \
LIBS := $(JDKLIB_LIBS) user32.lib, \
))
diff --git a/make/lib/Lib-jdk.jdi.gmk b/make/lib/Lib-jdk.jdi.gmk
index ff0cd954b75..197b95c2e20 100644
--- a/make/lib/Lib-jdk.jdi.gmk
+++ b/make/lib/Lib-jdk.jdi.gmk
@@ -29,22 +29,13 @@ include LibCommon.gmk
ifeq ($(OPENJDK_TARGET_OS), windows)
- LIBDT_SHMEM_SRC := $(TOPDIR)/src/jdk.jdi/share/native/libdt_shmem \
- $(TOPDIR)/src/jdk.jdi/$(OPENJDK_TARGET_OS_TYPE)/native/libdt_shmem \
- #
- LIBDT_SHMEM_CPPFLAGS := -I$(INCLUDEDIR) -I$(JDK_OUTPUTDIR)/include/$(OPENJDK_TARGET_OS) \
- $(addprefix -I, $(LIBDT_SHMEM_SRC)) \
- -I$(TOPDIR)/src/jdk.jdwp.agent/share/native/libjdwp/export \
- -I$(TOPDIR)/src/jdk.jdwp.agent/share/native/include \
- -I$(SUPPORT_OUTPUTDIR)/headers/jdk.jdi \
- #
-
$(eval $(call SetupJdkLibrary, BUILD_LIBDT_SHMEM, \
NAME := dt_shmem, \
- SRC := $(LIBDT_SHMEM_SRC), \
OPTIMIZATION := LOW, \
- CFLAGS := $(CFLAGS_JDKLIB) -DUSE_MMAP \
- $(LIBDT_SHMEM_CPPFLAGS), \
+ CFLAGS := $(CFLAGS_JDKLIB) -DUSE_MMAP, \
+ EXTRA_HEADER_DIRS := \
+ jdk.jdwp.agent:include \
+ jdk.jdwp.agent:libjdwp/export, \
LDFLAGS := $(LDFLAGS_JDKLIB), \
LIBS := $(JDKLIB_LIBS), \
))
diff --git a/make/lib/Lib-jdk.jdwp.agent.gmk b/make/lib/Lib-jdk.jdwp.agent.gmk
index f0dc9f3d01c..0bc93e0d352 100644
--- a/make/lib/Lib-jdk.jdwp.agent.gmk
+++ b/make/lib/Lib-jdk.jdwp.agent.gmk
@@ -27,21 +27,14 @@ include LibCommon.gmk
################################################################################
-LIBDT_SOCKET_SRC := $(TOPDIR)/src/jdk.jdwp.agent/share/native/libdt_socket \
- $(TOPDIR)/src/jdk.jdwp.agent/$(OPENJDK_TARGET_OS_TYPE)/native/libdt_socket
-LIBDT_SOCKET_CPPFLAGS := \
- $(addprefix -I, $(LIBDT_SOCKET_SRC)) \
- -I$(TOPDIR)/src/jdk.jdwp.agent/share/native/libjdwp/export \
- -I$(TOPDIR)/src/jdk.jdwp.agent/share/native/libjdwp \
- -I$(TOPDIR)/src/jdk.jdwp.agent/share/native/include \
- #
-
$(eval $(call SetupJdkLibrary, BUILD_LIBDT_SOCKET, \
NAME := dt_socket, \
- SRC := $(LIBDT_SOCKET_SRC), \
OPTIMIZATION := LOW, \
-    CFLAGS := $(CFLAGS_JDKLIB) -DUSE_MMAP \
-        $(LIBDT_SOCKET_CPPFLAGS), \
+    CFLAGS := $(CFLAGS_JDKLIB) -DUSE_MMAP, \
+ EXTRA_HEADER_DIRS := \
+ include \
+ libjdwp/export, \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
LIBS_linux := -lpthread, \
@@ -56,21 +49,14 @@ TARGETS += $(BUILD_LIBDT_SOCKET)
################################################################################
-LIBJDWP_SRC := $(TOPDIR)/src/jdk.jdwp.agent/share/native/libjdwp \
- $(TOPDIR)/src/jdk.jdwp.agent/$(OPENJDK_TARGET_OS_TYPE)/native/libjdwp
-LIBJDWP_CPPFLAGS := \
- -I$(TOPDIR)/src/jdk.jdwp.agent/share/native/libjdwp/export \
- -I$(TOPDIR)/src/jdk.jdwp.agent/share/native/include \
- $(addprefix -I, $(LIBJDWP_SRC))
-
# JDWP_LOGGING causes log messages to be compiled into the library.
$(eval $(call SetupJdkLibrary, BUILD_LIBJDWP, \
NAME := jdwp, \
- SRC := $(LIBJDWP_SRC), \
OPTIMIZATION := LOW, \
- CFLAGS := $(CFLAGS_JDKLIB) -DJDWP_LOGGING \
- $(LIBJDWP_CPPFLAGS) \
- -I$(SUPPORT_OUTPUTDIR)/headers/jdk.jdwp.agent, \
+ CFLAGS := $(CFLAGS_JDKLIB) -DJDWP_LOGGING, \
+ EXTRA_HEADER_DIRS := \
+ include \
+ libjdwp/export, \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
LIBS := $(JDKLIB_LIBS), \
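JDWP_LOGGING is a plain compile-time define, so the logging code is either compiled into libjdwp or absent entirely. A minimal sketch of how such a define is typically made switchable (the variable names are hypothetical; the build above simply hardcodes the flag):

  # Hypothetical toggle for a compile-time logging define:
  DEMO_ENABLE_JDWP_LOGGING ?= true
  ifeq ($(DEMO_ENABLE_JDWP_LOGGING), true)
    DEMO_LIBJDWP_CFLAGS += -DJDWP_LOGGING
  endif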
diff --git a/make/lib/Lib-jdk.management.agent.gmk b/make/lib/Lib-jdk.management.agent.gmk
index 426e84d30d3..35bf7ea21c8 100644
--- a/make/lib/Lib-jdk.management.agent.gmk
+++ b/make/lib/Lib-jdk.management.agent.gmk
@@ -27,17 +27,10 @@ include LibCommon.gmk
################################################################################
-LIBMANAGEMENT_AGENT_SRC += $(TOPDIR)/src/jdk.management.agent/$(OPENJDK_TARGET_OS_TYPE)/native/libmanagement_agent
-LIBMANAGEMENT_AGENT_CFLAGS := $(addprefix -I,$(LIBMANAGEMENT_AGENT_SRC)) \
- -I$(SUPPORT_OUTPUTDIR)/headers/jdk.management.agent \
- $(LIBJAVA_HEADER_FLAGS) \
- #
-
$(eval $(call SetupJdkLibrary, BUILD_LIBMANAGEMENT_AGENT, \
NAME := management_agent, \
- SRC := $(LIBMANAGEMENT_AGENT_SRC), \
OPTIMIZATION := LOW, \
- CFLAGS := $(CFLAGS_JDKLIB) $(LIBMANAGEMENT_AGENT_CFLAGS), \
+ CFLAGS := $(CFLAGS_JDKLIB), \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
LIBS := $(JDKLIB_LIBS), \
diff --git a/make/lib/Lib-jdk.management.gmk b/make/lib/Lib-jdk.management.gmk
index 22a167e23b5..29bfc7a5aa2 100644
--- a/make/lib/Lib-jdk.management.gmk
+++ b/make/lib/Lib-jdk.management.gmk
@@ -30,15 +30,6 @@ $(eval $(call IncludeCustomExtension, lib/Lib-jdk.management.gmk))
################################################################################
-LIBMANAGEMENT_EXT_SRC += $(TOPDIR)/src/jdk.management/share/native/libmanagement_ext \
- $(TOPDIR)/src/jdk.management/$(OPENJDK_TARGET_OS_TYPE)/native/libmanagement_ext \
- $(TOPDIR)/src/jdk.management/$(OPENJDK_TARGET_OS)/native/libmanagement_ext
-LIBMANAGEMENT_EXT_CFLAGS := -I$(TOPDIR)/src/java.management/share/native/include \
- $(addprefix -I,$(LIBMANAGEMENT_EXT_SRC)) \
- -I$(SUPPORT_OUTPUTDIR)/headers/jdk.management \
- $(LIBJAVA_HEADER_FLAGS) \
- #
-
ifeq ($(OPENJDK_TARGET_OS), windows)
# In (at least) VS2013 and later, -DPSAPI_VERSION=1 is needed to generate
# a binary that is compatible with windows versions older than 7/2008R2.
@@ -55,8 +46,6 @@ endif
$(eval $(call SetupJdkLibrary, BUILD_LIBMANAGEMENT_EXT, \
NAME := management_ext, \
- SRC := $(LIBMANAGEMENT_EXT_SRC), \
- LANG := C, \
OPTIMIZATION := $(LIBMANAGEMENT_EXT_OPTIMIZATION), \
CFLAGS := $(CFLAGS_JDKLIB) $(LIBMANAGEMENT_EXT_CFLAGS), \
LDFLAGS := $(LDFLAGS_JDKLIB) \
diff --git a/make/lib/Lib-jdk.net.gmk b/make/lib/Lib-jdk.net.gmk
index 1ea1c2f543a..b202c6373f7 100644
--- a/make/lib/Lib-jdk.net.gmk
+++ b/make/lib/Lib-jdk.net.gmk
@@ -27,13 +27,12 @@ include LibCommon.gmk
################################################################################
-ifneq ($(filter $(OPENJDK_TARGET_OS), solaris linux), )
+ifneq ($(filter $(OPENJDK_TARGET_OS), solaris linux macosx), )
$(eval $(call SetupJdkLibrary, BUILD_LIBEXTNET, \
NAME := extnet, \
- SRC := $(TOPDIR)/src/jdk.net/$(OPENJDK_TARGET_OS)/native/libextnet, \
OPTIMIZATION := LOW, \
- CFLAGS := $(CFLAGS_JDKLIB) -I$(SUPPORT_OUTPUTDIR)/headers/jdk.net, \
+ CFLAGS := $(CFLAGS_JDKLIB), \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
LIBS := -ljava, \
diff --git a/make/lib/Lib-jdk.pack.gmk b/make/lib/Lib-jdk.pack.gmk
index 9884818852c..f401e03e7c0 100644
--- a/make/lib/Lib-jdk.pack.gmk
+++ b/make/lib/Lib-jdk.pack.gmk
@@ -29,16 +29,13 @@ include LibCommon.gmk
$(eval $(call SetupJdkLibrary, BUILD_LIBUNPACK, \
NAME := unpack, \
- SRC := $(TOPDIR)/src/jdk.pack/share/native/libunpack \
- $(TOPDIR)/src/jdk.pack/share/native/common-unpack, \
+ EXTRA_SRC := common-unpack, \
TOOLCHAIN := TOOLCHAIN_LINK_CXX, \
OPTIMIZATION := LOW, \
CFLAGS := $(CXXFLAGS_JDKLIB) \
- -DNO_ZLIB -DUNPACK_JNI -DFULL \
- -I$(SUPPORT_OUTPUTDIR)/headers/java.base \
- -I$(TOPDIR)/src/jdk.pack/share/native/common-unpack \
- $(LIBJAVA_HEADER_FLAGS), \
+ -DNO_ZLIB -DUNPACK_JNI -DFULL, \
CFLAGS_release := -DPRODUCT, \
+ EXTRA_HEADER_DIRS := $(call GetJavaHeaderDir, java.base), \
DISABLED_WARNINGS_gcc := implicit-fallthrough, \
LDFLAGS := $(LDFLAGS_JDKLIB) $(LDFLAGS_CXX_JDK) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
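GetJavaHeaderDir stands in for the -I$(SUPPORT_OUTPUTDIR)/headers/<module> flags removed throughout this change. A minimal sketch of the mapping it presumably performs (demo name hypothetical; the removed flags in the hunks above show the target directory):

  # Hypothetical equivalent of GetJavaHeaderDir:
  DemoGetJavaHeaderDir = $(SUPPORT_OUTPUTDIR)/headers/$(strip $1)
  # $(call DemoGetJavaHeaderDir, java.base)
  #   -> $(SUPPORT_OUTPUTDIR)/headers/java.base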
diff --git a/make/lib/Lib-jdk.sctp.gmk b/make/lib/Lib-jdk.sctp.gmk
index c1d2e468e86..2676102021e 100644
--- a/make/lib/Lib-jdk.sctp.gmk
+++ b/make/lib/Lib-jdk.sctp.gmk
@@ -29,18 +29,15 @@ include LibCommon.gmk
ifeq ($(OPENJDK_TARGET_OS_TYPE), unix)
- ifeq (, $(filter $(OPENJDK_TARGET_OS), macosx aix))
+ ifeq ($(filter $(OPENJDK_TARGET_OS), macosx aix), )
$(eval $(call SetupJdkLibrary, BUILD_LIBSCTP, \
NAME := sctp, \
- SRC := $(TOPDIR)/src/jdk.sctp/$(OPENJDK_TARGET_OS_TYPE)/native/libsctp, \
OPTIMIZATION := LOW, \
- CFLAGS := $(CFLAGS_JDKLIB) \
- -I $(TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS_TYPE)/native/libnio/ch \
- -I $(TOPDIR)/src/java.base/share/native/libnio/ch \
- $(addprefix -I, $(call FindSrcDirsForLib, java.base, net)) \
- $(LIBJAVA_HEADER_FLAGS) \
- -I$(SUPPORT_OUTPUTDIR)/headers/jdk.sctp \
- -I$(SUPPORT_OUTPUTDIR)/headers/java.base, \
+ CFLAGS := $(CFLAGS_JDKLIB), \
+ EXTRA_HEADER_DIRS := \
+ $(call GetJavaHeaderDir, java.base) \
+ java.base:libnet \
+ java.base:libnio/ch, \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
LIBS_unix := -lnio -lnet -ljava -ljvm, \
diff --git a/make/lib/Lib-jdk.security.auth.gmk b/make/lib/Lib-jdk.security.auth.gmk
index de3332f1522..12c26ca3758 100644
--- a/make/lib/Lib-jdk.security.auth.gmk
+++ b/make/lib/Lib-jdk.security.auth.gmk
@@ -29,9 +29,8 @@ include LibCommon.gmk
$(eval $(call SetupJdkLibrary, BUILD_LIBJAAS, \
NAME := jaas, \
- SRC := $(call FindSrcDirsForLib, jdk.security.auth, jaas), \
OPTIMIZATION := LOW, \
- CFLAGS := $(CFLAGS_JDKLIB) -I$(SUPPORT_OUTPUTDIR)/headers/jdk.security.auth, \
+ CFLAGS := $(CFLAGS_JDKLIB), \
LDFLAGS := $(LDFLAGS_JDKLIB) \
$(call SET_SHARED_LIBRARY_ORIGIN), \
LIBS_windows := netapi32.lib user32.lib mpr.lib advapi32.lib $(JDKLIB_LIBS), \
diff --git a/make/lib/LibCommon.gmk b/make/lib/LibCommon.gmk
index 57993303829..7e25925c7d1 100644
--- a/make/lib/LibCommon.gmk
+++ b/make/lib/LibCommon.gmk
@@ -25,9 +25,6 @@
include JdkNativeCompilation.gmk
-# Hook to include the corresponding custom file, if present.
-$(eval $(call IncludeCustomExtension, lib/LibCommon.gmk))
-
################################################################################
GLOBAL_VERSION_INFO_RESOURCE := $(TOPDIR)/src/java.base/windows/native/common/version.rc
@@ -66,16 +63,6 @@ else ifeq ($(TOOLCHAIN_TYPE), xlc)
endif
endif
-################################################################################
-# Find the default set of src dirs for a native library.
-# Param 1 - module name
-# Param 2 - library name
-FindSrcDirsForLib += \
- $(call uniq, $(wildcard \
- $(TOPDIR)/src/$(strip $1)/$(OPENJDK_TARGET_OS)/native/lib$(strip $2) \
- $(TOPDIR)/src/$(strip $1)/$(OPENJDK_TARGET_OS_TYPE)/native/lib$(strip $2) \
- $(TOPDIR)/src/$(strip $1)/share/native/lib$(strip $2)))
-
################################################################################
# Find a library
# Param 1 - module name
@@ -94,10 +81,6 @@ FindStaticLib = \
$(addprefix $(SUPPORT_OUTPUTDIR)/native/, \
$(strip $1)$(strip $3)/$(LIBRARY_PREFIX)$(strip $2)$(STATIC_LIBRARY_SUFFIX))
-################################################################################
-# Define the header include flags needed to compile against it.
-LIBJAVA_HEADER_FLAGS := $(addprefix -I, $(call FindSrcDirsForLib, java.base, java))
-
# Put the libraries here.
INSTALL_LIBRARIES_HERE := $(call FindLibDirForModule, $(MODULE))
diff --git a/make/scripts/compare.sh b/make/scripts/compare.sh
index d0ad63f5a7f..8db1ae02c2c 100644
--- a/make/scripts/compare.sh
+++ b/make/scripts/compare.sh
@@ -1300,35 +1300,10 @@ if [ "$SKIP_DEFAULT" != "true" ]; then
THIS_JDK="$THIS/install/jdk"
OTHER_JDK="$OTHER/install/jdk"
echo "Selecting install images for JDK compare"
- if [ -d "$THIS/install/jre" ] && [ -d "$OTHER/install/jre" ]; then
- THIS_JRE="$THIS/install/jre"
- OTHER_JRE="$OTHER/install/jre"
- echo "Also selecting install images for JRE compare"
- else
- echo "No install JRE image found"
- fi
- elif [ -d "$THIS/images/jdk" ] && [ -d "$OTHER/deploy/images/jdk" ]; then
- THIS_JDK="$THIS/images/jdk"
- OTHER_JDK="$OTHER/deploy/images/jdk"
- echo "Selecting deploy images for JDK compare"
- if [ -d "$THIS/images/jre" ] && [ -d "$OTHER/deploy/images/jre" ]; then
- THIS_JRE="$THIS/images/jre"
- OTHER_JRE="$OTHER/deploy/images/jre"
- echo "Selecting deploy images for JRE compare"
- else
- echo "No deploy JRE image found"
- fi
elif [ -d "$THIS/images/jdk" ] && [ -d "$OTHER/images/jdk" ]; then
THIS_JDK="$THIS/images/jdk"
OTHER_JDK="$OTHER/images/jdk"
echo "Selecting normal images for JDK compare"
- if [ -d "$THIS/images/jre" ] && [ -d "$OTHER/images/jre" ]; then
- THIS_JRE="$THIS/images/jre"
- OTHER_JRE="$OTHER/images/jre"
- echo "Selecting normal images for JRE compare"
- else
- echo "No normal JRE image found"
- fi
elif [ -d "$(ls -d $THIS/licensee-src/build/*/images/jdk 2> /dev/null)" ] \
&& [ -d "$(ls -d $OTHER/licensee-src/build/*/images/jdk 2> /dev/null)" ]
then
@@ -1341,9 +1316,7 @@ if [ "$SKIP_DEFAULT" != "true" ]; then
THIS="$(ls -d $THIS/licensee-src/build/*)"
OTHER="$(ls -d $OTHER/licensee-src/build/*)"
THIS_JDK="$THIS/images/jdk"
- THIS_JRE="$THIS/images/jre"
OTHER_JDK="$OTHER/images/jdk"
- OTHER_JRE="$OTHER/images/jre"
# Rewrite the path to tools that are used from the build
JIMAGE="$(echo "$JIMAGE" | $SED "s|$OLD_THIS|$THIS|g")"
JAVAP="$(echo "$JAVAP" | $SED "s|$OLD_THIS|$THIS|g")"
@@ -1358,17 +1331,13 @@ if [ "$SKIP_DEFAULT" != "true" ]; then
&& [ -d "$OTHER/images/jdk-bundle" -o -d "$OTHER/deploy/images/jdk-bundle" ]; then
if [ -d "$THIS/deploy/images/jdk-bundle" ]; then
THIS_JDK_BUNDLE="$THIS/deploy/images/jdk-bundle"
- THIS_JRE_BUNDLE="$THIS/deploy/images/jre-bundle"
else
THIS_JDK_BUNDLE="$THIS/images/jdk-bundle"
- THIS_JRE_BUNDLE="$THIS/images/jre-bundle"
fi
if [ -d "$OTHER/deploy/images/jdk-bundle" ]; then
OTHER_JDK_BUNDLE="$OTHER/deploy/images/jdk-bundle"
- OTHER_JRE_BUNDLE="$OTHER/deploy/images/jre-bundle"
else
OTHER_JDK_BUNDLE="$OTHER/images/jdk-bundle"
- OTHER_JRE_BUNDLE="$OTHER/images/jre-bundle"
fi
echo "Also comparing jdk macosx bundles"
echo " $THIS_JDK_BUNDLE"
@@ -1457,22 +1426,12 @@ if [ "$CMP_NAMES" = "true" ]; then
echo -n "JDK "
compare_files $THIS_JDK $OTHER_JDK $COMPARE_ROOT/jdk
fi
- if [ -n "$THIS_JRE" ] && [ -n "$OTHER_JRE" ]; then
- echo -n "JRE "
- compare_dirs $THIS_JRE $OTHER_JRE $COMPARE_ROOT/jre
- echo -n "JRE "
- compare_files $THIS_JRE $OTHER_JRE $COMPARE_ROOT/jre
- fi
if [ -n "$THIS_JDK_BUNDLE" ] && [ -n "$OTHER_JDK_BUNDLE" ]; then
echo -n "JDK Bundle "
compare_dirs $THIS_JDK_BUNDLE $OTHER_JDK_BUNDLE $COMPARE_ROOT/jdk-bundle
- echo -n "JRE Bundle "
- compare_dirs $THIS_JRE_BUNDLE $OTHER_JRE_BUNDLE $COMPARE_ROOT/jre-bundle
echo -n "JDK Bundle "
compare_files $THIS_JDK_BUNDLE $OTHER_JDK_BUNDLE $COMPARE_ROOT/jdk-bundle
- echo -n "JRE Bundle "
- compare_files $THIS_JRE_BUNDLE $OTHER_JRE_BUNDLE $COMPARE_ROOT/jre-bundle
fi
if [ -n "$THIS_DOCS" ] && [ -n "$OTHER_DOCS" ]; then
echo -n "Docs "
@@ -1538,15 +1497,9 @@ if [ "$CMP_GENERAL" = "true" ]; then
echo -n "JDK "
compare_general_files $THIS_JDK $OTHER_JDK $COMPARE_ROOT/jdk
fi
- if [ -n "$THIS_JRE" ] && [ -n "$OTHER_JRE" ]; then
- echo -n "JRE "
- compare_general_files $THIS_JRE $OTHER_JRE $COMPARE_ROOT/jre
- fi
if [ -n "$THIS_JDK_BUNDLE" ] && [ -n "$OTHER_JDK_BUNDLE" ]; then
echo -n "JDK Bundle "
compare_general_files $THIS_JDK_BUNDLE $OTHER_JDK_BUNDLE $COMPARE_ROOT/jdk-bundle
- echo -n "JRE Bundle "
- compare_general_files $THIS_JRE_BUNDLE $OTHER_JRE_BUNDLE $COMPARE_ROOT/jre-bundle
fi
if [ -n "$THIS_DOCS" ] && [ -n "$OTHER_DOCS" ]; then
echo -n "Docs "
@@ -1615,10 +1568,6 @@ if [ "$CMP_PERMS" = "true" ]; then
echo -n "JDK "
compare_permissions $THIS_JDK $OTHER_JDK $COMPARE_ROOT/jdk
fi
- if [ -n "$THIS_JRE" ] && [ -n "$OTHER_JRE" ]; then
- echo -n "JRE "
- compare_permissions $THIS_JRE $OTHER_JRE $COMPARE_ROOT/jre
- fi
if [ -n "$THIS_BASE_DIR" ] && [ -n "$OTHER_BASE_DIR" ]; then
compare_permissions $THIS_BASE_DIR $OTHER_BASE_DIR $COMPARE_ROOT/base_dir
fi
@@ -1637,15 +1586,9 @@ if [ "$CMP_TYPES" = "true" ]; then
echo -n "JDK "
compare_file_types $THIS_JDK $OTHER_JDK $COMPARE_ROOT/jdk
fi
- if [ -n "$THIS_JRE" ] && [ -n "$OTHER_JRE" ]; then
- echo -n "JRE "
- compare_file_types $THIS_JRE $OTHER_JRE $COMPARE_ROOT/jre
- fi
if [ -n "$THIS_JDK_BUNDLE" ] && [ -n "$OTHER_JDK_BUNDLE" ]; then
echo -n "JDK Bundle "
compare_file_types $THIS_JDK_BUNDLE $OTHER_JDK_BUNDLE $COMPARE_ROOT/jdk-bundle
- echo -n "JRE Bundle "
- compare_file_types $THIS_JRE_BUNDLE $OTHER_JRE_BUNDLE $COMPARE_ROOT/jre-bundle
fi
if [ -n "$THIS_BASE_DIR" ] && [ -n "$OTHER_BASE_DIR" ]; then
compare_file_types $THIS_BASE_DIR $OTHER_BASE_DIR $COMPARE_ROOT/base_dir
diff --git a/make/test/JtregNativeHotspot.gmk b/make/test/JtregNativeHotspot.gmk
index 73b2d9d247c..1312d102a4d 100644
--- a/make/test/JtregNativeHotspot.gmk
+++ b/make/test/JtregNativeHotspot.gmk
@@ -134,6 +134,11 @@ NSK_JVMTI_AOD_INCLUDES := \
-I$(VM_TESTBASE_DIR)/nsk/share/jvmti \
-I$(VM_TESTBASE_DIR)/nsk/share/jvmti/aod
+NSK_AOD_INCLUDES := \
+ -I$(VM_TESTBASE_DIR)/nsk/share/aod \
+ -I$(VM_TESTBASE_DIR)/nsk/share/native \
+ -I$(VM_TESTBASE_DIR)/nsk/share/jni
+
BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libProcessUtils := $(VM_SHARE_INCLUDES)
BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libThreadController := $(NSK_MONITORING_INCLUDES)
@@ -823,6 +828,12 @@ BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libattach021Agent00 := $(NSK_JVMTI_AOD_INCL
BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libattach050Agent00 := $(NSK_JVMTI_AOD_INCLUDES)
BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libattach002Agent00 := $(NSK_JVMTI_AOD_INCLUDES)
+BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libVirtualMachine07agent00 := $(NSK_AOD_INCLUDES)
+BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libVirtualMachine07agent01 := $(NSK_AOD_INCLUDES)
+BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libVirtualMachine07agent02 := $(NSK_AOD_INCLUDES)
+BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libVirtualMachine07agent03 := $(NSK_AOD_INCLUDES)
+BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libVirtualMachine09agent00 := $(NSK_AOD_INCLUDES)
+
################################################################################
# Platform specific setup
@@ -851,8 +862,6 @@ ifeq ($(OPENJDK_TARGET_OS), windows)
BUILD_HOTSPOT_JTREG_EXECUTABLES_CFLAGS_exeFPRegs := -MT
BUILD_HOTSPOT_JTREG_EXCLUDE += exesigtest.c
- # Disable warning until JDK-8203802 is fixed
- BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libtimers += -wd4477
else
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libbootclssearch_agent += -lpthread
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libsystemclssearch_agent += -lpthread
diff --git a/make/test/JtregNativeJdk.gmk b/make/test/JtregNativeJdk.gmk
index 1d2a6045b6b..2969173f7cc 100644
--- a/make/test/JtregNativeJdk.gmk
+++ b/make/test/JtregNativeJdk.gmk
@@ -64,6 +64,14 @@ else
endif
endif
+ifeq ($(OPENJDK_TARGET_OS), macosx)
+ BUILD_JDK_JTREG_LIBRARIES_CFLAGS_libTestMainKeyWindow := -ObjC
+ BUILD_JDK_JTREG_LIBRARIES_LIBS_libTestMainKeyWindow := -framework JavaVM \
+ -framework Cocoa -framework JavaNativeFoundation
+else
+ BUILD_JDK_JTREG_EXCLUDE += libTestMainKeyWindow.c
+endif
+
$(eval $(call SetupTestFilesCompilation, BUILD_JDK_JTREG_LIBRARIES, \
TYPE := LIBRARY, \
SOURCE_DIRS := $(BUILD_JDK_JTREG_NATIVE_SRC), \
diff --git a/src/demo/share/java2d/J2DBench/options/default.opt b/src/demo/share/java2d/J2DBench/options/default.opt
index 52cae6c31a7..e188354f3fc 100644
--- a/src/demo/share/java2d/J2DBench/options/default.opt
+++ b/src/demo/share/java2d/J2DBench/options/default.opt
@@ -151,7 +151,7 @@ pixel.dbtests.getelem=disabled
pixel.dbtests.setelem=disabled
text.opts.data.tlength=32
text.opts.data.tscript=english
-text.opts.font.fname=lucida
+text.opts.font.fname=dialog
text.opts.font.fstyle=0
text.opts.font.fsize=13.0
text.opts.font.ftx=Identity
diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad
index 90d51f4bf0e..31e99213e8c 100644
--- a/src/hotspot/cpu/aarch64/aarch64.ad
+++ b/src/hotspot/cpu/aarch64/aarch64.ad
@@ -3792,69 +3792,7 @@ bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, Ve
return false;
}
-// Transform:
-// (AddP base (AddP base address (LShiftL index con)) offset)
-// into:
-// (AddP base (AddP base offset) (LShiftL index con))
-// to take full advantage of ARM's addressing modes
void Compile::reshape_address(AddPNode* addp) {
- Node *addr = addp->in(AddPNode::Address);
- if (addr->is_AddP() && addr->in(AddPNode::Base) == addp->in(AddPNode::Base)) {
- const AddPNode *addp2 = addr->as_AddP();
- if ((addp2->in(AddPNode::Offset)->Opcode() == Op_LShiftL &&
- addp2->in(AddPNode::Offset)->in(2)->is_Con() &&
- size_fits_all_mem_uses(addp, addp2->in(AddPNode::Offset)->in(2)->get_int())) ||
- addp2->in(AddPNode::Offset)->Opcode() == Op_ConvI2L) {
-
- // Any use that can't embed the address computation?
- for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
- Node* u = addp->fast_out(i);
- if (!u->is_Mem()) {
- return;
- }
- if (u->is_LoadVector() || u->is_StoreVector() || u->Opcode() == Op_StoreCM) {
- return;
- }
- if (addp2->in(AddPNode::Offset)->Opcode() != Op_ConvI2L) {
- int scale = 1 << addp2->in(AddPNode::Offset)->in(2)->get_int();
- if (VM_Version::expensive_load(u->as_Mem()->memory_size(), scale)) {
- return;
- }
- }
- }
-
- Node* off = addp->in(AddPNode::Offset);
- Node* addr2 = addp2->in(AddPNode::Address);
- Node* base = addp->in(AddPNode::Base);
-
- Node* new_addr = NULL;
- // Check whether the graph already has the new AddP we need
- // before we create one (no GVN available here).
- for (DUIterator_Fast imax, i = addr2->fast_outs(imax); i < imax; i++) {
- Node* u = addr2->fast_out(i);
- if (u->is_AddP() &&
- u->in(AddPNode::Base) == base &&
- u->in(AddPNode::Address) == addr2 &&
- u->in(AddPNode::Offset) == off) {
- new_addr = u;
- break;
- }
- }
-
- if (new_addr == NULL) {
- new_addr = new AddPNode(base, addr2, off);
- }
- Node* new_off = addp2->in(AddPNode::Offset);
- addp->set_req(AddPNode::Address, new_addr);
- if (addr->outcnt() == 0) {
- addr->disconnect_inputs(NULL, this);
- }
- addp->set_req(AddPNode::Offset, new_off);
- if (off->outcnt() == 0) {
- off->disconnect_inputs(NULL, this);
- }
- }
- }
}
// helper for encoding java_to_runtime calls on sim
diff --git a/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp
index b4b49e68633..fca142f9552 100644
--- a/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp
@@ -24,6 +24,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
@@ -72,19 +73,20 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
}
if (_index->is_cpu_register()) {
- __ mov(r22, _index->as_register());
+ __ mov(rscratch1, _index->as_register());
} else {
- __ mov(r22, _index->as_jint());
+ __ mov(rscratch1, _index->as_jint());
}
Runtime1::StubID stub_id;
if (_throw_index_out_of_bounds_exception) {
stub_id = Runtime1::throw_index_exception_id;
} else {
assert(_array != NULL, "sanity");
- __ mov(r23, _array->as_pointer_register());
+ __ mov(rscratch2, _array->as_pointer_register());
stub_id = Runtime1::throw_range_check_failed_id;
}
- __ far_call(RuntimeAddress(Runtime1::entry_for(stub_id)), NULL, rscratch2);
+ __ lea(lr, RuntimeAddress(Runtime1::entry_for(stub_id)));
+ __ blr(lr);
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
index 0f92f54df2f..e785a74f3e0 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
@@ -24,6 +24,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Compilation.hpp"
diff --git a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
index 0eaf007db7f..dbb30907674 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
@@ -24,6 +24,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
diff --git a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp
index 1fa3e3d8f44..27a426b0f42 100644
--- a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp
@@ -323,7 +323,7 @@ void Runtime1::initialize_pd() {
// target: the entry point of the method that creates and posts the exception oop
-// has_argument: true if the exception needs arguments (passed in r22 and r23)
+// has_argument: true if the exception needs arguments (passed in rscratch1 and rscratch2)
OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
// make a frame and preserve the caller's caller-save registers
@@ -332,7 +332,9 @@ OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address targe
if (!has_argument) {
call_offset = __ call_RT(noreg, noreg, target);
} else {
- call_offset = __ call_RT(noreg, noreg, target, r22, r23);
+ __ mov(c_rarg1, rscratch1);
+ __ mov(c_rarg2, rscratch2);
+ call_offset = __ call_RT(noreg, noreg, target);
}
OopMapSet* oop_maps = new OopMapSet();
oop_maps->add_gc_map(call_offset, oop_map);
diff --git a/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp
index 9738d9be7a3..b112992939b 100644
--- a/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp
@@ -29,6 +29,7 @@
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
+#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
@@ -60,9 +61,9 @@ void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm
__ mov(c_rarg1, count);
}
if (UseCompressedOops) {
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry), 2);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry), 2);
} else {
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry), 2);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry), 2);
}
__ pop(saved_regs, sp);
}
@@ -78,7 +79,7 @@ void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* mas
__ lsr(scratch, scratch, LogBytesPerHeapOop); // convert to element count
__ mov(c_rarg0, start);
__ mov(c_rarg1, scratch);
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry), 2);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry), 2);
__ pop(saved_regs, sp);
}
@@ -161,9 +162,9 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
if (expand_call) {
assert(pre_val != c_rarg1, "smashed arg");
- __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
+ __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
} else {
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
}
__ pop(saved, sp);
@@ -245,7 +246,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
// save the live input values
RegSet saved = RegSet::of(store_addr, new_val);
__ push(saved, sp);
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread);
__ pop(saved, sp);
__ bind(done);
@@ -398,7 +399,7 @@ void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler*
__ bind(runtime);
__ push_call_clobbered_registers();
__ load_parameter(0, pre_val);
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
__ pop_call_clobbered_registers();
__ bind(done);
@@ -468,7 +469,7 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
__ bind(runtime);
__ push_call_clobbered_registers();
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread);
__ pop_call_clobbered_registers();
__ bind(done);
__ epilogue();
diff --git a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp
index a20ba4d84ca..f7745c0e656 100644
--- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp
@@ -56,6 +56,15 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators,
}
break;
}
+ case T_BOOLEAN: __ load_unsigned_byte (dst, src); break;
+ case T_BYTE: __ load_signed_byte (dst, src); break;
+ case T_CHAR: __ load_unsigned_short(dst, src); break;
+ case T_SHORT: __ load_signed_short (dst, src); break;
+ case T_INT: __ ldrw (dst, src); break;
+ case T_LONG: __ ldr (dst, src); break;
+ case T_ADDRESS: __ ldr (dst, src); break;
+ case T_FLOAT: __ ldrs (v0, src); break;
+ case T_DOUBLE: __ ldrd (v0, src); break;
default: Unimplemented();
}
}
@@ -84,6 +93,18 @@ void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators
}
break;
}
+ case T_BOOLEAN:
+ __ andw(val, val, 0x1); // boolean is true if LSB is 1
+ __ strb(val, dst);
+ break;
+ case T_BYTE: __ strb(val, dst); break;
+ case T_CHAR: __ strh(val, dst); break;
+ case T_SHORT: __ strh(val, dst); break;
+ case T_INT: __ strw(val, dst); break;
+ case T_LONG: __ str (val, dst); break;
+ case T_ADDRESS: __ str (val, dst); break;
+ case T_FLOAT: __ strs(v0, dst); break;
+ case T_DOUBLE: __ strd(v0, dst); break;
default: Unimplemented();
}
}
diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
index fc39908a2ee..fc9aa31eba1 100644
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
@@ -24,6 +24,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interp_masm_aarch64.hpp"
@@ -267,9 +268,6 @@ void InterpreterMacroAssembler::get_method_counters(Register method,
void InterpreterMacroAssembler::load_resolved_reference_at_index(
Register result, Register index, Register tmp) {
assert_different_registers(result, index);
- // convert from field index to resolved_references() index and from
- // word index to byte offset. Since this is a java object, it can be compressed
- lslw(index, index, LogBytesPerHeapOop);
get_constant_pool(result);
// load pointer for resolved_references[] objArray
@@ -277,8 +275,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(
ldr(result, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
resolve_oop_handle(result, tmp);
// Add in the index
- add(result, result, index);
- load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
+ add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
+ load_heap_oop(result, Address(result, index, Address::uxtw(LogBytesPerHeapOop)));
}
void InterpreterMacroAssembler::load_resolved_klass_at_offset(
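The rewritten sequence here (and the matching templateTable_aarch64.cpp changes below) folds the array base offset into the element index so the load can use a single scaled Address operand instead of a separate lea. A minimal sketch of the arithmetic, assuming compressed oops (LogBytesPerHeapOop == 2) and, for illustration, a 16-byte object-array base offset:

    // index' = index + (base_offset >> 2) = index + 4
    // addr   = result + (index' << 2)     = result + 16 + index * 4
    //        = result + base_offset + index * sizeof(narrowOop)
    // i.e. the same address the old lslw + add + offset form computed.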
diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp
index d94860d37fa..1143f6b3c70 100644
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp
@@ -27,7 +27,6 @@
#define CPU_AARCH64_VM_INTERP_MASM_AARCH64_64_HPP
#include "asm/macroAssembler.hpp"
-#include "asm/macroAssembler.inline.hpp"
#include "interpreter/invocationCounter.hpp"
#include "runtime/frame.hpp"
diff --git a/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp b/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp
index 913590c930d..86a74782278 100644
--- a/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp
@@ -24,6 +24,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
index 96ef3f9763f..b8dc219a1bb 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -37,6 +37,7 @@
#include "compiler/disassembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
+#include "oops/accessDecorators.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/oop.hpp"
@@ -2112,7 +2113,6 @@ void MacroAssembler::verify_heapbase(const char* msg) {
#endif
void MacroAssembler::resolve_jobject(Register value, Register thread, Register tmp) {
- BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
Label done, not_weak;
cbz(value, done); // Use NULL as-is.
@@ -2120,15 +2120,15 @@ void MacroAssembler::resolve_jobject(Register value, Register thread, Register t
tbz(r0, 0, not_weak); // Test for jweak tag.
// Resolve jweak.
- bs->load_at(this, IN_ROOT | ON_PHANTOM_OOP_REF, T_OBJECT,
- value, Address(value, -JNIHandles::weak_tag_value), tmp, thread);
+ access_load_at(T_OBJECT, IN_ROOT | ON_PHANTOM_OOP_REF, value,
+ Address(value, -JNIHandles::weak_tag_value), tmp, thread);
verify_oop(value);
b(done);
bind(not_weak);
// Resolve (untagged) jobject.
- bs->load_at(this, IN_ROOT | IN_CONCURRENT_ROOT, T_OBJECT,
- value, Address(value, 0), tmp, thread);
+ access_load_at(T_OBJECT, IN_CONCURRENT_ROOT, value, Address(value, 0), tmp,
+ thread);
verify_oop(value);
bind(done);
}
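For context, JNI handles carry a low-bit tag that distinguishes weak global handles (JNIHandles::weak_tag_value is 1); that is what the tbz on bit 0 above tests. A rough sketch of the scheme, not the exact HotSpot definitions:

    // jobject: aligned pointer, bit 0 == 0 -> resolve with IN_CONCURRENT_ROOT
    // jweak:   pointer | 1,     bit 0 == 1 -> load at (value - 1), i.e. the
    //          untagged address, with ON_PHANTOM_OOP_REF so the GC treats
    //          the read like a phantom reference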
@@ -3663,9 +3663,8 @@ void MacroAssembler::load_klass(Register dst, Register src) {
// ((OopHandle)result).resolve();
void MacroAssembler::resolve_oop_handle(Register result, Register tmp) {
// OopHandle::resolve is an indirection.
- BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
- bs->load_at(this, IN_ROOT | IN_CONCURRENT_ROOT, T_OBJECT,
- result, Address(result, 0), tmp, rthread);
+ access_load_at(T_OBJECT, IN_CONCURRENT_ROOT,
+ result, Address(result, 0), tmp, noreg);
}
void MacroAssembler::load_mirror(Register dst, Register method, Register tmp) {
@@ -3983,6 +3982,7 @@ void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
Register dst, Address src,
Register tmp1, Register thread_tmp) {
BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
+ decorators = AccessInternal::decorator_fixup(decorators);
bool as_raw = (decorators & AS_RAW) != 0;
if (as_raw) {
bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
@@ -3995,6 +3995,7 @@ void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
Address dst, Register src,
Register tmp1, Register thread_tmp) {
BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
+ decorators = AccessInternal::decorator_fixup(decorators);
bool as_raw = (decorators & AS_RAW) != 0;
if (as_raw) {
bs->BarrierSetAssembler::store_at(this, decorators, type, dst, src, tmp1, thread_tmp);
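Both access helpers now canonicalize the decorator set before dispatching. Presumably the point is that assembly callers may pass a minimal set (say, just IN_HEAP) and AccessInternal::decorator_fixup infers the implied defaults, so every barrier set assembler sees the same canonical input the C++ Access API would produce. The shared dispatch pattern, condensed:

    // decorators = AccessInternal::decorator_fixup(decorators); // add implied defaults
    // if (decorators & AS_RAW)
    //   bs->BarrierSetAssembler::load_at(...);  // statically bound: plain access
    // else
    //   bs->load_at(...);                       // virtual: GC-specific barriers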
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
index c064e150d55..5c977eb3261 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
@@ -1020,7 +1020,7 @@ public:
address trampoline_call(Address entry, CodeBuffer *cbuf = NULL);
static bool far_branches() {
- return ReservedCodeCacheSize > branch_range;
+ return ReservedCodeCacheSize > branch_range || UseAOT;
}
// Jumps that can reach anywhere in the code cache.
diff --git a/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp b/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp
index 8e1eae23185..9e92b2d2f90 100644
--- a/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp
@@ -141,7 +141,7 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
__ verify_oop(method_temp);
__ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())), temp2);
__ verify_oop(method_temp);
- __ ldr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())));
+ __ access_load_at(T_ADDRESS, IN_HEAP, method_temp, Address(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())), noreg, noreg);
if (VerifyMethodHandles && !for_compiler_entry) {
// make sure recv is already on stack
@@ -340,7 +340,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
}
__ load_heap_oop(rmethod, member_vmtarget);
- __ ldr(rmethod, vmtarget_method);
+ __ access_load_at(T_ADDRESS, IN_HEAP, rmethod, vmtarget_method, noreg, noreg);
break;
case vmIntrinsics::_linkToStatic:
@@ -348,7 +348,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
}
__ load_heap_oop(rmethod, member_vmtarget);
- __ ldr(rmethod, vmtarget_method);
+ __ access_load_at(T_ADDRESS, IN_HEAP, rmethod, vmtarget_method, noreg, noreg);
break;
case vmIntrinsics::_linkToVirtual:
@@ -362,7 +362,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
// pick out the vtable index from the MemberName, and then we can discard it:
Register temp2_index = temp2;
- __ ldr(temp2_index, member_vmindex);
+ __ access_load_at(T_ADDRESS, IN_HEAP, temp2_index, member_vmindex, noreg, noreg);
if (VerifyMethodHandles) {
Label L_index_ok;
@@ -394,7 +394,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
__ verify_klass_ptr(temp3_intf);
Register rindex = rmethod;
- __ ldr(rindex, member_vmindex);
+ __ access_load_at(T_ADDRESS, IN_HEAP, rindex, member_vmindex, noreg, noreg);
if (VerifyMethodHandles) {
Label L;
__ cmpw(rindex, 0U);
diff --git a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp
index f988a361d4d..5cda3e0e240 100644
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp
@@ -234,8 +234,12 @@ class NativeCall: public NativeInstruction {
}
#if INCLUDE_AOT
+ // Return true iff a call from instr to target is out of range.
+ // Used for calls from JIT- to AOT-compiled code.
static bool is_far_call(address instr, address target) {
- return !Assembler::reachable_from_branch_at(instr, target);
+ // On AArch64 we use trampolines, which can reach anywhere in the
+ // address space, so calls are never out of range.
+ return false;
}
#endif
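This pairs with the far_branches() change in macroAssembler_aarch64.hpp above: with UseAOT every call goes through a trampoline stub, and a trampoline can reach the entire address space, so a JIT-to-AOT call is never out of range. A hand-wavy sketch of the trampoline shape (the real stub is emitted by the trampoline_call machinery, not shown in this patch):

    //   bl   trampoline      // direct branch; +/-128 MB reach suffices
    //   ...
    // trampoline:
    //   ldr  xN, 0f          // load the 64-bit absolute target
    //   br   xN
    // 0: .quad target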
diff --git a/src/hotspot/cpu/aarch64/register_definitions_aarch64.cpp b/src/hotspot/cpu/aarch64/register_definitions_aarch64.cpp
index 5411d06e12a..c18109087e1 100644
--- a/src/hotspot/cpu/aarch64/register_definitions_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/register_definitions_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "asm/assembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "asm/register.hpp"
#include "register_aarch64.hpp"
# include "interp_masm_aarch64.hpp"
diff --git a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
index 372bdfb83f9..b0752e9a7ad 100644
--- a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
@@ -24,7 +24,7 @@
*/
#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
diff --git a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
index 507a652b88a..632ab3a0313 100644
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
@@ -24,7 +24,7 @@
*/
#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
@@ -760,8 +760,8 @@ void TemplateTable::iaload()
// r0: array
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
- __ lea(r1, Address(r0, r1, Address::uxtw(2)));
- __ ldrw(r0, Address(r1, arrayOopDesc::base_offset_in_bytes(T_INT)));
+ __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
+ __ access_load_at(T_INT, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
}
void TemplateTable::laload()
@@ -772,8 +772,8 @@ void TemplateTable::laload()
// r0: array
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
- __ lea(r1, Address(r0, r1, Address::uxtw(3)));
- __ ldr(r0, Address(r1, arrayOopDesc::base_offset_in_bytes(T_LONG)));
+ __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
+ __ access_load_at(T_LONG, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
}
void TemplateTable::faload()
@@ -784,8 +784,8 @@ void TemplateTable::faload()
// r0: array
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
- __ lea(r1, Address(r0, r1, Address::uxtw(2)));
- __ ldrs(v0, Address(r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
+ __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
+ __ access_load_at(T_FLOAT, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(2)), noreg, noreg);
}
void TemplateTable::daload()
@@ -796,8 +796,8 @@ void TemplateTable::daload()
// r0: array
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
- __ lea(r1, Address(r0, r1, Address::uxtw(3)));
- __ ldrd(v0, Address(r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
+ __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
+ __ access_load_at(T_DOUBLE, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
}
void TemplateTable::aaload()
@@ -808,12 +808,11 @@ void TemplateTable::aaload()
// r0: array
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
- int s = (UseCompressedOops ? 2 : 3);
- __ lea(r1, Address(r0, r1, Address::uxtw(s)));
+ __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
do_oop_load(_masm,
- Address(r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
+ Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)),
r0,
- IN_HEAP | IN_HEAP_ARRAY);
+ IN_HEAP_ARRAY);
}
void TemplateTable::baload()
@@ -824,8 +823,8 @@ void TemplateTable::baload()
// r0: array
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
- __ lea(r1, Address(r0, r1, Address::uxtw(0)));
- __ load_signed_byte(r0, Address(r1, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
+ __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
+ __ access_load_at(T_BYTE, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(0)), noreg, noreg);
}
void TemplateTable::caload()
@@ -836,8 +835,8 @@ void TemplateTable::caload()
// r0: array
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
- __ lea(r1, Address(r0, r1, Address::uxtw(1)));
- __ load_unsigned_short(r0, Address(r1, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
+ __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
+ __ access_load_at(T_CHAR, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}
// iload followed by caload frequent pair
@@ -853,8 +852,8 @@ void TemplateTable::fast_icaload()
// r0: array
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
- __ lea(r1, Address(r0, r1, Address::uxtw(1)));
- __ load_unsigned_short(r0, Address(r1, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
+ __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
+ __ access_load_at(T_CHAR, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}
void TemplateTable::saload()
@@ -865,8 +864,8 @@ void TemplateTable::saload()
// r0: array
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
- __ lea(r1, Address(r0, r1, Address::uxtw(1)));
- __ load_signed_short(r0, Address(r1, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
+ __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_SHORT) >> 1);
+ __ access_load_at(T_SHORT, IN_HEAP | IN_HEAP_ARRAY, r0, Address(r0, r1, Address::uxtw(1)), noreg, noreg);
}
void TemplateTable::iload(int n)
@@ -1059,9 +1058,8 @@ void TemplateTable::iastore() {
// r1: index
// r3: array
index_check(r3, r1); // prefer index in r1
- __ lea(rscratch1, Address(r3, r1, Address::uxtw(2)));
- __ strw(r0, Address(rscratch1,
- arrayOopDesc::base_offset_in_bytes(T_INT)));
+ __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_INT) >> 2);
+ __ access_store_at(T_INT, IN_HEAP | IN_HEAP_ARRAY, Address(r3, r1, Address::uxtw(2)), r0, noreg, noreg);
}
void TemplateTable::lastore() {
@@ -1072,9 +1070,8 @@ void TemplateTable::lastore() {
// r1: index
// r3: array
index_check(r3, r1); // prefer index in r1
- __ lea(rscratch1, Address(r3, r1, Address::uxtw(3)));
- __ str(r0, Address(rscratch1,
- arrayOopDesc::base_offset_in_bytes(T_LONG)));
+ __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_LONG) >> 3);
+ __ access_store_at(T_LONG, IN_HEAP | IN_HEAP_ARRAY, Address(r3, r1, Address::uxtw(3)), r0, noreg, noreg);
}
void TemplateTable::fastore() {
@@ -1085,9 +1082,8 @@ void TemplateTable::fastore() {
// r1: index
// r3: array
index_check(r3, r1); // prefer index in r1
- __ lea(rscratch1, Address(r3, r1, Address::uxtw(2)));
- __ strs(v0, Address(rscratch1,
- arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
+ __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_FLOAT) >> 2);
+ __ access_store_at(T_FLOAT, IN_HEAP | IN_HEAP_ARRAY, Address(r3, r1, Address::uxtw(2)), noreg /* ftos */, noreg, noreg);
}
void TemplateTable::dastore() {
@@ -1098,9 +1094,8 @@ void TemplateTable::dastore() {
// r1: index
// r3: array
index_check(r3, r1); // prefer index in r1
- __ lea(rscratch1, Address(r3, r1, Address::uxtw(3)));
- __ strd(v0, Address(rscratch1,
- arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
+ __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
+ __ access_store_at(T_DOUBLE, IN_HEAP | IN_HEAP_ARRAY, Address(r3, r1, Address::uxtw(3)), noreg /* dtos */, noreg, noreg);
}
void TemplateTable::aastore() {
@@ -1111,10 +1106,10 @@ void TemplateTable::aastore() {
__ ldr(r2, at_tos_p1()); // index
__ ldr(r3, at_tos_p2()); // array
- Address element_address(r4, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
+ Address element_address(r3, r4, Address::uxtw(LogBytesPerHeapOop));
index_check(r3, r2); // kills r1
- __ lea(r4, Address(r3, r2, Address::uxtw(UseCompressedOops? 2 : 3)));
+ __ add(r4, r2, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
// do array store check - check for NULL value first
__ cbz(r0, is_null);
@@ -1141,7 +1136,7 @@ void TemplateTable::aastore() {
// Get the value we will store
__ ldr(r0, at_tos());
// Now store using the appropriate barrier
- do_oop_store(_masm, element_address, r0, IN_HEAP | IN_HEAP_ARRAY);
+ do_oop_store(_masm, element_address, r0, IN_HEAP_ARRAY);
__ b(done);
// Have a NULL in r0, r3=array, r2=index. Store NULL at ary[idx]
@@ -1149,7 +1144,7 @@ void TemplateTable::aastore() {
__ profile_null_seen(r2);
// Store a NULL
- do_oop_store(_masm, element_address, noreg, IN_HEAP | IN_HEAP_ARRAY);
+ do_oop_store(_masm, element_address, noreg, IN_HEAP_ARRAY);
// Pop stack arguments
__ bind(done);
@@ -1176,9 +1171,8 @@ void TemplateTable::bastore()
__ andw(r0, r0, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
__ bind(L_skip);
- __ lea(rscratch1, Address(r3, r1, Address::uxtw(0)));
- __ strb(r0, Address(rscratch1,
- arrayOopDesc::base_offset_in_bytes(T_BYTE)));
+ __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
+ __ access_store_at(T_BYTE, IN_HEAP | IN_HEAP_ARRAY, Address(r3, r1, Address::uxtw(0)), r0, noreg, noreg);
}
void TemplateTable::castore()
@@ -1190,9 +1184,8 @@ void TemplateTable::castore()
// r1: index
// r3: array
index_check(r3, r1); // prefer index in r1
- __ lea(rscratch1, Address(r3, r1, Address::uxtw(1)));
- __ strh(r0, Address(rscratch1,
- arrayOopDesc::base_offset_in_bytes(T_CHAR)));
+ __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_CHAR) >> 1);
+ __ access_store_at(T_CHAR, IN_HEAP | IN_HEAP_ARRAY, Address(r3, r1, Address::uxtw(1)), r0, noreg, noreg);
}
void TemplateTable::sastore()
@@ -2513,7 +2506,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
if (is_static) rc = may_not_rewrite;
// btos
- __ load_signed_byte(r0, field);
+ __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
__ push(btos);
// Rewrite bytecode to be faster
if (rc == may_rewrite) {
@@ -2526,7 +2519,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
__ br(Assembler::NE, notBool);
// ztos (same code as btos)
- __ ldrsb(r0, field);
+ __ access_load_at(T_BOOLEAN, IN_HEAP, r0, field, noreg, noreg);
__ push(ztos);
// Rewrite bytecode to be faster
if (rc == may_rewrite) {
@@ -2550,7 +2543,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
__ cmp(flags, itos);
__ br(Assembler::NE, notInt);
// itos
- __ ldrw(r0, field);
+ __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
__ push(itos);
// Rewrite bytecode to be faster
if (rc == may_rewrite) {
@@ -2562,7 +2555,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
__ cmp(flags, ctos);
__ br(Assembler::NE, notChar);
// ctos
- __ load_unsigned_short(r0, field);
+ __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
__ push(ctos);
// Rewrite bytecode to be faster
if (rc == may_rewrite) {
@@ -2574,7 +2567,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
__ cmp(flags, stos);
__ br(Assembler::NE, notShort);
// stos
- __ load_signed_short(r0, field);
+ __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
__ push(stos);
// Rewrite bytecode to be faster
if (rc == may_rewrite) {
@@ -2586,7 +2579,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
__ cmp(flags, ltos);
__ br(Assembler::NE, notLong);
// ltos
- __ ldr(r0, field);
+ __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
__ push(ltos);
// Rewrite bytecode to be faster
if (rc == may_rewrite) {
@@ -2598,7 +2591,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
__ cmp(flags, ftos);
__ br(Assembler::NE, notFloat);
// ftos
- __ ldrs(v0, field);
+ __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
__ push(ftos);
// Rewrite bytecode to be faster
if (rc == may_rewrite) {
@@ -2612,7 +2605,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
__ br(Assembler::NE, notDouble);
#endif
// dtos
- __ ldrd(v0, field);
+ __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
__ push(dtos);
// Rewrite bytecode to be faster
if (rc == may_rewrite) {
@@ -2750,7 +2743,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
{
__ pop(btos);
if (!is_static) pop_and_check_object(obj);
- __ strb(r0, field);
+ __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg);
if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_bputfield, bc, r1, true, byte_no);
}
@@ -2765,8 +2758,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
{
__ pop(ztos);
if (!is_static) pop_and_check_object(obj);
- __ andw(r0, r0, 0x1);
- __ strb(r0, field);
+ __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg);
if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_zputfield, bc, r1, true, byte_no);
}
@@ -2797,7 +2789,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
{
__ pop(itos);
if (!is_static) pop_and_check_object(obj);
- __ strw(r0, field);
+ __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg);
if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_iputfield, bc, r1, true, byte_no);
}
@@ -2812,7 +2804,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
{
__ pop(ctos);
if (!is_static) pop_and_check_object(obj);
- __ strh(r0, field);
+ __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg);
if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_cputfield, bc, r1, true, byte_no);
}
@@ -2827,7 +2819,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
{
__ pop(stos);
if (!is_static) pop_and_check_object(obj);
- __ strh(r0, field);
+ __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg);
if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_sputfield, bc, r1, true, byte_no);
}
@@ -2842,7 +2834,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
{
__ pop(ltos);
if (!is_static) pop_and_check_object(obj);
- __ str(r0, field);
+ __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg);
if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_lputfield, bc, r1, true, byte_no);
}
@@ -2857,7 +2849,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
{
__ pop(ftos);
if (!is_static) pop_and_check_object(obj);
- __ strs(v0, field);
+ __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg);
if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_fputfield, bc, r1, true, byte_no);
}
@@ -2874,7 +2866,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
{
__ pop(dtos);
if (!is_static) pop_and_check_object(obj);
- __ strd(v0, field);
+ __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg);
if (rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_dputfield, bc, r1, true, byte_no);
}
@@ -3005,27 +2997,28 @@ void TemplateTable::fast_storefield(TosState state)
do_oop_store(_masm, field, r0, IN_HEAP);
break;
case Bytecodes::_fast_lputfield:
- __ str(r0, field);
+ __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg);
break;
case Bytecodes::_fast_iputfield:
- __ strw(r0, field);
+ __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg);
break;
case Bytecodes::_fast_zputfield:
- __ andw(r0, r0, 0x1); // boolean is true if LSB is 1
- // fall through to bputfield
+ __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg);
+ break;
case Bytecodes::_fast_bputfield:
- __ strb(r0, field);
+ __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg);
break;
case Bytecodes::_fast_sputfield:
- // fall through
+ __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg);
+ break;
case Bytecodes::_fast_cputfield:
- __ strh(r0, field);
+ __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg);
break;
case Bytecodes::_fast_fputfield:
- __ strs(v0, field);
+ __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg);
break;
case Bytecodes::_fast_dputfield:
- __ strd(v0, field);
+ __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg);
break;
default:
ShouldNotReachHere();
@@ -3098,25 +3091,25 @@ void TemplateTable::fast_accessfield(TosState state)
__ verify_oop(r0);
break;
case Bytecodes::_fast_lgetfield:
- __ ldr(r0, field);
+ __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
break;
case Bytecodes::_fast_igetfield:
- __ ldrw(r0, field);
+ __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
break;
case Bytecodes::_fast_bgetfield:
- __ load_signed_byte(r0, field);
+ __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
break;
case Bytecodes::_fast_sgetfield:
- __ load_signed_short(r0, field);
+ __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
break;
case Bytecodes::_fast_cgetfield:
- __ load_unsigned_short(r0, field);
+ __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
break;
case Bytecodes::_fast_fgetfield:
- __ ldrs(v0, field);
+ __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
break;
case Bytecodes::_fast_dgetfield:
- __ ldrd(v0, field);
+ __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
break;
default:
ShouldNotReachHere();
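Note the convention used throughout: ftos/dtos accesses pass noreg for the value register because, as the barrierSetAssembler_aarch64.cpp hunk earlier shows, T_FLOAT/T_DOUBLE loads and stores go through v0 directly; the register argument only matters for integral and pointer types:

    // access_load_at(T_FLOAT,  IN_HEAP, noreg, field, noreg, noreg); // result in v0
    // access_load_at(T_DOUBLE, IN_HEAP, noreg, field, noreg, noreg); // result in v0
    // access_load_at(T_INT,    IN_HEAP, r0,    field, noreg, noreg); // result in r0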
@@ -3161,14 +3154,14 @@ void TemplateTable::fast_xaccess(TosState state)
__ null_check(r0);
switch (state) {
case itos:
- __ ldrw(r0, Address(r0, r1, Address::lsl(0)));
+ __ access_load_at(T_INT, IN_HEAP, r0, Address(r0, r1, Address::lsl(0)), noreg, noreg);
break;
case atos:
do_oop_load(_masm, Address(r0, r1, Address::lsl(0)), r0, IN_HEAP);
__ verify_oop(r0);
break;
case ftos:
- __ ldrs(v0, Address(r0, r1, Address::lsl(0)));
+ __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, Address(r0, r1, Address::lsl(0)), noreg, noreg);
break;
default:
ShouldNotReachHere();
diff --git a/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp b/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp
index e2280b07836..0d40f1d2edc 100644
--- a/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -24,7 +24,7 @@
*/
#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "assembler_aarch64.inline.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_aarch64.hpp"
diff --git a/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp b/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp
index 6e9b285d9b9..4a2a09393ac 100644
--- a/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
diff --git a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp
index abdcfc18541..b358a9fadfa 100644
--- a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
diff --git a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp
index ae89efbd3d5..342c1c3474c 100644
--- a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
diff --git a/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp b/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp
index c1fe04f5f0a..af8a8f4682e 100644
--- a/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
diff --git a/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.cpp b/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.cpp
index 7801d1d2c9e..f9b629b35b0 100644
--- a/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.cpp
@@ -26,6 +26,7 @@
#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
+#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
@@ -74,7 +75,7 @@ void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm
__ mov(R0, addr);
}
#ifdef AARCH64
- __ zero_extend(R1, count, 32); // G1BarrierSet::write_ref_array_pre_*_entry takes size_t
+ __ zero_extend(R1, count, 32); // G1BarrierSetRuntime::write_ref_array_pre_*_entry takes size_t
#else
if (count != R1) {
__ mov(R1, count);
@@ -82,9 +83,9 @@ void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm
#endif // AARCH64
if (UseCompressedOops) {
- __ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry));
+ __ call(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry));
} else {
- __ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry));
+ __ call(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry));
}
#ifdef AARCH64
@@ -106,7 +107,7 @@ void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* mas
__ mov(R0, addr);
}
#ifdef AARCH64
- __ zero_extend(R1, count, 32); // G1BarrierSet::write_ref_array_post_entry takes size_t
+ __ zero_extend(R1, count, 32); // G1BarrierSetRuntime::write_ref_array_post_entry takes size_t
#else
if (count != R1) {
__ mov(R1, count);
@@ -120,7 +121,7 @@ void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* mas
__ push(R9);
#endif // !R9_IS_SCRATCHED
#endif // !AARCH64
- __ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry));
+ __ call(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry));
#ifndef AARCH64
#if R9_IS_SCRATCHED
__ pop(R9);
@@ -205,7 +206,7 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
}
__ mov(R1, Rthread);
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), R0, R1);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), R0, R1);
#ifdef AARCH64
if (store_addr != noreg) {
@@ -296,7 +297,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
__ mov(R0, card_addr);
}
__ mov(R1, Rthread);
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), R0, R1);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), R0, R1);
__ bind(done);
}
@@ -467,7 +468,7 @@ void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler*
assert(r_pre_val_0 == c_rarg0, "pre_val should be in R0");
__ mov(c_rarg1, Rthread);
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), c_rarg0, c_rarg1);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), c_rarg0, c_rarg1);
__ restore_live_registers_without_return();
@@ -574,7 +575,7 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
assert(r_card_addr_0 == c_rarg0, "card_addr should be in R0");
__ mov(c_rarg1, Rthread);
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), c_rarg0, c_rarg1);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), c_rarg0, c_rarg1);
__ restore_live_registers_without_return();
diff --git a/src/hotspot/cpu/arm/interp_masm_arm.cpp b/src/hotspot/cpu/arm/interp_masm_arm.cpp
index 178a7c1cc9f..273f92bdf1a 100644
--- a/src/hotspot/cpu/arm/interp_masm_arm.cpp
+++ b/src/hotspot/cpu/arm/interp_masm_arm.cpp
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "jvm.h"
+#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.inline.hpp"
diff --git a/src/hotspot/cpu/arm/interp_masm_arm.hpp b/src/hotspot/cpu/arm/interp_masm_arm.hpp
index ad77b632254..70d694ae494 100644
--- a/src/hotspot/cpu/arm/interp_masm_arm.hpp
+++ b/src/hotspot/cpu/arm/interp_masm_arm.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
#define CPU_ARM_VM_INTERP_MASM_ARM_HPP
#include "asm/macroAssembler.hpp"
-#include "asm/macroAssembler.inline.hpp"
#include "interpreter/invocationCounter.hpp"
#include "runtime/frame.hpp"
#include "prims/jvmtiExport.hpp"
diff --git a/src/hotspot/cpu/arm/interpreterRT_arm.cpp b/src/hotspot/cpu/arm/interpreterRT_arm.cpp
index 1de15da112a..47c0227e8e0 100644
--- a/src/hotspot/cpu/arm/interpreterRT_arm.cpp
+++ b/src/hotspot/cpu/arm/interpreterRT_arm.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
diff --git a/src/hotspot/cpu/arm/macroAssembler_arm.cpp b/src/hotspot/cpu/arm/macroAssembler_arm.cpp
index 1bca5dc5134..61628e4679e 100644
--- a/src/hotspot/cpu/arm/macroAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.cpp
@@ -36,6 +36,7 @@
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
+#include "oops/accessDecorators.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
@@ -1366,9 +1367,12 @@ void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes, Regi
// Bump total bytes allocated by this thread
Label done;
- ldr(tmp, Address(Rthread, in_bytes(JavaThread::allocated_bytes_offset())));
+ // Borrow Rthread as a pointer to the alloc counter
+ Register Ralloc = Rthread;
+ add(Ralloc, Ralloc, in_bytes(JavaThread::allocated_bytes_offset()));
+ ldr(tmp, Address(Ralloc));
adds(tmp, tmp, size_in_bytes);
- str(tmp, Address(Rthread, in_bytes(JavaThread::allocated_bytes_offset())), cc);
+ str(tmp, Address(Ralloc), cc);
b(done, cc);
// Increment the high word and store single-copy atomically (that is an unlikely scenario on typical embedded systems as it means >4GB has been allocated)
@@ -1386,14 +1390,17 @@ void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes, Regi
}
push(RegisterSet(low, high));
- ldrd(low, Address(Rthread, in_bytes(JavaThread::allocated_bytes_offset())));
+ ldrd(low, Address(Ralloc));
adds(low, low, size_in_bytes);
adc(high, high, 0);
- strd(low, Address(Rthread, in_bytes(JavaThread::allocated_bytes_offset())));
+ strd(low, Address(Ralloc));
pop(RegisterSet(low, high));
bind(done);
+
+ // Unborrow Rthread: restore the thread pointer
+ sub(Rthread, Ralloc, in_bytes(JavaThread::allocated_bytes_offset()));
#endif // AARCH64
}
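The borrow trick above avoids needing another scratch register on 32-bit ARM: Rthread itself is advanced to point at the allocated-bytes counter and restored afterwards, so nothing in between may use it as the thread pointer. Schematically:

    // add Rthread, Rthread, #allocated_bytes_offset  // Ralloc aliases Rthread
    // ... update the counter via Address(Ralloc) ...
    // sub Rthread, Ralloc, #allocated_bytes_offset   // restore the thread pointer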
@@ -2133,7 +2140,7 @@ void MacroAssembler::resolve_jobject(Register value,
b(done);
bind(not_weak);
// Resolve (untagged) jobject.
- access_load_at(T_OBJECT, IN_ROOT | IN_CONCURRENT_ROOT,
+ access_load_at(T_OBJECT, IN_CONCURRENT_ROOT,
Address(value, 0), value, tmp1, tmp2, noreg);
verify_oop(value);
bind(done);
@@ -2700,6 +2707,7 @@ void MacroAssembler::store_heap_oop_null(Address obj, Register new_val, Register
void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
Address src, Register dst, Register tmp1, Register tmp2, Register tmp3) {
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+ decorators = AccessInternal::decorator_fixup(decorators);
bool as_raw = (decorators & AS_RAW) != 0;
if (as_raw) {
bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, tmp2, tmp3);
@@ -2711,6 +2719,7 @@ void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
Address obj, Register new_val, Register tmp1, Register tmp2, Register tmp3, bool is_null) {
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+ decorators = AccessInternal::decorator_fixup(decorators);
bool as_raw = (decorators & AS_RAW) != 0;
if (as_raw) {
bs->BarrierSetAssembler::store_at(this, decorators, type, obj, new_val, tmp1, tmp2, tmp3, is_null);
diff --git a/src/hotspot/cpu/arm/relocInfo_arm.cpp b/src/hotspot/cpu/arm/relocInfo_arm.cpp
index f11c1b8201b..6d38ab8fd3b 100644
--- a/src/hotspot/cpu/arm/relocInfo_arm.cpp
+++ b/src/hotspot/cpu/arm/relocInfo_arm.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
#include "nativeInst_arm.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
#include "runtime/safepoint.hpp"
void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
diff --git a/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp b/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp
index fcc66fe4192..8dcfe2839c6 100644
--- a/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp
+++ b/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "asm/assembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
diff --git a/src/hotspot/cpu/arm/templateTable_arm.cpp b/src/hotspot/cpu/arm/templateTable_arm.cpp
index 353500c7660..a17cdb38dac 100644
--- a/src/hotspot/cpu/arm/templateTable_arm.cpp
+++ b/src/hotspot/cpu/arm/templateTable_arm.cpp
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
diff --git a/src/hotspot/cpu/arm/vtableStubs_arm.cpp b/src/hotspot/cpu/arm/vtableStubs_arm.cpp
index f2cd16a3ce7..4e08b9c39cd 100644
--- a/src/hotspot/cpu/arm/vtableStubs_arm.cpp
+++ b/src/hotspot/cpu/arm/vtableStubs_arm.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "asm/assembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "assembler_arm.inline.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_arm.hpp"
diff --git a/src/hotspot/cpu/ppc/assembler_ppc.cpp b/src/hotspot/cpu/ppc/assembler_ppc.cpp
index 17cc095b08b..b646371b6d6 100644
--- a/src/hotspot/cpu/ppc/assembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/assembler_ppc.cpp
@@ -486,7 +486,7 @@ int Assembler::add_const_optimized(Register d, Register s, long x, Register tmp,
// Case 2: Can use addis.
if (xd == 0) {
short xc = rem & 0xFFFF; // 2nd 16-bit chunk.
- rem = (rem >> 16) + ((unsigned short)xd >> 15);
+ rem = (rem >> 16) + ((unsigned short)xc >> 15);
if (rem == 0) {
addis(d, s, xc);
return 0;
diff --git a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp
index 1078114ec2c..6a0150315f2 100644
--- a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp
@@ -24,6 +24,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
diff --git a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
index 8defa95eab2..2a5851b5911 100644
--- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
@@ -24,6 +24,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
diff --git a/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp b/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp
index 01627e3b118..fd743a164b4 100644
--- a/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp
@@ -24,6 +24,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
diff --git a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
index 9a6494b2ae2..0be72906049 100644
--- a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
@@ -24,6 +24,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/systemDictionary.hpp"
diff --git a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp
index 7af90fbf2a3..925b5d8d206 100644
--- a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp
@@ -24,6 +24,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
diff --git a/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp
index 4c5aba0dd76..e6b163e46be 100644
--- a/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp
@@ -27,6 +27,7 @@
#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
+#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
@@ -72,9 +73,9 @@ void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm
if (preserve2 != noreg) { __ std(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }
if (UseCompressedOops) {
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry), to, count);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry), to, count);
} else {
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry), to, count);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry), to, count);
}
slot_nr = 0;
@@ -98,7 +99,7 @@ void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* mas
__ save_LR_CR(R0);
__ push_frame(frame_size, R0);
if (preserve != noreg) { __ std(preserve, frame_size - 1 * wordSize, R1_SP); }
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry), addr, count);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry), addr, count);
if (preserve != noreg) { __ ld(preserve, frame_size - 1 * wordSize, R1_SP); }
__ addi(R1_SP, R1_SP, frame_size); // pop_frame();
__ restore_LR_CR(R0);
@@ -191,7 +192,7 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, Decorator
}
if (pre_val->is_volatile() && preloaded) { __ mr(nv_save, pre_val); } // Save pre_val across C call if it was preloaded.
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, R16_thread);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, R16_thread);
if (pre_val->is_volatile() && preloaded) { __ mr(pre_val, nv_save); } // restore
if (needs_frame) {
@@ -213,11 +214,9 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato
assert(sizeof(*ct->card_table()->byte_map_base()) == sizeof(jbyte), "adjust this code");
// Does store cross heap regions?
- if (G1RSBarrierRegionFilter) {
- __ xorr(tmp1, store_addr, new_val);
- __ srdi_(tmp1, tmp1, HeapRegion::LogOfHRGrainBytes);
- __ beq(CCR0, filtered);
- }
+ __ xorr(tmp1, store_addr, new_val);
+ __ srdi_(tmp1, tmp1, HeapRegion::LogOfHRGrainBytes);
+ __ beq(CCR0, filtered);
// Crosses regions, storing NULL?
if (not_null) {
@@ -272,7 +271,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato
__ bind(runtime);
// Save the live input values.
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), Rcard_addr, R16_thread);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), Rcard_addr, R16_thread);
__ bind(filtered);
}
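The G1RSBarrierRegionFilter conditional is dropped, making the cross-region filter unconditional. The test relies on heap regions being 2^LogOfHRGrainBytes-aligned: two addresses lie in the same region exactly when their high-order bits agree. In pseudo-C:

    // tmp1 = store_addr ^ new_val;
    // if ((tmp1 >> HeapRegion::LogOfHRGrainBytes) == 0)
    //   goto filtered;  // same region: the store cannot create a
    //                   // cross-region reference, so skip the card mark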
diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp
index 10a006cabc4..8466ebb958b 100644
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp
@@ -32,6 +32,7 @@
#include "code/codeCache.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
+#include "oops/accessDecorators.hpp"
#include "runtime/safepointMechanism.hpp"
inline bool MacroAssembler::is_ld_largeoffset(address a) {
@@ -332,6 +333,7 @@ inline void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorat
ON_UNKNOWN_OOP_REF)) == 0, "unsupported decorator");
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bool as_raw = (decorators & AS_RAW) != 0;
+ decorators = AccessInternal::decorator_fixup(decorators);
if (as_raw) {
bs->BarrierSetAssembler::store_at(this, decorators, type,
base, ind_or_offs, val,
@@ -349,6 +351,7 @@ inline void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorato
assert((decorators & ~(AS_RAW | IN_HEAP | IN_HEAP_ARRAY | IN_ROOT | OOP_NOT_NULL |
ON_PHANTOM_OOP_REF | ON_WEAK_OOP_REF)) == 0, "unsupported decorator");
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+ decorators = AccessInternal::decorator_fixup(decorators);
bool as_raw = (decorators & AS_RAW) != 0;
if (as_raw) {
bs->BarrierSetAssembler::load_at(this, decorators, type,
diff --git a/src/hotspot/cpu/ppc/nativeInst_ppc.cpp b/src/hotspot/cpu/ppc/nativeInst_ppc.cpp
index f962545702c..67289e91238 100644
--- a/src/hotspot/cpu/ppc/nativeInst_ppc.cpp
+++ b/src/hotspot/cpu/ppc/nativeInst_ppc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -30,7 +30,7 @@
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.hpp"
#include "runtime/handles.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
diff --git a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
index d22b0d50f94..4b15fb04e0c 100644
--- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
+++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
@@ -688,7 +688,7 @@ void TemplateTable::aaload() {
Rtemp2 = R31;
__ index_check(Rarray, R17_tos /* index */, UseCompressedOops ? 2 : LogBytesPerWord, Rtemp, Rload_addr);
do_oop_load(_masm, Rload_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos, Rtemp, Rtemp2,
- IN_HEAP | IN_HEAP_ARRAY);
+ IN_HEAP_ARRAY);
__ verify_oop(R17_tos);
//__ dcbt(R17_tos); // prefetch
}
@@ -1015,14 +1015,14 @@ void TemplateTable::aastore() {
__ bind(Lis_null);
do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), noreg /* 0 */,
- Rscratch, Rscratch2, Rscratch3, IN_HEAP | IN_HEAP_ARRAY);
+ Rscratch, Rscratch2, Rscratch3, IN_HEAP_ARRAY);
__ profile_null_seen(Rscratch, Rscratch2);
__ b(Ldone);
// Store is OK.
__ bind(Lstore_ok);
do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos /* value */,
- Rscratch, Rscratch2, Rscratch3, IN_HEAP | IN_HEAP_ARRAY | OOP_NOT_NULL);
+ Rscratch, Rscratch2, Rscratch3, IN_HEAP_ARRAY | OOP_NOT_NULL);
__ bind(Ldone);
// Adjust sp (pops array, index and value).
diff --git a/src/hotspot/cpu/ppc/vm_version_ppc.cpp b/src/hotspot/cpu/ppc/vm_version_ppc.cpp
index 1e5ede1a92c..4104b3d77f2 100644
--- a/src/hotspot/cpu/ppc/vm_version_ppc.cpp
+++ b/src/hotspot/cpu/ppc/vm_version_ppc.cpp
@@ -131,7 +131,7 @@ void VM_Version::initialize() {
// Create and print feature-string.
char buf[(num_features+1) * 16]; // Max 16 chars per feature.
jio_snprintf(buf, sizeof(buf),
- "ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+ "ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
(has_fsqrt() ? " fsqrt" : ""),
(has_isel() ? " isel" : ""),
(has_lxarxeh() ? " lxarxeh" : ""),
@@ -148,7 +148,8 @@ void VM_Version::initialize() {
(has_vsx() ? " vsx" : ""),
(has_ldbrx() ? " ldbrx" : ""),
(has_stdbrx() ? " stdbrx" : ""),
- (has_vshasig() ? " sha" : "")
+ (has_vshasig() ? " sha" : ""),
+ (has_tm() ? " rtm" : "")
// Make sure number of %s matches num_features!
);
_features_string = os::strdup(buf);
@@ -319,47 +320,14 @@ void VM_Version::initialize() {
if (PowerArchitecturePPC64 < 8) {
vm_exit_during_initialization("RTM instructions are not available on this CPU.");
}
- bool os_support_tm = false;
-#ifdef AIX
- // Actually, this is supported since AIX 7.1.. Unfortunately, this first
- // contained bugs, so that it can only be enabled after AIX 7.1.3.30.
- // The Java property os.version, which is used in RTM tests to decide
- // whether the feature is available, only knows major and minor versions.
- // We don't want to change this property, as user code might depend on it.
- // So the tests can not check on subversion 3.30, and we only enable RTM
- // with AIX 7.2.
- if (os::Aix::os_version() >= 0x07020000) { // At least AIX 7.2.
- os_support_tm = true;
- }
-#endif
-#if defined(LINUX) && defined(VM_LITTLE_ENDIAN)
- unsigned long auxv = getauxval(AT_HWCAP2);
- if (auxv & PPC_FEATURE2_HTM_NOSC) {
- if (auxv & PPC_FEATURE2_HAS_HTM) {
- // TM on POWER8 and POWER9 in compat mode (VM) is supported by the JVM.
- // TM on POWER9 DD2.1 NV (baremetal) is not supported by the JVM (TM on
- // POWER9 DD2.1 NV has a few issues that need a couple of firmware
- // and kernel workarounds, so there is a new mode only supported
- // on non-virtualized P9 machines called HTM with no Suspend Mode).
- // TM on POWER9 D2.2+ NV is not supported at all by Linux.
- os_support_tm = true;
- }
- }
-#endif
- if (!os_support_tm) {
+ if (!has_tm()) {
vm_exit_during_initialization("RTM is not supported on this OS version.");
}
}
if (UseRTMLocking) {
#if INCLUDE_RTM_OPT
- if (!UnlockExperimentalVMOptions) {
- vm_exit_during_initialization("UseRTMLocking is only available as experimental option on this platform. "
- "It must be enabled via -XX:+UnlockExperimentalVMOptions flag.");
- } else {
- warning("UseRTMLocking is only available as experimental option on this platform.");
- }
if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
// RTM locking should be used only for applications with
// high lock contention. For now we do not use it by default.
@@ -755,6 +723,37 @@ void VM_Version::determine_features() {
}
_features = features;
+
+#ifdef AIX
+ // Enabling this on AIX requires POWER8 or above and at least AIX 7.2.
+ // Actually, this has been supported since AIX 7.1. Unfortunately, the first
+ // releases contained bugs, so it can only be enabled after AIX 7.1.3.30.
+ // The Java property os.version, which is used in RTM tests to decide
+ // whether the feature is available, only knows major and minor versions.
+ // We don't want to change this property, as user code might depend on it.
+ // So the tests can not check on subversion 3.30, and we only enable RTM
+ // with AIX 7.2.
+ if (has_lqarx()) { // POWER8 or above
+ if (os::Aix::os_version() >= 0x07020000) { // At least AIX 7.2.
+ _features |= rtm_m;
+ }
+ }
+#endif
+#if defined(LINUX) && defined(VM_LITTLE_ENDIAN)
+ unsigned long auxv = getauxval(AT_HWCAP2);
+
+ if (auxv & PPC_FEATURE2_HTM_NOSC) {
+ if (auxv & PPC_FEATURE2_HAS_HTM) {
+ // TM on POWER8 and POWER9 in compat mode (VM) is supported by the JVM.
+ // TM on POWER9 DD2.1 NV (baremetal) is not supported by the JVM (TM on
+ // POWER9 DD2.1 NV has a few issues that need a couple of firmware
+ // and kernel workarounds, so there is a new mode only supported
+ // on non-virtualized P9 machines called HTM with no Suspend Mode).
+ // TM on POWER9 DD2.2+ NV is not supported at all by Linux.
+ _features |= rtm_m;
+ }
+ }
+#endif
}
// Power 8: Configure Data Stream Control Register.
diff --git a/src/hotspot/cpu/ppc/vm_version_ppc.hpp b/src/hotspot/cpu/ppc/vm_version_ppc.hpp
index 8e8eb43ab50..b4b6c8390c7 100644
--- a/src/hotspot/cpu/ppc/vm_version_ppc.hpp
+++ b/src/hotspot/cpu/ppc/vm_version_ppc.hpp
@@ -49,6 +49,7 @@ protected:
ldbrx,
stdbrx,
vshasig,
+ rtm,
num_features // last entry to count features
};
enum Feature_Flag_Set {
@@ -64,12 +65,13 @@ protected:
vand_m = (1 << vand ),
lqarx_m = (1 << lqarx ),
vcipher_m = (1 << vcipher),
- vshasig_m = (1 << vshasig),
vpmsumb_m = (1 << vpmsumb),
mfdscr_m = (1 << mfdscr ),
vsx_m = (1 << vsx ),
ldbrx_m = (1 << ldbrx ),
stdbrx_m = (1 << stdbrx ),
+ vshasig_m = (1 << vshasig),
+ rtm_m = (1 << rtm ),
all_features_m = (unsigned long)-1
};
@@ -107,6 +109,8 @@ public:
static bool has_stdbrx() { return (_features & stdbrx_m) != 0; }
static bool has_vshasig() { return (_features & vshasig_m) != 0; }
static bool has_mtfprd() { return has_vpmsumb(); } // alias for P8
+ // OS feature support
+ static bool has_tm() { return (_features & rtm_m) != 0; }
// Assembler testing
static void allow_all();
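With rtm as an ordinary feature bit, the OS/firmware probing moves into determine_features() and initialize() simply consults the mask. The pattern, sketched with an illustrative bit index:

    // rtm_m = 1 << rtm;                                // e.g. bit 17 (illustrative)
    // _features |= rtm_m;                              // set once while probing
    // static bool has_tm() { return (_features & rtm_m) != 0; }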
diff --git a/src/hotspot/cpu/s390/assembler_s390.hpp b/src/hotspot/cpu/s390/assembler_s390.hpp
index 0a133896936..8c2ba158a1d 100644
--- a/src/hotspot/cpu/s390/assembler_s390.hpp
+++ b/src/hotspot/cpu/s390/assembler_s390.hpp
@@ -2967,6 +2967,7 @@ class Assembler : public AbstractAssembler {
// branch never (nop)
inline void z_nop();
+ inline void nop(); // Used by shared code.
// ===============================================================================================
diff --git a/src/hotspot/cpu/s390/assembler_s390.inline.hpp b/src/hotspot/cpu/s390/assembler_s390.inline.hpp
index 230c01ac116..b96bd17fc92 100644
--- a/src/hotspot/cpu/s390/assembler_s390.inline.hpp
+++ b/src/hotspot/cpu/s390/assembler_s390.inline.hpp
@@ -1311,6 +1311,7 @@ inline void Assembler::z_clgij(Register r1, int64_t i2, branch_condition m3, Lab
// branch never (nop), branch always
inline void Assembler::z_nop() { z_bcr(bcondNop, Z_R0); }
+inline void Assembler::nop() { z_nop(); }
inline void Assembler::z_br(Register r2) { assert(r2 != Z_R0, "nop if target is Z_R0, use z_nop() instead"); z_bcr(bcondAlways, r2 ); }
inline void Assembler::z_exrl(Register r1, Label& L) { z_exrl(r1, target(L)); } // z10
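Hoisting the nop() alias from C1's MacroAssembler into the base Assembler (it is removed from c1_MacroAssembler_s390.hpp below) lets platform-independent code emit a nop without knowing the s390 mnemonic:

    // __ nop();  // in shared code; expands to z_bcr(bcondNop, Z_R0) on s390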
diff --git a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp
index 113f5701f67..f7ae2510506 100644
--- a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp
@@ -24,6 +24,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
diff --git a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
index 72b92780c52..996c255e8a8 100644
--- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
@@ -24,6 +24,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
diff --git a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp
index d65d0ea9baa..3f6d7eee689 100644
--- a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp
@@ -24,6 +24,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/systemDictionary.hpp"
diff --git a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.hpp b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.hpp
index e4e03af35d3..b8b63c9db59 100644
--- a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.hpp
+++ b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.hpp
@@ -95,8 +95,6 @@
void invalidate_registers(Register preserve1 = noreg, Register preserve2 = noreg,
Register preserve3 = noreg) PRODUCT_RETURN;
- void nop() { z_nop(); }
-
// This platform only uses signal-based null checks. The Label is not needed.
void null_check(Register r, Label *Lnull = NULL) { MacroAssembler::null_check(r); }
diff --git a/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp b/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp
index b2c5cc1fa3e..c5be06a90c9 100644
--- a/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp
@@ -24,6 +24,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
diff --git a/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp b/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp
index 06224db34d8..c039838953b 100644
--- a/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp
@@ -29,6 +29,7 @@
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
+#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
#include "interpreter/interp_masm.hpp"
@@ -66,9 +67,9 @@ void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm
RegisterSaver::save_live_registers(masm, RegisterSaver::arg_registers); // Creates frame.
if (UseCompressedOops) {
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry), addr, count);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry), addr, count);
} else {
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry), addr, count);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry), addr, count);
}
RegisterSaver::restore_live_registers(masm, RegisterSaver::arg_registers);
@@ -79,7 +80,7 @@ void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm
void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, bool do_return) {
- address entry_point = CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry);
+ address entry_point = CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry);
if (!do_return) {
assert_different_registers(addr, Z_R0_scratch); // would be destroyed by push_frame()
assert_different_registers(count, Z_R0_scratch); // would be destroyed by push_frame()
@@ -234,7 +235,7 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, Decorator
__ push_frame_abi160(0); // Will use Z_R0 as tmp.
// Rpre_val may be destroyed by push_frame().
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), Rpre_save, Z_thread);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), Rpre_save, Z_thread);
__ pop_frame();
__ restore_return_pc();
@@ -272,16 +273,14 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato
// Does store cross heap regions?
// It does if the two addresses specify different grain addresses.
- if (G1RSBarrierRegionFilter) {
- if (VM_Version::has_DistinctOpnds()) {
- __ z_xgrk(Rtmp1, Rstore_addr, Rnew_val);
- } else {
- __ z_lgr(Rtmp1, Rstore_addr);
- __ z_xgr(Rtmp1, Rnew_val);
- }
- __ z_srag(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes);
- __ z_bre(filtered);
+ if (VM_Version::has_DistinctOpnds()) {
+ __ z_xgrk(Rtmp1, Rstore_addr, Rnew_val);
+ } else {
+ __ z_lgr(Rtmp1, Rstore_addr);
+ __ z_xgr(Rtmp1, Rnew_val);
}
+ __ z_srag(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes);
+ __ z_bre(filtered);
// Crosses regions, storing NULL?
if (not_null) {
@@ -359,7 +358,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato
}
// Save the live input values.
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), Rcard_addr, Z_thread);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), Rcard_addr, Z_thread);
if (needs_frame) {
__ pop_frame();
diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.cpp b/src/hotspot/cpu/s390/macroAssembler_s390.cpp
index 6186428d533..f76720156ae 100644
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp
@@ -34,6 +34,7 @@
#include "gc/shared/cardTableBarrierSet.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
+#include "oops/accessDecorators.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "opto/compile.hpp"
@@ -4053,6 +4054,7 @@ void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
assert((decorators & ~(AS_RAW | IN_HEAP | IN_HEAP_ARRAY | IN_ROOT | OOP_NOT_NULL |
ON_UNKNOWN_OOP_REF)) == 0, "unsupported decorator");
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
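+ // Expand the decorator set with any implied defaults (such as the default
+ // memory ordering) before the individual bits are tested below.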
+ decorators = AccessInternal::decorator_fixup(decorators);
bool as_raw = (decorators & AS_RAW) != 0;
if (as_raw) {
bs->BarrierSetAssembler::store_at(this, decorators, type,
@@ -4071,6 +4073,7 @@ void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
assert((decorators & ~(AS_RAW | IN_HEAP | IN_HEAP_ARRAY | IN_ROOT | OOP_NOT_NULL |
ON_PHANTOM_OOP_REF | ON_WEAK_OOP_REF)) == 0, "unsupported decorator");
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+ decorators = AccessInternal::decorator_fixup(decorators);
bool as_raw = (decorators & AS_RAW) != 0;
if (as_raw) {
bs->BarrierSetAssembler::load_at(this, decorators, type,
diff --git a/src/hotspot/cpu/s390/templateTable_s390.cpp b/src/hotspot/cpu/s390/templateTable_s390.cpp
index ac8a06c73b5..994e2e36e1f 100644
--- a/src/hotspot/cpu/s390/templateTable_s390.cpp
+++ b/src/hotspot/cpu/s390/templateTable_s390.cpp
@@ -853,7 +853,7 @@ void TemplateTable::aaload() {
index_check(Z_tmp_1, index, shift);
// Now load array element.
do_oop_load(_masm, Address(Z_tmp_1, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT)), Z_tos,
- Z_tmp_2, Z_tmp_3, IN_HEAP | IN_HEAP_ARRAY);
+ Z_tmp_2, Z_tmp_3, IN_HEAP_ARRAY);
__ verify_oop(Z_tos);
}
@@ -1197,7 +1197,7 @@ void TemplateTable::aastore() {
// Store a NULL.
do_oop_store(_masm, Address(Rstore_addr, (intptr_t)0), noreg,
- tmp3, tmp2, tmp1, IN_HEAP | IN_HEAP_ARRAY);
+ tmp3, tmp2, tmp1, IN_HEAP_ARRAY);
__ z_bru(done);
// Come here on success.
@@ -1205,7 +1205,7 @@ void TemplateTable::aastore() {
// Now store using the appropriate barrier.
do_oop_store(_masm, Address(Rstore_addr, (intptr_t)0), Rvalue,
- tmp3, tmp2, tmp1, IN_HEAP | IN_HEAP_ARRAY | OOP_NOT_NULL);
+ tmp3, tmp2, tmp1, IN_HEAP_ARRAY | OOP_NOT_NULL);
// Pop stack arguments.
__ bind(done);
diff --git a/src/hotspot/cpu/sparc/assembler_sparc.hpp b/src/hotspot/cpu/sparc/assembler_sparc.hpp
index f8f5b11c9a6..01f0121bb42 100644
--- a/src/hotspot/cpu/sparc/assembler_sparc.hpp
+++ b/src/hotspot/cpu/sparc/assembler_sparc.hpp
@@ -783,7 +783,9 @@ class Assembler : public AbstractAssembler {
void flush() {
#ifdef VALIDATE_PIPELINE
assert(_delay_state == NoDelay, "Ending code with a delay-slot.");
+#ifdef COMPILER2
validate_no_pipeline_hazards();
+#endif
#endif
AbstractAssembler::flush();
}
diff --git a/src/hotspot/cpu/sparc/c1_CodeStubs_sparc.cpp b/src/hotspot/cpu/sparc/c1_CodeStubs_sparc.cpp
index b818c7a825b..6f9624c054c 100644
--- a/src/hotspot/cpu/sparc/c1_CodeStubs_sparc.cpp
+++ b/src/hotspot/cpu/sparc/c1_CodeStubs_sparc.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
diff --git a/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp b/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp
index 14a59271792..987fe52389a 100644
--- a/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp
+++ b/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
diff --git a/src/hotspot/cpu/sparc/c1_LIRGenerator_sparc.cpp b/src/hotspot/cpu/sparc/c1_LIRGenerator_sparc.cpp
index abafd0fb958..81630deaaec 100644
--- a/src/hotspot/cpu/sparc/c1_LIRGenerator_sparc.cpp
+++ b/src/hotspot/cpu/sparc/c1_LIRGenerator_sparc.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
diff --git a/src/hotspot/cpu/sparc/c1_MacroAssembler_sparc.cpp b/src/hotspot/cpu/sparc/c1_MacroAssembler_sparc.cpp
index 1e068db5527..27c525aad15 100644
--- a/src/hotspot/cpu/sparc/c1_MacroAssembler_sparc.cpp
+++ b/src/hotspot/cpu/sparc/c1_MacroAssembler_sparc.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/systemDictionary.hpp"
diff --git a/src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp b/src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp
index d5cd533c2a9..889662728d9 100644
--- a/src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp
+++ b/src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
diff --git a/src/hotspot/cpu/sparc/gc/g1/g1BarrierSetAssembler_sparc.cpp b/src/hotspot/cpu/sparc/gc/g1/g1BarrierSetAssembler_sparc.cpp
index 1de5dfea13e..90bc2b6d801 100644
--- a/src/hotspot/cpu/sparc/gc/g1/g1BarrierSetAssembler_sparc.cpp
+++ b/src/hotspot/cpu/sparc/gc/g1/g1BarrierSetAssembler_sparc.cpp
@@ -26,6 +26,7 @@
#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
+#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
@@ -68,8 +69,8 @@ void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm
}
__ mov(addr->after_save(), O0);
// Get the count into O1
- address slowpath = UseCompressedOops ? CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry)
- : CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry);
+ address slowpath = UseCompressedOops ? CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry)
+ : CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry);
__ call(slowpath);
__ delayed()->mov(count->after_save(), O1);
if (addr->is_global()) {
@@ -90,7 +91,7 @@ void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* mas
// Get some new fresh output registers.
__ save_frame(0);
__ mov(addr->after_save(), O0);
- __ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry));
+ __ call(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry));
__ delayed()->mov(count->after_save(), O1);
__ restore();
}
@@ -368,12 +369,10 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Register
  G1BarrierSet* bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
- if (G1RSBarrierRegionFilter) {
- __ xor3(store_addr, new_val, tmp);
- __ srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
+ __ xor3(store_addr, new_val, tmp);
+ __ srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
- __ cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
- }
+ __ cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
// If the "store_addr" register is an "in" or "local" register, move it to
// a scratch reg so we can pass it as an argument.
diff --git a/src/hotspot/cpu/sparc/gc/shared/barrierSetAssembler_sparc.cpp b/src/hotspot/cpu/sparc/gc/shared/barrierSetAssembler_sparc.cpp
index 9c8afd26d2d..3a99f4b04ac 100644
--- a/src/hotspot/cpu/sparc/gc/shared/barrierSetAssembler_sparc.cpp
+++ b/src/hotspot/cpu/sparc/gc/shared/barrierSetAssembler_sparc.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/jniHandles.hpp"
diff --git a/src/hotspot/cpu/sparc/interp_masm_sparc.cpp b/src/hotspot/cpu/sparc/interp_masm_sparc.cpp
index 5e14d32ba17..d6ff62c118b 100644
--- a/src/hotspot/cpu/sparc/interp_masm_sparc.cpp
+++ b/src/hotspot/cpu/sparc/interp_masm_sparc.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "interp_masm_sparc.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
diff --git a/src/hotspot/cpu/sparc/interp_masm_sparc.hpp b/src/hotspot/cpu/sparc/interp_masm_sparc.hpp
index 8ba7144dc3d..da3d920e1d4 100644
--- a/src/hotspot/cpu/sparc/interp_masm_sparc.hpp
+++ b/src/hotspot/cpu/sparc/interp_masm_sparc.hpp
@@ -25,7 +25,7 @@
#ifndef CPU_SPARC_VM_INTERP_MASM_SPARC_HPP
#define CPU_SPARC_VM_INTERP_MASM_SPARC_HPP
-#include "asm/macroAssembler.inline.hpp"
+#include "asm/macroAssembler.hpp"
#include "interpreter/invocationCounter.hpp"
// This file specializes the assember with interpreter-specific macros
diff --git a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp
index f2e1c901b02..17a28f36259 100644
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp
@@ -32,6 +32,7 @@
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
+#include "oops/accessDecorators.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
@@ -181,7 +182,7 @@ void MacroAssembler::resolve_jobject(Register value, Register tmp) {
br (Assembler::always, true, Assembler::pt, done);
delayed()->nop();
bind(not_weak);
- access_load_at(T_OBJECT, IN_ROOT | IN_CONCURRENT_ROOT,
+ access_load_at(T_OBJECT, IN_CONCURRENT_ROOT,
Address(value, 0), value, tmp);
verify_oop(value);
bind(done);
@@ -3338,6 +3339,12 @@ SkipIfEqual::~SkipIfEqual() {
_masm->bind(_label);
}
+void MacroAssembler::bang_stack_with_offset(int offset) {
+ // stack grows down, caller passes positive offset
+ assert(offset > 0, "must bang with negative offset");
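+ // SPARC V9 addresses the stack through a biased SP, so fold STACK_BIAS into
+ // the negated offset before storing through SP.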
+ set((-offset)+STACK_BIAS, G3_scratch);
+ st(G0, SP, G3_scratch);
+}
// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages. This clobbers tsp and scratch.
@@ -3395,7 +3402,7 @@ void MacroAssembler::reserved_stack_check() {
// ((OopHandle)result).resolve();
void MacroAssembler::resolve_oop_handle(Register result, Register tmp) {
// OopHandle::resolve is an indirection.
- access_load_at(T_OBJECT, IN_ROOT | IN_CONCURRENT_ROOT,
+ access_load_at(T_OBJECT, IN_CONCURRENT_ROOT,
Address(result, 0), result, tmp);
}
@@ -3440,6 +3447,7 @@ void MacroAssembler::store_klass_gap(Register s, Register d) {
void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
Register src, Address dst, Register tmp) {
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+ decorators = AccessInternal::decorator_fixup(decorators);
bool as_raw = (decorators & AS_RAW) != 0;
if (as_raw) {
bs->BarrierSetAssembler::store_at(this, decorators, type, src, dst, tmp);
@@ -3451,6 +3459,7 @@ void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
Address src, Register dst, Register tmp) {
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+ decorators = AccessInternal::decorator_fixup(decorators);
bool as_raw = (decorators & AS_RAW) != 0;
if (as_raw) {
bs->BarrierSetAssembler::load_at(this, decorators, type, src, dst, tmp);
diff --git a/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp b/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp
index 33dff6edb83..0918e234eaa 100644
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp
@@ -1303,7 +1303,7 @@ public:
// Stack overflow checking
// Note: this clobbers G3_scratch
- inline void bang_stack_with_offset(int offset);
+ void bang_stack_with_offset(int offset);
// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages. Clobbers tsp and scratch registers.
diff --git a/src/hotspot/cpu/sparc/macroAssembler_sparc.inline.hpp b/src/hotspot/cpu/sparc/macroAssembler_sparc.inline.hpp
index 16871d98629..60f3e713496 100644
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.inline.hpp
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -724,12 +724,4 @@ inline void MacroAssembler::swap(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); swap(a.base(), a.index(), d ); }
else { swap(a.base(), a.disp() + offset, d); }
}
-
-inline void MacroAssembler::bang_stack_with_offset(int offset) {
- // stack grows down, caller passes positive offset
- assert(offset > 0, "must bang with negative offset");
- set((-offset)+STACK_BIAS, G3_scratch);
- st(G0, SP, G3_scratch);
-}
-
#endif // CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP
diff --git a/src/hotspot/cpu/sparc/memset_with_concurrent_readers_sparc.cpp b/src/hotspot/cpu/sparc/memset_with_concurrent_readers_sparc.cpp
index 19871c6ab86..9e1674592ab 100644
--- a/src/hotspot/cpu/sparc/memset_with_concurrent_readers_sparc.cpp
+++ b/src/hotspot/cpu/sparc/memset_with_concurrent_readers_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/align.hpp"
diff --git a/src/hotspot/cpu/sparc/methodHandles_sparc.cpp b/src/hotspot/cpu/sparc/methodHandles_sparc.cpp
index f84ba5d0021..5232313c3db 100644
--- a/src/hotspot/cpu/sparc/methodHandles_sparc.cpp
+++ b/src/hotspot/cpu/sparc/methodHandles_sparc.cpp
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "jvm.h"
-#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
diff --git a/src/hotspot/cpu/sparc/relocInfo_sparc.cpp b/src/hotspot/cpu/sparc/relocInfo_sparc.cpp
index 102ce1bd90e..b8f0107af55 100644
--- a/src/hotspot/cpu/sparc/relocInfo_sparc.cpp
+++ b/src/hotspot/cpu/sparc/relocInfo_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "code/relocInfo.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/compressedOops.inline.hpp"
diff --git a/src/hotspot/cpu/sparc/templateInterpreterGenerator_sparc.cpp b/src/hotspot/cpu/sparc/templateInterpreterGenerator_sparc.cpp
index 81611e0fb7c..cc5a527aa81 100644
--- a/src/hotspot/cpu/sparc/templateInterpreterGenerator_sparc.cpp
+++ b/src/hotspot/cpu/sparc/templateInterpreterGenerator_sparc.cpp
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
diff --git a/src/hotspot/cpu/sparc/templateTable_sparc.cpp b/src/hotspot/cpu/sparc/templateTable_sparc.cpp
index e569b6f37a6..0286db431fc 100644
--- a/src/hotspot/cpu/sparc/templateTable_sparc.cpp
+++ b/src/hotspot/cpu/sparc/templateTable_sparc.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
index d915f3fda4d..281490d6b99 100644
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
@@ -1346,7 +1346,11 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
__ decode_heap_oop(dest->as_register());
}
#endif
- __ verify_oop(dest->as_register());
+
+ // Load barrier has not yet been applied, so ZGC can't verify the oop here
+ if (!UseZGC) {
+ __ verify_oop(dest->as_register());
+ }
} else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
#ifdef _LP64
if (UseCompressedClassPointers) {
diff --git a/src/hotspot/cpu/x86/gc/g1/g1BarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/g1/g1BarrierSetAssembler_x86.cpp
index f4082dbc6d8..e59fad963fb 100644
--- a/src/hotspot/cpu/x86/gc/g1/g1BarrierSetAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/gc/g1/g1BarrierSetAssembler_x86.cpp
@@ -26,6 +26,7 @@
#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
+#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
@@ -80,12 +81,12 @@ void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm
__ movptr(c_rarg1, count);
}
if (UseCompressedOops) {
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry), 2);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry), 2);
} else {
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry), 2);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry), 2);
}
#else
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry),
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry),
addr, count);
#endif
__ popa();
@@ -107,9 +108,9 @@ void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* mas
__ mov(c_rarg0, addr);
__ mov(c_rarg1, count);
}
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry), 2);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry), 2);
#else
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry),
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry),
addr, count);
#endif
__ popa();
@@ -238,9 +239,9 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
__ push(thread);
__ push(pre_val);
#endif
- __ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2);
+ __ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), 2);
} else {
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
}
NOT_LP64( __ pop(thread); )
@@ -333,10 +334,10 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
__ push(store_addr);
__ push(new_val);
#ifdef _LP64
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, r15_thread);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, r15_thread);
#else
__ push(thread);
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread);
__ pop(thread);
#endif
__ pop(new_val);
@@ -500,7 +501,7 @@ void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler*
// load the pre-value
__ load_parameter(0, rcx);
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), rcx, thread);
__ restore_live_registers(true);
@@ -577,7 +578,7 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
__ save_live_registers_no_oop_map(true);
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread);
__ restore_live_registers(true);
diff --git a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp
index 93cb4e946e2..df57d637d72 100644
--- a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp
@@ -34,6 +34,7 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators,
bool on_heap = (decorators & IN_HEAP) != 0;
bool on_root = (decorators & IN_ROOT) != 0;
bool oop_not_null = (decorators & OOP_NOT_NULL) != 0;
+ bool atomic = (decorators & MO_RELAXED) != 0;
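+ // MO_RELAXED requests an atomic (though unordered) access; on 32-bit x86 a
+ // 64-bit value must then go through the FPU to be loaded atomically.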
switch (type) {
case T_OBJECT:
@@ -58,6 +59,37 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators,
}
break;
}
+ case T_BOOLEAN: __ load_unsigned_byte(dst, src); break;
+ case T_BYTE: __ load_signed_byte(dst, src); break;
+ case T_CHAR: __ load_unsigned_short(dst, src); break;
+ case T_SHORT: __ load_signed_short(dst, src); break;
+ case T_INT: __ movl (dst, src); break;
+ case T_ADDRESS: __ movptr(dst, src); break;
+ case T_FLOAT:
+ assert(dst == noreg, "only to ftos");
+ __ load_float(src);
+ break;
+ case T_DOUBLE:
+ assert(dst == noreg, "only to dtos");
+ __ load_double(src);
+ break;
+ case T_LONG:
+ assert(dst == noreg, "only to ltos");
+#ifdef _LP64
+ __ movq(rax, src);
+#else
+ if (atomic) {
+ __ fild_d(src); // Must load atomically
+ __ subptr(rsp,2*wordSize); // Make space for store
+ __ fistp_d(Address(rsp,0));
+ __ pop(rax);
+ __ pop(rdx);
+ } else {
+ __ movl(rax, src);
+ __ movl(rdx, src.plus_disp(wordSize));
+ }
+#endif
+ break;
default: Unimplemented();
}
}
@@ -67,6 +99,7 @@ void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators
bool on_heap = (decorators & IN_HEAP) != 0;
bool on_root = (decorators & IN_ROOT) != 0;
bool oop_not_null = (decorators & OOP_NOT_NULL) != 0;
+ bool atomic = (decorators & MO_RELAXED) != 0;
switch (type) {
case T_OBJECT:
@@ -106,6 +139,50 @@ void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators
}
break;
}
+ case T_BOOLEAN:
+ __ andl(val, 0x1); // boolean is true if LSB is 1
+ __ movb(dst, val);
+ break;
+ case T_BYTE:
+ __ movb(dst, val);
+ break;
+ case T_SHORT:
+ __ movw(dst, val);
+ break;
+ case T_CHAR:
+ __ movw(dst, val);
+ break;
+ case T_INT:
+ __ movl(dst, val);
+ break;
+ case T_LONG:
+ assert(val == noreg, "only tos");
+#ifdef _LP64
+ __ movq(dst, rax);
+#else
+ if (atomic) {
+ __ push(rdx);
+ __ push(rax); // Must update atomically with FIST
+ __ fild_d(Address(rsp,0)); // So load into FPU register
+ __ fistp_d(dst); // and put into memory atomically
+ __ addptr(rsp, 2*wordSize);
+ } else {
+ __ movptr(dst, rax);
+ __ movptr(dst.plus_disp(wordSize), rdx);
+ }
+#endif
+ break;
+ case T_FLOAT:
+ assert(val == noreg, "only tos");
+ __ store_float(dst);
+ break;
+ case T_DOUBLE:
+ assert(val == noreg, "only tos");
+ __ store_double(dst);
+ break;
+ case T_ADDRESS:
+ __ movptr(dst, val);
+ break;
default: Unimplemented();
}
}
diff --git a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp
new file mode 100644
index 00000000000..e8b4b7414d0
--- /dev/null
+++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp
@@ -0,0 +1,458 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "gc/z/zBarrier.inline.hpp"
+#include "gc/z/zBarrierSet.hpp"
+#include "gc/z/zBarrierSetAssembler.hpp"
+#include "gc/z/zBarrierSetRuntime.hpp"
+#include "runtime/stubCodeGenerator.hpp"
+#include "utilities/macros.hpp"
+#ifdef COMPILER1
+#include "c1/c1_LIRAssembler.hpp"
+#include "c1/c1_MacroAssembler.hpp"
+#include "gc/z/c1/zBarrierSetC1.hpp"
+#endif // COMPILER1
+
+#undef __
+#define __ masm->
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+static void call_vm(MacroAssembler* masm,
+ address entry_point,
+ Register arg0,
+ Register arg1) {
+ // Setup arguments
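+ // Watch for aliasing: moving the arguments into c_rarg0/c_rarg1 must not
+ // clobber a value that is still needed. Fully swapped inputs are exchanged,
+ // and otherwise the moves are ordered so nothing is overwritten too early.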
+ if (arg1 == c_rarg0) {
+ if (arg0 == c_rarg1) {
+ __ xchgptr(c_rarg1, c_rarg0);
+ } else {
+ __ movptr(c_rarg1, arg1);
+ __ movptr(c_rarg0, arg0);
+ }
+ } else {
+ if (arg0 != c_rarg0) {
+ __ movptr(c_rarg0, arg0);
+ }
+ if (arg1 != c_rarg1) {
+ __ movptr(c_rarg1, arg1);
+ }
+ }
+
+ // Call VM
+ __ MacroAssembler::call_VM_leaf_base(entry_point, 2);
+}
+
+void ZBarrierSetAssembler::load_at(MacroAssembler* masm,
+ DecoratorSet decorators,
+ BasicType type,
+ Register dst,
+ Address src,
+ Register tmp1,
+ Register tmp_thread) {
+ if (!ZBarrierSet::barrier_needed(decorators, type)) {
+ // Barrier not needed
+ BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
+ return;
+ }
+
+ BLOCK_COMMENT("ZBarrierSetAssembler::load_at {");
+
+ // Allocate scratch register
+ Register scratch = tmp1;
+ if (tmp1 == noreg) {
+ scratch = r12;
+ __ push(scratch);
+ }
+
+ assert_different_registers(dst, scratch);
+
+ Label done;
+
+ //
+ // Fast Path
+ //
+
+ // Load address
+ __ lea(scratch, src);
+
+ // Load oop at address
+ __ movptr(dst, Address(scratch, 0));
+
+ // Test address bad mask
+ __ testptr(dst, address_bad_mask_from_thread(r15_thread));
+ __ jcc(Assembler::zero, done);
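+ // A zero result means no bad-mask bit is set in the loaded oop, i.e. the
+ // reference is already good for the current GC phase and needs no healing.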
+
+ //
+ // Slow path
+ //
+
+ // Save registers
+ __ push(rax);
+ __ push(rcx);
+ __ push(rdx);
+ __ push(rdi);
+ __ push(rsi);
+ __ push(r8);
+ __ push(r9);
+ __ push(r10);
+ __ push(r11);
+
+ // We may end up here from generate_native_wrapper, in which case the method
+ // may have float arguments that must be spilled before calling the VM
+ // runtime leaf. Calls coming from the interpreter pass all floats on the
+ // stack.
+ assert(Argument::n_float_register_parameters_j == 8, "Assumption");
+ const int xmm_size = wordSize * 2;
+ const int xmm_spill_size = xmm_size * Argument::n_float_register_parameters_j;
+ __ subptr(rsp, xmm_spill_size);
+ __ movdqu(Address(rsp, xmm_size * 7), xmm7);
+ __ movdqu(Address(rsp, xmm_size * 6), xmm6);
+ __ movdqu(Address(rsp, xmm_size * 5), xmm5);
+ __ movdqu(Address(rsp, xmm_size * 4), xmm4);
+ __ movdqu(Address(rsp, xmm_size * 3), xmm3);
+ __ movdqu(Address(rsp, xmm_size * 2), xmm2);
+ __ movdqu(Address(rsp, xmm_size * 1), xmm1);
+ __ movdqu(Address(rsp, xmm_size * 0), xmm0);
+
+ // Call VM
+ call_vm(masm, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), dst, scratch);
+
+ // Restore registers
+ __ movdqu(xmm0, Address(rsp, xmm_size * 0));
+ __ movdqu(xmm1, Address(rsp, xmm_size * 1));
+ __ movdqu(xmm2, Address(rsp, xmm_size * 2));
+ __ movdqu(xmm3, Address(rsp, xmm_size * 3));
+ __ movdqu(xmm4, Address(rsp, xmm_size * 4));
+ __ movdqu(xmm5, Address(rsp, xmm_size * 5));
+ __ movdqu(xmm6, Address(rsp, xmm_size * 6));
+ __ movdqu(xmm7, Address(rsp, xmm_size * 7));
+ __ addptr(rsp, xmm_spill_size);
+
+ __ pop(r11);
+ __ pop(r10);
+ __ pop(r9);
+ __ pop(r8);
+ __ pop(rsi);
+ __ pop(rdi);
+ __ pop(rdx);
+ __ pop(rcx);
+
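+ // The healed oop is returned in rax. If rax is also the destination, simply
+ // discard the copy saved on the stack; otherwise move the result over and
+ // restore rax.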
+ if (dst == rax) {
+ __ addptr(rsp, wordSize);
+ } else {
+ __ movptr(dst, rax);
+ __ pop(rax);
+ }
+
+ __ bind(done);
+
+ // Restore scratch register
+ if (tmp1 == noreg) {
+ __ pop(scratch);
+ }
+
+ BLOCK_COMMENT("} ZBarrierSetAssembler::load_at");
+}
+
+#ifdef ASSERT
+
+void ZBarrierSetAssembler::store_at(MacroAssembler* masm,
+ DecoratorSet decorators,
+ BasicType type,
+ Address dst,
+ Register src,
+ Register tmp1,
+ Register tmp2) {
+ BLOCK_COMMENT("ZBarrierSetAssembler::store_at {");
+
+ // Verify oop store
+ if (type == T_OBJECT || type == T_ARRAY) {
+ // Note that src could be noreg, which means we
+ // are storing null and can skip verification.
+ if (src != noreg) {
+ Label done;
+ __ testptr(src, address_bad_mask_from_thread(r15_thread));
+ __ jcc(Assembler::zero, done);
+ __ stop("Verify oop store failed");
+ __ should_not_reach_here();
+ __ bind(done);
+ }
+ }
+
+ // Store value
+ BarrierSetAssembler::store_at(masm, decorators, type, dst, src, tmp1, tmp2);
+
+ BLOCK_COMMENT("} ZBarrierSetAssembler::store_at");
+}
+
+#endif // ASSERT
+
+void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm,
+ DecoratorSet decorators,
+ BasicType type,
+ Register src,
+ Register dst,
+ Register count) {
+ if (!ZBarrierSet::barrier_needed(decorators, type)) {
+ // Barrier not needed
+ return;
+ }
+
+ BLOCK_COMMENT("ZBarrierSetAssembler::arraycopy_prologue {");
+
+ // Save registers
+ __ pusha();
+
+ // Call VM
+ call_vm(masm, ZBarrierSetRuntime::load_barrier_on_oop_array_addr(), src, count);
+
+ // Restore registers
+ __ popa();
+
+ BLOCK_COMMENT("} ZBarrierSetAssembler::arraycopy_prologue");
+}
+
+void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
+ Register jni_env,
+ Register obj,
+ Register tmp,
+ Label& slowpath) {
+ BLOCK_COMMENT("ZBarrierSetAssembler::try_resolve_jobject_in_native {");
+
+ // Resolve jobject
+ BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);
+
+ // Test address bad mask
+ __ testptr(obj, address_bad_mask_from_jni_env(jni_env));
+ __ jcc(Assembler::notZero, slowpath);
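+ // A non-zero intersection with the bad mask means the oop would need
+ // healing, which cannot be done while running in native, so bail out to the
+ // slow path.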
+
+ BLOCK_COMMENT("} ZBarrierSetAssembler::try_resolve_jobject_in_native");
+}
+
+#ifdef COMPILER1
+
+#undef __
+#define __ ce->masm()->
+
+void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
+ LIR_Opr ref) const {
+ __ testptr(ref->as_register(), address_bad_mask_from_thread(r15_thread));
+}
+
+void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
+ ZLoadBarrierStubC1* stub) const {
+ // Stub entry
+ __ bind(*stub->entry());
+
+ Register ref = stub->ref()->as_register();
+ Register ref_addr = noreg;
+
+ if (stub->ref_addr()->is_register()) {
+ // Address already in register
+ ref_addr = stub->ref_addr()->as_pointer_register();
+ } else {
+ // Load address into tmp register
+ ce->leal(stub->ref_addr(), stub->tmp(), stub->patch_code(), stub->patch_info());
+ ref_addr = stub->tmp()->as_pointer_register();
+ }
+
+ assert_different_registers(ref, ref_addr, noreg);
+
+ // Save rax unless it is the result register
+ if (ref != rax) {
+ __ push(rax);
+ }
+
+ // Setup arguments and call runtime stub
+ __ subptr(rsp, 2 * BytesPerWord);
+ ce->store_parameter(ref_addr, 1);
+ ce->store_parameter(ref, 0);
+ __ call(RuntimeAddress(stub->runtime_stub()));
+ __ addptr(rsp, 2 * BytesPerWord);
+
+ // Verify result
+ __ verify_oop(rax, "Bad oop");
+
+ // Restore rax unless it is the result register
+ if (ref != rax) {
+ __ movptr(ref, rax);
+ __ pop(rax);
+ }
+
+ // Stub exit
+ __ jmp(*stub->continuation());
+}
+
+#undef __
+#define __ sasm->
+
+void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
+ DecoratorSet decorators) const {
+ // Enter and save registers
+ __ enter();
+ __ save_live_registers_no_oop_map(true /* save_fpu_registers */);
+
+ // Setup arguments
+ __ load_parameter(1, c_rarg1);
+ __ load_parameter(0, c_rarg0);
+
+ // Call VM
+ __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);
+
+ // Restore registers and return
+ __ restore_live_registers_except_rax(true /* restore_fpu_registers */);
+ __ leave();
+ __ ret(0);
+}
+
+#endif // COMPILER1
+
+#undef __
+#define __ cgen->assembler()->
+
+// Generates a register-specific stub for calling
+// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
+// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
+//
+// The raddr register serves as both input and output for this stub. When the
+// stub is called, raddr contains the object field address (oop*) from which
+// the bad oop was loaded, causing the slow path to be taken. On return, raddr
+// contains the good/healed oop returned from
+// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or
+// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
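+//
+// A call site therefore looks roughly like this (a sketch, not the code
+// emitted verbatim; register and label names are illustrative):
+//
+//   mov   rbx, [field]                  ; load the oop
+//   test  rbx, [r15 + bad_mask_offset]  ; check its metadata bits
+//   jz    done
+//   lea   rbx, [field]                  ; stub input: field address in raddr
+//   call  load_barrier_stub_rbx         ; stub output: healed oop in raddr
+// done: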
+static address generate_load_barrier_stub(StubCodeGenerator* cgen, Register raddr, DecoratorSet decorators) {
+ // Don't generate stub for invalid registers
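+ // (rsp can never hold an oop field address, and r12 and r15 are reserved by
+ // the VM here; r15 is the thread register)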
+ if (raddr == rsp || raddr == r12 || raddr == r15) {
+ return NULL;
+ }
+
+ // Create stub name
+ char name[64];
+ const bool weak = (decorators & ON_WEAK_OOP_REF) != 0;
+ os::snprintf(name, sizeof(name), "load_barrier%s_stub_%s", weak ? "_weak" : "", raddr->name());
+
+ __ align(CodeEntryAlignment);
+ StubCodeMark mark(cgen, "StubRoutines", os::strdup(name, mtCode));
+ address start = __ pc();
+
+ // Save live registers
+ if (raddr != rax) {
+ __ push(rax);
+ }
+ if (raddr != rcx) {
+ __ push(rcx);
+ }
+ if (raddr != rdx) {
+ __ push(rdx);
+ }
+ if (raddr != rsi) {
+ __ push(rsi);
+ }
+ if (raddr != rdi) {
+ __ push(rdi);
+ }
+ if (raddr != r8) {
+ __ push(r8);
+ }
+ if (raddr != r9) {
+ __ push(r9);
+ }
+ if (raddr != r10) {
+ __ push(r10);
+ }
+ if (raddr != r11) {
+ __ push(r11);
+ }
+
+ // Setup arguments
+ if (c_rarg1 != raddr) {
+ __ movq(c_rarg1, raddr);
+ }
+ __ movq(c_rarg0, Address(raddr, 0));
+
+ // Call barrier function
+ __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);
+
+ // Move result returned in rax to raddr, if needed
+ if (raddr != rax) {
+ __ movq(raddr, rax);
+ }
+
+ // Restore saved registers
+ if (raddr != r11) {
+ __ pop(r11);
+ }
+ if (raddr != r10) {
+ __ pop(r10);
+ }
+ if (raddr != r9) {
+ __ pop(r9);
+ }
+ if (raddr != r8) {
+ __ pop(r8);
+ }
+ if (raddr != rdi) {
+ __ pop(rdi);
+ }
+ if (raddr != rsi) {
+ __ pop(rsi);
+ }
+ if (raddr != rdx) {
+ __ pop(rdx);
+ }
+ if (raddr != rcx) {
+ __ pop(rcx);
+ }
+ if (raddr != rax) {
+ __ pop(rax);
+ }
+
+ __ ret(0);
+
+ return start;
+}
+
+#undef __
+
+void ZBarrierSetAssembler::barrier_stubs_init() {
+ // Load barrier stubs
+ int stub_code_size = 256 * 16; // Rough estimate of code size
+
+ ResourceMark rm;
+ BufferBlob* bb = BufferBlob::create("zgc_load_barrier_stubs", stub_code_size);
+ CodeBuffer buf(bb);
+ StubCodeGenerator cgen(&buf);
+
+ Register rr = as_Register(0);
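+ // Walk all general-purpose registers, generating one strong and one weak
+ // stub per register. Slots for registers that can never hold an oop field
+ // address remain NULL.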
+ for (int i = 0; i < RegisterImpl::number_of_registers; i++) {
+ _load_barrier_slow_stub[i] = generate_load_barrier_stub(&cgen, rr, ON_STRONG_OOP_REF);
+ _load_barrier_weak_slow_stub[i] = generate_load_barrier_stub(&cgen, rr, ON_WEAK_OOP_REF);
+ rr = rr->successor();
+ }
+}
diff --git a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp
new file mode 100644
index 00000000000..3687754e71a
--- /dev/null
+++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP
+#define CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP
+
+#ifdef COMPILER1
+class LIR_Assembler;
+class LIR_OprDesc;
+typedef LIR_OprDesc* LIR_Opr;
+class StubAssembler;
+class ZLoadBarrierStubC1;
+#endif // COMPILER1
+
+class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase {
+ address _load_barrier_slow_stub[RegisterImpl::number_of_registers];
+ address _load_barrier_weak_slow_stub[RegisterImpl::number_of_registers];
+
+public:
+ ZBarrierSetAssembler() :
+ _load_barrier_slow_stub(),
+ _load_barrier_weak_slow_stub() {}
+
+ address load_barrier_slow_stub(Register reg) { return _load_barrier_slow_stub[reg->encoding()]; }
+ address load_barrier_weak_slow_stub(Register reg) { return _load_barrier_weak_slow_stub[reg->encoding()]; }
+
+ virtual void load_at(MacroAssembler* masm,
+ DecoratorSet decorators,
+ BasicType type,
+ Register dst,
+ Address src,
+ Register tmp1,
+ Register tmp_thread);
+
+#ifdef ASSERT
+ virtual void store_at(MacroAssembler* masm,
+ DecoratorSet decorators,
+ BasicType type,
+ Address dst,
+ Register src,
+ Register tmp1,
+ Register tmp2);
+#endif // ASSERT
+
+ virtual void arraycopy_prologue(MacroAssembler* masm,
+ DecoratorSet decorators,
+ BasicType type,
+ Register src,
+ Register dst,
+ Register count);
+
+ virtual void try_resolve_jobject_in_native(MacroAssembler* masm,
+ Register jni_env,
+ Register obj,
+ Register tmp,
+ Label& slowpath);
+
+#ifdef COMPILER1
+ void generate_c1_load_barrier_test(LIR_Assembler* ce,
+ LIR_Opr ref) const;
+
+ void generate_c1_load_barrier_stub(LIR_Assembler* ce,
+ ZLoadBarrierStubC1* stub) const;
+
+ void generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
+ DecoratorSet decorators) const;
+#endif // COMPILER1
+
+ virtual void barrier_stubs_init();
+};
+
+#endif // CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP
diff --git a/src/hotspot/cpu/x86/interp_masm_x86.hpp b/src/hotspot/cpu/x86/interp_masm_x86.hpp
index a7ce2d966fc..ca50de919e9 100644
--- a/src/hotspot/cpu/x86/interp_masm_x86.hpp
+++ b/src/hotspot/cpu/x86/interp_masm_x86.hpp
@@ -26,7 +26,6 @@
#define CPU_X86_VM_INTERP_MASM_X86_HPP
#include "asm/macroAssembler.hpp"
-#include "asm/macroAssembler.inline.hpp"
#include "interpreter/invocationCounter.hpp"
#include "runtime/frame.hpp"
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
index 7830873de17..df90ffbfbe8 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
@@ -33,7 +33,7 @@
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
-#include "oops/access.hpp"
+#include "oops/accessDecorators.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
@@ -5259,7 +5259,7 @@ void MacroAssembler::resolve_jobject(Register value,
jmp(done);
bind(not_weak);
// Resolve (untagged) jobject.
- access_load_at(T_OBJECT, IN_ROOT | IN_CONCURRENT_ROOT,
+ access_load_at(T_OBJECT, IN_CONCURRENT_ROOT,
value, Address(value, 0), tmp, thread);
verify_oop(value);
bind(done);
@@ -6281,7 +6281,7 @@ void MacroAssembler::resolve_oop_handle(Register result, Register tmp) {
// Only 64 bit platforms support GCs that require a tmp register
// Only IN_HEAP loads require a thread_tmp register
// OopHandle::resolve is an indirection like jobject.
- access_load_at(T_OBJECT, IN_ROOT | IN_CONCURRENT_ROOT,
+ access_load_at(T_OBJECT, IN_CONCURRENT_ROOT,
result, Address(result, 0), tmp, /*tmp_thread*/noreg);
}
@@ -6323,6 +6323,7 @@ void MacroAssembler::store_klass(Register dst, Register src) {
void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
Register tmp1, Register thread_tmp) {
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+ decorators = AccessInternal::decorator_fixup(decorators);
bool as_raw = (decorators & AS_RAW) != 0;
if (as_raw) {
bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
@@ -6334,6 +6335,7 @@ void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Reg
void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src,
Register tmp1, Register tmp2) {
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+ decorators = AccessInternal::decorator_fixup(decorators);
bool as_raw = (decorators & AS_RAW) != 0;
if (as_raw) {
bs->BarrierSetAssembler::store_at(this, decorators, type, dst, src, tmp1, tmp2);
diff --git a/src/hotspot/cpu/x86/methodHandles_x86.cpp b/src/hotspot/cpu/x86/methodHandles_x86.cpp
index dc53107c735..5b2bdba733c 100644
--- a/src/hotspot/cpu/x86/methodHandles_x86.cpp
+++ b/src/hotspot/cpu/x86/methodHandles_x86.cpp
@@ -175,7 +175,9 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
__ verify_oop(method_temp);
__ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())), temp2);
__ verify_oop(method_temp);
- __ movptr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())));
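+ // vmtarget is a T_ADDRESS (Method*) slot in a heap object; load it through
+ // the access API so such raw-word heap loads stay visible to collectors
+ // that need to interpose on them. The vmtarget/vmindex loads below get the
+ // same treatment.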
+ __ access_load_at(T_ADDRESS, IN_HEAP, method_temp,
+ Address(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())),
+ noreg, noreg);
if (VerifyMethodHandles && !for_compiler_entry) {
// make sure recv is already on stack
@@ -390,7 +392,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
}
__ load_heap_oop(rbx_method, member_vmtarget);
- __ movptr(rbx_method, vmtarget_method);
+ __ access_load_at(T_ADDRESS, IN_HEAP, rbx_method, vmtarget_method, noreg, noreg);
break;
case vmIntrinsics::_linkToStatic:
@@ -398,7 +400,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
}
__ load_heap_oop(rbx_method, member_vmtarget);
- __ movptr(rbx_method, vmtarget_method);
+ __ access_load_at(T_ADDRESS, IN_HEAP, rbx_method, vmtarget_method, noreg, noreg);
break;
case vmIntrinsics::_linkToVirtual:
@@ -412,7 +414,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
// pick out the vtable index from the MemberName, and then we can discard it:
Register temp2_index = temp2;
- __ movptr(temp2_index, member_vmindex);
+ __ access_load_at(T_ADDRESS, IN_HEAP, temp2_index, member_vmindex, noreg, noreg);
if (VerifyMethodHandles) {
Label L_index_ok;
@@ -446,7 +448,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
__ verify_klass_ptr(temp3_intf);
Register rbx_index = rbx_method;
- __ movptr(rbx_index, member_vmindex);
+ __ access_load_at(T_ADDRESS, IN_HEAP, rbx_index, member_vmindex, noreg, noreg);
if (VerifyMethodHandles) {
Label L;
__ cmpl(rbx_index, 0);
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
index 712e706c132..0b86577f98c 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
@@ -44,6 +44,9 @@
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
+#if INCLUDE_ZGC
+#include "gc/z/zThreadLocalData.hpp"
+#endif
// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
@@ -1026,6 +1029,15 @@ class StubGenerator: public StubCodeGenerator {
// make sure object is 'reasonable'
__ testptr(rax, rax);
__ jcc(Assembler::zero, exit); // if obj is NULL it is OK
+
+#if INCLUDE_ZGC
+ if (UseZGC) {
+ // Check if metadata bits indicate a bad oop
+ __ testptr(rax, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
+ __ jcc(Assembler::notZero, error);
+ }
+#endif
+
// Check if the oop is in the right area of memory
__ movptr(c_rarg2, rax);
__ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
diff --git a/src/hotspot/cpu/x86/templateTable_x86.cpp b/src/hotspot/cpu/x86/templateTable_x86.cpp
index 8997c4b1ea4..5f1e7ba46c2 100644
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp
+++ b/src/hotspot/cpu/x86/templateTable_x86.cpp
@@ -770,9 +770,10 @@ void TemplateTable::iaload() {
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
- __ movl(rax, Address(rdx, rax,
- Address::times_4,
- arrayOopDesc::base_offset_in_bytes(T_INT)));
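+ // Route the load through the access API so the active BarrierSetAssembler
+ // chooses the load sequence instead of a hard-coded move. The remaining
+ // primitive array loads and stores below follow the same pattern.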
+ __ access_load_at(T_INT, IN_HEAP | IN_HEAP_ARRAY, rax,
+ Address(rdx, rax, Address::times_4,
+ arrayOopDesc::base_offset_in_bytes(T_INT)),
+ noreg, noreg);
}
void TemplateTable::laload() {
@@ -782,8 +783,10 @@ void TemplateTable::laload() {
index_check(rdx, rax); // kills rbx
NOT_LP64(__ mov(rbx, rax));
// rbx: index
- __ movptr(rax, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize));
- NOT_LP64(__ movl(rdx, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize)));
+ __ access_load_at(T_LONG, IN_HEAP | IN_HEAP_ARRAY, noreg /* ltos */,
+ Address(rdx, rbx, Address::times_8,
+ arrayOopDesc::base_offset_in_bytes(T_LONG)),
+ noreg, noreg);
}
@@ -793,9 +796,11 @@ void TemplateTable::faload() {
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
- __ load_float(Address(rdx, rax,
- Address::times_4,
- arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
+ __ access_load_at(T_FLOAT, IN_HEAP | IN_HEAP_ARRAY, noreg /* ftos */,
+ Address(rdx, rax,
+ Address::times_4,
+ arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
+ noreg, noreg);
}
void TemplateTable::daload() {
@@ -803,9 +808,11 @@ void TemplateTable::daload() {
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
- __ load_double(Address(rdx, rax,
- Address::times_8,
- arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
+ __ access_load_at(T_DOUBLE, IN_HEAP | IN_HEAP_ARRAY, noreg /* dtos */,
+ Address(rdx, rax,
+ Address::times_8,
+ arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
+ noreg, noreg);
}
void TemplateTable::aaload() {
@@ -826,7 +833,9 @@ void TemplateTable::baload() {
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
- __ load_signed_byte(rax, Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
+ __ access_load_at(T_BYTE, IN_HEAP | IN_HEAP_ARRAY, rax,
+ Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)),
+ noreg, noreg);
}
void TemplateTable::caload() {
@@ -834,7 +843,9 @@ void TemplateTable::caload() {
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
- __ load_unsigned_short(rax, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
+ __ access_load_at(T_CHAR, IN_HEAP | IN_HEAP_ARRAY, rax,
+ Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
+ noreg, noreg);
}
// iload followed by caload frequent pair
@@ -847,10 +858,9 @@ void TemplateTable::fast_icaload() {
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
- __ load_unsigned_short(rax,
- Address(rdx, rax,
- Address::times_2,
- arrayOopDesc::base_offset_in_bytes(T_CHAR)));
+ __ access_load_at(T_CHAR, IN_HEAP | IN_HEAP_ARRAY, rax,
+ Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
+ noreg, noreg);
}
@@ -859,7 +869,9 @@ void TemplateTable::saload() {
// rax: index
// rdx: array
index_check(rdx, rax); // kills rbx
- __ load_signed_short(rax, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
+ __ access_load_at(T_SHORT, IN_HEAP | IN_HEAP_ARRAY, rax,
+ Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)),
+ noreg, noreg);
}
void TemplateTable::iload(int n) {
@@ -1051,10 +1063,10 @@ void TemplateTable::iastore() {
// rbx: index
// rdx: array
index_check(rdx, rbx); // prefer index in rbx
- __ movl(Address(rdx, rbx,
- Address::times_4,
- arrayOopDesc::base_offset_in_bytes(T_INT)),
- rax);
+ __ access_store_at(T_INT, IN_HEAP | IN_HEAP_ARRAY,
+ Address(rdx, rbx, Address::times_4,
+ arrayOopDesc::base_offset_in_bytes(T_INT)),
+ rax, noreg, noreg);
}
void TemplateTable::lastore() {
@@ -1065,8 +1077,10 @@ void TemplateTable::lastore() {
// rdx: high(value)
index_check(rcx, rbx); // prefer index in rbx
// rbx: index
- __ movptr(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize), rax);
- NOT_LP64(__ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize), rdx));
+ __ access_store_at(T_LONG, IN_HEAP | IN_HEAP_ARRAY,
+ Address(rcx, rbx, Address::times_8,
+ arrayOopDesc::base_offset_in_bytes(T_LONG)),
+ noreg /* ltos */, noreg, noreg);
}
@@ -1077,7 +1091,10 @@ void TemplateTable::fastore() {
// rbx: index
// rdx: array
index_check(rdx, rbx); // prefer index in rbx
- __ store_float(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
+ __ access_store_at(T_FLOAT, IN_HEAP | IN_HEAP_ARRAY,
+ Address(rdx, rbx, Address::times_4,
+ arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
+ noreg /* ftos */, noreg, noreg);
}
void TemplateTable::dastore() {
@@ -1087,7 +1104,10 @@ void TemplateTable::dastore() {
// rbx: index
// rdx: array
index_check(rdx, rbx); // prefer index in rbx
- __ store_double(Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
+ __ access_store_at(T_DOUBLE, IN_HEAP | IN_HEAP_ARRAY,
+ Address(rdx, rbx, Address::times_8,
+ arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
+ noreg /* dtos */, noreg, noreg);
}
void TemplateTable::aastore() {
@@ -1160,10 +1180,10 @@ void TemplateTable::bastore() {
__ jccb(Assembler::zero, L_skip);
__ andl(rax, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
__ bind(L_skip);
- __ movb(Address(rdx, rbx,
- Address::times_1,
- arrayOopDesc::base_offset_in_bytes(T_BYTE)),
- rax);
+ __ access_store_at(T_BYTE, IN_HEAP | IN_HEAP_ARRAY,
+ Address(rdx, rbx, Address::times_1,
+ arrayOopDesc::base_offset_in_bytes(T_BYTE)),
+ rax, noreg, noreg);
}
void TemplateTable::castore() {
@@ -1173,10 +1193,10 @@ void TemplateTable::castore() {
// rbx: index
// rdx: array
index_check(rdx, rbx); // prefer index in rbx
- __ movw(Address(rdx, rbx,
- Address::times_2,
- arrayOopDesc::base_offset_in_bytes(T_CHAR)),
- rax);
+ __ access_store_at(T_CHAR, IN_HEAP | IN_HEAP_ARRAY,
+ Address(rdx, rbx, Address::times_2,
+ arrayOopDesc::base_offset_in_bytes(T_CHAR)),
+ rax, noreg, noreg);
}
@@ -2852,7 +2872,6 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
if (!is_static) pop_and_check_object(obj);
const Address field(obj, off, Address::times_1, 0*wordSize);
- NOT_LP64(const Address hi(obj, off, Address::times_1, 1*wordSize));
Label Done, notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
@@ -2864,7 +2883,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
__ jcc(Assembler::notZero, notByte);
// btos
- __ load_signed_byte(rax, field);
+ __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg, noreg);
__ push(btos);
// Rewrite bytecode to be faster
if (!is_static && rc == may_rewrite) {
@@ -2877,7 +2896,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
__ jcc(Assembler::notEqual, notBool);
// ztos (same code as btos)
- __ load_signed_byte(rax, field);
+ __ access_load_at(T_BOOLEAN, IN_HEAP, rax, field, noreg, noreg);
__ push(ztos);
// Rewrite bytecode to be faster
if (!is_static && rc == may_rewrite) {
@@ -2901,7 +2920,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
__ cmpl(flags, itos);
__ jcc(Assembler::notEqual, notInt);
// itos
- __ movl(rax, field);
+ __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
__ push(itos);
// Rewrite bytecode to be faster
if (!is_static && rc == may_rewrite) {
@@ -2913,7 +2932,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
__ cmpl(flags, ctos);
__ jcc(Assembler::notEqual, notChar);
// ctos
- __ load_unsigned_short(rax, field);
+ __ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg, noreg);
__ push(ctos);
// Rewrite bytecode to be faster
if (!is_static && rc == may_rewrite) {
@@ -2925,7 +2944,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
__ cmpl(flags, stos);
__ jcc(Assembler::notEqual, notShort);
// stos
- __ load_signed_short(rax, field);
+ __ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg, noreg);
__ push(stos);
// Rewrite bytecode to be faster
if (!is_static && rc == may_rewrite) {
@@ -2937,19 +2956,9 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
__ cmpl(flags, ltos);
__ jcc(Assembler::notEqual, notLong);
// ltos
-
-#ifndef _LP64
- // Generate code as if volatile. There just aren't enough registers to
- // save that information and this code is faster than the test.
- __ fild_d(field); // Must load atomically
- __ subptr(rsp,2*wordSize); // Make space for store
- __ fistp_d(Address(rsp,0));
- __ pop(rax);
- __ pop(rdx);
-#else
- __ movq(rax, field);
-#endif
-
+ // Generate code as if volatile (x86_32). There just aren't enough registers to
+ // save that information and this code is faster than the test.
+ __ access_load_at(T_LONG, IN_HEAP | MO_RELAXED, noreg /* ltos */, field, noreg, noreg);
__ push(ltos);
// Rewrite bytecode to be faster
LP64_ONLY(if (!is_static && rc == may_rewrite) patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx));
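The deleted fild_d/fistp_d sequence is not lost: under MO_RELAXED the x86_32 barrier assembler can emit the same atomic FPU round-trip for T_LONG, since the platform has no 64-bit integer load. Reconstructed from the removed lines (its new home inside the barrier assembler is an assumption):

    // Atomic 64-bit load on x86_32: the FPU loads and stores 64 bits
    // indivisibly; pop the result into the rdx:rax pair afterwards.
    __ fild_d(field);              // atomic load into the FPU
    __ subptr(rsp, 2 * wordSize);  // make room for the store
    __ fistp_d(Address(rsp, 0));   // atomic store back to memory
    __ pop(rax);                   // low word
    __ pop(rdx);                   // high word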
@@ -2960,7 +2969,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
__ jcc(Assembler::notEqual, notFloat);
// ftos
- __ load_float(field);
+ __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
__ push(ftos);
// Rewrite bytecode to be faster
if (!is_static && rc == may_rewrite) {
@@ -2974,7 +2983,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
__ jcc(Assembler::notEqual, notDouble);
#endif
// dtos
- __ load_double(field);
+ __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
__ push(dtos);
// Rewrite bytecode to be faster
if (!is_static && rc == may_rewrite) {
@@ -3133,7 +3142,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
{
__ pop(btos);
if (!is_static) pop_and_check_object(obj);
- __ movb(field, rax);
+ __ access_store_at(T_BYTE, IN_HEAP, field, rax, noreg, noreg);
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
}
@@ -3148,8 +3157,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
{
__ pop(ztos);
if (!is_static) pop_and_check_object(obj);
- __ andl(rax, 0x1);
- __ movb(field, rax);
+ __ access_store_at(T_BOOLEAN, IN_HEAP, field, rax, noreg, noreg);
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_zputfield, bc, rbx, true, byte_no);
}
@@ -3180,7 +3188,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
{
__ pop(itos);
if (!is_static) pop_and_check_object(obj);
- __ movl(field, rax);
+ __ access_store_at(T_INT, IN_HEAP, field, rax, noreg, noreg);
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
}
@@ -3195,7 +3203,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
{
__ pop(ctos);
if (!is_static) pop_and_check_object(obj);
- __ movw(field, rax);
+ __ access_store_at(T_CHAR, IN_HEAP, field, rax, noreg, noreg);
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
}
@@ -3210,7 +3218,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
{
__ pop(stos);
if (!is_static) pop_and_check_object(obj);
- __ movw(field, rax);
+ __ access_store_at(T_SHORT, IN_HEAP, field, rax, noreg, noreg);
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
}
@@ -3226,7 +3234,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
{
__ pop(ltos);
if (!is_static) pop_and_check_object(obj);
- __ movq(field, rax);
+ __ access_store_at(T_LONG, IN_HEAP, field, noreg /* ltos */, noreg, noreg);
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
}
@@ -3242,11 +3250,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
if (!is_static) pop_and_check_object(obj);
// Replace with real volatile test
- __ push(rdx);
- __ push(rax); // Must update atomically with FIST
- __ fild_d(Address(rsp,0)); // So load into FPU register
- __ fistp_d(field); // and put into memory atomically
- __ addptr(rsp, 2*wordSize);
+ __ access_store_at(T_LONG, IN_HEAP | MO_RELAXED, field, noreg /* ltos */, noreg, noreg);
// volatile_barrier();
volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
Assembler::StoreStore));
@@ -3257,8 +3261,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
__ pop(ltos); // overwrites rdx
if (!is_static) pop_and_check_object(obj);
- __ movptr(hi, rdx);
- __ movptr(field, rax);
+ __ access_store_at(T_LONG, IN_HEAP, field, noreg /* ltos */, noreg, noreg);
// Don't rewrite to _fast_lputfield for potential volatile case.
__ jmp(notVolatile);
}
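On x86 (TSO) the only hardware reordering is a store passing a later load, so the StoreLoad|StoreStore barrier after the volatile long store is the one fence that actually costs anything. A hedged sketch of how HotSpot typically lowers such a membar on x86 (illustrative, not the verbatim expansion):

    // StoreStore is a no-op under TSO; StoreLoad needs a serializing
    // instruction, and a locked RMW on the stack is cheaper than mfence.
    __ lock();
    __ addl(Address(rsp, 0), 0);   // lock addl $0, (%rsp)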
@@ -3272,7 +3275,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
{
__ pop(ftos);
if (!is_static) pop_and_check_object(obj);
- __ store_float(field);
+ __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg);
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
}
@@ -3289,7 +3292,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
{
__ pop(dtos);
if (!is_static) pop_and_check_object(obj);
- __ store_double(field);
+ __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg);
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
}
@@ -3422,30 +3425,31 @@ void TemplateTable::fast_storefield(TosState state) {
break;
case Bytecodes::_fast_lputfield:
#ifdef _LP64
- __ movq(field, rax);
+ __ access_store_at(T_LONG, IN_HEAP, field, noreg /* ltos */, noreg, noreg);
#else
__ stop("should not be rewritten");
#endif
break;
case Bytecodes::_fast_iputfield:
- __ movl(field, rax);
+ __ access_store_at(T_INT, IN_HEAP, field, rax, noreg, noreg);
break;
case Bytecodes::_fast_zputfield:
- __ andl(rax, 0x1); // boolean is true if LSB is 1
- // fall through to bputfield
+ __ access_store_at(T_BOOLEAN, IN_HEAP, field, rax, noreg, noreg);
+ break;
case Bytecodes::_fast_bputfield:
- __ movb(field, rax);
+ __ access_store_at(T_BYTE, IN_HEAP, field, rax, noreg, noreg);
break;
case Bytecodes::_fast_sputfield:
- // fall through
+ __ access_store_at(T_SHORT, IN_HEAP, field, rax, noreg, noreg);
+ break;
case Bytecodes::_fast_cputfield:
- __ movw(field, rax);
+ __ access_store_at(T_CHAR, IN_HEAP, field, rax, noreg, noreg);
break;
case Bytecodes::_fast_fputfield:
- __ store_float(field);
+ __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg);
break;
case Bytecodes::_fast_dputfield:
- __ store_double(field);
+ __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg);
break;
default:
ShouldNotReachHere();
@@ -3512,28 +3516,28 @@ void TemplateTable::fast_accessfield(TosState state) {
break;
case Bytecodes::_fast_lgetfield:
#ifdef _LP64
- __ movq(rax, field);
+ __ access_load_at(T_LONG, IN_HEAP, noreg /* ltos */, field, noreg, noreg);
#else
__ stop("should not be rewritten");
#endif
break;
case Bytecodes::_fast_igetfield:
- __ movl(rax, field);
+ __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
break;
case Bytecodes::_fast_bgetfield:
- __ movsbl(rax, field);
+ __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg, noreg);
break;
case Bytecodes::_fast_sgetfield:
- __ load_signed_short(rax, field);
+ __ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg, noreg);
break;
case Bytecodes::_fast_cgetfield:
- __ load_unsigned_short(rax, field);
+ __ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg, noreg);
break;
case Bytecodes::_fast_fgetfield:
- __ load_float(field);
+ __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
break;
case Bytecodes::_fast_dgetfield:
- __ load_double(field);
+ __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
break;
default:
ShouldNotReachHere();
@@ -3566,14 +3570,14 @@ void TemplateTable::fast_xaccess(TosState state) {
const Address field = Address(rax, rbx, Address::times_1, 0*wordSize);
switch (state) {
case itos:
- __ movl(rax, field);
+ __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
break;
case atos:
do_oop_load(_masm, field, rax);
__ verify_oop(rax);
break;
case ftos:
- __ load_float(field);
+ __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
break;
default:
ShouldNotReachHere();
diff --git a/src/hotspot/cpu/x86/x86.ad b/src/hotspot/cpu/x86/x86.ad
index 726796aa452..3fd148bceda 100644
--- a/src/hotspot/cpu/x86/x86.ad
+++ b/src/hotspot/cpu/x86/x86.ad
@@ -1067,6 +1067,138 @@ reg_class vectorz_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM
#endif
);
+reg_class xmm0_reg(XMM0, XMM0b, XMM0c, XMM0d);
+reg_class ymm0_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h);
+reg_class zmm0_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h, XMM0i, XMM0j, XMM0k, XMM0l, XMM0m, XMM0n, XMM0o, XMM0p);
+
+reg_class xmm1_reg(XMM1, XMM1b, XMM1c, XMM1d);
+reg_class ymm1_reg(XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h);
+reg_class zmm1_reg(XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h, XMM1i, XMM1j, XMM1k, XMM1l, XMM1m, XMM1n, XMM1o, XMM1p);
+
+reg_class xmm2_reg(XMM2, XMM2b, XMM2c, XMM2d);
+reg_class ymm2_reg(XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h);
+reg_class zmm2_reg(XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h, XMM2i, XMM2j, XMM2k, XMM2l, XMM2m, XMM2n, XMM2o, XMM2p);
+
+reg_class xmm3_reg(XMM3, XMM3b, XMM3c, XMM3d);
+reg_class ymm3_reg(XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h);
+reg_class zmm3_reg(XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h, XMM3i, XMM3j, XMM3k, XMM3l, XMM3m, XMM3n, XMM3o, XMM3p);
+
+reg_class xmm4_reg(XMM4, XMM4b, XMM4c, XMM4d);
+reg_class ymm4_reg(XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h);
+reg_class zmm4_reg(XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h, XMM4i, XMM4j, XMM4k, XMM4l, XMM4m, XMM4n, XMM4o, XMM4p);
+
+reg_class xmm5_reg(XMM5, XMM5b, XMM5c, XMM5d);
+reg_class ymm5_reg(XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h);
+reg_class zmm5_reg(XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h, XMM5i, XMM5j, XMM5k, XMM5l, XMM5m, XMM5n, XMM5o, XMM5p);
+
+reg_class xmm6_reg(XMM6, XMM6b, XMM6c, XMM6d);
+reg_class ymm6_reg(XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h);
+reg_class zmm6_reg(XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h, XMM6i, XMM6j, XMM6k, XMM6l, XMM6m, XMM6n, XMM6o, XMM6p);
+
+reg_class xmm7_reg(XMM7, XMM7b, XMM7c, XMM7d);
+reg_class ymm7_reg(XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h);
+reg_class zmm7_reg(XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h, XMM7i, XMM7j, XMM7k, XMM7l, XMM7m, XMM7n, XMM7o, XMM7p);
+
+#ifdef _LP64
+
+reg_class xmm8_reg(XMM8, XMM8b, XMM8c, XMM8d);
+reg_class ymm8_reg(XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h);
+reg_class zmm8_reg(XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, XMM8i, XMM8j, XMM8k, XMM8l, XMM8m, XMM8n, XMM8o, XMM8p);
+
+reg_class xmm9_reg(XMM9, XMM9b, XMM9c, XMM9d);
+reg_class ymm9_reg(XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h);
+reg_class zmm9_reg(XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h, XMM9i, XMM9j, XMM9k, XMM9l, XMM9m, XMM9n, XMM9o, XMM9p);
+
+reg_class xmm10_reg(XMM10, XMM10b, XMM10c, XMM10d);
+reg_class ymm10_reg(XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h);
+reg_class zmm10_reg(XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h, XMM10i, XMM10j, XMM10k, XMM10l, XMM10m, XMM10n, XMM10o, XMM10p);
+
+reg_class xmm11_reg(XMM11, XMM11b, XMM11c, XMM11d);
+reg_class ymm11_reg(XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h);
+reg_class zmm11_reg(XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h, XMM11i, XMM11j, XMM11k, XMM11l, XMM11m, XMM11n, XMM11o, XMM11p);
+
+reg_class xmm12_reg(XMM12, XMM12b, XMM12c, XMM12d);
+reg_class ymm12_reg(XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h);
+reg_class zmm12_reg(XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h, XMM12i, XMM12j, XMM12k, XMM12l, XMM12m, XMM12n, XMM12o, XMM12p);
+
+reg_class xmm13_reg(XMM13, XMM13b, XMM13c, XMM13d);
+reg_class ymm13_reg(XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h);
+reg_class zmm13_reg(XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h, XMM13i, XMM13j, XMM13k, XMM13l, XMM13m, XMM13n, XMM13o, XMM13p);
+
+reg_class xmm14_reg(XMM14, XMM14b, XMM14c, XMM14d);
+reg_class ymm14_reg(XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h);
+reg_class zmm14_reg(XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h, XMM14i, XMM14j, XMM14k, XMM14l, XMM14m, XMM14n, XMM14o, XMM14p);
+
+reg_class xmm15_reg(XMM15, XMM15b, XMM15c, XMM15d);
+reg_class ymm15_reg(XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h);
+reg_class zmm15_reg(XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h, XMM15i, XMM15j, XMM15k, XMM15l, XMM15m, XMM15n, XMM15o, XMM15p);
+
+reg_class xmm16_reg(XMM16, XMM16b, XMM16c, XMM16d);
+reg_class ymm16_reg(XMM16, XMM16b, XMM16c, XMM16d, XMM16e, XMM16f, XMM16g, XMM16h);
+reg_class zmm16_reg(XMM16, XMM16b, XMM16c, XMM16d, XMM16e, XMM16f, XMM16g, XMM16h, XMM16i, XMM16j, XMM16k, XMM16l, XMM16m, XMM16n, XMM16o, XMM16p);
+
+reg_class xmm17_reg(XMM17, XMM17b, XMM17c, XMM17d);
+reg_class ymm17_reg(XMM17, XMM17b, XMM17c, XMM17d, XMM17e, XMM17f, XMM17g, XMM17h);
+reg_class zmm17_reg(XMM17, XMM17b, XMM17c, XMM17d, XMM17e, XMM17f, XMM17g, XMM17h, XMM17i, XMM17j, XMM17k, XMM17l, XMM17m, XMM17n, XMM17o, XMM17p);
+
+reg_class xmm18_reg(XMM18, XMM18b, XMM18c, XMM18d);
+reg_class ymm18_reg(XMM18, XMM18b, XMM18c, XMM18d, XMM18e, XMM18f, XMM18g, XMM18h);
+reg_class zmm18_reg(XMM18, XMM18b, XMM18c, XMM18d, XMM18e, XMM18f, XMM18g, XMM18h, XMM18i, XMM18j, XMM18k, XMM18l, XMM18m, XMM18n, XMM18o, XMM18p);
+
+reg_class xmm19_reg(XMM19, XMM19b, XMM19c, XMM19d);
+reg_class ymm19_reg(XMM19, XMM19b, XMM19c, XMM19d, XMM19e, XMM19f, XMM19g, XMM19h);
+reg_class zmm19_reg(XMM19, XMM19b, XMM19c, XMM19d, XMM19e, XMM19f, XMM19g, XMM19h, XMM19i, XMM19j, XMM19k, XMM19l, XMM19m, XMM19n, XMM19o, XMM19p);
+
+reg_class xmm20_reg(XMM20, XMM20b, XMM20c, XMM20d);
+reg_class ymm20_reg(XMM20, XMM20b, XMM20c, XMM20d, XMM20e, XMM20f, XMM20g, XMM20h);
+reg_class zmm20_reg(XMM20, XMM20b, XMM20c, XMM20d, XMM20e, XMM20f, XMM20g, XMM20h, XMM20i, XMM20j, XMM20k, XMM20l, XMM20m, XMM20n, XMM20o, XMM20p);
+
+reg_class xmm21_reg(XMM21, XMM21b, XMM21c, XMM21d);
+reg_class ymm21_reg(XMM21, XMM21b, XMM21c, XMM21d, XMM21e, XMM21f, XMM21g, XMM21h);
+reg_class zmm21_reg(XMM21, XMM21b, XMM21c, XMM21d, XMM21e, XMM21f, XMM21g, XMM21h, XMM21i, XMM21j, XMM21k, XMM21l, XMM21m, XMM21n, XMM21o, XMM21p);
+
+reg_class xmm22_reg(XMM22, XMM22b, XMM22c, XMM22d);
+reg_class ymm22_reg(XMM22, XMM22b, XMM22c, XMM22d, XMM22e, XMM22f, XMM22g, XMM22h);
+reg_class zmm22_reg(XMM22, XMM22b, XMM22c, XMM22d, XMM22e, XMM22f, XMM22g, XMM22h, XMM22i, XMM22j, XMM22k, XMM22l, XMM22m, XMM22n, XMM22o, XMM22p);
+
+reg_class xmm23_reg(XMM23, XMM23b, XMM23c, XMM23d);
+reg_class ymm23_reg(XMM23, XMM23b, XMM23c, XMM23d, XMM23e, XMM23f, XMM23g, XMM23h);
+reg_class zmm23_reg(XMM23, XMM23b, XMM23c, XMM23d, XMM23e, XMM23f, XMM23g, XMM23h, XMM23i, XMM23j, XMM23k, XMM23l, XMM23m, XMM23n, XMM23o, XMM23p);
+
+reg_class xmm24_reg(XMM24, XMM24b, XMM24c, XMM24d);
+reg_class ymm24_reg(XMM24, XMM24b, XMM24c, XMM24d, XMM24e, XMM24f, XMM24g, XMM24h);
+reg_class zmm24_reg(XMM24, XMM24b, XMM24c, XMM24d, XMM24e, XMM24f, XMM24g, XMM24h, XMM24i, XMM24j, XMM24k, XMM24l, XMM24m, XMM24n, XMM24o, XMM24p);
+
+reg_class xmm25_reg(XMM25, XMM25b, XMM25c, XMM25d);
+reg_class ymm25_reg(XMM25, XMM25b, XMM25c, XMM25d, XMM25e, XMM25f, XMM25g, XMM25h);
+reg_class zmm25_reg(XMM25, XMM25b, XMM25c, XMM25d, XMM25e, XMM25f, XMM25g, XMM25h, XMM25i, XMM25j, XMM25k, XMM25l, XMM25m, XMM25n, XMM25o, XMM25p);
+
+reg_class xmm26_reg(XMM26, XMM26b, XMM26c, XMM26d);
+reg_class ymm26_reg(XMM26, XMM26b, XMM26c, XMM26d, XMM26e, XMM26f, XMM26g, XMM26h);
+reg_class zmm26_reg(XMM26, XMM26b, XMM26c, XMM26d, XMM26e, XMM26f, XMM26g, XMM26h, XMM26i, XMM26j, XMM26k, XMM26l, XMM26m, XMM26n, XMM26o, XMM26p);
+
+reg_class xmm27_reg(XMM27, XMM27b, XMM27c, XMM27d);
+reg_class ymm27_reg(XMM27, XMM27b, XMM27c, XMM27d, XMM27e, XMM27f, XMM27g, XMM27h);
+reg_class zmm27_reg(XMM27, XMM27b, XMM27c, XMM27d, XMM27e, XMM27f, XMM27g, XMM27h, XMM27i, XMM27j, XMM27k, XMM27l, XMM27m, XMM27n, XMM27o, XMM27p);
+
+reg_class xmm28_reg(XMM28, XMM28b, XMM28c, XMM28d);
+reg_class ymm28_reg(XMM28, XMM28b, XMM28c, XMM28d, XMM28e, XMM28f, XMM28g, XMM28h);
+reg_class zmm28_reg(XMM28, XMM28b, XMM28c, XMM28d, XMM28e, XMM28f, XMM28g, XMM28h, XMM28i, XMM28j, XMM28k, XMM28l, XMM28m, XMM28n, XMM28o, XMM28p);
+
+reg_class xmm29_reg(XMM29, XMM29b, XMM29c, XMM29d);
+reg_class ymm29_reg(XMM29, XMM29b, XMM29c, XMM29d, XMM29e, XMM29f, XMM29g, XMM29h);
+reg_class zmm29_reg(XMM29, XMM29b, XMM29c, XMM29d, XMM29e, XMM29f, XMM29g, XMM29h, XMM29i, XMM29j, XMM29k, XMM29l, XMM29m, XMM29n, XMM29o, XMM29p);
+
+reg_class xmm30_reg(XMM30, XMM30b, XMM30c, XMM30d);
+reg_class ymm30_reg(XMM30, XMM30b, XMM30c, XMM30d, XMM30e, XMM30f, XMM30g, XMM30h);
+reg_class zmm30_reg(XMM30, XMM30b, XMM30c, XMM30d, XMM30e, XMM30f, XMM30g, XMM30h, XMM30i, XMM30j, XMM30k, XMM30l, XMM30m, XMM30n, XMM30o, XMM30p);
+
+reg_class xmm31_reg(XMM31, XMM31b, XMM31c, XMM31d);
+reg_class ymm31_reg(XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h);
+reg_class zmm31_reg(XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h, XMM31i, XMM31j, XMM31k, XMM31l, XMM31m, XMM31n, XMM31o, XMM31p);
+
+#endif
+
%}
diff --git a/src/hotspot/cpu/x86/x86_64.ad b/src/hotspot/cpu/x86/x86_64.ad
index ea0f16ac18c..368a5b03faf 100644
--- a/src/hotspot/cpu/x86/x86_64.ad
+++ b/src/hotspot/cpu/x86/x86_64.ad
@@ -538,6 +538,12 @@ reg_class int_rdi_reg(RDI);
%}
+source_hpp %{
+#if INCLUDE_ZGC
+#include "gc/z/zBarrierSetAssembler.hpp"
+#endif
+%}
+
//----------SOURCE BLOCK-------------------------------------------------------
// This is a block of C++ code which provides values, functions, and
// definitions necessary in the rest of the architecture description
@@ -4221,6 +4227,135 @@ operand cmpOpUCF2() %{
%}
%}
+// Operands for bound floating pointer register arguments
+operand rxmm0() %{
+ constraint(ALLOC_IN_RC(xmm0_reg)); match(VecX);
+ predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
+%}
+operand rxmm1() %{
+ constraint(ALLOC_IN_RC(xmm1_reg)); match(VecX);
+ predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
+%}
+operand rxmm2() %{
+ constraint(ALLOC_IN_RC(xmm2_reg)); match(VecX);
+ predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
+%}
+operand rxmm3() %{
+ constraint(ALLOC_IN_RC(xmm3_reg)); match(VecX);
+ predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
+%}
+operand rxmm4() %{
+ constraint(ALLOC_IN_RC(xmm4_reg)); match(VecX);
+ predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
+%}
+operand rxmm5() %{
+ constraint(ALLOC_IN_RC(xmm5_reg)); match(VecX);
+ predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
+%}
+operand rxmm6() %{
+ constraint(ALLOC_IN_RC(xmm6_reg)); match(VecX);
+ predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
+%}
+operand rxmm7() %{
+ constraint(ALLOC_IN_RC(xmm7_reg)); match(VecX);
+ predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
+%}
+operand rxmm8() %{
+ constraint(ALLOC_IN_RC(xmm8_reg)); match(VecX);
+ predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
+%}
+operand rxmm9() %{
+ constraint(ALLOC_IN_RC(xmm9_reg)); match(VecX);
+ predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
+%}
+operand rxmm10() %{
+ constraint(ALLOC_IN_RC(xmm10_reg)); match(VecX);
+ predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
+%}
+operand rxmm11() %{
+ constraint(ALLOC_IN_RC(xmm11_reg)); match(VecX);
+ predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
+%}
+operand rxmm12() %{
+ constraint(ALLOC_IN_RC(xmm12_reg)); match(VecX);
+ predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
+%}
+operand rxmm13() %{
+ constraint(ALLOC_IN_RC(xmm13_reg)); match(VecX);
+ predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
+%}
+operand rxmm14() %{
+ constraint(ALLOC_IN_RC(xmm14_reg)); match(VecX);
+ predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
+%}
+operand rxmm15() %{
+ constraint(ALLOC_IN_RC(xmm15_reg)); match(VecX);
+ predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
+%}
+operand rxmm16() %{
+ constraint(ALLOC_IN_RC(xmm16_reg)); match(VecX);
+ predicate(UseAVX == 3); format%{%} interface(REG_INTER);
+%}
+operand rxmm17() %{
+ constraint(ALLOC_IN_RC(xmm17_reg)); match(VecX);
+ predicate(UseAVX == 3); format%{%} interface(REG_INTER);
+%}
+operand rxmm18() %{
+ constraint(ALLOC_IN_RC(xmm18_reg)); match(VecX);
+ predicate(UseAVX == 3); format%{%} interface(REG_INTER);
+%}
+operand rxmm19() %{
+ constraint(ALLOC_IN_RC(xmm19_reg)); match(VecX);
+ predicate(UseAVX == 3); format%{%} interface(REG_INTER);
+%}
+operand rxmm20() %{
+ constraint(ALLOC_IN_RC(xmm20_reg)); match(VecX);
+ predicate(UseAVX == 3); format%{%} interface(REG_INTER);
+%}
+operand rxmm21() %{
+ constraint(ALLOC_IN_RC(xmm21_reg)); match(VecX);
+ predicate(UseAVX == 3); format%{%} interface(REG_INTER);
+%}
+operand rxmm22() %{
+ constraint(ALLOC_IN_RC(xmm22_reg)); match(VecX);
+ predicate(UseAVX == 3); format%{%} interface(REG_INTER);
+%}
+operand rxmm23() %{
+ constraint(ALLOC_IN_RC(xmm23_reg)); match(VecX);
+ predicate(UseAVX == 3); format%{%} interface(REG_INTER);
+%}
+operand rxmm24() %{
+ constraint(ALLOC_IN_RC(xmm24_reg)); match(VecX);
+ predicate(UseAVX == 3); format%{%} interface(REG_INTER);
+%}
+operand rxmm25() %{
+ constraint(ALLOC_IN_RC(xmm25_reg)); match(VecX);
+ predicate(UseAVX == 3); format%{%} interface(REG_INTER);
+%}
+operand rxmm26() %{
+ constraint(ALLOC_IN_RC(xmm26_reg)); match(VecX);
+ predicate(UseAVX == 3); format%{%} interface(REG_INTER);
+%}
+operand rxmm27() %{
+ constraint(ALLOC_IN_RC(xmm27_reg)); match(VecX);
+ predicate(UseAVX == 3); format%{%} interface(REG_INTER);
+%}
+operand rxmm28() %{
+ constraint(ALLOC_IN_RC(xmm28_reg)); match(VecX);
+ predicate(UseAVX == 3); format%{%} interface(REG_INTER);
+%}
+operand rxmm29() %{
+ constraint(ALLOC_IN_RC(xmm29_reg)); match(VecX);
+ predicate(UseAVX == 3); format%{%} interface(REG_INTER);
+%}
+operand rxmm30() %{
+ constraint(ALLOC_IN_RC(xmm30_reg)); match(VecX);
+ predicate(UseAVX == 3); format%{%} interface(REG_INTER);
+%}
+operand rxmm31() %{
+ constraint(ALLOC_IN_RC(xmm31_reg)); match(VecX);
+ predicate(UseAVX == 3); format%{%} interface(REG_INTER);
+%}
//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used as to simplify
@@ -11547,6 +11682,16 @@ instruct testL_reg_mem(rFlagsReg cr, rRegL src, memory mem, immL0 zero)
ins_pipe(ialu_cr_reg_mem);
%}
+instruct testL_reg_mem2(rFlagsReg cr, rRegP src, memory mem, immL0 zero)
+%{
+ match(Set cr (CmpL (AndL (CastP2X src) (LoadL mem)) zero));
+
+ format %{ "testq $src, $mem" %}
+ opcode(0x85);
+ ins_encode(REX_reg_mem_wide(src, mem), OpcP, reg_mem(src, mem));
+ ins_pipe(ialu_cr_reg_mem);
+%}
+
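testL_reg_mem2 differs from testL_reg_mem above it only in accepting a pointer register via CastP2X. That lets the graph ZGC's C2 load barrier builds, the oop AND-ed with a bad mask loaded from memory, fold into a single testq. Conceptually (names here are illustrative, not the actual node construction):

    // ZGC-style fast path: only take the slow path if the colored oop
    // has any bit of the current bad mask set.
    if (((uintptr_t)obj & ZAddressBadMask) != 0) {
      obj = load_barrier_slow_path(obj);   // assumed helper
    }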
// Manifest a CmpL result in an integer register. Very painful.
// This is the test to avoid.
instruct cmpL3_reg_reg(rRegI dst, rRegL src1, rRegL src2, rFlagsReg flags)
@@ -11607,16 +11752,6 @@ instruct testUL_reg(rFlagsRegU cr, rRegL src, immL0 zero)
ins_pipe(ialu_cr_reg_imm);
%}
-instruct compUB_mem_imm(rFlagsReg cr, memory mem, immU8 imm)
-%{
- match(Set cr (CmpI (LoadUB mem) imm));
-
- ins_cost(125);
- format %{ "cmpb $mem, $imm" %}
- ins_encode %{ __ cmpb($mem$$Address, $imm$$constant); %}
- ins_pipe(ialu_cr_reg_mem);
-%}
-
instruct compB_mem_imm(rFlagsReg cr, memory mem, immI8 imm)
%{
match(Set cr (CmpI (LoadB mem) imm));
@@ -11627,16 +11762,6 @@ instruct compB_mem_imm(rFlagsReg cr, memory mem, immI8 imm)
ins_pipe(ialu_cr_reg_mem);
%}
-instruct testUB_mem_imm(rFlagsReg cr, memory mem, immU8 imm, immI0 zero)
-%{
- match(Set cr (CmpI (AndI (LoadUB mem) imm) zero));
-
- ins_cost(125);
- format %{ "testb $mem, $imm" %}
- ins_encode %{ __ testb($mem$$Address, $imm$$constant); %}
- ins_pipe(ialu_cr_reg_mem);
-%}
-
instruct testB_mem_imm(rFlagsReg cr, memory mem, immI8 imm, immI0 zero)
%{
match(Set cr (CmpI (AndI (LoadB mem) imm) zero));
@@ -12340,6 +12465,223 @@ instruct RethrowException()
ins_pipe(pipe_jmp);
%}
+//
+// Execute ZGC load barrier (strong) slow path
+//
+
+// When running without XMM regs
+instruct loadBarrierSlowRegNoVec(rRegP dst, memory mem, rFlagsReg cr) %{
+
+ match(Set dst (LoadBarrierSlowReg mem));
+ predicate(MaxVectorSize < 16);
+
+ effect(DEF dst, KILL cr);
+
+ format %{"LoadBarrierSlowRegNoVec $dst, $mem" %}
+ ins_encode %{
+#if INCLUDE_ZGC
+ Register d = $dst$$Register;
+ ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+ assert(d != r12, "Can't be R12!");
+ assert(d != r15, "Can't be R15!");
+ assert(d != rsp, "Can't be RSP!");
+
+ __ lea(d, $mem$$Address);
+ __ call(RuntimeAddress(bs->load_barrier_slow_stub(d)));
+#else
+ ShouldNotReachHere();
+#endif
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+// For XMM and YMM enabled processors
+instruct loadBarrierSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
+ rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
+ rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
+ rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
+ rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
+
+ match(Set dst (LoadBarrierSlowReg mem));
+ predicate((UseSSE > 0) && (UseAVX <= 2) && (MaxVectorSize >= 16));
+
+ effect(DEF dst, KILL cr,
+ KILL x0, KILL x1, KILL x2, KILL x3,
+ KILL x4, KILL x5, KILL x6, KILL x7,
+ KILL x8, KILL x9, KILL x10, KILL x11,
+ KILL x12, KILL x13, KILL x14, KILL x15);
+
+ format %{"LoadBarrierSlowRegXmm $dst, $mem" %}
+ ins_encode %{
+#if INCLUDE_ZGC
+ Register d = $dst$$Register;
+ ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+ assert(d != r12, "Can't be R12!");
+ assert(d != r15, "Can't be R15!");
+ assert(d != rsp, "Can't be RSP!");
+
+ __ lea(d, $mem$$Address);
+ __ call(RuntimeAddress(bs->load_barrier_slow_stub(d)));
+#else
+ ShouldNotReachHere();
+#endif
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+// For ZMM enabled processors
+instruct loadBarrierSlowRegZmm(rRegP dst, memory mem, rFlagsReg cr,
+ rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
+ rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
+ rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
+ rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
+ rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
+ rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
+ rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
+ rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
+
+ match(Set dst (LoadBarrierSlowReg mem));
+ predicate((UseAVX == 3) && (MaxVectorSize >= 16));
+
+ effect(DEF dst, KILL cr,
+ KILL x0, KILL x1, KILL x2, KILL x3,
+ KILL x4, KILL x5, KILL x6, KILL x7,
+ KILL x8, KILL x9, KILL x10, KILL x11,
+ KILL x12, KILL x13, KILL x14, KILL x15,
+ KILL x16, KILL x17, KILL x18, KILL x19,
+ KILL x20, KILL x21, KILL x22, KILL x23,
+ KILL x24, KILL x25, KILL x26, KILL x27,
+ KILL x28, KILL x29, KILL x30, KILL x31);
+
+ format %{"LoadBarrierSlowRegZmm $dst, $mem" %}
+ ins_encode %{
+#if INCLUDE_ZGC
+ Register d = $dst$$Register;
+ ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+ assert(d != r12, "Can't be R12!");
+ assert(d != r15, "Can't be R15!");
+ assert(d != rsp, "Can't be RSP!");
+
+ __ lea(d, $mem$$Address);
+ __ call(RuntimeAddress(bs->load_barrier_slow_stub(d)));
+#else
+ ShouldNotReachHere();
+#endif
+ %}
+ ins_pipe(pipe_slow);
+%}
+
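All of these variants fetch the stub with load_barrier_slow_stub(d): the barrier set keeps one slow-path stub per general-purpose register, so the stub statically knows which register holds the reference and where to write the healed value back, and the call site spills nothing. A minimal sketch of that lookup (table name and layout are assumptions):

    // One stub per destination register, indexed by register encoding.
    address ZBarrierSetAssembler::load_barrier_slow_stub(Register reg) {
      return _load_barrier_slow_stub[reg->encoding()];
    }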
+//
+// Execute ZGC load barrier (weak) slow path
+//
+
+// When running without XMM regs
+instruct loadBarrierWeakSlowRegNoVec(rRegP dst, memory mem, rFlagsReg cr) %{
+
+ match(Set dst (LoadBarrierWeakSlowReg mem));
+ predicate(MaxVectorSize < 16);
+
+ effect(DEF dst, KILL cr);
+
+ format %{"LoadBarrierSlowRegNoVec $dst, $mem" %}
+ ins_encode %{
+#if INCLUDE_ZGC
+ Register d = $dst$$Register;
+ ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+ assert(d != r12, "Can't be R12!");
+ assert(d != r15, "Can't be R15!");
+ assert(d != rsp, "Can't be RSP!");
+
+ __ lea(d, $mem$$Address);
+ __ call(RuntimeAddress(bs->load_barrier_weak_slow_stub(d)));
+#else
+ ShouldNotReachHere();
+#endif
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+// For XMM and YMM enabled processors
+instruct loadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
+ rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
+ rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
+ rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
+ rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
+
+ match(Set dst (LoadBarrierWeakSlowReg mem));
+ predicate((UseSSE > 0) && (UseAVX <= 2) && (MaxVectorSize >= 16));
+
+ effect(DEF dst, KILL cr,
+ KILL x0, KILL x1, KILL x2, KILL x3,
+ KILL x4, KILL x5, KILL x6, KILL x7,
+ KILL x8, KILL x9, KILL x10, KILL x11,
+ KILL x12, KILL x13, KILL x14, KILL x15);
+
+ format %{"LoadBarrierWeakSlowRegXmm $dst, $mem" %}
+ ins_encode %{
+#if INCLUDE_ZGC
+ Register d = $dst$$Register;
+ ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+ assert(d != r12, "Can't be R12!");
+ assert(d != r15, "Can't be R15!");
+ assert(d != rsp, "Can't be RSP!");
+
+ __ lea(d, $mem$$Address);
+ __ call(RuntimeAddress(bs->load_barrier_weak_slow_stub(d)));
+#else
+ ShouldNotReachHere();
+#endif
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+// For ZMM enabled processors
+instruct loadBarrierWeakSlowRegZmm(rRegP dst, memory mem, rFlagsReg cr,
+ rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
+ rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
+ rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
+ rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
+ rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
+ rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
+ rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
+ rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
+
+ match(Set dst (LoadBarrierWeakSlowReg mem));
+ predicate((UseAVX == 3) && (MaxVectorSize >= 16));
+
+ effect(DEF dst, KILL cr,
+ KILL x0, KILL x1, KILL x2, KILL x3,
+ KILL x4, KILL x5, KILL x6, KILL x7,
+ KILL x8, KILL x9, KILL x10, KILL x11,
+ KILL x12, KILL x13, KILL x14, KILL x15,
+ KILL x16, KILL x17, KILL x18, KILL x19,
+ KILL x20, KILL x21, KILL x22, KILL x23,
+ KILL x24, KILL x25, KILL x26, KILL x27,
+ KILL x28, KILL x29, KILL x30, KILL x31);
+
+ format %{"LoadBarrierWeakSlowRegZmm $dst, $mem" %}
+ ins_encode %{
+#if INCLUDE_ZGC
+ Register d = $dst$$Register;
+ ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+ assert(d != r12, "Can't be R12!");
+ assert(d != r15, "Can't be R15!");
+ assert(d != rsp, "Can't be RSP!");
+
+ __ lea(d, $mem$$Address);
+ __ call(RuntimeAddress(bs->load_barrier_weak_slow_stub(d)));
+#else
+ ShouldNotReachHere();
+#endif
+ %}
+ ins_pipe(pipe_slow);
+%}
// ============================================================================
// This name is KNOWN by the ADLC and cannot be changed.
diff --git a/src/hotspot/cpu/zero/cppInterpreter_zero.cpp b/src/hotspot/cpu/zero/cppInterpreter_zero.cpp
index 43e03b74224..0d2ef9ff73a 100644
--- a/src/hotspot/cpu/zero/cppInterpreter_zero.cpp
+++ b/src/hotspot/cpu/zero/cppInterpreter_zero.cpp
@@ -43,7 +43,7 @@
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
diff --git a/src/hotspot/cpu/zero/interp_masm_zero.hpp b/src/hotspot/cpu/zero/interp_masm_zero.hpp
index ccba37b249f..296bdb68a36 100644
--- a/src/hotspot/cpu/zero/interp_masm_zero.hpp
+++ b/src/hotspot/cpu/zero/interp_masm_zero.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -26,7 +26,8 @@
#ifndef CPU_ZERO_VM_INTERP_MASM_ZERO_HPP
#define CPU_ZERO_VM_INTERP_MASM_ZERO_HPP
-#include "assembler_zero.inline.hpp"
+#include "asm/codeBuffer.hpp"
+#include "asm/macroAssembler.hpp"
#include "interpreter/invocationCounter.hpp"
// This file specializes the assember with interpreter-specific macros
diff --git a/src/hotspot/cpu/zero/relocInfo_zero.cpp b/src/hotspot/cpu/zero/relocInfo_zero.cpp
index 8482e53d37b..e908f796136 100644
--- a/src/hotspot/cpu/zero/relocInfo_zero.cpp
+++ b/src/hotspot/cpu/zero/relocInfo_zero.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -24,8 +24,7 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.inline.hpp"
-#include "assembler_zero.inline.hpp"
+#include "asm/codeBuffer.hpp"
#include "code/relocInfo.hpp"
#include "nativeInst_zero.hpp"
#include "oops/oop.inline.hpp"
diff --git a/src/hotspot/cpu/zero/vtableStubs_zero.cpp b/src/hotspot/cpu/zero/vtableStubs_zero.cpp
index 4e52a4ed125..f3c6c1f01bc 100644
--- a/src/hotspot/cpu/zero/vtableStubs_zero.cpp
+++ b/src/hotspot/cpu/zero/vtableStubs_zero.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -24,18 +24,8 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "assembler_zero.inline.hpp"
#include "code/vtableStubs.hpp"
-#include "interp_masm_zero.hpp"
-#include "memory/resourceArea.hpp"
-#include "oops/instanceKlass.hpp"
-#include "oops/klassVtable.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "vmreg_zero.inline.hpp"
-#ifdef COMPILER2
-#include "opto/runtime.hpp"
-#endif
+#include "utilities/debug.hpp"
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
ShouldNotCallThis();
diff --git a/src/hotspot/os/aix/attachListener_aix.cpp b/src/hotspot/os/aix/attachListener_aix.cpp
index b738d328f3e..18c7f574653 100644
--- a/src/hotspot/os/aix/attachListener_aix.cpp
+++ b/src/hotspot/os/aix/attachListener_aix.cpp
@@ -386,11 +386,10 @@ AixAttachOperation* AixAttachListener::dequeue() {
::close(s);
continue;
}
- uid_t euid = geteuid();
- gid_t egid = getegid();
- if (cred_info.euid != euid || cred_info.egid != egid) {
- log_debug(attach)("euid/egid check failed (%d/%d vs %d/%d)", cred_info.euid, cred_info.egid, euid, egid);
+ if (!os::Posix::matches_effective_uid_and_gid_or_root(cred_info.euid, cred_info.egid)) {
+ log_debug(attach)("euid/egid check failed (%d/%d vs %d/%d)",
+ cred_info.euid, cred_info.egid, geteuid(), getegid());
::close(s);
continue;
}
@@ -548,8 +547,8 @@ bool AttachListener::is_init_trigger() {
}
if (ret == 0) {
// simple check to avoid starting the attach mechanism when
- // a bogus user creates the file
- if (st.st_uid == geteuid()) {
+ // a bogus non-root user creates the file
+ if (os::Posix::matches_effective_uid_or_root(st.st_uid)) {
init();
log_trace(attach)("Attach triggered by %s", fn);
return true;
diff --git a/src/hotspot/os/aix/os_aix.cpp b/src/hotspot/os/aix/os_aix.cpp
index 08da5392a7d..b5827a05354 100644
--- a/src/hotspot/os/aix/os_aix.cpp
+++ b/src/hotspot/os/aix/os_aix.cpp
@@ -59,7 +59,7 @@
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
@@ -899,8 +899,12 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
// guard pages might not fit on the tiny stack created.
int ret = pthread_attr_setstacksize(&attr, stack_size);
if (ret != 0) {
- log_warning(os, thread)("The thread stack size specified is invalid: " SIZE_FORMAT "k",
+ log_warning(os, thread)("The %sthread stack size specified is invalid: " SIZE_FORMAT "k",
+ (thr_type == compiler_thread) ? "compiler " : ((thr_type == java_thread) ? "" : "VM "),
stack_size / K);
+ thread->set_osthread(NULL);
+ delete osthread;
+ return false;
}
// Save some cycles and a page by disabling OS guard pages where we have our own
diff --git a/src/hotspot/os/bsd/attachListener_bsd.cpp b/src/hotspot/os/bsd/attachListener_bsd.cpp
index 12c097631ee..503a4d72a30 100644
--- a/src/hotspot/os/bsd/attachListener_bsd.cpp
+++ b/src/hotspot/os/bsd/attachListener_bsd.cpp
@@ -357,11 +357,10 @@ BsdAttachOperation* BsdAttachListener::dequeue() {
::close(s);
continue;
}
- uid_t euid = geteuid();
- gid_t egid = getegid();
- if (puid != euid || pgid != egid) {
- log_debug(attach)("euid/egid check failed (%d/%d vs %d/%d)", puid, pgid, euid, egid);
+ if (!os::Posix::matches_effective_uid_and_gid_or_root(puid, pgid)) {
+ log_debug(attach)("euid/egid check failed (%d/%d vs %d/%d)", puid, pgid,
+ geteuid(), getegid());
::close(s);
continue;
}
@@ -513,8 +512,8 @@ bool AttachListener::is_init_trigger() {
}
if (ret == 0) {
// simple check to avoid starting the attach mechanism when
- // a bogus user creates the file
- if (st.st_uid == geteuid()) {
+ // a bogus non-root user creates the file
+ if (os::Posix::matches_effective_uid_or_root(st.st_uid)) {
init();
log_trace(attach)("Attach triggered by %s", fn);
return true;
diff --git a/src/hotspot/os/bsd/os_bsd.cpp b/src/hotspot/os/bsd/os_bsd.cpp
index f33ed42d75e..ba11ba4a48c 100644
--- a/src/hotspot/os/bsd/os_bsd.cpp
+++ b/src/hotspot/os/bsd/os_bsd.cpp
@@ -49,7 +49,7 @@
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/semaphore.hpp"
diff --git a/src/hotspot/os/linux/attachListener_linux.cpp b/src/hotspot/os/linux/attachListener_linux.cpp
index 93ae6ab58ea..d79bd1bac85 100644
--- a/src/hotspot/os/linux/attachListener_linux.cpp
+++ b/src/hotspot/os/linux/attachListener_linux.cpp
@@ -357,11 +357,10 @@ LinuxAttachOperation* LinuxAttachListener::dequeue() {
::close(s);
continue;
}
- uid_t euid = geteuid();
- gid_t egid = getegid();
- if (cred_info.uid != euid || cred_info.gid != egid) {
- log_debug(attach)("euid/egid check failed (%d/%d vs %d/%d)", cred_info.uid, cred_info.gid, euid, egid);
+ if (!os::Posix::matches_effective_uid_and_gid_or_root(cred_info.uid, cred_info.gid)) {
+ log_debug(attach)("euid/egid check failed (%d/%d vs %d/%d)",
+ cred_info.uid, cred_info.gid, geteuid(), getegid());
::close(s);
continue;
}
@@ -518,8 +517,8 @@ bool AttachListener::is_init_trigger() {
}
if (ret == 0) {
// simple check to avoid starting the attach mechanism when
- // a bogus user creates the file
- if (st.st_uid == geteuid()) {
+ // a bogus non-root user creates the file
+ if (os::Posix::matches_effective_uid_or_root(st.st_uid)) {
init();
log_trace(attach)("Attach triggered by %s", fn);
return true;
diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp
index ab19d1da824..b3d07ade19f 100644
--- a/src/hotspot/os/linux/os_linux.cpp
+++ b/src/hotspot/os/linux/os_linux.cpp
@@ -51,7 +51,7 @@
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
@@ -1988,6 +1988,8 @@ void os::print_os_info(outputStream* st) {
os::Linux::print_full_memory_info(st);
+ os::Linux::print_proc_sys_info(st);
+
os::Linux::print_container_info(st);
}
@@ -2120,6 +2122,24 @@ void os::Linux::print_libversion_info(outputStream* st) {
st->cr();
}
+void os::Linux::print_proc_sys_info(outputStream* st) {
+ st->cr();
+ st->print_cr("/proc/sys/kernel/threads-max (system-wide limit on the number of threads):");
+ _print_ascii_file("/proc/sys/kernel/threads-max", st);
+ st->cr();
+ st->cr();
+
+ st->print_cr("/proc/sys/vm/max_map_count (maximum number of memory map areas a process may have):");
+ _print_ascii_file("/proc/sys/vm/max_map_count", st);
+ st->cr();
+ st->cr();
+
+ st->print_cr("/proc/sys/kernel/pid_max (system-wide limit on number of process identifiers):");
+ _print_ascii_file("/proc/sys/kernel/pid_max", st);
+ st->cr();
+ st->cr();
+}
+
void os::Linux::print_full_memory_info(outputStream* st) {
st->print("\n/proc/meminfo:\n");
_print_ascii_file("/proc/meminfo", st);
@@ -3106,7 +3126,10 @@ static address get_stack_commited_bottom(address bottom, size_t size) {
bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
int mincore_return_value;
const size_t stripe = 1024; // query this many pages each time
- unsigned char vec[stripe];
+ unsigned char vec[stripe + 1];
+ // set a guard byte past the end; mincore() must never write to it
+ vec[stripe] = 'X';
+
const size_t page_sz = os::vm_page_size();
size_t pages = size / page_sz;
@@ -3118,7 +3141,9 @@ bool os::committed_in_range(address start, size_t size, address& committed_start
int loops = (pages + stripe - 1) / stripe;
int committed_pages = 0;
address loop_base = start;
- for (int index = 0; index < loops; index ++) {
+ bool found_range = false;
+
+ for (int index = 0; index < loops && !found_range; index ++) {
assert(pages > 0, "Nothing to do");
int pages_to_query = (pages >= stripe) ? stripe : pages;
pages -= pages_to_query;
@@ -3133,12 +3158,14 @@ bool os::committed_in_range(address start, size_t size, address& committed_start
return false;
}
+ assert(vec[stripe] == 'X', "overflow guard");
assert(mincore_return_value == 0, "Range must be valid");
// Process this stripe
for (int vecIdx = 0; vecIdx < pages_to_query; vecIdx ++) {
if ((vec[vecIdx] & 0x01) == 0) { // not committed
// End of current contiguous region
if (committed_start != NULL) {
+ found_range = true;
break;
}
} else { // committed
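The guard byte works because mincore() writes exactly one status byte per page of the queried range; if the kernel ever wrote more than the caller sized for, the sentinel is clobbered and the new assert fires. A standalone sketch of the idea (illustrative, not HotSpot code; assumes pages <= stripe):

    #include <sys/mman.h>
    #include <cassert>

    static bool first_page_committed(void* addr, size_t pages, size_t page_sz) {
      const size_t stripe = 1024;
      unsigned char vec[stripe + 1];
      vec[stripe] = 'X';                              // guard byte
      if (::mincore(addr, pages * page_sz, vec) != 0) {
        return false;                                 // query failed
      }
      assert(vec[stripe] == 'X');                     // kernel stayed in bounds
      return (vec[0] & 0x01) != 0;                    // bit 0: page in core
    }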
diff --git a/src/hotspot/os/linux/os_linux.hpp b/src/hotspot/os/linux/os_linux.hpp
index b97f66627ca..14c3484d29a 100644
--- a/src/hotspot/os/linux/os_linux.hpp
+++ b/src/hotspot/os/linux/os_linux.hpp
@@ -113,6 +113,7 @@ class Linux {
static void print_container_info(outputStream* st);
static void print_distro_info(outputStream* st);
static void print_libversion_info(outputStream* st);
+ static void print_proc_sys_info(outputStream* st);
public:
static bool _stack_is_executable;
diff --git a/src/hotspot/os/posix/os_posix.cpp b/src/hotspot/os/posix/os_posix.cpp
index 623a0e39404..074499a16d3 100644
--- a/src/hotspot/os/posix/os_posix.cpp
+++ b/src/hotspot/os/posix/os_posix.cpp
@@ -51,6 +51,8 @@
#endif
#define IS_VALID_PID(p) (p > 0 && p < MAX_PID)
+#define ROOT_UID 0
+
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
@@ -1454,6 +1456,18 @@ size_t os::Posix::get_initial_stack_size(ThreadType thr_type, size_t req_stack_s
return stack_size;
}
+bool os::Posix::is_root(uid_t uid) {
+ return ROOT_UID == uid;
+}
+
+bool os::Posix::matches_effective_uid_or_root(uid_t uid) {
+ return is_root(uid) || geteuid() == uid;
+}
+
+bool os::Posix::matches_effective_uid_and_gid_or_root(uid_t uid, gid_t gid) {
+ return is_root(uid) || (geteuid() == uid && getegid() == gid);
+}
+
Thread* os::ThreadCrashProtection::_protected_thread = NULL;
os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;
volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0;
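These helpers centralize the policy change made in the attach-listener hunks above: root may always attach or trigger initialization, while any other peer must match our effective uid/gid. A usage sketch mirroring those call sites:

    // Reject the peer unless it is root or matches our effective ids.
    if (!os::Posix::matches_effective_uid_and_gid_or_root(peer_uid, peer_gid)) {
      log_debug(attach)("euid/egid check failed");
      ::close(s);
      continue;
    }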
diff --git a/src/hotspot/os/posix/os_posix.hpp b/src/hotspot/os/posix/os_posix.hpp
index 7ce25c2a616..ef6f4b6b1bd 100644
--- a/src/hotspot/os/posix/os_posix.hpp
+++ b/src/hotspot/os/posix/os_posix.hpp
@@ -106,6 +106,16 @@ public:
// On error, it will return NULL and set errno. The content of 'outbuf' is undefined.
// On truncation error ('outbuf' too small), it will return NULL and set errno to ENAMETOOLONG.
static char* realpath(const char* filename, char* outbuf, size_t outbuflen);
+
+ // Returns true if given uid is root.
+ static bool is_root(uid_t uid);
+
+ // Returns true if the given uid is root or matches the effective uid.
+ static bool matches_effective_uid_or_root(uid_t uid);
+
+ // Returns true if the given uid is root, or if the given uid and gid
+ // match the effective uid and gid.
+ static bool matches_effective_uid_and_gid_or_root(uid_t uid, gid_t gid);
};
// On POSIX platforms the signal handler is global so we just do the write.
diff --git a/src/hotspot/os/solaris/attachListener_solaris.cpp b/src/hotspot/os/solaris/attachListener_solaris.cpp
index 521d5e7c686..3f1a68a7122 100644
--- a/src/hotspot/os/solaris/attachListener_solaris.cpp
+++ b/src/hotspot/os/solaris/attachListener_solaris.cpp
@@ -213,16 +213,12 @@ static int check_credentials() {
return -1; // unable to get them, deny
}
- // get our euid/eguid (probably could cache these)
- uid_t euid = geteuid();
- gid_t egid = getegid();
-
// get euid/egid from ucred_free
uid_t ucred_euid = ucred_geteuid(cred_info);
gid_t ucred_egid = ucred_getegid(cred_info);
// check that the effective uid/gid matches
- if (ucred_euid == euid && ucred_egid == egid) {
+ if (os::Posix::matches_effective_uid_and_gid_or_root(ucred_euid, ucred_egid)) {
ret = 0; // allow
}
@@ -664,8 +660,8 @@ bool AttachListener::is_init_trigger() {
}
if (ret == 0) {
// simple check to avoid starting the attach mechanism when
- // a bogus user creates the file
- if (st.st_uid == geteuid()) {
+ // a bogus non-root user creates the file
+ if (os::Posix::matches_effective_uid_or_root(st.st_uid)) {
init();
log_trace(attach)("Attach triggered by %s", fn);
return true;
diff --git a/src/hotspot/os/solaris/os_solaris.cpp b/src/hotspot/os/solaris/os_solaris.cpp
index 01a3c292d1b..d8c56353760 100644
--- a/src/hotspot/os/solaris/os_solaris.cpp
+++ b/src/hotspot/os/solaris/os_solaris.cpp
@@ -49,7 +49,7 @@
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
diff --git a/src/hotspot/os/windows/os_perf_windows.cpp b/src/hotspot/os/windows/os_perf_windows.cpp
index 88d3f314f22..03df2941abc 100644
--- a/src/hotspot/os/windows/os_perf_windows.cpp
+++ b/src/hotspot/os/windows/os_perf_windows.cpp
@@ -118,6 +118,11 @@ typedef struct {
bool initialized;
} MultiCounterQuerySetS, *MultiCounterQuerySetP;
+typedef struct {
+ MultiCounterQuerySetS set;
+ int process_index;
+} ProcessQueryS, *ProcessQueryP;
+
static void pdh_cleanup(HQUERY* const query, HCOUNTER* const counter) {
if (counter != NULL && *counter != NULL) {
PdhDll::PdhRemoveCounter(*counter);
@@ -158,7 +163,7 @@ static void destroy_counter_query(MultiCounterQueryP counter_query) {
}
}
-static void destroy_counter_query(MultiCounterQuerySetP counter_query_set) {
+static void destroy_multi_counter_query(MultiCounterQuerySetP counter_query_set) {
for (int i = 0; i < counter_query_set->size; i++) {
for (int j = 0; j < counter_query_set->queries[i].noOfCounters; ++j) {
pdh_cleanup(NULL, &counter_query_set->queries[i].counters[j]);
@@ -167,9 +172,18 @@ static void destroy_counter_query(MultiCounterQuerySetP counter_query_set) {
pdh_cleanup(&counter_query_set->queries[i].query.query, NULL);
}
FREE_C_HEAP_ARRAY(MultiCounterQueryS, counter_query_set->queries);
+}
+
+static void destroy_counter_query(MultiCounterQuerySetP counter_query_set) {
+ destroy_multi_counter_query(counter_query_set);
FREE_C_HEAP_ARRAY(MultiCounterQuerySetS, counter_query_set);
}
+static void destroy_counter_query(ProcessQueryP process_query) {
+ destroy_multi_counter_query(&process_query->set);
+ FREE_C_HEAP_ARRAY(ProcessQueryS, process_query);
+}
+
static int open_query(HQUERY* query) {
return PdhDll::PdhOpenQuery(NULL, 0, query);
}
@@ -204,6 +218,11 @@ static int allocate_counters(MultiCounterQuerySetP query_set, size_t nofCounters
return OS_OK;
}
+static int allocate_counters(ProcessQueryP process_query, size_t nofCounters) {
+ assert(process_query != NULL, "invariant");
+ return allocate_counters(&process_query->set, nofCounters);
+}
+
static void deallocate_counters(MultiCounterQueryP query) {
if (query->counters != NULL) {
FREE_C_HEAP_ARRAY(char, query->counters);
@@ -261,7 +280,6 @@ static OSReturn add_counter(CounterQueryP counter_query, const char* path, bool
}
static OSReturn add_process_counter(MultiCounterQueryP query, int slot_index, const char* path, bool first_sample_on_init) {
- assert(query != NULL, "invariant");
assert(query != NULL, "invariant");
assert(slot_index < query->noOfCounters, "invariant");
assert(query->counters[slot_index] == NULL, "invariant");
@@ -326,13 +344,15 @@ static int formatted_counter_value(HCOUNTER counter, DWORD format, PDH_FMT_COUNT
* (in order to keep this index valid when the list resets from underneath,
* ensure to call current_query_index_for_process() before every query involving
* Process object instance data).
+*
+* If unable to query, returns OS_ERR (-1).
*/
static int current_query_index_for_process() {
assert(process_image_name != NULL, "invariant");
assert(pdh_IDProcess_counter_fmt != NULL, "invariant");
HQUERY tmpQuery = NULL;
if (open_query(&tmpQuery) != ERROR_SUCCESS) {
- return 0;
+ return OS_ERR;
}
char counter[512];
HCOUNTER handle_counter = NULL;
@@ -342,12 +362,12 @@ static int current_query_index_for_process() {
assert(strlen(counter) < sizeof(counter), "invariant");
if (PdhDll::PdhAddCounter(tmpQuery, counter, 0, &handle_counter) != ERROR_SUCCESS) {
pdh_cleanup(&tmpQuery, &handle_counter);
- return 0;
+ return OS_ERR;
}
const PDH_STATUS res = PdhDll::PdhCollectQueryData(tmpQuery);
if (res == PDH_INVALID_HANDLE || res == PDH_NO_DATA) {
pdh_cleanup(&tmpQuery, &handle_counter);
- return 0;
+ return OS_ERR;
} else {
PDH_FMT_COUNTERVALUE counter_value;
formatted_counter_value(handle_counter, PDH_FMT_LONG, &counter_value);
@@ -359,24 +379,28 @@ static int current_query_index_for_process() {
}
}
pdh_cleanup(&tmpQuery, NULL);
- return 0;
+ return OS_ERR;
}
-static MultiCounterQuerySetP create_process_counter_query() {
- MultiCounterQuerySetP const query = NEW_C_HEAP_ARRAY(MultiCounterQuerySetS, 1, mtInternal);
- memset(query, 0, sizeof(MultiCounterQuerySetS));
+static ProcessQueryP create_process_query() {
const int current_process_idx = current_query_index_for_process();
- query->queries = NEW_C_HEAP_ARRAY(MultiCounterQueryS, current_process_idx + 1, mtInternal);
- memset(query->queries, 0, sizeof(MultiCounterQueryS) * (current_process_idx + 1));
- query->size = current_process_idx + 1;
- return query;
+ if (OS_ERR == current_process_idx) {
+ return NULL;
+ }
+ ProcessQueryP const process_query = NEW_C_HEAP_ARRAY(ProcessQueryS, 1, mtInternal);
+ memset(process_query, 0, sizeof(ProcessQueryS));
+ process_query->set.queries = NEW_C_HEAP_ARRAY(MultiCounterQueryS, current_process_idx + 1, mtInternal);
+ memset(process_query->set.queries, 0, sizeof(MultiCounterQueryS) * (current_process_idx + 1));
+ process_query->process_index = current_process_idx;
+ process_query->set.size = current_process_idx + 1;
+ assert(process_query->set.size > process_query->process_index, "invariant");
+ return process_query;
}
-static MultiCounterQueryP current_process_counter_query(MultiCounterQuerySetP process_query_set) {
- assert(process_query_set != NULL, "invariant");
- const int current_query_index = current_query_index_for_process();
- assert(current_query_index < process_query_set->size, "invariant");
- return &process_query_set->queries[current_query_index];
+static MultiCounterQueryP current_process_counter_query(ProcessQueryP process_query) {
+ assert(process_query != NULL, "invariant");
+ assert(process_query->process_index < process_query->set.size, "invariant");
+ return &process_query->set.queries[process_query->process_index];
}
static void clear_multi_counter(MultiCounterQueryP query) {
@@ -384,19 +408,46 @@ static void clear_multi_counter(MultiCounterQueryP query) {
pdh_cleanup(NULL, &query->counters[i]);
}
pdh_cleanup(&query->query.query, NULL);
+ query->initialized = false;
}
-static int collect_process_query_data(MultiCounterQuerySetP counter_query_set) {
- const int current_process_idx = current_query_index_for_process();
- while (current_process_idx < counter_query_set->size - 1) {
- const int new_size = --counter_query_set->size;
- clear_multi_counter(&counter_query_set->queries[new_size]);
+static int ensure_valid_process_query_index(ProcessQueryP process_query) {
+ assert(process_query != NULL, "invariant");
+ const int previous_process_idx = process_query->process_index;
+ if (previous_process_idx == 0) {
+ return previous_process_idx;
}
- return collect_query_data(&counter_query_set->queries[current_process_idx]);
+ const int current_process_idx = current_query_index_for_process();
+ if (current_process_idx == previous_process_idx || OS_ERR == current_process_idx ||
+ current_process_idx >= process_query->set.size) {
+ return previous_process_idx;
+ }
+
+ assert(current_process_idx >= 0 && current_process_idx < process_query->set.size, "out of bounds!");
+ while (current_process_idx < process_query->set.size - 1) {
+ const int new_size = --process_query->set.size;
+ clear_multi_counter(&process_query->set.queries[new_size]);
+ }
+ assert(current_process_idx < process_query->set.size, "invariant");
+ process_query->process_index = current_process_idx;
+ return current_process_idx;
}
-static int query_process_counter(MultiCounterQuerySetP process_query_set, int slot_index, DWORD format, PDH_FMT_COUNTERVALUE* const value) {
- MultiCounterQueryP const current_query = current_process_counter_query(process_query_set);
+static MultiCounterQueryP current_process_query(ProcessQueryP process_query) {
+ assert(process_query != NULL, "invariant");
+ const int current_process_idx = ensure_valid_process_query_index(process_query);
+ assert(current_process_idx == process_query->process_index, "invariant");
+ assert(current_process_idx < process_query->set.size, "invariant");
+ return &process_query->set.queries[current_process_idx];
+}
+
+static int collect_process_query_data(ProcessQueryP process_query) {
+ assert(process_query != NULL, "invariant");
+ return collect_query_data(current_process_query(process_query));
+}
+
+static int query_process_counter(ProcessQueryP process_query, int slot_index, DWORD format, PDH_FMT_COUNTERVALUE* const value) {
+ MultiCounterQueryP const current_query = current_process_counter_query(process_query);
assert(current_query != NULL, "invariant");
assert(slot_index < current_query->noOfCounters, "invariant");
assert(current_query->counters[slot_index] != NULL, "invariant");
@@ -810,7 +861,7 @@ static int initialize_cpu_query(MultiCounterQueryP cpu_query, DWORD pdh_counter_
return initialize_cpu_query_counters(cpu_query, pdh_counter_idx);
}
-static int initialize_process_counter(MultiCounterQuerySetP query_set, int slot_index, DWORD pdh_counter_index) {
+static int initialize_process_counter(ProcessQueryP process_query, int slot_index, DWORD pdh_counter_index) {
char* localized_process_object;
if (lookup_name_by_index(PDH_PROCESS_IDX, &localized_process_object) != OS_OK) {
return OS_ERR;
@@ -821,7 +872,7 @@ static int initialize_process_counter(MultiCounterQuerySetP query_set, int slot_
return OS_ERR;
}
assert(localized_counter_name != NULL, "invariant");
- for (int i = 0; i < query_set->size; ++i) {
+ for (int i = 0; i < process_query->set.size; ++i) {
char instanceIndexBuffer[32];
const char* counter_path = make_fully_qualified_counter_path(localized_process_object,
localized_counter_name,
@@ -830,7 +881,7 @@ static int initialize_process_counter(MultiCounterQuerySetP query_set, int slot_
if (counter_path == NULL) {
return OS_ERR;
}
- MultiCounterQueryP const query = &query_set->queries[i];
+ MultiCounterQueryP const query = &process_query->set.queries[i];
if (add_process_counter(query, slot_index, counter_path, true)) {
return OS_ERR;
}
@@ -839,8 +890,9 @@ static int initialize_process_counter(MultiCounterQuerySetP query_set, int slot_
}
static CounterQueryP create_counter_query(DWORD pdh_object_idx, DWORD pdh_counter_idx) {
- assert(is_valid_pdh_index(pdh_object_idx), "invariant");
- assert(is_valid_pdh_index(pdh_counter_idx), "invariant");
+  if (!(is_valid_pdh_index(pdh_object_idx) && is_valid_pdh_index(pdh_counter_idx))) {
+ return NULL;
+ }
CounterQueryP const query = create_counter_query();
const char* object = pdh_localized_artifact(pdh_object_idx);
assert(object != NULL, "invariant");
@@ -917,7 +969,7 @@ class CPUPerformanceInterface::CPUPerformance : public CHeapObj<mtInternal> {
friend class CPUPerformanceInterface;
private:
CounterQueryP _context_switches;
- MultiCounterQuerySetP _process_cpu_load;
+ ProcessQueryP _process_cpu_load;
MultiCounterQueryP _machine_cpu_load;
int cpu_load(int which_logical_cpu, double* cpu_load);
@@ -963,34 +1015,28 @@ CPUPerformanceInterface::CPUPerformance::CPUPerformance() : _context_switches(NU
bool CPUPerformanceInterface::CPUPerformance::initialize() {
if (!pdh_acquire()) {
- return false;
+ return true;
}
_context_switches = create_counter_query(PDH_SYSTEM_IDX, PDH_CONTEXT_SWITCH_RATE_IDX);
- if (_context_switches == NULL) {
- return false;
- }
- _process_cpu_load = create_process_counter_query();
+ _process_cpu_load = create_process_query();
if (_process_cpu_load == NULL) {
- return false;
+ return true;
}
if (allocate_counters(_process_cpu_load, 2) != OS_OK) {
- return false;
+ return true;
}
if (initialize_process_counter(_process_cpu_load, 0, PDH_PROCESSOR_TIME_IDX) != OS_OK) {
- return false;
+ return true;
}
if (initialize_process_counter(_process_cpu_load, 1, PDH_PRIV_PROCESSOR_TIME_IDX) != OS_OK) {
- return false;
+ return true;
}
- _process_cpu_load->initialized = true;
-
+ _process_cpu_load->set.initialized = true;
_machine_cpu_load = create_multi_counter_query();
if (_machine_cpu_load == NULL) {
- return false;
- }
- if (initialize_cpu_query(_machine_cpu_load, PDH_PROCESSOR_TIME_IDX) != OS_OK) {
- return false;
+ return true;
}
+ initialize_cpu_query(_machine_cpu_load, PDH_PROCESSOR_TIME_IDX);
return true;
}
@@ -1044,12 +1090,13 @@ int CPUPerformanceInterface::cpu_loads_process(double* pjvmUserLoad,
}
int CPUPerformanceInterface::CPUPerformance::cpu_load(int which_logical_cpu, double* cpu_load) {
- assert(_machine_cpu_load != NULL, "invariant");
- assert(which_logical_cpu < _machine_cpu_load->noOfCounters, "invariant");
*cpu_load = .0;
- if (!_machine_cpu_load->initialized) {
+ if (_machine_cpu_load == NULL || !_machine_cpu_load->initialized) {
return OS_ERR;
}
+ assert(_machine_cpu_load != NULL, "invariant");
+ assert(which_logical_cpu < _machine_cpu_load->noOfCounters, "invariant");
+
if (collect_query_data(_machine_cpu_load)) {
return OS_ERR;
}
@@ -1062,11 +1109,11 @@ int CPUPerformanceInterface::CPUPerformance::cpu_load(int which_logical_cpu, dou
}
int CPUPerformanceInterface::CPUPerformance::cpu_load_total_process(double* cpu_load) {
- assert(_process_cpu_load != NULL, "invariant");
*cpu_load = .0;
- if (!_process_cpu_load->initialized) {
+ if (_process_cpu_load == NULL || !_process_cpu_load->set.initialized) {
return OS_ERR;
}
+ assert(_process_cpu_load != NULL, "invariant");
if (collect_process_query_data(_process_cpu_load)) {
return OS_ERR;
}
@@ -1090,9 +1137,11 @@ int CPUPerformanceInterface::CPUPerformance::cpu_loads_process(double* pjvmUserL
*pjvmUserLoad = .0;
*pjvmKernelLoad = .0;
*psystemTotalLoad = .0;
- if (!_process_cpu_load->initialized) {
+
+ if (_process_cpu_load == NULL || !_process_cpu_load->set.initialized) {
return OS_ERR;
}
+ assert(_process_cpu_load != NULL, "invariant");
if (collect_process_query_data(_process_cpu_load)) {
return OS_ERR;
}
@@ -1138,9 +1187,10 @@ int CPUPerformanceInterface::CPUPerformance::cpu_loads_process(double* pjvmUserL
int CPUPerformanceInterface::CPUPerformance::context_switch_rate(double* rate) {
assert(rate != NULL, "invariant");
*rate = .0;
- if (!_context_switches->initialized) {
+ if (_context_switches == NULL || !_context_switches->initialized) {
return OS_ERR;
}
+ assert(_context_switches != NULL, "invariant");
if (collect_query_data(_context_switches) != OS_OK) {
return OS_ERR;
}
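
Note: the reworked process-query code above wraps the standard PDH open/add/collect/format cycle. The extra complication is that the PDH instance index of the current process ("java#N") shifts downwards whenever an older java process exits, which is what ensure_valid_process_query_index() compensates for by trimming the query set, and why initialize() now degrades gracefully (returning true with unused counters left NULL) instead of failing hard. For orientation, a minimal standalone sketch of the underlying PDH cycle, with an illustrative counter path and collapsed error handling (link against pdh.lib):

    #include <windows.h>
    #include <pdh.h>

    // Open a query, add one counter, collect twice (rate counters need two
    // samples), then format the value. This is the cycle that
    // collect_query_data() and query_process_counter() drive above for
    // whole counter sets at once.
    static int sample_total_cpu(double* out) {
      PDH_HQUERY query = NULL;
      PDH_HCOUNTER counter = NULL;
      if (PdhOpenQuery(NULL, 0, &query) != ERROR_SUCCESS) {
        return -1;
      }
      if (PdhAddCounter(query, TEXT("\\Processor(_Total)\\% Processor Time"), 0, &counter) != ERROR_SUCCESS ||
          PdhCollectQueryData(query) != ERROR_SUCCESS) {
        PdhCloseQuery(query);
        return -1;
      }
      Sleep(1000);  // give the rate counter a second sample interval
      PDH_FMT_COUNTERVALUE value;
      if (PdhCollectQueryData(query) != ERROR_SUCCESS ||
          PdhGetFormattedCounterValue(counter, PDH_FMT_DOUBLE, NULL, &value) != ERROR_SUCCESS) {
        PdhCloseQuery(query);
        return -1;
      }
      *out = value.doubleValue;
      PdhCloseQuery(query);
      return 0;
    }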
diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp
index 81adb9e32e4..00fc92384ac 100644
--- a/src/hotspot/os/windows/os_windows.cpp
+++ b/src/hotspot/os/windows/os_windows.cpp
@@ -52,7 +52,7 @@
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
diff --git a/src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.inline.hpp b/src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.hpp
similarity index 94%
rename from src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.inline.hpp
rename to src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.hpp
index 070c2169461..81427eb8ca5 100644
--- a/src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.inline.hpp
+++ b/src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.hpp
@@ -23,10 +23,10 @@
*
*/
-#ifndef OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_INLINE_HPP
-#define OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_INLINE_HPP
+#ifndef OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_HPP
+#define OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_HPP
-#include "runtime/orderAccess.hpp"
+// Included in orderAccess.hpp header file.
// Compiler version last used for testing: xlc 12
// Please update this information when this file changes
@@ -90,4 +90,4 @@ struct OrderAccess::PlatformOrderedLoad
#undef inlasm_eieio
#undef inlasm_isync
-#endif // OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_INLINE_HPP
+#endif // OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_HPP
diff --git a/src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.inline.hpp b/src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.hpp
similarity index 86%
rename from src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.inline.hpp
rename to src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.hpp
index 0e13f1c75c3..7a59abe3a80 100644
--- a/src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.inline.hpp
+++ b/src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.hpp
@@ -22,12 +22,10 @@
*
*/
-#ifndef OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP
-#define OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP
+#ifndef OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_HPP
+#define OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_HPP
-#include "runtime/atomic.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/os.hpp"
+// Included in orderAccess.hpp header file.
// Compiler version last used for testing: clang 5.1
// Please update this information when this file changes
@@ -52,14 +50,12 @@ inline void OrderAccess::acquire() { compiler_barrier(); }
inline void OrderAccess::release() { compiler_barrier(); }
inline void OrderAccess::fence() {
- if (os::is_MP()) {
- // always use locked addl since mfence is sometimes expensive
+ // always use locked addl since mfence is sometimes expensive
#ifdef AMD64
- __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
+ __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
- __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
+ __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
- }
compiler_barrier();
}
@@ -113,4 +109,4 @@ struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
};
#endif // AMD64
-#endif // OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP
+#endif // OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_HPP
diff --git a/src/hotspot/os_cpu/bsd_zero/orderAccess_bsd_zero.inline.hpp b/src/hotspot/os_cpu/bsd_zero/orderAccess_bsd_zero.hpp
similarity index 89%
rename from src/hotspot/os_cpu/bsd_zero/orderAccess_bsd_zero.inline.hpp
rename to src/hotspot/os_cpu/bsd_zero/orderAccess_bsd_zero.hpp
index 96ea19a4a70..532b48be46d 100644
--- a/src/hotspot/os_cpu/bsd_zero/orderAccess_bsd_zero.inline.hpp
+++ b/src/hotspot/os_cpu/bsd_zero/orderAccess_bsd_zero.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -23,10 +23,10 @@
*
*/
-#ifndef OS_CPU_BSD_ZERO_VM_ORDERACCESS_BSD_ZERO_INLINE_HPP
-#define OS_CPU_BSD_ZERO_VM_ORDERACCESS_BSD_ZERO_INLINE_HPP
+#ifndef OS_CPU_BSD_ZERO_VM_ORDERACCESS_BSD_ZERO_HPP
+#define OS_CPU_BSD_ZERO_VM_ORDERACCESS_BSD_ZERO_HPP
-#include "runtime/orderAccess.hpp"
+// Included in orderAccess.hpp header file.
#ifdef ARM
@@ -74,4 +74,4 @@ inline void OrderAccess::acquire() { LIGHT_MEM_BARRIER; }
inline void OrderAccess::release() { LIGHT_MEM_BARRIER; }
inline void OrderAccess::fence() { FULL_MEM_BARRIER; }
-#endif // OS_CPU_BSD_ZERO_VM_ORDERACCESS_BSD_ZERO_INLINE_HPP
+#endif // OS_CPU_BSD_ZERO_VM_ORDERACCESS_BSD_ZERO_HPP
diff --git a/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.inline.hpp b/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.hpp
similarity index 88%
rename from src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.inline.hpp
rename to src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.hpp
index ac065069ebe..b733db19577 100644
--- a/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.inline.hpp
+++ b/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.hpp
@@ -23,12 +23,11 @@
*
*/
-#ifndef OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_INLINE_HPP
-#define OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_INLINE_HPP
+#ifndef OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_HPP
+#define OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_HPP
+
+// Included in orderAccess.hpp header file.
-#include "runtime/atomic.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/os.hpp"
#include "vm_version_aarch64.hpp"
// Implementation of class OrderAccess.
@@ -71,4 +70,4 @@ struct OrderAccess::PlatformOrderedStore
void operator()(T v, volatile T* p) const { release_store(p, v); fence(); }
};
-#endif // OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_INLINE_HPP
+#endif // OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_HPP
diff --git a/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.inline.hpp b/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.hpp
similarity index 95%
rename from src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.inline.hpp
rename to src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.hpp
index 3784b640c2a..9e6bda2654f 100644
--- a/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.inline.hpp
+++ b/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.hpp
@@ -22,16 +22,17 @@
*
*/
-#ifndef OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_INLINE_HPP
-#define OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_INLINE_HPP
+#ifndef OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_HPP
+#define OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_HPP
+
+// Included in orderAccess.hpp header file.
-#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "vm_version_arm.hpp"
// Implementation of class OrderAccess.
// - we define the high level barriers below and use the general
-// implementation in orderAccess.inline.hpp, with customizations
+// implementation in orderAccess.hpp, with customizations
// on AARCH64 via the specialized_* template functions
// Memory Ordering on ARM is weak.
@@ -53,7 +54,7 @@
// __asm__ volatile (
// "mcr p15, 0, %0, c7, c10, 4"
// : : "r" (dummy) : "memory");
-// }
+// }
// }
inline static void dmb_sy() {
@@ -244,4 +245,4 @@ struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
#endif // AARCH64
-#endif // OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_INLINE_HPP
+#endif // OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_HPP
diff --git a/src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.inline.hpp b/src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.hpp
similarity index 94%
rename from src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.inline.hpp
rename to src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.hpp
index 12139ce2f7b..92ce8261fd2 100644
--- a/src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.inline.hpp
+++ b/src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.hpp
@@ -23,10 +23,10 @@
*
*/
-#ifndef OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_INLINE_HPP
-#define OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_INLINE_HPP
+#ifndef OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_HPP
+#define OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_HPP
-#include "runtime/orderAccess.hpp"
+// Included in orderAccess.hpp header file.
#ifndef PPC64
#error "OrderAccess currently only implemented for PPC64"
@@ -94,4 +94,4 @@ struct OrderAccess::PlatformOrderedLoad
#undef inlasm_isync
#undef inlasm_acquire_reg
-#endif // OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_INLINE_HPP
+#endif // OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_HPP
diff --git a/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.inline.hpp b/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.hpp
similarity index 93%
rename from src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.inline.hpp
rename to src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.hpp
index 067878501f6..daa13cec655 100644
--- a/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.inline.hpp
+++ b/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.hpp
@@ -23,10 +23,11 @@
*
*/
-#ifndef OS_CPU_LINUX_S390_VM_ORDERACCESS_LINUX_S390_INLINE_HPP
-#define OS_CPU_LINUX_S390_VM_ORDERACCESS_LINUX_S390_INLINE_HPP
+#ifndef OS_CPU_LINUX_S390_VM_ORDERACCESS_LINUX_S390_HPP
+#define OS_CPU_LINUX_S390_VM_ORDERACCESS_LINUX_S390_HPP
+
+// Included in orderAccess.hpp header file.
-#include "runtime/orderAccess.hpp"
#include "vm_version_s390.hpp"
// Implementation of class OrderAccess.
@@ -87,4 +88,4 @@ struct OrderAccess::PlatformOrderedLoad
#undef inlasm_zarch_acquire
#undef inlasm_zarch_fence
-#endif // OS_CPU_LINUX_S390_VM_ORDERACCESS_LINUX_S390_INLINE_HPP
+#endif // OS_CPU_LINUX_S390_VM_ORDERACCESS_LINUX_S390_HPP
diff --git a/src/hotspot/os_cpu/linux_sparc/orderAccess_linux_sparc.inline.hpp b/src/hotspot/os_cpu/linux_sparc/orderAccess_linux_sparc.hpp
similarity index 85%
rename from src/hotspot/os_cpu/linux_sparc/orderAccess_linux_sparc.inline.hpp
rename to src/hotspot/os_cpu/linux_sparc/orderAccess_linux_sparc.hpp
index c9fde925f7e..5ac28841849 100644
--- a/src/hotspot/os_cpu/linux_sparc/orderAccess_linux_sparc.inline.hpp
+++ b/src/hotspot/os_cpu/linux_sparc/orderAccess_linux_sparc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,10 +22,10 @@
*
*/
-#ifndef OS_CPU_LINUX_SPARC_VM_ORDERACCESS_LINUX_SPARC_INLINE_HPP
-#define OS_CPU_LINUX_SPARC_VM_ORDERACCESS_LINUX_SPARC_INLINE_HPP
+#ifndef OS_CPU_LINUX_SPARC_VM_ORDERACCESS_LINUX_SPARC_HPP
+#define OS_CPU_LINUX_SPARC_VM_ORDERACCESS_LINUX_SPARC_HPP
-#include "runtime/orderAccess.hpp"
+// Included in orderAccess.hpp header file.
// Implementation of class OrderAccess.
@@ -48,4 +48,4 @@ inline void OrderAccess::fence() {
__asm__ volatile ("membar #StoreLoad" : : : "memory");
}
-#endif // OS_CPU_LINUX_SPARC_VM_ORDERACCESS_LINUX_SPARC_INLINE_HPP
+#endif // OS_CPU_LINUX_SPARC_VM_ORDERACCESS_LINUX_SPARC_HPP
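
Note: all of the renames above follow a single pattern: the per-platform orderAccess files no longer include runtime/orderAccess.hpp themselves (the "// Included in orderAccess.hpp header file." marker replaces the include), and the shared header pulls them in instead. A hedged sketch of the resulting include structure, assuming the usual OS_CPU_HEADER dispatch macro from utilities/macros.hpp:

    // runtime/orderAccess.hpp (shape only, members elided)
    #ifndef SHARE_VM_RUNTIME_ORDERACCESS_HPP
    #define SHARE_VM_RUNTIME_ORDERACCESS_HPP

    #include "utilities/macros.hpp"

    class OrderAccess {
     public:
      static void acquire();
      static void release();
      static void fence();
      // ... load_acquire/release_store templates ...
    };

    // Platform specializations (e.g. orderAccess_linux_x86.hpp) are included
    // here at the end, instead of each of them including this header.
    #include OS_CPU_HEADER(orderAccess)

    #endif // SHARE_VM_RUNTIME_ORDERACCESS_HPP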
diff --git a/src/hotspot/os_cpu/linux_x86/gc/z/zAddress_linux_x86.inline.hpp b/src/hotspot/os_cpu/linux_x86/gc/z/zAddress_linux_x86.inline.hpp
new file mode 100644
index 00000000000..3dfea712969
--- /dev/null
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zAddress_linux_x86.inline.hpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_CPU_LINUX_X86_ZADDRESS_LINUX_X86_INLINE_HPP
+#define OS_CPU_LINUX_X86_ZADDRESS_LINUX_X86_INLINE_HPP
+
+inline uintptr_t ZAddress::address(uintptr_t value) {
+ return value;
+}
+
+#endif // OS_CPU_LINUX_X86_ZADDRESS_LINUX_X86_INLINE_HPP
diff --git a/src/hotspot/os_cpu/linux_x86/gc/z/zBackingFile_linux_x86.cpp b/src/hotspot/os_cpu/linux_x86/gc/z/zBackingFile_linux_x86.cpp
new file mode 100644
index 00000000000..90c8f425a10
--- /dev/null
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zBackingFile_linux_x86.cpp
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zArray.inline.hpp"
+#include "gc/z/zBackingFile_linux_x86.hpp"
+#include "gc/z/zBackingPath_linux_x86.hpp"
+#include "gc/z/zErrno.hpp"
+#include "gc/z/zLargePages.inline.hpp"
+#include "logging/log.hpp"
+#include "runtime/init.hpp"
+#include "runtime/os.hpp"
+#include "utilities/align.hpp"
+#include "utilities/debug.hpp"
+
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/statfs.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+// Filesystem names
+#define ZFILESYSTEM_TMPFS "tmpfs"
+#define ZFILESYSTEM_HUGETLBFS "hugetlbfs"
+
+// Sysfs file for transparent huge page on tmpfs
+#define ZFILENAME_SHMEM_ENABLED "/sys/kernel/mm/transparent_hugepage/shmem_enabled"
+
+// Default mount points
+#define ZMOUNTPOINT_TMPFS "/dev/shm"
+#define ZMOUNTPOINT_HUGETLBFS "/hugepages"
+
+// Java heap filename
+#define ZFILENAME_HEAP "java_heap"
+
+// Support for building on older Linux systems
+#ifndef __NR_memfd_create
+#define __NR_memfd_create 319
+#endif
+#ifndef MFD_CLOEXEC
+#define MFD_CLOEXEC 0x0001U
+#endif
+#ifndef MFD_HUGETLB
+#define MFD_HUGETLB 0x0004U
+#endif
+#ifndef O_CLOEXEC
+#define O_CLOEXEC 02000000
+#endif
+#ifndef O_TMPFILE
+#define O_TMPFILE (020000000 | O_DIRECTORY)
+#endif
+
+// Filesystem types, see statfs(2)
+#ifndef TMPFS_MAGIC
+#define TMPFS_MAGIC 0x01021994
+#endif
+#ifndef HUGETLBFS_MAGIC
+#define HUGETLBFS_MAGIC 0x958458f6
+#endif
+
+static int z_memfd_create(const char *name, unsigned int flags) {
+ return syscall(__NR_memfd_create, name, flags);
+}
+
+ZBackingFile::ZBackingFile() :
+ _fd(-1),
+ _filesystem(0),
+ _initialized(false) {
+
+ // Create backing file
+ _fd = create_fd(ZFILENAME_HEAP);
+ if (_fd == -1) {
+ return;
+ }
+
+ // Get filesystem type
+ struct statfs statfs_buf;
+ if (fstatfs(_fd, &statfs_buf) == -1) {
+ ZErrno err;
+ log_error(gc, init)("Failed to determine filesystem type for backing file (%s)", err.to_string());
+ return;
+ }
+ _filesystem = statfs_buf.f_type;
+
+ // Make sure we're on a supported filesystem
+ if (!is_tmpfs() && !is_hugetlbfs()) {
+ log_error(gc, init)("Backing file must be located on a %s or a %s filesystem", ZFILESYSTEM_TMPFS, ZFILESYSTEM_HUGETLBFS);
+ return;
+ }
+
+ // Make sure the filesystem type matches requested large page type
+ if (ZLargePages::is_transparent() && !is_tmpfs()) {
+ log_error(gc, init)("-XX:+UseTransparentHugePages can only be enable when using a %s filesystem", ZFILESYSTEM_TMPFS);
+ return;
+ }
+
+ if (ZLargePages::is_transparent() && !tmpfs_supports_transparent_huge_pages()) {
+ log_error(gc, init)("-XX:+UseTransparentHugePages on a %s filesystem not supported by kernel", ZFILESYSTEM_TMPFS);
+ return;
+ }
+
+ if (ZLargePages::is_explicit() && !is_hugetlbfs()) {
+ log_error(gc, init)("-XX:+UseLargePages (without -XX:+UseTransparentHugePages) can only be enabled when using a %s filesystem", ZFILESYSTEM_HUGETLBFS);
+ return;
+ }
+
+ if (!ZLargePages::is_explicit() && is_hugetlbfs()) {
+ log_error(gc, init)("-XX:+UseLargePages must be enabled when using a %s filesystem", ZFILESYSTEM_HUGETLBFS);
+ return;
+ }
+
+ // Successfully initialized
+ _initialized = true;
+}
+
+int ZBackingFile::create_mem_fd(const char* name) const {
+ // Create file name
+ char filename[PATH_MAX];
+ snprintf(filename, sizeof(filename), "%s%s", name, ZLargePages::is_explicit() ? ".hugetlb" : "");
+
+ // Create file
+ const int extra_flags = ZLargePages::is_explicit() ? MFD_HUGETLB : 0;
+ const int fd = z_memfd_create(filename, MFD_CLOEXEC | extra_flags);
+ if (fd == -1) {
+ ZErrno err;
+ log_debug(gc, init)("Failed to create memfd file (%s)",
+ ((UseLargePages && err == EINVAL) ? "Hugepages not supported" : err.to_string()));
+ return -1;
+ }
+
+ log_debug(gc, init)("Heap backed by file /memfd:%s", filename);
+
+ return fd;
+}
+
+int ZBackingFile::create_file_fd(const char* name) const {
+ const char* const filesystem = ZLargePages::is_explicit() ? ZFILESYSTEM_HUGETLBFS : ZFILESYSTEM_TMPFS;
+ const char* const mountpoint = ZLargePages::is_explicit() ? ZMOUNTPOINT_HUGETLBFS : ZMOUNTPOINT_TMPFS;
+
+ // Find mountpoint
+ ZBackingPath path(filesystem, mountpoint);
+ if (path.get() == NULL) {
+ log_error(gc, init)("Use -XX:ZPath to specify the path to a %s filesystem", filesystem);
+ return -1;
+ }
+
+ // Try to create an anonymous file using the O_TMPFILE flag. Note that this
+ // flag requires kernel >= 3.11. If this fails we fall back to open/unlink.
+ const int fd_anon = open(path.get(), O_TMPFILE|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
+ if (fd_anon == -1) {
+ ZErrno err;
+ log_debug(gc, init)("Failed to create anonymouns file in %s (%s)", path.get(),
+ (err == EINVAL ? "Not supported" : err.to_string()));
+ } else {
+ // Get inode number for anonymous file
+ struct stat stat_buf;
+ if (fstat(fd_anon, &stat_buf) == -1) {
+ ZErrno err;
+ log_error(gc, init)("Failed to determine inode number for anonymous file (%s)", err.to_string());
+ return -1;
+ }
+
+ log_debug(gc, init)("Heap backed by file %s/#" UINT64_FORMAT, path.get(), (uint64_t)stat_buf.st_ino);
+
+ return fd_anon;
+ }
+
+ log_debug(gc, init)("Falling back to open/unlink");
+
+ // Create file name
+ char filename[PATH_MAX];
+ snprintf(filename, sizeof(filename), "%s/%s.%d", path.get(), name, os::current_process_id());
+
+ // Create file
+ const int fd = open(filename, O_CREAT|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
+ if (fd == -1) {
+ ZErrno err;
+ log_error(gc, init)("Failed to create file %s (%s)", filename, err.to_string());
+ return -1;
+ }
+
+ // Unlink file
+ if (unlink(filename) == -1) {
+ ZErrno err;
+ log_error(gc, init)("Failed to unlink file %s (%s)", filename, err.to_string());
+ return -1;
+ }
+
+ log_debug(gc, init)("Heap backed by file %s", filename);
+
+ return fd;
+}
+
+int ZBackingFile::create_fd(const char* name) const {
+ if (ZPath == NULL) {
+ // If the path is not explicitly specified, then we first try to create a memfd file
+    // instead of looking for a tmpfs/hugetlbfs mount point. Note that memfd_create() might
+ // not be supported at all (requires kernel >= 3.17), or it might not support large
+ // pages (requires kernel >= 4.14). If memfd_create() fails, then we try to create a
+ // file on an accessible tmpfs or hugetlbfs mount point.
+ const int fd = create_mem_fd(name);
+ if (fd != -1) {
+ return fd;
+ }
+
+ log_debug(gc, init)("Falling back to searching for an accessible moint point");
+ }
+
+ return create_file_fd(name);
+}
+
+bool ZBackingFile::is_initialized() const {
+ return _initialized;
+}
+
+int ZBackingFile::fd() const {
+ return _fd;
+}
+
+bool ZBackingFile::is_tmpfs() const {
+ return _filesystem == TMPFS_MAGIC;
+}
+
+bool ZBackingFile::is_hugetlbfs() const {
+ return _filesystem == HUGETLBFS_MAGIC;
+}
+
+bool ZBackingFile::tmpfs_supports_transparent_huge_pages() const {
+ // If the shmem_enabled file exists and is readable then we
+ // know the kernel supports transparent huge pages for tmpfs.
+ return access(ZFILENAME_SHMEM_ENABLED, R_OK) == 0;
+}
+
+bool ZBackingFile::try_split_and_expand_tmpfs(size_t offset, size_t length, size_t alignment) const {
+ // Try first smaller part.
+ const size_t offset0 = offset;
+ const size_t length0 = align_up(length / 2, alignment);
+ if (!try_expand_tmpfs(offset0, length0, alignment)) {
+ return false;
+ }
+
+ // Try second smaller part.
+ const size_t offset1 = offset0 + length0;
+ const size_t length1 = length - length0;
+ if (!try_expand_tmpfs(offset1, length1, alignment)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool ZBackingFile::try_expand_tmpfs(size_t offset, size_t length, size_t alignment) const {
+ assert(length > 0, "Invalid length");
+ assert(is_aligned(length, alignment), "Invalid length");
+
+ ZErrno err = posix_fallocate(_fd, offset, length);
+
+ if (err == EINTR && length > alignment) {
+ // Calling posix_fallocate() with a large length can take a long
+ // time to complete. When running profilers, such as VTune, this
+ // syscall will be constantly interrupted by signals. Expanding
+ // the file in smaller steps avoids this problem.
+ return try_split_and_expand_tmpfs(offset, length, alignment);
+ }
+
+ if (err) {
+ log_error(gc)("Failed to allocate backing file (%s)", err.to_string());
+ return false;
+ }
+
+ return true;
+}
+
+bool ZBackingFile::expand_tmpfs(size_t offset, size_t length) const {
+ assert(is_tmpfs(), "Wrong filesystem");
+ return try_expand_tmpfs(offset, length, os::vm_page_size());
+}
+
+bool ZBackingFile::expand_hugetlbfs(size_t offset, size_t length) const {
+ assert(is_hugetlbfs(), "Wrong filesystem");
+
+ // Prior to kernel 4.3, hugetlbfs did not support posix_fallocate().
+ // Instead of posix_fallocate() we can use a well-known workaround,
+  // which involves truncating the file to the requested size and then
+  // trying to map it to verify that there are enough huge pages available to
+ // back it.
+ while (ftruncate(_fd, offset + length) == -1) {
+ ZErrno err;
+ if (err != EINTR) {
+ log_error(gc)("Failed to truncate backing file (%s)", err.to_string());
+ return false;
+ }
+ }
+
+ // If we fail mapping during initialization, i.e. when we are pre-mapping
+ // the heap, then we wait and retry a few times before giving up. Otherwise
+ // there is a risk that running JVMs back-to-back will fail, since there
+ // is a delay between process termination and the huge pages owned by that
+ // process being returned to the huge page pool and made available for new
+ // allocations.
+ void* addr = MAP_FAILED;
+ const int max_attempts = 3;
+ for (int attempt = 1; attempt <= max_attempts; attempt++) {
+ addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
+ if (addr != MAP_FAILED || is_init_completed()) {
+ // Mapping was successful or initialization phase has completed
+ break;
+ }
+
+ ZErrno err;
+ log_debug(gc)("Failed to map backing file (%s), attempt %d of %d",
+ err.to_string(), attempt, max_attempts);
+
+ // Wait and retry in one second, in the hope that
+ // huge pages will be available by then.
+ sleep(1);
+ }
+
+ if (addr == MAP_FAILED) {
+ // Not enough huge pages left
+ ZErrno err;
+ log_error(gc)("Failed to map backing file (%s)", err.to_string());
+ return false;
+ }
+
+ // Successful mapping, unmap again. From now on the pages we mapped
+ // will be reserved for this file.
+ if (munmap(addr, length) == -1) {
+ ZErrno err;
+ log_error(gc)("Failed to unmap backing file (%s)", err.to_string());
+ return false;
+ }
+
+ return true;
+}
+
+bool ZBackingFile::expand(size_t offset, size_t length) const {
+ return is_hugetlbfs() ? expand_hugetlbfs(offset, length) : expand_tmpfs(offset, length);
+}
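
Note: try_expand_tmpfs()/try_split_and_expand_tmpfs() above are worth a standalone illustration: posix_fallocate() returns the error number directly (it does not set errno), and one large allocation can be interrupted indefinitely under heavy signal traffic (e.g. profilers), so the range is recursively halved until each piece completes. A minimal sketch of the same idea, with the alignment handling simplified:

    #include <errno.h>
    #include <fcntl.h>

    // Recursively halve the range whenever posix_fallocate() reports EINTR,
    // mirroring try_split_and_expand_tmpfs() above. 'length' is assumed to
    // be a positive multiple of 'align'.
    static bool expand_in_steps(int fd, off_t offset, off_t length, off_t align) {
      const int err = posix_fallocate(fd, offset, length);
      if (err == 0) {
        return true;
      }
      if (err == EINTR && length > align) {
        // First half rounded up to 'align', remainder handled by the second call.
        const off_t first = ((length / 2) + align - 1) / align * align;
        return expand_in_steps(fd, offset, first, align) &&
               expand_in_steps(fd, offset + first, length - first, align);
      }
      return false;
    }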
diff --git a/src/hotspot/os_cpu/linux_x86/gc/z/zBackingFile_linux_x86.hpp b/src/hotspot/os_cpu/linux_x86/gc/z/zBackingFile_linux_x86.hpp
new file mode 100644
index 00000000000..cbc75982833
--- /dev/null
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zBackingFile_linux_x86.hpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_CPU_LINUX_X86_ZBACKINGFILE_LINUX_X86_HPP
+#define OS_CPU_LINUX_X86_ZBACKINGFILE_LINUX_X86_HPP
+
+#include "memory/allocation.hpp"
+
+class ZBackingFile {
+private:
+ int _fd;
+ uint64_t _filesystem;
+ bool _initialized;
+
+ int create_mem_fd(const char* name) const;
+ int create_file_fd(const char* name) const;
+ int create_fd(const char* name) const;
+
+ bool is_tmpfs() const;
+ bool is_hugetlbfs() const;
+ bool tmpfs_supports_transparent_huge_pages() const;
+
+ bool try_split_and_expand_tmpfs(size_t offset, size_t length, size_t alignment) const;
+ bool try_expand_tmpfs(size_t offset, size_t length, size_t alignment) const;
+ bool expand_tmpfs(size_t offset, size_t length) const;
+
+ bool expand_hugetlbfs(size_t offset, size_t length) const;
+
+public:
+ ZBackingFile();
+
+ bool is_initialized() const;
+
+ int fd() const;
+ bool expand(size_t offset, size_t length) const;
+};
+
+#endif // OS_CPU_LINUX_X86_ZBACKINGFILE_LINUX_X86_HPP
diff --git a/src/hotspot/os_cpu/linux_x86/gc/z/zBackingPath_linux_x86.cpp b/src/hotspot/os_cpu/linux_x86/gc/z/zBackingPath_linux_x86.cpp
new file mode 100644
index 00000000000..b5bd39a7f86
--- /dev/null
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zBackingPath_linux_x86.cpp
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zArray.inline.hpp"
+#include "gc/z/zBackingPath_linux_x86.hpp"
+#include "gc/z/zErrno.hpp"
+#include "logging/log.hpp"
+
+#include <stdio.h>
+#include <unistd.h>
+
+// Mount information, see proc(5) for more details.
+#define PROC_SELF_MOUNTINFO "/proc/self/mountinfo"
+
+ZBackingPath::ZBackingPath(const char* filesystem, const char* preferred_path) {
+ if (ZPath != NULL) {
+ // Use specified path
+ _path = strdup(ZPath);
+ } else {
+ // Find suitable path
+ _path = find_mountpoint(filesystem, preferred_path);
+ }
+}
+
+ZBackingPath::~ZBackingPath() {
+ free(_path);
+ _path = NULL;
+}
+
+char* ZBackingPath::get_mountpoint(const char* line, const char* filesystem) const {
+ char* line_mountpoint = NULL;
+ char* line_filesystem = NULL;
+
+ // Parse line and return a newly allocated string containing the mountpoint if
+ // the line contains a matching filesystem and the mountpoint is accessible by
+ // the current user.
+ if (sscanf(line, "%*u %*u %*u:%*u %*s %ms %*[^-]- %ms", &line_mountpoint, &line_filesystem) != 2 ||
+ strcmp(line_filesystem, filesystem) != 0 ||
+ access(line_mountpoint, R_OK|W_OK|X_OK) != 0) {
+ // Not a matching or accessible filesystem
+ free(line_mountpoint);
+ line_mountpoint = NULL;
+ }
+
+ free(line_filesystem);
+
+ return line_mountpoint;
+}
+
+void ZBackingPath::get_mountpoints(ZArray<char*>* mountpoints, const char* filesystem) const {
+ FILE* fd = fopen(PROC_SELF_MOUNTINFO, "r");
+ if (fd == NULL) {
+ ZErrno err;
+ log_error(gc, init)("Failed to open %s: %s", PROC_SELF_MOUNTINFO, err.to_string());
+ return;
+ }
+
+ char* line = NULL;
+ size_t length = 0;
+
+ while (getline(&line, &length, fd) != -1) {
+ char* const mountpoint = get_mountpoint(line, filesystem);
+ if (mountpoint != NULL) {
+ mountpoints->add(mountpoint);
+ }
+ }
+
+ free(line);
+ fclose(fd);
+}
+
+void ZBackingPath::free_mountpoints(ZArray<char*>* mountpoints) const {
+  ZArrayIterator<char*> iter(mountpoints);
+ for (char* mountpoint; iter.next(&mountpoint);) {
+ free(mountpoint);
+ }
+ mountpoints->clear();
+}
+
+char* ZBackingPath::find_mountpoint(const char* filesystem, const char* preferred_mountpoint) const {
+ char* path = NULL;
+  ZArray<char*> mountpoints;
+
+ get_mountpoints(&mountpoints, filesystem);
+
+ if (mountpoints.size() == 0) {
+ // No filesystem found
+ log_error(gc, init)("Failed to find an accessible %s filesystem", filesystem);
+ } else if (mountpoints.size() == 1) {
+ // One filesystem found
+ path = strdup(mountpoints.at(0));
+ } else if (mountpoints.size() > 1) {
+ // More than one filesystem found
+ ZArrayIterator iter(&mountpoints);
+ for (char* mountpoint; iter.next(&mountpoint);) {
+ if (!strcmp(mountpoint, preferred_mountpoint)) {
+ // Preferred mount point found
+ path = strdup(mountpoint);
+ break;
+ }
+ }
+
+ if (path == NULL) {
+ // Preferred mount point not found
+ log_error(gc, init)("More than one %s filesystem found:", filesystem);
+ ZArrayIterator iter2(&mountpoints);
+ for (char* mountpoint; iter2.next(&mountpoint);) {
+ log_error(gc, init)(" %s", mountpoint);
+ }
+ }
+ }
+
+ free_mountpoints(&mountpoints);
+
+ return path;
+}
+
+const char* ZBackingPath::get() const {
+ return _path;
+}
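
Note: the sscanf format in get_mountpoint() above follows the mountinfo layout documented in proc(5), where a line such as

    36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue

carries the mount point in the fifth field and the filesystem type right after the "-" separator; the %ms conversion (POSIX.1-2008) makes sscanf allocate the matched strings. A standalone sketch of the same parse, with the accessibility check omitted:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    // Returns a malloc'ed mount point if 'line' is a mountinfo entry for
    // 'wanted_fs' and NULL otherwise; the caller frees the result.
    static char* parse_mountpoint(const char* line, const char* wanted_fs) {
      char* mountpoint = NULL;
      char* fstype = NULL;
      if (sscanf(line, "%*u %*u %*u:%*u %*s %ms %*[^-]- %ms", &mountpoint, &fstype) == 2 &&
          strcmp(fstype, wanted_fs) == 0) {
        free(fstype);
        return mountpoint;
      }
      free(mountpoint);
      free(fstype);
      return NULL;
    }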
diff --git a/src/hotspot/os_cpu/linux_x86/gc/z/zBackingPath_linux_x86.hpp b/src/hotspot/os_cpu/linux_x86/gc/z/zBackingPath_linux_x86.hpp
new file mode 100644
index 00000000000..51d393a1d16
--- /dev/null
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zBackingPath_linux_x86.hpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_CPU_LINUX_X86_ZBACKINGPATH_LINUX_X86_HPP
+#define OS_CPU_LINUX_X86_ZBACKINGPATH_LINUX_X86_HPP
+
+#include "gc/z/zArray.hpp"
+#include "memory/allocation.hpp"
+
+class ZBackingPath : public StackObj {
+private:
+ char* _path;
+
+ char* get_mountpoint(const char* line, const char* filesystem) const;
+  void get_mountpoints(ZArray<char*>* mountpoints, const char* filesystem) const;
+  void free_mountpoints(ZArray<char*>* mountpoints) const;
+ char* find_mountpoint(const char* filesystem, const char* preferred_mountpoint) const;
+
+public:
+ ZBackingPath(const char* filesystem, const char* preferred_path);
+ ~ZBackingPath();
+
+ const char* get() const;
+};
+
+#endif // OS_CPU_LINUX_X86_ZBACKINGPATH_LINUX_X86_HPP
diff --git a/src/hotspot/os_cpu/linux_x86/gc/z/zGlobals_linux_x86.cpp b/src/hotspot/os_cpu/linux_x86/gc/z/zGlobals_linux_x86.cpp
new file mode 100644
index 00000000000..5d248099379
--- /dev/null
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zGlobals_linux_x86.cpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zGlobals.hpp"
+
+uintptr_t ZAddressReservedStart() {
+ return ZAddressMetadataMarked0;
+}
+
+uintptr_t ZAddressReservedEnd() {
+ return ZAddressMetadataRemapped + ZAddressOffsetMax;
+}
diff --git a/src/hotspot/os_cpu/linux_x86/gc/z/zGlobals_linux_x86.hpp b/src/hotspot/os_cpu/linux_x86/gc/z/zGlobals_linux_x86.hpp
new file mode 100644
index 00000000000..2b0fa83c1ad
--- /dev/null
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zGlobals_linux_x86.hpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_CPU_LINUX_X86_ZGLOBALS_LINUX_X86_HPP
+#define OS_CPU_LINUX_X86_ZGLOBALS_LINUX_X86_HPP
+
+//
+// Page Allocation Tiers
+// ---------------------
+//
+// Page Type     Page Size     Object Size Limit     Object Alignment
+// ------------------------------------------------------------------
+// Small         2M            <= 256K               <MinObjAlignmentInBytes>
+// Medium        32M           <= 4M                 4K
+// Large         X*M           > 4M                  2M
+// ------------------------------------------------------------------
+//
+//
+// Address Space & Pointer Layout
+// ------------------------------
+//
+// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
+// .                                .
+// .                                .
+// .                                .
+// +--------------------------------+ 0x0000140000000000 (20TB)
+// |         Remapped View          |
+// +--------------------------------+ 0x0000100000000000 (16TB)
+// |     (Reserved, but unused)     |
+// +--------------------------------+ 0x00000c0000000000 (12TB)
+// |          Marked1 View          |
+// +--------------------------------+ 0x0000080000000000 (8TB)
+// |          Marked0 View          |
+// +--------------------------------+ 0x0000040000000000 (4TB)
+// .                                .
+// +--------------------------------+ 0x0000000000000000
+//
+//
+//  6                 4 4 4  4 4                                             0
+//  3                 7 6 5  2 1                                             0
+// +-------------------+-+----+-----------------------------------------------+
+// |00000000 00000000 0|0|1111|11 11111111 11111111 11111111 11111111 11111111|
+// +-------------------+-+----+-----------------------------------------------+
+// |                   | |    |
+// |                   | |    * 41-0 Object Offset (42-bits, 4TB address space)
+// |                   | |
+// |                   | * 45-42 Metadata Bits (4-bits)  0001 = Marked0      (Address view 4-8TB)
+// |                   |                                 0010 = Marked1      (Address view 8-12TB)
+// |                   |                                 0100 = Remapped     (Address view 16-20TB)
+// |                   |                                 1000 = Finalizable  (Address view N/A)
+// |                   |
+// |                   * 46-46 Unused (1-bit, always zero)
+// |
+// * 63-47 Fixed (17-bits, always zero)
+//
+
+const size_t ZPlatformPageSizeSmallShift = 21; // 2M
+
+const size_t ZPlatformAddressOffsetBits = 42; // 4TB
+
+const uintptr_t ZPlatformAddressMetadataShift = ZPlatformAddressOffsetBits;
+
+const uintptr_t ZPlatformAddressSpaceStart = (uintptr_t)1 << ZPlatformAddressOffsetBits;
+const uintptr_t ZPlatformAddressSpaceSize = ((uintptr_t)1 << ZPlatformAddressOffsetBits) * 4;
+
+const size_t ZPlatformCacheLineSize = 64;
+
+#endif // OS_CPU_LINUX_X86_ZGLOBALS_LINUX_X86_HPP
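
Note: a quick check of the constants against the diagram: ZPlatformAddressOffsetBits = 42 gives a 4TB offset space, and each metadata bit selects the view one power of two higher, so the view bases land exactly on the boundaries drawn above (illustrative assertions, not part of the patch):

    #include <cstdint>

    static_assert(((uintptr_t)1 << 42) == 0x0000040000000000ULL, "Marked0 view base, 4TB");
    static_assert(((uintptr_t)1 << 43) == 0x0000080000000000ULL, "Marked1 view base, 8TB");
    static_assert(((uintptr_t)1 << 44) == 0x0000100000000000ULL, "Remapped view base, 16TB");
    // ZPlatformAddressSpaceStart (4TB) plus ZPlatformAddressSpaceSize
    // (4 * 4TB) ends at 20TB, the top of the Remapped view above.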
diff --git a/src/hotspot/os_cpu/linux_x86/gc/z/zLargePages_linux_x86.cpp b/src/hotspot/os_cpu/linux_x86/gc/z/zLargePages_linux_x86.cpp
new file mode 100644
index 00000000000..c79195cd118
--- /dev/null
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zLargePages_linux_x86.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zLargePages.hpp"
+#include "runtime/globals.hpp"
+
+void ZLargePages::initialize_platform() {
+ if (UseLargePages) {
+ if (UseTransparentHugePages) {
+ _state = Transparent;
+ } else {
+ _state = Explicit;
+ }
+ } else {
+ _state = Disabled;
+ }
+}
diff --git a/src/hotspot/os_cpu/linux_x86/gc/z/zNUMA_linux_x86.cpp b/src/hotspot/os_cpu/linux_x86/gc/z/zNUMA_linux_x86.cpp
new file mode 100644
index 00000000000..1fc2dc2c0b8
--- /dev/null
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zNUMA_linux_x86.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "gc/z/zErrno.hpp"
+#include "gc/z/zCPU.hpp"
+#include "gc/z/zNUMA.hpp"
+#include "runtime/os.hpp"
+#include "utilities/debug.hpp"
+
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#ifndef MPOL_F_NODE
+#define MPOL_F_NODE (1<<0) /* return next IL mode instead of node mask */
+#endif
+
+#ifndef MPOL_F_ADDR
+#define MPOL_F_ADDR (1<<1) /* look up vma using address */
+#endif
+
+static int z_get_mempolicy(uint32_t* mode, const unsigned long *nmask, unsigned long maxnode, uintptr_t addr, int flags) {
+ return syscall(__NR_get_mempolicy, mode, nmask, maxnode, addr, flags);
+}
+
+void ZNUMA::initialize_platform() {
+ _enabled = UseNUMA;
+}
+
+uint32_t ZNUMA::count() {
+ if (!_enabled) {
+ // NUMA support not enabled
+ return 1;
+ }
+
+ return os::Linux::numa_max_node() + 1;
+}
+
+uint32_t ZNUMA::id() {
+ if (!_enabled) {
+ // NUMA support not enabled
+ return 0;
+ }
+
+ return os::Linux::get_node_by_cpu(ZCPU::id());
+}
+
+uint32_t ZNUMA::memory_id(uintptr_t addr) {
+ if (!_enabled) {
+ // NUMA support not enabled, assume everything belongs to node zero
+ return 0;
+ }
+
+ uint32_t id = (uint32_t)-1;
+
+ if (z_get_mempolicy(&id, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR) == -1) {
+ ZErrno err;
+ fatal("Failed to get NUMA id for memory at " PTR_FORMAT " (%s)", addr, err.to_string());
+ }
+
+ assert(id < count(), "Invalid NUMA id");
+
+ return id;
+}
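
Note: memory_id() above relies on a less common get_mempolicy(2) mode: with MPOL_F_NODE | MPOL_F_ADDR the kernel returns, through the first argument, the id of the NUMA node backing the page at the given address; the page must already be faulted in to have a home node. A standalone sketch of the same call:

    #include <stdio.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef MPOL_F_NODE
    #define MPOL_F_NODE (1<<0)
    #endif
    #ifndef MPOL_F_ADDR
    #define MPOL_F_ADDR (1<<1)
    #endif

    int main() {
      long* p = new long(1);  // allocate and fault the page in
      uint32_t node = 0;
      if (syscall(__NR_get_mempolicy, &node, NULL, 0, (unsigned long)p,
                  MPOL_F_NODE | MPOL_F_ADDR) == -1) {
        perror("get_mempolicy");
        return 1;
      }
      printf("page at %p is on NUMA node %u\n", (void*)p, node);
      delete p;
      return 0;
    }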
diff --git a/src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.cpp b/src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.cpp
new file mode 100644
index 00000000000..549ca9d3dbf
--- /dev/null
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.cpp
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zBackingFile_linux_x86.hpp"
+#include "gc/z/zErrno.hpp"
+#include "gc/z/zLargePages.inline.hpp"
+#include "gc/z/zMemory.hpp"
+#include "gc/z/zNUMA.hpp"
+#include "gc/z/zPhysicalMemory.inline.hpp"
+#include "gc/z/zPhysicalMemoryBacking_linux_x86.hpp"
+#include "logging/log.hpp"
+#include "runtime/os.hpp"
+#include "utilities/align.hpp"
+#include "utilities/debug.hpp"
+
+#include <stdio.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+
+// Support for building on older Linux systems
+#ifndef MADV_HUGEPAGE
+#define MADV_HUGEPAGE 14
+#endif
+
+// Proc file entry for max map count
+#define ZFILENAME_PROC_MAX_MAP_COUNT "/proc/sys/vm/max_map_count"
+
+ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity, size_t granule_size) :
+ _manager(),
+ _file(),
+ _granule_size(granule_size) {
+
+ // Check and warn if max map count seems too low
+ check_max_map_count(max_capacity, granule_size);
+}
+
+void ZPhysicalMemoryBacking::check_max_map_count(size_t max_capacity, size_t granule_size) const {
+ const char* const filename = ZFILENAME_PROC_MAX_MAP_COUNT;
+ FILE* const file = fopen(filename, "r");
+ if (file == NULL) {
+ // Failed to open file, skip check
+ log_debug(gc)("Failed to open %s", filename);
+ return;
+ }
+
+ size_t actual_max_map_count = 0;
+ const int result = fscanf(file, SIZE_FORMAT, &actual_max_map_count);
+ fclose(file);
+ if (result != 1) {
+ // Failed to read file, skip check
+ log_debug(gc)("Failed to read %s", filename);
+ return;
+ }
+
+ // The required max map count is impossible to calculate exactly since subsystems
+ // other than ZGC are also creating memory mappings, and we have no control over that.
+ // However, ZGC tends to create the most mappings and dominate the total count.
+ // In the worst cases, ZGC will map each granule three times, i.e. once per heap view.
+ // We speculate that we need another 20% to allow for non-ZGC subsystems to map memory.
+ const size_t required_max_map_count = (max_capacity / granule_size) * 3 * 1.2;
+ if (actual_max_map_count < required_max_map_count) {
+ log_warning(gc)("The system limit on number of memory mappings "
+ "per process might be too low for the given");
+ log_warning(gc)("Java heap size (" SIZE_FORMAT "M). Please "
+ "adjust %s to allow for at least", max_capacity / M, filename);
+ log_warning(gc)(SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). "
+ "Continuing execution with the current limit could",
+ required_max_map_count, actual_max_map_count);
+ log_warning(gc)("lead to a fatal error down the line, due to failed "
+ "attempts to map memory.");
+ }
+}
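
Note: as a worked example of that sizing rule, a 16G max heap with 2M granules gives 16G / 2M = 8192 granules; mapping each granule in all three heap views takes 3 * 8192 = 24576 mappings, and the 20% headroom raises the requirement to roughly 29492, still well below the common vm.max_map_count default of 65530.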
+
+bool ZPhysicalMemoryBacking::is_initialized() const {
+ return _file.is_initialized();
+}
+
+bool ZPhysicalMemoryBacking::expand(size_t from, size_t to) {
+ const size_t size = to - from;
+
+ // Expand
+ if (!_file.expand(from, size)) {
+ return false;
+ }
+
+ // Add expanded space to free list
+ _manager.free(from, size);
+
+ return true;
+}
+
+ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) {
+ assert(is_aligned(size, _granule_size), "Invalid size");
+
+ ZPhysicalMemory pmem;
+
+ // Allocate segments
+ for (size_t allocated = 0; allocated < size; allocated += _granule_size) {
+ const uintptr_t start = _manager.alloc_from_front(_granule_size);
+ assert(start != UINTPTR_MAX, "Allocation should never fail");
+ pmem.add_segment(ZPhysicalMemorySegment(start, _granule_size));
+ }
+
+ return pmem;
+}
+
+void ZPhysicalMemoryBacking::free(ZPhysicalMemory pmem) {
+ const size_t nsegments = pmem.nsegments();
+
+ // Free segments
+ for (size_t i = 0; i < nsegments; i++) {
+ const ZPhysicalMemorySegment segment = pmem.segment(i);
+ _manager.free(segment.start(), segment.size());
+ }
+}
+
+void ZPhysicalMemoryBacking::map_failed(ZErrno err) const {
+ if (err == ENOMEM) {
+ fatal("Failed to map memory. Please check the system limit on number of "
+ "memory mappings allowed per process (see %s)", ZFILENAME_PROC_MAX_MAP_COUNT);
+ } else {
+ fatal("Failed to map memory (%s)", err.to_string());
+ }
+}
+
+void ZPhysicalMemoryBacking::advise_view(uintptr_t addr, size_t size) const {
+ if (madvise((void*)addr, size, MADV_HUGEPAGE) == -1) {
+ ZErrno err;
+ log_error(gc)("Failed to advise use of transparent huge pages (%s)", err.to_string());
+ }
+}
+
+void ZPhysicalMemoryBacking::pretouch_view(uintptr_t addr, size_t size) const {
+ const size_t page_size = ZLargePages::is_explicit() ? os::large_page_size() : os::vm_page_size();
+ os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
+}
+
+void ZPhysicalMemoryBacking::map_view(ZPhysicalMemory pmem, uintptr_t addr, bool pretouch) const {
+ const size_t nsegments = pmem.nsegments();
+
+ // Map segments
+ for (size_t i = 0; i < nsegments; i++) {
+ const ZPhysicalMemorySegment segment = pmem.segment(i);
+ const size_t size = segment.size();
+ const void* const res = mmap((void*)addr, size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _file.fd(), segment.start());
+ if (res == MAP_FAILED) {
+ ZErrno err;
+ map_failed(err);
+ }
+
+ // Advise on use of transparent huge pages before touching it
+ if (ZLargePages::is_transparent()) {
+ advise_view(addr, size);
+ }
+
+ // NUMA interleave memory before touching it
+ ZNUMA::memory_interleave(addr, size);
+
+ if (pretouch) {
+ pretouch_view(addr, size);
+ }
+
+ addr += size;
+ }
+}
+
+void ZPhysicalMemoryBacking::unmap_view(ZPhysicalMemory pmem, uintptr_t addr) const {
+ // Note that we must keep the address space reservation intact and just detach
+ // the backing memory. For this reason we map a new anonymous, non-accessible
+ // and non-reserved page over the mapping instead of actually unmapping.
+ const size_t size = pmem.size();
+ const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
+ if (res == MAP_FAILED) {
+ ZErrno err;
+ map_failed(err);
+ }
+}
+
+uintptr_t ZPhysicalMemoryBacking::nmt_address(uintptr_t offset) const {
+ // From an NMT point of view we treat the first heap mapping (marked0) as committed
+ return ZAddress::marked0(offset);
+}
+
+void ZPhysicalMemoryBacking::map(ZPhysicalMemory pmem, uintptr_t offset) const {
+ if (ZUnmapBadViews) {
+ // Only map the good view, for debugging only
+ map_view(pmem, ZAddress::good(offset), AlwaysPreTouch);
+ } else {
+ // Map all views
+ map_view(pmem, ZAddress::marked0(offset), AlwaysPreTouch);
+ map_view(pmem, ZAddress::marked1(offset), AlwaysPreTouch);
+ map_view(pmem, ZAddress::remapped(offset), AlwaysPreTouch);
+ }
+}
+
+void ZPhysicalMemoryBacking::unmap(ZPhysicalMemory pmem, uintptr_t offset) const {
+ if (ZUnmapBadViews) {
+ // Only unmap the good view, for debugging only
+ unmap_view(pmem, ZAddress::good(offset));
+ } else {
+ // Unmap all views
+ unmap_view(pmem, ZAddress::marked0(offset));
+ unmap_view(pmem, ZAddress::marked1(offset));
+ unmap_view(pmem, ZAddress::remapped(offset));
+ }
+}
+
+void ZPhysicalMemoryBacking::flip(ZPhysicalMemory pmem, uintptr_t offset) const {
+ assert(ZUnmapBadViews, "Should be enabled");
+ const uintptr_t addr_good = ZAddress::good(offset);
+ const uintptr_t addr_bad = ZAddress::is_marked(ZAddressGoodMask) ? ZAddress::remapped(offset) : ZAddress::marked(offset);
+ // Map/Unmap views
+ map_view(pmem, addr_good, false /* pretouch */);
+ unmap_view(pmem, addr_bad);
+}
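The map_view/unmap_view/flip machinery above depends on mapping the same physical memory, via the backing file descriptor, at several virtual addresses at once (the marked0, marked1 and remapped views). A minimal standalone sketch of that multi-mapping technique, assuming Linux with a recent glibc and using memfd_create as a stand-in for the real ZBackingFile:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main() {
      const size_t size = 4096;
      // Anonymous shared memory object standing in for the heap backing file.
      const int fd = memfd_create("zgc-demo", 0);
      if (fd == -1 || ftruncate(fd, size) == -1) return 1;
      // Map the same physical page at two different virtual addresses.
      char* const view_a = (char*)mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
      char* const view_b = (char*)mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
      if (view_a == MAP_FAILED || view_b == MAP_FAILED) return 1;
      strcpy(view_a, "hello");
      printf("%s\n", view_b); // prints "hello": both views alias the same page
      return 0;
    }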
diff --git a/src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.hpp b/src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.hpp
new file mode 100644
index 00000000000..71c6dc1d9bb
--- /dev/null
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.hpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_CPU_LINUX_X86_ZPHYSICALMEMORYBACKING_LINUX_X86_HPP
+#define OS_CPU_LINUX_X86_ZPHYSICALMEMORYBACKING_LINUX_X86_HPP
+
+#include "gc/z/zBackingFile_linux_x86.hpp"
+#include "gc/z/zMemory.hpp"
+
+class ZErrno;
+class ZPhysicalMemory;
+
+class ZPhysicalMemoryBacking {
+private:
+ ZMemoryManager _manager;
+ ZBackingFile _file;
+ const size_t _granule_size;
+
+ void check_max_map_count(size_t max_capacity, size_t granule_size) const;
+ void map_failed(ZErrno err) const;
+
+ void advise_view(uintptr_t addr, size_t size) const;
+ void pretouch_view(uintptr_t addr, size_t size) const;
+ void map_view(ZPhysicalMemory pmem, uintptr_t addr, bool pretouch) const;
+ void unmap_view(ZPhysicalMemory pmem, uintptr_t addr) const;
+
+public:
+ ZPhysicalMemoryBacking(size_t max_capacity, size_t granule_size);
+
+ bool is_initialized() const;
+
+ bool expand(size_t from, size_t to);
+ ZPhysicalMemory alloc(size_t size);
+ void free(ZPhysicalMemory pmem);
+
+ uintptr_t nmt_address(uintptr_t offset) const;
+
+ void map(ZPhysicalMemory pmem, uintptr_t offset) const;
+ void unmap(ZPhysicalMemory pmem, uintptr_t offset) const;
+ void flip(ZPhysicalMemory pmem, uintptr_t offset) const;
+};
+
+#endif // OS_CPU_LINUX_X86_ZPHYSICALMEMORYBACKING_LINUX_X86_HPP
diff --git a/src/hotspot/os_cpu/linux_x86/gc/z/zVirtualMemory_linux_x86.cpp b/src/hotspot/os_cpu/linux_x86/gc/z/zVirtualMemory_linux_x86.cpp
new file mode 100644
index 00000000000..68df40191a5
--- /dev/null
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zVirtualMemory_linux_x86.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zVirtualMemory.hpp"
+#include "logging/log.hpp"
+
+#include <sys/mman.h>
+#include <sys/types.h>
+
+bool ZVirtualMemoryManager::reserve(uintptr_t start, size_t size) {
+ // Reserve address space
+ const uintptr_t actual_start = (uintptr_t)mmap((void*)start, size, PROT_NONE,
+ MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
+ if (actual_start != start) {
+ log_error(gc)("Failed to reserve address space for Java heap");
+ return false;
+ }
+
+ return true;
+}
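Note that reserve() passes the start address as a hint, without MAP_FIXED, so the kernel may legally place the mapping elsewhere; the code treats any other placement as failure rather than silently accepting a different heap base. A sketch of the same hint-and-verify pattern (the munmap on mismatch is this sketch's addition, not part of the code above):

    #include <sys/mman.h>

    // Try to reserve 'size' bytes of address space exactly at 'hint'.
    static bool reserve_at(void* hint, size_t size) {
      void* const res = mmap(hint, size, PROT_NONE,
                             MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
      if (res == MAP_FAILED) {
        return false;
      }
      if (res != hint) {
        munmap(res, size); // kernel chose a different address; release it
        return false;
      }
      return true;
    }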
diff --git a/src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.inline.hpp b/src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.hpp
similarity index 85%
rename from src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.inline.hpp
rename to src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.hpp
index 2a6d587a2ac..0cf302992c1 100644
--- a/src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.inline.hpp
+++ b/src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.hpp
@@ -22,12 +22,10 @@
*
*/
-#ifndef OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
-#define OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
+#ifndef OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_HPP
+#define OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_HPP
-#include "runtime/atomic.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/os.hpp"
+// Included in orderAccess.hpp header file.
// Compiler version last used for testing: gcc 4.8.2
// Please update this information when this file changes
@@ -48,14 +46,12 @@ inline void OrderAccess::acquire() { compiler_barrier(); }
inline void OrderAccess::release() { compiler_barrier(); }
inline void OrderAccess::fence() {
- if (os::is_MP()) {
- // always use locked addl since mfence is sometimes expensive
+ // always use locked addl since mfence is sometimes expensive
#ifdef AMD64
- __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
+ __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
- __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
+ __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
- }
compiler_barrier();
}
@@ -109,4 +105,4 @@ struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
};
#endif // AMD64
-#endif // OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
+#endif // OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_HPP
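The fence() change above removes the os::is_MP() guard, so the locked add is now emitted unconditionally rather than being skipped on uniprocessors. As a standalone illustration of the same idiom (GCC/Clang inline assembly):

    // Full memory fence via a locked read-modify-write on the stack; on x86
    // this orders all prior loads/stores before all later ones and is often
    // cheaper than mfence.
    static inline void full_fence() {
    #ifdef __x86_64__
      __asm__ volatile("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
    #else
      __asm__ volatile("lock; addl $0,0(%%esp)" : : : "cc", "memory");
    #endif
    }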
diff --git a/src/hotspot/os_cpu/linux_zero/orderAccess_linux_zero.inline.hpp b/src/hotspot/os_cpu/linux_zero/orderAccess_linux_zero.hpp
similarity index 89%
rename from src/hotspot/os_cpu/linux_zero/orderAccess_linux_zero.inline.hpp
rename to src/hotspot/os_cpu/linux_zero/orderAccess_linux_zero.hpp
index 8c4cd1c7a1c..950a4819f36 100644
--- a/src/hotspot/os_cpu/linux_zero/orderAccess_linux_zero.inline.hpp
+++ b/src/hotspot/os_cpu/linux_zero/orderAccess_linux_zero.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -23,10 +23,10 @@
*
*/
-#ifndef OS_CPU_LINUX_ZERO_VM_ORDERACCESS_LINUX_ZERO_INLINE_HPP
-#define OS_CPU_LINUX_ZERO_VM_ORDERACCESS_LINUX_ZERO_INLINE_HPP
+#ifndef OS_CPU_LINUX_ZERO_VM_ORDERACCESS_LINUX_ZERO_HPP
+#define OS_CPU_LINUX_ZERO_VM_ORDERACCESS_LINUX_ZERO_HPP
-#include "runtime/orderAccess.hpp"
+// Included in orderAccess.hpp header file.
#ifdef ARM
@@ -83,4 +83,4 @@ inline void OrderAccess::release() { LIGHT_MEM_BARRIER; }
inline void OrderAccess::fence() { FULL_MEM_BARRIER; }
-#endif // OS_CPU_LINUX_ZERO_VM_ORDERACCESS_LINUX_ZERO_INLINE_HPP
+#endif // OS_CPU_LINUX_ZERO_VM_ORDERACCESS_LINUX_ZERO_HPP
diff --git a/src/hotspot/os_cpu/solaris_sparc/orderAccess_solaris_sparc.inline.hpp b/src/hotspot/os_cpu/solaris_sparc/orderAccess_solaris_sparc.hpp
similarity index 84%
rename from src/hotspot/os_cpu/solaris_sparc/orderAccess_solaris_sparc.inline.hpp
rename to src/hotspot/os_cpu/solaris_sparc/orderAccess_solaris_sparc.hpp
index b60cd092c50..14422eb1252 100644
--- a/src/hotspot/os_cpu/solaris_sparc/orderAccess_solaris_sparc.inline.hpp
+++ b/src/hotspot/os_cpu/solaris_sparc/orderAccess_solaris_sparc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,11 +22,10 @@
*
*/
-#ifndef OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_INLINE_HPP
-#define OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_INLINE_HPP
+#ifndef OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_HPP
+#define OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_HPP
-#include "runtime/atomic.hpp"
-#include "runtime/orderAccess.hpp"
+// Included in orderAccess.hpp header file.
// Compiler version last used for testing: solaris studio 12u3
// Please update this information when this file changes
@@ -52,4 +51,4 @@ inline void OrderAccess::fence() {
__asm__ volatile ("membar #StoreLoad" : : : "memory");
}
-#endif // OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_INLINE_HPP
+#endif // OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_HPP
diff --git a/src/hotspot/os_cpu/solaris_x86/orderAccess_solaris_x86.inline.hpp b/src/hotspot/os_cpu/solaris_x86/orderAccess_solaris_x86.hpp
similarity index 77%
rename from src/hotspot/os_cpu/solaris_x86/orderAccess_solaris_x86.inline.hpp
rename to src/hotspot/os_cpu/solaris_x86/orderAccess_solaris_x86.hpp
index bd676dbe62f..ad18c0f349c 100644
--- a/src/hotspot/os_cpu/solaris_x86/orderAccess_solaris_x86.inline.hpp
+++ b/src/hotspot/os_cpu/solaris_x86/orderAccess_solaris_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,12 +22,10 @@
*
*/
-#ifndef OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP
-#define OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP
+#ifndef OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_HPP
+#define OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_HPP
-#include "runtime/atomic.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/os.hpp"
+// Included in orderAccess.hpp header file.
// Compiler version last used for testing: solaris studio 12u3
// Please update this information when this file changes
@@ -48,14 +46,12 @@ inline void OrderAccess::acquire() { compiler_barrier(); }
inline void OrderAccess::release() { compiler_barrier(); }
inline void OrderAccess::fence() {
- if (os::is_MP()) {
#ifdef AMD64
- __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
+ __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
- __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
+ __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
- }
compiler_barrier();
}
-#endif // OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP
+#endif // OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_HPP
diff --git a/src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.inline.hpp b/src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.hpp
similarity index 90%
rename from src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.inline.hpp
rename to src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.hpp
index d2f39ee21bd..0f06f1b58bd 100644
--- a/src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.inline.hpp
+++ b/src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.hpp
@@ -22,13 +22,12 @@
*
*/
-#ifndef OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP
-#define OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP
+#ifndef OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_HPP
+#define OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_HPP
+
+// Included in orderAccess.hpp header file.
#include <intrin.h>
-#include "runtime/atomic.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/os.hpp"
// Compiler version last used for testing: Microsoft Visual Studio 2010
// Please update this information when this file changes
@@ -63,10 +62,8 @@ inline void OrderAccess::fence() {
#ifdef AMD64
StubRoutines_fence();
#else
- if (os::is_MP()) {
- __asm {
- lock add dword ptr [esp], 0;
- }
+ __asm {
+ lock add dword ptr [esp], 0;
}
#endif // AMD64
compiler_barrier();
@@ -113,4 +110,4 @@ struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
};
#endif // AMD64
-#endif // OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP
+#endif // OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_HPP
diff --git a/src/hotspot/share/adlc/formssel.cpp b/src/hotspot/share/adlc/formssel.cpp
index 60f8c18b39f..cb0c0798325 100644
--- a/src/hotspot/share/adlc/formssel.cpp
+++ b/src/hotspot/share/adlc/formssel.cpp
@@ -2282,6 +2282,9 @@ bool OperandForm::is_bound_register() const {
if (strcmp(name, "RegD") == 0) size = 2;
if (strcmp(name, "RegL") == 0) size = 2;
if (strcmp(name, "RegN") == 0) size = 1;
+ if (strcmp(name, "VecX") == 0) size = 4;
+ if (strcmp(name, "VecY") == 0) size = 8;
+ if (strcmp(name, "VecZ") == 0) size = 16;
if (strcmp(name, "RegP") == 0) size = globalAD->get_preproc_def("_LP64") ? 2 : 1;
if (size == 0) {
return false;
@@ -3509,6 +3512,7 @@ int MatchNode::needs_ideal_memory_edge(FormDict &globals) const {
"ClearArray",
"GetAndSetB", "GetAndSetS", "GetAndAddI", "GetAndSetI", "GetAndSetP",
"GetAndAddB", "GetAndAddS", "GetAndAddL", "GetAndSetL", "GetAndSetN",
+ "LoadBarrierSlowReg", "LoadBarrierWeakSlowReg"
};
int cnt = sizeof(needs_ideal_memory_list)/sizeof(char*);
if( strcmp(_opType,"PrefetchAllocation")==0 )
diff --git a/src/hotspot/share/aot/aotCompiledMethod.cpp b/src/hotspot/share/aot/aotCompiledMethod.cpp
index 619f215eb9c..4c6d47aa801 100644
--- a/src/hotspot/share/aot/aotCompiledMethod.cpp
+++ b/src/hotspot/share/aot/aotCompiledMethod.cpp
@@ -75,7 +75,7 @@ address* AOTCompiledMethod::orig_pc_addr(const frame* fr) {
return (address*) ((address)fr->unextended_sp() + _meta->orig_pc_offset());
}
-bool AOTCompiledMethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) {
+bool AOTCompiledMethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive) {
return false;
}
@@ -245,7 +245,7 @@ bool AOTCompiledMethod::make_entrant() {
// more conservative than for nmethods.
void AOTCompiledMethod::flush_evol_dependents_on(InstanceKlass* dependee) {
if (is_java_method()) {
- cleanup_inline_caches();
+ clear_inline_caches();
mark_for_deoptimization();
make_not_entrant();
}
diff --git a/src/hotspot/share/aot/aotCompiledMethod.hpp b/src/hotspot/share/aot/aotCompiledMethod.hpp
index 9e9a19f0c15..aff03e5db7c 100644
--- a/src/hotspot/share/aot/aotCompiledMethod.hpp
+++ b/src/hotspot/share/aot/aotCompiledMethod.hpp
@@ -284,8 +284,8 @@ private:
bool is_aot_runtime_stub() const { return _method == NULL; }
protected:
- virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred);
- virtual bool do_unloading_jvmci(bool unloading_occurred) { return false; }
+ virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive);
+ virtual bool do_unloading_jvmci() { return false; }
};
diff --git a/src/hotspot/share/asm/codeBuffer.hpp b/src/hotspot/share/asm/codeBuffer.hpp
index f72b00ce090..c7f056fcf3c 100644
--- a/src/hotspot/share/asm/codeBuffer.hpp
+++ b/src/hotspot/share/asm/codeBuffer.hpp
@@ -337,6 +337,7 @@ public:
class CodeBuffer: public StackObj {
friend class CodeSection;
+ friend class StubCodeGenerator;
private:
// CodeBuffers must be allocated on the stack except for a single
diff --git a/src/hotspot/share/c1/c1_LIRAssembler.cpp b/src/hotspot/share/c1/c1_LIRAssembler.cpp
index dcca623dabe..a687a07856f 100644
--- a/src/hotspot/share/c1/c1_LIRAssembler.cpp
+++ b/src/hotspot/share/c1/c1_LIRAssembler.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "asm/assembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
diff --git a/src/hotspot/share/c1/c1_MacroAssembler.hpp b/src/hotspot/share/c1/c1_MacroAssembler.hpp
index c3c4114ee14..0f73a54f0c8 100644
--- a/src/hotspot/share/c1/c1_MacroAssembler.hpp
+++ b/src/hotspot/share/c1/c1_MacroAssembler.hpp
@@ -26,7 +26,6 @@
#define SHARE_VM_C1_C1_MACROASSEMBLER_HPP
#include "asm/macroAssembler.hpp"
-#include "asm/macroAssembler.inline.hpp"
#include "utilities/macros.hpp"
class CodeEmitInfo;
diff --git a/src/hotspot/share/classfile/classLoader.inline.hpp b/src/hotspot/share/classfile/classLoader.inline.hpp
index f32493d80bb..731dc0cb1e8 100644
--- a/src/hotspot/share/classfile/classLoader.inline.hpp
+++ b/src/hotspot/share/classfile/classLoader.inline.hpp
@@ -26,7 +26,7 @@
#define SHARE_VM_CLASSFILE_CLASSLOADER_INLINE_HPP
#include "classfile/classLoader.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
// Next entry in class path
inline ClassPathEntry* ClassPathEntry::next() const { return OrderAccess::load_acquire(&_next); }
diff --git a/src/hotspot/share/classfile/classLoaderData.hpp b/src/hotspot/share/classfile/classLoaderData.hpp
index e88f77ecebf..c44ccc5f4e5 100644
--- a/src/hotspot/share/classfile/classLoaderData.hpp
+++ b/src/hotspot/share/classfile/classLoaderData.hpp
@@ -400,9 +400,16 @@ class ClassLoaderData : public CHeapObj<mtClass> {
static ClassLoaderData* class_loader_data_or_null(oop loader);
static ClassLoaderData* anonymous_class_loader_data(Handle loader);
-
+ // Returns Klass* of associated class loader, or NULL if associated loader is <bootstrap>.
+ // Also works if unloading.
Klass* class_loader_klass() const { return _class_loader_klass; }
+
+ // Returns name of the associated class loader.
+ // Returns NULL if associated class loader is <bootstrap> or if no name has been set for
+ // this loader.
+ // Also works if unloading.
Symbol* class_loader_name() const { return _class_loader_name; }
+
JFR_ONLY(DEFINE_TRACE_ID_METHODS;)
};
diff --git a/src/hotspot/share/classfile/classLoaderExt.cpp b/src/hotspot/share/classfile/classLoaderExt.cpp
index b620f4b3abb..31544c6437b 100644
--- a/src/hotspot/share/classfile/classLoaderExt.cpp
+++ b/src/hotspot/share/classfile/classLoaderExt.cpp
@@ -55,8 +55,10 @@ bool ClassLoaderExt::_has_platform_classes = false;
void ClassLoaderExt::append_boot_classpath(ClassPathEntry* new_entry) {
#if INCLUDE_CDS
- warning("Sharing is only supported for boot loader classes because bootstrap classpath has been appended");
- FileMapInfo::current_info()->header()->set_has_platform_or_app_classes(false);
+ if (UseSharedSpaces) {
+ warning("Sharing is only supported for boot loader classes because bootstrap classpath has been appended");
+ FileMapInfo::current_info()->header()->set_has_platform_or_app_classes(false);
+ }
#endif
ClassLoader::add_to_boot_append_entries(new_entry);
}
diff --git a/src/hotspot/share/classfile/classLoaderHierarchyDCmd.cpp b/src/hotspot/share/classfile/classLoaderHierarchyDCmd.cpp
new file mode 100644
index 00000000000..4a5fc402d1a
--- /dev/null
+++ b/src/hotspot/share/classfile/classLoaderHierarchyDCmd.cpp
@@ -0,0 +1,456 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "classfile/classLoaderData.inline.hpp"
+#include "classfile/classLoaderHierarchyDCmd.hpp"
+#include "memory/allocation.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/safepoint.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/ostream.hpp"
+
+
+ClassLoaderHierarchyDCmd::ClassLoaderHierarchyDCmd(outputStream* output, bool heap)
+ : DCmdWithParser(output, heap)
+ , _show_classes("show-classes", "Print loaded classes.", "BOOLEAN", false, "false")
+ , _verbose("verbose", "Print detailed information.", "BOOLEAN", false, "false") {
+ _dcmdparser.add_dcmd_option(&_show_classes);
+ _dcmdparser.add_dcmd_option(&_verbose);
+}
+
+
+int ClassLoaderHierarchyDCmd::num_arguments() {
+ ResourceMark rm;
+ ClassLoaderHierarchyDCmd* dcmd = new ClassLoaderHierarchyDCmd(NULL, false);
+ if (dcmd != NULL) {
+ DCmdMark mark(dcmd);
+ return dcmd->_dcmdparser.num_arguments();
+ } else {
+ return 0;
+ }
+}
+
+// Helper class for drawing the branches to the left of a node.
+class BranchTracker : public StackObj {
+ // ""
+ // " |---"
+ // " | |
+ // " | "
+ // " | |---
+ // " | |---
+ // ^^^^^^^ ^^^
+ // A B
+
+ // Some terms for the graphics:
+ // - branch: vertical connection between a node's ancestor to a later sibling.
+ // - branchwork: (A) the string to print as a prefix at the start of each line, contains all branches.
+ // - twig (B): Length of the dashed line connecting a node to its branch.
+ // - branch spacing: how many spaces between branches are printed.
+
+public:
+
+ enum { max_depth = 64, twig_len = 2, branch_spacing = 5 };
+
+private:
+
+ char _branches[max_depth];
+ int _pos;
+
+public:
+ BranchTracker()
+ : _pos(0) {}
+
+ void push(bool has_branch) {
+ if (_pos < max_depth) {
+ _branches[_pos] = has_branch ? '|' : ' ';
+ }
+ _pos ++; // beyond max depth, omit branch drawing but keep counting.
+ }
+
+ void pop() {
+ assert(_pos > 0, "must be");
+ _pos --;
+ }
+
+ void print(outputStream* st) {
+ for (int i = 0; i < _pos; i ++) {
+ st->print("%c%.*s", _branches[i], branch_spacing, " ");
+ }
+ }
+
+ class Mark {
+ BranchTracker& _tr;
+ public:
+ Mark(BranchTracker& tr, bool has_branch_here)
+ : _tr(tr) { _tr.push(has_branch_here); }
+ ~Mark() { _tr.pop(); }
+ };
+
+}; // end: BranchTracker
+
+struct LoadedClassInfo : public ResourceObj {
+public:
+ LoadedClassInfo* _next;
+ Klass* const _klass;
+ const ClassLoaderData* const _cld;
+
+ LoadedClassInfo(Klass* klass, const ClassLoaderData* cld)
+ : _klass(klass), _cld(cld) {}
+
+};
+
+class LoaderTreeNode : public ResourceObj {
+
+ // We walk the CLDG and, for each CLD which is non-anonymous, add
+ // a tree node. To add a node we need its parent node; if it itself
+ // does not exist yet, we add a preliminary node for it. This preliminary
+ // node just contains its loader oop; later, when encountering its CLD in
+ // our CLDG walk, we complete the missing information in this node.
+
+ const oop _loader_oop;
+ const ClassLoaderData* _cld;
+
+ LoaderTreeNode* _child;
+ LoaderTreeNode* _next;
+
+ LoadedClassInfo* _classes;
+ int _num_classes;
+
+ LoadedClassInfo* _anon_classes;
+ int _num_anon_classes;
+
+ void print_with_childs(outputStream* st, BranchTracker& branchtracker,
+ bool print_classes, bool verbose) const {
+
+ ResourceMark rm;
+
+ if (_cld == NULL) {
+ // Not sure how this could happen: we added a preliminary node for a parent but then never encountered
+ // its CLD?
+ return;
+ }
+
+ // Retrieve information.
+ const Klass* const loader_klass = _cld->class_loader_klass();
+ const Symbol* const loader_name = _cld->class_loader_name();
+
+ branchtracker.print(st);
+
+ // e.g. "+--- jdk.internal.reflect.DelegatingClassLoader"
+ st->print("+%.*s", BranchTracker::twig_len, "----------");
+ if (_cld->is_the_null_class_loader_data()) {
+ st->print(" ");
+ } else {
+ if (loader_name != NULL) {
+ st->print(" \"%s\",", loader_name->as_C_string());
+ }
+ st->print(" %s", loader_klass != NULL ? loader_klass->external_name() : "??");
+ st->print(" {" PTR_FORMAT "}", p2i(_loader_oop));
+ }
+ st->cr();
+
+ // Output following this node (node details and child nodes), up to the next sibling node,
+ // needs to be prefixed with "|" if there is a follow-up sibling.
+ const bool have_sibling = _next != NULL;
+ BranchTracker::Mark trm(branchtracker, have_sibling);
+
+ {
+ // optional node details following this node need to be prefixed with "|"
+ // if there are follow-up child nodes.
+ const bool have_child = _child != NULL;
+ BranchTracker::Mark trm(branchtracker, have_child);
+
+ // Empty line
+ branchtracker.print(st);
+ st->cr();
+
+ const int indentation = 18;
+
+ if (verbose) {
+ branchtracker.print(st);
+ st->print_cr("%*s " PTR_FORMAT, indentation, "Loader Data:", p2i(_cld));
+ branchtracker.print(st);
+ st->print_cr("%*s " PTR_FORMAT, indentation, "Loader Klass:", p2i(loader_klass));
+
+ // Empty line
+ branchtracker.print(st);
+ st->cr();
+ }
+
+ if (print_classes) {
+
+ if (_classes != NULL) {
+ for (LoadedClassInfo* lci = _classes; lci; lci = lci->_next) {
+ branchtracker.print(st);
+ if (lci == _classes) { // first iteration
+ st->print("%*s ", indentation, "Classes:");
+ } else {
+ st->print("%*s ", indentation, "");
+ }
+ st->print("%s", lci->_klass->external_name());
+ st->cr();
+ // Non-anonymous classes should live in the primary CLD of their loader
+ assert(lci->_cld == _cld, "must be");
+ }
+ branchtracker.print(st);
+ st->print("%*s ", indentation, "");
+ st->print_cr("(%u class%s)", _num_classes, (_num_classes == 1) ? "" : "es");
+
+ // Empty line
+ branchtracker.print(st);
+ st->cr();
+ }
+
+ if (_anon_classes != NULL) {
+ for (LoadedClassInfo* lci = _anon_classes; lci; lci = lci->_next) {
+ branchtracker.print(st);
+ if (lci == _anon_classes) { // first iteration
+ st->print("%*s ", indentation, "Anonymous Classes:");
+ } else {
+ st->print("%*s ", indentation, "");
+ }
+ st->print("%s", lci->_klass->external_name());
+ // For anonymous classes, also print CLD if verbose. Should be a different one than the primary CLD.
+ assert(lci->_cld != _cld, "must be");
+ if (verbose) {
+ st->print(" (CLD: " PTR_FORMAT ")", p2i(lci->_cld));
+ }
+ st->cr();
+ }
+ branchtracker.print(st);
+ st->print("%*s ", indentation, "");
+ st->print_cr("(%u anonymous class%s)", _num_anon_classes, (_num_anon_classes == 1) ? "" : "es");
+
+ // Empty line
+ branchtracker.print(st);
+ st->cr();
+ }
+
+ } // end: print_classes
+
+ } // Pop branchtracker mark
+
+ // Print children, recursively
+ LoaderTreeNode* c = _child;
+ while (c != NULL) {
+ c->print_with_childs(st, branchtracker, print_classes, verbose);
+ c = c->_next;
+ }
+
+ }
+
+public:
+
+ LoaderTreeNode(const oop loader_oop)
+ : _loader_oop(loader_oop), _cld(NULL)
+ , _child(NULL), _next(NULL)
+ , _classes(NULL), _anon_classes(NULL)
+ , _num_classes(0), _num_anon_classes(0) {}
+
+ void set_cld(const ClassLoaderData* cld) {
+ _cld = cld;
+ }
+
+ void add_child(LoaderTreeNode* info) {
+ info->_next = _child;
+ _child = info;
+ }
+
+ void add_sibling(LoaderTreeNode* info) {
+ assert(info->_next == NULL, "must be");
+ info->_next = _next;
+ _next = info;
+ }
+
+ void add_classes(LoadedClassInfo* first_class, int num_classes, bool anonymous) {
+ LoadedClassInfo** p_list_to_add_to = anonymous ? &_anon_classes : &_classes;
+ // Search tail.
+ while ((*p_list_to_add_to) != NULL) {
+ p_list_to_add_to = &(*p_list_to_add_to)->_next;
+ }
+ *p_list_to_add_to = first_class;
+ if (anonymous) {
+ _num_anon_classes += num_classes;
+ } else {
+ _num_classes += num_classes;
+ }
+ }
+
+ const ClassLoaderData* cld() const {
+ return _cld;
+ }
+
+ const oop loader_oop() const {
+ return _loader_oop;
+ }
+
+ LoaderTreeNode* find(const oop loader_oop) {
+ LoaderTreeNode* result = NULL;
+ if (_loader_oop == loader_oop) {
+ result = this;
+ } else {
+ LoaderTreeNode* c = _child;
+ while (c != NULL && result == NULL) {
+ result = c->find(loader_oop);
+ c = c->_next;
+ }
+ }
+ return result;
+ }
+
+ void print_with_childs(outputStream* st, bool print_classes, bool print_add_info) const {
+ BranchTracker bwt;
+ print_with_childs(st, bwt, print_classes, print_add_info);
+ }
+
+};
+
+class LoadedClassCollectClosure : public KlassClosure {
+public:
+ LoadedClassInfo* _list;
+ const ClassLoaderData* _cld;
+ int _num_classes;
+ LoadedClassCollectClosure(const ClassLoaderData* cld)
+ : _list(NULL), _cld(cld), _num_classes(0) {}
+ void do_klass(Klass* k) {
+ LoadedClassInfo* lki = new LoadedClassInfo(k, _cld);
+ lki->_next = _list;
+ _list = lki;
+ _num_classes ++;
+ }
+};
+
+class LoaderInfoScanClosure : public CLDClosure {
+
+ const bool _print_classes;
+ const bool _verbose;
+ LoaderTreeNode* _root;
+
+ static void fill_in_classes(LoaderTreeNode* info, const ClassLoaderData* cld) {
+ assert(info != NULL && cld != NULL, "must be");
+ LoadedClassCollectClosure lccc(cld);
+ const_cast<ClassLoaderData*>(cld)->classes_do(&lccc);
+ if (lccc._num_classes > 0) {
+ info->add_classes(lccc._list, lccc._num_classes, cld->is_anonymous());
+ }
+ }
+
+ LoaderTreeNode* find_node_or_add_empty_node(oop loader_oop) {
+
+ assert(_root != NULL, "root node must exist");
+
+ if (loader_oop == NULL) {
+ return _root;
+ }
+
+ // Check if a node for this oop already exists.
+ LoaderTreeNode* info = _root->find(loader_oop);
+
+ if (info == NULL) {
+ // It does not. Create a node.
+ info = new LoaderTreeNode(loader_oop);
+
+ // Add it to tree.
+ LoaderTreeNode* parent_info = NULL;
+
+ // Recursively add parent nodes if needed.
+ const oop parent_oop = java_lang_ClassLoader::parent(loader_oop);
+ if (parent_oop == NULL) {
+ parent_info = _root;
+ } else {
+ parent_info = find_node_or_add_empty_node(parent_oop);
+ }
+ assert(parent_info != NULL, "must be");
+
+ parent_info->add_child(info);
+ }
+ return info;
+ }
+
+
+public:
+ LoaderInfoScanClosure(bool print_classes, bool verbose)
+ : _print_classes(print_classes), _verbose(verbose), _root(NULL) {
+ _root = new LoaderTreeNode(NULL);
+ }
+
+ void print_results(outputStream* st) const {
+ _root->print_with_childs(st, _print_classes, _verbose);
+ }
+
+ void do_cld (ClassLoaderData* cld) {
+
+ // We do not display unloading loaders, for now.
+ if (cld->is_unloading()) {
+ return;
+ }
+
+ const oop loader_oop = cld->class_loader();
+
+ LoaderTreeNode* info = find_node_or_add_empty_node(loader_oop);
+ assert(info != NULL, "must be");
+
+ // Update CLD in node, but only if this is the primary CLD for this loader.
+ if (cld->is_anonymous() == false) {
+ assert(info->cld() == NULL, "there should be only one primary CLD per loader");
+ info->set_cld(cld);
+ }
+
+ // Add classes.
+ fill_in_classes(info, cld);
+ }
+
+};
+
+
+class ClassLoaderHierarchyVMOperation : public VM_Operation {
+ outputStream* const _out;
+ const bool _show_classes;
+ const bool _verbose;
+public:
+ ClassLoaderHierarchyVMOperation(outputStream* out, bool show_classes, bool verbose) :
+ _out(out), _show_classes(show_classes), _verbose(verbose)
+ {}
+
+ VMOp_Type type() const {
+ return VMOp_ClassLoaderHierarchyOperation;
+ }
+
+ void doit() {
+ assert(SafepointSynchronize::is_at_safepoint(), "must be a safepoint");
+ ResourceMark rm;
+ LoaderInfoScanClosure cl (_show_classes, _verbose);
+ ClassLoaderDataGraph::cld_do(&cl);
+ cl.print_results(_out);
+ }
+};
+
+// This command needs to be executed at a safepoint.
+void ClassLoaderHierarchyDCmd::execute(DCmdSource source, TRAPS) {
+ ClassLoaderHierarchyVMOperation op(output(), _show_classes.value(), _verbose.value());
+ VMThread::execute(&op);
+}
diff --git a/src/hotspot/share/classfile/classLoaderHierarchyDCmd.hpp b/src/hotspot/share/classfile/classLoaderHierarchyDCmd.hpp
new file mode 100644
index 00000000000..0db0eccf4dc
--- /dev/null
+++ b/src/hotspot/share/classfile/classLoaderHierarchyDCmd.hpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef HOTSPOT_SHARE_CLASSFILE_CLASSLOADERHIERARCHYDCMD_HPP_
+#define HOTSPOT_SHARE_CLASSFILE_CLASSLOADERHIERARCHYDCMD_HPP_
+
+#include "services/diagnosticCommand.hpp"
+
+class ClassLoaderHierarchyDCmd: public DCmdWithParser {
+ DCmdArgument<bool> _show_classes;
+ DCmdArgument<bool> _verbose;
+public:
+
+ ClassLoaderHierarchyDCmd(outputStream* output, bool heap);
+
+ static const char* name() {
+ return "VM.classloaders";
+ }
+
+ static const char* description() {
+ return "Prints classloader hierarchy.";
+ }
+ static const char* impact() {
+ return "Medium: Depends on number of class loaders and classes loaded.";
+ }
+ static const JavaPermission permission() {
+ JavaPermission p = {"java.lang.management.ManagementPermission",
+ "monitor", NULL};
+ return p;
+ }
+ static int num_arguments();
+ virtual void execute(DCmdSource source, TRAPS);
+
+};
+
+#endif /* HOTSPOT_SHARE_CLASSFILE_CLASSLOADERHIERARCHYDCMD_HPP_ */
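With the command registered, it is reachable from jcmd using the option names declared in the constructor above; the pid below is illustrative only:

    $ jcmd 12345 VM.classloaders show-classes=true verbose=true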
diff --git a/src/hotspot/share/classfile/dictionary.cpp b/src/hotspot/share/classfile/dictionary.cpp
index db35510398d..b422bb68530 100644
--- a/src/hotspot/share/classfile/dictionary.cpp
+++ b/src/hotspot/share/classfile/dictionary.cpp
@@ -35,7 +35,7 @@
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/hashtable.inline.hpp"
diff --git a/src/hotspot/share/classfile/dictionary.inline.hpp b/src/hotspot/share/classfile/dictionary.inline.hpp
index 4a24fb00c78..4e1c73bbb5b 100644
--- a/src/hotspot/share/classfile/dictionary.inline.hpp
+++ b/src/hotspot/share/classfile/dictionary.inline.hpp
@@ -26,7 +26,7 @@
#define SHARE_VM_CLASSFILE_DICTIONARY_INLINE_HPP
#include "classfile/dictionary.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
inline ProtectionDomainEntry* DictionaryEntry::pd_set_acquire() const {
return OrderAccess::load_acquire(&_pd_set);
diff --git a/src/hotspot/share/classfile/javaClasses.cpp b/src/hotspot/share/classfile/javaClasses.cpp
index bde2253b6af..ff31188b685 100644
--- a/src/hotspot/share/classfile/javaClasses.cpp
+++ b/src/hotspot/share/classfile/javaClasses.cpp
@@ -63,7 +63,6 @@
#include "runtime/vframe.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/preserveException.hpp"
-
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif
@@ -310,7 +309,8 @@ Handle java_lang_String::create_from_str(const char* utf8_str, TRAPS) {
Handle h_obj = basic_create(length, is_latin1, CHECK_NH);
if (length > 0) {
if (!has_multibyte) {
- strncpy((char*)value(h_obj())->byte_at_addr(0), utf8_str, length);
+ const jbyte* src = reinterpret_cast<const jbyte*>(utf8_str);
+ ArrayAccess<>::arraycopy_from_native(src, value(h_obj()), typeArrayOopDesc::element_offset<jbyte>(0), length);
} else if (is_latin1) {
UTF8::convert_to_unicode(utf8_str, value(h_obj())->byte_at_addr(0), length);
} else {
@@ -356,7 +356,8 @@ Handle java_lang_String::create_from_symbol(Symbol* symbol, TRAPS) {
Handle h_obj = basic_create(length, is_latin1, CHECK_NH);
if (length > 0) {
if (!has_multibyte) {
- strncpy((char*)value(h_obj())->byte_at_addr(0), utf8_str, length);
+ const jbyte* src = reinterpret_cast<const jbyte*>(utf8_str);
+ ArrayAccess<>::arraycopy_from_native(src, value(h_obj()), typeArrayOopDesc::element_offset<jbyte>(0), length);
} else if (is_latin1) {
UTF8::convert_to_unicode(utf8_str, value(h_obj())->byte_at_addr(0), length);
} else {
@@ -796,7 +797,7 @@ void java_lang_Class::fixup_mirror(Klass* k, TRAPS) {
// During bootstrap, java.lang.Class wasn't loaded so static field
// offsets were computed without the size added it. Go back and
// update all the static field offsets to included the size.
- for (JavaFieldStream fs(InstanceKlass::cast(k)); !fs.done(); fs.next()) {
+ for (JavaFieldStream fs(InstanceKlass::cast(k)); !fs.done(); fs.next()) {
if (fs.access_flags().is_static()) {
int real_offset = fs.offset() + InstanceMirrorKlass::offset_of_static_fields();
fs.set_offset(real_offset);
@@ -807,12 +808,8 @@ void java_lang_Class::fixup_mirror(Klass* k, TRAPS) {
if (k->is_shared() && k->has_raw_archived_mirror()) {
if (MetaspaceShared::open_archive_heap_region_mapped()) {
- oop m = k->archived_java_mirror();
- assert(m != NULL, "archived mirror is NULL");
- assert(MetaspaceShared::is_archive_object(m), "must be archived mirror object");
- Handle m_h(THREAD, m);
- // restore_archived_mirror() clears the klass' _has_raw_archived_mirror flag
- restore_archived_mirror(k, m_h, Handle(), Handle(), Handle(), CHECK);
+ bool present = restore_archived_mirror(k, Handle(), Handle(), Handle(), CHECK);
+ assert(present, "Missing archived mirror for %s", k->external_name());
return;
} else {
k->set_java_mirror_handle(NULL);
@@ -1205,11 +1202,23 @@ oop java_lang_Class::process_archived_mirror(Klass* k, oop mirror,
return archived_mirror;
}
-// After the archived mirror object is restored, the shared klass'
-// _has_raw_archived_mirror flag is cleared
-void java_lang_Class::restore_archived_mirror(Klass *k, Handle mirror,
+// Returns true if the mirror is updated, false if no archived mirror
+// data is present. After the archived mirror object is restored, the
+// shared klass' _has_raw_archived_mirror flag is cleared.
+bool java_lang_Class::restore_archived_mirror(Klass *k,
Handle class_loader, Handle module,
Handle protection_domain, TRAPS) {
+ oop m = MetaspaceShared::materialize_archived_object(k->archived_java_mirror_raw());
+
+ if (m == NULL) {
+ return false;
+ }
+
+ log_debug(cds, mirror)("Archived mirror is: " PTR_FORMAT, p2i(m));
+
+ // mirror is archived, restore
+ assert(MetaspaceShared::is_archive_object(m), "must be archived mirror object");
+ Handle mirror(THREAD, m);
// The java.lang.Class field offsets were archived and reloaded from archive.
// No need to put classes on the fixup_mirror_list before java.lang.Class
@@ -1219,7 +1228,7 @@ void java_lang_Class::restore_archived_mirror(Klass *k, Handle mirror,
// - local static final fields with initial values were initialized at dump time
// create the init_lock
- typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK);
+ typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK_(false));
set_init_lock(mirror(), r);
if (protection_domain.not_null()) {
@@ -1239,6 +1248,8 @@ void java_lang_Class::restore_archived_mirror(Klass *k, Handle mirror,
ResourceMark rm;
log_trace(cds, mirror)("Restored %s archived mirror " PTR_FORMAT, k->external_name(), p2i(mirror()));
+
+ return true;
}
#endif // INCLUDE_CDS_JAVA_HEAP
@@ -4255,7 +4266,7 @@ int java_lang_AssertionStatusDirectives::packages_offset;
int java_lang_AssertionStatusDirectives::packageEnabled_offset;
int java_lang_AssertionStatusDirectives::deflt_offset;
int java_nio_Buffer::_limit_offset;
-int java_util_concurrent_locks_AbstractOwnableSynchronizer::_owner_offset = 0;
+int java_util_concurrent_locks_AbstractOwnableSynchronizer::_owner_offset;
int reflect_ConstantPool::_oop_offset;
int reflect_UnsafeStaticFieldAccessorImpl::_base_offset;
@@ -4397,13 +4408,12 @@ void java_nio_Buffer::serialize(SerializeClosure* f) {
}
#endif
-void java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(TRAPS) {
- if (_owner_offset != 0) return;
+#define AOS_FIELDS_DO(macro) \
+ macro(_owner_offset, k, "exclusiveOwnerThread", thread_signature, false)
- SystemDictionary::load_abstract_ownable_synchronizer_klass(CHECK);
- InstanceKlass* k = SystemDictionary::abstract_ownable_synchronizer_klass();
- compute_offset(_owner_offset, k,
- "exclusiveOwnerThread", vmSymbols::thread_signature());
+void java_util_concurrent_locks_AbstractOwnableSynchronizer::compute_offsets() {
+ InstanceKlass* k = SystemDictionary::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass();
+ AOS_FIELDS_DO(FIELD_COMPUTE_OFFSET);
}
oop java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(oop obj) {
@@ -4471,6 +4481,7 @@ void JavaClasses::compute_offsets() {
java_lang_StackTraceElement::compute_offsets();
java_lang_StackFrameInfo::compute_offsets();
java_lang_LiveStackFrameInfo::compute_offsets();
+ java_util_concurrent_locks_AbstractOwnableSynchronizer::compute_offsets();
// generated interpreter code wants to know about the offsets we just computed:
AbstractAssembler::update_delayed_values();
diff --git a/src/hotspot/share/classfile/javaClasses.hpp b/src/hotspot/share/classfile/javaClasses.hpp
index 40e63f289bd..7f1d7cc9b12 100644
--- a/src/hotspot/share/classfile/javaClasses.hpp
+++ b/src/hotspot/share/classfile/javaClasses.hpp
@@ -229,8 +229,9 @@ class java_lang_Class : AllStatic {
static oop archive_mirror(Klass* k, TRAPS) NOT_CDS_JAVA_HEAP_RETURN_(NULL);
static oop process_archived_mirror(Klass* k, oop mirror, oop archived_mirror, Thread *THREAD)
NOT_CDS_JAVA_HEAP_RETURN_(NULL);
- static void restore_archived_mirror(Klass *k, Handle mirror, Handle class_loader, Handle module,
- Handle protection_domain, TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
+ static bool restore_archived_mirror(Klass *k, Handle class_loader, Handle module,
+ Handle protection_domain,
+ TRAPS) NOT_CDS_JAVA_HEAP_RETURN_(false);
static void fixup_module_field(Klass* k, Handle module);
@@ -1483,7 +1484,7 @@ class java_util_concurrent_locks_AbstractOwnableSynchronizer : AllStatic {
private:
static int _owner_offset;
public:
- static void initialize(TRAPS);
+ static void compute_offsets();
static oop get_owner_threadObj(oop obj);
};
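The AOS_FIELDS_DO/compute_offsets change in javaClasses.cpp above follows HotSpot's X-macro convention for field offsets: the field list is written once and expanded with different macros for different purposes. A generic, self-contained sketch of the pattern (names are illustrative, not the actual HotSpot macros):

    #include <cstdio>

    static int x_offset;
    static int y_offset;

    static int lookup_offset(const char* name) {
      return name[0]; // stand-in for a real reflective field lookup
    }

    // Single source of truth: one macro listing every (variable, field) pair.
    #define POINT_FIELDS_DO(macro) \
      macro(x_offset, "x")         \
      macro(y_offset, "y")

    // One expansion computes the offsets...
    #define COMPUTE_OFFSET(var, name) var = lookup_offset(name);
    // ...another prints them; the two lists can never drift apart.
    #define PRINT_OFFSET(var, name) std::printf("%s -> %d\n", name, var);

    void compute_offsets() { POINT_FIELDS_DO(COMPUTE_OFFSET) }
    void print_offsets()   { POINT_FIELDS_DO(PRINT_OFFSET) }

    int main() {
      compute_offsets();
      print_offsets(); // x -> 120, y -> 121 (ASCII codes from the stand-in)
      return 0;
    }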
diff --git a/src/hotspot/share/classfile/moduleEntry.cpp b/src/hotspot/share/classfile/moduleEntry.cpp
index 1a64a6e6a58..987a0fe7eaa 100644
--- a/src/hotspot/share/classfile/moduleEntry.cpp
+++ b/src/hotspot/share/classfile/moduleEntry.cpp
@@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "jni.h"
#include "classfile/classLoaderData.inline.hpp"
-#include "classfile/javaClasses.hpp"
+#include "classfile/javaClasses.inline.hpp"
#include "classfile/moduleEntry.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
@@ -236,10 +236,17 @@ ModuleEntry* ModuleEntry::create_unnamed_module(ClassLoaderData* cld) {
// The java.lang.Module for this loader's
// corresponding unnamed module can be found in the java.lang.ClassLoader object.
oop module = java_lang_ClassLoader::unnamedModule(cld->class_loader());
+
+ // Ensure that the unnamed module was correctly set when the class loader was constructed.
+ // Guarantee will cause a recognizable crash if the user code has circumvented calling the ClassLoader constructor.
+ ResourceMark rm;
+ guarantee(java_lang_Module::is_instance(module),
+ "The unnamed module for ClassLoader %s, is null or not an instance of java.lang.Module. The class loader has not been initialized correctly.",
+ cld->loader_name());
+
ModuleEntry* unnamed_module = new_unnamed_module_entry(Handle(Thread::current(), module), cld);
- // Store pointer to the ModuleEntry in the unnamed module's java.lang.Module
- // object.
+ // Store pointer to the ModuleEntry in the unnamed module's java.lang.Module object.
java_lang_Module::set_module_entry(module, unnamed_module);
return unnamed_module;
diff --git a/src/hotspot/share/classfile/stringTable.cpp b/src/hotspot/share/classfile/stringTable.cpp
index 96e3f858563..fc696792e7d 100644
--- a/src/hotspot/share/classfile/stringTable.cpp
+++ b/src/hotspot/share/classfile/stringTable.cpp
@@ -29,7 +29,10 @@
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/oopStorage.inline.hpp"
+#include "gc/shared/oopStorageParState.inline.hpp"
#include "logging/log.hpp"
+#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "memory/metaspaceShared.hpp"
@@ -38,171 +41,196 @@
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
+#include "oops/weakHandle.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepointVerifiers.hpp"
+#include "runtime/timerTrace.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "services/diagnosticCommand.hpp"
-#include "utilities/hashtable.inline.hpp"
+#include "utilities/concurrentHashTable.inline.hpp"
+#include "utilities/concurrentHashTableTasks.inline.hpp"
#include "utilities/macros.hpp"
-// the number of buckets a thread claims
-const int ClaimChunkSize = 32;
-
-#ifdef ASSERT
-class StableMemoryChecker : public StackObj {
- enum { _bufsize = wordSize*4 };
-
- address _region;
- jint _size;
- u1 _save_buf[_bufsize];
-
- int sample(u1* save_buf) {
- if (_size <= _bufsize) {
- memcpy(save_buf, _region, _size);
- return _size;
- } else {
- // copy head and tail
- memcpy(&save_buf[0], _region, _bufsize/2);
- memcpy(&save_buf[_bufsize/2], _region + _size - _bufsize/2, _bufsize/2);
- return (_bufsize/2)*2;
- }
- }
-
- public:
- StableMemoryChecker(const void* region, jint size) {
- _region = (address) region;
- _size = size;
- sample(_save_buf);
- }
-
- bool verify() {
- u1 check_buf[sizeof(_save_buf)];
- int check_size = sample(check_buf);
- return (0 == memcmp(_save_buf, check_buf, check_size));
- }
-
- void set_region(const void* region) { _region = (address) region; }
-};
-#endif
-
+// We prefer short chains of avg 2
+#define PREF_AVG_LIST_LEN 2
+// 2^24 is max size
+#define END_SIZE 24
+// If a chain gets to 32 something might be wrong
+#define REHASH_LEN 32
+// If we have as many dead items as 50% of the number of buckets
+#define CLEAN_DEAD_HIGH_WATER_MARK 0.5
// --------------------------------------------------------------------------
StringTable* StringTable::_the_table = NULL;
bool StringTable::_shared_string_mapped = false;
-bool StringTable::_needs_rehashing = false;
-
-volatile int StringTable::_parallel_claimed_idx = 0;
-
CompactHashtable<oop, char> StringTable::_shared_table;
+bool StringTable::_alt_hash = false;
-// Pick hashing algorithm
-unsigned int StringTable::hash_string(const jchar* s, int len) {
- return use_alternate_hashcode() ? alt_hash_string(s, len) :
- java_lang_String::hash_code(s, len);
+static juint murmur_seed = 0;
+
+uintx hash_string(const jchar* s, int len, bool useAlt) {
+ return useAlt ?
+ AltHashing::murmur3_32(murmur_seed, s, len) :
+ java_lang_String::hash_code(s, len);
}
-unsigned int StringTable::alt_hash_string(const jchar* s, int len) {
- return AltHashing::murmur3_32(seed(), s, len);
-}
-
-unsigned int StringTable::hash_string(oop string) {
- EXCEPTION_MARK;
- if (string == NULL) {
- return hash_string((jchar*)NULL, 0);
- }
- ResourceMark rm(THREAD);
- // All String oops are hashed as unicode
- int length;
- jchar* chars = java_lang_String::as_unicode_string(string, length, THREAD);
- if (chars != NULL) {
- return hash_string(chars, length);
- } else {
- vm_exit_out_of_memory(length, OOM_MALLOC_ERROR, "unable to create Unicode string for verification");
+class StringTableConfig : public StringTableHash::BaseConfig {
+ private:
+ public:
+ static uintx get_hash(WeakHandle<vm_string_table_data> const& value,
+ bool* is_dead) {
+ EXCEPTION_MARK;
+ oop val_oop = value.peek();
+ if (val_oop == NULL) {
+ *is_dead = true;
+ return 0;
+ }
+ *is_dead = false;
+ ResourceMark rm(THREAD);
+ // All String oops are hashed as unicode
+ int length;
+ jchar* chars = java_lang_String::as_unicode_string(val_oop, length, THREAD);
+ if (chars != NULL) {
+ return hash_string(chars, length, StringTable::_alt_hash);
+ }
+ vm_exit_out_of_memory(length, OOM_MALLOC_ERROR, "get hash from oop");
return 0;
}
-}
+ // We use default allocation/deallocation but counted
+ static void* allocate_node(size_t size,
+ WeakHandle<vm_string_table_data> const& value) {
+ StringTable::item_added();
+ return StringTableHash::BaseConfig::allocate_node(size, value);
+ }
+ static void free_node(void* memory,
+ WeakHandle<vm_string_table_data> const& value) {
+ value.release();
+ StringTableHash::BaseConfig::free_node(memory, value);
+ StringTable::item_removed();
+ }
+};
-oop StringTable::string_object(HashtableEntry<oop, mtSymbol>* entry) {
- return RootAccess<ON_PHANTOM_OOP_REF>::oop_load(entry->literal_addr());
-}
+class StringTableLookupJchar : StackObj {
+ private:
+ Thread* _thread;
+ uintx _hash;
+ int _len;
+ const jchar* _str;
+ Handle _found;
-oop StringTable::string_object_no_keepalive(HashtableEntry<oop, mtSymbol>* entry) {
- // The AS_NO_KEEPALIVE peeks at the oop without keeping it alive.
- // This is *very dangerous* in general but is okay in this specific
- // case. The subsequent oop_load keeps the oop alive if it matched
- // the jchar* string.
- return RootAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(entry->literal_addr());
-}
-
-void StringTable::set_string_object(HashtableEntry<oop, mtSymbol>* entry, oop string) {
- RootAccess<ON_PHANTOM_OOP_REF>::oop_store(entry->literal_addr(), string);
-}
-
-oop StringTable::lookup_shared(jchar* name, int len, unsigned int hash) {
- assert(hash == java_lang_String::hash_code(name, len),
- "hash must be computed using java_lang_String::hash_code");
- return _shared_table.lookup((const char*)name, hash, len);
-}
-
-oop StringTable::lookup_in_main_table(int index, jchar* name,
- int len, unsigned int hash) {
- int count = 0;
- for (HashtableEntry<oop, mtSymbol>* l = bucket(index); l != NULL; l = l->next()) {
- count++;
- if (l->hash() == hash) {
- if (java_lang_String::equals(string_object_no_keepalive(l), name, len)) {
- // We must perform a new load with string_object() that keeps the string
- // alive as we must expose the oop as strongly reachable when exiting
- // this context, in case the oop gets published.
- return string_object(l);
- }
+ public:
+ StringTableLookupJchar(Thread* thread, uintx hash, const jchar* key, int len)
+ : _thread(thread), _hash(hash), _str(key), _len(len) {
+ }
+ uintx get_hash() const {
+ return _hash;
+ }
+ bool equals(WeakHandle<vm_string_table_data>* value, bool* is_dead) {
+ oop val_oop = value->peek();
+ if (val_oop == NULL) {
+ // dead oop, mark this hash dead for cleaning
+ *is_dead = true;
+ return false;
}
+ bool equals = java_lang_String::equals(val_oop, (jchar*)_str, _len);
+ if (!equals) {
+ return false;
+ }
+ // Need to resolve weak handle and Handleize through possible safepoint.
+ _found = Handle(_thread, value->resolve());
+ return true;
}
- // If the bucket size is too deep check if this hash code is insufficient.
- if (count >= rehash_count && !needs_rehashing()) {
- _needs_rehashing = check_rehash_table(count);
+};
+
+class StringTableLookupOop : public StackObj {
+ private:
+ Thread* _thread;
+ uintx _hash;
+ Handle _find;
+ Handle _found; // Might be a different oop with the same value that's already
+ // in the table, which is the point.
+ public:
+ StringTableLookupOop(Thread* thread, uintx hash, Handle handle)
+ : _thread(thread), _hash(hash), _find(handle) { }
+
+ uintx get_hash() const {
+ return _hash;
}
- return NULL;
+
+ bool equals(WeakHandle<vm_string_table_data>* value, bool* is_dead) {
+ oop val_oop = value->peek();
+ if (val_oop == NULL) {
+ // dead oop, mark this hash dead for cleaning
+ *is_dead = true;
+ return false;
+ }
+ bool equals = java_lang_String::equals(_find(), val_oop);
+ if (!equals) {
+ return false;
+ }
+ // Need to resolve weak handle and Handleize through possible safepoint.
+ _found = Handle(_thread, value->resolve());
+ return true;
+ }
+};
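+
+// Both lookup closures above implement the probing protocol expected by the
+// concurrent hash table: get_hash() picks the bucket, equals() is applied to
+// each candidate node, and setting *is_dead marks entries whose WeakHandle
+// has been cleared so that the table can prune them later.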
+
+static size_t ceil_pow_2(uintx val) {
+ size_t ret;
+ for (ret = 1; ((size_t)1 << ret) < val; ++ret);
+ return ret;
}
-
-oop StringTable::basic_add(int index_arg, Handle string, jchar* name,
- int len, unsigned int hashValue_arg, TRAPS) {
-
- assert(java_lang_String::equals(string(), name, len),
- "string must be properly initialized");
- // Cannot hit a safepoint in this function because the "this" pointer can move.
- NoSafepointVerifier nsv;
-
- // Check if the symbol table has been rehashed, if so, need to recalculate
- // the hash value and index before second lookup.
- unsigned int hashValue;
- int index;
- if (use_alternate_hashcode()) {
- hashValue = alt_hash_string(name, len);
- index = hash_to_index(hashValue);
- } else {
- hashValue = hashValue_arg;
- index = index_arg;
- }
-
- // Since look-up was done lock-free, we need to check if another
- // thread beat us in the race to insert the symbol.
-
- // No need to lookup the shared table from here since the caller (intern()) already did
- oop test = lookup_in_main_table(index, name, len, hashValue); // calls lookup(u1*, int)
- if (test != NULL) {
- // Entry already added
- return test;
- }
-
- HashtableEntry<oop, mtSymbol>* entry = new_entry(hashValue, string());
- add_entry(index, entry);
- return string();
+StringTable::StringTable() : _local_table(NULL), _current_size(0), _has_work(0),
+ _needs_rehashing(false), _weak_handles(NULL), _items(0), _uncleaned_items(0) {
+ _weak_handles = new OopStorage("StringTable weak",
+ StringTableWeakAlloc_lock,
+ StringTableWeakActive_lock);
+ size_t start_size_log_2 = ceil_pow_2(StringTableSize);
+ _current_size = ((size_t)1) << start_size_log_2;
+ log_trace(stringtable)("Start size: " SIZE_FORMAT " (" SIZE_FORMAT ")",
+ _current_size, start_size_log_2);
+ _local_table = new StringTableHash(start_size_log_2, END_SIZE, REHASH_LEN);
}
+size_t StringTable::item_added() {
+ return Atomic::add((size_t)1, &(the_table()->_items));
+}
+size_t StringTable::items_to_clean(size_t ncl) {
+ size_t total = Atomic::add((size_t)ncl, &(the_table()->_uncleaned_items));
+ log_trace(stringtable)(
+ "Uncleaned items:" SIZE_FORMAT " added: " SIZE_FORMAT " total:" SIZE_FORMAT,
+ the_table()->_uncleaned_items, ncl, total);
+ return total;
+}
+
+void StringTable::item_removed() {
+ Atomic::add((size_t)-1, &(the_table()->_items));
+ Atomic::add((size_t)-1, &(the_table()->_uncleaned_items));
+}
+
+double StringTable::get_load_factor() {
+ return (_items*1.0)/_current_size;
+}
+
+double StringTable::get_dead_factor() {
+ return (_uncleaned_items*1.0)/_current_size;
+}
+
+size_t StringTable::table_size(Thread* thread) {
+ return ((size_t)(1)) << _local_table->get_size_log2(thread != NULL ? thread
+ : Thread::current());
+}
+
+void StringTable::trigger_concurrent_work() {
+ MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
+ the_table()->_has_work = true;
+ Service_lock->notify_all();
+}
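
`trigger_concurrent_work()` hands cleanup off to the service thread: set the flag while holding the lock, then notify. A toy sketch of that hand-off, with standard C++ primitives standing in for HotSpot's `Monitor` and `Service_lock` (all names below are illustrative):

```cpp
#include <condition_variable>
#include <mutex>

std::mutex service_lock;             // stand-in for Service_lock
std::condition_variable service_cv;
bool has_work = false;

void trigger() {                     // called by the GC/VM side
  std::lock_guard<std::mutex> guard(service_lock);
  has_work = true;
  service_cv.notify_all();
}

void service_thread_iteration() {    // the service thread picks the work up
  std::unique_lock<std::mutex> guard(service_lock);
  service_cv.wait(guard, [] { return has_work; });
  has_work = false;
  // ... perform the concurrent grow/clean work here, outside a safepoint ...
}
```
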
+
+// Probing
oop StringTable::lookup(Symbol* symbol) {
ResourceMark rm;
int length;
@@ -211,71 +239,45 @@ oop StringTable::lookup(Symbol* symbol) {
}
oop StringTable::lookup(jchar* name, int len) {
- // shared table always uses java_lang_String::hash_code
unsigned int hash = java_lang_String::hash_code(name, len);
- oop string = lookup_shared(name, len, hash);
+ oop string = StringTable::the_table()->lookup_shared(name, len, hash);
if (string != NULL) {
return string;
}
- if (use_alternate_hashcode()) {
- hash = alt_hash_string(name, len);
+ if (StringTable::_alt_hash) {
+ hash = hash_string(name, len, true);
}
- int index = the_table()->hash_to_index(hash);
- string = the_table()->lookup_in_main_table(index, name, len, hash);
-
- return string;
+ return StringTable::the_table()->do_lookup(name, len, hash);
}
-oop StringTable::intern(Handle string_or_null, jchar* name,
- int len, TRAPS) {
- // shared table always uses java_lang_String::hash_code
- unsigned int hashValue = java_lang_String::hash_code(name, len);
- oop found_string = lookup_shared(name, len, hashValue);
- if (found_string != NULL) {
- return found_string;
+class StringTableGet : public StackObj {
+ Thread* _thread;
+ Handle _return;
+ public:
+ StringTableGet(Thread* thread) : _thread(thread) {}
+  void operator()(WeakHandle<vm_string_table_data>* val) {
+ oop result = val->resolve();
+ assert(result != NULL, "Result should be reachable");
+ _return = Handle(_thread, result);
}
- if (use_alternate_hashcode()) {
- hashValue = alt_hash_string(name, len);
+ oop get_res_oop() {
+ return _return();
}
- int index = the_table()->hash_to_index(hashValue);
- found_string = the_table()->lookup_in_main_table(index, name, len, hashValue);
+};
- // Found
- if (found_string != NULL) {
- return found_string;
+oop StringTable::do_lookup(jchar* name, int len, uintx hash) {
+ Thread* thread = Thread::current();
+ StringTableLookupJchar lookup(thread, hash, name, len);
+ StringTableGet stg(thread);
+ bool rehash_warning;
+ _local_table->get(thread, lookup, stg, &rehash_warning);
+ if (rehash_warning) {
+ _needs_rehashing = true;
}
-
- debug_only(StableMemoryChecker smc(name, len * sizeof(name[0])));
- assert(!Universe::heap()->is_in_reserved(name),
- "proposed name of symbol must be stable");
-
- HandleMark hm(THREAD); // cleanup strings created
- Handle string;
- // try to reuse the string if possible
- if (!string_or_null.is_null()) {
- string = string_or_null;
- } else {
- string = java_lang_String::create_from_unicode(name, len, CHECK_NULL);
- }
-
- // Deduplicate the string before it is interned. Note that we should never
- // deduplicate a string after it has been interned. Doing so will counteract
- // compiler optimizations done on e.g. interned string literals.
- Universe::heap()->deduplicate_string(string());
-
- // Grab the StringTable_lock before getting the_table() because it could
- // change at safepoint.
- oop added_or_found;
- {
- MutexLocker ml(StringTable_lock, THREAD);
- // Otherwise, add to symbol to table
- added_or_found = the_table()->basic_add(index, string, name, len,
- hashValue, CHECK_NULL);
- }
-
- return added_or_found;
+ return stg.get_res_oop();
}
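
`do_lookup()` drives the concurrent hash table through two callables: a LOOKUP object that supplies `get_hash()`/`equals()`, and a FOUND object that receives the matching entry. A minimal sketch of that calling convention over a toy chained table; this is illustrative only, not the `ConcurrentHashTable` interface itself:

```cpp
#include <cstddef>
#include <vector>

// Toy chained table demonstrating the two-callback lookup protocol.
template <typename VALUE, typename LOOKUP, typename FOUND>
bool table_get(std::vector<std::vector<VALUE> >& buckets,
               LOOKUP& lookup, FOUND& found) {
  std::vector<VALUE>& bucket = buckets[lookup.get_hash() % buckets.size()];
  for (size_t i = 0; i < bucket.size(); i++) {
    bool is_dead = false;
    if (lookup.equals(&bucket[i], &is_dead)) {
      found(&bucket[i]);  // hand the matching entry to the caller's callback
      return true;
    }
  }
  return false;
}
```
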
+// Interning
oop StringTable::intern(Symbol* symbol, TRAPS) {
if (symbol == NULL) return NULL;
ResourceMark rm(THREAD);
@@ -286,19 +288,17 @@ oop StringTable::intern(Symbol* symbol, TRAPS) {
return result;
}
-
-oop StringTable::intern(oop string, TRAPS)
-{
+oop StringTable::intern(oop string, TRAPS) {
if (string == NULL) return NULL;
ResourceMark rm(THREAD);
int length;
Handle h_string (THREAD, string);
- jchar* chars = java_lang_String::as_unicode_string(string, length, CHECK_NULL);
+ jchar* chars = java_lang_String::as_unicode_string(string, length,
+ CHECK_NULL);
oop result = intern(h_string, chars, length, CHECK_NULL);
return result;
}
-
oop StringTable::intern(const char* utf8_string, TRAPS) {
if (utf8_string == NULL) return NULL;
ResourceMark rm(THREAD);
@@ -310,342 +310,451 @@ oop StringTable::intern(const char* utf8_string, TRAPS) {
return result;
}
-void StringTable::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int* processed, int* removed) {
- BucketUnlinkContext context;
- buckets_unlink_or_oops_do(is_alive, f, 0, the_table()->table_size(), &context);
- _the_table->bulk_free_entries(&context);
- *processed = context._num_processed;
- *removed = context._num_removed;
-}
-
-void StringTable::possibly_parallel_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int* processed, int* removed) {
- // Readers of the table are unlocked, so we should only be removing
- // entries at a safepoint.
- assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
- const int limit = the_table()->table_size();
-
- BucketUnlinkContext context;
- for (;;) {
- // Grab next set of buckets to scan
- int start_idx = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx) - ClaimChunkSize;
- if (start_idx >= limit) {
- // End of table
- break;
- }
-
- int end_idx = MIN2(limit, start_idx + ClaimChunkSize);
- buckets_unlink_or_oops_do(is_alive, f, start_idx, end_idx, &context);
+oop StringTable::intern(Handle string_or_null_h, jchar* name, int len, TRAPS) {
+ // shared table always uses java_lang_String::hash_code
+ unsigned int hash = java_lang_String::hash_code(name, len);
+ oop found_string = StringTable::the_table()->lookup_shared(name, len, hash);
+ if (found_string != NULL) {
+ return found_string;
}
- _the_table->bulk_free_entries(&context);
- *processed = context._num_processed;
- *removed = context._num_removed;
-}
-
-void StringTable::buckets_oops_do(OopClosure* f, int start_idx, int end_idx) {
- const int limit = the_table()->table_size();
-
- assert(0 <= start_idx && start_idx <= limit,
- "start_idx (%d) is out of bounds", start_idx);
- assert(0 <= end_idx && end_idx <= limit,
- "end_idx (%d) is out of bounds", end_idx);
- assert(start_idx <= end_idx,
- "Index ordering: start_idx=%d, end_idx=%d",
- start_idx, end_idx);
-
- for (int i = start_idx; i < end_idx; i += 1) {
-    HashtableEntry<oop, mtSymbol>* entry = the_table()->bucket(i);
- while (entry != NULL) {
- assert(!entry->is_shared(), "CDS not used for the StringTable");
-
- f->do_oop((oop*)entry->literal_addr());
-
- entry = entry->next();
- }
+ if (StringTable::_alt_hash) {
+ hash = hash_string(name, len, true);
}
+ return StringTable::the_table()->do_intern(string_or_null_h, name, len,
+ hash, CHECK_NULL);
}
-void StringTable::buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, BucketUnlinkContext* context) {
- const int limit = the_table()->table_size();
+class StringTableCreateEntry : public StackObj {
+ private:
+ Thread* _thread;
+ Handle _return;
+ Handle _store;
+ public:
+ StringTableCreateEntry(Thread* thread, Handle store)
+ : _thread(thread), _store(store) {}
- assert(0 <= start_idx && start_idx <= limit,
- "start_idx (%d) is out of bounds", start_idx);
- assert(0 <= end_idx && end_idx <= limit,
- "end_idx (%d) is out of bounds", end_idx);
- assert(start_idx <= end_idx,
- "Index ordering: start_idx=%d, end_idx=%d",
- start_idx, end_idx);
+  WeakHandle<vm_string_table_data> operator()() { // No dups found
+    WeakHandle<vm_string_table_data> wh =
+      WeakHandle<vm_string_table_data>::create(_store);
+    return wh;
+  }
+  void operator()(bool inserted, WeakHandle<vm_string_table_data>* val) {
+ oop result = val->resolve();
+ assert(result != NULL, "Result should be reachable");
+ _return = Handle(_thread, result);
+ }
+ oop get_return() const {
+ return _return();
+ }
+};
- for (int i = start_idx; i < end_idx; ++i) {
-    HashtableEntry<oop, mtSymbol>** p = the_table()->bucket_addr(i);
-    HashtableEntry<oop, mtSymbol>* entry = the_table()->bucket(i);
- while (entry != NULL) {
- assert(!entry->is_shared(), "CDS not used for the StringTable");
+oop StringTable::do_intern(Handle string_or_null_h, jchar* name,
+ int len, uintx hash, TRAPS) {
+ HandleMark hm(THREAD); // cleanup strings created
+ Handle string_h;
- if (is_alive->do_object_b(string_object_no_keepalive(entry))) {
- if (f != NULL) {
- f->do_oop(entry->literal_addr());
- }
- p = entry->next_addr();
- } else {
- *p = entry->next();
- context->free_entry(entry);
- }
- context->_num_processed++;
- entry = *p;
+ if (!string_or_null_h.is_null()) {
+ string_h = string_or_null_h;
+ } else {
+ string_h = java_lang_String::create_from_unicode(name, len, CHECK_NULL);
+ }
+
+ // Deduplicate the string before it is interned. Note that we should never
+ // deduplicate a string after it has been interned. Doing so will counteract
+ // compiler optimizations done on e.g. interned string literals.
+ Universe::heap()->deduplicate_string(string_h());
+
+ assert(java_lang_String::equals(string_h(), name, len),
+ "string must be properly initialized");
+ assert(len == java_lang_String::length(string_h()), "Must be same length");
+ StringTableLookupOop lookup(THREAD, hash, string_h);
+ StringTableCreateEntry stc(THREAD, string_h);
+
+ bool rehash_warning;
+ _local_table->get_insert_lazy(THREAD, lookup, stc, stc, &rehash_warning);
+ if (rehash_warning) {
+ _needs_rehashing = true;
+ }
+ return stc.get_return();
+}
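
`get_insert_lazy()` only runs the create callback when no equal string is present; if two threads race, one insertion wins and both callers receive the winning entry through the found callback, so `intern()` always returns the canonical oop. The same contract, sketched with a locked set standing in for the lock-free table:

```cpp
#include <mutex>
#include <string>
#include <unordered_set>

// Sketch of the intern() contract only; the real table above is lock-free
// per bucket and stores weak handles, not the strings themselves.
static std::unordered_set<std::string> g_interned;
static std::mutex g_lock;

const std::string* toy_intern(const std::string& s) {
  std::lock_guard<std::mutex> guard(g_lock);
  auto result = g_interned.insert(s);  // no-op if an equal string exists
  return &*result.first;               // canonical entry, old or new
}
```
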
+
+// GC support
+class StringTableIsAliveCounter : public BoolObjectClosure {
+ BoolObjectClosure* _real_boc;
+ public:
+ size_t _count;
+ size_t _count_total;
+ StringTableIsAliveCounter(BoolObjectClosure* boc) : _real_boc(boc), _count(0),
+ _count_total(0) {}
+ bool do_object_b(oop obj) {
+ bool ret = _real_boc->do_object_b(obj);
+ if (!ret) {
+ ++_count;
}
+ ++_count_total;
+ return ret;
+ }
+};
+
+void StringTable::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f,
+ int* processed, int* removed) {
+ DoNothingClosure dnc;
+ assert(is_alive != NULL, "No closure");
+ StringTableIsAliveCounter stiac(is_alive);
+ OopClosure* tmp = f != NULL ? f : &dnc;
+
+ StringTable::the_table()->_weak_handles->weak_oops_do(&stiac, tmp);
+
+ StringTable::the_table()->items_to_clean(stiac._count);
+ StringTable::the_table()->check_concurrent_work();
+ if (processed != NULL) {
+ *processed = (int) stiac._count_total;
+ }
+ if (removed != NULL) {
+ *removed = (int) stiac._count;
}
}
void StringTable::oops_do(OopClosure* f) {
- buckets_oops_do(f, 0, the_table()->table_size());
+ assert(f != NULL, "No closure");
+ StringTable::the_table()->_weak_handles->oops_do(f);
}
-void StringTable::possibly_parallel_oops_do(OopClosure* f) {
- const int limit = the_table()->table_size();
+void StringTable::possibly_parallel_unlink(
+   OopStorage::ParState<false, false>* _par_state_string, BoolObjectClosure* cl,
+ int* processed, int* removed)
+{
+ DoNothingClosure dnc;
+ assert(cl != NULL, "No closure");
+ StringTableIsAliveCounter stiac(cl);
- for (;;) {
- // Grab next set of buckets to scan
- int start_idx = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx) - ClaimChunkSize;
- if (start_idx >= limit) {
- // End of table
- break;
+ _par_state_string->weak_oops_do(&stiac, &dnc);
+
+ StringTable::the_table()->items_to_clean(stiac._count);
+ StringTable::the_table()->check_concurrent_work();
+ *processed = (int) stiac._count_total;
+ *removed = (int) stiac._count;
+}
+
+void StringTable::possibly_parallel_oops_do(
+   OopStorage::ParState<false /* concurrent */, false /* const */>*
+ _par_state_string, OopClosure* f)
+{
+ assert(f != NULL, "No closure");
+ _par_state_string->oops_do(f);
+}
+
+// Concurrent work
+void StringTable::grow(JavaThread* jt) {
+ StringTableHash::GrowTask gt(_local_table);
+ if (!gt.prepare(jt)) {
+ return;
+ }
+ log_trace(stringtable)("Started to grow");
+ {
+ TraceTime timer("Grow", TRACETIME_LOG(Debug, stringtable, perf));
+ while (gt.doTask(jt)) {
+ gt.pause(jt);
+ {
+ ThreadBlockInVM tbivm(jt);
+ }
+ gt.cont(jt);
}
+ }
+ gt.done(jt);
+ _current_size = table_size(jt);
+ log_debug(stringtable)("Grown to size:" SIZE_FORMAT, _current_size);
+}
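
The grow task runs in chunks; between chunks the worker enters and immediately leaves the blocked state (the empty `ThreadBlockInVM` scope above), which gives a pending safepoint a chance to proceed. The control flow in isolation, with a toy stand-in for the task type:

```cpp
// Toy stand-in for the chunked GrowTask/BulkDeleteTask pattern above; only
// the control flow is meaningful here.
struct ToyTask {
  int chunks_left;
  ToyTask() : chunks_left(8) {}
  bool doTask() { return --chunks_left > 0; }  // true while work remains
  void pause() {}  // publish a resumable position, release internal state
  void cont() {}   // re-acquire state and continue
};

void run_in_chunks(ToyTask& task) {
  while (task.doTask()) {
    task.pause();
    // In HotSpot an empty ThreadBlockInVM scope sits here: entering and
    // leaving the blocked state lets a pending safepoint run in between.
    task.cont();
  }
}
```
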
- int end_idx = MIN2(limit, start_idx + ClaimChunkSize);
- buckets_oops_do(f, start_idx, end_idx);
+struct StringTableDoDelete : StackObj {
+ long _count;
+ StringTableDoDelete() : _count(0) {}
+  void operator()(WeakHandle<vm_string_table_data>* val) {
+ ++_count;
+ }
+};
+
+struct StringTableDeleteCheck : StackObj {
+ long _count;
+ long _item;
+ StringTableDeleteCheck() : _count(0), _item(0) {}
+  bool operator()(WeakHandle<vm_string_table_data>* val) {
+ ++_item;
+ oop tmp = val->peek();
+ if (tmp == NULL) {
+ ++_count;
+ return true;
+ } else {
+ return false;
+ }
+ }
+};
+
+void StringTable::clean_dead_entries(JavaThread* jt) {
+ StringTableHash::BulkDeleteTask bdt(_local_table);
+ if (!bdt.prepare(jt)) {
+ return;
+ }
+
+ StringTableDeleteCheck stdc;
+ StringTableDoDelete stdd;
+ bool interrupted = false;
+ {
+ TraceTime timer("Clean", TRACETIME_LOG(Debug, stringtable, perf));
+ while(bdt.doTask(jt, stdc, stdd)) {
+ bdt.pause(jt);
+ {
+ ThreadBlockInVM tbivm(jt);
+ }
+ if (!bdt.cont(jt)) {
+ interrupted = true;
+ break;
+ }
+ }
+ }
+ if (interrupted) {
+ _has_work = true;
+ } else {
+ bdt.done(jt);
+ }
+ log_debug(stringtable)("Cleaned %ld of %ld", stdc._count, stdc._item);
+}
+
+void StringTable::check_concurrent_work() {
+ if (_has_work) {
+ return;
+ }
+ double load_factor = StringTable::get_load_factor();
+ double dead_factor = StringTable::get_dead_factor();
+  // We should clean/resize if we have more dead items than live ones,
+  // a load factor above the preferred average list length, or
+  // more dead items than the high-water mark.
+ if ((dead_factor > load_factor) ||
+ (load_factor > PREF_AVG_LIST_LEN) ||
+ (dead_factor > CLEAN_DEAD_HIGH_WATER_MARK)) {
+ log_debug(stringtable)("Concurrent work triggered, live factor:%g dead factor:%g",
+ load_factor, dead_factor);
+ trigger_concurrent_work();
}
}
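
A worked example of the trigger rule, assuming the constants defined earlier in this file are `PREF_AVG_LIST_LEN = 2` and `CLEAN_DEAD_HIGH_WATER_MARK = 0.5` (they are not shown in this hunk):

```cpp
#include <cstdio>

int main() {
  const double PREF_AVG_LIST_LEN = 2.0;           // assumed value
  const double CLEAN_DEAD_HIGH_WATER_MARK = 0.5;  // assumed value
  double items = 3000, uncleaned = 700, buckets = 1024;
  double load_factor = items / buckets;           // ~2.93
  double dead_factor = uncleaned / buckets;       // ~0.68
  bool trigger = (dead_factor > load_factor) ||
                 (load_factor > PREF_AVG_LIST_LEN) ||
                 (dead_factor > CLEAN_DEAD_HIGH_WATER_MARK);
  // Both the load threshold and the dead high-water mark fire here.
  printf("load=%.2f dead=%.2f trigger=%d\n", load_factor, dead_factor, trigger);
  return 0;
}
```
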
+void StringTable::concurrent_work(JavaThread* jt) {
+ _has_work = false;
+ double load_factor = get_load_factor();
+ log_debug(stringtable, perf)("Concurrent work, live factor: %g", load_factor);
+ // We prefer growing, since that also removes dead items
+ if (load_factor > PREF_AVG_LIST_LEN && !_local_table->is_max_size_reached()) {
+ grow(jt);
+ } else {
+ clean_dead_entries(jt);
+ }
+}
+
+void StringTable::do_concurrent_work(JavaThread* jt) {
+ StringTable::the_table()->concurrent_work(jt);
+}
+
+// Rehash
+bool StringTable::do_rehash() {
+ if (!_local_table->is_safepoint_safe()) {
+ return false;
+ }
+
+ // We use max size
+ StringTableHash* new_table = new StringTableHash(END_SIZE, END_SIZE, REHASH_LEN);
+ // Use alt hash from now on
+ _alt_hash = true;
+ if (!_local_table->try_move_nodes_to(Thread::current(), new_table)) {
+ _alt_hash = false;
+ delete new_table;
+ return false;
+ }
+
+ // free old table
+ delete _local_table;
+ _local_table = new_table;
+
+ return true;
+}
+
+void StringTable::try_rehash_table() {
+ static bool rehashed = false;
+ log_debug(stringtable)("Table imbalanced, rehashing called.");
+
+ // Grow instead of rehash.
+ if (get_load_factor() > PREF_AVG_LIST_LEN &&
+ !_local_table->is_max_size_reached()) {
+ log_debug(stringtable)("Choosing growing over rehashing.");
+ trigger_concurrent_work();
+ _needs_rehashing = false;
+ return;
+ }
+ // Already rehashed.
+ if (rehashed) {
+ log_warning(stringtable)("Rehashing already done, still long lists.");
+ trigger_concurrent_work();
+ _needs_rehashing = false;
+ return;
+ }
+
+ murmur_seed = AltHashing::compute_seed();
+ {
+ if (do_rehash()) {
+ rehashed = true;
+ } else {
+ log_info(stringtable)("Resizes in progress rehashing skipped.");
+ }
+ }
+ _needs_rehashing = false;
+}
+
+void StringTable::rehash_table() {
+ StringTable::the_table()->try_rehash_table();
+}
+
+// Statistics
+static int literal_size(oop obj) {
+ // NOTE: this would over-count if (pre-JDK8)
+ // java_lang_Class::has_offset_field() is true and the String.value array is
+ // shared by several Strings. However, starting from JDK8, the String.value
+ // array is not shared anymore.
+ if (obj == NULL) {
+ return 0;
+ } else if (obj->klass() == SystemDictionary::String_klass()) {
+ return (obj->size() + java_lang_String::value(obj)->size()) * HeapWordSize;
+ } else {
+ return obj->size();
+ }
+}
+
+struct SizeFunc : StackObj {
+  size_t operator()(WeakHandle<vm_string_table_data>* val) {
+ oop s = val->peek();
+ if (s == NULL) {
+ // Dead
+ return 0;
+ }
+ return literal_size(s);
+ };
+};
+
+void StringTable::print_table_statistics(outputStream* st,
+ const char* table_name) {
+ SizeFunc sz;
+ _local_table->statistics_to(Thread::current(), sz, st, table_name);
+}
+
+// Verification
+class VerifyStrings : StackObj {
+ public:
+  bool operator()(WeakHandle<vm_string_table_data>* val) {
+ oop s = val->peek();
+ if (s != NULL) {
+ assert(java_lang_String::length(s) >= 0, "Length on string must work.");
+ }
+ return true;
+ };
+};
+
// This verification is part of Universe::verify() and needs to be quick.
-// See StringTable::verify_and_compare() below for exhaustive verification.
void StringTable::verify() {
- for (int i = 0; i < the_table()->table_size(); ++i) {
-    HashtableEntry<oop, mtSymbol>* p = the_table()->bucket(i);
- for ( ; p != NULL; p = p->next()) {
- oop s = string_object_no_keepalive(p);
- guarantee(s != NULL, "interned string is NULL");
- unsigned int h = hash_string(s);
- guarantee(p->hash() == h, "broken hash in string table entry");
- guarantee(the_table()->hash_to_index(h) == i,
- "wrong index in string table");
- }
+ Thread* thr = Thread::current();
+ VerifyStrings vs;
+ if (!the_table()->_local_table->try_scan(thr, vs)) {
+ log_info(stringtable)("verify unavailable at this moment");
}
}
+// Verification and comparison
+class VerifyCompStrings : StackObj {
+  GrowableArray<oop>* _oops;
+ public:
+ size_t _errors;
+  VerifyCompStrings(GrowableArray<oop>* oops) : _oops(oops), _errors(0) {}
+  bool operator()(WeakHandle<vm_string_table_data>* val) {
+ oop s = val->resolve();
+ if (s == NULL) {
+ return true;
+ }
+ int len = _oops->length();
+ for (int i = 0; i < len; i++) {
+ bool eq = java_lang_String::equals(s, _oops->at(i));
+ assert(!eq, "Duplicate strings");
+ if (eq) {
+ _errors++;
+ }
+ }
+ _oops->push(s);
+ return true;
+ };
+};
+
+size_t StringTable::verify_and_compare_entries() {
+ Thread* thr = Thread::current();
+  GrowableArray<oop>* oops =
+    new (ResourceObj::C_HEAP, mtInternal)
+      GrowableArray<oop>((int)the_table()->_current_size, true);
+
+ VerifyCompStrings vcs(oops);
+ if (!the_table()->_local_table->try_scan(thr, vcs)) {
+ log_info(stringtable)("verify unavailable at this moment");
+ }
+ delete oops;
+ return vcs._errors;
+}
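
The duplicate scan above is deliberately simple and quadratic, which is fine for a debugging routine that runs rarely. The same check, sketched over plain strings:

```cpp
#include <string>
#include <vector>

// Count pairs of equal entries the way VerifyCompStrings does: compare each
// new entry against everything seen so far, then remember it.
size_t count_duplicates(const std::vector<std::string>& entries) {
  size_t errors = 0;
  std::vector<std::string> seen;
  for (size_t i = 0; i < entries.size(); i++) {
    for (size_t j = 0; j < seen.size(); j++) {
      if (seen[j] == entries[i]) {
        ++errors;  // two live table entries with equal contents
      }
    }
    seen.push_back(entries[i]);
  }
  return errors;
}
```
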
+
+// Dumping
+class PrintString : StackObj {
+ Thread* _thr;
+ outputStream* _st;
+ public:
+ PrintString(Thread* thr, outputStream* st) : _thr(thr), _st(st) {}
+  bool operator()(WeakHandle<vm_string_table_data>* val) {
+ oop s = val->peek();
+ if (s == NULL) {
+ return true;
+ }
+ typeArrayOop value = java_lang_String::value_no_keepalive(s);
+ int length = java_lang_String::length(s);
+ bool is_latin1 = java_lang_String::is_latin1(s);
+
+ if (length <= 0) {
+ _st->print("%d: ", length);
+ } else {
+ ResourceMark rm(_thr);
+ int utf8_length = length;
+ char* utf8_string;
+
+ if (!is_latin1) {
+ jchar* chars = value->char_at_addr(0);
+ utf8_string = UNICODE::as_utf8(chars, utf8_length);
+ } else {
+ jbyte* bytes = value->byte_at_addr(0);
+ utf8_string = UNICODE::as_utf8(bytes, utf8_length);
+ }
+
+ _st->print("%d: ", utf8_length);
+ HashtableTextDump::put_utf8(_st, utf8_string, utf8_length);
+ }
+ _st->cr();
+ return true;
+ };
+};
+
void StringTable::dump(outputStream* st, bool verbose) {
if (!verbose) {
- the_table()->print_table_statistics(st, "StringTable", string_object_no_keepalive);
+ the_table()->print_table_statistics(st, "StringTable");
} else {
- Thread* THREAD = Thread::current();
+ Thread* thr = Thread::current();
+ ResourceMark rm(thr);
st->print_cr("VERSION: 1.1");
- for (int i = 0; i < the_table()->table_size(); ++i) {
-      HashtableEntry<oop, mtSymbol>* p = the_table()->bucket(i);
- for ( ; p != NULL; p = p->next()) {
- oop s = string_object_no_keepalive(p);
- typeArrayOop value = java_lang_String::value_no_keepalive(s);
- int length = java_lang_String::length(s);
- bool is_latin1 = java_lang_String::is_latin1(s);
-
- if (length <= 0) {
- st->print("%d: ", length);
- } else {
- ResourceMark rm(THREAD);
- int utf8_length = length;
- char* utf8_string;
-
- if (!is_latin1) {
- jchar* chars = value->char_at_addr(0);
- utf8_string = UNICODE::as_utf8(chars, utf8_length);
- } else {
- jbyte* bytes = value->byte_at_addr(0);
- utf8_string = UNICODE::as_utf8(bytes, utf8_length);
- }
-
- st->print("%d: ", utf8_length);
- HashtableTextDump::put_utf8(st, utf8_string, utf8_length);
- }
- st->cr();
- }
+ PrintString ps(thr, st);
+ if (!the_table()->_local_table->try_scan(thr, ps)) {
+ st->print_cr("dump unavailable at this moment");
}
}
}
-StringTable::VerifyRetTypes StringTable::compare_entries(
- int bkt1, int e_cnt1,
-                                      HashtableEntry<oop, mtSymbol>* e_ptr1,
-                                      int bkt2, int e_cnt2,
-                                      HashtableEntry<oop, mtSymbol>* e_ptr2) {
- // These entries are sanity checked by verify_and_compare_entries()
- // before this function is called.
- oop str1 = string_object_no_keepalive(e_ptr1);
- oop str2 = string_object_no_keepalive(e_ptr2);
-
- if (str1 == str2) {
- tty->print_cr("ERROR: identical oop values (0x" PTR_FORMAT ") "
- "in entry @ bucket[%d][%d] and entry @ bucket[%d][%d]",
- p2i(str1), bkt1, e_cnt1, bkt2, e_cnt2);
- return _verify_fail_continue;
- }
-
- if (java_lang_String::equals(str1, str2)) {
- tty->print_cr("ERROR: identical String values in entry @ "
- "bucket[%d][%d] and entry @ bucket[%d][%d]",
- bkt1, e_cnt1, bkt2, e_cnt2);
- return _verify_fail_continue;
- }
-
- return _verify_pass;
-}
-
-StringTable::VerifyRetTypes StringTable::verify_entry(int bkt, int e_cnt,
-                                      HashtableEntry<oop, mtSymbol>* e_ptr,
- StringTable::VerifyMesgModes mesg_mode) {
-
- VerifyRetTypes ret = _verify_pass; // be optimistic
-
- oop str = string_object_no_keepalive(e_ptr);
- if (str == NULL) {
- if (mesg_mode == _verify_with_mesgs) {
- tty->print_cr("ERROR: NULL oop value in entry @ bucket[%d][%d]", bkt,
- e_cnt);
- }
- // NULL oop means no more verifications are possible
- return _verify_fail_done;
- }
-
- if (str->klass() != SystemDictionary::String_klass()) {
- if (mesg_mode == _verify_with_mesgs) {
- tty->print_cr("ERROR: oop is not a String in entry @ bucket[%d][%d]",
- bkt, e_cnt);
- }
- // not a String means no more verifications are possible
- return _verify_fail_done;
- }
-
- unsigned int h = hash_string(str);
- if (e_ptr->hash() != h) {
- if (mesg_mode == _verify_with_mesgs) {
- tty->print_cr("ERROR: broken hash value in entry @ bucket[%d][%d], "
- "bkt_hash=%d, str_hash=%d", bkt, e_cnt, e_ptr->hash(), h);
- }
- ret = _verify_fail_continue;
- }
-
- if (the_table()->hash_to_index(h) != bkt) {
- if (mesg_mode == _verify_with_mesgs) {
- tty->print_cr("ERROR: wrong index value for entry @ bucket[%d][%d], "
- "str_hash=%d, hash_to_index=%d", bkt, e_cnt, h,
- the_table()->hash_to_index(h));
- }
- ret = _verify_fail_continue;
- }
-
- return ret;
-}
-
-// See StringTable::verify() above for the quick verification that is
-// part of Universe::verify(). This verification is exhaustive and
-// reports on every issue that is found. StringTable::verify() only
-// reports on the first issue that is found.
-//
-// StringTable::verify_entry() checks:
-// - oop value != NULL (same as verify())
-// - oop value is a String
-// - hash(String) == hash in entry (same as verify())
-// - index for hash == index of entry (same as verify())
-//
-// StringTable::compare_entries() checks:
-// - oops are unique across all entries
-// - String values are unique across all entries
-//
-int StringTable::verify_and_compare_entries() {
- assert(StringTable_lock->is_locked(), "sanity check");
-
- int fail_cnt = 0;
-
- // first, verify all the entries individually:
- for (int bkt = 0; bkt < the_table()->table_size(); bkt++) {
-    HashtableEntry<oop, mtSymbol>* e_ptr = the_table()->bucket(bkt);
- for (int e_cnt = 0; e_ptr != NULL; e_ptr = e_ptr->next(), e_cnt++) {
- VerifyRetTypes ret = verify_entry(bkt, e_cnt, e_ptr, _verify_with_mesgs);
- if (ret != _verify_pass) {
- fail_cnt++;
- }
- }
- }
-
- // Optimization: if the above check did not find any failures, then
- // the comparison loop below does not need to call verify_entry()
- // before calling compare_entries(). If there were failures, then we
- // have to call verify_entry() to see if the entry can be passed to
- // compare_entries() safely. When we call verify_entry() in the loop
- // below, we do so quietly to void duplicate messages and we don't
- // increment fail_cnt because the failures have already been counted.
- bool need_entry_verify = (fail_cnt != 0);
-
- // second, verify all entries relative to each other:
- for (int bkt1 = 0; bkt1 < the_table()->table_size(); bkt1++) {
-    HashtableEntry<oop, mtSymbol>* e_ptr1 = the_table()->bucket(bkt1);
- for (int e_cnt1 = 0; e_ptr1 != NULL; e_ptr1 = e_ptr1->next(), e_cnt1++) {
- if (need_entry_verify) {
- VerifyRetTypes ret = verify_entry(bkt1, e_cnt1, e_ptr1,
- _verify_quietly);
- if (ret == _verify_fail_done) {
- // cannot use the current entry to compare against other entries
- continue;
- }
- }
-
- for (int bkt2 = bkt1; bkt2 < the_table()->table_size(); bkt2++) {
-      HashtableEntry<oop, mtSymbol>* e_ptr2 = the_table()->bucket(bkt2);
- int e_cnt2;
- for (e_cnt2 = 0; e_ptr2 != NULL; e_ptr2 = e_ptr2->next(), e_cnt2++) {
- if (bkt1 == bkt2 && e_cnt2 <= e_cnt1) {
- // skip the entries up to and including the one that
- // we're comparing against
- continue;
- }
-
- if (need_entry_verify) {
- VerifyRetTypes ret = verify_entry(bkt2, e_cnt2, e_ptr2,
- _verify_quietly);
- if (ret == _verify_fail_done) {
- // cannot compare against this entry
- continue;
- }
- }
-
- // compare two entries, report and count any failures:
- if (compare_entries(bkt1, e_cnt1, e_ptr1, bkt2, e_cnt2, e_ptr2)
- != _verify_pass) {
- fail_cnt++;
- }
- }
- }
- }
- }
- return fail_cnt;
-}
-
-// Create a new table and using alternate hash code, populate the new table
-// with the existing strings. Set flag to use the alternate hash code afterwards.
-void StringTable::rehash_table() {
- assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
- // This should never happen with -Xshare:dump but it might in testing mode.
- if (DumpSharedSpaces) return;
- StringTable* new_table = new StringTable();
-
- // Rehash the table
- the_table()->move_to(new_table);
-
- // Delete the table and buckets (entries are reused in new table).
- delete _the_table;
- // Don't check if we need rehashing until the table gets unbalanced again.
- // Then rehash with a new global seed.
- _needs_rehashing = false;
- _the_table = new_table;
-}
-
// Utility for dumping strings
StringtableDCmd::StringtableDCmd(outputStream* output, bool heap) :
DCmdWithParser(output, heap),
@@ -671,14 +780,21 @@ int StringtableDCmd::num_arguments() {
}
}
-#if INCLUDE_CDS_JAVA_HEAP
// Sharing
+#if INCLUDE_CDS_JAVA_HEAP
+oop StringTable::lookup_shared(jchar* name, int len, unsigned int hash) {
+ assert(hash == java_lang_String::hash_code(name, len),
+ "hash must be computed using java_lang_String::hash_code");
+ return _shared_table.lookup((const char*)name, hash, len);
+}
+
oop StringTable::create_archived_string(oop s, Thread* THREAD) {
assert(DumpSharedSpaces, "this function is only used with -Xshare:dump");
oop new_s = NULL;
typeArrayOop v = java_lang_String::value_no_keepalive(s);
- typeArrayOop new_v = (typeArrayOop)MetaspaceShared::archive_heap_object(v, THREAD);
+ typeArrayOop new_v =
+ (typeArrayOop)MetaspaceShared::archive_heap_object(v, THREAD);
if (new_v == NULL) {
return NULL;
}
@@ -692,51 +808,51 @@ oop StringTable::create_archived_string(oop s, Thread* THREAD) {
return new_s;
}
-bool StringTable::copy_shared_string(GrowableArray<MemRegion> *string_space,
- CompactStringTableWriter* writer) {
+struct CopyToArchive : StackObj {
+ CompactStringTableWriter* _writer;
+ CopyToArchive(CompactStringTableWriter* writer) : _writer(writer) {}
+  bool operator()(WeakHandle<vm_string_table_data>* val) {
+ oop s = val->peek();
+ if (s == NULL) {
+ return true;
+ }
+ unsigned int hash = java_lang_String::hash_code(s);
+ if (hash == 0) {
+ return true;
+ }
+
+ java_lang_String::set_hash(s, hash);
+ oop new_s = StringTable::create_archived_string(s, Thread::current());
+ if (new_s == NULL) {
+ return true;
+ }
+
+ val->replace(new_s);
+ // add to the compact table
+ _writer->add(hash, new_s);
+ return true;
+ }
+};
+
+void StringTable::copy_shared_string_table(CompactStringTableWriter* writer) {
assert(MetaspaceShared::is_heap_object_archiving_allowed(), "must be");
- Thread* THREAD = Thread::current();
- for (int i = 0; i < the_table()->table_size(); ++i) {
-    HashtableEntry<oop, mtSymbol>* bucket = the_table()->bucket(i);
- for ( ; bucket != NULL; bucket = bucket->next()) {
- oop s = string_object_no_keepalive(bucket);
- unsigned int hash = java_lang_String::hash_code(s);
- if (hash == 0) {
- continue;
- }
-
- java_lang_String::set_hash(s, hash);
- oop new_s = create_archived_string(s, THREAD);
- if (new_s == NULL) {
- continue;
- }
-
- // set the archived string in bucket
- set_string_object(bucket, new_s);
-
- // add to the compact table
- writer->add(hash, new_s);
- }
- }
-
- return true;
+ CopyToArchive copy(writer);
+ StringTable::the_table()->_local_table->do_scan(Thread::current(), copy);
}
-void StringTable::write_to_archive(GrowableArray<MemRegion> *string_space) {
+void StringTable::write_to_archive() {
assert(MetaspaceShared::is_heap_object_archiving_allowed(), "must be");
_shared_table.reset();
- int num_buckets = the_table()->number_of_entries() /
- SharedSymbolTableBucketSize;
+ int num_buckets = the_table()->_items / SharedSymbolTableBucketSize;
// The calculation of num_buckets can result in zero buckets; we need at least one
CompactStringTableWriter writer(num_buckets > 1 ? num_buckets : 1,
&MetaspaceShared::stats()->string);
// Copy the interned strings into the "string space" within the java heap
- if (copy_shared_string(string_space, &writer)) {
- writer.dump(&_shared_table);
- }
+ copy_shared_string_table(&writer);
+ writer.dump(&_shared_table);
}
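
A worked example of the bucket sizing above, assuming `SharedSymbolTableBucketSize` is 4 (its usual default; the flag itself is not shown in this hunk):

```cpp
#include <cassert>

int main() {
  const int SharedSymbolTableBucketSize = 4;  // assumed default
  int items = 100000;
  int num_buckets = items / SharedSymbolTableBucketSize;  // 25000 buckets
  assert(num_buckets == 25000);
  items = 2;                                              // nearly empty table
  num_buckets = items / SharedSymbolTableBucketSize;      // 0, which is invalid
  num_buckets = num_buckets > 1 ? num_buckets : 1;        // clamped to 1
  assert(num_buckets == 1);
  return 0;
}
```
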
void StringTable::serialize(SerializeClosure* soc) {
@@ -744,7 +860,8 @@ void StringTable::serialize(SerializeClosure* soc) {
_shared_table.serialize(soc);
if (soc->writing()) {
- _shared_table.reset(); // Sanity. Make sure we don't use the shared table at dump time
+ // Sanity. Make sure we don't use the shared table at dump time
+ _shared_table.reset();
} else if (!_shared_string_mapped) {
_shared_table.reset();
}
diff --git a/src/hotspot/share/classfile/stringTable.hpp b/src/hotspot/share/classfile/stringTable.hpp
index fa99e4f32cd..ca056949930 100644
--- a/src/hotspot/share/classfile/stringTable.hpp
+++ b/src/hotspot/share/classfile/stringTable.hpp
@@ -25,109 +25,111 @@
#ifndef SHARE_VM_CLASSFILE_STRINGTABLE_HPP
#define SHARE_VM_CLASSFILE_STRINGTABLE_HPP
-#include "utilities/hashtable.hpp"
+#include "gc/shared/oopStorage.hpp"
+#include "gc/shared/oopStorageParState.hpp"
+#include "memory/allocation.hpp"
+#include "memory/padded.hpp"
+#include "oops/oop.hpp"
+#include "oops/weakHandle.hpp"
+#include "utilities/concurrentHashTable.hpp"
template <class T, class N> class CompactHashtable;
class CompactStringTableWriter;
-class FileMapInfo;
class SerializeClosure;
-class StringTable : public RehashableHashtable<oop, mtSymbol> {
+class StringTable;
+class StringTableConfig;
+typedef ConcurrentHashTable<WeakHandle<vm_string_table_data>,
+                            StringTableConfig, mtSymbol> StringTableHash;
+
+class StringTableCreateEntry;
+
+class StringTable : public CHeapObj<mtSymbol>{
friend class VMStructs;
friend class Symbol;
+ friend class StringTableConfig;
+ friend class StringTableCreateEntry;
private:
+ void grow(JavaThread* jt);
+ void clean_dead_entries(JavaThread* jt);
+
// The string table
static StringTable* _the_table;
-
// Shared string table
static CompactHashtable<oop, char> _shared_table;
static bool _shared_string_mapped;
+ static bool _alt_hash;
+private:
- // Set if one bucket is out of balance due to hash algorithm deficiency
- static bool _needs_rehashing;
+  StringTableHash* _local_table;
+  size_t _current_size;
+  volatile bool _has_work;
+  // Set if one bucket is out of balance due to hash algorithm deficiency
+  volatile bool _needs_rehashing;
- // Claimed high water mark for parallel chunked scanning
- static volatile int _parallel_claimed_idx;
+ OopStorage* _weak_handles;
- static oop intern(Handle string_or_null, jchar* chars, int length, TRAPS);
- oop basic_add(int index, Handle string_or_null, jchar* name, int len,
- unsigned int hashValue, TRAPS);
+ volatile size_t _items;
+ DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
+ volatile size_t _uncleaned_items;
+ DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
- oop lookup_in_main_table(int index, jchar* chars, int length, unsigned int hashValue);
- static oop lookup_shared(jchar* name, int len, unsigned int hash);
+ double get_load_factor();
+ double get_dead_factor();
- // Apply the give oop closure to the entries to the buckets
- // in the range [start_idx, end_idx).
- static void buckets_oops_do(OopClosure* f, int start_idx, int end_idx);
+ void check_concurrent_work();
+ void trigger_concurrent_work();
- typedef StringTable::BucketUnlinkContext BucketUnlinkContext;
- // Unlink or apply the give oop closure to the entries to the buckets
- // in the range [start_idx, end_idx). Unlinked bucket entries are collected in the given
- // context to be freed later.
- // This allows multiple threads to work on the table at once.
- static void buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, BucketUnlinkContext* context);
+  static size_t item_added();
+ static void item_removed();
+ static size_t items_to_clean(size_t ncl);
- // Hashing algorithm, used as the hash value used by the
- // StringTable for bucket selection and comparison (stored in the
- // HashtableEntry structures). This is used in the String.intern() method.
- static unsigned int hash_string(const jchar* s, int len);
- static unsigned int hash_string(oop string);
- static unsigned int alt_hash_string(const jchar* s, int len);
+ StringTable();
- // Accessors for the string roots in the hashtable entries.
- // Use string_object_no_keepalive() only when the value is not returned
- // outside of a scope where a thread transition is possible.
-  static oop string_object(HashtableEntry<oop, mtSymbol>* entry);
-  static oop string_object_no_keepalive(HashtableEntry<oop, mtSymbol>* entry);
-  static void set_string_object(HashtableEntry<oop, mtSymbol>* entry, oop string);
+ static oop intern(Handle string_or_null_h, jchar* name, int len, TRAPS);
+ oop do_intern(Handle string_or_null, jchar* name, int len, uintx hash, TRAPS);
+ oop do_lookup(jchar* name, int len, uintx hash);
-  StringTable() : RehashableHashtable<oop, mtSymbol>((int)StringTableSize,
-                                      sizeof (HashtableEntry<oop, mtSymbol>)) {}
+ void concurrent_work(JavaThread* jt);
+ void print_table_statistics(outputStream* st, const char* table_name);
-  StringTable(HashtableBucket<mtSymbol>* t, int number_of_entries)
-    : RehashableHashtable<oop, mtSymbol>((int)StringTableSize, sizeof (HashtableEntry<oop, mtSymbol>), t,
- number_of_entries) {}
-public:
+ void try_rehash_table();
+ bool do_rehash();
+
+ public:
// The string table
static StringTable* the_table() { return _the_table; }
+ size_t table_size(Thread* thread = NULL);
- // Size of one bucket in the string table. Used when checking for rollover.
-  static uint bucket_size() { return sizeof(HashtableBucket<mtSymbol>); }
+ static OopStorage* weak_storage() { return the_table()->_weak_handles; }
static void create_table() {
assert(_the_table == NULL, "One string table allowed.");
_the_table = new StringTable();
}
+ static void do_concurrent_work(JavaThread* jt);
+ static bool has_work() { return the_table()->_has_work; }
+
// GC support
// Delete pointers to otherwise-unreachable objects.
- static void unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f) {
- int processed = 0;
- int removed = 0;
- unlink_or_oops_do(cl, f, &processed, &removed);
- }
static void unlink(BoolObjectClosure* cl) {
- int processed = 0;
- int removed = 0;
- unlink_or_oops_do(cl, NULL, &processed, &removed);
- }
- static void unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f, int* processed, int* removed);
- static void unlink(BoolObjectClosure* cl, int* processed, int* removed) {
- unlink_or_oops_do(cl, NULL, processed, removed);
+ unlink_or_oops_do(cl);
}
+ static void unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f = NULL,
+ int* processed = NULL, int* removed = NULL);
+
// Serially invoke "f->do_oop" on the locations of all oops in the table.
static void oops_do(OopClosure* f);
// Possibly parallel versions of the above
- static void possibly_parallel_unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f, int* processed, int* removed);
- static void possibly_parallel_unlink(BoolObjectClosure* cl, int* processed, int* removed) {
- possibly_parallel_unlink_or_oops_do(cl, NULL, processed, removed);
- }
- static void possibly_parallel_oops_do(OopClosure* f);
-
- // Internal test.
- static void test_alt_hash() PRODUCT_RETURN;
+ static void possibly_parallel_unlink(
+     OopStorage::ParState<false /* concurrent */, false /* const */>* par_state_string,
+     BoolObjectClosure* cl, int* processed, int* removed);
+  static void possibly_parallel_oops_do(
+     OopStorage::ParState<false /* concurrent */, false /* const */>* par_state_string,
+ OopClosure* f);
// Probing
static oop lookup(Symbol* symbol);
@@ -138,46 +140,28 @@ public:
static oop intern(oop string, TRAPS);
static oop intern(const char *utf8_string, TRAPS);
- // Debugging
- static void verify();
- static void dump(outputStream* st, bool verbose=false);
-
- enum VerifyMesgModes {
- _verify_quietly = 0,
- _verify_with_mesgs = 1
- };
-
- enum VerifyRetTypes {
- _verify_pass = 0,
- _verify_fail_continue = 1,
- _verify_fail_done = 2
- };
-
- static VerifyRetTypes compare_entries(int bkt1, int e_cnt1,
-                                        HashtableEntry<oop, mtSymbol>* e_ptr1,
-                                        int bkt2, int e_cnt2,
-                                        HashtableEntry<oop, mtSymbol>* e_ptr2);
-  static VerifyRetTypes verify_entry(int bkt, int e_cnt,
-                                     HashtableEntry<oop, mtSymbol>* e_ptr,
- VerifyMesgModes mesg_mode);
- static int verify_and_compare_entries();
+ // Rehash the string table if it gets out of balance
+ static void rehash_table();
+ static bool needs_rehashing()
+ { return StringTable::the_table()->_needs_rehashing; }
// Sharing
+ private:
+ oop lookup_shared(jchar* name, int len, unsigned int hash) NOT_CDS_JAVA_HEAP_RETURN_(NULL);
+ static void copy_shared_string_table(CompactStringTableWriter* ch_table) NOT_CDS_JAVA_HEAP_RETURN;
+ public:
+ static oop create_archived_string(oop s, Thread* THREAD);
static void set_shared_string_mapped() { _shared_string_mapped = true; }
static bool shared_string_mapped() { return _shared_string_mapped; }
static void shared_oops_do(OopClosure* f) NOT_CDS_JAVA_HEAP_RETURN;
-  static bool copy_shared_string(GrowableArray<MemRegion> *string_space,
-                                 CompactStringTableWriter* ch_table) NOT_CDS_JAVA_HEAP_RETURN_(false);
-  static oop create_archived_string(oop s, Thread* THREAD) NOT_CDS_JAVA_HEAP_RETURN_(NULL);
-  static void write_to_archive(GrowableArray<MemRegion> *string_space) NOT_CDS_JAVA_HEAP_RETURN;
+ static void write_to_archive() NOT_CDS_JAVA_HEAP_RETURN;
static void serialize(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN;
- // Rehash the symbol table if it gets out of balance
- static void rehash_table();
- static bool needs_rehashing() { return _needs_rehashing; }
-
- // Parallel chunked scanning
- static void clear_parallel_claimed_index() { _parallel_claimed_idx = 0; }
- static int parallel_claimed_index() { return _parallel_claimed_idx; }
+ // Jcmd
+ static void dump(outputStream* st, bool verbose=false);
+ // Debugging
+ static size_t verify_and_compare_entries();
+ static void verify();
};
+
#endif // SHARE_VM_CLASSFILE_STRINGTABLE_HPP
diff --git a/src/hotspot/share/classfile/systemDictionary.cpp b/src/hotspot/share/classfile/systemDictionary.cpp
index 27c5bd39dff..e8a99f00f70 100644
--- a/src/hotspot/share/classfile/systemDictionary.cpp
+++ b/src/hotspot/share/classfile/systemDictionary.cpp
@@ -76,7 +76,7 @@
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "services/classLoadingService.hpp"
@@ -110,9 +110,6 @@ oop SystemDictionary::_java_platform_loader = NULL;
bool SystemDictionary::_has_checkPackageAccess = false;
-// lazily initialized klass variables
-InstanceKlass* volatile SystemDictionary::_abstract_ownable_synchronizer_klass = NULL;
-
// Default ProtectionDomainCacheSize value
const int defaultProtectionDomainCacheSize = 1009;
@@ -1817,22 +1814,11 @@ void SystemDictionary::add_to_hierarchy(InstanceKlass* k, TRAPS) {
// ----------------------------------------------------------------------------
// GC support
-void SystemDictionary::always_strong_oops_do(OopClosure* blk) {
- roots_oops_do(blk, NULL);
-}
-
-
// Assumes classes in the SystemDictionary are only unloaded at a safepoint
// Note: anonymous classes are not in the SD.
-bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive,
- GCTimer* gc_timer,
+bool SystemDictionary::do_unloading(GCTimer* gc_timer,
bool do_cleaning) {
- {
- GCTraceTime(Debug, gc, phases) t("SystemDictionary WeakHandle cleaning", gc_timer);
- vm_weak_oop_storage()->weak_oops_do(is_alive, &do_nothing_cl);
- }
-
bool unloading_occurred;
{
GCTraceTime(Debug, gc, phases) t("ClassLoaderData", gc_timer);
@@ -1863,27 +1849,6 @@ bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive,
return unloading_occurred;
}
-void SystemDictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
- strong->do_oop(&_java_system_loader);
- strong->do_oop(&_java_platform_loader);
- strong->do_oop(&_system_loader_lock_obj);
- CDS_ONLY(SystemDictionaryShared::roots_oops_do(strong);)
-
- // Do strong roots marking if the closures are the same.
- if (strong == weak || !ClassUnloading) {
- // Only the protection domain oops contain references into the heap. Iterate
- // over all of them.
- vm_weak_oop_storage()->oops_do(strong);
- } else {
- if (weak != NULL) {
- vm_weak_oop_storage()->oops_do(weak);
- }
- }
-
- // Visit extra methods
- invoke_method_table()->oops_do(strong);
-}
-
void SystemDictionary::oops_do(OopClosure* f) {
f->do_oop(&_java_system_loader);
f->do_oop(&_java_platform_loader);
@@ -1892,8 +1857,6 @@ void SystemDictionary::oops_do(OopClosure* f) {
// Visit extra methods
invoke_method_table()->oops_do(f);
-
- vm_weak_oop_storage()->oops_do(f);
}
// CDS: scan and relocate all classes in the system dictionary.
@@ -1930,22 +1893,6 @@ void SystemDictionary::remove_classes_in_error_state() {
ClassLoaderDataGraph::cld_do(&rcc);
}
-// ----------------------------------------------------------------------------
-// Lazily load klasses
-
-void SystemDictionary::load_abstract_ownable_synchronizer_klass(TRAPS) {
- // if multiple threads calling this function, only one thread will load
- // the class. The other threads will find the loaded version once the
- // class is loaded.
- Klass* aos = _abstract_ownable_synchronizer_klass;
- if (aos == NULL) {
- Klass* k = resolve_or_fail(vmSymbols::java_util_concurrent_locks_AbstractOwnableSynchronizer(), true, CHECK);
- // Force a fence to prevent any read before the write completes
- OrderAccess::fence();
- _abstract_ownable_synchronizer_klass = InstanceKlass::cast(k);
- }
-}
-
// ----------------------------------------------------------------------------
// Initialization
diff --git a/src/hotspot/share/classfile/systemDictionary.hpp b/src/hotspot/share/classfile/systemDictionary.hpp
index 6c9f203fa2c..90d1b11ac75 100644
--- a/src/hotspot/share/classfile/systemDictionary.hpp
+++ b/src/hotspot/share/classfile/systemDictionary.hpp
@@ -199,6 +199,9 @@ class OopStorage;
do_klass(StackFrameInfo_klass, java_lang_StackFrameInfo, Opt ) \
do_klass(LiveStackFrameInfo_klass, java_lang_LiveStackFrameInfo, Opt ) \
\
+ /* support for stack dump lock analysis */ \
+ do_klass(java_util_concurrent_locks_AbstractOwnableSynchronizer_klass, java_util_concurrent_locks_AbstractOwnableSynchronizer, Pre ) \
+ \
/* Preload boxing klasses */ \
do_klass(Boolean_klass, java_lang_Boolean, Pre ) \
do_klass(Character_klass, java_lang_Character, Pre ) \
@@ -357,14 +360,9 @@ public:
// Garbage collection support
- // This method applies "blk->do_oop" to all the pointers to "system"
- // classes and loaders.
- static void always_strong_oops_do(OopClosure* blk);
-
// Unload (that is, break root links to) all unmarked classes and
// loaders. Returns "true" iff something was unloaded.
- static bool do_unloading(BoolObjectClosure* is_alive,
- GCTimer* gc_timer,
+ static bool do_unloading(GCTimer* gc_timer,
bool do_cleaning = true);
// Used by DumpSharedSpaces only to remove classes that failed verification
@@ -374,7 +372,6 @@ public:
// Applies "f->do_oop" to all root oops in the system dictionary.
static void oops_do(OopClosure* f);
- static void roots_oops_do(OopClosure* strong, OopClosure* weak);
// System loader lock
static oop system_loader_lock() { return _system_loader_lock_obj; }
@@ -455,12 +452,6 @@ public:
}
static BasicType box_klass_type(Klass* k); // inverse of box_klass
- // methods returning lazily loaded klasses
- // The corresponding method to load the class must be called before calling them.
- static InstanceKlass* abstract_ownable_synchronizer_klass() { return check_klass(_abstract_ownable_synchronizer_klass); }
-
- static void load_abstract_ownable_synchronizer_klass(TRAPS);
-
protected:
// Returns the class loader data to be used when looking up/updating the
// system dictionary.
@@ -735,9 +726,6 @@ protected:
// Variables holding commonly used klasses (preloaded)
static InstanceKlass* _well_known_klasses[];
- // Lazily loaded klasses
- static InstanceKlass* volatile _abstract_ownable_synchronizer_klass;
-
// table of box klasses (int_klass, etc.)
static InstanceKlass* _box_klasses[T_VOID+1];
diff --git a/src/hotspot/share/classfile/systemDictionaryShared.cpp b/src/hotspot/share/classfile/systemDictionaryShared.cpp
index 3be54622cdf..2d6a29941b7 100644
--- a/src/hotspot/share/classfile/systemDictionaryShared.cpp
+++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp
@@ -87,8 +87,8 @@ Handle SystemDictionaryShared::get_shared_jar_manifest(int shared_path_index, TR
assert(src != NULL, "No Manifest data");
typeArrayOop buf = oopFactory::new_byteArray(size, CHECK_NH);
typeArrayHandle bufhandle(THREAD, buf);
- char* dst = (char*)(buf->byte_at_addr(0));
- memcpy(dst, src, (size_t)size);
+    ArrayAccess<>::arraycopy_from_native(reinterpret_cast<jbyte*>(src),
+                                         buf, typeArrayOopDesc::element_offset<jbyte>(0), size);
Handle bais = JavaCalls::construct_new_instance(SystemDictionary::ByteArrayInputStream_klass(),
vmSymbols::byte_array_void_signature(),
diff --git a/src/hotspot/share/classfile/verifier.cpp b/src/hotspot/share/classfile/verifier.cpp
index 8685f408417..2e76c0ecd95 100644
--- a/src/hotspot/share/classfile/verifier.cpp
+++ b/src/hotspot/share/classfile/verifier.cpp
@@ -47,7 +47,7 @@
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.inline.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/thread.hpp"
diff --git a/src/hotspot/share/classfile/vmSymbols.cpp b/src/hotspot/share/classfile/vmSymbols.cpp
index ebbf66f8ab0..d225e68ada7 100644
--- a/src/hotspot/share/classfile/vmSymbols.cpp
+++ b/src/hotspot/share/classfile/vmSymbols.cpp
@@ -756,6 +756,9 @@ bool vmIntrinsics::is_disabled_by_flags(vmIntrinsics::ID id) {
#endif // COMPILER1
#ifdef COMPILER2
case vmIntrinsics::_clone:
+#if INCLUDE_ZGC
+ if (UseZGC) return true;
+#endif
case vmIntrinsics::_copyOf:
case vmIntrinsics::_copyOfRange:
// These intrinsics use both the objectcopy and the arraycopy
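
The new guard disables `_clone` only when ZGC is in use; in every other configuration the case falls through and shares the handling of the array-copy intrinsics below it. The pattern in isolation (the names and the shared check are illustrative):

```cpp
#include <cstdio>

enum ToyIntrinsic { Clone, CopyOf, CopyOfRange };

// One case returns early under a runtime condition and otherwise falls
// through to the cases below it, sharing their logic.
bool is_disabled(ToyIntrinsic id, bool use_zgc) {
  switch (id) {
    case Clone:
      if (use_zgc) return true;  // disabled only under this collector
      // fall through: otherwise handled like the array-copy intrinsics
    case CopyOf:
    case CopyOfRange:
      return false;              // stand-in for the shared flag checks
  }
  return false;
}

int main() {
  printf("%d %d\n", is_disabled(Clone, true), is_disabled(Clone, false)); // 1 0
  return 0;
}
```
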
diff --git a/src/hotspot/share/code/codeCache.cpp b/src/hotspot/share/code/codeCache.cpp
index 8c3438210c2..1d1846869df 100644
--- a/src/hotspot/share/code/codeCache.cpp
+++ b/src/hotspot/share/code/codeCache.cpp
@@ -685,8 +685,15 @@ void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurre
assert_locked_or_safepoint(CodeCache_lock);
CompiledMethodIterator iter;
while(iter.next_alive()) {
- iter.method()->do_unloading(is_alive, unloading_occurred);
+ iter.method()->do_unloading(is_alive);
}
+
+  // Now that all the unloaded nmethods are known, clean up their caches
+  // before the CLDG is purged.
+  // This is another code cache walk, but it was moved here from gc_epilogue.
+  // G1 does a parallel walk of the nmethods and cleans them up as it goes,
+  // so it does not call this.
+ do_unloading_nmethod_caches(unloading_occurred);
}
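
The unloading pass is now explicitly two-phase: first every live nmethod decides whether it is dead, and only once the full set of unloaded nmethods is known are the caches that may point at them cleaned, ahead of the CLDG purge. The shape of that, with toy types only:

```cpp
#include <functional>
#include <vector>

struct ToyMethod {
  bool alive;
  std::vector<ToyMethod*> callees;  // stand-in for inline-cache targets
  ToyMethod() : alive(true) {}
};

void unload_cycle(std::vector<ToyMethod*>& methods,
                  const std::function<bool(ToyMethod*)>& is_alive) {
  // Phase 1: find all unloaded methods first.
  for (size_t i = 0; i < methods.size(); i++) {
    methods[i]->alive = is_alive(methods[i]);
  }
  // Phase 2: with the dead set complete, drop references to dead methods
  // from the survivors, before any storage is reclaimed.
  for (size_t i = 0; i < methods.size(); i++) {
    if (!methods[i]->alive) continue;
    std::vector<ToyMethod*>& c = methods[i]->callees;
    size_t j = 0;
    while (j < c.size()) {
      if (!c[j]->alive) c.erase(c.begin() + j); else ++j;
    }
  }
}
```
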
void CodeCache::blobs_do(CodeBlobClosure* f) {
@@ -720,8 +727,11 @@ void CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) {
assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
- if (TraceScavenge) {
- cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
+ LogTarget(Trace, gc, nmethod) lt;
+ if (lt.is_enabled()) {
+ LogStream ls(lt);
+ CompileTask::print(&ls, cur,
+ is_live ? "scavenge root " : "dead scavenge root", /*short_form:*/ true);
}
if (is_live) {
// Perform cur->oops_do(f), maybe just once per nmethod.
@@ -892,18 +902,26 @@ void CodeCache::verify_icholder_relocations() {
#endif
}
-void CodeCache::gc_prologue() {
-}
+void CodeCache::gc_prologue() { }
void CodeCache::gc_epilogue() {
+ prune_scavenge_root_nmethods();
+}
+
+
+void CodeCache::do_unloading_nmethod_caches(bool class_unloading_occurred) {
assert_locked_or_safepoint(CodeCache_lock);
- NOT_DEBUG(if (needs_cache_clean())) {
+ // Even if classes are not unloaded, there may have been some nmethods that are
+ // unloaded because oops in them are no longer reachable.
+ NOT_DEBUG(if (needs_cache_clean() || class_unloading_occurred)) {
CompiledMethodIterator iter;
while(iter.next_alive()) {
CompiledMethod* cm = iter.method();
assert(!cm->is_unloaded(), "Tautology");
- DEBUG_ONLY(if (needs_cache_clean())) {
- cm->cleanup_inline_caches();
+ DEBUG_ONLY(if (needs_cache_clean() || class_unloading_occurred)) {
+ // Clean up both unloaded klasses from nmethods and unloaded nmethods
+ // from inline caches.
+ cm->unload_nmethod_caches(/*parallel*/false, class_unloading_occurred);
}
DEBUG_ONLY(cm->verify());
DEBUG_ONLY(cm->verify_oop_relocations());
@@ -911,8 +929,6 @@ void CodeCache::gc_epilogue() {
}
set_needs_cache_clean(false);
- prune_scavenge_root_nmethods();
-
verify_icholder_relocations();
}
@@ -1593,6 +1609,7 @@ void CodeCache::print() {
}
void CodeCache::print_summary(outputStream* st, bool detailed) {
+ int full_count = 0;
FOR_ALL_HEAPS(heap_iterator) {
CodeHeap* heap = (*heap_iterator);
size_t total = (heap->high_boundary() - heap->low_boundary());
@@ -1611,6 +1628,8 @@ void CodeCache::print_summary(outputStream* st, bool detailed) {
p2i(heap->low_boundary()),
p2i(heap->high()),
p2i(heap->high_boundary()));
+
+ full_count += get_codemem_full_count(heap->code_blob_type());
}
}
@@ -1622,6 +1641,10 @@ void CodeCache::print_summary(outputStream* st, bool detailed) {
"enabled" : Arguments::mode() == Arguments::_int ?
"disabled (interpreter mode)" :
"disabled (not enough contiguous free space left)");
+ st->print_cr(" stopped_count=%d, restarted_count=%d",
+ CompileBroker::get_total_compiler_stopped_count(),
+ CompileBroker::get_total_compiler_restarted_count());
+ st->print_cr(" full_count=%d", full_count);
}
}
diff --git a/src/hotspot/share/code/codeCache.hpp b/src/hotspot/share/code/codeCache.hpp
index 5f1d7d8d832..fcf6cb404c0 100644
--- a/src/hotspot/share/code/codeCache.hpp
+++ b/src/hotspot/share/code/codeCache.hpp
@@ -168,9 +168,10 @@ class CodeCache : AllStatic {
static void gc_epilogue();
static void gc_prologue();
static void verify_oops();
- // If "unloading_occurred" is true, then unloads (i.e., breaks root links
+  // If any oops are not marked, this method unloads (i.e., breaks root links
// to) any unmarked codeBlobs in the cache. Sets "marked_for_unloading"
// to "true" iff some code got unloaded.
+ // "unloading_occurred" controls whether metadata should be cleaned because of class unloading.
static void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
@@ -223,8 +224,10 @@ class CodeCache : AllStatic {
static bool needs_cache_clean() { return _needs_cache_clean; }
static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
+
static void clear_inline_caches(); // clear all inline caches
- static void cleanup_inline_caches();
+ static void cleanup_inline_caches(); // clean unloaded/zombie nmethods from inline caches
+ static void do_unloading_nmethod_caches(bool class_unloading_occurred); // clean all nmethod caches for unloading, including inline caches
// Returns true if an own CodeHeap for the given CodeBlobType is available
static bool heap_available(int code_blob_type);
diff --git a/src/hotspot/share/code/compiledIC.cpp b/src/hotspot/share/code/compiledIC.cpp
index afb4bc8c1c5..ab94626d112 100644
--- a/src/hotspot/share/code/compiledIC.cpp
+++ b/src/hotspot/share/code/compiledIC.cpp
@@ -552,7 +552,8 @@ void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site, const Com
// ----------------------------------------------------------------------------
-void CompiledStaticCall::set_to_clean() {
+void CompiledStaticCall::set_to_clean(bool in_use) {
+ // in_use is unused but needed to match template function in CompiledMethod
assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
// Reset call site
MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
diff --git a/src/hotspot/share/code/compiledIC.hpp b/src/hotspot/share/code/compiledIC.hpp
index 4f967957d29..09790efc241 100644
--- a/src/hotspot/share/code/compiledIC.hpp
+++ b/src/hotspot/share/code/compiledIC.hpp
@@ -358,7 +358,7 @@ public:
virtual address destination() const = 0;
// Clean static call (will force resolving on next use)
- void set_to_clean();
+ void set_to_clean(bool in_use = true);
// Set state. The entry must be the same, as computed by compute_entry.
// Computation and setting is split up, since the actions are separate during
diff --git a/src/hotspot/share/code/compiledMethod.cpp b/src/hotspot/share/code/compiledMethod.cpp
index 4e77ee3bcdb..ae2a1110cfe 100644
--- a/src/hotspot/share/code/compiledMethod.cpp
+++ b/src/hotspot/share/code/compiledMethod.cpp
@@ -28,6 +28,8 @@
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "interpreter/bytecode.inline.hpp"
+#include "logging/log.hpp"
+#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
@@ -222,9 +224,7 @@ ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
pd->return_oop());
}
-void CompiledMethod::cleanup_inline_caches(bool clean_all/*=false*/) {
- assert_locked_or_safepoint(CompiledIC_lock);
-
+address CompiledMethod::oops_reloc_begin() const {
// If the method is not entrant or zombie then a JMP is plastered over the
// first few bytes. If an oop in the old code was there, that oop
// should not get GC'd. Skip the first few bytes of oops on
@@ -237,41 +237,7 @@ void CompiledMethod::cleanup_inline_caches(bool clean_all/*=false*/) {
// This shouldn't matter, since oops of non-entrant methods are never used.
// In fact, why are we bothering to look at oops in a non-entrant method??
}
-
- // Find all calls in an nmethod and clear the ones that point to non-entrant,
- // zombie and unloaded nmethods.
- ResourceMark rm;
- RelocIterator iter(this, low_boundary);
- while(iter.next()) {
- switch(iter.type()) {
- case relocInfo::virtual_call_type:
- case relocInfo::opt_virtual_call_type: {
- CompiledIC *ic = CompiledIC_at(&iter);
- // Ok, to lookup references to zombies here
- CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
- if( cb != NULL && cb->is_compiled() ) {
- CompiledMethod* nm = cb->as_compiled_method();
- // Clean inline caches pointing to zombie, non-entrant and unloaded methods
- if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean(is_alive());
- }
- break;
- }
- case relocInfo::static_call_type: {
- CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
- CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
- if( cb != NULL && cb->is_compiled() ) {
- CompiledMethod* cm = cb->as_compiled_method();
- // Clean inline caches pointing to zombie, non-entrant and unloaded methods
- if (clean_all || !cm->is_in_use() || (cm->method()->code() != cm)) {
- csc->set_to_clean();
- }
- }
- break;
- }
- default:
- break;
- }
- }
+ return low_boundary;
}
int CompiledMethod::verify_icholder_relocations() {
@@ -437,17 +403,15 @@ unsigned char CompiledMethod::unloading_clock() {
return OrderAccess::load_acquire(&_unloading_clock);
}
-// Processing of oop references should have been sufficient to keep
-// all strong references alive. Any weak references should have been
-// cleared as well. Visit all the metadata and ensure that it's
-// really alive.
-void CompiledMethod::verify_metadata_loaders(address low_boundary) {
+
+// static_stub_Relocations may have dangling references to
+// nmethods so trim them out here. Otherwise it looks like
+// compiled code is maintaining a link to dead metadata.
+void CompiledMethod::clean_ic_stubs() {
#ifdef ASSERT
- RelocIterator iter(this, low_boundary);
- while (iter.next()) {
- // static_stub_Relocations may have dangling references to
- // Method*s so trim them out here. Otherwise it looks like
- // compiled code is maintaining a link to dead metadata.
+ address low_boundary = oops_reloc_begin();
+ RelocIterator iter(this, low_boundary);
+ while (iter.next()) {
address static_call_addr = NULL;
if (iter.type() == relocInfo::opt_virtual_call_type) {
CompiledIC* cic = CompiledIC_at(&iter);
@@ -470,8 +434,6 @@ void CompiledMethod::verify_metadata_loaders(address low_boundary) {
}
}
}
- // Check that the metadata embedded in the nmethod is alive
- metadata_do(check_class);
#endif
}
@@ -479,67 +441,43 @@ void CompiledMethod::verify_metadata_loaders(address low_boundary) {
// GC to unload an nmethod if it contains otherwise unreachable
// oops.
-void CompiledMethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
+void CompiledMethod::do_unloading(BoolObjectClosure* is_alive) {
// Make sure the oop's ready to receive visitors
assert(!is_zombie() && !is_unloaded(),
"should not call follow on zombie or unloaded nmethod");
- // If the method is not entrant then a JMP is plastered over the
- // first few bytes. If an oop in the old code was there, that oop
- // should not get GC'd. Skip the first few bytes of oops on
- // not-entrant methods.
- address low_boundary = verified_entry_point();
- if (is_not_entrant()) {
- low_boundary += NativeJump::instruction_size;
- // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
- // (See comment above.)
- }
+ address low_boundary = oops_reloc_begin();
- // Exception cache
- clean_exception_cache();
-
- // If class unloading occurred we first iterate over all inline caches and
- // clear ICs where the cached oop is referring to an unloaded klass or method.
- // The remaining live cached oops will be traversed in the relocInfo::oop_type
- // iteration below.
- if (unloading_occurred) {
- RelocIterator iter(this, low_boundary);
- while(iter.next()) {
- if (iter.type() == relocInfo::virtual_call_type) {
- CompiledIC *ic = CompiledIC_at(&iter);
- clean_ic_if_metadata_is_dead(ic);
- }
- }
- }
-
- if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
+ if (do_unloading_oops(low_boundary, is_alive)) {
return;
}
#if INCLUDE_JVMCI
- if (do_unloading_jvmci(unloading_occurred)) {
+ if (do_unloading_jvmci()) {
return;
}
#endif
- // Ensure that all metadata is still alive
- verify_metadata_loaders(low_boundary);
+  // Cleaning up the exception cache and inline caches happens
+  // after all the unloaded methods have been found.
}
+// From this nmethod (which is not unloaded), clean references to unloaded nmethods at addr.
template <class CompiledICorStaticCall>
-static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from) {
+static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
+ bool parallel, bool clean_all) {
// Ok, to lookup references to zombies here
CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
if (nm != NULL) {
- if (nm->unloading_clock() != CompiledMethod::global_unloading_clock()) {
+ if (parallel && nm->unloading_clock() != CompiledMethod::global_unloading_clock()) {
// The nmethod has not been processed yet.
return true;
}
// Clean inline caches pointing to both zombie and not_entrant methods
- if (!nm->is_in_use() || (nm->method()->code() != nm)) {
- ic->set_to_clean();
+ if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) {
+ ic->set_to_clean(from->is_alive());
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
}
}
@@ -547,12 +485,14 @@ static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address add
return false;
}
-static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from) {
- return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from);
+static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
+ bool parallel, bool clean_all = false) {
+ return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, parallel, clean_all);
}
-static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from) {
- return clean_if_nmethod_is_unloaded(csc, csc->destination(), from);
+static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
+ bool parallel, bool clean_all = false) {
+ return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, parallel, clean_all);
}
bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
@@ -562,47 +502,79 @@ bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unl
assert(!is_zombie() && !is_unloaded(),
"should not call follow on zombie or unloaded nmethod");
- // If the method is not entrant then a JMP is plastered over the
- // first few bytes. If an oop in the old code was there, that oop
- // should not get GC'd. Skip the first few bytes of oops on
- // not-entrant methods.
- address low_boundary = verified_entry_point();
- if (is_not_entrant()) {
- low_boundary += NativeJump::instruction_size;
- // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
- // (See comment above.)
+ address low_boundary = oops_reloc_begin();
+
+ if (do_unloading_oops(low_boundary, is_alive)) {
+ return false;
}
- // Exception cache
- clean_exception_cache();
+#if INCLUDE_JVMCI
+ if (do_unloading_jvmci()) {
+ return false;
+ }
+#endif
+ return unload_nmethod_caches(/*parallel*/true, unloading_occurred);
+}
+
+// Cleans caches in nmethods that point to either classes that are unloaded
+// or nmethods that are unloaded.
+//
+// Can be called in parallel (currently only by G1) or serially, once all
+// nmethods have been unloaded. Returns postponed=true in the parallel case
+// for inline caches found that point to nmethods not yet visited during
+// the do_unloading walk.
+bool CompiledMethod::unload_nmethod_caches(bool parallel, bool unloading_occurred) {
+
+  // The exception cache only needs to be cleaned if class unloading occurred
+ if (unloading_occurred) {
+ clean_exception_cache();
+ }
+
+ bool postponed = cleanup_inline_caches_impl(parallel, unloading_occurred, /*clean_all*/false);
+
+ // All static stubs need to be cleaned.
+ clean_ic_stubs();
+
+ // Check that the metadata embedded in the nmethod is alive
+ DEBUG_ONLY(metadata_do(check_class));
+
+ return postponed;
+}
+
+// Called to clean up after class unloading for live nmethods and from the sweeper
+// for all methods.
+bool CompiledMethod::cleanup_inline_caches_impl(bool parallel, bool unloading_occurred, bool clean_all) {
+ assert_locked_or_safepoint(CompiledIC_lock);
bool postponed = false;
- RelocIterator iter(this, low_boundary);
+ // Find all calls in an nmethod and clear the ones that point to non-entrant,
+ // zombie and unloaded nmethods.
+ RelocIterator iter(this, oops_reloc_begin());
while(iter.next()) {
switch (iter.type()) {
case relocInfo::virtual_call_type:
if (unloading_occurred) {
- // If class unloading occurred we first iterate over all inline caches and
- // clear ICs where the cached oop is referring to an unloaded klass or method.
+ // If class unloading occurred we first clear ICs where the cached metadata
+ // is referring to an unloaded klass or method.
clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
}
- postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this);
+ postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, parallel, clean_all);
break;
case relocInfo::opt_virtual_call_type:
- postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this);
+ postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, parallel, clean_all);
break;
case relocInfo::static_call_type:
- postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this);
+ postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, parallel, clean_all);
break;
case relocInfo::oop_type:
- // handled by do_unloading_oops below
+ // handled by do_unloading_oops already
break;
case relocInfo::metadata_type:
@@ -613,19 +585,6 @@ bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unl
}
}
- if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
- return postponed;
- }
-
-#if INCLUDE_JVMCI
- if (do_unloading_jvmci(unloading_occurred)) {
- return postponed;
- }
-#endif
-
- // Ensure that all metadata is still alive
- verify_metadata_loaders(low_boundary);
-
return postponed;
}
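With the walk consolidated in `cleanup_inline_caches_impl`, class unloading, the sweeper, and the whitebox path all share one iterate-and-dispatch loop over call-site relocations. A standalone model of that loop, assuming a simplified relocation record in place of HotSpot's `RelocIterator`:

```c++
#include <cstdio>
#include <vector>

// Simplified model of cleanup_inline_caches_impl: iterate call-site
// relocations, dispatch on type, and accumulate a "postponed" flag for
// targets a parallel pass has not visited yet. Types are illustrative.
enum class RelocType { virtual_call, opt_virtual_call, static_call, oop, other };

struct Reloc {
  RelocType type;
  bool target_unvisited; // stands in for the unloading-clock check
};

static bool clean_if_unloaded(const Reloc& r, bool parallel) {
  if (parallel && r.target_unvisited) {
    return true; // postpone: target nmethod not yet processed
  }
  // ... a real implementation would clear the inline cache here ...
  return false;
}

static bool cleanup_caches(const std::vector<Reloc>& relocs, bool parallel) {
  bool postponed = false;
  for (const Reloc& r : relocs) {
    switch (r.type) {
      case RelocType::virtual_call:
      case RelocType::opt_virtual_call:
      case RelocType::static_call:
        postponed |= clean_if_unloaded(r, parallel);
        break;
      default:
        break; // oop/metadata relocations are handled elsewhere
    }
  }
  return postponed;
}

int main() {
  std::vector<Reloc> rs = {{RelocType::virtual_call, true},
                           {RelocType::static_call, false}};
  std::printf("postponed=%d\n", cleanup_caches(rs, /*parallel=*/true));
}
```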
@@ -636,32 +595,21 @@ void CompiledMethod::do_unloading_parallel_postponed() {
assert(!is_zombie(),
"should not call follow on zombie nmethod");
- // If the method is not entrant then a JMP is plastered over the
- // first few bytes. If an oop in the old code was there, that oop
- // should not get GC'd. Skip the first few bytes of oops on
- // not-entrant methods.
- address low_boundary = verified_entry_point();
- if (is_not_entrant()) {
- low_boundary += NativeJump::instruction_size;
- // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
- // (See comment above.)
- }
-
- RelocIterator iter(this, low_boundary);
+ RelocIterator iter(this, oops_reloc_begin());
while(iter.next()) {
switch (iter.type()) {
case relocInfo::virtual_call_type:
- clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this);
+ clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, true);
break;
case relocInfo::opt_virtual_call_type:
- clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this);
+ clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, true);
break;
case relocInfo::static_call_type:
- clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this);
+ clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, true);
break;
default:
diff --git a/src/hotspot/share/code/compiledMethod.hpp b/src/hotspot/share/code/compiledMethod.hpp
index d281405ee0c..6291c6804b1 100644
--- a/src/hotspot/share/code/compiledMethod.hpp
+++ b/src/hotspot/share/code/compiledMethod.hpp
@@ -331,8 +331,19 @@ public:
static address get_deopt_original_pc(const frame* fr);
- // Inline cache support
- void cleanup_inline_caches(bool clean_all = false);
+ // GC unloading support
+  // Cleans references to unloaded klasses and unloaded nmethods from inline caches
+ bool unload_nmethod_caches(bool parallel, bool class_unloading_occurred);
+
+ // Inline cache support for class unloading and nmethod unloading
+ private:
+ bool cleanup_inline_caches_impl(bool parallel, bool unloading_occurred, bool clean_all);
+ public:
+ bool cleanup_inline_caches(bool clean_all = false) {
+ // Serial version used by sweeper and whitebox test
+ return cleanup_inline_caches_impl(false, false, clean_all);
+ }
+
virtual void clear_inline_caches();
void clear_ic_stubs();
@@ -364,12 +375,15 @@ public:
void set_unloading_next(CompiledMethod* next) { _unloading_next = next; }
CompiledMethod* unloading_next() { return _unloading_next; }
+ protected:
+ address oops_reloc_begin() const;
+ private:
void static clean_ic_if_metadata_is_dead(CompiledIC *ic);
- // Check that all metadata is still alive
- void verify_metadata_loaders(address low_boundary);
+ void clean_ic_stubs();
- virtual void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
+ public:
+ virtual void do_unloading(BoolObjectClosure* is_alive);
// The parallel versions are used by G1.
virtual bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
virtual void do_unloading_parallel_postponed();
@@ -381,9 +395,9 @@ public:
unsigned char unloading_clock();
protected:
- virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) = 0;
+ virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive) = 0;
#if INCLUDE_JVMCI
- virtual bool do_unloading_jvmci(bool unloading_occurred) = 0;
+ virtual bool do_unloading_jvmci() = 0;
#endif
private:
diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp
index 30b76407897..98a7bdbeed3 100644
--- a/src/hotspot/share/code/nmethod.cpp
+++ b/src/hotspot/share/code/nmethod.cpp
@@ -51,7 +51,7 @@
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
@@ -946,21 +946,8 @@ void nmethod::fix_oop_relocations(address begin, address end, bool initialize_im
void nmethod::verify_clean_inline_caches() {
assert_locked_or_safepoint(CompiledIC_lock);
- // If the method is not entrant or zombie then a JMP is plastered over the
- // first few bytes. If an oop in the old code was there, that oop
- // should not get GC'd. Skip the first few bytes of oops on
- // not-entrant methods.
- address low_boundary = verified_entry_point();
- if (!is_in_use()) {
- low_boundary += NativeJump::instruction_size;
- // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
- // This means that the low_boundary is going to be a little too high.
- // This shouldn't matter, since oops of non-entrant methods are never used.
- // In fact, why are we bothering to look at oops in a non-entrant method??
- }
-
ResourceMark rm;
- RelocIterator iter(this, low_boundary);
+ RelocIterator iter(this, oops_reloc_begin());
while(iter.next()) {
switch(iter.type()) {
case relocInfo::virtual_call_type:
@@ -1041,13 +1028,17 @@ void nmethod::make_unloaded(oop cause) {
flush_dependencies(/*delete_immediately*/false);
// Break cycle between nmethod & method
- LogTarget(Trace, class, unload) lt;
+ LogTarget(Trace, class, unload, nmethod) lt;
if (lt.is_enabled()) {
LogStream ls(lt);
- ls.print_cr("making nmethod " INTPTR_FORMAT
- " unloadable, Method*(" INTPTR_FORMAT
- "), cause(" INTPTR_FORMAT ")",
- p2i(this), p2i(_method), p2i(cause));
+ ls.print("making nmethod " INTPTR_FORMAT
+ " unloadable, Method*(" INTPTR_FORMAT
+ "), cause(" INTPTR_FORMAT ") ",
+ p2i(this), p2i(_method), p2i(cause));
+ if (cause != NULL) {
+ cause->print_value_on(&ls);
+ }
+ ls.cr();
}
// Unlink the osr method, so we do not look this up again
if (is_osr_method()) {
@@ -1378,17 +1369,15 @@ void nmethod::flush_dependencies(bool delete_immediately) {
// If this oop is not live, the nmethod can be unloaded.
-bool nmethod::can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred) {
+bool nmethod::can_unload(BoolObjectClosure* is_alive, oop* root) {
assert(root != NULL, "just checking");
oop obj = *root;
if (obj == NULL || is_alive->do_object_b(obj)) {
return false;
}
- // If ScavengeRootsInCode is true, an nmethod might be unloaded
- // simply because one of its constant oops has gone dead.
+ // An nmethod might be unloaded simply because one of its constant oops has gone dead.
// No actual classes need to be unloaded in order for this to occur.
- assert(unloading_occurred || ScavengeRootsInCode, "Inconsistency in unloading");
make_unloaded(obj);
return true;
}
@@ -1466,7 +1455,7 @@ void nmethod::post_compiled_method_unload() {
set_unload_reported();
}
-bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *is_alive, bool unloading_occurred) {
+bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *is_alive) {
assert(iter_at_oop->type() == relocInfo::oop_type, "Wrong relocation type");
oop_Relocation* r = iter_at_oop->oop_reloc();
@@ -1477,7 +1466,7 @@ bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *i
"oop must be found in exactly one place");
if (r->oop_is_immediate() && r->oop_value() != NULL) {
// Unload this nmethod if the oop is dead.
- if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
+ if (can_unload(is_alive, r->oop_addr())) {
      return true;
}
}
@@ -1485,18 +1474,18 @@ bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *i
return false;
}
-bool nmethod::do_unloading_scopes(BoolObjectClosure* is_alive, bool unloading_occurred) {
+bool nmethod::do_unloading_scopes(BoolObjectClosure* is_alive) {
// Scopes
for (oop* p = oops_begin(); p < oops_end(); p++) {
if (*p == Universe::non_oop_word()) continue; // skip non-oops
- if (can_unload(is_alive, p, unloading_occurred)) {
+ if (can_unload(is_alive, p)) {
return true;
}
}
return false;
}
-bool nmethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) {
+bool nmethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive) {
// Compiled code
// Prevent extra code cache walk for platforms that don't have immediate oops.
@@ -1504,18 +1493,18 @@ bool nmethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_aliv
RelocIterator iter(this, low_boundary);
while (iter.next()) {
if (iter.type() == relocInfo::oop_type) {
- if (unload_if_dead_at(&iter, is_alive, unloading_occurred)) {
+ if (unload_if_dead_at(&iter, is_alive)) {
return true;
}
}
}
}
- return do_unloading_scopes(is_alive, unloading_occurred);
+ return do_unloading_scopes(is_alive);
}
#if INCLUDE_JVMCI
-bool nmethod::do_unloading_jvmci(bool unloading_occurred) {
+bool nmethod::do_unloading_jvmci() {
if (_jvmci_installed_code != NULL) {
if (JNIHandles::is_global_weak_cleared(_jvmci_installed_code)) {
if (_jvmci_installed_code_triggers_unloading) {
@@ -1533,15 +1522,9 @@ bool nmethod::do_unloading_jvmci(bool unloading_occurred) {
// Iterate over metadata calling this function. Used by RedefineClasses
void nmethod::metadata_do(void f(Metadata*)) {
- address low_boundary = verified_entry_point();
- if (is_not_entrant()) {
- low_boundary += NativeJump::instruction_size;
- // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
- // (See comment above.)
- }
{
// Visit all immediate references that are embedded in the instruction stream.
- RelocIterator iter(this, low_boundary);
+ RelocIterator iter(this, oops_reloc_begin());
while (iter.next()) {
if (iter.type() == relocInfo::metadata_type ) {
metadata_Relocation* r = iter.metadata_reloc();
@@ -1588,20 +1571,9 @@ void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
assert(!is_unloaded(), "should not call follow on unloaded nmethod");
- // If the method is not entrant or zombie then a JMP is plastered over the
- // first few bytes. If an oop in the old code was there, that oop
- // should not get GC'd. Skip the first few bytes of oops on
- // not-entrant methods.
- address low_boundary = verified_entry_point();
- if (is_not_entrant()) {
- low_boundary += NativeJump::instruction_size;
- // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
- // (See comment above.)
- }
-
// Prevent extra code cache walk for platforms that don't have immediate oops.
if (relocInfo::mustIterateImmediateOopsInCode()) {
- RelocIterator iter(this, low_boundary);
+ RelocIterator iter(this, oops_reloc_begin());
while (iter.next()) {
if (iter.type() == relocInfo::oop_type ) {
@@ -1650,7 +1622,11 @@ bool nmethod::test_set_oops_do_mark() {
break;
}
// Mark was clear when we first saw this guy.
- if (TraceScavenge) { print_on(tty, "oops_do, mark"); }
+ LogTarget(Trace, gc, nmethod) lt;
+ if (lt.is_enabled()) {
+ LogStream ls(lt);
+ CompileTask::print(&ls, this, "oops_do, mark", /*short_form:*/ true);
+ }
return false;
}
}
@@ -1659,7 +1635,7 @@ bool nmethod::test_set_oops_do_mark() {
}
void nmethod::oops_do_marking_prologue() {
- if (TraceScavenge) { tty->print_cr("[oops_do_marking_prologue"); }
+ log_trace(gc, nmethod)("oops_do_marking_prologue");
assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
// We use cmpxchg instead of regular assignment here because the user
// may fork a bunch of threads, and we need them all to see the same state.
@@ -1675,20 +1651,26 @@ void nmethod::oops_do_marking_epilogue() {
nmethod* next = cur->_oops_do_mark_link;
cur->_oops_do_mark_link = NULL;
DEBUG_ONLY(cur->verify_oop_relocations());
- NOT_PRODUCT(if (TraceScavenge) cur->print_on(tty, "oops_do, unmark"));
+
+ LogTarget(Trace, gc, nmethod) lt;
+ if (lt.is_enabled()) {
+ LogStream ls(lt);
+ CompileTask::print(&ls, cur, "oops_do, unmark", /*short_form:*/ true);
+ }
cur = next;
}
nmethod* required = _oops_do_mark_nmethods;
nmethod* observed = Atomic::cmpxchg((nmethod*)NULL, &_oops_do_mark_nmethods, required);
guarantee(observed == required, "no races in this sequential code");
- if (TraceScavenge) { tty->print_cr("oops_do_marking_epilogue]"); }
+ log_trace(gc, nmethod)("oops_do_marking_epilogue");
}
class DetectScavengeRoot: public OopClosure {
bool _detected_scavenge_root;
+ nmethod* _print_nm;
public:
- DetectScavengeRoot() : _detected_scavenge_root(false)
- { NOT_PRODUCT(_print_nm = NULL); }
+ DetectScavengeRoot(nmethod* nm) : _detected_scavenge_root(false), _print_nm(nm) {}
+
bool detected_scavenge_root() { return _detected_scavenge_root; }
virtual void do_oop(oop* p) {
if ((*p) != NULL && Universe::heap()->is_scavengable(*p)) {
@@ -1699,21 +1681,25 @@ public:
virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
#ifndef PRODUCT
- nmethod* _print_nm;
void maybe_print(oop* p) {
- if (_print_nm == NULL) return;
- if (!_detected_scavenge_root) _print_nm->print_on(tty, "new scavenge root");
- tty->print_cr("" PTR_FORMAT "[offset=%d] detected scavengable oop " PTR_FORMAT " (found at " PTR_FORMAT ")",
- p2i(_print_nm), (int)((intptr_t)p - (intptr_t)_print_nm),
- p2i(*p), p2i(p));
- (*p)->print();
+ LogTarget(Trace, gc, nmethod) lt;
+ if (lt.is_enabled()) {
+ LogStream ls(lt);
+ if (!_detected_scavenge_root) {
+ CompileTask::print(&ls, _print_nm, "new scavenge root", /*short_form:*/ true);
+ }
+ ls.print("" PTR_FORMAT "[offset=%d] detected scavengable oop " PTR_FORMAT " (found at " PTR_FORMAT ") ",
+ p2i(_print_nm), (int)((intptr_t)p - (intptr_t)_print_nm),
+ p2i(*p), p2i(p));
+ (*p)->print_value_on(&ls);
+ ls.cr();
+ }
}
#endif //PRODUCT
};
bool nmethod::detect_scavenge_root_oops() {
- DetectScavengeRoot detect_scavenge_root;
- NOT_PRODUCT(if (TraceScavenge) detect_scavenge_root._print_nm = this);
+ DetectScavengeRoot detect_scavenge_root(this);
oops_do(&detect_scavenge_root);
return detect_scavenge_root.detected_scavenge_root();
}
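Throughout this file the `TraceScavenge` flag-driven `tty` output is replaced with unified logging under the `gc+nmethod` tags; the idiom checks `is_enabled()` before constructing the stream, so a disabled tag costs only a branch. A runnable model of that gating (stand-in classes, not the real `LogTarget`/`LogStream` API):

```c++
#include <cstdio>

// Minimal model of the unified-logging idiom adopted above: test
// is_enabled() first, so when -Xlog:gc+nmethod=trace is off the only
// cost is a branch and no message is ever formatted.
struct LogTargetModel {
  bool enabled;
  bool is_enabled() const { return enabled; }
  void write(const char* s) const { std::fputs(s, stderr); }
};

static void oops_do_mark(const LogTargetModel& lt, const void* nm) {
  if (lt.is_enabled()) {
    char buf[64];
    std::snprintf(buf, sizeof buf, "oops_do, mark %p\n", nm);
    lt.write(buf); // formatting happens only on the enabled path
  }
}

int main() {
  int dummy = 0;
  oops_do_mark(LogTargetModel{false}, &dummy); // disabled: no work
  oops_do_mark(LogTargetModel{true},  &dummy); // enabled: prints trace line
}
```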
diff --git a/src/hotspot/share/code/nmethod.hpp b/src/hotspot/share/code/nmethod.hpp
index ed37c129ce2..a33688d893f 100644
--- a/src/hotspot/share/code/nmethod.hpp
+++ b/src/hotspot/share/code/nmethod.hpp
@@ -484,18 +484,18 @@ public:
#endif
protected:
- virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred);
+ virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive);
#if INCLUDE_JVMCI
// See comment for _jvmci_installed_code_triggers_unloading field.
// Returns whether this nmethod was unloaded.
- virtual bool do_unloading_jvmci(bool unloading_occurred);
+ virtual bool do_unloading_jvmci();
#endif
private:
- bool do_unloading_scopes(BoolObjectClosure* is_alive, bool unloading_occurred);
+ bool do_unloading_scopes(BoolObjectClosure* is_alive);
// Unload a nmethod if the *root object is dead.
- bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
- bool unload_if_dead_at(RelocIterator *iter_at_oop, BoolObjectClosure* is_alive, bool unloading_occurred);
+ bool can_unload(BoolObjectClosure* is_alive, oop* root);
+ bool unload_if_dead_at(RelocIterator *iter_at_oop, BoolObjectClosure* is_alive);
public:
void oops_do(OopClosure* f) { oops_do(f, false); }
diff --git a/src/hotspot/share/compiler/compileBroker.cpp b/src/hotspot/share/compiler/compileBroker.cpp
index 3bcad4d97fc..91c19dd23bf 100644
--- a/src/hotspot/share/compiler/compileBroker.cpp
+++ b/src/hotspot/share/compiler/compileBroker.cpp
@@ -170,21 +170,23 @@ elapsedTimer CompileBroker::_t_standard_compilation;
elapsedTimer CompileBroker::_t_invalidated_compilation;
elapsedTimer CompileBroker::_t_bailedout_compilation;
-int CompileBroker::_total_bailout_count = 0;
-int CompileBroker::_total_invalidated_count = 0;
-int CompileBroker::_total_compile_count = 0;
-int CompileBroker::_total_osr_compile_count = 0;
-int CompileBroker::_total_standard_compile_count = 0;
+int CompileBroker::_total_bailout_count = 0;
+int CompileBroker::_total_invalidated_count = 0;
+int CompileBroker::_total_compile_count = 0;
+int CompileBroker::_total_osr_compile_count = 0;
+int CompileBroker::_total_standard_compile_count = 0;
+int CompileBroker::_total_compiler_stopped_count = 0;
+int CompileBroker::_total_compiler_restarted_count = 0;
-int CompileBroker::_sum_osr_bytes_compiled = 0;
-int CompileBroker::_sum_standard_bytes_compiled = 0;
-int CompileBroker::_sum_nmethod_size = 0;
-int CompileBroker::_sum_nmethod_code_size = 0;
+int CompileBroker::_sum_osr_bytes_compiled = 0;
+int CompileBroker::_sum_standard_bytes_compiled = 0;
+int CompileBroker::_sum_nmethod_size = 0;
+int CompileBroker::_sum_nmethod_code_size = 0;
-long CompileBroker::_peak_compilation_time = 0;
+long CompileBroker::_peak_compilation_time = 0;
-CompileQueue* CompileBroker::_c2_compile_queue = NULL;
-CompileQueue* CompileBroker::_c1_compile_queue = NULL;
+CompileQueue* CompileBroker::_c2_compile_queue = NULL;
+CompileQueue* CompileBroker::_c1_compile_queue = NULL;
diff --git a/src/hotspot/share/compiler/compileBroker.hpp b/src/hotspot/share/compiler/compileBroker.hpp
index 312a4a5c526..87589025725 100644
--- a/src/hotspot/share/compiler/compileBroker.hpp
+++ b/src/hotspot/share/compiler/compileBroker.hpp
@@ -219,6 +219,8 @@ class CompileBroker: AllStatic {
static int _total_native_compile_count;
static int _total_osr_compile_count;
static int _total_standard_compile_count;
+ static int _total_compiler_stopped_count;
+ static int _total_compiler_restarted_count;
static int _sum_osr_bytes_compiled;
static int _sum_standard_bytes_compiled;
static int _sum_nmethod_size;
@@ -338,7 +340,15 @@ public:
static bool set_should_compile_new_jobs(jint new_state) {
// Return success if the current caller set it
jint old = Atomic::cmpxchg(new_state, &_should_compile_new_jobs, 1-new_state);
- return (old == (1-new_state));
+ bool success = (old == (1-new_state));
+ if (success) {
+ if (new_state == run_compilation) {
+ _total_compiler_restarted_count++;
+ } else {
+ _total_compiler_stopped_count++;
+ }
+ }
+ return success;
}
static void disable_compilation_forever() {
@@ -393,18 +403,20 @@ public:
static CompileLog* get_log(CompilerThread* ct);
- static int get_total_compile_count() { return _total_compile_count; }
- static int get_total_bailout_count() { return _total_bailout_count; }
- static int get_total_invalidated_count() { return _total_invalidated_count; }
- static int get_total_native_compile_count() { return _total_native_compile_count; }
- static int get_total_osr_compile_count() { return _total_osr_compile_count; }
- static int get_total_standard_compile_count() { return _total_standard_compile_count; }
- static int get_sum_osr_bytes_compiled() { return _sum_osr_bytes_compiled; }
- static int get_sum_standard_bytes_compiled() { return _sum_standard_bytes_compiled; }
- static int get_sum_nmethod_size() { return _sum_nmethod_size;}
- static int get_sum_nmethod_code_size() { return _sum_nmethod_code_size; }
- static long get_peak_compilation_time() { return _peak_compilation_time; }
- static long get_total_compilation_time() { return _t_total_compilation.milliseconds(); }
+ static int get_total_compile_count() { return _total_compile_count; }
+ static int get_total_bailout_count() { return _total_bailout_count; }
+ static int get_total_invalidated_count() { return _total_invalidated_count; }
+ static int get_total_native_compile_count() { return _total_native_compile_count; }
+ static int get_total_osr_compile_count() { return _total_osr_compile_count; }
+ static int get_total_standard_compile_count() { return _total_standard_compile_count; }
+ static int get_total_compiler_stopped_count() { return _total_compiler_stopped_count; }
+ static int get_total_compiler_restarted_count() { return _total_compiler_restarted_count; }
+ static int get_sum_osr_bytes_compiled() { return _sum_osr_bytes_compiled; }
+ static int get_sum_standard_bytes_compiled() { return _sum_standard_bytes_compiled; }
+ static int get_sum_nmethod_size() { return _sum_nmethod_size;}
+ static int get_sum_nmethod_code_size() { return _sum_nmethod_code_size; }
+ static long get_peak_compilation_time() { return _peak_compilation_time; }
+ static long get_total_compilation_time() { return _t_total_compilation.milliseconds(); }
// Log that compilation profiling is skipped because metaspace is full.
static void log_metaspace_failure();
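The new stopped/restarted counters piggyback on the existing `Atomic::cmpxchg` toggle: only the caller whose CAS succeeds observes a real state transition, so each transition is counted exactly once even when several threads race to flip compilation. A standalone model using `std::atomic` (illustrative; not the HotSpot `Atomic` API):

```c++
#include <atomic>
#include <cstdio>

// Model of CompileBroker::set_should_compile_new_jobs(): a CAS from
// 1-new_state to new_state succeeds for exactly one caller, so the
// stopped/restarted counters are bumped once per actual transition.
// As in HotSpot, the counters themselves are plain ints guarded by
// CAS success.
static std::atomic<int> should_compile{1}; // 1 = run_compilation
static int stopped_count = 0, restarted_count = 0;

static bool set_should_compile(int new_state) {
  int expected = 1 - new_state;
  bool success = should_compile.compare_exchange_strong(expected, new_state);
  if (success) {
    if (new_state == 1) restarted_count++;
    else                stopped_count++;
  }
  return success; // false: some other caller already set this state
}

int main() {
  set_should_compile(0); // stop compilation: counted
  set_should_compile(0); // already stopped: not counted
  set_should_compile(1); // restart: counted
  std::printf("stopped=%d restarted=%d\n", stopped_count, restarted_count);
}
```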
diff --git a/src/hotspot/share/compiler/compilerDirectives.hpp b/src/hotspot/share/compiler/compilerDirectives.hpp
index edb815c962f..c06d6b89919 100644
--- a/src/hotspot/share/compiler/compilerDirectives.hpp
+++ b/src/hotspot/share/compiler/compilerDirectives.hpp
@@ -66,7 +66,8 @@ NOT_PRODUCT(cflags(TraceOptoOutput, bool, TraceOptoOutput, TraceOptoOutput))
cflags(VectorizeDebug, uintx, 0, VectorizeDebug) \
cflags(CloneMapDebug, bool, false, CloneMapDebug) \
cflags(IGVPrintLevel, intx, PrintIdealGraphLevel, IGVPrintLevel) \
- cflags(MaxNodeLimit, intx, MaxNodeLimit, MaxNodeLimit)
+ cflags(MaxNodeLimit, intx, MaxNodeLimit, MaxNodeLimit) \
+ZGC_ONLY(cflags(ZOptimizeLoadBarriers, bool, ZOptimizeLoadBarriers, ZOptimizeLoadBarriers))
#else
#define compilerdirectives_c2_flags(cflags)
#endif
diff --git a/src/hotspot/share/compiler/disassembler.cpp b/src/hotspot/share/compiler/disassembler.cpp
index d98b9ce67d4..3a1b145acdb 100644
--- a/src/hotspot/share/compiler/disassembler.cpp
+++ b/src/hotspot/share/compiler/disassembler.cpp
@@ -155,6 +155,7 @@ class decode_env {
CodeStrings _strings;
outputStream* _output;
address _start, _end;
+ ptrdiff_t _offset;
char _option_buf[512];
char _print_raw;
@@ -191,7 +192,8 @@ class decode_env {
void print_address(address value);
public:
- decode_env(CodeBlob* code, outputStream* output, CodeStrings c = CodeStrings());
+ decode_env(CodeBlob* code, outputStream* output,
+ CodeStrings c = CodeStrings(), ptrdiff_t offset = 0);
address decode_instructions(address start, address end);
@@ -221,13 +223,15 @@ class decode_env {
const char* options() { return _option_buf; }
};
-decode_env::decode_env(CodeBlob* code, outputStream* output, CodeStrings c) {
+decode_env::decode_env(CodeBlob* code, outputStream* output, CodeStrings c,
+ ptrdiff_t offset) {
memset(this, 0, sizeof(*this)); // Beware, this zeroes bits of fields.
_output = output ? output : tty;
_code = code;
if (code != NULL && code->is_nmethod())
_nm = (nmethod*) code;
_strings.copy(c);
+ _offset = offset;
// by default, output pc but not bytes:
_print_pc = true;
@@ -354,7 +358,7 @@ void decode_env::print_insn_labels() {
if (cb != NULL) {
cb->print_block_comment(st, p);
}
- _strings.print_block_comment(st, (intptr_t)(p - _start));
+ _strings.print_block_comment(st, (intptr_t)(p - _start + _offset));
if (_print_pc) {
st->print(" " PTR_FORMAT ": ", p2i(p));
}
@@ -507,10 +511,11 @@ void Disassembler::decode(CodeBlob* cb, outputStream* st) {
env.decode_instructions(cb->code_begin(), cb->code_end());
}
-void Disassembler::decode(address start, address end, outputStream* st, CodeStrings c) {
+void Disassembler::decode(address start, address end, outputStream* st, CodeStrings c,
+ ptrdiff_t offset) {
ttyLocker ttyl;
if (!load_library()) return;
- decode_env env(CodeCache::find_blob_unsafe(start), st, c);
+ decode_env env(CodeCache::find_blob_unsafe(start), st, c, offset);
env.decode_instructions(start, end);
}
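The new `offset` parameter threads through to `print_block_comment` so comments recorded in `CodeStrings` relative to the original code start still line up when only a fragment, or a relocated copy, is being decoded. A small model of the offset arithmetic (simplified stand-ins for the real types):

```c++
#include <cstdio>
#include <map>

// Model of the new 'offset' parameter: CodeStrings block comments are keyed
// by offset from the original code start, so when decoding a fragment the
// printed position must be (p - _start + _offset), as in the hunk above.
using Comments = std::map<long, const char*>;

static void print_comment(const Comments& c, long pos_in_copy, long offset) {
  auto it = c.find(pos_in_copy + offset); // original-relative key
  if (it != c.end()) {
    std::printf(";; %s\n", it->second);
  }
}

int main() {
  Comments c = {{16, "slow path entry"}};
  // The comment was recorded at offset 16 in the original blob; we decode a
  // fragment starting 16 bytes in, so position 0 maps to key 16.
  print_comment(c, /*pos_in_copy=*/0, /*offset=*/16);
}
```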
diff --git a/src/hotspot/share/compiler/disassembler.hpp b/src/hotspot/share/compiler/disassembler.hpp
index 4755ce7c8a4..6220da4175f 100644
--- a/src/hotspot/share/compiler/disassembler.hpp
+++ b/src/hotspot/share/compiler/disassembler.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -75,7 +75,8 @@ class Disassembler {
}
static void decode(CodeBlob *cb, outputStream* st = NULL);
static void decode(nmethod* nm, outputStream* st = NULL);
- static void decode(address begin, address end, outputStream* st = NULL, CodeStrings c = CodeStrings());
+ static void decode(address begin, address end, outputStream* st = NULL,
+ CodeStrings c = CodeStrings(), ptrdiff_t offset = 0);
};
#endif // SHARE_VM_COMPILER_DISASSEMBLER_HPP
diff --git a/src/hotspot/share/compiler/oopMap.cpp b/src/hotspot/share/compiler/oopMap.cpp
index 1c228b6aa8b..5c0828b10a5 100644
--- a/src/hotspot/share/compiler/oopMap.cpp
+++ b/src/hotspot/share/compiler/oopMap.cpp
@@ -380,8 +380,12 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
continue;
}
#ifdef ASSERT
- if ((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
- !Universe::heap()->is_in_or_null(*loc)) {
+      // We cannot verify the oop here when using ZGC: if a safepoint
+      // occurred between a load and its load barrier, the raw oop
+      // value may be stale.
+ if (!UseZGC &&
+ ((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
+ !Universe::heap()->is_in_or_null(*loc))) {
tty->print_cr("# Found non oop pointer. Dumping state at failure");
// try to dump out some helpful debugging information
trace_codeblob_maps(fr, reg_map);
diff --git a/src/hotspot/share/gc/cms/adaptiveFreeList.cpp b/src/hotspot/share/gc/cms/adaptiveFreeList.cpp
index 753cc8a8dee..f0319ed6041 100644
--- a/src/hotspot/share/gc/cms/adaptiveFreeList.cpp
+++ b/src/hotspot/share/gc/cms/adaptiveFreeList.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
#include "memory/freeList.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/mutex.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
#include "runtime/vmThread.hpp"
template <>
diff --git a/src/hotspot/share/gc/cms/cmsCardTable.cpp b/src/hotspot/share/gc/cms/cmsCardTable.cpp
index e0a96cf9daf..b823a079b2a 100644
--- a/src/hotspot/share/gc/cms/cmsCardTable.cpp
+++ b/src/hotspot/share/gc/cms/cmsCardTable.cpp
@@ -34,7 +34,7 @@
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
#include "runtime/vmThread.hpp"
CMSCardTable::CMSCardTable(MemRegion whole_heap) :
diff --git a/src/hotspot/share/gc/cms/cmsHeap.cpp b/src/hotspot/share/gc/cms/cmsHeap.cpp
index 408775114b0..b2c426967cf 100644
--- a/src/hotspot/share/gc/cms/cmsHeap.cpp
+++ b/src/hotspot/share/gc/cms/cmsHeap.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -220,14 +220,14 @@ void CMSHeap::cms_process_roots(StrongRootsScope* scope,
ScanningOption so,
bool only_strong_roots,
OopsInGenClosure* root_closure,
- CLDClosure* cld_closure) {
+ CLDClosure* cld_closure,
+                               OopStorage::ParState<false, false>* par_state_string) {
MarkingCodeBlobClosure mark_code_closure(root_closure, !CodeBlobToOopClosure::FixRelocations);
- OopsInGenClosure* weak_roots = only_strong_roots ? NULL : root_closure;
CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;
- process_roots(scope, so, root_closure, weak_roots, cld_closure, weak_cld_closure, &mark_code_closure);
+ process_roots(scope, so, root_closure, cld_closure, weak_cld_closure, &mark_code_closure);
if (!only_strong_roots) {
- process_string_table_roots(scope, root_closure);
+ process_string_table_roots(scope, root_closure, par_state_string);
}
if (young_gen_as_roots &&
diff --git a/src/hotspot/share/gc/cms/cmsHeap.hpp b/src/hotspot/share/gc/cms/cmsHeap.hpp
index ee800fff7f8..93f177aadec 100644
--- a/src/hotspot/share/gc/cms/cmsHeap.hpp
+++ b/src/hotspot/share/gc/cms/cmsHeap.hpp
@@ -30,6 +30,7 @@
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/genCollectedHeap.hpp"
+#include "gc/shared/oopStorageParState.hpp"
#include "utilities/growableArray.hpp"
class CLDClosure;
@@ -90,7 +91,8 @@ public:
ScanningOption so,
bool only_strong_roots,
OopsInGenClosure* root_closure,
- CLDClosure* cld_closure);
+ CLDClosure* cld_closure,
+                         OopStorage::ParState<false, false>* par_state_string = NULL);
GCMemoryManager* old_manager() const { return _old_manager; }
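The `OopStorage::ParState<false, false>` plumbing gives each GC task one shared claim state for the string table's weak storage, so parallel workers partition the scan instead of all walking the whole table. A standalone model of the claim protocol (illustrative only; the real `ParState` claims storage blocks, not integer chunks):

```c++
#include <atomic>
#include <cstdio>

// Model of why one per-task ParState is constructed up front and passed
// down to process_string_table_roots(): workers share a claim cursor, so
// each chunk is scanned by exactly one worker.
struct ParStateModel {
  std::atomic<int> next_chunk{0};
  int num_chunks;
  explicit ParStateModel(int n) : num_chunks(n) {}
  // Returns a chunk index to process, or -1 when all chunks are claimed.
  int claim() {
    int c = next_chunk.fetch_add(1);
    return c < num_chunks ? c : -1;
  }
};

static void worker(ParStateModel& state, int id) {
  for (int c; (c = state.claim()) != -1; ) {
    std::printf("worker %d scans chunk %d\n", id, c);
  }
}

int main() {
  ParStateModel state(4); // one ParState per task, shared by its workers
  worker(state, 0);
  worker(state, 1); // in HotSpot these run concurrently
}
```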
diff --git a/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp b/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp
index 53b54cd3476..0b6c7c380fc 100644
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp
+++ b/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp
@@ -45,7 +45,7 @@
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
diff --git a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp
index 934aa42d124..422ba66bcae 100644
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp
@@ -54,6 +54,7 @@
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
+#include "gc/shared/oopStorageParState.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/strongRootsScope.hpp"
@@ -74,7 +75,7 @@
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
#include "runtime/timer.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
@@ -2769,10 +2770,12 @@ class CMSParMarkTask : public AbstractGangTask {
protected:
CMSCollector* _collector;
uint _n_workers;
+  OopStorage::ParState<false, false> _par_state_string;
CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) :
AbstractGangTask(name),
_collector(collector),
- _n_workers(n_workers) {}
+ _n_workers(n_workers),
+ _par_state_string(StringTable::weak_storage()) {}
// Work method in support of parallel rescan ... of young gen spaces
void do_young_space_rescan(OopsInGenClosure* cl,
ContiguousSpace* space,
@@ -4274,7 +4277,9 @@ void CMSParInitialMarkTask::work(uint worker_id) {
GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
_collector->should_unload_classes(),
&par_mri_cl,
- &cld_closure);
+ &cld_closure,
+ &_par_state_string);
+
assert(_collector->should_unload_classes()
|| (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
"if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
@@ -4403,7 +4408,8 @@ void CMSParRemarkTask::work(uint worker_id) {
GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
_collector->should_unload_classes(),
&par_mrias_cl,
- NULL); // The dirty klasses will be handled below
+ NULL, // The dirty klasses will be handled below
+ &_par_state_string);
assert(_collector->should_unload_classes()
|| (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
@@ -5142,7 +5148,7 @@ void CMSCollector::refProcessingWork() {
rp->setup_policy(false);
verify_work_stacks_empty();
- ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_queues());
+ ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues());
{
GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer_cm);
@@ -5201,7 +5207,7 @@ void CMSCollector::refProcessingWork() {
GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer_cm);
// Unload classes and purge the SystemDictionary.
- bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure, _gc_timer_cm);
+ bool purged_class = SystemDictionary::do_unloading(_gc_timer_cm);
// Unload nmethods.
CodeCache::do_unloading(&_is_alive_closure, purged_class);
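`ReferenceProcessorPhaseTimes` is now sized with `max_num_queues()` rather than the currently active `num_queues()`. The assumed rationale: the active MT degree can be lowered per collection while per-queue slots are still indexed by queue id, so the table must be sized for the fixed maximum. A small sketch of that sizing choice (illustrative stand-in types):

```c++
#include <cstdio>
#include <vector>

// Model of the num_queues() -> max_num_queues() change: size per-queue
// bookkeeping by the processor's fixed maximum, not the degree active in
// this particular cycle.
struct RefProcModel {
  int max_num_queues; // fixed at startup (e.g. from ParallelGCThreads)
  int active_degree;  // may change every cycle
};

int main() {
  RefProcModel rp{8, 4};
  std::vector<double> per_queue_ms(rp.max_num_queues, 0.0); // not active_degree
  per_queue_ms[7] = 1.5; // safe even if this queue is idle this cycle
  std::printf("slots=%zu\n", per_queue_ms.size());
}
```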
diff --git a/src/hotspot/share/gc/cms/jvmFlagConstraintsCMS.cpp b/src/hotspot/share/gc/cms/jvmFlagConstraintsCMS.cpp
index bb3282634ce..9c22c29a107 100644
--- a/src/hotspot/share/gc/cms/jvmFlagConstraintsCMS.cpp
+++ b/src/hotspot/share/gc/cms/jvmFlagConstraintsCMS.cpp
@@ -30,17 +30,16 @@
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/jvmFlagConstraintsGC.hpp"
#include "memory/universe.hpp"
-#include "runtime/flags/jvmFlagRangeList.hpp"
#include "runtime/globals_extension.hpp"
#include "utilities/globalDefinitions.hpp"
static JVMFlag::Error ParallelGCThreadsAndCMSWorkQueueDrainThreshold(uint threads, uintx threshold, bool verbose) {
// CMSWorkQueueDrainThreshold is verified to be less than max_juint
if (UseConcMarkSweepGC && (threads > (uint)(max_jint / (uint)threshold))) {
- CommandLineError::print(verbose,
- "ParallelGCThreads (" UINT32_FORMAT ") or CMSWorkQueueDrainThreshold ("
- UINTX_FORMAT ") is too large\n",
- threads, threshold);
+ JVMFlag::printError(verbose,
+ "ParallelGCThreads (" UINT32_FORMAT ") or CMSWorkQueueDrainThreshold ("
+ UINTX_FORMAT ") is too large\n",
+ threads, threshold);
return JVMFlag::VIOLATES_CONSTRAINT;
}
return JVMFlag::SUCCESS;
@@ -49,20 +48,20 @@ static JVMFlag::Error ParallelGCThreadsAndCMSWorkQueueDrainThreshold(uint thread
JVMFlag::Error ParallelGCThreadsConstraintFuncCMS(uint value, bool verbose) {
// To avoid overflow at ParScanClosure::do_oop_work.
if (UseConcMarkSweepGC && (value > (max_jint / 10))) {
- CommandLineError::print(verbose,
- "ParallelGCThreads (" UINT32_FORMAT ") must be "
- "less than or equal to " UINT32_FORMAT " for CMS GC\n",
- value, (max_jint / 10));
+ JVMFlag::printError(verbose,
+ "ParallelGCThreads (" UINT32_FORMAT ") must be "
+ "less than or equal to " UINT32_FORMAT " for CMS GC\n",
+ value, (max_jint / 10));
return JVMFlag::VIOLATES_CONSTRAINT;
}
return ParallelGCThreadsAndCMSWorkQueueDrainThreshold(value, CMSWorkQueueDrainThreshold, verbose);
}
JVMFlag::Error ParGCStridesPerThreadConstraintFunc(uintx value, bool verbose) {
if (UseConcMarkSweepGC && (value > ((uintx)max_jint / (uintx)ParallelGCThreads))) {
- CommandLineError::print(verbose,
- "ParGCStridesPerThread (" UINTX_FORMAT ") must be "
- "less than or equal to ergonomic maximum (" UINTX_FORMAT ")\n",
- value, ((uintx)max_jint / (uintx)ParallelGCThreads));
+ JVMFlag::printError(verbose,
+ "ParGCStridesPerThread (" UINTX_FORMAT ") must be "
+ "less than or equal to ergonomic maximum (" UINTX_FORMAT ")\n",
+ value, ((uintx)max_jint / (uintx)ParallelGCThreads));
return JVMFlag::VIOLATES_CONSTRAINT;
}
return JVMFlag::SUCCESS;
@@ -76,10 +75,10 @@ JVMFlag::Error ParGCCardsPerStrideChunkConstraintFunc(intx value, bool verbose)
size_t card_table_size = ct->cards_required(heap_size) - 1; // Valid card table size
if ((size_t)value > card_table_size) {
- CommandLineError::print(verbose,
- "ParGCCardsPerStrideChunk (" INTX_FORMAT ") is too large for the heap size and "
- "must be less than or equal to card table size (" SIZE_FORMAT ")\n",
- value, card_table_size);
+ JVMFlag::printError(verbose,
+ "ParGCCardsPerStrideChunk (" INTX_FORMAT ") is too large for the heap size and "
+ "must be less than or equal to card table size (" SIZE_FORMAT ")\n",
+ value, card_table_size);
return JVMFlag::VIOLATES_CONSTRAINT;
}
@@ -89,10 +88,10 @@ JVMFlag::Error ParGCCardsPerStrideChunkConstraintFunc(intx value, bool verbose)
uintx n_strides = ParallelGCThreads * ParGCStridesPerThread;
uintx ergo_max = max_uintx / n_strides;
if ((uintx)value > ergo_max) {
- CommandLineError::print(verbose,
- "ParGCCardsPerStrideChunk (" INTX_FORMAT ") must be "
- "less than or equal to ergonomic maximum (" UINTX_FORMAT ")\n",
- value, ergo_max);
+ JVMFlag::printError(verbose,
+ "ParGCCardsPerStrideChunk (" INTX_FORMAT ") must be "
+ "less than or equal to ergonomic maximum (" UINTX_FORMAT ")\n",
+ value, ergo_max);
return JVMFlag::VIOLATES_CONSTRAINT;
}
}
@@ -104,10 +103,10 @@ JVMFlag::Error CMSOldPLABMinConstraintFunc(size_t value, bool verbose) {
if (UseConcMarkSweepGC) {
if (value > CMSOldPLABMax) {
- CommandLineError::print(verbose,
- "CMSOldPLABMin (" SIZE_FORMAT ") must be "
- "less than or equal to CMSOldPLABMax (" SIZE_FORMAT ")\n",
- value, CMSOldPLABMax);
+ JVMFlag::printError(verbose,
+ "CMSOldPLABMin (" SIZE_FORMAT ") must be "
+ "less than or equal to CMSOldPLABMax (" SIZE_FORMAT ")\n",
+ value, CMSOldPLABMax);
return JVMFlag::VIOLATES_CONSTRAINT;
}
status = MaxPLABSizeBounds("CMSOldPLABMin", value, verbose);
@@ -129,11 +128,11 @@ static JVMFlag::Error CMSReservedAreaConstraintFunc(const char* name, size_t val
ConcurrentMarkSweepGeneration* cms = CMSHeap::heap()->old_gen();
const size_t ergo_max = cms->cmsSpace()->max_flag_size_for_task_size();
if (value > ergo_max) {
- CommandLineError::print(verbose,
- "%s (" SIZE_FORMAT ") must be "
- "less than or equal to ergonomic maximum (" SIZE_FORMAT ") "
- "which is based on the maximum size of the old generation of the Java heap\n",
- name, value, ergo_max);
+ JVMFlag::printError(verbose,
+ "%s (" SIZE_FORMAT ") must be "
+ "less than or equal to ergonomic maximum (" SIZE_FORMAT ") "
+ "which is based on the maximum size of the old generation of the Java heap\n",
+ name, value, ergo_max);
return JVMFlag::VIOLATES_CONSTRAINT;
}
}
@@ -150,10 +149,10 @@ JVMFlag::Error CMSRescanMultipleConstraintFunc(size_t value, bool verbose) {
// Note that rescan_task_size() will be aligned if CMSRescanMultiple is a multiple of 'HeapWordSize'
// because rescan_task_size() is CardTable::card_size / HeapWordSize * BitsPerWord.
if (value % HeapWordSize != 0) {
- CommandLineError::print(verbose,
- "CMSRescanMultiple (" SIZE_FORMAT ") must be "
- "a multiple of " SIZE_FORMAT "\n",
- value, HeapWordSize);
+ JVMFlag::printError(verbose,
+ "CMSRescanMultiple (" SIZE_FORMAT ") must be "
+ "a multiple of " SIZE_FORMAT "\n",
+ value, HeapWordSize);
status = JVMFlag::VIOLATES_CONSTRAINT;
}
}
@@ -167,10 +166,10 @@ JVMFlag::Error CMSConcMarkMultipleConstraintFunc(size_t value, bool verbose) {
JVMFlag::Error CMSPrecleanDenominatorConstraintFunc(uintx value, bool verbose) {
if (UseConcMarkSweepGC && (value <= CMSPrecleanNumerator)) {
- CommandLineError::print(verbose,
- "CMSPrecleanDenominator (" UINTX_FORMAT ") must be "
- "strickly greater than CMSPrecleanNumerator (" UINTX_FORMAT ")\n",
- value, CMSPrecleanNumerator);
+ JVMFlag::printError(verbose,
+ "CMSPrecleanDenominator (" UINTX_FORMAT ") must be "
+ "strickly greater than CMSPrecleanNumerator (" UINTX_FORMAT ")\n",
+ value, CMSPrecleanNumerator);
return JVMFlag::VIOLATES_CONSTRAINT;
}
return JVMFlag::SUCCESS;
@@ -178,10 +177,10 @@ JVMFlag::Error CMSPrecleanDenominatorConstraintFunc(uintx value, bool verbose) {
JVMFlag::Error CMSPrecleanNumeratorConstraintFunc(uintx value, bool verbose) {
if (UseConcMarkSweepGC && (value >= CMSPrecleanDenominator)) {
- CommandLineError::print(verbose,
- "CMSPrecleanNumerator (" UINTX_FORMAT ") must be "
- "less than CMSPrecleanDenominator (" UINTX_FORMAT ")\n",
- value, CMSPrecleanDenominator);
+ JVMFlag::printError(verbose,
+ "CMSPrecleanNumerator (" UINTX_FORMAT ") must be "
+ "less than CMSPrecleanDenominator (" UINTX_FORMAT ")\n",
+ value, CMSPrecleanDenominator);
return JVMFlag::VIOLATES_CONSTRAINT;
}
return JVMFlag::SUCCESS;
@@ -191,10 +190,10 @@ JVMFlag::Error CMSSamplingGrainConstraintFunc(uintx value, bool verbose) {
if (UseConcMarkSweepGC) {
size_t max_capacity = CMSHeap::heap()->young_gen()->max_capacity();
if (value > max_uintx - max_capacity) {
- CommandLineError::print(verbose,
- "CMSSamplingGrain (" UINTX_FORMAT ") must be "
- "less than or equal to ergonomic maximum (" SIZE_FORMAT ")\n",
- value, max_uintx - max_capacity);
+ JVMFlag::printError(verbose,
+ "CMSSamplingGrain (" UINTX_FORMAT ") must be "
+ "less than or equal to ergonomic maximum (" SIZE_FORMAT ")\n",
+ value, max_uintx - max_capacity);
return JVMFlag::VIOLATES_CONSTRAINT;
}
}
@@ -216,11 +215,11 @@ JVMFlag::Error CMSBitMapYieldQuantumConstraintFunc(size_t value, bool verbose) {
size_t bitmap_size = cms->collector()->markBitMap()->sizeInWords();
if (value > bitmap_size) {
- CommandLineError::print(verbose,
- "CMSBitMapYieldQuantum (" SIZE_FORMAT ") must "
- "be less than or equal to bitmap size (" SIZE_FORMAT ") "
- "whose size corresponds to the size of old generation of the Java heap\n",
- value, bitmap_size);
+ JVMFlag::printError(verbose,
+ "CMSBitMapYieldQuantum (" SIZE_FORMAT ") must "
+ "be less than or equal to bitmap size (" SIZE_FORMAT ") "
+ "whose size corresponds to the size of old generation of the Java heap\n",
+ value, bitmap_size);
return JVMFlag::VIOLATES_CONSTRAINT;
}
}
@@ -229,9 +228,9 @@ JVMFlag::Error CMSBitMapYieldQuantumConstraintFunc(size_t value, bool verbose) {
JVMFlag::Error OldPLABSizeConstraintFuncCMS(size_t value, bool verbose) {
if (value == 0) {
- CommandLineError::print(verbose,
- "OldPLABSize (" SIZE_FORMAT ") must be greater than 0",
- value);
+ JVMFlag::printError(verbose,
+ "OldPLABSize (" SIZE_FORMAT ") must be greater than 0",
+ value);
return JVMFlag::VIOLATES_CONSTRAINT;
}
// For CMS, OldPLABSize is the number of free blocks of a given size that are used when
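The file-wide change above swaps `CommandLineError::print` for `JVMFlag::printError` without touching any constraint logic. A minimal standalone model of the constraint-function idiom these call sites follow (all names illustrative):

```c++
#include <cstdarg>
#include <cstdio>

// Model of the JVMFlag constraint idiom after this rename: the constraint
// returns a status and reports through a single printError-style hook that
// honors the 'verbose' flag.
enum class FlagError { SUCCESS, VIOLATES_CONSTRAINT };

static void print_error(bool verbose, const char* fmt, ...) {
  if (!verbose) return;
  va_list ap;
  va_start(ap, fmt);
  std::vfprintf(stderr, fmt, ap);
  va_end(ap);
}

static FlagError my_flag_constraint(size_t value, bool verbose) {
  const size_t ergo_max = 1024; // illustrative ergonomic maximum
  if (value > ergo_max) {
    print_error(verbose, "MyFlag (%zu) must be <= %zu\n", value, ergo_max);
    return FlagError::VIOLATES_CONSTRAINT;
  }
  return FlagError::SUCCESS;
}

int main() {
  return my_flag_constraint(2048, true) == FlagError::SUCCESS ? 0 : 1;
}
```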
diff --git a/src/hotspot/share/gc/cms/parNewGeneration.cpp b/src/hotspot/share/gc/cms/parNewGeneration.cpp
index 86025d7bafe..4e625ed1ec3 100644
--- a/src/hotspot/share/gc/cms/parNewGeneration.cpp
+++ b/src/hotspot/share/gc/cms/parNewGeneration.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "classfile/stringTable.hpp"
#include "gc/cms/cmsHeap.inline.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.hpp"
@@ -589,7 +590,8 @@ ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen,
_young_gen(young_gen), _old_gen(old_gen),
_young_old_boundary(young_old_boundary),
_state_set(state_set),
- _strong_roots_scope(strong_roots_scope)
+ _strong_roots_scope(strong_roots_scope),
+ _par_state_string(StringTable::weak_storage())
{}
void ParNewGenTask::work(uint worker_id) {
@@ -611,7 +613,8 @@ void ParNewGenTask::work(uint worker_id) {
heap->young_process_roots(_strong_roots_scope,
&par_scan_state.to_space_root_closure(),
&par_scan_state.older_gen_closure(),
- &cld_scan_closure);
+ &cld_scan_closure,
+ &_par_state_string);
par_scan_state.end_strong_roots();
@@ -958,7 +961,7 @@ void ParNewGeneration::collect(bool full,
// Can the mt_degree be set later (at run_task() time would be best)?
rp->set_active_mt_degree(active_workers);
ReferenceProcessorStats stats;
- ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_queues());
+ ReferenceProcessorPhaseTimes pt(_gc_timer, rp->max_num_queues());
if (rp->processing_is_mt()) {
ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
stats = rp->process_discovered_references(&is_alive, &keep_alive,
diff --git a/src/hotspot/share/gc/cms/parNewGeneration.hpp b/src/hotspot/share/gc/cms/parNewGeneration.hpp
index 92b74004876..3ccd16224d6 100644
--- a/src/hotspot/share/gc/cms/parNewGeneration.hpp
+++ b/src/hotspot/share/gc/cms/parNewGeneration.hpp
@@ -29,6 +29,7 @@
#include "gc/serial/defNewGeneration.hpp"
#include "gc/shared/copyFailedInfo.hpp"
#include "gc/shared/gcTrace.hpp"
+#include "gc/shared/oopStorageParState.hpp"
#include "gc/shared/plab.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/taskqueue.hpp"
@@ -236,6 +237,7 @@ class ParNewGenTask: public AbstractGangTask {
HeapWord* _young_old_boundary;
class ParScanThreadStateSet* _state_set;
StrongRootsScope* _strong_roots_scope;
+  OopStorage::ParState<false, false> _par_state_string;
public:
ParNewGenTask(ParNewGeneration* young_gen,
diff --git a/src/hotspot/share/gc/epsilon/epsilonArguments.cpp b/src/hotspot/share/gc/epsilon/epsilonArguments.cpp
new file mode 100644
index 00000000000..b323ef32f85
--- /dev/null
+++ b/src/hotspot/share/gc/epsilon/epsilonArguments.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/epsilon/epsilonArguments.hpp"
+#include "gc/epsilon/epsilonHeap.hpp"
+#include "gc/epsilon/epsilonCollectorPolicy.hpp"
+#include "gc/shared/gcArguments.inline.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/globals_extension.hpp"
+#include "runtime/vm_version.hpp"
+#include "utilities/macros.hpp"
+
+size_t EpsilonArguments::conservative_max_heap_alignment() {
+ return UseLargePages ? os::large_page_size() : os::vm_page_size();
+}
+
+void EpsilonArguments::initialize() {
+ GCArguments::initialize();
+
+ assert(UseEpsilonGC, "Sanity");
+
+ // Forcefully exit when OOME is detected. Nothing we can do at that point.
+ if (FLAG_IS_DEFAULT(ExitOnOutOfMemoryError)) {
+ FLAG_SET_DEFAULT(ExitOnOutOfMemoryError, true);
+ }
+
+ if (EpsilonMaxTLABSize < MinTLABSize) {
+ warning("EpsilonMaxTLABSize < MinTLABSize, adjusting it to " SIZE_FORMAT, MinTLABSize);
+ EpsilonMaxTLABSize = MinTLABSize;
+ }
+
+ if (!EpsilonElasticTLAB && EpsilonElasticTLABDecay) {
+ warning("Disabling EpsilonElasticTLABDecay because EpsilonElasticTLAB is disabled");
+ FLAG_SET_DEFAULT(EpsilonElasticTLABDecay, false);
+ }
+
+#ifdef COMPILER2
+ // Enable loop strip mining: there are still non-GC safepoints, no need to make it worse
+ if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) {
+ FLAG_SET_DEFAULT(UseCountedLoopSafepoints, true);
+ if (FLAG_IS_DEFAULT(LoopStripMiningIter)) {
+ FLAG_SET_DEFAULT(LoopStripMiningIter, 1000);
+ }
+ }
+#endif
+}
+
+CollectedHeap* EpsilonArguments::create_heap() {
+  return create_heap_with_policy<EpsilonHeap, EpsilonCollectorPolicy>();
+}
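`EpsilonArguments::initialize()` follows the usual ergonomics discipline: it only adjusts flags that are still at their defaults and never overrides an explicit user setting. A standalone model of the `FLAG_IS_DEFAULT`/`FLAG_SET_DEFAULT` guard (stand-in flag type, not the real flag machinery):

```c++
#include <cstdio>

// Model of the ergonomics pattern in EpsilonArguments::initialize(): a GC
// may tune a flag's default, but an explicit user setting always wins.
struct Flag {
  bool value;
  bool set_by_user;
};

static void apply_ergonomics(Flag& exit_on_oome) {
  if (!exit_on_oome.set_by_user) { // FLAG_IS_DEFAULT(ExitOnOutOfMemoryError)
    exit_on_oome.value = true;     // FLAG_SET_DEFAULT(..., true)
  }
}

int main() {
  Flag user_set{false, true}, untouched{false, false};
  apply_ergonomics(user_set);  // respected: stays false
  apply_ergonomics(untouched); // ergonomically enabled
  std::printf("%d %d\n", user_set.value, untouched.value);
}
```

Since Epsilon is experimental in this release, it is selected with `-XX:+UnlockExperimentalVMOptions -XX:+UseEpsilonGC`.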
diff --git a/src/hotspot/share/gc/epsilon/epsilonArguments.hpp b/src/hotspot/share/gc/epsilon/epsilonArguments.hpp
new file mode 100644
index 00000000000..865c2f1c412
--- /dev/null
+++ b/src/hotspot/share/gc/epsilon/epsilonArguments.hpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_EPSILON_EPSILONARGUMENTS_HPP
+#define SHARE_GC_EPSILON_EPSILONARGUMENTS_HPP
+
+#include "gc/shared/gcArguments.hpp"
+
+class CollectedHeap;
+
+class EpsilonArguments : public GCArguments {
+public:
+ virtual void initialize();
+ virtual size_t conservative_max_heap_alignment();
+ virtual CollectedHeap* create_heap();
+};
+
+#endif // SHARE_GC_EPSILON_EPSILONARGUMENTS_HPP
diff --git a/src/hotspot/share/gc/epsilon/epsilonBarrierSet.cpp b/src/hotspot/share/gc/epsilon/epsilonBarrierSet.cpp
new file mode 100644
index 00000000000..e53297828e0
--- /dev/null
+++ b/src/hotspot/share/gc/epsilon/epsilonBarrierSet.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/thread.hpp"
+#include "gc/epsilon/epsilonBarrierSet.hpp"
+#include "gc/epsilon/epsilonThreadLocalData.hpp"
+#include "gc/shared/collectorPolicy.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
+#include "utilities/macros.hpp"
+#ifdef COMPILER1
+#include "gc/shared/c1/barrierSetC1.hpp"
+#endif
+#ifdef COMPILER2
+#include "gc/shared/c2/barrierSetC2.hpp"
+#endif
+
+EpsilonBarrierSet::EpsilonBarrierSet() : BarrierSet(
+ make_barrier_set_assembler<BarrierSetAssembler>(),
+ make_barrier_set_c1<BarrierSetC1>(),
+ make_barrier_set_c2<BarrierSetC2>(),
+ BarrierSet::FakeRtti(BarrierSet::EpsilonBarrierSet)) {};
+
+void EpsilonBarrierSet::on_thread_create(Thread *thread) {
+ EpsilonThreadLocalData::create(thread);
+}
+
+void EpsilonBarrierSet::on_thread_destroy(Thread *thread) {
+ EpsilonThreadLocalData::destroy(thread);
+}
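For contrast with the empty barrier set above, here is what a post-write barrier normally costs per reference store, next to Epsilon's version. The card size and table layout are illustrative assumptions, not G1's actual code:

    #include <cstdint>

    // A card-marking collector pays this on every reference store: mark the
    // card covering the updated field so the collector can find it later.
    static uint8_t card_table[1u << 20];

    inline void store_with_card_mark(void** field, void* val) {
      *field = val;
      // Toy card index: 512-byte cards, masked to fit the toy table.
      card_table[(reinterpret_cast<uintptr_t>(field) >> 9) & ((1u << 20) - 1)] = 0;
    }

    // Epsilon's version: with no collector to inform, a reference store is
    // just the store.
    inline void store_epsilon(void** field, void* val) {
      *field = val;
    }

    int main() {
      static void* slot = nullptr;
      int dummy;
      store_with_card_mark(&slot, &dummy);
      store_epsilon(&slot, &dummy);
    }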
diff --git a/src/hotspot/share/gc/epsilon/epsilonBarrierSet.hpp b/src/hotspot/share/gc/epsilon/epsilonBarrierSet.hpp
new file mode 100644
index 00000000000..420e38fd13e
--- /dev/null
+++ b/src/hotspot/share/gc/epsilon/epsilonBarrierSet.hpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_EPSILON_BARRIERSET_HPP
+#define SHARE_VM_GC_EPSILON_BARRIERSET_HPP
+
+#include "gc/shared/barrierSetAssembler.hpp"
+#include "gc/shared/barrierSet.hpp"
+
+// No interaction with application is required for Epsilon, and therefore
+// the barrier set is empty.
+class EpsilonBarrierSet: public BarrierSet {
+ friend class VMStructs;
+
+public:
+ EpsilonBarrierSet();
+
+ virtual void print_on(outputStream *st) const {}
+
+ virtual void on_thread_create(Thread* thread);
+ virtual void on_thread_destroy(Thread* thread);
+
+ template <DecoratorSet decorators, typename BarrierSetT = EpsilonBarrierSet>
+ class AccessBarrier: public BarrierSet::AccessBarrier<decorators, BarrierSetT> {};
+};
+
+template<>
+struct BarrierSet::GetName<EpsilonBarrierSet> {
+ static const BarrierSet::Name value = BarrierSet::EpsilonBarrierSet;
+};
+
+template<>
+struct BarrierSet::GetType<BarrierSet::EpsilonBarrierSet> {
+ typedef ::EpsilonBarrierSet type;
+};
+
+#endif // SHARE_VM_GC_EPSILON_BARRIERSET_HPP
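The two specializations above are what make checked downcasts such as barrier_set_cast<EpsilonBarrierSet>(...) work: they tie the concrete class to its enum tag at compile time. A self-contained sketch of the idiom, with invented names:

    #include <cassert>

    struct BarrierSetBase {
      enum Name { CardTable, Epsilon };
      explicit BarrierSetBase(Name n) : _name(n) {}
      Name name() const { return _name; }
    private:
      Name _name;
    };

    struct EpsilonLikeBarrierSet : BarrierSetBase {
      EpsilonLikeBarrierSet() : BarrierSetBase(Epsilon) {}
    };

    // Type -> tag mapping, the analogue of BarrierSet::GetName above.
    template <typename T> struct GetName;
    template <> struct GetName<EpsilonLikeBarrierSet> {
      static const BarrierSetBase::Name value = BarrierSetBase::Epsilon;
    };

    // Checked downcast: legal only when the runtime tag matches the tag
    // registered for the requested static type.
    template <typename T>
    T* barrier_cast(BarrierSetBase* bs) {
      assert(bs->name() == GetName<T>::value);
      return static_cast<T*>(bs);
    }

    int main() {
      EpsilonLikeBarrierSet bs;
      BarrierSetBase* base = &bs;
      EpsilonLikeBarrierSet* typed = barrier_cast<EpsilonLikeBarrierSet>(base);
      (void)typed;
    }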
diff --git a/src/hotspot/share/gc/epsilon/epsilonCollectorPolicy.hpp b/src/hotspot/share/gc/epsilon/epsilonCollectorPolicy.hpp
new file mode 100644
index 00000000000..855395e51e7
--- /dev/null
+++ b/src/hotspot/share/gc/epsilon/epsilonCollectorPolicy.hpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_EPSILON_COLLECTORPOLICY_HPP
+#define SHARE_VM_GC_EPSILON_COLLECTORPOLICY_HPP
+
+#include "gc/shared/collectorPolicy.hpp"
+
+class EpsilonCollectorPolicy: public CollectorPolicy {
+protected:
+ virtual void initialize_alignments() {
+ size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
+ size_t align = MAX2((size_t)os::vm_allocation_granularity(), page_size);
+ _space_alignment = align;
+ _heap_alignment = align;
+ }
+
+public:
+ EpsilonCollectorPolicy() : CollectorPolicy() {};
+};
+
+#endif // SHARE_VM_GC_EPSILON_COLLECTORPOLICY_HPP
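A quick worked example of initialize_alignments() above, with illustrative platform numbers (the real values come from os::): the alignment is whichever is larger, the allocation granularity or the page size in use.

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      std::size_t granularity = 64 * 1024;        // e.g. Windows VirtualAlloc
      std::size_t small_page  = 4 * 1024;
      std::size_t large_page  = 2 * 1024 * 1024;  // e.g. x86_64 huge page

      // Small pages: the granularity dominates, 64K alignment.
      std::printf("%zu\n", std::max(granularity, small_page));
      // Large pages: the page size dominates, 2M alignment.
      std::printf("%zu\n", std::max(granularity, large_page));
    }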
diff --git a/src/hotspot/share/gc/epsilon/epsilonHeap.cpp b/src/hotspot/share/gc/epsilon/epsilonHeap.cpp
new file mode 100644
index 00000000000..612914b9747
--- /dev/null
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.cpp
@@ -0,0 +1,285 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/epsilon/epsilonHeap.hpp"
+#include "gc/epsilon/epsilonMemoryPool.hpp"
+#include "gc/epsilon/epsilonThreadLocalData.hpp"
+#include "memory/allocation.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/resourceArea.hpp"
+
+jint EpsilonHeap::initialize() {
+ size_t align = _policy->heap_alignment();
+ size_t init_byte_size = align_up(_policy->initial_heap_byte_size(), align);
+ size_t max_byte_size = align_up(_policy->max_heap_byte_size(), align);
+
+ // Initialize backing storage
+ ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
+ _virtual_space.initialize(heap_rs, init_byte_size);
+
+ MemRegion committed_region((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high());
+ MemRegion reserved_region((HeapWord*)_virtual_space.low_boundary(), (HeapWord*)_virtual_space.high_boundary());
+
+ initialize_reserved_region(reserved_region.start(), reserved_region.end());
+
+ _space = new ContiguousSpace();
+ _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);
+
+ // Precompute hot fields
+ _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), EpsilonMaxTLABSize / HeapWordSize);
+ _step_counter_update = MIN2(max_byte_size / 16, EpsilonUpdateCountersStep);
+ _step_heap_print = (EpsilonPrintHeapSteps == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapSteps);
+ _decay_time_ns = (int64_t) EpsilonTLABDecayTime * NANOSECS_PER_MILLISEC;
+
+ // Enable monitoring
+ _monitoring_support = new EpsilonMonitoringSupport(this);
+ _last_counter_update = 0;
+ _last_heap_print = 0;
+
+ // Install barrier set
+ BarrierSet::set_barrier_set(new EpsilonBarrierSet());
+
+ // All done, print out the configuration
+ if (init_byte_size != max_byte_size) {
+ log_info(gc)("Resizeable heap; starting at " SIZE_FORMAT "M, max: " SIZE_FORMAT "M, step: " SIZE_FORMAT "M",
+ init_byte_size / M, max_byte_size / M, EpsilonMinHeapExpand / M);
+ } else {
+ log_info(gc)("Non-resizeable heap; start/max: " SIZE_FORMAT "M", init_byte_size / M);
+ }
+
+ if (UseTLAB) {
+ log_info(gc)("Using TLAB allocation; max: " SIZE_FORMAT "K", _max_tlab_size * HeapWordSize / K);
+ if (EpsilonElasticTLAB) {
+ log_info(gc)("Elastic TLABs enabled; elasticity: %.2fx", EpsilonTLABElasticity);
+ }
+ if (EpsilonElasticTLABDecay) {
+ log_info(gc)("Elastic TLABs decay enabled; decay time: " SIZE_FORMAT "ms", EpsilonTLABDecayTime);
+ }
+ } else {
+ log_info(gc)("Not using TLAB allocation");
+ }
+
+ return JNI_OK;
+}
+
+void EpsilonHeap::post_initialize() {
+ CollectedHeap::post_initialize();
+}
+
+void EpsilonHeap::initialize_serviceability() {
+ _pool = new EpsilonMemoryPool(this);
+ _memory_manager.add_pool(_pool);
+}
+
+GrowableArray<GCMemoryManager*> EpsilonHeap::memory_managers() {
+ GrowableArray<GCMemoryManager*> memory_managers(1);
+ memory_managers.append(&_memory_manager);
+ return memory_managers;
+}
+
+GrowableArray<MemoryPool*> EpsilonHeap::memory_pools() {
+ GrowableArray<MemoryPool*> memory_pools(1);
+ memory_pools.append(_pool);
+ return memory_pools;
+}
+
+size_t EpsilonHeap::unsafe_max_tlab_alloc(Thread* thr) const {
+ // Return max allocatable TLAB size, and let allocation path figure out
+ // the actual TLAB allocation size.
+ return _max_tlab_size;
+}
+
+EpsilonHeap* EpsilonHeap::heap() {
+ CollectedHeap* heap = Universe::heap();
+ assert(heap != NULL, "Uninitialized access to EpsilonHeap::heap()");
+ assert(heap->kind() == CollectedHeap::Epsilon, "Not an Epsilon heap");
+ return (EpsilonHeap*)heap;
+}
+
+HeapWord* EpsilonHeap::allocate_work(size_t size) {
+ HeapWord* res = _space->par_allocate(size);
+
+ while (res == NULL) {
+ // Allocation failed, attempt expansion, and retry:
+ MutexLockerEx ml(Heap_lock);
+
+ size_t space_left = max_capacity() - capacity();
+ size_t want_space = MAX2(size, EpsilonMinHeapExpand);
+
+ if (want_space < space_left) {
+ // Enough space to expand in bulk:
+ bool expand = _virtual_space.expand_by(want_space);
+ assert(expand, "Should be able to expand");
+ } else if (size < space_left) {
+ // No space to expand in bulk, and this allocation is still possible,
+ // take all the remaining space:
+ bool expand = _virtual_space.expand_by(space_left);
+ assert(expand, "Should be able to expand");
+ } else {
+ // No space left:
+ return NULL;
+ }
+
+ _space->set_end((HeapWord *) _virtual_space.high());
+ res = _space->par_allocate(size);
+ }
+
+ size_t used = _space->used();
+
+ // Allocation successful, update counters
+ {
+ size_t last = _last_counter_update;
+ if ((used - last >= _step_counter_update) && Atomic::cmpxchg(used, &_last_counter_update, last) == last) {
+ _monitoring_support->update_counters();
+ }
+ }
+
+ // ...and print the occupancy line, if needed
+ {
+ size_t last = _last_heap_print;
+ if ((used - last >= _step_heap_print) && Atomic::cmpxchg(used, &_last_heap_print, last) == last) {
+ log_info(gc)("Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M (%.2f%%) committed, " SIZE_FORMAT "M (%.2f%%) used",
+ max_capacity() / M,
+ capacity() / M,
+ capacity() * 100.0 / max_capacity(),
+ used / M,
+ used * 100.0 / max_capacity());
+ }
+ }
+
+ return res;
+}
+
+HeapWord* EpsilonHeap::allocate_new_tlab(size_t min_size,
+ size_t requested_size,
+ size_t* actual_size) {
+ Thread* thread = Thread::current();
+
+ // Defaults in case elastic paths are not taken
+ bool fits = true;
+ size_t size = requested_size;
+ size_t ergo_tlab = requested_size;
+ int64_t time = 0;
+
+ if (EpsilonElasticTLAB) {
+ ergo_tlab = EpsilonThreadLocalData::ergo_tlab_size(thread);
+
+ if (EpsilonElasticTLABDecay) {
+ int64_t last_time = EpsilonThreadLocalData::last_tlab_time(thread);
+ time = (int64_t) os::javaTimeNanos();
+
+ assert(last_time <= time, "time should be monotonic");
+
+ // If the thread had not allocated recently, retract the ergonomic size.
+ // This conserves memory when the thread had initial burst of allocations,
+ // and then started allocating only sporadically.
+ if (last_time != 0 && (time - last_time > _decay_time_ns)) {
+ ergo_tlab = 0;
+ EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
+ }
+ }
+
+ // If we can fit the allocation under current TLAB size, do so.
+ // Otherwise, we want to elastically increase the TLAB size.
+ fits = (requested_size <= ergo_tlab);
+ if (!fits) {
+ size = (size_t) (ergo_tlab * EpsilonTLABElasticity);
+ }
+ }
+
+ // Always honor boundaries
+ size = MAX2(min_size, MIN2(_max_tlab_size, size));
+
+ if (log_is_enabled(Trace, gc)) {
+ ResourceMark rm;
+ log_trace(gc)("TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT
+ "K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
+ thread->name(),
+ requested_size * HeapWordSize / K,
+ min_size * HeapWordSize / K,
+ _max_tlab_size * HeapWordSize / K,
+ ergo_tlab * HeapWordSize / K,
+ size * HeapWordSize / K);
+ }
+
+ // All prepared, let's do it!
+ HeapWord* res = allocate_work(size);
+
+ if (res != NULL) {
+ // Allocation successful
+ *actual_size = size;
+ if (EpsilonElasticTLABDecay) {
+ EpsilonThreadLocalData::set_last_tlab_time(thread, time);
+ }
+ if (EpsilonElasticTLAB && !fits) {
+ // If we requested expansion, this is our new ergonomic TLAB size
+ EpsilonThreadLocalData::set_ergo_tlab_size(thread, size);
+ }
+ } else {
+ // Allocation failed, reset ergonomics to try and fit smaller TLABs
+ if (EpsilonElasticTLAB) {
+ EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
+ }
+ }
+
+ return res;
+}
+
+HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
+ *gc_overhead_limit_was_exceeded = false;
+ return allocate_work(size);
+}
+
+void EpsilonHeap::collect(GCCause::Cause cause) {
+ log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
+ _monitoring_support->update_counters();
+}
+
+void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
+ log_info(gc)("Full GC request for \"%s\" is ignored", GCCause::to_string(gc_cause()));
+ _monitoring_support->update_counters();
+}
+
+void EpsilonHeap::safe_object_iterate(ObjectClosure *cl) {
+ _space->safe_object_iterate(cl);
+}
+
+void EpsilonHeap::print_on(outputStream *st) const {
+ st->print_cr("Epsilon Heap");
+
+ // Cast away constness:
+ ((VirtualSpace)_virtual_space).print_on(st);
+
+ st->print_cr("Allocation space:");
+ _space->print_on(st);
+}
+
+void EpsilonHeap::print_tracing_info() const {
+ Log(gc) log;
+ size_t allocated_kb = used() / K;
+ log.info("Total allocated: " SIZE_FORMAT " KB",
+ allocated_kb);
+ log.info("Average allocation rate: " SIZE_FORMAT " KB/sec",
+ (size_t)(allocated_kb * NANOSECS_PER_SEC / os::elapsed_counter()));
+}
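The two cmpxchg blocks in allocate_work() above implement a publish-at-most-once-per-step throttle: many allocating threads may cross a threshold together, but only the CAS winner performs the comparatively costly counter update or log line. The same pattern with std::atomic, as a sketch rather than HotSpot's Atomic API:

    #include <atomic>
    #include <cstddef>
    #include <cstdio>

    std::atomic<std::size_t> last_published{0};
    const std::size_t step = 1024 * 1024;  // publish at most once per 1M used

    void maybe_publish(std::size_t used) {
      std::size_t last = last_published.load();
      // Equivalent of: Atomic::cmpxchg(used, &_last_counter_update, last) == last
      if (used - last >= step &&
          last_published.compare_exchange_strong(last, used)) {
        std::printf("published at %zu bytes used\n", used);  // one winner per step
      }
    }

    int main() {
      maybe_publish(512 * 1024);        // below threshold: throttled
      maybe_publish(2 * 1024 * 1024);   // crosses threshold: publishes once
      maybe_publish(2 * 1024 * 1024);   // same occupancy again: throttled
    }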
diff --git a/src/hotspot/share/gc/epsilon/epsilonHeap.hpp b/src/hotspot/share/gc/epsilon/epsilonHeap.hpp
new file mode 100644
index 00000000000..f58fe522c98
--- /dev/null
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.hpp
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_EPSILON_COLLECTEDHEAP_HPP
+#define SHARE_VM_GC_EPSILON_COLLECTEDHEAP_HPP
+
+#include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/softRefPolicy.hpp"
+#include "gc/shared/space.hpp"
+#include "services/memoryManager.hpp"
+#include "gc/epsilon/epsilonCollectorPolicy.hpp"
+#include "gc/epsilon/epsilonMonitoringSupport.hpp"
+#include "gc/epsilon/epsilonBarrierSet.hpp"
+#include "gc/epsilon/epsilon_globals.hpp"
+
+class EpsilonHeap : public CollectedHeap {
+ friend class VMStructs;
+private:
+ EpsilonCollectorPolicy* _policy;
+ SoftRefPolicy _soft_ref_policy;
+ EpsilonMonitoringSupport* _monitoring_support;
+ MemoryPool* _pool;
+ GCMemoryManager _memory_manager;
+ ContiguousSpace* _space;
+ VirtualSpace _virtual_space;
+ size_t _max_tlab_size;
+ size_t _step_counter_update;
+ size_t _step_heap_print;
+ int64_t _decay_time_ns;
+ volatile size_t _last_counter_update;
+ volatile size_t _last_heap_print;
+
+public:
+ static EpsilonHeap* heap();
+
+ EpsilonHeap(EpsilonCollectorPolicy* p) :
+ _policy(p),
+ _memory_manager("Epsilon Heap", "") {};
+
+ virtual Name kind() const {
+ return CollectedHeap::Epsilon;
+ }
+
+ virtual const char* name() const {
+ return "Epsilon";
+ }
+
+ virtual CollectorPolicy* collector_policy() const {
+ return _policy;
+ }
+
+ virtual SoftRefPolicy* soft_ref_policy() {
+ return &_soft_ref_policy;
+ }
+
+ virtual jint initialize();
+ virtual void post_initialize();
+ virtual void initialize_serviceability();
+
+ virtual GrowableArray<GCMemoryManager*> memory_managers();
+ virtual GrowableArray<MemoryPool*> memory_pools();
+
+ virtual size_t max_capacity() const { return _virtual_space.reserved_size(); }
+ virtual size_t capacity() const { return _virtual_space.committed_size(); }
+ virtual size_t used() const { return _space->used(); }
+
+ virtual bool is_in(const void* p) const {
+ return _space->is_in(p);
+ }
+
+ virtual bool is_scavengable(oop obj) {
+ // No GC is going to happen, therefore no objects ever move.
+ return false;
+ }
+
+ virtual bool is_maximal_no_gc() const {
+ // No GC is going to happen. Return "we are at max", when we are about to fail.
+ return used() == capacity();
+ }
+
+ // Allocation
+ HeapWord* allocate_work(size_t size);
+ virtual HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
+ virtual HeapWord* allocate_new_tlab(size_t min_size,
+ size_t requested_size,
+ size_t* actual_size);
+
+ // TLAB allocation
+ virtual bool supports_tlab_allocation() const { return true; }
+ virtual size_t tlab_capacity(Thread* thr) const { return capacity(); }
+ virtual size_t tlab_used(Thread* thr) const { return used(); }
+ virtual size_t max_tlab_size() const { return _max_tlab_size; }
+ virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
+
+ virtual void collect(GCCause::Cause cause);
+ virtual void do_full_collection(bool clear_all_soft_refs);
+
+ // Heap walking support
+ virtual void safe_object_iterate(ObjectClosure* cl);
+ virtual void object_iterate(ObjectClosure* cl) {
+ safe_object_iterate(cl);
+ }
+
+ // No support for block parsing.
+ virtual HeapWord* block_start(const void* addr) const { return NULL; }
+ virtual size_t block_size(const HeapWord* addr) const { return 0; }
+ virtual bool block_is_obj(const HeapWord* addr) const { return false; }
+
+ // No GC threads
+ virtual void print_gc_threads_on(outputStream* st) const {}
+ virtual void gc_threads_do(ThreadClosure* tc) const {}
+
+ // No heap verification
+ virtual void prepare_for_verify() {}
+ virtual void verify(VerifyOption option) {}
+
+ virtual jlong millis_since_last_gc() {
+ // Report time since the VM start
+ return os::elapsed_counter() / NANOSECS_PER_MILLISEC;
+ }
+
+ virtual void print_on(outputStream* st) const;
+ virtual void print_tracing_info() const;
+
+};
+
+#endif // SHARE_VM_GC_EPSILON_COLLECTEDHEAP_HPP
diff --git a/src/hotspot/share/gc/epsilon/epsilonMemoryPool.cpp b/src/hotspot/share/gc/epsilon/epsilonMemoryPool.cpp
new file mode 100644
index 00000000000..4896e2d7a21
--- /dev/null
+++ b/src/hotspot/share/gc/epsilon/epsilonMemoryPool.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/epsilon/epsilonHeap.hpp"
+#include "gc/epsilon/epsilonMemoryPool.hpp"
+
+EpsilonMemoryPool::EpsilonMemoryPool(EpsilonHeap* heap) :
+ _heap(heap),
+ CollectedMemoryPool("Epsilon Heap",
+ heap->capacity(),
+ heap->max_capacity(),
+ false) {
+ assert(UseEpsilonGC, "sanity");
+}
+
+MemoryUsage EpsilonMemoryPool::get_memory_usage() {
+ size_t initial_sz = initial_size();
+ size_t max_sz = max_size();
+ size_t used = used_in_bytes();
+ size_t committed = committed_in_bytes();
+
+ return MemoryUsage(initial_sz, used, committed, max_sz);
+}
diff --git a/src/hotspot/share/gc/epsilon/epsilonMemoryPool.hpp b/src/hotspot/share/gc/epsilon/epsilonMemoryPool.hpp
new file mode 100644
index 00000000000..97f85b036fb
--- /dev/null
+++ b/src/hotspot/share/gc/epsilon/epsilonMemoryPool.hpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_EPSILON_EPSILONMEMORYPOOL_HPP
+#define SHARE_VM_GC_EPSILON_EPSILONMEMORYPOOL_HPP
+
+#include "gc/epsilon/epsilonHeap.hpp"
+#include "services/memoryPool.hpp"
+#include "services/memoryUsage.hpp"
+#include "utilities/macros.hpp"
+
+class EpsilonMemoryPool : public CollectedMemoryPool {
+private:
+ EpsilonHeap* _heap;
+
+public:
+ EpsilonMemoryPool(EpsilonHeap* heap);
+ size_t committed_in_bytes() { return _heap->capacity(); }
+ size_t used_in_bytes() { return _heap->used(); }
+ size_t max_size() const { return _heap->max_capacity(); }
+ MemoryUsage get_memory_usage();
+};
+
+#endif // SHARE_VM_GC_EPSILON_EPSILONMEMORYPOOL_HPP
diff --git a/src/hotspot/share/gc/epsilon/epsilonMonitoringSupport.cpp b/src/hotspot/share/gc/epsilon/epsilonMonitoringSupport.cpp
new file mode 100644
index 00000000000..5fc19ee5beb
--- /dev/null
+++ b/src/hotspot/share/gc/epsilon/epsilonMonitoringSupport.cpp
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/epsilon/epsilonMonitoringSupport.hpp"
+#include "gc/epsilon/epsilonHeap.hpp"
+#include "gc/shared/generationCounters.hpp"
+#include "memory/allocation.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/metaspaceCounters.hpp"
+#include "memory/resourceArea.hpp"
+#include "services/memoryService.hpp"
+
+class EpsilonSpaceCounters: public CHeapObj<mtGC> {
+ friend class VMStructs;
+
+private:
+ PerfVariable* _capacity;
+ PerfVariable* _used;
+ char* _name_space;
+
+public:
+ EpsilonSpaceCounters(const char* name,
+ int ordinal,
+ size_t max_size,
+ size_t initial_capacity,
+ GenerationCounters* gc) {
+ if (UsePerfData) {
+ EXCEPTION_MARK;
+ ResourceMark rm;
+
+ const char* cns = PerfDataManager::name_space(gc->name_space(), "space", ordinal);
+
+ _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1, mtGC);
+ strcpy(_name_space, cns);
+
+ const char* cname = PerfDataManager::counter_name(_name_space, "name");
+ PerfDataManager::create_string_constant(SUN_GC, cname, name, CHECK);
+
+ cname = PerfDataManager::counter_name(_name_space, "maxCapacity");
+ PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes, (jlong)max_size, CHECK);
+
+ cname = PerfDataManager::counter_name(_name_space, "capacity");
+ _capacity = PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes, initial_capacity, CHECK);
+
+ cname = PerfDataManager::counter_name(_name_space, "used");
+ _used = PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes, (jlong) 0, CHECK);
+
+ cname = PerfDataManager::counter_name(_name_space, "initCapacity");
+ PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes, initial_capacity, CHECK);
+ }
+ }
+
+ ~EpsilonSpaceCounters() {
+ if (_name_space != NULL) {
+ FREE_C_HEAP_ARRAY(char, _name_space);
+ }
+ }
+
+ inline void update_all(size_t capacity, size_t used) {
+ _capacity->set_value(capacity);
+ _used->set_value(used);
+ }
+};
+
+class EpsilonGenerationCounters : public GenerationCounters {
+private:
+ EpsilonHeap* _heap;
+public:
+ EpsilonGenerationCounters(EpsilonHeap* heap) :
+ GenerationCounters("Heap", 1, 1, 0, heap->max_capacity(), heap->capacity()),
+ _heap(heap)
+ {};
+
+ virtual void update_all() {
+ _current_size->set_value(_heap->capacity());
+ }
+};
+
+EpsilonMonitoringSupport::EpsilonMonitoringSupport(EpsilonHeap* heap) {
+ _heap_counters = new EpsilonGenerationCounters(heap);
+ _space_counters = new EpsilonSpaceCounters("Heap", 0, heap->max_capacity(), 0, _heap_counters);
+}
+
+void EpsilonMonitoringSupport::update_counters() {
+ MemoryService::track_memory_usage();
+
+ if (UsePerfData) {
+ EpsilonHeap* heap = EpsilonHeap::heap();
+ size_t used = heap->used();
+ size_t capacity = heap->capacity();
+ _heap_counters->update_all();
+ _space_counters->update_all(capacity, used);
+ MetaspaceCounters::update_performance_counters();
+ CompressedClassSpaceCounters::update_performance_counters();
+ }
+}
+
diff --git a/src/hotspot/share/gc/epsilon/epsilonMonitoringSupport.hpp b/src/hotspot/share/gc/epsilon/epsilonMonitoringSupport.hpp
new file mode 100644
index 00000000000..5747bcaaaa1
--- /dev/null
+++ b/src/hotspot/share/gc/epsilon/epsilonMonitoringSupport.hpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_EPSILON_EPSILONMONITORINGSUPPORT_HPP
+#define SHARE_VM_GC_EPSILON_EPSILONMONITORINGSUPPORT_HPP
+
+#include "memory/allocation.hpp"
+
+class GenerationCounters;
+class EpsilonSpaceCounters;
+class EpsilonHeap;
+
+class EpsilonMonitoringSupport : public CHeapObj<mtGC> {
+private:
+ GenerationCounters* _heap_counters;
+ EpsilonSpaceCounters* _space_counters;
+
+public:
+ EpsilonMonitoringSupport(EpsilonHeap* heap);
+ void update_counters();
+};
+
+#endif // SHARE_VM_GC_EPSILON_EPSILONMONITORINGSUPPORT_HPP
+
diff --git a/src/hotspot/share/gc/epsilon/epsilonThreadLocalData.hpp b/src/hotspot/share/gc/epsilon/epsilonThreadLocalData.hpp
new file mode 100644
index 00000000000..5d614b86c0d
--- /dev/null
+++ b/src/hotspot/share/gc/epsilon/epsilonThreadLocalData.hpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_EPSILON_EPSILONTHREADLOCALDATA_HPP
+#define SHARE_VM_GC_EPSILON_EPSILONTHREADLOCALDATA_HPP
+
+#include "runtime/thread.hpp"
+#include "utilities/debug.hpp"
+
+class EpsilonThreadLocalData {
+private:
+ size_t _ergo_tlab_size;
+ int64_t _last_tlab_time;
+
+ EpsilonThreadLocalData() :
+ _ergo_tlab_size(0),
+ _last_tlab_time(0) {}
+
+ static EpsilonThreadLocalData* data(Thread* thread) {
+ assert(UseEpsilonGC, "Sanity");
+ return thread->gc_data<EpsilonThreadLocalData>();
+ }
+
+public:
+ static void create(Thread* thread) {
+ new (data(thread)) EpsilonThreadLocalData();
+ }
+
+ static void destroy(Thread* thread) {
+ data(thread)->~EpsilonThreadLocalData();
+ }
+
+ static size_t ergo_tlab_size(Thread *thread) {
+ return data(thread)->_ergo_tlab_size;
+ }
+
+ static int64_t last_tlab_time(Thread *thread) {
+ return data(thread)->_last_tlab_time;
+ }
+
+ static void set_ergo_tlab_size(Thread *thread, size_t val) {
+ data(thread)->_ergo_tlab_size = val;
+ }
+
+ static void set_last_tlab_time(Thread *thread, int64_t time) {
+ data(thread)->_last_tlab_time = time;
+ }
+};
+
+#endif // SHARE_VM_GC_EPSILON_EPSILONTHREADLOCALDATA_HPP
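The create()/destroy() pair above relies on the thread object reserving raw storage for GC-specific data, which the GC then constructs and destroys in place via placement new, with no separate heap allocation. A minimal standalone sketch of that lifecycle (FakeThread is invented for illustration):

    #include <cstddef>
    #include <cstdint>
    #include <new>

    struct GCData {                   // stand-in for EpsilonThreadLocalData
      std::size_t ergo_tlab_size = 0;
      std::int64_t last_tlab_time = 0;
    };

    struct FakeThread {
      // Raw, suitably aligned slot owned by the thread itself.
      alignas(GCData) unsigned char gc_data_storage[sizeof(GCData)];
      GCData* gc_data() { return reinterpret_cast<GCData*>(gc_data_storage); }
    };

    int main() {
      FakeThread t;
      new (t.gc_data()) GCData();         // on_thread_create
      t.gc_data()->ergo_tlab_size = 4096;
      t.gc_data()->~GCData();             // on_thread_destroy
    }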
diff --git a/src/hotspot/share/gc/epsilon/epsilon_globals.hpp b/src/hotspot/share/gc/epsilon/epsilon_globals.hpp
new file mode 100644
index 00000000000..6d0f314752b
--- /dev/null
+++ b/src/hotspot/share/gc/epsilon/epsilon_globals.hpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_EPSILON_GLOBALS_HPP
+#define SHARE_VM_GC_EPSILON_GLOBALS_HPP
+
+#include "runtime/globals.hpp"
+//
+// Defines all globals flags used by the Epsilon GC.
+//
+
+#define GC_EPSILON_FLAGS(develop, \
+ develop_pd, \
+ product, \
+ product_pd, \
+ diagnostic, \
+ diagnostic_pd, \
+ experimental, \
+ notproduct, \
+ manageable, \
+ product_rw, \
+ lp64_product, \
+ range, \
+ constraint, \
+ writeable) \
+ \
+ experimental(size_t, EpsilonPrintHeapSteps, 20, \
+ "Print heap occupancy stats with this number of steps. " \
+ "0 turns the printing off.") \
+ range(0, max_intx) \
+ \
+ experimental(size_t, EpsilonUpdateCountersStep, 1 * M, \
+ "Update heap occupancy counters after allocating this much " \
+ "memory. Higher values would make allocations faster at " \
+ "the expense of lower resolution in heap counters.") \
+ range(1, max_intx) \
+ \
+ experimental(size_t, EpsilonMaxTLABSize, 4 * M, \
+ "Max TLAB size to use with Epsilon GC. Larger value improves " \
+ "performance at the expense of per-thread memory waste. This " \
+ "asks TLAB machinery to cap TLAB sizes at this value.") \
+ range(1, max_intx) \
+ \
+ experimental(bool, EpsilonElasticTLAB, true, \
+ "Use elastic policy to manage TLAB sizes. This conserves memory " \
+ "for non-actively allocating threads, even when they request " \
+ "large TLABs for themselves. Active threads would experience " \
+ "smaller TLABs until policy catches up.") \
+ \
+ experimental(bool, EpsilonElasticTLABDecay, true, \
+ "Use timed decays to shrik TLAB sizes. This conserves memory " \
+ "for the threads that allocate in bursts of different sizes, " \
+ "for example the small/rare allocations coming after the initial "\
+ "large burst.") \
+ \
+ experimental(double, EpsilonTLABElasticity, 1.1, \
+ "Multiplier to use when deciding on next TLAB size. Larger value "\
+ "improves performance at the expense of per-thread memory waste. "\
+ "Lower value improves memory footprint, but penalizes actively " \
+ "allocating threads.") \
+ range(1, max_intx) \
+ \
+ experimental(size_t, EpsilonTLABDecayTime, 1000, \
+ "TLAB sizing policy decays to initial size after thread had not " \
+ "allocated for this long. Time is in milliseconds. Lower value " \
+ "improves memory footprint, but penalizes actively allocating " \
+ "threads.") \
+ range(1, max_intx) \
+ \
+ experimental(size_t, EpsilonMinHeapExpand, 128 * M, \
+ "Min expansion step for heap. Larger value improves performance " \
+ "at the potential expense of memory waste.") \
+ range(1, max_intx)
+
+#endif // SHARE_VM_GC_EPSILON_GLOBALS_HPP
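To make the interplay of the elasticity flags above concrete, here is a small simulation, with illustrative values, of a thread whose requests repeatedly outgrow its ergonomic TLAB size: each miss scales the next TLAB by EpsilonTLABElasticity, capped at EpsilonMaxTLABSize.

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      const double elasticity = 1.1;                  // EpsilonTLABElasticity
      const std::size_t max_tlab = 4 * 1024 * 1024;   // EpsilonMaxTLABSize
      std::size_t ergo = 64 * 1024;                   // current ergonomic size
      const std::size_t request = 256 * 1024;         // repeated allocation size

      // Successive TLAB requests: each one that does not fit grows the
      // ergonomic size geometrically until the request finally fits.
      int requests = 0;
      while (request > ergo) {
        ergo = std::min(max_tlab, static_cast<std::size_t>(ergo * elasticity));
        requests++;
      }
      std::printf("fits after %d requests, ergo = %zu bytes\n", requests, ergo);
    }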
diff --git a/src/hotspot/share/gc/epsilon/vmStructs_epsilon.hpp b/src/hotspot/share/gc/epsilon/vmStructs_epsilon.hpp
new file mode 100644
index 00000000000..39072ad649e
--- /dev/null
+++ b/src/hotspot/share/gc/epsilon/vmStructs_epsilon.hpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018, Red Hat Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_EPSILON_VMSTRUCTS_HPP
+#define SHARE_GC_EPSILON_VMSTRUCTS_HPP
+
+#include "gc/epsilon/epsilonHeap.hpp"
+#include "gc/shared/space.hpp"
+#include "memory/virtualspace.hpp"
+
+#define VM_STRUCTS_EPSILONGC(nonstatic_field, \
+ volatile_nonstatic_field, \
+ static_field) \
+ nonstatic_field(EpsilonHeap, _virtual_space, VirtualSpace) \
+ nonstatic_field(EpsilonHeap, _space, ContiguousSpace*)
+
+#define VM_TYPES_EPSILONGC(declare_type, \
+ declare_toplevel_type, \
+ declare_integer_type) \
+ declare_type(EpsilonHeap, CollectedHeap)
+
+#define VM_INT_CONSTANTS_EPSILONGC(declare_constant, \
+ declare_constant_with_value)
+
+#endif // SHARE_GC_EPSILON_VMSTRUCTS_HPP
diff --git a/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.cpp b/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.cpp
index 844e2440c33..66f4f64499e 100644
--- a/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.cpp
+++ b/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.cpp
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "gc/g1/c2/g1BarrierSetC2.hpp"
#include "gc/g1/g1BarrierSet.hpp"
+#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
@@ -33,10 +34,9 @@
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
#include "opto/type.hpp"
-#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
-const TypeFunc *G1BarrierSetC2::g1_wb_pre_Type() {
+const TypeFunc *G1BarrierSetC2::write_ref_field_pre_entry_Type() {
const Type **fields = TypeTuple::fields(2);
fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread
@@ -49,7 +49,7 @@ const TypeFunc *G1BarrierSetC2::g1_wb_pre_Type() {
return TypeFunc::make(domain, range);
}
-const TypeFunc *G1BarrierSetC2::g1_wb_post_Type() {
+const TypeFunc *G1BarrierSetC2::write_ref_field_post_entry_Type() {
const Type **fields = TypeTuple::fields(2);
fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Card addr
fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread
@@ -264,8 +264,8 @@ void G1BarrierSetC2::pre_barrier(GraphKit* kit,
} __ else_(); {
// logging buffer is full, call the runtime
- const TypeFunc *tf = g1_wb_pre_Type();
- __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", pre_val, tls);
+ const TypeFunc *tf = write_ref_field_pre_entry_Type();
+ __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), "write_ref_field_pre_entry", pre_val, tls);
} __ end_if(); // (!index)
} __ end_if(); // (pre_val != NULL)
} __ end_if(); // (!marking)
@@ -364,7 +364,7 @@ void G1BarrierSetC2::g1_mark_card(GraphKit* kit,
__ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw, MemNode::unordered);
} __ else_(); {
- __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
+ __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), "write_ref_field_post_entry", card_adr, __ thread());
} __ end_if();
}
@@ -419,7 +419,7 @@ void G1BarrierSetC2::post_barrier(GraphKit* kit,
Node* dirty_card = __ ConI((jint)G1CardTable::dirty_card_val());
Node* zeroX = __ ConX(0);
- const TypeFunc *tf = g1_wb_post_Type();
+ const TypeFunc *tf = write_ref_field_post_entry_Type();
// Offsets into the thread
const int index_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
@@ -652,7 +652,7 @@ bool G1BarrierSetC2::is_gc_barrier_node(Node* node) const {
return false;
}
- return strcmp(call->_name, "g1_wb_pre") == 0 || strcmp(call->_name, "g1_wb_post") == 0;
+ return strcmp(call->_name, "write_ref_field_pre_entry") == 0 || strcmp(call->_name, "write_ref_field_post_entry") == 0;
}
void G1BarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
@@ -747,7 +747,7 @@ Node* G1BarrierSetC2::step_over_gc_barrier(Node* c) const {
if (r->in(j) != NULL && r->in(j)->is_Proj() &&
r->in(j)->in(0) != NULL &&
r->in(j)->in(0)->Opcode() == Op_CallLeaf &&
- r->in(j)->in(0)->as_Call()->entry_point() == CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post)) {
+ r->in(j)->in(0)->as_Call()->entry_point() == CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry)) {
Node* call = r->in(j)->in(0);
c = c->in(i == 1 ? 2 : 1);
if (c != NULL) {
diff --git a/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.hpp b/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.hpp
index 6a3f1ccb169..4f8efa3a461 100644
--- a/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.hpp
+++ b/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.hpp
@@ -79,8 +79,8 @@ protected:
// Unsafe.getObject should be recorded in an SATB log buffer.
void insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar) const;
- static const TypeFunc* g1_wb_pre_Type();
- static const TypeFunc* g1_wb_post_Type();
+ static const TypeFunc* write_ref_field_pre_entry_Type();
+ static const TypeFunc* write_ref_field_post_entry_Type();
virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const;
diff --git a/src/hotspot/share/gc/g1/collectionSetChooser.cpp b/src/hotspot/share/gc/g1/collectionSetChooser.cpp
index f9aa2423e02..9d2c90b546c 100644
--- a/src/hotspot/share/gc/g1/collectionSetChooser.cpp
+++ b/src/hotspot/share/gc/g1/collectionSetChooser.cpp
@@ -147,7 +147,7 @@ void CollectionSetChooser::sort_regions() {
void CollectionSetChooser::add_region(HeapRegion* hr) {
assert(!hr->is_pinned(),
"Pinned region shouldn't be added to the collection set (index %u)", hr->hrm_index());
- assert(!hr->is_young(), "should not be young!");
+ assert(hr->is_old(), "should be old but is %s", hr->get_type_str());
assert(hr->rem_set()->is_complete(),
"Trying to add region %u to the collection set with incomplete remembered set", hr->hrm_index());
_regions.append(hr);
@@ -185,7 +185,7 @@ uint CollectionSetChooser::claim_array_chunk(uint chunk_size) {
void CollectionSetChooser::set_region(uint index, HeapRegion* hr) {
assert(regions_at(index) == NULL, "precondition");
- assert(!hr->is_young(), "should not be young!");
+ assert(hr->is_old(), "should be old but is %s", hr->get_type_str());
regions_at_put(index, hr);
hr->calc_gc_efficiency();
}
@@ -233,18 +233,19 @@ public:
_cset_updater(hrSorted, true /* parallel */, chunk_size) { }
bool do_heap_region(HeapRegion* r) {
- // Do we have any marking information for this region?
- if (r->is_marked()) {
- // We will skip any region that's currently used as an old GC
- // alloc region (we should not consider those for collection
- // before we fill them up).
- if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
- _cset_updater.add_region(r);
- } else if (r->is_old()) {
- // Can clean out the remembered sets of all regions that we did not choose but
- // we created the remembered set for.
- r->rem_set()->clear(true);
- }
+ // We will skip any region that's currently used as an old GC
+ // alloc region (we should not consider those for collection
+ // before we fill them up).
+ if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
+ _cset_updater.add_region(r);
+ } else if (r->is_old()) {
+ // Keep remembered sets for humongous regions, otherwise clean out remembered
+ // sets for old regions.
+ r->rem_set()->clear(true /* only_cardset */);
+ } else {
+ assert(!r->is_old() || !r->rem_set()->is_tracked(),
+ "Missed to clear unused remembered set of region %u (%s) that is %s",
+ r->hrm_index(), r->get_type_str(), r->rem_set()->get_state_str());
}
return false;
}
@@ -280,11 +281,10 @@ bool CollectionSetChooser::region_occupancy_low_enough_for_evac(size_t live_byte
}
bool CollectionSetChooser::should_add(HeapRegion* hr) const {
- assert(hr->is_marked(), "pre-condition");
- assert(!hr->is_young(), "should never consider young regions");
- return !hr->is_pinned() &&
- region_occupancy_low_enough_for_evac(hr->live_bytes()) &&
- hr->rem_set()->is_complete();
+ return !hr->is_young() &&
+ !hr->is_pinned() &&
+ region_occupancy_low_enough_for_evac(hr->live_bytes()) &&
+ hr->rem_set()->is_complete();
}
void CollectionSetChooser::rebuild(WorkGang* workers, uint n_regions) {
diff --git a/src/hotspot/share/gc/g1/g1AllocRegion.cpp b/src/hotspot/share/gc/g1/g1AllocRegion.cpp
index 73a5bdb6a11..174ddcd4e3f 100644
--- a/src/hotspot/share/gc/g1/g1AllocRegion.cpp
+++ b/src/hotspot/share/gc/g1/g1AllocRegion.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
#include "utilities/align.hpp"
G1CollectedHeap* G1AllocRegion::_g1h = NULL;
diff --git a/src/hotspot/share/gc/g1/g1BarrierSet.cpp b/src/hotspot/share/gc/g1/g1BarrierSet.cpp
index ecd1d174d44..8531d36e13c 100644
--- a/src/hotspot/share/gc/g1/g1BarrierSet.cpp
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.cpp
@@ -72,21 +72,6 @@ void G1BarrierSet::enqueue(oop pre_val) {
}
}
-void G1BarrierSet::write_ref_array_pre_oop_entry(oop* dst, size_t length) {
- G1BarrierSet *bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
- bs->write_ref_array_pre(dst, length, false);
-}
-
-void G1BarrierSet::write_ref_array_pre_narrow_oop_entry(narrowOop* dst, size_t length) {
- G1BarrierSet *bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
- bs->write_ref_array_pre(dst, length, false);
-}
-
-void G1BarrierSet::write_ref_array_post_entry(HeapWord* dst, size_t length) {
- G1BarrierSet *bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
- bs->G1BarrierSet::write_ref_array(dst, length);
-}
-
template <class T> void
G1BarrierSet::write_ref_array_pre_work(T* dst, size_t count) {
if (!_satb_mark_queue_set.is_active()) return;
diff --git a/src/hotspot/share/gc/g1/g1BarrierSet.hpp b/src/hotspot/share/gc/g1/g1BarrierSet.hpp
index 3f4af78bcbd..7a789226041 100644
--- a/src/hotspot/share/gc/g1/g1BarrierSet.hpp
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.hpp
@@ -50,16 +50,12 @@ class G1BarrierSet: public CardTableBarrierSet {
// pre-marking object graph.
static void enqueue(oop pre_val);
- static void enqueue_if_weak_or_archive(DecoratorSet decorators, oop value);
+ static void enqueue_if_weak(DecoratorSet decorators, oop value);
template <class T> void write_ref_array_pre_work(T* dst, size_t count);
virtual void write_ref_array_pre(oop* dst, size_t count, bool dest_uninitialized);
virtual void write_ref_array_pre(narrowOop* dst, size_t count, bool dest_uninitialized);
- static void write_ref_array_pre_oop_entry(oop* dst, size_t length);
- static void write_ref_array_pre_narrow_oop_entry(narrowOop* dst, size_t length);
- static void write_ref_array_post_entry(HeapWord* dst, size_t length);
-
template <DecoratorSet decorators, typename T>
void write_ref_field_pre(T* field);
diff --git a/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp b/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp
index 759684da212..f16f2e56cd8 100644
--- a/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp
@@ -54,15 +54,14 @@ inline void G1BarrierSet::write_ref_field_post(T* field, oop new_val) {
}
}
-inline void G1BarrierSet::enqueue_if_weak_or_archive(DecoratorSet decorators, oop value) {
+inline void G1BarrierSet::enqueue_if_weak(DecoratorSet decorators, oop value) {
assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Reference strength must be known");
- // Archive roots need to be enqueued since they add subgraphs to the
- // Java heap that were not there at the snapshot when marking started.
- // Weak and phantom references also need enqueueing for similar reasons.
- const bool in_archive_root = (decorators & IN_ARCHIVE_ROOT) != 0;
+ // Loading from a weak or phantom reference needs enqueueing, as
+ // the object may not have been reachable (part of the snapshot)
+ // when marking started.
const bool on_strong_oop_ref = (decorators & ON_STRONG_OOP_REF) != 0;
const bool peek = (decorators & AS_NO_KEEPALIVE) != 0;
- const bool needs_enqueue = in_archive_root || (!peek && !on_strong_oop_ref);
+ const bool needs_enqueue = (!peek && !on_strong_oop_ref);
if (needs_enqueue && value != NULL) {
enqueue(value);
@@ -74,7 +73,7 @@ template
inline oop G1BarrierSet::AccessBarrier<decorators, BarrierSetT>::
oop_load_not_in_heap(T* addr) {
oop value = ModRef::oop_load_not_in_heap(addr);
- enqueue_if_weak_or_archive(decorators, value);
+ enqueue_if_weak(decorators, value);
return value;
}
@@ -83,7 +82,7 @@ template
inline oop G1BarrierSet::AccessBarrier<decorators, BarrierSetT>::
oop_load_in_heap(T* addr) {
oop value = ModRef::oop_load_in_heap(addr);
- enqueue_if_weak_or_archive(decorators, value);
+ enqueue_if_weak(decorators, value);
return value;
}
@@ -91,7 +90,7 @@ template
inline oop G1BarrierSet::AccessBarrier<decorators, BarrierSetT>::
oop_load_in_heap_at(oop base, ptrdiff_t offset) {
oop value = ModRef::oop_load_in_heap_at(base, offset);
- enqueue_if_weak_or_archive(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength(base, offset), value);
+ enqueue_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength(base, offset), value);
return value;
}
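After this rename the enqueue decision is purely about reference strength; archive roots are handled separately (see materialize_archived_object later in this patch). A compact sketch of the predicate, with invented decorator values:

    #include <cassert>
    #include <cstdint>

    typedef std::uint64_t DecoratorSet;
    const DecoratorSet ON_STRONG_OOP_REF = 1u << 0;
    const DecoratorSet AS_NO_KEEPALIVE   = 1u << 1;

    // Only loads that are neither known-strong nor no-keepalive "peeks"
    // need to push the loaded value onto the SATB queue.
    bool needs_enqueue(DecoratorSet d) {
      const bool on_strong = (d & ON_STRONG_OOP_REF) != 0;
      const bool peek      = (d & AS_NO_KEEPALIVE) != 0;
      return !peek && !on_strong;
    }

    int main() {
      assert(needs_enqueue(0));                   // weak/phantom load
      assert(!needs_enqueue(ON_STRONG_OOP_REF));  // strong load
      assert(!needs_enqueue(AS_NO_KEEPALIVE));    // peeking load
      return 0;
    }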
diff --git a/src/hotspot/share/gc/g1/g1BarrierSetRuntime.cpp b/src/hotspot/share/gc/g1/g1BarrierSetRuntime.cpp
new file mode 100644
index 00000000000..37a05346881
--- /dev/null
+++ b/src/hotspot/share/gc/g1/g1BarrierSetRuntime.cpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1BarrierSet.inline.hpp"
+#include "gc/g1/g1BarrierSetRuntime.hpp"
+#include "gc/g1/g1ThreadLocalData.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+#include "utilities/macros.hpp"
+
+void G1BarrierSetRuntime::write_ref_array_pre_oop_entry(oop* dst, size_t length) {
+ G1BarrierSet *bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
+ bs->write_ref_array_pre(dst, length, false);
+}
+
+void G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry(narrowOop* dst, size_t length) {
+ G1BarrierSet *bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
+ bs->write_ref_array_pre(dst, length, false);
+}
+
+void G1BarrierSetRuntime::write_ref_array_post_entry(HeapWord* dst, size_t length) {
+ G1BarrierSet *bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
+ bs->G1BarrierSet::write_ref_array(dst, length);
+}
+
+// G1 pre write barrier slowpath
+JRT_LEAF(void, G1BarrierSetRuntime::write_ref_field_pre_entry(oopDesc* orig, JavaThread *thread))
+ if (orig == NULL) {
+ assert(false, "should be optimized out");
+ return;
+ }
+ assert(oopDesc::is_oop(orig, true /* ignore mark word */), "Error");
+ // store the original value that was in the field reference
+ G1ThreadLocalData::satb_mark_queue(thread).enqueue(orig);
+JRT_END
+
+// G1 post write barrier slowpath
+JRT_LEAF(void, G1BarrierSetRuntime::write_ref_field_post_entry(void* card_addr, JavaThread* thread))
+ G1ThreadLocalData::dirty_card_queue(thread).enqueue(card_addr);
+JRT_END
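The write_ref_field_pre_entry slow path above upholds the snapshot-at-the-beginning invariant: the value about to be overwritten is logged so concurrent marking still visits it. A toy model of that invariant, single-threaded and with a global queue instead of G1's per-thread queues:

    #include <cstdio>
    #include <vector>

    struct Obj { int id; };

    static std::vector<Obj*> satb_queue;   // per-thread in HotSpot
    static bool marking_active = true;

    // SATB pre-write barrier: log the old value before overwriting it.
    void write_field(Obj** field, Obj* new_val) {
      Obj* old = *field;
      if (marking_active && old != nullptr) {
        satb_queue.push_back(old);         // keeps the snapshot reachable
      }
      *field = new_val;
    }

    int main() {
      Obj a{1}, b{2};
      Obj* slot = &a;
      write_field(&slot, &b);
      std::printf("logged %zu pre-value(s)\n", satb_queue.size());  // 1
    }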
diff --git a/src/hotspot/share/gc/g1/g1BarrierSetRuntime.hpp b/src/hotspot/share/gc/g1/g1BarrierSetRuntime.hpp
new file mode 100644
index 00000000000..89be1d9c3f5
--- /dev/null
+++ b/src/hotspot/share/gc/g1/g1BarrierSetRuntime.hpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_G1BARRIERSETRUNTIME_HPP
+#define SHARE_GC_G1_G1BARRIERSETRUNTIME_HPP
+
+#include "memory/allocation.hpp"
+#include "oops/oopsHierarchy.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+class oopDesc;
+class JavaThread;
+
+class G1BarrierSetRuntime: public AllStatic {
+public:
+ // Arraycopy stub generator
+ static void write_ref_array_pre_oop_entry(oop* dst, size_t length);
+ static void write_ref_array_pre_narrow_oop_entry(narrowOop* dst, size_t length);
+ static void write_ref_array_post_entry(HeapWord* dst, size_t length);
+
+ // C2 slow-path runtime calls.
+ static void write_ref_field_pre_entry(oopDesc* orig, JavaThread *thread);
+ static void write_ref_field_post_entry(void* card_addr, JavaThread* thread);
+};
+
+#endif // SHARE_GC_G1_G1BARRIERSETRUNTIME_HPP
diff --git a/src/hotspot/share/gc/g1/g1CardTable.cpp b/src/hotspot/share/gc/g1/g1CardTable.cpp
index 3a68f4a7c46..b66ddd28a5b 100644
--- a/src/hotspot/share/gc/g1/g1CardTable.cpp
+++ b/src/hotspot/share/gc/g1/g1CardTable.cpp
@@ -28,7 +28,7 @@
#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "logging/log.hpp"
#include "runtime/atomic.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
bool G1CardTable::mark_card_deferred(size_t card_index) {
jbyte val = _byte_map[card_index];
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
index 63a11e86d8a..e3256ddbecf 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -69,6 +69,7 @@
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/isGCActiveMark.hpp"
+#include "gc/shared/oopStorageParState.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/referenceProcessor.inline.hpp"
@@ -77,6 +78,7 @@
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
+#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
@@ -86,7 +88,7 @@
#include "runtime/flags/flagSetting.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
@@ -822,6 +824,18 @@ void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
decrease_used(size_used);
}
+oop G1CollectedHeap::materialize_archived_object(oop obj) {
+ assert(obj != NULL, "archived obj is NULL");
+ assert(MetaspaceShared::is_archive_object(obj), "must be archived object");
+
+ // Loading an archived object makes it strongly reachable. If it is
+ // loaded during concurrent marking, it must be enqueued to the SATB
+ // queue, shading the previously white object gray.
+ G1BarrierSet::enqueue(obj);
+
+ return obj;
+}
+
HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
ResourceMark rm; // For retrieving the thread names in log messages.
@@ -3218,6 +3232,7 @@ class G1StringAndSymbolCleaningTask : public AbstractGangTask {
private:
BoolObjectClosure* _is_alive;
G1StringDedupUnlinkOrOopsDoClosure _dedup_closure;
+  OopStorage::ParState<false /* concurrent */, false /* const */> _par_state_string;
int _initial_string_table_size;
int _initial_symbol_table_size;
@@ -3237,24 +3252,19 @@ public:
AbstractGangTask("String/Symbol Unlinking"),
_is_alive(is_alive),
_dedup_closure(is_alive, NULL, false),
+ _par_state_string(StringTable::weak_storage()),
_process_strings(process_strings), _strings_processed(0), _strings_removed(0),
_process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0),
_process_string_dedup(process_string_dedup) {
- _initial_string_table_size = StringTable::the_table()->table_size();
+ _initial_string_table_size = (int) StringTable::the_table()->table_size();
_initial_symbol_table_size = SymbolTable::the_table()->table_size();
- if (process_strings) {
- StringTable::clear_parallel_claimed_index();
- }
if (process_symbols) {
SymbolTable::clear_parallel_claimed_index();
}
}
~G1StringAndSymbolCleaningTask() {
- guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
- "claim value %d after unlink less than initial string table size %d",
- StringTable::parallel_claimed_index(), _initial_string_table_size);
guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
"claim value %d after unlink less than initial symbol table size %d",
SymbolTable::parallel_claimed_index(), _initial_symbol_table_size);
@@ -3273,7 +3283,7 @@ public:
int symbols_processed = 0;
int symbols_removed = 0;
if (_process_strings) {
- StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
+ StringTable::possibly_parallel_unlink(&_par_state_string, _is_alive, &strings_processed, &strings_removed);
Atomic::add(strings_processed, &_strings_processed);
Atomic::add(strings_removed, &_strings_removed);
}
@@ -3355,7 +3365,7 @@ private:
add_to_postponed_list(nm);
}
- // Mark that this thread has been cleaned/unloaded.
+ // Mark that this nmethod has been cleaned/unloaded.
// After this call, it will be safe to ask if this nmethod was unloaded or not.
nm->set_unloading_clock(CompiledMethod::global_unloading_clock());
}
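The hunks above replace StringTable's hand-rolled parallel_claimed_index bookkeeping with an OopStorage::ParState that each worker consults. A standalone sketch of the chunk-claiming protocol such a ParState provides, with illustrative names rather than HotSpot's API:

    #include <atomic>
    #include <cstddef>

    class ParState {
      std::atomic<size_t> _next;
      const size_t _limit;   // total number of slots
      const size_t _chunk;   // slots handed out per claim
    public:
      ParState(size_t limit, size_t chunk) : _next(0), _limit(limit), _chunk(chunk) {}

      // Each call hands out a disjoint [begin, end) range; returns false once
      // the whole table has been claimed, so every slot is visited exactly once.
      bool claim(size_t* begin, size_t* end) {
        size_t b = _next.fetch_add(_chunk, std::memory_order_relaxed);
        if (b >= _limit) return false;
        *begin = b;
        *end = (b + _chunk < _limit) ? (b + _chunk) : _limit;
        return true;
      }
    };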
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
index 2d9c042bced..31cf2d99764 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -699,6 +699,8 @@ public:
// mapping failed, with the same non-overlapping and sorted MemRegion array.
void dealloc_archive_regions(MemRegion* range, size_t count);
+ oop materialize_archived_object(oop obj);
+
private:
// Shrink the garbage-first heap by at most the given size (in bytes!).
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
index afd5f5f19a0..30000fd5b34 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
@@ -32,7 +32,7 @@
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/taskqueue.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
G1EvacStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) {
switch (dest.value()) {
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
index 028fb8e2a1e..37ed91d9c73 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -1625,7 +1625,7 @@ void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
// Reference lists are balanced (see balance_all_queues() and balance_queues()).
rp->set_active_mt_degree(active_workers);
- ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_queues());
+ ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues());
// Process the weak references.
const ReferenceProcessorStats& stats =
@@ -1651,7 +1651,11 @@ void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
}
if (has_overflown()) {
- // We can not trust g1_is_alive if the marking stack overflowed
+ // We can not trust g1_is_alive and the contents of the heap if the marking stack
+ // overflowed while processing references. Exit the VM.
+ fatal("Overflow during reference processing, can not continue. Please "
+ "increase MarkStackSizeMax (current value: " SIZE_FORMAT ") and "
+ "restart.", MarkStackSizeMax);
return;
}
@@ -1665,7 +1669,7 @@ void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
// Unload Klasses, String, Symbols, Code Cache, etc.
if (ClassUnloadingWithConcurrentMark) {
GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
- bool purged_classes = SystemDictionary::do_unloading(&g1_is_alive, _gc_timer_cm, false /* Defer cleaning */);
+ bool purged_classes = SystemDictionary::do_unloading(_gc_timer_cm, false /* Defer cleaning */);
_g1h->complete_cleaning(&g1_is_alive, purged_classes);
} else {
GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm);
diff --git a/src/hotspot/share/gc/g1/g1FullCollector.cpp b/src/hotspot/share/gc/g1/g1FullCollector.cpp
index cebcef28d9f..4362ee87e30 100644
--- a/src/hotspot/share/gc/g1/g1FullCollector.cpp
+++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp
@@ -222,7 +222,7 @@ void G1FullCollector::phase1_mark_live_objects() {
if (ClassUnloading) {
GCTraceTime(Debug, gc, phases) debug("Phase 1: Class Unloading and Cleanup", scope()->timer());
// Unload classes and purge the SystemDictionary.
- bool purged_class = SystemDictionary::do_unloading(&_is_alive, scope()->timer());
+ bool purged_class = SystemDictionary::do_unloading(scope()->timer());
_heap->complete_cleaning(&_is_alive, purged_class);
} else {
GCTraceTime(Debug, gc, phases) debug("Phase 1: String and Symbol Tables Cleanup", scope()->timer());
diff --git a/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.cpp b/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.cpp
index eb1931fdfbf..43eeddfaedf 100644
--- a/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.cpp
+++ b/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.cpp
@@ -78,7 +78,7 @@ void G1FullGCReferenceProcessingExecutor::execute(STWGCTimer* timer, G1FullGCTra
G1FullGCMarker* marker = _collector->marker(0);
G1IsAliveClosure is_alive(_collector->mark_bitmap());
G1FullKeepAliveClosure keep_alive(marker);
- ReferenceProcessorPhaseTimes pt(timer, _reference_processor->num_queues());
+ ReferenceProcessorPhaseTimes pt(timer, _reference_processor->max_num_queues());
AbstractRefProcTaskExecutor* executor = _reference_processor->processing_is_mt() ? this : NULL;
// Process discovered references, use this executor if multi-threaded
diff --git a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp
index 5aeb4baa453..8d70c0fe108 100644
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp
@@ -468,15 +468,24 @@ G1EvacPhaseWithTrimTimeTracker::G1EvacPhaseWithTrimTimeTracker(G1ParScanThreadSt
_pss(pss),
_start(Ticks::now()),
_total_time(total_time),
- _trim_time(trim_time) {
+ _trim_time(trim_time),
+ _stopped(false) {
assert(_pss->trim_ticks().value() == 0, "Possibly remaining trim ticks left over from previous use");
}
G1EvacPhaseWithTrimTimeTracker::~G1EvacPhaseWithTrimTimeTracker() {
+ if (!_stopped) {
+ stop();
+ }
+}
+
+void G1EvacPhaseWithTrimTimeTracker::stop() {
+ assert(!_stopped, "Should only be called once");
_total_time += (Ticks::now() - _start) - _pss->trim_ticks();
_trim_time += _pss->trim_ticks();
_pss->reset_trim_ticks();
+ _stopped = true;
}
G1GCParPhaseTimesTracker::G1GCParPhaseTimesTracker(G1GCPhaseTimes* phase_times, G1GCPhaseTimes::GCParPhases phase, uint worker_id) :
@@ -504,6 +513,8 @@ G1EvacPhaseTimesTracker::G1EvacPhaseTimesTracker(G1GCPhaseTimes* phase_times,
G1EvacPhaseTimesTracker::~G1EvacPhaseTimesTracker() {
if (_phase_times != NULL) {
+ // Explicitly stop the trim tracker since it's not yet destructed.
+ _trim_tracker.stop();
// Exclude trim time by increasing the start time.
_start_time += _trim_time;
_phase_times->record_or_add_objcopy_time_secs(_worker_id, _trim_time.seconds());
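G1EvacPhaseWithTrimTimeTracker gains an explicit, idempotent stop() so its owner can finalize timing before destruction; G1EvacPhaseTimesTracker needs the trim time while the inner tracker is still alive. A standalone sketch of the pattern with simplified types, not HotSpot's:

    #include <chrono>

    class ScopedPhaseTimer {
      using Clock = std::chrono::steady_clock;
      Clock::time_point _start;
      double& _total_seconds;
      bool _stopped;
    public:
      explicit ScopedPhaseTimer(double& total)
        : _start(Clock::now()), _total_seconds(total), _stopped(false) {}

      void stop() {                  // records once; later calls are no-ops
        if (_stopped) return;
        _total_seconds += std::chrono::duration<double>(Clock::now() - _start).count();
        _stopped = true;
      }

      ~ScopedPhaseTimer() { if (!_stopped) stop(); }  // default: stop at scope exit
    };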
diff --git a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp
index d5ec33400c0..3b20cfe4d2d 100644
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp
@@ -373,9 +373,13 @@ class G1EvacPhaseWithTrimTimeTracker : public StackObj {
Tickspan& _total_time;
Tickspan& _trim_time;
+
+ bool _stopped;
public:
G1EvacPhaseWithTrimTimeTracker(G1ParScanThreadState* pss, Tickspan& total_time, Tickspan& trim_time);
~G1EvacPhaseWithTrimTimeTracker();
+
+ void stop();
};
class G1GCParPhaseTimesTracker : public CHeapObj<mtGC> {
diff --git a/src/hotspot/share/gc/g1/g1Policy.cpp b/src/hotspot/share/gc/g1/g1Policy.cpp
index 4f882a1fb20..d94a1173208 100644
--- a/src/hotspot/share/gc/g1/g1Policy.cpp
+++ b/src/hotspot/share/gc/g1/g1Policy.cpp
@@ -825,10 +825,10 @@ double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards) const {
size_t G1Policy::predict_bytes_to_copy(HeapRegion* hr) const {
size_t bytes_to_copy;
- if (hr->is_marked())
+ if (!hr->is_young()) {
bytes_to_copy = hr->max_live_bytes();
- else {
- assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
+ } else {
+ assert(hr->age_in_surv_rate_group() != -1, "invariant");
int age = hr->age_in_surv_rate_group();
double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
bytes_to_copy = (size_t) (hr->used() * yg_surv_rate);
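predict_bytes_to_copy now branches on is_young() rather than the removed is_marked(): a non-young region is predicted to copy everything still live, while a young region scales its used bytes by the predicted survivor rate for its age. A standalone sketch with made-up inputs:

    #include <cstddef>

    size_t predict_bytes_to_copy(bool is_young, size_t used_bytes,
                                 size_t max_live_bytes, double surv_rate) {
      if (!is_young) {
        return max_live_bytes;                 // copy all live bytes of an old region
      }
      return (size_t)(used_bytes * surv_rate); // e.g. 8 MB used * 0.15 = ~1.2 MB
    }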
diff --git a/src/hotspot/share/gc/g1/g1RootProcessor.cpp b/src/hotspot/share/gc/g1/g1RootProcessor.cpp
index a2c4d07b714..e47c9f7baea 100644
--- a/src/hotspot/share/gc/g1/g1RootProcessor.cpp
+++ b/src/hotspot/share/gc/g1/g1RootProcessor.cpp
@@ -38,6 +38,7 @@
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/heapRegion.inline.hpp"
+#include "gc/shared/oopStorageParState.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "memory/allocation.inline.hpp"
@@ -72,6 +73,7 @@ G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h, uint n_workers) :
_process_strong_tasks(G1RP_PS_NumElements),
_srs(n_workers),
_lock(Mutex::leaf, "G1 Root Scanning barrier lock", false, Monitor::_safepoint_check_never),
+ _par_state_string(StringTable::weak_storage()),
_n_workers_discovered_strong_classes(0) {}
void G1RootProcessor::evacuate_roots(G1ParScanThreadState* pss, uint worker_i) {
@@ -241,7 +243,6 @@ void G1RootProcessor::process_vm_roots(G1RootClosures* closures,
G1GCPhaseTimes* phase_times,
uint worker_i) {
OopClosure* strong_roots = closures->strong_oops();
- OopClosure* weak_roots = closures->weak_oops();
{
G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::UniverseRoots, worker_i);
@@ -290,7 +291,7 @@ void G1RootProcessor::process_vm_roots(G1RootClosures* closures,
{
G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SystemDictionaryRoots, worker_i);
if (!_process_strong_tasks.is_task_claimed(G1RP_PS_SystemDictionary_oops_do)) {
- SystemDictionary::roots_oops_do(strong_roots, weak_roots);
+ SystemDictionary::oops_do(strong_roots);
}
}
}
@@ -302,7 +303,7 @@ void G1RootProcessor::process_string_table_roots(G1RootClosures* closures,
G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::StringTableRoots, worker_i);
// All threads execute the following. A specific chunk of buckets
// from the StringTable are the individual tasks.
- StringTable::possibly_parallel_oops_do(closures->weak_oops());
+ StringTable::possibly_parallel_oops_do(&_par_state_string, closures->weak_oops());
}
void G1RootProcessor::process_code_cache_roots(CodeBlobClosure* code_closure,
diff --git a/src/hotspot/share/gc/g1/g1RootProcessor.hpp b/src/hotspot/share/gc/g1/g1RootProcessor.hpp
index c4b5f03016e..3c2e811cb80 100644
--- a/src/hotspot/share/gc/g1/g1RootProcessor.hpp
+++ b/src/hotspot/share/gc/g1/g1RootProcessor.hpp
@@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_G1_G1ROOTPROCESSOR_HPP
#define SHARE_VM_GC_G1_G1ROOTPROCESSOR_HPP
+#include "gc/shared/oopStorageParState.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "memory/allocation.hpp"
#include "runtime/mutex.hpp"
@@ -49,6 +50,7 @@ class G1RootProcessor : public StackObj {
G1CollectedHeap* _g1h;
SubTasksDone _process_strong_tasks;
StrongRootsScope _srs;
+  OopStorage::ParState<false, false> _par_state_string;
// Used to implement the Thread work barrier.
Monitor _lock;
diff --git a/src/hotspot/share/gc/g1/g1_globals.hpp b/src/hotspot/share/gc/g1/g1_globals.hpp
index 6c2263586e6..8c7aec8472e 100644
--- a/src/hotspot/share/gc/g1/g1_globals.hpp
+++ b/src/hotspot/share/gc/g1/g1_globals.hpp
@@ -108,9 +108,6 @@
"When expanding, % of uncommitted space to claim.") \
range(0, 100) \
\
- develop(bool, G1RSBarrierRegionFilter, true, \
- "If true, generate region filtering code in RS barrier") \
- \
product(size_t, G1UpdateBufferSize, 256, \
"Size of an update buffer") \
range(1, NOT_LP64(32*M) LP64_ONLY(1*G)) \
diff --git a/src/hotspot/share/gc/g1/heapRegion.cpp b/src/hotspot/share/gc/g1/heapRegion.cpp
index 057e7cb4a67..783a4ebcbdb 100644
--- a/src/hotspot/share/gc/g1/heapRegion.cpp
+++ b/src/hotspot/share/gc/g1/heapRegion.cpp
@@ -43,7 +43,7 @@
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
#include "utilities/growableArray.hpp"
int HeapRegion::LogOfHRGrainBytes = 0;
diff --git a/src/hotspot/share/gc/g1/heapRegion.hpp b/src/hotspot/share/gc/g1/heapRegion.hpp
index 3bb6049332d..01d3c4d8758 100644
--- a/src/hotspot/share/gc/g1/heapRegion.hpp
+++ b/src/hotspot/share/gc/g1/heapRegion.hpp
@@ -541,10 +541,6 @@ class HeapRegion: public G1ContiguousSpace {
// objects during evac failure handling.
void note_self_forwarding_removal_end(size_t marked_bytes);
- // Returns "false" iff no object in the region was allocated when the
- // last mark phase ended.
- bool is_marked() { return _prev_top_at_mark_start != bottom(); }
-
void reset_during_compaction() {
assert(is_humongous(),
"should only be called for humongous regions");
diff --git a/src/hotspot/share/gc/g1/jvmFlagConstraintsG1.cpp b/src/hotspot/share/gc/g1/jvmFlagConstraintsG1.cpp
index 43de961a1f6..a5e10c4b674 100644
--- a/src/hotspot/share/gc/g1/jvmFlagConstraintsG1.cpp
+++ b/src/hotspot/share/gc/g1/jvmFlagConstraintsG1.cpp
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "gc/g1/heapRegionBounds.inline.hpp"
-#include "runtime/flags/jvmFlagRangeList.hpp"
+#include "gc/g1/jvmFlagConstraintsG1.hpp"
#include "runtime/globals_extension.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -34,10 +34,10 @@ JVMFlag::Error G1RSetRegionEntriesConstraintFunc(intx value, bool verbose) {
// Default value of G1RSetRegionEntries=0 means will be set ergonomically.
// Minimum value is 1.
if (FLAG_IS_CMDLINE(G1RSetRegionEntries) && (value < 1)) {
- CommandLineError::print(verbose,
- "G1RSetRegionEntries (" INTX_FORMAT ") must be "
- "greater than or equal to 1\n",
- value);
+ JVMFlag::printError(verbose,
+ "G1RSetRegionEntries (" INTX_FORMAT ") must be "
+ "greater than or equal to 1\n",
+ value);
return JVMFlag::VIOLATES_CONSTRAINT;
} else {
return JVMFlag::SUCCESS;
@@ -50,10 +50,10 @@ JVMFlag::Error G1RSetSparseRegionEntriesConstraintFunc(intx value, bool verbose)
// Default value of G1RSetSparseRegionEntries=0 means will be set ergonomically.
// Minimum value is 1.
if (FLAG_IS_CMDLINE(G1RSetSparseRegionEntries) && (value < 1)) {
- CommandLineError::print(verbose,
- "G1RSetSparseRegionEntries (" INTX_FORMAT ") must be "
- "greater than or equal to 1\n",
- value);
+ JVMFlag::printError(verbose,
+ "G1RSetSparseRegionEntries (" INTX_FORMAT ") must be "
+ "greater than or equal to 1\n",
+ value);
return JVMFlag::VIOLATES_CONSTRAINT;
} else {
return JVMFlag::SUCCESS;
@@ -65,10 +65,10 @@ JVMFlag::Error G1HeapRegionSizeConstraintFunc(size_t value, bool verbose) {
// Default value of G1HeapRegionSize=0 means will be set ergonomically.
if (FLAG_IS_CMDLINE(G1HeapRegionSize) && (value < HeapRegionBounds::min_size())) {
- CommandLineError::print(verbose,
- "G1HeapRegionSize (" SIZE_FORMAT ") must be "
- "greater than or equal to ergonomic heap region minimum size\n",
- value);
+ JVMFlag::printError(verbose,
+ "G1HeapRegionSize (" SIZE_FORMAT ") must be "
+ "greater than or equal to ergonomic heap region minimum size\n",
+ value);
return JVMFlag::VIOLATES_CONSTRAINT;
} else {
return JVMFlag::SUCCESS;
@@ -79,10 +79,10 @@ JVMFlag::Error G1NewSizePercentConstraintFunc(uintx value, bool verbose) {
if (!UseG1GC) return JVMFlag::SUCCESS;
if (value > G1MaxNewSizePercent) {
- CommandLineError::print(verbose,
- "G1NewSizePercent (" UINTX_FORMAT ") must be "
- "less than or equal to G1MaxNewSizePercent (" UINTX_FORMAT ")\n",
- value, G1MaxNewSizePercent);
+ JVMFlag::printError(verbose,
+ "G1NewSizePercent (" UINTX_FORMAT ") must be "
+ "less than or equal to G1MaxNewSizePercent (" UINTX_FORMAT ")\n",
+ value, G1MaxNewSizePercent);
return JVMFlag::VIOLATES_CONSTRAINT;
} else {
return JVMFlag::SUCCESS;
@@ -93,10 +93,10 @@ JVMFlag::Error G1MaxNewSizePercentConstraintFunc(uintx value, bool verbose) {
if (!UseG1GC) return JVMFlag::SUCCESS;
if (value < G1NewSizePercent) {
- CommandLineError::print(verbose,
- "G1MaxNewSizePercent (" UINTX_FORMAT ") must be "
- "greater than or equal to G1NewSizePercent (" UINTX_FORMAT ")\n",
- value, G1NewSizePercent);
+ JVMFlag::printError(verbose,
+ "G1MaxNewSizePercent (" UINTX_FORMAT ") must be "
+ "greater than or equal to G1NewSizePercent (" UINTX_FORMAT ")\n",
+ value, G1NewSizePercent);
return JVMFlag::VIOLATES_CONSTRAINT;
} else {
return JVMFlag::SUCCESS;
@@ -105,10 +105,10 @@ JVMFlag::Error G1MaxNewSizePercentConstraintFunc(uintx value, bool verbose) {
JVMFlag::Error MaxGCPauseMillisConstraintFuncG1(uintx value, bool verbose) {
if (UseG1GC && FLAG_IS_CMDLINE(MaxGCPauseMillis) && (value >= GCPauseIntervalMillis)) {
- CommandLineError::print(verbose,
- "MaxGCPauseMillis (" UINTX_FORMAT ") must be "
- "less than GCPauseIntervalMillis (" UINTX_FORMAT ")\n",
- value, GCPauseIntervalMillis);
+ JVMFlag::printError(verbose,
+ "MaxGCPauseMillis (" UINTX_FORMAT ") must be "
+ "less than GCPauseIntervalMillis (" UINTX_FORMAT ")\n",
+ value, GCPauseIntervalMillis);
return JVMFlag::VIOLATES_CONSTRAINT;
}
@@ -119,25 +119,25 @@ JVMFlag::Error GCPauseIntervalMillisConstraintFuncG1(uintx value, bool verbose)
if (UseG1GC) {
if (FLAG_IS_CMDLINE(GCPauseIntervalMillis)) {
if (value < 1) {
- CommandLineError::print(verbose,
- "GCPauseIntervalMillis (" UINTX_FORMAT ") must be "
- "greater than or equal to 1\n",
- value);
+ JVMFlag::printError(verbose,
+ "GCPauseIntervalMillis (" UINTX_FORMAT ") must be "
+ "greater than or equal to 1\n",
+ value);
return JVMFlag::VIOLATES_CONSTRAINT;
}
if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
- CommandLineError::print(verbose,
- "GCPauseIntervalMillis cannot be set "
- "without setting MaxGCPauseMillis\n");
+ JVMFlag::printError(verbose,
+ "GCPauseIntervalMillis cannot be set "
+ "without setting MaxGCPauseMillis\n");
return JVMFlag::VIOLATES_CONSTRAINT;
}
if (value <= MaxGCPauseMillis) {
- CommandLineError::print(verbose,
- "GCPauseIntervalMillis (" UINTX_FORMAT ") must be "
- "greater than MaxGCPauseMillis (" UINTX_FORMAT ")\n",
- value, MaxGCPauseMillis);
+ JVMFlag::printError(verbose,
+ "GCPauseIntervalMillis (" UINTX_FORMAT ") must be "
+ "greater than MaxGCPauseMillis (" UINTX_FORMAT ")\n",
+ value, MaxGCPauseMillis);
return JVMFlag::VIOLATES_CONSTRAINT;
}
}
@@ -153,9 +153,9 @@ JVMFlag::Error NewSizeConstraintFuncG1(size_t value, bool verbose) {
// i.e. result of '(uint)(NewSize / region size(1~32MB))'
// So maximum of NewSize should be 'max_juint * 1M'
if (UseG1GC && (value > (max_juint * 1 * M))) {
- CommandLineError::print(verbose,
- "NewSize (" SIZE_FORMAT ") must be less than ergonomic maximum value\n",
- value);
+ JVMFlag::printError(verbose,
+ "NewSize (" SIZE_FORMAT ") must be less than ergonomic maximum value\n",
+ value);
return JVMFlag::VIOLATES_CONSTRAINT;
}
#endif // _LP64
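These hunks mechanically move the G1 constraint functions from CommandLineError::print to JVMFlag::printError. A standalone sketch of the constraint-function shape they all share: validate the value, print a diagnostic only when verbose, and return a status the flag framework acts on (simplified types, not the real JVMFlag machinery):

    #include <cstdio>

    enum class FlagError { SUCCESS, VIOLATES_CONSTRAINT };

    FlagError G1RSetRegionEntriesConstraint(long value, bool verbose) {
      if (value < 1) {
        if (verbose) {
          std::fprintf(stderr,
                       "G1RSetRegionEntries (%ld) must be greater than or equal to 1\n",
                       value);
        }
        return FlagError::VIOLATES_CONSTRAINT;  // startup fails with this status
      }
      return FlagError::SUCCESS;
    }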
diff --git a/src/hotspot/share/gc/parallel/gcTaskManager.cpp b/src/hotspot/share/gc/parallel/gcTaskManager.cpp
index f01608626f9..54e706a3569 100644
--- a/src/hotspot/share/gc/parallel/gcTaskManager.cpp
+++ b/src/hotspot/share/gc/parallel/gcTaskManager.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
#include "memory/resourceArea.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
//
diff --git a/src/hotspot/share/gc/parallel/jvmFlagConstraintsParallel.cpp b/src/hotspot/share/gc/parallel/jvmFlagConstraintsParallel.cpp
index ab95428b26e..e951014fd96 100644
--- a/src/hotspot/share/gc/parallel/jvmFlagConstraintsParallel.cpp
+++ b/src/hotspot/share/gc/parallel/jvmFlagConstraintsParallel.cpp
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "runtime/flags/jvmFlagRangeList.hpp"
+#include "gc/parallel/jvmFlagConstraintsParallel.hpp"
#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -32,10 +32,10 @@ JVMFlag::Error ParallelGCThreadsConstraintFuncParallel(uint value, bool verbose)
// So can't exceed with "max_jint"
if (UseParallelGC && (value > (uint)max_jint)) {
- CommandLineError::print(verbose,
- "ParallelGCThreads (" UINT32_FORMAT ") must be "
- "less than or equal to " UINT32_FORMAT " for Parallel GC\n",
- value, max_jint);
+ JVMFlag::printError(verbose,
+ "ParallelGCThreads (" UINT32_FORMAT ") must be "
+ "less than or equal to " UINT32_FORMAT " for Parallel GC\n",
+ value, max_jint);
return JVMFlag::VIOLATES_CONSTRAINT;
}
return JVMFlag::SUCCESS;
@@ -44,10 +44,10 @@ JVMFlag::Error ParallelGCThreadsConstraintFuncParallel(uint value, bool verbose)
JVMFlag::Error InitialTenuringThresholdConstraintFuncParallel(uintx value, bool verbose) {
// InitialTenuringThreshold is only used for ParallelGC.
if (UseParallelGC && (value > MaxTenuringThreshold)) {
- CommandLineError::print(verbose,
- "InitialTenuringThreshold (" UINTX_FORMAT ") must be "
- "less than or equal to MaxTenuringThreshold (" UINTX_FORMAT ")\n",
- value, MaxTenuringThreshold);
+ JVMFlag::printError(verbose,
+ "InitialTenuringThreshold (" UINTX_FORMAT ") must be "
+ "less than or equal to MaxTenuringThreshold (" UINTX_FORMAT ")\n",
+ value, MaxTenuringThreshold);
return JVMFlag::VIOLATES_CONSTRAINT;
}
return JVMFlag::SUCCESS;
@@ -57,10 +57,10 @@ JVMFlag::Error MaxTenuringThresholdConstraintFuncParallel(uintx value, bool verb
// As only ParallelGC uses InitialTenuringThreshold,
// we don't need to compare InitialTenuringThreshold with MaxTenuringThreshold.
if (UseParallelGC && (value < InitialTenuringThreshold)) {
- CommandLineError::print(verbose,
- "MaxTenuringThreshold (" UINTX_FORMAT ") must be "
- "greater than or equal to InitialTenuringThreshold (" UINTX_FORMAT ")\n",
- value, InitialTenuringThreshold);
+ JVMFlag::printError(verbose,
+ "MaxTenuringThreshold (" UINTX_FORMAT ") must be "
+ "greater than or equal to InitialTenuringThreshold (" UINTX_FORMAT ")\n",
+ value, InitialTenuringThreshold);
return JVMFlag::VIOLATES_CONSTRAINT;
}
diff --git a/src/hotspot/share/gc/parallel/pcTasks.cpp b/src/hotspot/share/gc/parallel/pcTasks.cpp
index 33d9dfaf8a1..fc155a992fd 100644
--- a/src/hotspot/share/gc/parallel/pcTasks.cpp
+++ b/src/hotspot/share/gc/parallel/pcTasks.cpp
@@ -104,7 +104,7 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
break;
case system_dictionary:
- SystemDictionary::always_strong_oops_do(&mark_and_push_closure);
+ SystemDictionary::oops_do(&mark_and_push_closure);
break;
case class_loader_data:
@@ -149,19 +149,16 @@ void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
void RefProcTaskExecutor::execute(ProcessTask& task)
{
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
- uint parallel_gc_threads = heap->gc_task_manager()->workers();
uint active_gc_threads = heap->gc_task_manager()->active_workers();
OopTaskQueueSet* qset = ParCompactionManager::stack_array();
ParallelTaskTerminator terminator(active_gc_threads, qset);
GCTaskQueue* q = GCTaskQueue::create();
-  for(uint i=0; i<parallel_gc_threads; i++) {
+  for(uint i=0; i<active_gc_threads; i++) {
     q->enqueue(new RefProcTaskProxy(task, i));
   }
-  if (task.marks_oops_alive()) {
-    if (parallel_gc_threads>1) {
-      for (uint j=0; j<active_gc_threads; j++) {
-        q->enqueue(new StealMarkingTask(&terminator));
-      }
-    }
-  }
+  if (task.marks_oops_alive() && (active_gc_threads>1)) {
+    for (uint j=0; j<active_gc_threads; j++) {
+      q->enqueue(new StealMarkingTask(&terminator));
+    }
+  }
PSParallelCompact::gc_task_manager()->execute_and_wait(q);
diff --git a/src/hotspot/share/gc/parallel/psMarkSweep.cpp b/src/hotspot/share/gc/parallel/psMarkSweep.cpp
index c8dc766cc6b..b2fd9b15c29 100644
--- a/src/hotspot/share/gc/parallel/psMarkSweep.cpp
+++ b/src/hotspot/share/gc/parallel/psMarkSweep.cpp
@@ -521,7 +521,7 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
ObjectSynchronizer::oops_do(mark_and_push_closure());
Management::oops_do(mark_and_push_closure());
JvmtiExport::oops_do(mark_and_push_closure());
- SystemDictionary::always_strong_oops_do(mark_and_push_closure());
+ SystemDictionary::oops_do(mark_and_push_closure());
ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
// Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
//CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
@@ -536,7 +536,7 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer);
ref_processor()->setup_policy(clear_all_softrefs);
- ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_queues());
+ ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->max_num_queues());
const ReferenceProcessorStats& stats =
ref_processor()->process_discovered_references(
is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, &pt);
@@ -556,7 +556,7 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer);
// Unload classes and purge the SystemDictionary.
- bool purged_class = SystemDictionary::do_unloading(is_alive_closure(), _gc_timer);
+ bool purged_class = SystemDictionary::do_unloading(_gc_timer);
// Unload nmethods.
CodeCache::do_unloading(is_alive_closure(), purged_class);
diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.cpp b/src/hotspot/share/gc/parallel/psParallelCompact.cpp
index a67dae68017..2fffd5b05ac 100644
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp
@@ -2111,8 +2111,11 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);
ReferenceProcessorStats stats;
- ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->num_queues());
+ ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->max_num_queues());
+
if (ref_processor()->processing_is_mt()) {
+ ref_processor()->set_active_mt_degree(active_gc_threads);
+
RefProcTaskExecutor task_executor;
stats = ref_processor()->process_discovered_references(
is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
@@ -2139,7 +2142,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", &_gc_timer);
// Follow system dictionary roots and unload classes.
- bool purged_class = SystemDictionary::do_unloading(is_alive_closure(), &_gc_timer);
+ bool purged_class = SystemDictionary::do_unloading(&_gc_timer);
// Unload nmethods.
CodeCache::do_unloading(is_alive_closure(), purged_class);
diff --git a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp
index 7f94c12bd84..c492df60ecd 100644
--- a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp
@@ -213,7 +213,8 @@ inline oop PSPromotionManager::copy_to_survivor_space(oop o) {
Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);
// Now we have to CAS in the header.
- if (o->cas_forward_to(new_obj, test_mark)) {
+ // Make copy visible to threads reading the forwardee.
+ if (o->cas_forward_to(new_obj, test_mark, memory_order_release)) {
// We won any races, we "own" this object.
assert(new_obj == o->forwardee(), "Sanity");
@@ -256,11 +257,12 @@ inline oop PSPromotionManager::copy_to_survivor_space(oop o) {
}
// don't update this before the unallocation!
- new_obj = o->forwardee();
+ // Using acquire though consume would be accurate for accessing new_obj.
+ new_obj = o->forwardee_acquire();
}
} else {
assert(o->is_forwarded(), "Sanity");
- new_obj = o->forwardee();
+ new_obj = o->forwardee_acquire();
}
// This code must come after the CAS test, or it will print incorrect
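The copy_to_survivor_space change pairs a releasing CAS on the forwarding pointer with forwardee_acquire() on readers, so a thread that loses the installation race sees the winner's fully copied object. A standalone sketch of that publication protocol in portable C++ atomics:

    #include <atomic>
    #include <cstring>

    struct Obj {
      std::atomic<Obj*> forwardee;
      char payload[32];
    };

    Obj* copy_to_survivor(Obj* from, Obj* to) {
      std::memcpy(to->payload, from->payload, sizeof(from->payload)); // copy first
      Obj* expected = nullptr;
      if (from->forwardee.compare_exchange_strong(expected, to,
                                                  std::memory_order_release,
                                                  std::memory_order_relaxed)) {
        return to;                                   // we won: our copy is published
      }
      // Lost the race: the acquire load pairs with the winner's release,
      // making the winner's copied payload visible before we use it.
      return from->forwardee.load(std::memory_order_acquire);
    }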
diff --git a/src/hotspot/share/gc/parallel/psScavenge.cpp b/src/hotspot/share/gc/parallel/psScavenge.cpp
index b81fe6ed58a..43c0c8576ca 100644
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp
@@ -399,7 +399,7 @@ bool PSScavenge::invoke_no_policy() {
PSKeepAliveClosure keep_alive(promotion_manager);
PSEvacuateFollowersClosure evac_followers(promotion_manager);
ReferenceProcessorStats stats;
- ReferenceProcessorPhaseTimes pt(&_gc_timer, reference_processor()->num_queues());
+ ReferenceProcessorPhaseTimes pt(&_gc_timer, reference_processor()->max_num_queues());
if (reference_processor()->processing_is_mt()) {
PSRefProcTaskExecutor task_executor;
stats = reference_processor()->process_discovered_references(
diff --git a/src/hotspot/share/gc/serial/defNewGeneration.cpp b/src/hotspot/share/gc/serial/defNewGeneration.cpp
index 0413e9c71ac..935a3f5617d 100644
--- a/src/hotspot/share/gc/serial/defNewGeneration.cpp
+++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp
@@ -629,7 +629,7 @@ void DefNewGeneration::collect(bool full,
FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
ReferenceProcessor* rp = ref_processor();
rp->setup_policy(clear_all_soft_refs);
- ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_queues());
+ ReferenceProcessorPhaseTimes pt(_gc_timer, rp->max_num_queues());
const ReferenceProcessorStats& stats =
rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
NULL, &pt);
diff --git a/src/hotspot/share/gc/serial/genMarkSweep.cpp b/src/hotspot/share/gc/serial/genMarkSweep.cpp
index 72f84d3d392..0090007466f 100644
--- a/src/hotspot/share/gc/serial/genMarkSweep.cpp
+++ b/src/hotspot/share/gc/serial/genMarkSweep.cpp
@@ -208,7 +208,7 @@ void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
GCTraceTime(Debug, gc, phases) tm_m("Reference Processing", gc_timer());
ref_processor()->setup_policy(clear_all_softrefs);
- ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_queues());
+ ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->max_num_queues());
const ReferenceProcessorStats& stats =
ref_processor()->process_discovered_references(
&is_alive, &keep_alive, &follow_stack_closure, NULL, &pt);
@@ -228,7 +228,7 @@ void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", gc_timer());
// Unload classes and purge the SystemDictionary.
- bool purged_class = SystemDictionary::do_unloading(&is_alive, gc_timer());
+ bool purged_class = SystemDictionary::do_unloading(gc_timer());
// Unload nmethods.
CodeCache::do_unloading(&is_alive, purged_class);
diff --git a/src/hotspot/share/gc/shared/barrierSet.hpp b/src/hotspot/share/gc/shared/barrierSet.hpp
index 748f33b323e..bf1c1e395ae 100644
--- a/src/hotspot/share/gc/shared/barrierSet.hpp
+++ b/src/hotspot/share/gc/shared/barrierSet.hpp
@@ -103,17 +103,17 @@ protected:
~BarrierSet() { }
  template <class BarrierSetAssemblerT>
- BarrierSetAssembler* make_barrier_set_assembler() {
+ static BarrierSetAssembler* make_barrier_set_assembler() {
return NOT_ZERO(new BarrierSetAssemblerT()) ZERO_ONLY(NULL);
}
  template <class BarrierSetC1T>
- BarrierSetC1* make_barrier_set_c1() {
+ static BarrierSetC1* make_barrier_set_c1() {
return COMPILER1_PRESENT(new BarrierSetC1T()) NOT_COMPILER1(NULL);
}
  template <class BarrierSetC2T>
- BarrierSetC2* make_barrier_set_c2() {
+ static BarrierSetC2* make_barrier_set_c2() {
return COMPILER2_PRESENT(new BarrierSetC2T()) NOT_COMPILER2(NULL);
}
@@ -213,8 +213,12 @@ public:
}
  template <typename T>
- static void arraycopy_in_heap(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
- Raw::arraycopy(src_obj, dst_obj, src, dst, length);
+ static void arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
+ arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
+ size_t length) {
+ Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw,
+ dst_obj, dst_offset_in_bytes, dst_raw,
+ length);
}
// Heap oop accesses. These accessors get resolved when
@@ -257,8 +261,12 @@ public:
}
  template <typename T>
- static bool oop_arraycopy_in_heap(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
- return Raw::oop_arraycopy(src_obj, dst_obj, src, dst, length);
+ static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
+ arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
+ size_t length) {
+ return Raw::oop_arraycopy(src_obj, src_offset_in_bytes, src_raw,
+ dst_obj, dst_offset_in_bytes, dst_raw,
+ length);
}
// Off-heap oop accesses. These accessors get resolved when
diff --git a/src/hotspot/share/gc/shared/barrierSetConfig.hpp b/src/hotspot/share/gc/shared/barrierSetConfig.hpp
index fee39c7d579..afef9759687 100644
--- a/src/hotspot/share/gc/shared/barrierSetConfig.hpp
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.hpp
@@ -30,7 +30,9 @@
// Do something for each concrete barrier set part of the build.
#define FOR_EACH_CONCRETE_BARRIER_SET_DO(f) \
f(CardTableBarrierSet) \
- G1GC_ONLY(f(G1BarrierSet))
+ EPSILONGC_ONLY(f(EpsilonBarrierSet)) \
+ G1GC_ONLY(f(G1BarrierSet)) \
+ ZGC_ONLY(f(ZBarrierSet))
#define FOR_EACH_ABSTRACT_BARRIER_SET_DO(f) \
f(ModRef)
diff --git a/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp b/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp
index 5f4f502b2e9..5030975100a 100644
--- a/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp
@@ -30,8 +30,14 @@
#include "gc/shared/modRefBarrierSet.inline.hpp"
#include "gc/shared/cardTableBarrierSet.inline.hpp"
+#if INCLUDE_EPSILONGC
+#include "gc/epsilon/epsilonBarrierSet.hpp"
+#endif
#if INCLUDE_G1GC
-#include "gc/g1/g1BarrierSet.inline.hpp" // G1 support
+#include "gc/g1/g1BarrierSet.inline.hpp"
+#endif
+#if INCLUDE_ZGC
+#include "gc/z/zBarrierSet.inline.hpp"
#endif
#endif // SHARE_VM_GC_SHARED_BARRIERSETCONFIG_INLINE_HPP
diff --git a/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp b/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp
index b0eb89bd342..b92f89399e3 100644
--- a/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp
+++ b/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp
@@ -27,7 +27,7 @@
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTable.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
template <DecoratorSet decorators, typename T>
inline void CardTableBarrierSet::write_ref_field_post(T* field, oop newVal) {
diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp
index 7fbe469a4d1..75af999fc0b 100644
--- a/src/hotspot/share/gc/shared/collectedHeap.cpp
+++ b/src/hotspot/share/gc/shared/collectedHeap.cpp
@@ -365,20 +365,33 @@ void CollectedHeap::check_for_valid_allocation_state() {
}
#endif
-HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
+HeapWord* CollectedHeap::obj_allocate_raw(Klass* klass, size_t size,
+ bool* gc_overhead_limit_was_exceeded, TRAPS) {
+ if (UseTLAB) {
+ HeapWord* result = allocate_from_tlab(klass, size, THREAD);
+ if (result != NULL) {
+ return result;
+ }
+ }
+
+ return allocate_outside_tlab(klass, size, gc_overhead_limit_was_exceeded, THREAD);
+}
+
+HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, size_t size, TRAPS) {
+ ThreadLocalAllocBuffer& tlab = THREAD->tlab();
// Retain tlab and allocate object in shared space if
// the amount free in the tlab is too large to discard.
- if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
- thread->tlab().record_slow_allocation(size);
+ if (tlab.free() > tlab.refill_waste_limit()) {
+ tlab.record_slow_allocation(size);
return NULL;
}
// Discard tlab and allocate a new one.
// To minimize fragmentation, the last TLAB may be smaller than the rest.
- size_t new_tlab_size = thread->tlab().compute_size(size);
+ size_t new_tlab_size = tlab.compute_size(size);
- thread->tlab().clear_before_allocation();
+ tlab.clear_before_allocation();
if (new_tlab_size == 0) {
return NULL;
@@ -397,7 +410,7 @@ HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, s
assert(actual_tlab_size != 0, "Allocation succeeded but actual size not updated. obj at: " PTR_FORMAT " min: " SIZE_FORMAT ", desired: " SIZE_FORMAT,
p2i(obj), min_tlab_size, new_tlab_size);
- AllocTracer::send_allocation_in_new_tlab(klass, obj, actual_tlab_size * HeapWordSize, size * HeapWordSize, thread);
+ AllocTracer::send_allocation_in_new_tlab(klass, obj, actual_tlab_size * HeapWordSize, size * HeapWordSize, THREAD);
if (ZeroTLAB) {
// ..and clear it.
@@ -412,7 +425,7 @@ HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, s
Copy::fill_to_words(obj + hdr_size, actual_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
}
- thread->tlab().fill(obj, obj + size, actual_tlab_size);
+ tlab.fill(obj, obj + size, actual_tlab_size);
return obj;
}
diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp
index e61f6c23ae6..ecd8301624e 100644
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp
@@ -89,6 +89,7 @@ class GCHeapLog : public EventLogBase<GCMessage> {
// CMSHeap
// G1CollectedHeap
// ParallelScavengeHeap
+// ZCollectedHeap
//
class CollectedHeap : public CHeapObj<mtInternal> {
friend class VMStructs;
@@ -141,8 +142,18 @@ class CollectedHeap : public CHeapObj<mtInternal> {
virtual void resize_all_tlabs();
// Allocate from the current thread's TLAB, with broken-out slow path.
- inline static HeapWord* allocate_from_tlab(Klass* klass, Thread* thread, size_t size);
- static HeapWord* allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size);
+ inline static HeapWord* allocate_from_tlab(Klass* klass, size_t size, TRAPS);
+ static HeapWord* allocate_from_tlab_slow(Klass* klass, size_t size, TRAPS);
+
+ inline static HeapWord* allocate_outside_tlab(Klass* klass, size_t size,
+ bool* gc_overhead_limit_was_exceeded, TRAPS);
+
+ // Raw memory allocation facilities
+ // The obj and array allocate methods are covers for these methods.
+ // mem_allocate() should never be
+ // called to allocate TLABs, only individual objects.
+ virtual HeapWord* mem_allocate(size_t size,
+ bool* gc_overhead_limit_was_exceeded) = 0;
// Allocate an uninitialized block of the given size, or returns NULL if
// this is impossible.
@@ -196,7 +207,9 @@ class CollectedHeap : public CHeapObj<mtInternal> {
Serial,
Parallel,
CMS,
- G1
+ G1,
+ Epsilon,
+ Z
};
static inline size_t filler_array_max_size() {
@@ -309,12 +322,12 @@ class CollectedHeap : public CHeapObj<mtInternal> {
inline static oop array_allocate_nozero(Klass* klass, int size, int length, TRAPS);
inline static oop class_allocate(Klass* klass, int size, TRAPS);
- // Raw memory allocation facilities
- // The obj and array allocate methods are covers for these methods.
- // mem_allocate() should never be
- // called to allocate TLABs, only individual objects.
- virtual HeapWord* mem_allocate(size_t size,
- bool* gc_overhead_limit_was_exceeded) = 0;
+ // Raw memory allocation. This may or may not use TLAB allocations to satisfy the
+ // allocation. A GC implementation may override this function to satisfy the allocation
+ // in any way. But the default is to try a TLAB allocation, and otherwise perform
+ // mem_allocate.
+ virtual HeapWord* obj_allocate_raw(Klass* klass, size_t size,
+ bool* gc_overhead_limit_was_exceeded, TRAPS);
// Utilities for turning raw memory into filler objects.
//
diff --git a/src/hotspot/share/gc/shared/collectedHeap.inline.hpp b/src/hotspot/share/gc/shared/collectedHeap.inline.hpp
index ee9f5089a94..08c8131333f 100644
--- a/src/hotspot/share/gc/shared/collectedHeap.inline.hpp
+++ b/src/hotspot/share/gc/shared/collectedHeap.inline.hpp
@@ -137,31 +137,14 @@ HeapWord* CollectedHeap::common_mem_allocate_noinit(Klass* klass, size_t size, T
return NULL; // caller does a CHECK_0 too
}
- HeapWord* result = NULL;
- if (UseTLAB) {
- result = allocate_from_tlab(klass, THREAD, size);
- if (result != NULL) {
- assert(!HAS_PENDING_EXCEPTION,
- "Unexpected exception, will result in uninitialized storage");
- return result;
- }
- }
bool gc_overhead_limit_was_exceeded = false;
- result = Universe::heap()->mem_allocate(size,
- &gc_overhead_limit_was_exceeded);
+ CollectedHeap* heap = Universe::heap();
+ HeapWord* result = heap->obj_allocate_raw(klass, size, &gc_overhead_limit_was_exceeded, THREAD);
+
if (result != NULL) {
- NOT_PRODUCT(Universe::heap()->
- check_for_non_bad_heap_word_value(result, size));
- assert(!HAS_PENDING_EXCEPTION,
- "Unexpected exception, will result in uninitialized storage");
- THREAD->incr_allocated_bytes(size * HeapWordSize);
-
- AllocTracer::send_allocation_outside_tlab(klass, result, size * HeapWordSize, THREAD);
-
return result;
}
-
if (!gc_overhead_limit_was_exceeded) {
// -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
report_java_out_of_memory("Java heap space");
@@ -193,15 +176,34 @@ HeapWord* CollectedHeap::common_mem_allocate_init(Klass* klass, size_t size, TRA
return obj;
}
-HeapWord* CollectedHeap::allocate_from_tlab(Klass* klass, Thread* thread, size_t size) {
+HeapWord* CollectedHeap::allocate_from_tlab(Klass* klass, size_t size, TRAPS) {
assert(UseTLAB, "should use UseTLAB");
- HeapWord* obj = thread->tlab().allocate(size);
+ HeapWord* obj = THREAD->tlab().allocate(size);
if (obj != NULL) {
return obj;
}
// Otherwise...
- return allocate_from_tlab_slow(klass, thread, size);
+ obj = allocate_from_tlab_slow(klass, size, THREAD);
+ assert(obj == NULL || !HAS_PENDING_EXCEPTION,
+ "Unexpected exception, will result in uninitialized storage");
+ return obj;
+}
+
+HeapWord* CollectedHeap::allocate_outside_tlab(Klass* klass, size_t size,
+ bool* gc_overhead_limit_was_exceeded, TRAPS) {
+ HeapWord* result = Universe::heap()->mem_allocate(size, gc_overhead_limit_was_exceeded);
+ if (result == NULL) {
+ return result;
+ }
+
+ NOT_PRODUCT(Universe::heap()->check_for_non_bad_heap_word_value(result, size));
+ assert(!HAS_PENDING_EXCEPTION,
+ "Unexpected exception, will result in uninitialized storage");
+ THREAD->incr_allocated_bytes(size * HeapWordSize);
+
+ AllocTracer::send_allocation_outside_tlab(klass, result, size * HeapWordSize, THREAD);
+ return result;
}
void CollectedHeap::init_obj(HeapWord* obj, size_t size) {
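obj_allocate_raw (declared virtual above) centralizes the TLAB-first, mem_allocate-second policy that common_mem_allocate_noinit previously open-coded, letting a collector override the whole allocation path. A standalone sketch of the flow, with simplified stand-in types rather than HotSpot's:

    #include <cstddef>

    struct Tlab {
      char* top;
      char* end;
      char* allocate(size_t bytes) {           // lock-free bump-pointer fast path
        if (top + bytes <= end) { char* r = top; top += bytes; return r; }
        return nullptr;                        // would overflow: take slow path
      }
    };

    char* obj_allocate_raw(Tlab& tlab, size_t bytes, bool use_tlab,
                           char* (*mem_allocate)(size_t)) {
      if (use_tlab) {
        if (char* r = tlab.allocate(bytes)) return r;
      }
      return mem_allocate(bytes);              // shared path; may trigger a GC
    }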
diff --git a/src/hotspot/share/gc/shared/gcCause.cpp b/src/hotspot/share/gc/shared/gcCause.cpp
index cc8b216ff7c..3a7c3e1a9e7 100644
--- a/src/hotspot/share/gc/shared/gcCause.cpp
+++ b/src/hotspot/share/gc/shared/gcCause.cpp
@@ -105,6 +105,21 @@ const char* GCCause::to_string(GCCause::Cause cause) {
case _dcmd_gc_run:
return "Diagnostic Command";
+ case _z_timer:
+ return "Timer";
+
+ case _z_warmup:
+ return "Warmup";
+
+ case _z_allocation_rate:
+ return "Allocation Rate";
+
+ case _z_allocation_stall:
+ return "Allocation Stall";
+
+ case _z_proactive:
+ return "Proactive";
+
case _last_gc_cause:
return "ILLEGAL VALUE - last gc cause - ILLEGAL VALUE";
diff --git a/src/hotspot/share/gc/shared/gcCause.hpp b/src/hotspot/share/gc/shared/gcCause.hpp
index 7bedce85a49..5bf68b4c1f0 100644
--- a/src/hotspot/share/gc/shared/gcCause.hpp
+++ b/src/hotspot/share/gc/shared/gcCause.hpp
@@ -78,6 +78,12 @@ class GCCause : public AllStatic {
_dcmd_gc_run,
+ _z_timer,
+ _z_warmup,
+ _z_allocation_rate,
+ _z_allocation_stall,
+ _z_proactive,
+
_last_gc_cause
};
diff --git a/src/hotspot/share/gc/shared/gcConfig.cpp b/src/hotspot/share/gc/shared/gcConfig.cpp
index c5f49b90e89..ff8a78e226b 100644
--- a/src/hotspot/share/gc/shared/gcConfig.cpp
+++ b/src/hotspot/share/gc/shared/gcConfig.cpp
@@ -31,6 +31,9 @@
#if INCLUDE_CMSGC
#include "gc/cms/cmsArguments.hpp"
#endif
+#if INCLUDE_EPSILONGC
+#include "gc/epsilon/epsilonArguments.hpp"
+#endif
#if INCLUDE_G1GC
#include "gc/g1/g1Arguments.hpp"
#endif
@@ -40,6 +43,9 @@
#if INCLUDE_SERIALGC
#include "gc/serial/serialArguments.hpp"
#endif
+#if INCLUDE_ZGC
+#include "gc/z/zArguments.hpp"
+#endif
struct SupportedGC {
bool& _flag;
@@ -52,18 +58,22 @@ struct SupportedGC {
};
CMSGC_ONLY(static CMSArguments cmsArguments;)
+ EPSILONGC_ONLY(static EpsilonArguments epsilonArguments;)
G1GC_ONLY(static G1Arguments g1Arguments;)
PARALLELGC_ONLY(static ParallelArguments parallelArguments;)
SERIALGC_ONLY(static SerialArguments serialArguments;)
+ ZGC_ONLY(static ZArguments zArguments;)
// Table of supported GCs, for translating between command
// line flag, CollectedHeap::Name and GCArguments instance.
static const SupportedGC SupportedGCs[] = {
CMSGC_ONLY_ARG(SupportedGC(UseConcMarkSweepGC, CollectedHeap::CMS, cmsArguments, "concurrent mark sweep gc"))
+ EPSILONGC_ONLY_ARG(SupportedGC(UseEpsilonGC, CollectedHeap::Epsilon, epsilonArguments, "epsilon gc"))
G1GC_ONLY_ARG(SupportedGC(UseG1GC, CollectedHeap::G1, g1Arguments, "g1 gc"))
PARALLELGC_ONLY_ARG(SupportedGC(UseParallelGC, CollectedHeap::Parallel, parallelArguments, "parallel gc"))
PARALLELGC_ONLY_ARG(SupportedGC(UseParallelOldGC, CollectedHeap::Parallel, parallelArguments, "parallel gc"))
SERIALGC_ONLY_ARG(SupportedGC(UseSerialGC, CollectedHeap::Serial, serialArguments, "serial gc"))
+ ZGC_ONLY_ARG(SupportedGC(UseZGC, CollectedHeap::Z, zArguments, "z gc"))
};
#define FOR_EACH_SUPPORTED_GC(var) \
@@ -88,10 +98,12 @@ void GCConfig::select_gc_ergonomically() {
}
NOT_CMSGC( UNSUPPORTED_OPTION(UseConcMarkSweepGC));
+ NOT_EPSILONGC( UNSUPPORTED_OPTION(UseEpsilonGC);)
NOT_G1GC( UNSUPPORTED_OPTION(UseG1GC);)
NOT_PARALLELGC(UNSUPPORTED_OPTION(UseParallelGC);)
NOT_PARALLELGC(UNSUPPORTED_OPTION(UseParallelOldGC));
NOT_SERIALGC( UNSUPPORTED_OPTION(UseSerialGC);)
+ NOT_ZGC( UNSUPPORTED_OPTION(UseZGC);)
}
bool GCConfig::is_no_gc_selected() {
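GCConfig keeps one SupportedGC row per built-in collector, tying the selection flag to its GCArguments instance; the Epsilon and Z rows above are simply appended. A standalone sketch of the table-driven selection (simplified types, not the real GCConfig):

    static bool UseSerialGC_ = false;
    static bool UseZGC_     = false;

    struct SupportedGC {
      bool*       flag;        // e.g. &UseZGC
      const char* name;        // e.g. "z gc"
    };

    static const SupportedGC kSupportedGCs[] = {
      { &UseSerialGC_, "serial gc" },
      { &UseZGC_,      "z gc"      },
    };

    const char* selected_gc_name() {
      for (const SupportedGC& gc : kSupportedGCs) {
        if (*gc.flag) return gc.name;          // adding a GC is adding a row
      }
      return nullptr;                          // none selected; pick ergonomically
    }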
diff --git a/src/hotspot/share/gc/shared/gcConfiguration.cpp b/src/hotspot/share/gc/shared/gcConfiguration.cpp
index 453d8c633c2..f37387dd73c 100644
--- a/src/hotspot/share/gc/shared/gcConfiguration.cpp
+++ b/src/hotspot/share/gc/shared/gcConfiguration.cpp
@@ -43,6 +43,10 @@ GCName GCConfiguration::young_collector() const {
return ParNew;
}
+ if (UseZGC) {
+ return NA;
+ }
+
return DefNew;
}
@@ -59,6 +63,10 @@ GCName GCConfiguration::old_collector() const {
return ParallelOld;
}
+ if (UseZGC) {
+ return Z;
+ }
+
return SerialOld;
}
diff --git a/src/hotspot/share/gc/shared/gcName.hpp b/src/hotspot/share/gc/shared/gcName.hpp
index 35cb58b1512..bc21fa6ccaa 100644
--- a/src/hotspot/share/gc/shared/gcName.hpp
+++ b/src/hotspot/share/gc/shared/gcName.hpp
@@ -38,6 +38,8 @@ enum GCName {
ConcurrentMarkSweep,
G1Old,
G1Full,
+ Z,
+ NA,
GCNameEndSentinel
};
@@ -55,6 +57,8 @@ class GCNameHelper {
case ConcurrentMarkSweep: return "ConcurrentMarkSweep";
case G1Old: return "G1Old";
case G1Full: return "G1Full";
+ case Z: return "Z";
+ case NA: return "N/A";
default: ShouldNotReachHere(); return NULL;
}
}
diff --git a/src/hotspot/share/gc/shared/gcThreadLocalData.hpp b/src/hotspot/share/gc/shared/gcThreadLocalData.hpp
index b8251fdf963..7599a7f6821 100644
--- a/src/hotspot/share/gc/shared/gcThreadLocalData.hpp
+++ b/src/hotspot/share/gc/shared/gcThreadLocalData.hpp
@@ -40,6 +40,6 @@
// should consider placing frequently accessed fields first in
// T, so that field offsets relative to Thread are small, which
// often allows for a more compact instruction encoding.
-typedef uint64_t GCThreadLocalData[14]; // 112 bytes
+typedef uint64_t GCThreadLocalData[18]; // 144 bytes
#endif // SHARE_GC_SHARED_GCTHREADLOCALDATA_HPP
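Growing GCThreadLocalData from 14 to 18 words makes room for ZGC's per-thread state: every Thread embeds this opaque blob and each collector reinterprets it as its own struct, as the comment above describes. A standalone sketch of that reservation pattern; the field names below are illustrative, not any real GC's layout:

    #include <cstdint>

    typedef uint64_t GCThreadLocalData[18];        // raw storage inside Thread

    struct SomeGCThreadLocalData {                 // one GC's private view
      void*    mark_queue_buffer;
      uint64_t mark_queue_index;
    };

    static_assert(sizeof(SomeGCThreadLocalData) <= sizeof(GCThreadLocalData),
                  "GC-private data must fit in the reserved blob");

    SomeGCThreadLocalData* data_of(GCThreadLocalData* blob) {
      return reinterpret_cast<SomeGCThreadLocalData*>(blob);
    }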
diff --git a/src/hotspot/share/gc/shared/gc_globals.hpp b/src/hotspot/share/gc/shared/gc_globals.hpp
index e37526ccf24..686d46c87fd 100644
--- a/src/hotspot/share/gc/shared/gc_globals.hpp
+++ b/src/hotspot/share/gc/shared/gc_globals.hpp
@@ -29,6 +29,9 @@
#if INCLUDE_CMSGC
#include "gc/cms/cms_globals.hpp"
#endif
+#if INCLUDE_EPSILONGC
+#include "gc/epsilon/epsilon_globals.hpp"
+#endif
#if INCLUDE_G1GC
#include "gc/g1/g1_globals.hpp"
#endif
@@ -38,6 +41,9 @@
#if INCLUDE_SERIALGC
#include "gc/serial/serial_globals.hpp"
#endif
+#if INCLUDE_ZGC
+#include "gc/z/z_globals.hpp"
+#endif
#define GC_FLAGS(develop, \
develop_pd, \
@@ -70,6 +76,22 @@
constraint, \
writeable)) \
\
+ EPSILONGC_ONLY(GC_EPSILON_FLAGS( \
+ develop, \
+ develop_pd, \
+ product, \
+ product_pd, \
+ diagnostic, \
+ diagnostic_pd, \
+ experimental, \
+ notproduct, \
+ manageable, \
+ product_rw, \
+ lp64_product, \
+ range, \
+ constraint, \
+ writeable)) \
+ \
G1GC_ONLY(GC_G1_FLAGS( \
develop, \
develop_pd, \
@@ -118,6 +140,22 @@
constraint, \
writeable)) \
\
+ ZGC_ONLY(GC_Z_FLAGS( \
+ develop, \
+ develop_pd, \
+ product, \
+ product_pd, \
+ diagnostic, \
+ diagnostic_pd, \
+ experimental, \
+ notproduct, \
+ manageable, \
+ product_rw, \
+ lp64_product, \
+ range, \
+ constraint, \
+ writeable)) \
+ \
/* gc */ \
\
product(bool, UseConcMarkSweepGC, false, \
@@ -135,6 +173,12 @@
product(bool, UseParallelOldGC, false, \
"Use the Parallel Old garbage collector") \
\
+ experimental(bool, UseEpsilonGC, false, \
+ "Use the Epsilon (no-op) garbage collector") \
+ \
+ experimental(bool, UseZGC, false, \
+ "Use the Z garbage collector") \
+ \
product(uint, ParallelGCThreads, 0, \
"Number of parallel threads parallel gc will use") \
constraint(ParallelGCThreadsConstraintFunc,AfterErgo) \
diff --git a/src/hotspot/share/gc/shared/genCollectedHeap.cpp b/src/hotspot/share/gc/shared/genCollectedHeap.cpp
index 1936b0c1e2a..5a5804f053e 100644
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp
@@ -44,6 +44,7 @@
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generationSpec.hpp"
+#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/vmGCOperations.hpp"
@@ -783,7 +784,6 @@ static AssertNonScavengableClosure assert_is_non_scavengable_closure;
void GenCollectedHeap::process_roots(StrongRootsScope* scope,
ScanningOption so,
OopClosure* strong_roots,
- OopClosure* weak_roots,
CLDClosure* strong_cld_closure,
CLDClosure* weak_cld_closure,
CodeBlobToOopClosure* code_roots) {
@@ -827,7 +827,7 @@ void GenCollectedHeap::process_roots(StrongRootsScope* scope,
}
if (!_process_strong_tasks->is_task_claimed(GCH_PS_SystemDictionary_oops_do)) {
- SystemDictionary::roots_oops_do(strong_roots, weak_roots);
+ SystemDictionary::oops_do(strong_roots);
}
if (!_process_strong_tasks->is_task_claimed(GCH_PS_CodeCache_oops_do)) {
@@ -852,12 +852,17 @@ void GenCollectedHeap::process_roots(StrongRootsScope* scope,
}
void GenCollectedHeap::process_string_table_roots(StrongRootsScope* scope,
- OopClosure* root_closure) {
+ OopClosure* root_closure,
+                                                  OopStorage::ParState<false, false>* par_state_string) {
assert(root_closure != NULL, "Must be set");
// All threads execute the following. A specific chunk of buckets
// from the StringTable are the individual tasks.
+
+ // Either we should be single threaded or have a ParState
+ assert((scope->n_threads() <= 1) || par_state_string != NULL, "Parallel but no ParState");
+
if (scope->n_threads() > 1) {
- StringTable::possibly_parallel_oops_do(root_closure);
+ StringTable::possibly_parallel_oops_do(par_state_string, root_closure);
} else {
StringTable::oops_do(root_closure);
}
@@ -866,12 +871,13 @@ void GenCollectedHeap::process_string_table_roots(StrongRootsScope* scope,
void GenCollectedHeap::young_process_roots(StrongRootsScope* scope,
OopsInGenClosure* root_closure,
OopsInGenClosure* old_gen_closure,
- CLDClosure* cld_closure) {
+ CLDClosure* cld_closure,
+                                           OopStorage::ParState<false, false>* par_state_string) {
MarkingCodeBlobClosure mark_code_closure(root_closure, CodeBlobToOopClosure::FixRelocations);
- process_roots(scope, SO_ScavengeCodeCache, root_closure, root_closure,
+ process_roots(scope, SO_ScavengeCodeCache, root_closure,
cld_closure, cld_closure, &mark_code_closure);
- process_string_table_roots(scope, root_closure);
+ process_string_table_roots(scope, root_closure, par_state_string);
if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
root_closure->reset_generation();
@@ -891,17 +897,17 @@ void GenCollectedHeap::full_process_roots(StrongRootsScope* scope,
ScanningOption so,
bool only_strong_roots,
OopsInGenClosure* root_closure,
- CLDClosure* cld_closure) {
+ CLDClosure* cld_closure,
+                                          OopStorage::ParState<false, false>* par_state_string) {
MarkingCodeBlobClosure mark_code_closure(root_closure, is_adjust_phase);
- OopsInGenClosure* weak_roots = only_strong_roots ? NULL : root_closure;
CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;
- process_roots(scope, so, root_closure, weak_roots, cld_closure, weak_cld_closure, &mark_code_closure);
+ process_roots(scope, so, root_closure, cld_closure, weak_cld_closure, &mark_code_closure);
if (is_adjust_phase) {
// We never treat the string table as roots during marking
// for the full gc, so we only need to process it during
// the adjust phase.
- process_string_table_roots(scope, root_closure);
+ process_string_table_roots(scope, root_closure, par_state_string);
}
_process_strong_tasks->all_tasks_completed(scope->n_threads());
diff --git a/src/hotspot/share/gc/shared/genCollectedHeap.hpp b/src/hotspot/share/gc/shared/genCollectedHeap.hpp
index a8b4dd704f9..a250fcd8a14 100644
--- a/src/hotspot/share/gc/shared/genCollectedHeap.hpp
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.hpp
@@ -28,6 +28,7 @@
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/generation.hpp"
+#include "gc/shared/oopStorageParState.hpp"
#include "gc/shared/softRefGenPolicy.hpp"
class AdaptiveSizePolicy;
@@ -396,13 +397,13 @@ public:
void process_roots(StrongRootsScope* scope,
ScanningOption so,
OopClosure* strong_roots,
- OopClosure* weak_roots,
CLDClosure* strong_cld_closure,
CLDClosure* weak_cld_closure,
CodeBlobToOopClosure* code_roots);
void process_string_table_roots(StrongRootsScope* scope,
- OopClosure* root_closure);
+ OopClosure* root_closure,
+ OopStorage::ParState<false, false>* par_state_string);
// Accessor for memory state verification support
NOT_PRODUCT(
@@ -416,14 +417,16 @@ public:
void young_process_roots(StrongRootsScope* scope,
OopsInGenClosure* root_closure,
OopsInGenClosure* old_gen_closure,
- CLDClosure* cld_closure);
+ CLDClosure* cld_closure,
+ OopStorage::ParState<false, false>* par_state_string = NULL);
void full_process_roots(StrongRootsScope* scope,
bool is_adjust_phase,
ScanningOption so,
bool only_strong_roots,
OopsInGenClosure* root_closure,
- CLDClosure* cld_closure);
+ CLDClosure* cld_closure,
+ OopStorage::ParState<false, false>* par_state_string = NULL);
// Apply "root_closure" to all the weak roots of the system.
// These include JNI weak roots, string table,
diff --git a/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.cpp b/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.cpp
index ba522dbb4f3..0cdbdedf198 100644
--- a/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.cpp
+++ b/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.cpp
@@ -30,12 +30,10 @@
#include "gc/shared/plab.hpp"
#include "gc/shared/threadLocalAllocBuffer.hpp"
#include "runtime/arguments.hpp"
-#include "runtime/flags/jvmFlagRangeList.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/align.hpp"
-#include "utilities/defaultStream.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_CMSGC
#include "gc/cms/jvmFlagConstraintsCMS.hpp"
@@ -88,10 +86,10 @@ JVMFlag::Error ConcGCThreadsConstraintFunc(uint value, bool verbose) {
// CMS and G1 GCs use ConcGCThreads.
if ((GCConfig::is_gc_selected(CollectedHeap::CMS) ||
GCConfig::is_gc_selected(CollectedHeap::G1)) && (value > ParallelGCThreads)) {
- CommandLineError::print(verbose,
- "ConcGCThreads (" UINT32_FORMAT ") must be "
- "less than or equal to ParallelGCThreads (" UINT32_FORMAT ")\n",
- value, ParallelGCThreads);
+ JVMFlag::printError(verbose,
+ "ConcGCThreads (" UINT32_FORMAT ") must be "
+ "less than or equal to ParallelGCThreads (" UINT32_FORMAT ")\n",
+ value, ParallelGCThreads);
return JVMFlag::VIOLATES_CONSTRAINT;
}
@@ -102,10 +100,10 @@ static JVMFlag::Error MinPLABSizeBounds(const char* name, size_t value, bool ver
if ((GCConfig::is_gc_selected(CollectedHeap::CMS) ||
GCConfig::is_gc_selected(CollectedHeap::G1) ||
GCConfig::is_gc_selected(CollectedHeap::Parallel)) && (value < PLAB::min_size())) {
- CommandLineError::print(verbose,
- "%s (" SIZE_FORMAT ") must be "
- "greater than or equal to ergonomic PLAB minimum size (" SIZE_FORMAT ")\n",
- name, value, PLAB::min_size());
+ JVMFlag::printError(verbose,
+ "%s (" SIZE_FORMAT ") must be "
+ "greater than or equal to ergonomic PLAB minimum size (" SIZE_FORMAT ")\n",
+ name, value, PLAB::min_size());
return JVMFlag::VIOLATES_CONSTRAINT;
}
@@ -116,10 +114,10 @@ JVMFlag::Error MaxPLABSizeBounds(const char* name, size_t value, bool verbose) {
if ((GCConfig::is_gc_selected(CollectedHeap::CMS) ||
GCConfig::is_gc_selected(CollectedHeap::G1) ||
GCConfig::is_gc_selected(CollectedHeap::Parallel)) && (value > PLAB::max_size())) {
- CommandLineError::print(verbose,
- "%s (" SIZE_FORMAT ") must be "
- "less than or equal to ergonomic PLAB maximum size (" SIZE_FORMAT ")\n",
- name, value, PLAB::max_size());
+ JVMFlag::printError(verbose,
+ "%s (" SIZE_FORMAT ") must be "
+ "less than or equal to ergonomic PLAB maximum size (" SIZE_FORMAT ")\n",
+ name, value, PLAB::max_size());
return JVMFlag::VIOLATES_CONSTRAINT;
}
@@ -156,10 +154,10 @@ JVMFlag::Error OldPLABSizeConstraintFunc(size_t value, bool verbose) {
JVMFlag::Error MinHeapFreeRatioConstraintFunc(uintx value, bool verbose) {
if (value > MaxHeapFreeRatio) {
- CommandLineError::print(verbose,
- "MinHeapFreeRatio (" UINTX_FORMAT ") must be "
- "less than or equal to MaxHeapFreeRatio (" UINTX_FORMAT ")\n",
- value, MaxHeapFreeRatio);
+ JVMFlag::printError(verbose,
+ "MinHeapFreeRatio (" UINTX_FORMAT ") must be "
+ "less than or equal to MaxHeapFreeRatio (" UINTX_FORMAT ")\n",
+ value, MaxHeapFreeRatio);
return JVMFlag::VIOLATES_CONSTRAINT;
} else {
return JVMFlag::SUCCESS;
@@ -168,10 +166,10 @@ JVMFlag::Error MinHeapFreeRatioConstraintFunc(uintx value, bool verbose) {
JVMFlag::Error MaxHeapFreeRatioConstraintFunc(uintx value, bool verbose) {
if (value < MinHeapFreeRatio) {
- CommandLineError::print(verbose,
- "MaxHeapFreeRatio (" UINTX_FORMAT ") must be "
- "greater than or equal to MinHeapFreeRatio (" UINTX_FORMAT ")\n",
- value, MinHeapFreeRatio);
+ JVMFlag::printError(verbose,
+ "MaxHeapFreeRatio (" UINTX_FORMAT ") must be "
+ "greater than or equal to MinHeapFreeRatio (" UINTX_FORMAT ")\n",
+ value, MinHeapFreeRatio);
return JVMFlag::VIOLATES_CONSTRAINT;
} else {
return JVMFlag::SUCCESS;
@@ -180,11 +178,11 @@ JVMFlag::Error MaxHeapFreeRatioConstraintFunc(uintx value, bool verbose) {
static JVMFlag::Error CheckMaxHeapSizeAndSoftRefLRUPolicyMSPerMB(size_t maxHeap, intx softRef, bool verbose) {
if ((softRef > 0) && ((maxHeap / M) > (max_uintx / softRef))) {
- CommandLineError::print(verbose,
- "Desired lifetime of SoftReferences cannot be expressed correctly. "
- "MaxHeapSize (" SIZE_FORMAT ") or SoftRefLRUPolicyMSPerMB "
- "(" INTX_FORMAT ") is too large\n",
- maxHeap, softRef);
+ JVMFlag::printError(verbose,
+ "Desired lifetime of SoftReferences cannot be expressed correctly. "
+ "MaxHeapSize (" SIZE_FORMAT ") or SoftRefLRUPolicyMSPerMB "
+ "(" INTX_FORMAT ") is too large\n",
+ maxHeap, softRef);
return JVMFlag::VIOLATES_CONSTRAINT;
} else {
return JVMFlag::SUCCESS;
@@ -197,10 +195,10 @@ JVMFlag::Error SoftRefLRUPolicyMSPerMBConstraintFunc(intx value, bool verbose) {
JVMFlag::Error MarkStackSizeConstraintFunc(size_t value, bool verbose) {
if (value > MarkStackSizeMax) {
- CommandLineError::print(verbose,
- "MarkStackSize (" SIZE_FORMAT ") must be "
- "less than or equal to MarkStackSizeMax (" SIZE_FORMAT ")\n",
- value, MarkStackSizeMax);
+ JVMFlag::printError(verbose,
+ "MarkStackSize (" SIZE_FORMAT ") must be "
+ "less than or equal to MarkStackSizeMax (" SIZE_FORMAT ")\n",
+ value, MarkStackSizeMax);
return JVMFlag::VIOLATES_CONSTRAINT;
} else {
return JVMFlag::SUCCESS;
@@ -209,10 +207,10 @@ JVMFlag::Error MarkStackSizeConstraintFunc(size_t value, bool verbose) {
JVMFlag::Error MinMetaspaceFreeRatioConstraintFunc(uintx value, bool verbose) {
if (value > MaxMetaspaceFreeRatio) {
- CommandLineError::print(verbose,
- "MinMetaspaceFreeRatio (" UINTX_FORMAT ") must be "
- "less than or equal to MaxMetaspaceFreeRatio (" UINTX_FORMAT ")\n",
- value, MaxMetaspaceFreeRatio);
+ JVMFlag::printError(verbose,
+ "MinMetaspaceFreeRatio (" UINTX_FORMAT ") must be "
+ "less than or equal to MaxMetaspaceFreeRatio (" UINTX_FORMAT ")\n",
+ value, MaxMetaspaceFreeRatio);
return JVMFlag::VIOLATES_CONSTRAINT;
} else {
return JVMFlag::SUCCESS;
@@ -221,10 +219,10 @@ JVMFlag::Error MinMetaspaceFreeRatioConstraintFunc(uintx value, bool verbose) {
JVMFlag::Error MaxMetaspaceFreeRatioConstraintFunc(uintx value, bool verbose) {
if (value < MinMetaspaceFreeRatio) {
- CommandLineError::print(verbose,
- "MaxMetaspaceFreeRatio (" UINTX_FORMAT ") must be "
- "greater than or equal to MinMetaspaceFreeRatio (" UINTX_FORMAT ")\n",
- value, MinMetaspaceFreeRatio);
+ JVMFlag::printError(verbose,
+ "MaxMetaspaceFreeRatio (" UINTX_FORMAT ") must be "
+ "greater than or equal to MinMetaspaceFreeRatio (" UINTX_FORMAT ")\n",
+ value, MinMetaspaceFreeRatio);
return JVMFlag::VIOLATES_CONSTRAINT;
} else {
return JVMFlag::SUCCESS;
@@ -252,12 +250,12 @@ JVMFlag::Error MaxTenuringThresholdConstraintFunc(uintx value, bool verbose) {
// MaxTenuringThreshold=0 means NeverTenure=false && AlwaysTenure=true
if ((value == 0) && (NeverTenure || !AlwaysTenure)) {
- CommandLineError::print(verbose,
- "MaxTenuringThreshold (0) should match to NeverTenure=false "
- "&& AlwaysTenure=true. But we have NeverTenure=%s "
- "AlwaysTenure=%s\n",
- NeverTenure ? "true" : "false",
- AlwaysTenure ? "true" : "false");
+ JVMFlag::printError(verbose,
+ "MaxTenuringThreshold (0) should match to NeverTenure=false "
+ "&& AlwaysTenure=true. But we have NeverTenure=%s "
+ "AlwaysTenure=%s\n",
+ NeverTenure ? "true" : "false",
+ AlwaysTenure ? "true" : "false");
return JVMFlag::VIOLATES_CONSTRAINT;
}
return JVMFlag::SUCCESS;
@@ -288,10 +286,10 @@ JVMFlag::Error GCPauseIntervalMillisConstraintFunc(uintx value, bool verbose) {
JVMFlag::Error InitialBootClassLoaderMetaspaceSizeConstraintFunc(size_t value, bool verbose) {
size_t aligned_max = align_down(max_uintx/2, Metaspace::reserve_alignment_words());
if (value > aligned_max) {
- CommandLineError::print(verbose,
- "InitialBootClassLoaderMetaspaceSize (" SIZE_FORMAT ") must be "
- "less than or equal to aligned maximum value (" SIZE_FORMAT ")\n",
- value, aligned_max);
+ JVMFlag::printError(verbose,
+ "InitialBootClassLoaderMetaspaceSize (" SIZE_FORMAT ") must be "
+ "less than or equal to aligned maximum value (" SIZE_FORMAT ")\n",
+ value, aligned_max);
return JVMFlag::VIOLATES_CONSTRAINT;
}
return JVMFlag::SUCCESS;
@@ -301,10 +299,10 @@ JVMFlag::Error InitialBootClassLoaderMetaspaceSizeConstraintFunc(size_t value, b
static JVMFlag::Error MaxSizeForAlignment(const char* name, size_t value, size_t alignment, bool verbose) {
size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
if (value > aligned_max) {
- CommandLineError::print(verbose,
- "%s (" SIZE_FORMAT ") must be "
- "less than or equal to aligned maximum value (" SIZE_FORMAT ")\n",
- name, value, aligned_max);
+ JVMFlag::printError(verbose,
+ "%s (" SIZE_FORMAT ") must be "
+ "less than or equal to aligned maximum value (" SIZE_FORMAT ")\n",
+ name, value, aligned_max);
return JVMFlag::VIOLATES_CONSTRAINT;
}
return JVMFlag::SUCCESS;
@@ -343,10 +341,10 @@ JVMFlag::Error HeapBaseMinAddressConstraintFunc(size_t value, bool verbose) {
// If an overflow happened in Arguments::set_heap_size(), MaxHeapSize will have too large a value.
// Check for this by ensuring that MaxHeapSize plus the requested min base address still fit within max_uintx.
if (UseCompressedOops && FLAG_IS_ERGO(MaxHeapSize) && (value > (max_uintx - MaxHeapSize))) {
- CommandLineError::print(verbose,
- "HeapBaseMinAddress (" SIZE_FORMAT ") or MaxHeapSize (" SIZE_FORMAT ") is too large. "
- "Sum of them must be less than or equal to maximum of size_t (" SIZE_FORMAT ")\n",
- value, MaxHeapSize, max_uintx);
+ JVMFlag::printError(verbose,
+ "HeapBaseMinAddress (" SIZE_FORMAT ") or MaxHeapSize (" SIZE_FORMAT ") is too large. "
+ "Sum of them must be less than or equal to maximum of size_t (" SIZE_FORMAT ")\n",
+ value, MaxHeapSize, max_uintx);
return JVMFlag::VIOLATES_CONSTRAINT;
}
@@ -367,17 +365,17 @@ JVMFlag::Error NewSizeConstraintFunc(size_t value, bool verbose) {
JVMFlag::Error MinTLABSizeConstraintFunc(size_t value, bool verbose) {
// At least, alignment reserve area is needed.
if (value < ThreadLocalAllocBuffer::alignment_reserve_in_bytes()) {
- CommandLineError::print(verbose,
- "MinTLABSize (" SIZE_FORMAT ") must be "
- "greater than or equal to reserved area in TLAB (" SIZE_FORMAT ")\n",
- value, ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
+ JVMFlag::printError(verbose,
+ "MinTLABSize (" SIZE_FORMAT ") must be "
+ "greater than or equal to reserved area in TLAB (" SIZE_FORMAT ")\n",
+ value, ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
return JVMFlag::VIOLATES_CONSTRAINT;
}
if (value > (ThreadLocalAllocBuffer::max_size() * HeapWordSize)) {
- CommandLineError::print(verbose,
- "MinTLABSize (" SIZE_FORMAT ") must be "
- "less than or equal to ergonomic TLAB maximum (" SIZE_FORMAT ")\n",
- value, ThreadLocalAllocBuffer::max_size() * HeapWordSize);
+ JVMFlag::printError(verbose,
+ "MinTLABSize (" SIZE_FORMAT ") must be "
+ "less than or equal to ergonomic TLAB maximum (" SIZE_FORMAT ")\n",
+ value, ThreadLocalAllocBuffer::max_size() * HeapWordSize);
return JVMFlag::VIOLATES_CONSTRAINT;
}
return JVMFlag::SUCCESS;
@@ -387,17 +385,17 @@ JVMFlag::Error TLABSizeConstraintFunc(size_t value, bool verbose) {
// Skip for default value of zero which means set ergonomically.
if (FLAG_IS_CMDLINE(TLABSize)) {
if (value < MinTLABSize) {
- CommandLineError::print(verbose,
- "TLABSize (" SIZE_FORMAT ") must be "
- "greater than or equal to MinTLABSize (" SIZE_FORMAT ")\n",
- value, MinTLABSize);
+ JVMFlag::printError(verbose,
+ "TLABSize (" SIZE_FORMAT ") must be "
+ "greater than or equal to MinTLABSize (" SIZE_FORMAT ")\n",
+ value, MinTLABSize);
return JVMFlag::VIOLATES_CONSTRAINT;
}
if (value > (ThreadLocalAllocBuffer::max_size() * HeapWordSize)) {
- CommandLineError::print(verbose,
- "TLABSize (" SIZE_FORMAT ") must be "
- "less than or equal to ergonomic TLAB maximum size (" SIZE_FORMAT ")\n",
- value, (ThreadLocalAllocBuffer::max_size() * HeapWordSize));
+ JVMFlag::printError(verbose,
+ "TLABSize (" SIZE_FORMAT ") must be "
+ "less than or equal to ergonomic TLAB maximum size (" SIZE_FORMAT ")\n",
+ value, (ThreadLocalAllocBuffer::max_size() * HeapWordSize));
return JVMFlag::VIOLATES_CONSTRAINT;
}
}
@@ -412,10 +410,10 @@ JVMFlag::Error TLABWasteIncrementConstraintFunc(uintx value, bool verbose) {
// Compare with 'max_uintx' as ThreadLocalAllocBuffer::_refill_waste_limit is 'size_t'.
if (refill_waste_limit > (max_uintx - value)) {
- CommandLineError::print(verbose,
- "TLABWasteIncrement (" UINTX_FORMAT ") must be "
- "less than or equal to ergonomic TLAB waste increment maximum size(" SIZE_FORMAT ")\n",
- value, (max_uintx - refill_waste_limit));
+ JVMFlag::printError(verbose,
+ "TLABWasteIncrement (" UINTX_FORMAT ") must be "
+ "less than or equal to ergonomic TLAB waste increment maximum size(" SIZE_FORMAT ")\n",
+ value, (max_uintx - refill_waste_limit));
return JVMFlag::VIOLATES_CONSTRAINT;
}
}
@@ -425,11 +423,11 @@ JVMFlag::Error TLABWasteIncrementConstraintFunc(uintx value, bool verbose) {
JVMFlag::Error SurvivorRatioConstraintFunc(uintx value, bool verbose) {
if (FLAG_IS_CMDLINE(SurvivorRatio) &&
(value > (MaxHeapSize / Universe::heap()->collector_policy()->space_alignment()))) {
- CommandLineError::print(verbose,
- "SurvivorRatio (" UINTX_FORMAT ") must be "
- "less than or equal to ergonomic SurvivorRatio maximum (" SIZE_FORMAT ")\n",
- value,
- (MaxHeapSize / Universe::heap()->collector_policy()->space_alignment()));
+ JVMFlag::printError(verbose,
+ "SurvivorRatio (" UINTX_FORMAT ") must be "
+ "less than or equal to ergonomic SurvivorRatio maximum (" SIZE_FORMAT ")\n",
+ value,
+ (MaxHeapSize / Universe::heap()->collector_policy()->space_alignment()));
return JVMFlag::VIOLATES_CONSTRAINT;
} else {
return JVMFlag::SUCCESS;
@@ -438,10 +436,10 @@ JVMFlag::Error SurvivorRatioConstraintFunc(uintx value, bool verbose) {
JVMFlag::Error MetaspaceSizeConstraintFunc(size_t value, bool verbose) {
if (value > MaxMetaspaceSize) {
- CommandLineError::print(verbose,
- "MetaspaceSize (" SIZE_FORMAT ") must be "
- "less than or equal to MaxMetaspaceSize (" SIZE_FORMAT ")\n",
- value, MaxMetaspaceSize);
+ JVMFlag::printError(verbose,
+ "MetaspaceSize (" SIZE_FORMAT ") must be "
+ "less than or equal to MaxMetaspaceSize (" SIZE_FORMAT ")\n",
+ value, MaxMetaspaceSize);
return JVMFlag::VIOLATES_CONSTRAINT;
} else {
return JVMFlag::SUCCESS;
@@ -450,10 +448,10 @@ JVMFlag::Error MetaspaceSizeConstraintFunc(size_t value, bool verbose) {
JVMFlag::Error MaxMetaspaceSizeConstraintFunc(size_t value, bool verbose) {
if (value < MetaspaceSize) {
- CommandLineError::print(verbose,
- "MaxMetaspaceSize (" SIZE_FORMAT ") must be "
- "greater than or equal to MetaspaceSize (" SIZE_FORMAT ")\n",
- value, MaxMetaspaceSize);
+ JVMFlag::printError(verbose,
+ "MaxMetaspaceSize (" SIZE_FORMAT ") must be "
+ "greater than or equal to MetaspaceSize (" SIZE_FORMAT ")\n",
+ value, MaxMetaspaceSize);
return JVMFlag::VIOLATES_CONSTRAINT;
} else {
return JVMFlag::SUCCESS;
@@ -463,17 +461,17 @@ JVMFlag::Error MaxMetaspaceSizeConstraintFunc(size_t value, bool verbose) {
JVMFlag::Error SurvivorAlignmentInBytesConstraintFunc(intx value, bool verbose) {
if (value != 0) {
if (!is_power_of_2(value)) {
- CommandLineError::print(verbose,
- "SurvivorAlignmentInBytes (" INTX_FORMAT ") must be "
- "power of 2\n",
- value);
+ JVMFlag::printError(verbose,
+ "SurvivorAlignmentInBytes (" INTX_FORMAT ") must be "
+ "power of 2\n",
+ value);
return JVMFlag::VIOLATES_CONSTRAINT;
}
if (value < ObjectAlignmentInBytes) {
- CommandLineError::print(verbose,
- "SurvivorAlignmentInBytes (" INTX_FORMAT ") must be "
- "greater than or equal to ObjectAlignmentInBytes (" INTX_FORMAT ")\n",
- value, ObjectAlignmentInBytes);
+ JVMFlag::printError(verbose,
+ "SurvivorAlignmentInBytes (" INTX_FORMAT ") must be "
+ "greater than or equal to ObjectAlignmentInBytes (" INTX_FORMAT ")\n",
+ value, ObjectAlignmentInBytes);
return JVMFlag::VIOLATES_CONSTRAINT;
}
}
diff --git a/src/hotspot/share/gc/shared/modRefBarrierSet.hpp b/src/hotspot/share/gc/shared/modRefBarrierSet.hpp
index fd86292a396..72b6fd4c197 100644
--- a/src/hotspot/share/gc/shared/modRefBarrierSet.hpp
+++ b/src/hotspot/share/gc/shared/modRefBarrierSet.hpp
@@ -83,7 +83,9 @@ public:
static oop oop_atomic_xchg_in_heap(oop new_value, T* addr);
template <typename T>
- static bool oop_arraycopy_in_heap(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length);
+ static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
+ arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
+ size_t length);
static void clone_in_heap(oop src, oop dst, size_t size);
diff --git a/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp b/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp
index 8c0db7dff66..d84f1aa892f 100644
--- a/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp
+++ b/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp
@@ -91,35 +91,41 @@ oop_atomic_xchg_in_heap(oop new_value, T* addr) {
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline bool ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
-oop_arraycopy_in_heap(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
+ arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
+ size_t length) {
BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
+ src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
+ dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);
+
if (!HasDecorator<decorators, ARRAYCOPY_CHECKCAST>::value) {
// Optimized covariant case
- bs->write_ref_array_pre(dst, length,
+ bs->write_ref_array_pre(dst_raw, length,
HasDecorator<decorators, AS_DEST_NOT_INITIALIZED>::value);
- Raw::oop_arraycopy(src_obj, dst_obj, src, dst, length);
- bs->write_ref_array((HeapWord*)dst, length);
+ Raw::oop_arraycopy(NULL, 0, src_raw, NULL, 0, dst_raw, length);
+ bs->write_ref_array((HeapWord*)dst_raw, length);
} else {
+ assert(dst_obj != NULL, "better have an actual oop");
Klass* bound = objArrayOop(dst_obj)->element_klass();
- T* from = src;
+ T* from = const_cast<T*>(src_raw);
T* end = from + length;
- for (T* p = dst; from < end; from++, p++) {
+ for (T* p = dst_raw; from < end; from++, p++) {
T element = *from;
if (oopDesc::is_instanceof_or_null(CompressedOops::decode(element), bound)) {
bs->template write_ref_field_pre<decorators>(p);
*p = element;
} else {
// We must do a barrier to cover the partial copy.
- const size_t pd = pointer_delta(p, dst, (size_t)heapOopSize);
+ const size_t pd = pointer_delta(p, dst_raw, (size_t)heapOopSize);
// pointer delta is scaled to number of elements (length field in
// objArrayOop) which we assume is 32 bit.
assert(pd == (size_t)(int)pd, "length field overflow");
- bs->write_ref_array((HeapWord*)dst, pd);
+ bs->write_ref_array((HeapWord*)dst_raw, pd);
return false;
}
}
- bs->write_ref_array((HeapWord*)dst, length);
+ bs->write_ref_array((HeapWord*)dst_raw, length);
}
return true;
}
diff --git a/src/hotspot/share/gc/shared/oopStorage.cpp b/src/hotspot/share/gc/shared/oopStorage.cpp
index 9feada8a266..b3389255ab7 100644
--- a/src/hotspot/share/gc/shared/oopStorage.cpp
+++ b/src/hotspot/share/gc/shared/oopStorage.cpp
@@ -33,7 +33,7 @@
#include "runtime/handles.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"
@@ -52,9 +52,7 @@ OopStorage::AllocateEntry::~AllocateEntry() {
assert(_next == NULL, "deleting attached block");
}
-OopStorage::AllocateList::AllocateList(const AllocateEntry& (*get_entry)(const Block& block)) :
- _head(NULL), _tail(NULL), _get_entry(get_entry)
-{}
+OopStorage::AllocateList::AllocateList() : _head(NULL), _tail(NULL) {}
OopStorage::AllocateList::~AllocateList() {
// ~OopStorage() empties its lists before destroying them.
@@ -68,8 +66,8 @@ void OopStorage::AllocateList::push_front(const Block& block) {
assert(_tail == NULL, "invariant");
_head = _tail = █
} else {
- _get_entry(block)._next = old;
- _get_entry(*old)._prev = █
+ block.allocate_entry()._next = old;
+ old->allocate_entry()._prev = █
_head = █
}
}
@@ -80,14 +78,14 @@ void OopStorage::AllocateList::push_back(const Block& block) {
assert(_head == NULL, "invariant");
_head = _tail = █
} else {
- _get_entry(*old)._next = █
- _get_entry(block)._prev = old;
+ old->allocate_entry()._next = █
+ block.allocate_entry()._prev = old;
_tail = █
}
}
void OopStorage::AllocateList::unlink(const Block& block) {
- const AllocateEntry& block_entry = _get_entry(block);
+ const AllocateEntry& block_entry = block.allocate_entry();
const Block* prev_blk = block_entry._prev;
const Block* next_blk = block_entry._next;
block_entry._prev = NULL;
@@ -98,15 +96,15 @@ void OopStorage::AllocateList::unlink(const Block& block) {
_head = _tail = NULL;
} else if (prev_blk == NULL) {
assert(_head == &block, "invariant");
- _get_entry(*next_blk)._prev = NULL;
+ next_blk->allocate_entry()._prev = NULL;
_head = next_blk;
} else if (next_blk == NULL) {
assert(_tail == &block, "invariant");
- _get_entry(*prev_blk)._next = NULL;
+ prev_blk->allocate_entry()._next = NULL;
_tail = prev_blk;
} else {
- _get_entry(*next_blk)._prev = prev_blk;
- _get_entry(*prev_blk)._next = next_blk;
+ next_blk->allocate_entry()._prev = prev_blk;
+ prev_blk->allocate_entry()._next = next_blk;
}
}
@@ -232,10 +230,6 @@ OopStorage::Block::~Block() {
const_cast<OopStorage* volatile&>(_owner) = NULL;
}
-const OopStorage::AllocateEntry& OopStorage::Block::get_allocate_entry(const Block& block) {
- return block._allocate_entry;
-}
-
size_t OopStorage::Block::allocation_size() {
// _data must be first member, so aligning Block aligns _data.
STATIC_ASSERT(_data_pos == 0);
@@ -769,7 +763,7 @@ OopStorage::OopStorage(const char* name,
Mutex* active_mutex) :
_name(dup_name(name)),
_active_array(ActiveArray::create(initial_active_array_size)),
- _allocate_list(&Block::get_allocate_entry),
+ _allocate_list(),
_deferred_updates(NULL),
_allocate_mutex(allocate_mutex),
_active_mutex(active_mutex),
@@ -907,7 +901,8 @@ size_t OopStorage::total_memory_usage() const {
// Parallel iteration support
uint OopStorage::BasicParState::default_estimated_thread_count(bool concurrent) {
- return concurrent ? ConcGCThreads : ParallelGCThreads;
+ uint configured = concurrent ? ConcGCThreads : ParallelGCThreads;
+ return MAX2(1u, configured); // Never estimate zero threads.
}
OopStorage::BasicParState::BasicParState(const OopStorage* storage,
diff --git a/src/hotspot/share/gc/shared/oopStorage.hpp b/src/hotspot/share/gc/shared/oopStorage.hpp
index ff2b0c18e0d..5479945275e 100644
--- a/src/hotspot/share/gc/shared/oopStorage.hpp
+++ b/src/hotspot/share/gc/shared/oopStorage.hpp
@@ -178,14 +178,13 @@ NOT_AIX( private: )
class AllocateList {
const Block* _head;
const Block* _tail;
- const AllocateEntry& (*_get_entry)(const Block& block);
// Noncopyable.
AllocateList(const AllocateList&);
AllocateList& operator=(const AllocateList&);
public:
- AllocateList(const AllocateEntry& (*get_entry)(const Block& block));
+ AllocateList();
~AllocateList();
Block* head();
diff --git a/src/hotspot/share/gc/shared/oopStorage.inline.hpp b/src/hotspot/share/gc/shared/oopStorage.inline.hpp
index 08ead02fc2d..20f9750f1e9 100644
--- a/src/hotspot/share/gc/shared/oopStorage.inline.hpp
+++ b/src/hotspot/share/gc/shared/oopStorage.inline.hpp
@@ -158,7 +158,7 @@ class OopStorage::Block /* No base class, to avoid messing up alignment. */ {
Block& operator=(const Block&);
public:
- static const AllocateEntry& get_allocate_entry(const Block& block);
+ const AllocateEntry& allocate_entry() const;
static size_t allocation_size();
static size_t allocation_alignment_shift();
@@ -214,19 +214,19 @@ inline const OopStorage::Block* OopStorage::AllocateList::ctail() const {
}
inline OopStorage::Block* OopStorage::AllocateList::prev(Block& block) {
- return const_cast<Block*>(_get_entry(block)._prev);
+ return const_cast<Block*>(block.allocate_entry()._prev);
}
inline OopStorage::Block* OopStorage::AllocateList::next(Block& block) {
- return const_cast<Block*>(_get_entry(block)._next);
+ return const_cast<Block*>(block.allocate_entry()._next);
}
inline const OopStorage::Block* OopStorage::AllocateList::prev(const Block& block) const {
- return _get_entry(block)._prev;
+ return block.allocate_entry()._prev;
}
inline const OopStorage::Block* OopStorage::AllocateList::next(const Block& block) const {
- return _get_entry(block)._next;
+ return block.allocate_entry()._next;
}
template<typename F>
@@ -296,7 +296,11 @@ inline OopStorage::SkipNullFn<F> OopStorage::skip_null_fn(F f) {
return SkipNullFn<F>(f);
}
-// Inline Block accesses for use in iteration inner loop.
+// Inline Block accesses for use in iteration loops.
+
+inline const OopStorage::AllocateEntry& OopStorage::Block::allocate_entry() const {
+ return _allocate_entry;
+}
inline void OopStorage::Block::check_index(unsigned index) const {
assert(index < ARRAY_SIZE(_data), "Index out of bounds: %u", index);
diff --git a/src/hotspot/share/gc/shared/referenceProcessor.cpp b/src/hotspot/share/gc/shared/referenceProcessor.cpp
index 4b8de99ce46..02ef4930af7 100644
--- a/src/hotspot/share/gc/shared/referenceProcessor.cpp
+++ b/src/hotspot/share/gc/shared/referenceProcessor.cpp
@@ -592,6 +592,28 @@ void ReferenceProcessor::set_active_mt_degree(uint v) {
_next_id = 0;
}
+bool ReferenceProcessor::need_balance_queues(DiscoveredList refs_lists[]) {
+ assert(_processing_is_mt, "why balance non-mt processing?");
+ // _num_queues is the processing degree. Only list entries up to
+ // _num_queues will be processed, so any non-empty lists beyond
+ // that must be redistributed to lists in that range. Even if not
+ // needed for that, balancing may be desirable to eliminate poor
+ // distribution of references among the lists.
+ if (ParallelRefProcBalancingEnabled) {
+ return true; // Configuration says do it.
+ } else {
+ // Configuration says don't balance, but if there are non-empty
+ // lists beyond the processing degree, then must ignore the
+ // configuration and balance anyway.
+ for (uint i = _num_queues; i < _max_num_queues; ++i) {
+ if (!refs_lists[i].is_empty()) {
+ return true; // Must balance despite configuration.
+ }
+ }
+ return false; // Safe to obey configuration and not balance.
+ }
+}
+
// Balances reference queues.
// Move entries from all queues[0, 1, ..., _max_num_q-1] to
// queues[0, 1, ..., _num_q-1] because only the first _num_q
@@ -690,7 +712,7 @@ void ReferenceProcessor::process_discovered_reflist(
phase_times->set_processing_is_mt(mt_processing);
- if (mt_processing && ParallelRefProcBalancingEnabled) {
+ if (mt_processing && need_balance_queues(refs_lists)) {
RefProcBalanceQueuesTimeTracker tt(phase_times);
balance_queues(refs_lists);
}
diff --git a/src/hotspot/share/gc/shared/referenceProcessor.hpp b/src/hotspot/share/gc/shared/referenceProcessor.hpp
index 65791c55441..bbdefd53bf9 100644
--- a/src/hotspot/share/gc/shared/referenceProcessor.hpp
+++ b/src/hotspot/share/gc/shared/referenceProcessor.hpp
@@ -320,7 +320,8 @@ private:
void log_reflist_counts(DiscoveredList ref_lists[], uint num_active_queues) PRODUCT_RETURN;
// Balances reference queues.
- void balance_queues(DiscoveredList ref_lists[]);
+ void balance_queues(DiscoveredList refs_lists[]);
+ bool need_balance_queues(DiscoveredList refs_lists[]);
// Update (advance) the soft ref master clock field.
void update_soft_ref_master_clock();
diff --git a/src/hotspot/share/gc/shared/space.cpp b/src/hotspot/share/gc/shared/space.cpp
index 5fa1ec8cae1..b8659e57f63 100644
--- a/src/hotspot/share/gc/shared/space.cpp
+++ b/src/hotspot/share/gc/shared/space.cpp
@@ -36,7 +36,7 @@
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/java.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/align.hpp"
diff --git a/src/hotspot/share/gc/shared/specialized_oop_closures.hpp b/src/hotspot/share/gc/shared/specialized_oop_closures.hpp
index af2ab821563..7f38678fd7b 100644
--- a/src/hotspot/share/gc/shared/specialized_oop_closures.hpp
+++ b/src/hotspot/share/gc/shared/specialized_oop_closures.hpp
@@ -35,6 +35,9 @@
#if INCLUDE_SERIALGC
#include "gc/serial/serial_specialized_oop_closures.hpp"
#endif
+#if INCLUDE_ZGC
+#include "gc/z/z_specialized_oop_closures.hpp"
+#endif
// The following OopClosure types get specialized versions of
// "oop_oop_iterate" that invoke the closures' do_oop methods
@@ -67,7 +70,8 @@ class OopsInGenClosure;
SERIALGC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(f)) \
CMSGC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(f)) \
G1GC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1(f)) \
- G1GC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1FULL(f))
+ G1GC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1FULL(f)) \
+ ZGC_ONLY(SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_Z(f))
// We separate these out, because sometime the general one has
// a different definition from the specialized ones, and sometimes it
diff --git a/src/hotspot/share/gc/shared/strongRootsScope.cpp b/src/hotspot/share/gc/shared/strongRootsScope.cpp
index 43a697f8cda..a167ba48958 100644
--- a/src/hotspot/share/gc/shared/strongRootsScope.cpp
+++ b/src/hotspot/share/gc/shared/strongRootsScope.cpp
@@ -38,8 +38,6 @@ MarkScope::~MarkScope() {
StrongRootsScope::StrongRootsScope(uint n_threads) : _n_threads(n_threads) {
Threads::change_thread_claim_parity();
- // Zero the claimed high water mark in the StringTable
- StringTable::clear_parallel_claimed_index();
}
StrongRootsScope::~StrongRootsScope() {
diff --git a/src/hotspot/share/gc/shared/taskqueue.inline.hpp b/src/hotspot/share/gc/shared/taskqueue.inline.hpp
index 476098d57aa..214bb3f222f 100644
--- a/src/hotspot/share/gc/shared/taskqueue.inline.hpp
+++ b/src/hotspot/share/gc/shared/taskqueue.inline.hpp
@@ -29,7 +29,7 @@
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
#include "utilities/debug.hpp"
#include "utilities/stack.inline.hpp"
diff --git a/src/hotspot/share/gc/shared/vmStructs_gc.hpp b/src/hotspot/share/gc/shared/vmStructs_gc.hpp
index 6594ed03c50..92c03d4f2bf 100644
--- a/src/hotspot/share/gc/shared/vmStructs_gc.hpp
+++ b/src/hotspot/share/gc/shared/vmStructs_gc.hpp
@@ -37,6 +37,9 @@
#if INCLUDE_CMSGC
#include "gc/cms/vmStructs_cms.hpp"
#endif
+#if INCLUDE_EPSILONGC
+#include "gc/epsilon/vmStructs_epsilon.hpp"
+#endif
#if INCLUDE_G1GC
#include "gc/g1/vmStructs_g1.hpp"
#endif
@@ -47,6 +50,9 @@
#include "gc/serial/defNewGeneration.hpp"
#include "gc/serial/vmStructs_serial.hpp"
#endif
+#if INCLUDE_ZGC
+#include "gc/z/vmStructs_z.hpp"
+#endif
#define VM_STRUCTS_GC(nonstatic_field, \
volatile_nonstatic_field, \
@@ -55,6 +61,9 @@
CMSGC_ONLY(VM_STRUCTS_CMSGC(nonstatic_field, \
volatile_nonstatic_field, \
static_field)) \
+ EPSILONGC_ONLY(VM_STRUCTS_EPSILONGC(nonstatic_field, \
+ volatile_nonstatic_field, \
+ static_field)) \
G1GC_ONLY(VM_STRUCTS_G1GC(nonstatic_field, \
volatile_nonstatic_field, \
static_field)) \
@@ -64,6 +73,10 @@
SERIALGC_ONLY(VM_STRUCTS_SERIALGC(nonstatic_field, \
volatile_nonstatic_field, \
static_field)) \
+ ZGC_ONLY(VM_STRUCTS_ZGC(nonstatic_field, \
+ volatile_nonstatic_field, \
+ static_field)) \
+ \
/**********************************************************************************/ \
/* Generation and Space hierarchies */ \
/**********************************************************************************/ \
@@ -153,6 +166,9 @@
CMSGC_ONLY(VM_TYPES_CMSGC(declare_type, \
declare_toplevel_type, \
declare_integer_type)) \
+ EPSILONGC_ONLY(VM_TYPES_EPSILONGC(declare_type, \
+ declare_toplevel_type, \
+ declare_integer_type)) \
G1GC_ONLY(VM_TYPES_G1GC(declare_type, \
declare_toplevel_type, \
declare_integer_type)) \
@@ -162,6 +178,10 @@
SERIALGC_ONLY(VM_TYPES_SERIALGC(declare_type, \
declare_toplevel_type, \
declare_integer_type)) \
+ ZGC_ONLY(VM_TYPES_ZGC(declare_type, \
+ declare_toplevel_type, \
+ declare_integer_type)) \
+ \
/******************************************/ \
/* Generation and space hierarchies */ \
/* (needed for run-time type information) */ \
@@ -225,12 +245,16 @@
declare_constant_with_value) \
CMSGC_ONLY(VM_INT_CONSTANTS_CMSGC(declare_constant, \
declare_constant_with_value)) \
+ EPSILONGC_ONLY(VM_INT_CONSTANTS_EPSILONGC(declare_constant, \
+ declare_constant_with_value)) \
G1GC_ONLY(VM_INT_CONSTANTS_G1GC(declare_constant, \
declare_constant_with_value)) \
PARALLELGC_ONLY(VM_INT_CONSTANTS_PARALLELGC(declare_constant, \
declare_constant_with_value)) \
SERIALGC_ONLY(VM_INT_CONSTANTS_SERIALGC(declare_constant, \
declare_constant_with_value)) \
+ ZGC_ONLY(VM_INT_CONSTANTS_ZGC(declare_constant, \
+ declare_constant_with_value)) \
\
/********************************************/ \
/* Generation and Space Hierarchy Constants */ \
@@ -274,5 +298,7 @@
declare_constant(Generation::LogOfGenGrain) \
declare_constant(Generation::GenGrain) \
+#define VM_LONG_CONSTANTS_GC(declare_constant) \
+ ZGC_ONLY(VM_LONG_CONSTANTS_ZGC(declare_constant))
#endif // SHARE_GC_SHARED_VMSTRUCTS_GC_HPP
diff --git a/src/hotspot/share/gc/shared/weakProcessor.cpp b/src/hotspot/share/gc/shared/weakProcessor.cpp
index 6be1bd2d96d..381863456c5 100644
--- a/src/hotspot/share/gc/shared/weakProcessor.cpp
+++ b/src/hotspot/share/gc/shared/weakProcessor.cpp
@@ -23,6 +23,8 @@
*/
#include "precompiled.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/jniHandles.hpp"
@@ -34,6 +36,7 @@
void WeakProcessor::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive) {
JNIHandles::weak_oops_do(is_alive, keep_alive);
JvmtiExport::weak_oops_do(is_alive, keep_alive);
+ SystemDictionary::vm_weak_oop_storage()->weak_oops_do(is_alive, keep_alive);
JFR_ONLY(Jfr::weak_oops_do(is_alive, keep_alive);)
}
diff --git a/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp b/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp
new file mode 100644
index 00000000000..a1b43005cf0
--- /dev/null
+++ b/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp
@@ -0,0 +1,247 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "c1/c1_LIR.hpp"
+#include "c1/c1_LIRGenerator.hpp"
+#include "c1/c1_CodeStubs.hpp"
+#include "gc/z/c1/zBarrierSetC1.hpp"
+#include "gc/z/zBarrierSet.hpp"
+#include "gc/z/zBarrierSetAssembler.hpp"
+#include "gc/z/zThreadLocalData.hpp"
+#include "utilities/macros.hpp"
+
+ZLoadBarrierStubC1::ZLoadBarrierStubC1(LIRAccess& access, LIR_Opr ref, address runtime_stub) :
+ _decorators(access.decorators()),
+ _ref_addr(access.resolved_addr()),
+ _ref(ref),
+ _tmp(LIR_OprFact::illegalOpr),
+ _patch_info(access.patch_emit_info()),
+ _runtime_stub(runtime_stub) {
+
+ // Allocate tmp register if needed
+ if (!_ref_addr->is_register()) {
+ assert(_ref_addr->is_address(), "Must be an address");
+ if (_ref_addr->as_address_ptr()->index()->is_valid() ||
+ _ref_addr->as_address_ptr()->disp() != 0) {
+ // Has index or displacement, need tmp register to load address into
+ _tmp = access.gen()->new_pointer_register();
+ } else {
+ // No index or displacement, address available in base register
+ _ref_addr = _ref_addr->as_address_ptr()->base();
+ }
+ }
+
+ assert(_ref->is_register(), "Must be a register");
+ assert(_ref_addr->is_register() != _tmp->is_register(), "Only one should be a register");
+}
+
+DecoratorSet ZLoadBarrierStubC1::decorators() const {
+ return _decorators;
+}
+
+LIR_Opr ZLoadBarrierStubC1::ref() const {
+ return _ref;
+}
+
+LIR_Opr ZLoadBarrierStubC1::ref_addr() const {
+ return _ref_addr;
+}
+
+LIR_Opr ZLoadBarrierStubC1::tmp() const {
+ return _tmp;
+}
+
+LIR_PatchCode ZLoadBarrierStubC1::patch_code() const {
+ return (_decorators & C1_NEEDS_PATCHING) != 0 ? lir_patch_normal : lir_patch_none;
+}
+
+CodeEmitInfo*& ZLoadBarrierStubC1::patch_info() {
+ return _patch_info;
+}
+
+address ZLoadBarrierStubC1::runtime_stub() const {
+ return _runtime_stub;
+}
+
+void ZLoadBarrierStubC1::visit(LIR_OpVisitState* visitor) {
+ if (_patch_info != NULL) {
+ visitor->do_slow_case(_patch_info);
+ } else {
+ visitor->do_slow_case();
+ }
+
+ visitor->do_input(_ref_addr);
+ visitor->do_output(_ref);
+
+ if (_tmp->is_valid()) {
+ visitor->do_temp(_tmp);
+ }
+}
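+
+// visit() declares the stub's operands to the register allocator: the field
+// address is an input, the (healed) reference is an output, and the optional
+// tmp is a temporary used to materialize the address for the runtime stub.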
+
+void ZLoadBarrierStubC1::emit_code(LIR_Assembler* ce) {
+ ZBarrierSet::assembler()->generate_c1_load_barrier_stub(ce, this);
+}
+
+#ifndef PRODUCT
+void ZLoadBarrierStubC1::print_name(outputStream* out) const {
+ out->print("ZLoadBarrierStubC1");
+}
+#endif // PRODUCT
+
+class LIR_OpZLoadBarrierTest : public LIR_Op {
+private:
+ LIR_Opr _opr;
+
+public:
+ LIR_OpZLoadBarrierTest(LIR_Opr opr) :
+ LIR_Op(),
+ _opr(opr) {}
+
+ virtual void visit(LIR_OpVisitState* state) {
+ state->do_input(_opr);
+ }
+
+ virtual void emit_code(LIR_Assembler* ce) {
+ ZBarrierSet::assembler()->generate_c1_load_barrier_test(ce, _opr);
+ }
+
+ virtual void print_instr(outputStream* out) const {
+ _opr->print(out);
+ out->print(" ");
+ }
+
+#ifndef PRODUCT
+ virtual const char* name() const {
+ return "lir_z_load_barrier_test";
+ }
+#endif // PRODUCT
+};
+
+static bool barrier_needed(LIRAccess& access) {
+ return ZBarrierSet::barrier_needed(access.decorators(), access.type());
+}
+
+ZBarrierSetC1::ZBarrierSetC1() :
+ _load_barrier_on_oop_field_preloaded_runtime_stub(NULL),
+ _load_barrier_on_weak_oop_field_preloaded_runtime_stub(NULL) {}
+
+address ZBarrierSetC1::load_barrier_on_oop_field_preloaded_runtime_stub(DecoratorSet decorators) const {
+ assert((decorators & ON_PHANTOM_OOP_REF) == 0, "Unsupported decorator");
+ //assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Unsupported decorator");
+
+ if ((decorators & ON_WEAK_OOP_REF) != 0) {
+ return _load_barrier_on_weak_oop_field_preloaded_runtime_stub;
+ } else {
+ return _load_barrier_on_oop_field_preloaded_runtime_stub;
+ }
+}
+
+#ifdef ASSERT
+#define __ access.gen()->lir(__FILE__, __LINE__)->
+#else
+#define __ access.gen()->lir()->
+#endif
+
+void ZBarrierSetC1::load_barrier(LIRAccess& access, LIR_Opr result) const {
+ // Fast path
+ __ append(new LIR_OpZLoadBarrierTest(result));
+
+ // Slow path
+ const address runtime_stub = load_barrier_on_oop_field_preloaded_runtime_stub(access.decorators());
+ CodeStub* const stub = new ZLoadBarrierStubC1(access, result, runtime_stub);
+ __ branch(lir_cond_notEqual, T_ADDRESS, stub);
+ __ branch_destination(stub->continuation());
+}
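+
+// The emitted code has roughly this shape (a sketch; the exact instructions
+// are produced by the platform ZBarrierSetAssembler):
+//
+//   test result, bad_mask    // fast path: check the loaded colored pointer
+//   jne  stub                // slow path: runtime call via ZLoadBarrierStubC1
+// continuation:
+//   ...
+//
+// The stub heals the field and returns with the good oop in 'result' before
+// jumping back to the continuation.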
+
+#undef __
+
+void ZBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
+ BarrierSetC1::load_at_resolved(access, result);
+
+ if (barrier_needed(access)) {
+ load_barrier(access, result);
+ }
+}
+
+static void pre_load_barrier(LIRAccess& access) {
+ DecoratorSet decorators = access.decorators();
+
+ // Downgrade access to MO_UNORDERED
+ decorators = (decorators & ~MO_DECORATOR_MASK) | MO_UNORDERED;
+
+ // Remove C1_WRITE_ACCESS
+ decorators = (decorators & ~C1_WRITE_ACCESS);
+
+ // Generate synthetic load at
+ access.gen()->access_load_at(decorators,
+ access.type(),
+ access.base().item(),
+ access.offset().opr(),
+ access.gen()->new_register(access.type()),
+ NULL /* patch_emit_info */,
+ NULL /* load_emit_info */);
+}
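+
+// The atomic operations below compare and exchange raw pointer bits, so the
+// accessed field must not hold a stale (bad-colored) pointer when they run.
+// Emitting a plain load first triggers the load barrier, which self-heals the
+// field; the unmodified BarrierSetC1 atomic implementations are then safe.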
+
+LIR_Opr ZBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) {
+ if (barrier_needed(access)) {
+ pre_load_barrier(access);
+ }
+
+ return BarrierSetC1::atomic_xchg_at_resolved(access, value);
+}
+
+LIR_Opr ZBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
+ if (barrier_needed(access)) {
+ pre_load_barrier(access);
+ }
+
+ return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value);
+}
+
+class ZLoadBarrierRuntimeStubCodeGenClosure : public StubAssemblerCodeGenClosure {
+private:
+ const DecoratorSet _decorators;
+
+public:
+ ZLoadBarrierRuntimeStubCodeGenClosure(DecoratorSet decorators) :
+ _decorators(decorators) {}
+
+ virtual OopMapSet* generate_code(StubAssembler* sasm) {
+ ZBarrierSet::assembler()->generate_c1_load_barrier_runtime_stub(sasm, _decorators);
+ return NULL;
+ }
+};
+
+static address generate_c1_runtime_stub(BufferBlob* blob, DecoratorSet decorators, const char* name) {
+ ZLoadBarrierRuntimeStubCodeGenClosure cl(decorators);
+ CodeBlob* const code_blob = Runtime1::generate_blob(blob, -1 /* stub_id */, name, false /* expect_oop_map*/, &cl);
+ return code_blob->code_begin();
+}
+
+void ZBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* blob) {
+ _load_barrier_on_oop_field_preloaded_runtime_stub =
+ generate_c1_runtime_stub(blob, ON_STRONG_OOP_REF, "load_barrier_on_oop_field_preloaded_runtime_stub");
+ _load_barrier_on_weak_oop_field_preloaded_runtime_stub =
+ generate_c1_runtime_stub(blob, ON_WEAK_OOP_REF, "load_barrier_on_weak_oop_field_preloaded_runtime_stub");
+}
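+
+// Both blobs are generated once, when C1 initializes its runtime stubs;
+// load_barrier_on_oop_field_preloaded_runtime_stub(decorators) later selects
+// the strong or weak variant for each ZLoadBarrierStubC1.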
diff --git a/src/hotspot/share/gc/z/c1/zBarrierSetC1.hpp b/src/hotspot/share/gc/z/c1/zBarrierSetC1.hpp
new file mode 100644
index 00000000000..4f1514f988c
--- /dev/null
+++ b/src/hotspot/share/gc/z/c1/zBarrierSetC1.hpp
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_C1_ZBARRIERSETC1_HPP
+#define SHARE_GC_Z_C1_ZBARRIERSETC1_HPP
+
+#include "c1/c1_CodeStubs.hpp"
+#include "c1/c1_IR.hpp"
+#include "c1/c1_LIR.hpp"
+#include "gc/shared/c1/barrierSetC1.hpp"
+#include "oops/accessDecorators.hpp"
+
+class ZLoadBarrierStubC1 : public CodeStub {
+private:
+ DecoratorSet _decorators;
+ LIR_Opr _ref_addr;
+ LIR_Opr _ref;
+ LIR_Opr _tmp;
+ CodeEmitInfo* _patch_info;
+ address _runtime_stub;
+
+public:
+ ZLoadBarrierStubC1(LIRAccess& access, LIR_Opr ref, address runtime_stub);
+
+ DecoratorSet decorators() const;
+ LIR_Opr ref() const;
+ LIR_Opr ref_addr() const;
+ LIR_Opr tmp() const;
+ LIR_PatchCode patch_code() const;
+ CodeEmitInfo*& patch_info();
+ address runtime_stub() const;
+
+ virtual void emit_code(LIR_Assembler* ce);
+ virtual void visit(LIR_OpVisitState* visitor);
+
+#ifndef PRODUCT
+ virtual void print_name(outputStream* out) const;
+#endif // PRODUCT
+};
+
+class ZBarrierSetC1 : public BarrierSetC1 {
+private:
+ address _load_barrier_on_oop_field_preloaded_runtime_stub;
+ address _load_barrier_on_weak_oop_field_preloaded_runtime_stub;
+
+ address load_barrier_on_oop_field_preloaded_runtime_stub(DecoratorSet decorators) const;
+ void load_barrier(LIRAccess& access, LIR_Opr result) const;
+
+protected:
+ virtual void load_at_resolved(LIRAccess& access, LIR_Opr result);
+ virtual LIR_Opr atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value);
+ virtual LIR_Opr atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value);
+
+public:
+ ZBarrierSetC1();
+
+ virtual void generate_c1_runtime_stubs(BufferBlob* blob);
+};
+
+#endif // SHARE_GC_Z_C1_ZBARRIERSETC1_HPP
diff --git a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp
new file mode 100644
index 00000000000..d63fd33d69f
--- /dev/null
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp
@@ -0,0 +1,1480 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "opto/compile.hpp"
+#include "opto/castnode.hpp"
+#include "opto/graphKit.hpp"
+#include "opto/idealKit.hpp"
+#include "opto/loopnode.hpp"
+#include "opto/macro.hpp"
+#include "opto/node.hpp"
+#include "opto/type.hpp"
+#include "utilities/macros.hpp"
+#include "gc/z/c2/zBarrierSetC2.hpp"
+#include "gc/z/zThreadLocalData.hpp"
+#include "gc/z/zBarrierSetRuntime.hpp"
+
+ZBarrierSetC2State::ZBarrierSetC2State(Arena* comp_arena)
+ : _load_barrier_nodes(new (comp_arena) GrowableArray<LoadBarrierNode*>(comp_arena, 8, 0, NULL)) {}
+
+int ZBarrierSetC2State::load_barrier_count() const {
+ return _load_barrier_nodes->length();
+}
+
+void ZBarrierSetC2State::add_load_barrier_node(LoadBarrierNode * n) {
+ assert(!_load_barrier_nodes->contains(n), " duplicate entry in expand list");
+ _load_barrier_nodes->append(n);
+}
+
+void ZBarrierSetC2State::remove_load_barrier_node(LoadBarrierNode * n) {
+ // this function may be called twice for a node so check
+ // that the node is in the array before attempting to remove it
+ if (_load_barrier_nodes->contains(n)) {
+ _load_barrier_nodes->remove(n);
+ }
+}
+
+LoadBarrierNode* ZBarrierSetC2State::load_barrier_node(int idx) const {
+ return _load_barrier_nodes->at(idx);
+}
+
+void* ZBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
+ return new(comp_arena) ZBarrierSetC2State(comp_arena);
+}
+
+ZBarrierSetC2State* ZBarrierSetC2::state() const {
+ return reinterpret_cast<ZBarrierSetC2State*>(Compile::current()->barrier_set_state());
+}
+
+bool ZBarrierSetC2::is_gc_barrier_node(Node* node) const {
+ return node->is_LoadBarrier();
+}
+
+void ZBarrierSetC2::register_potential_barrier_node(Node* node) const {
+ if (node->is_LoadBarrier()) {
+ state()->add_load_barrier_node(node->as_LoadBarrier());
+ }
+}
+
+void ZBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
+ if (node->is_LoadBarrier()) {
+ state()->remove_load_barrier_node(node->as_LoadBarrier());
+ }
+}
+
+void ZBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List &useful) const {
+ // Remove useless LoadBarrier nodes
+ ZBarrierSetC2State* s = state();
+ for (int i = s->load_barrier_count()-1; i >= 0; i--) {
+ LoadBarrierNode* n = s->load_barrier_node(i);
+ if (!useful.member(n)) {
+ unregister_potential_barrier_node(n);
+ }
+ }
+}
+
+void ZBarrierSetC2::enqueue_useful_gc_barrier(Unique_Node_List &worklist, Node* node) const {
+ if (node->is_LoadBarrier() && !node->as_LoadBarrier()->has_true_uses()) {
+ worklist.push(node);
+ }
+}
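+
+// A load barrier whose Oop result has no true uses only keeps itself alive;
+// pushing it here makes IGVN revisit it so that Ideal() can fold it away.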
+
+void ZBarrierSetC2::find_dominating_barriers(PhaseIterGVN& igvn) {
+ // Look for dominating barriers on the same address only once all
+ // other loop opts are over: loop opts may cause a safepoint to be
+ // inserted between a barrier and its dominating barrier.
+ Compile* C = Compile::current();
+ ZBarrierSetC2* bs = (ZBarrierSetC2*)BarrierSet::barrier_set()->barrier_set_c2();
+ ZBarrierSetC2State* s = bs->state();
+ if (s->load_barrier_count() >= 2) {
+ Compile::TracePhase tp("idealLoop", &C->timers[Phase::_t_idealLoop]);
+ PhaseIdealLoop ideal_loop(igvn, true, false, true);
+ if (C->major_progress()) C->print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
+ }
+}
+
+void ZBarrierSetC2::add_users_to_worklist(Unique_Node_List* worklist) const {
+ // Permanent temporary workaround
+ // Loadbarriers may have non-obvious dead uses keeping them alive during parsing. The use is
+ // removed by RemoveUseless (after parsing, before optimize) but the barriers won't be added to
+ // the worklist. Unless we add them explicitly they are not guaranteed to end up there.
+ ZBarrierSetC2State* s = state();
+
+ for (int i = 0; i < s->load_barrier_count(); i++) {
+ LoadBarrierNode* n = s->load_barrier_node(i);
+ worklist->push(n);
+ }
+}
+
+const TypeFunc* ZBarrierSetC2::load_barrier_Type() const {
+ const Type** fields;
+
+ // Create input types (domain)
+ fields = TypeTuple::fields(2);
+ fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;
+ fields[TypeFunc::Parms+1] = TypeOopPtr::BOTTOM;
+ const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
+
+ // Create result type (range)
+ fields = TypeTuple::fields(1);
+ fields[TypeFunc::Parms+0] = TypeInstPtr::BOTTOM;
+ const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
+
+ return TypeFunc::make(domain, range);
+}
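+
+// This signature mirrors the load barrier runtime entry: it takes the
+// preloaded (possibly stale) oop and the address it was loaded from, and
+// returns the healed oop.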
+
+// == LoadBarrierNode ==
+
+LoadBarrierNode::LoadBarrierNode(Compile* C,
+ Node* c,
+ Node* mem,
+ Node* val,
+ Node* adr,
+ bool weak,
+ bool writeback,
+ bool oop_reload_allowed) :
+ MultiNode(Number_of_Inputs),
+ _weak(weak),
+ _writeback(writeback),
+ _oop_reload_allowed(oop_reload_allowed) {
+ init_req(Control, c);
+ init_req(Memory, mem);
+ init_req(Oop, val);
+ init_req(Address, adr);
+ init_req(Similar, C->top());
+
+ init_class_id(Class_LoadBarrier);
+ BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
+ bs->register_potential_barrier_node(this);
+}
+
+const Type *LoadBarrierNode::bottom_type() const {
+ const Type** floadbarrier = (const Type **)(Compile::current()->type_arena()->Amalloc_4((Number_of_Outputs)*sizeof(Type*)));
+ Node* in_oop = in(Oop);
+ floadbarrier[Control] = Type::CONTROL;
+ floadbarrier[Memory] = Type::MEMORY;
+ floadbarrier[Oop] = in_oop == NULL ? Type::TOP : in_oop->bottom_type();
+ return TypeTuple::make(Number_of_Outputs, floadbarrier);
+}
+
+const Type *LoadBarrierNode::Value(PhaseGVN *phase) const {
+ const Type** floadbarrier = (const Type **)(phase->C->type_arena()->Amalloc_4((Number_of_Outputs)*sizeof(Type*)));
+ const Type* val_t = phase->type(in(Oop));
+ floadbarrier[Control] = Type::CONTROL;
+ floadbarrier[Memory] = Type::MEMORY;
+ floadbarrier[Oop] = val_t;
+ return TypeTuple::make(Number_of_Outputs, floadbarrier);
+}
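+
+// A LoadBarrierNode is a MultiNode producing a (Control, Memory, Oop) tuple;
+// bottom_type() and Value() above build that tuple type, with the Oop slot
+// tracking the type of the incoming oop.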
+
+bool LoadBarrierNode::is_dominator(PhaseIdealLoop* phase, bool linear_only, Node *d, Node *n) {
+ if (phase != NULL) {
+ return phase->is_dominator(d, n);
+ }
+
+ for (int i = 0; i < 10 && n != NULL; i++) {
+ n = IfNode::up_one_dom(n, linear_only);
+ if (n == d) {
+ return true;
+ }
+ }
+
+ return false;
+}
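+
+// Without a dominator tree (phase == NULL) the loop above is a bounded,
+// best-effort walk: it follows at most 10 immediate dominators through
+// IfNode::up_one_dom(), so a false result means "not proven", not
+// "not dominated".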
+
+LoadBarrierNode* LoadBarrierNode::has_dominating_barrier(PhaseIdealLoop* phase, bool linear_only, bool look_for_similar) {
+ Node* val = in(LoadBarrierNode::Oop);
+ if (in(Similar)->is_Proj() && in(Similar)->in(0)->is_LoadBarrier()) {
+ LoadBarrierNode* lb = in(Similar)->in(0)->as_LoadBarrier();
+ assert(lb->in(Address) == in(Address), "");
+ // Load barrier on Similar edge dominates so if it now has the Oop field it can replace this barrier.
+ if (lb->in(Oop) == in(Oop)) {
+ return lb;
+ }
+ // Follow chain of load barrier through Similar edges
+ while (!lb->in(Similar)->is_top()) {
+ lb = lb->in(Similar)->in(0)->as_LoadBarrier();
+ assert(lb->in(Address) == in(Address), "");
+ }
+ if (lb != in(Similar)->in(0)) {
+ return lb;
+ }
+ }
+ for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
+ Node* u = val->fast_out(i);
+ if (u != this && u->is_LoadBarrier() && u->in(Oop) == val && u->as_LoadBarrier()->has_true_uses()) {
+ Node* this_ctrl = in(LoadBarrierNode::Control);
+ Node* other_ctrl = u->in(LoadBarrierNode::Control);
+ if (is_dominator(phase, linear_only, other_ctrl, this_ctrl)) {
+ return u->as_LoadBarrier();
+ }
+ }
+ }
+
+ if (ZVerifyLoadBarriers || can_be_eliminated()) {
+ return NULL;
+ }
+
+ if (!look_for_similar) {
+ return NULL;
+ }
+
+ Node* addr = in(LoadBarrierNode::Address);
+ for (DUIterator_Fast imax, i = addr->fast_outs(imax); i < imax; i++) {
+ Node* u = addr->fast_out(i);
+ if (u != this && u->is_LoadBarrier() && u->as_LoadBarrier()->has_true_uses()) {
+ Node* this_ctrl = in(LoadBarrierNode::Control);
+ Node* other_ctrl = u->in(LoadBarrierNode::Control);
+ if (is_dominator(phase, linear_only, other_ctrl, this_ctrl)) {
+ ResourceMark rm;
+ Unique_Node_List wq;
+ wq.push(in(LoadBarrierNode::Control));
+ bool ok = true;
+ bool dom_found = false;
+ for (uint next = 0; next < wq.size(); ++next) {
+ Node *n = wq.at(next);
+ if (n->is_top()) {
+ return NULL;
+ }
+ assert(n->is_CFG(), "");
+ if (n->is_SafePoint()) {
+ ok = false;
+ break;
+ }
+ if (n == u) {
+ dom_found = true;
+ continue;
+ }
+ if (n->is_Region()) {
+ for (uint i = 1; i < n->req(); i++) {
+ Node* m = n->in(i);
+ if (m != NULL) {
+ wq.push(m);
+ }
+ }
+ } else {
+ Node* m = n->in(0);
+ if (m != NULL) {
+ wq.push(m);
+ }
+ }
+ }
+ if (ok) {
+ assert(dom_found, "");
+ return u->as_LoadBarrier();
+ }
+ break;
+ }
+ }
+ }
+
+ return NULL;
+}
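+
+// Note that the CFG walk above only accepts a barrier on the same address
+// when no safepoint lies between the two barriers: a safepoint may change
+// the good/bad mask and relocate objects, so the earlier barrier's healing
+// would no longer cover this load.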
+
+void LoadBarrierNode::push_dominated_barriers(PhaseIterGVN* igvn) const {
+ // A change to this barrier may affect dominated barriers, so re-push those.
+ Node* val = in(LoadBarrierNode::Oop);
+
+ for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
+ Node* u = val->fast_out(i);
+ if (u != this && u->is_LoadBarrier() && u->in(Oop) == val) {
+ Node* this_ctrl = in(Control);
+ Node* other_ctrl = u->in(Control);
+ if (is_dominator(NULL, false, this_ctrl, other_ctrl)) {
+ igvn->_worklist.push(u);
+ }
+    }
+  }
+
+  Node* addr = in(LoadBarrierNode::Address);
+  for (DUIterator_Fast imax, i = addr->fast_outs(imax); i < imax; i++) {
+    Node* u = addr->fast_out(i);
+    if (u != this && u->is_LoadBarrier() && u->in(Similar)->is_top()) {
+      Node* this_ctrl = in(Control);
+      Node* other_ctrl = u->in(Control);
+      if (is_dominator(NULL, false, this_ctrl, other_ctrl)) {
+        igvn->_worklist.push(u);
+      }
+    }
+  }
+}
+
+Node *LoadBarrierNode::Identity(PhaseGVN *phase) {
+ if (!phase->C->directive()->ZOptimizeLoadBarriersOption) {
+ return this;
+ }
+
+ LoadBarrierNode* dominating_barrier = has_dominating_barrier(NULL, true, false);
+ if (dominating_barrier != NULL) {
+ assert(dominating_barrier->in(Oop) == in(Oop), "");
+ return dominating_barrier;
+ }
+
+ return this;
+}
+
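+// Ideal tries, in order: pruning a dead control region, narrowing Memory to
+// the raw memory slice, linking to a dominating barrier through the Similar
+// edge, eliminating the barrier when it is provably redundant or unused, and
+// clearing a Similar edge that no longer points at a barrier.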
+Node *LoadBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+ if (remove_dead_region(phase, can_reshape)) {
+ return this;
+ }
+
+ Node* val = in(Oop);
+ Node* mem = in(Memory);
+ Node* ctrl = in(Control);
+ Node* adr = in(Address);
+ assert(val->Opcode() != Op_LoadN, "");
+
+ if (mem->is_MergeMem()) {
+ Node* new_mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
+ set_req(Memory, new_mem);
+ if (mem->outcnt() == 0 && can_reshape) {
+ phase->is_IterGVN()->_worklist.push(mem);
+ }
+
+ return this;
+ }
+
+ bool optimizeLoadBarriers = phase->C->directive()->ZOptimizeLoadBarriersOption;
+ LoadBarrierNode* dominating_barrier = optimizeLoadBarriers ? has_dominating_barrier(NULL, !can_reshape, !phase->C->major_progress()) : NULL;
+ if (dominating_barrier != NULL && dominating_barrier->in(Oop) != in(Oop)) {
+ assert(in(Address) == dominating_barrier->in(Address), "");
+ set_req(Similar, dominating_barrier->proj_out(Oop));
+ return this;
+ }
+
+ bool eliminate = (optimizeLoadBarriers && !(val->is_Phi() || val->Opcode() == Op_LoadP || val->Opcode() == Op_GetAndSetP || val->is_DecodeN())) ||
+ (can_reshape && (dominating_barrier != NULL || !has_true_uses()));
+
+ if (eliminate) {
+ if (can_reshape) {
+ PhaseIterGVN* igvn = phase->is_IterGVN();
+ Node* out_ctrl = proj_out_or_null(Control);
+ Node* out_res = proj_out_or_null(Oop);
+
+ if (out_ctrl != NULL) {
+ igvn->replace_node(out_ctrl, ctrl);
+ }
+
+ // That transformation may cause the Similar edge on the load barrier to be invalid
+ fix_similar_in_uses(igvn);
+ if (out_res != NULL) {
+ if (dominating_barrier != NULL) {
+ igvn->replace_node(out_res, dominating_barrier->proj_out(Oop));
+ } else {
+ igvn->replace_node(out_res, val);
+ }
+ }
+ }
+
+ return new ConINode(TypeInt::ZERO);
+ }
+
+ // If the Similar edge is no longer a load barrier, clear it
+ Node* similar = in(Similar);
+ if (!similar->is_top() && !(similar->is_Proj() && similar->in(0)->is_LoadBarrier())) {
+ set_req(Similar, phase->C->top());
+ return this;
+ }
+
+ if (can_reshape) {
+ // If this barrier is linked through the Similar edge by a
+ // dominated barrier and both barriers have the same Oop field,
+ // the dominated barrier can go away, so push it for reprocessing.
+ // We also want to avoid a barrier to depend on another dominating
+ // barrier through its Similar edge that itself depend on another
+ // barrier through its Similar edge and rather have the first
+ // depend on the third.
+ PhaseIterGVN* igvn = phase->is_IterGVN();
+ Node* out_res = proj_out(Oop);
+ for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
+ Node* u = out_res->fast_out(i);
+ if (u->is_LoadBarrier() && u->in(Similar) == out_res &&
+ (u->in(Oop) == val || !u->in(Similar)->is_top())) {
+ igvn->_worklist.push(u);
+ }
+ }
+
+ push_dominated_barriers(igvn);
+ }
+
+ return NULL;
+}
+
+void LoadBarrierNode::fix_similar_in_uses(PhaseIterGVN* igvn) {
+ Node* out_res = proj_out_or_null(Oop);
+ if (out_res == NULL) {
+ return;
+ }
+
+ for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
+ Node* u = out_res->fast_out(i);
+ if (u->is_LoadBarrier() && u->in(Similar) == out_res) {
+ igvn->replace_input_of(u, Similar, igvn->C->top());
+ --i;
+ --imax;
+ }
+ }
+}
+
+bool LoadBarrierNode::has_true_uses() const {
+ Node* out_res = proj_out_or_null(Oop);
+ if (out_res == NULL) {
+ return false;
+ }
+
+ for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
+ Node* u = out_res->fast_out(i);
+ if (!u->is_LoadBarrier() || u->in(Similar) != out_res) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+// == Accesses ==
+
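+// A ZGC load barrier checks a loaded oop against the current bad mask and,
+// if needed, heals it (remapping or marking it) before use. For CAS and
+// CMPX the expected value must also be compared against the healed oop, or
+// a stale-but-healable oop in memory would make the operation fail
+// spuriously. A rough sketch of the retry shape built below:
+//
+//   res = cas(adr, expected, new);
+//   if (failed) {
+//     healed = load_barrier(reload(adr));
+//     if (healed == expected) {
+//       res = cas(adr, expected, new);  // retry against healed memory
+//     }
+//   }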
+Node* ZBarrierSetC2::make_cas_loadbarrier(C2AtomicAccess& access) const {
+ assert(!UseCompressedOops, "Not allowed");
+ CompareAndSwapNode* cas = (CompareAndSwapNode*)access.raw_access();
+ PhaseGVN& gvn = access.kit()->gvn();
+ Compile* C = Compile::current();
+ GraphKit* kit = access.kit();
+
+ Node* in_ctrl = cas->in(MemNode::Control);
+ Node* in_mem = cas->in(MemNode::Memory);
+ Node* in_adr = cas->in(MemNode::Address);
+ Node* in_val = cas->in(MemNode::ValueIn);
+ Node* in_expected = cas->in(LoadStoreConditionalNode::ExpectedIn);
+
+ float likely = PROB_LIKELY(0.999);
+
+ const TypePtr *adr_type = gvn.type(in_adr)->isa_ptr();
+ Compile::AliasType* alias_type = C->alias_type(adr_type);
+ int alias_idx = C->get_alias_index(adr_type);
+
+ // Outer check - true: continue, false: load and check
+ Node* region = new RegionNode(3);
+ Node* phi = new PhiNode(region, TypeInt::BOOL);
+ Node* phi_mem = new PhiNode(region, Type::MEMORY, adr_type);
+
+ // Inner check - is the healed ref equal to the expected
+ Node* region2 = new RegionNode(3);
+ Node* phi2 = new PhiNode(region2, TypeInt::BOOL);
+ Node* phi_mem2 = new PhiNode(region2, Type::MEMORY, adr_type);
+
+ // CAS node returns 0 or 1
+ Node* cmp = gvn.transform(new CmpINode(cas, kit->intcon(0)));
+ Node* bol = gvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool();
+ IfNode* iff = gvn.transform(new IfNode(in_ctrl, bol, likely, COUNT_UNKNOWN))->as_If();
+ Node* then = gvn.transform(new IfTrueNode(iff));
+ Node* elsen = gvn.transform(new IfFalseNode(iff));
+
+ Node* scmemproj1 = gvn.transform(new SCMemProjNode(cas));
+
+ kit->set_memory(scmemproj1, alias_idx);
+ phi_mem->init_req(1, scmemproj1);
+ phi_mem2->init_req(2, scmemproj1);
+
+ // CAS fail - reload and heal oop
+ Node* reload = kit->make_load(elsen, in_adr, TypeOopPtr::BOTTOM, T_OBJECT, MemNode::unordered);
+ Node* barrier = gvn.transform(new LoadBarrierNode(C, elsen, scmemproj1, reload, in_adr, false, true, false));
+ Node* barrierctrl = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control));
+ Node* barrierdata = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Oop));
+
+ // Check load
+ Node* tmpX = gvn.transform(new CastP2XNode(NULL, barrierdata));
+ Node* in_expX = gvn.transform(new CastP2XNode(NULL, in_expected));
+ Node* cmp2 = gvn.transform(new CmpXNode(tmpX, in_expX));
+ Node *bol2 = gvn.transform(new BoolNode(cmp2, BoolTest::ne))->as_Bool();
+ IfNode* iff2 = gvn.transform(new IfNode(barrierctrl, bol2, likely, COUNT_UNKNOWN))->as_If();
+ Node* then2 = gvn.transform(new IfTrueNode(iff2));
+ Node* elsen2 = gvn.transform(new IfFalseNode(iff2));
+
+  // Redo CAS
+ Node* cas2 = gvn.transform(new CompareAndSwapPNode(elsen2, kit->memory(alias_idx), in_adr, in_val, in_expected, cas->order()));
+ Node* scmemproj2 = gvn.transform(new SCMemProjNode(cas2));
+ kit->set_control(elsen2);
+ kit->set_memory(scmemproj2, alias_idx);
+
+  // Merge inner flow - check if the healed oop was equal to the expected value
+ region2->set_req(1, kit->control());
+ region2->set_req(2, then2);
+ phi2->set_req(1, cas2);
+ phi2->set_req(2, kit->intcon(0));
+ phi_mem2->init_req(1, scmemproj2);
+ kit->set_memory(phi_mem2, alias_idx);
+
+  // Merge outer flow - check if the first CAS succeeded
+ region->set_req(1, then);
+ region->set_req(2, region2);
+ phi->set_req(1, kit->intcon(1));
+ phi->set_req(2, phi2);
+ phi_mem->init_req(2, phi_mem2);
+ kit->set_memory(phi_mem, alias_idx);
+
+ gvn.transform(region2);
+ gvn.transform(phi2);
+ gvn.transform(phi_mem2);
+ gvn.transform(region);
+ gvn.transform(phi);
+ gvn.transform(phi_mem);
+
+ kit->set_control(region);
+ kit->insert_mem_bar(Op_MemBarCPUOrder);
+
+ return phi;
+}
+
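+// Same retry structure as make_cas_loadbarrier above, but for
+// compareAndExchange: the result phi carries the oop seen at the address
+// rather than a success flag.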
+Node* ZBarrierSetC2::make_cmpx_loadbarrier(C2AtomicAccess& access) const {
+ CompareAndExchangePNode* cmpx = (CompareAndExchangePNode*)access.raw_access();
+ GraphKit* kit = access.kit();
+ PhaseGVN& gvn = kit->gvn();
+ Compile* C = Compile::current();
+
+ Node* in_ctrl = cmpx->in(MemNode::Control);
+ Node* in_mem = cmpx->in(MemNode::Memory);
+ Node* in_adr = cmpx->in(MemNode::Address);
+ Node* in_val = cmpx->in(MemNode::ValueIn);
+ Node* in_expected = cmpx->in(LoadStoreConditionalNode::ExpectedIn);
+
+ float likely = PROB_LIKELY(0.999);
+
+ const TypePtr *adr_type = cmpx->get_ptr_type();
+ Compile::AliasType* alias_type = C->alias_type(adr_type);
+ int alias_idx = C->get_alias_index(adr_type);
+
+ // Outer check - true: continue, false: load and check
+ Node* region = new RegionNode(3);
+ Node* phi = new PhiNode(region, adr_type);
+
+ // Inner check - is the healed ref equal to the expected
+ Node* region2 = new RegionNode(3);
+ Node* phi2 = new PhiNode(region2, adr_type);
+
+  // Check if cmpx succeeded
+ Node* cmp = gvn.transform(new CmpPNode(cmpx, in_expected));
+ Node* bol = gvn.transform(new BoolNode(cmp, BoolTest::eq))->as_Bool();
+ IfNode* iff = gvn.transform(new IfNode(in_ctrl, bol, likely, COUNT_UNKNOWN))->as_If();
+ Node* then = gvn.transform(new IfTrueNode(iff));
+ Node* elsen = gvn.transform(new IfFalseNode(iff));
+
+ Node* scmemproj1 = gvn.transform(new SCMemProjNode(cmpx));
+ kit->set_memory(scmemproj1, alias_idx);
+
+ // CAS fail - reload and heal oop
+ Node* reload = kit->make_load(elsen, in_adr, TypeOopPtr::BOTTOM, T_OBJECT, MemNode::unordered);
+ Node* barrier = gvn.transform(new LoadBarrierNode(C, elsen, scmemproj1, reload, in_adr, false, true, false));
+ Node* barrierctrl = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control));
+ Node* barrierdata = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Oop));
+
+ // Check load
+ Node* tmpX = gvn.transform(new CastP2XNode(NULL, barrierdata));
+ Node* in_expX = gvn.transform(new CastP2XNode(NULL, in_expected));
+ Node* cmp2 = gvn.transform(new CmpXNode(tmpX, in_expX));
+ Node *bol2 = gvn.transform(new BoolNode(cmp2, BoolTest::ne))->as_Bool();
+ IfNode* iff2 = gvn.transform(new IfNode(barrierctrl, bol2, likely, COUNT_UNKNOWN))->as_If();
+ Node* then2 = gvn.transform(new IfTrueNode(iff2));
+ Node* elsen2 = gvn.transform(new IfFalseNode(iff2));
+
+ // Redo CAS
+ Node* cmpx2 = gvn.transform(new CompareAndExchangePNode(elsen2, kit->memory(alias_idx), in_adr, in_val, in_expected, adr_type, cmpx->get_ptr_type(), cmpx->order()));
+ Node* scmemproj2 = gvn.transform(new SCMemProjNode(cmpx2));
+ kit->set_control(elsen2);
+ kit->set_memory(scmemproj2, alias_idx);
+
+  // Merge inner flow - check if the healed oop was equal to the expected value
+ region2->set_req(1, kit->control());
+ region2->set_req(2, then2);
+ phi2->set_req(1, cmpx2);
+ phi2->set_req(2, barrierdata);
+
+  // Merge outer flow - check if the first cmpx succeeded
+ region->set_req(1, then);
+ region->set_req(2, region2);
+ phi->set_req(1, cmpx);
+ phi->set_req(2, phi2);
+
+ gvn.transform(region2);
+ gvn.transform(phi2);
+ gvn.transform(region);
+ gvn.transform(phi);
+
+ kit->set_control(region);
+ kit->set_memory(in_mem, alias_idx);
+ kit->insert_mem_bar(Op_MemBarCPUOrder);
+
+ return phi;
+}
+
+Node* ZBarrierSetC2::load_barrier(GraphKit* kit, Node* val, Node* adr, bool weak, bool writeback, bool oop_reload_allowed) const {
+ PhaseGVN& gvn = kit->gvn();
+ Node* barrier = new LoadBarrierNode(Compile::current(), kit->control(), kit->memory(TypeRawPtr::BOTTOM), val, adr, weak, writeback, oop_reload_allowed);
+ Node* transformed_barrier = gvn.transform(barrier);
+
+ if (transformed_barrier->is_LoadBarrier()) {
+ if (barrier == transformed_barrier) {
+ kit->set_control(gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control)));
+ }
+ return gvn.transform(new ProjNode(transformed_barrier, LoadBarrierNode::Oop));
+ } else {
+ return val;
+ }
+}
+
+static bool barrier_needed(C2Access access) {
+ return ZBarrierSet::barrier_needed(access.decorators(), access.type());
+}
+
+Node* ZBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
+ Node* p = BarrierSetC2::load_at_resolved(access, val_type);
+ if (!barrier_needed(access)) {
+ return p;
+ }
+
+ bool weak = (access.decorators() & ON_WEAK_OOP_REF) != 0;
+
+ GraphKit* kit = access.kit();
+ PhaseGVN& gvn = kit->gvn();
+ Node* adr = access.addr().node();
+ Node* heap_base_oop = access.base();
+ bool unsafe = (access.decorators() & C2_UNSAFE_ACCESS) != 0;
+ if (unsafe) {
+ if (!ZVerifyLoadBarriers) {
+ p = load_barrier(kit, p, adr);
+ } else {
+ if (!TypePtr::NULL_PTR->higher_equal(gvn.type(heap_base_oop))) {
+ p = load_barrier(kit, p, adr);
+ } else {
+ IdealKit ideal(kit);
+ IdealVariable res(ideal);
+#define __ ideal.
+ __ declarations_done();
+ __ set(res, p);
+ __ if_then(heap_base_oop, BoolTest::ne, kit->null(), PROB_UNLIKELY(0.999)); {
+ kit->sync_kit(ideal);
+ p = load_barrier(kit, p, adr);
+ __ set(res, p);
+ __ sync_kit(kit);
+ } __ end_if();
+ kit->final_sync(ideal);
+ p = __ value(res);
+#undef __
+ }
+ }
+ return p;
+ } else {
+ return load_barrier(access.kit(), p, access.addr().node(), weak, true, true);
+ }
+}
+
+Node* ZBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node* expected_val,
+ Node* new_val, const Type* val_type) const {
+ Node* result = BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, val_type);
+ if (!barrier_needed(access)) {
+ return result;
+ }
+
+ access.set_needs_pinning(false);
+ return make_cmpx_loadbarrier(access);
+}
+
+Node* ZBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node* expected_val,
+ Node* new_val, const Type* value_type) const {
+ Node* result = BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
+ if (!barrier_needed(access)) {
+ return result;
+ }
+
+  Node* load_store = access.raw_access();
+  bool expected_is_null = (expected_val->get_ptr_type() == TypePtr::NULL_PTR);
+
+  if (!expected_is_null) {
+    access.set_needs_pinning(false);
+    load_store = make_cas_loadbarrier(access);
+  }
+
+ return load_store;
+}
+
+Node* ZBarrierSetC2::atomic_xchg_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* val_type) const {
+ Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, new_val, val_type);
+ if (!barrier_needed(access)) {
+ return result;
+ }
+
+ Node* load_store = access.raw_access();
+ Node* adr = access.addr().node();
+
+ return load_barrier(access.kit(), load_store, adr, false, false, false);
+}
+
+// == Macro Expansion ==
+
+void ZBarrierSetC2::expand_loadbarrier_node(PhaseMacroExpand* phase, LoadBarrierNode* barrier) const {
+ Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
+ Node* in_mem = barrier->in(LoadBarrierNode::Memory);
+ Node* in_val = barrier->in(LoadBarrierNode::Oop);
+ Node* in_adr = barrier->in(LoadBarrierNode::Address);
+
+ Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
+ Node* out_res = barrier->proj_out(LoadBarrierNode::Oop);
+
+ PhaseIterGVN &igvn = phase->igvn();
+
+ if (ZVerifyLoadBarriers) {
+ igvn.replace_node(out_res, in_val);
+ igvn.replace_node(out_ctrl, in_ctrl);
+ return;
+ }
+
+ if (barrier->can_be_eliminated()) {
+ // Clone and pin the load for this barrier below the dominating
+ // barrier: the load cannot be allowed to float above the
+ // dominating barrier
+ Node* load = in_val;
+
+ if (load->is_Load()) {
+ Node* new_load = load->clone();
+ Node* addp = new_load->in(MemNode::Address);
+ assert(addp->is_AddP() || addp->is_Phi() || addp->is_Load(), "bad address");
+ Node* cast = new CastPPNode(addp, igvn.type(addp), true);
+ Node* ctrl = NULL;
+ Node* similar = barrier->in(LoadBarrierNode::Similar);
+ if (similar->is_Phi()) {
+ // already expanded
+ ctrl = similar->in(0);
+ } else {
+ assert(similar->is_Proj() && similar->in(0)->is_LoadBarrier(), "unexpected graph shape");
+ ctrl = similar->in(0)->as_LoadBarrier()->proj_out(LoadBarrierNode::Control);
+ }
+ assert(ctrl != NULL, "bad control");
+ cast->set_req(0, ctrl);
+ igvn.transform(cast);
+ new_load->set_req(MemNode::Address, cast);
+ igvn.transform(new_load);
+
+ igvn.replace_node(out_res, new_load);
+ igvn.replace_node(out_ctrl, in_ctrl);
+ return;
+ }
+ // cannot eliminate
+ }
+
+  // There are two cases that require the basic loadbarrier
+  // 1) When the writeback of a healed oop must be avoided (swap)
+  // 2) When we must guarantee that no reload is done (swap, cas, cmpx)
+  if (!barrier->is_writeback()) {
+    assert(!barrier->oop_reload_allowed(), "non-writeback barriers must not be allowed to reload the oop");
+  }
+
+ if (!barrier->oop_reload_allowed()) {
+ expand_loadbarrier_basic(phase, barrier);
+ } else {
+ expand_loadbarrier_optimized(phase, barrier);
+ }
+}
+
+// Basic loadbarrier using conventional arg passing
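+//
+// A sketch of the emitted shape, using the runtime entries referenced below:
+//
+//   if ((cast_to_x(oop) & thread->address_bad_mask) != 0) {
+//     // Leaf call heals the oop; with a null adr it is not written back
+//     oop = ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded(oop, adr);
+//   }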
+void ZBarrierSetC2::expand_loadbarrier_basic(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const {
+ PhaseIterGVN &igvn = phase->igvn();
+
+ Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
+ Node* in_mem = barrier->in(LoadBarrierNode::Memory);
+ Node* in_val = barrier->in(LoadBarrierNode::Oop);
+ Node* in_adr = barrier->in(LoadBarrierNode::Address);
+
+ Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
+ Node* out_res = barrier->proj_out(LoadBarrierNode::Oop);
+
+ float unlikely = PROB_UNLIKELY(0.999);
+ const Type* in_val_maybe_null_t = igvn.type(in_val);
+
+ Node* jthread = igvn.transform(new ThreadLocalNode());
+ Node* adr = phase->basic_plus_adr(jthread, in_bytes(ZThreadLocalData::address_bad_mask_offset()));
+ Node* bad_mask = igvn.transform(LoadNode::make(igvn, in_ctrl, in_mem, adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered));
+ Node* cast = igvn.transform(new CastP2XNode(in_ctrl, in_val));
+ Node* obj_masked = igvn.transform(new AndXNode(cast, bad_mask));
+ Node* cmp = igvn.transform(new CmpXNode(obj_masked, igvn.zerocon(TypeX_X->basic_type())));
+ Node *bol = igvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool();
+ IfNode* iff = igvn.transform(new IfNode(in_ctrl, bol, unlikely, COUNT_UNKNOWN))->as_If();
+ Node* then = igvn.transform(new IfTrueNode(iff));
+ Node* elsen = igvn.transform(new IfFalseNode(iff));
+
+ Node* result_region;
+ Node* result_val;
+
+ result_region = new RegionNode(3);
+ result_val = new PhiNode(result_region, TypeInstPtr::BOTTOM);
+
+ result_region->set_req(1, elsen);
+ Node* res = igvn.transform(new CastPPNode(in_val, in_val_maybe_null_t));
+ res->init_req(0, elsen);
+ result_val->set_req(1, res);
+
+ const TypeFunc *tf = load_barrier_Type();
+ Node* call;
+ if (barrier->is_weak()) {
+ call = new CallLeafNode(tf,
+ ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded_addr(),
+ "ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded",
+ TypeRawPtr::BOTTOM);
+ } else {
+ call = new CallLeafNode(tf,
+ ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(),
+ "ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded",
+ TypeRawPtr::BOTTOM);
+ }
+
+ call->init_req(TypeFunc::Control, then);
+ call->init_req(TypeFunc::I_O , phase->top());
+ call->init_req(TypeFunc::Memory , in_mem);
+ call->init_req(TypeFunc::FramePtr, phase->top());
+ call->init_req(TypeFunc::ReturnAdr, phase->top());
+ call->init_req(TypeFunc::Parms+0, in_val);
+ if (barrier->is_writeback()) {
+ call->init_req(TypeFunc::Parms+1, in_adr);
+ } else {
+    // When the slow path is called with a null adr, the healed oop will not be written back
+ call->init_req(TypeFunc::Parms+1, igvn.zerocon(T_OBJECT));
+ }
+ call = igvn.transform(call);
+
+ Node* ctrl = igvn.transform(new ProjNode(call, TypeFunc::Control));
+ res = igvn.transform(new ProjNode(call, TypeFunc::Parms));
+ res = igvn.transform(new CheckCastPPNode(ctrl, res, in_val_maybe_null_t));
+
+ result_region->set_req(2, ctrl);
+ result_val->set_req(2, res);
+
+ result_region = igvn.transform(result_region);
+ result_val = igvn.transform(result_val);
+
+  if (out_ctrl != NULL) {
+ igvn.replace_node(out_ctrl, result_region);
+ }
+ igvn.replace_node(out_res, result_val);
+}
+
+// Optimized, low spill, loadbarrier variant using stub specialized on register used
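+// The fast path test is the same as in the basic variant, but the slow path
+// is a LoadBarrierSlowRegNode (or the weak variant), which matches to a stub
+// specialized on the destination register, so almost nothing needs to be
+// spilled around the call.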
+void ZBarrierSetC2::expand_loadbarrier_optimized(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const {
+ PhaseIterGVN &igvn = phase->igvn();
+#ifdef PRINT_NODE_TRAVERSALS
+ Node* preceding_barrier_node = barrier->in(LoadBarrierNode::Oop);
+#endif
+
+ Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
+ Node* in_mem = barrier->in(LoadBarrierNode::Memory);
+ Node* in_val = barrier->in(LoadBarrierNode::Oop);
+ Node* in_adr = barrier->in(LoadBarrierNode::Address);
+
+ Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
+ Node* out_res = barrier->proj_out(LoadBarrierNode::Oop);
+
+ assert(barrier->in(LoadBarrierNode::Oop) != NULL, "oop to loadbarrier node cannot be null");
+
+#ifdef PRINT_NODE_TRAVERSALS
+ tty->print("\n\n\nBefore barrier optimization:\n");
+ traverse(barrier, out_ctrl, out_res, -1);
+
+ tty->print("\nBefore barrier optimization: preceding_barrier_node\n");
+ traverse(preceding_barrier_node, out_ctrl, out_res, -1);
+#endif
+
+ float unlikely = PROB_UNLIKELY(0.999);
+
+ Node* jthread = igvn.transform(new ThreadLocalNode());
+ Node* adr = phase->basic_plus_adr(jthread, in_bytes(ZThreadLocalData::address_bad_mask_offset()));
+ Node* bad_mask = igvn.transform(LoadNode::make(igvn, in_ctrl, in_mem, adr,
+ TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(),
+ MemNode::unordered));
+ Node* cast = igvn.transform(new CastP2XNode(in_ctrl, in_val));
+ Node* obj_masked = igvn.transform(new AndXNode(cast, bad_mask));
+ Node* cmp = igvn.transform(new CmpXNode(obj_masked, igvn.zerocon(TypeX_X->basic_type())));
+ Node *bol = igvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool();
+ IfNode* iff = igvn.transform(new IfNode(in_ctrl, bol, unlikely, COUNT_UNKNOWN))->as_If();
+ Node* then = igvn.transform(new IfTrueNode(iff));
+ Node* elsen = igvn.transform(new IfFalseNode(iff));
+
+ Node* slow_path_surrogate;
+ if (!barrier->is_weak()) {
+ slow_path_surrogate = igvn.transform(new LoadBarrierSlowRegNode(then, in_mem, in_adr, in_val->adr_type(),
+ (const TypePtr*) in_val->bottom_type(), MemNode::unordered));
+ } else {
+ slow_path_surrogate = igvn.transform(new LoadBarrierWeakSlowRegNode(then, in_mem, in_adr, in_val->adr_type(),
+ (const TypePtr*) in_val->bottom_type(), MemNode::unordered));
+ }
+
+  Node* new_loadp = slow_path_surrogate;
+
+  // Create the final region/phi pair to converge the control/data paths to downstream code
+ Node* result_region = igvn.transform(new RegionNode(3));
+ result_region->set_req(1, then);
+ result_region->set_req(2, elsen);
+
+ Node* result_phi = igvn.transform(new PhiNode(result_region, TypeInstPtr::BOTTOM));
+ result_phi->set_req(1, new_loadp);
+ result_phi->set_req(2, barrier->in(LoadBarrierNode::Oop));
+
+  // Finally, connect the original outputs to the barrier region and phi to complete the expansion/substitution
+  if (out_ctrl != NULL) {
+ igvn.replace_node(out_ctrl, result_region);
+ }
+ igvn.replace_node(out_res, result_phi);
+
+  assert(barrier->outcnt() == 0, "LoadBarrier macro node still has outputs after expansion!");
+
+#ifdef PRINT_NODE_TRAVERSALS
+ tty->print("\nAfter barrier optimization: old out_ctrl\n");
+ traverse(out_ctrl, out_ctrl, out_res, -1);
+ tty->print("\nAfter barrier optimization: old out_res\n");
+ traverse(out_res, out_ctrl, out_res, -1);
+ tty->print("\nAfter barrier optimization: old barrier\n");
+ traverse(barrier, out_ctrl, out_res, -1);
+ tty->print("\nAfter barrier optimization: preceding_barrier_node\n");
+ traverse(preceding_barrier_node, result_region, result_phi, -1);
+#endif
+}
+
+bool ZBarrierSetC2::expand_macro_nodes(PhaseMacroExpand* macro) const {
+ Compile* C = Compile::current();
+ PhaseIterGVN &igvn = macro->igvn();
+ ZBarrierSetC2State* s = state();
+ if (s->load_barrier_count() > 0) {
+#ifdef ASSERT
+ verify_gc_barriers(false);
+#endif
+ igvn.set_delay_transform(true);
+ int skipped = 0;
+ while (s->load_barrier_count() > skipped) {
+ int load_barrier_count = s->load_barrier_count();
+ LoadBarrierNode * n = s->load_barrier_node(load_barrier_count-1-skipped);
+ if (igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())) {
+        // Node is unreachable, so don't try to expand it
+ s->remove_load_barrier_node(n);
+ continue;
+ }
+ if (!n->can_be_eliminated()) {
+ skipped++;
+ continue;
+ }
+ expand_loadbarrier_node(macro, n);
+ assert(s->load_barrier_count() < load_barrier_count, "must have deleted a node from load barrier list");
+ if (C->failing()) return true;
+ }
+ while (s->load_barrier_count() > 0) {
+ int load_barrier_count = s->load_barrier_count();
+ LoadBarrierNode* n = s->load_barrier_node(load_barrier_count - 1);
+ assert(!(igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())), "should have been processed already");
+ assert(!n->can_be_eliminated(), "should have been processed already");
+ expand_loadbarrier_node(macro, n);
+ assert(s->load_barrier_count() < load_barrier_count, "must have deleted a node from load barrier list");
+ if (C->failing()) return true;
+ }
+ igvn.set_delay_transform(false);
+ igvn.optimize();
+ if (C->failing()) return true;
+ }
+ return false;
+}
+
+// == Loop optimization ==
+
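+// If a dominating barrier covers the same oop, this barrier is redundant:
+// either link the two through the Similar edge (same address, different oop)
+// or remove this barrier and reuse the dominating barrier's Oop projection.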
+static bool replace_with_dominating_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, bool last_round) {
+ PhaseIterGVN &igvn = phase->igvn();
+ Compile* C = Compile::current();
+
+ LoadBarrierNode* lb2 = lb->has_dominating_barrier(phase, false, last_round);
+ if (lb2 != NULL) {
+ if (lb->in(LoadBarrierNode::Oop) != lb2->in(LoadBarrierNode::Oop)) {
+ assert(lb->in(LoadBarrierNode::Address) == lb2->in(LoadBarrierNode::Address), "");
+ igvn.replace_input_of(lb, LoadBarrierNode::Similar, lb2->proj_out(LoadBarrierNode::Oop));
+ C->set_major_progress();
+ } else {
+ // That transformation may cause the Similar edge on dominated load barriers to be invalid
+ lb->fix_similar_in_uses(&igvn);
+
+ Node* val = lb->proj_out(LoadBarrierNode::Oop);
+ assert(lb2->has_true_uses(), "");
+ assert(lb2->in(LoadBarrierNode::Oop) == lb->in(LoadBarrierNode::Oop), "");
+
+ phase->lazy_update(lb, lb->in(LoadBarrierNode::Control));
+ phase->lazy_replace(lb->proj_out(LoadBarrierNode::Control), lb->in(LoadBarrierNode::Control));
+ igvn.replace_node(val, lb2->proj_out(LoadBarrierNode::Oop));
+
+ return true;
+ }
+ }
+ return false;
+}
+
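+// Walk up the raw memory graph from mem to the memory state that is valid at
+// dom (or at dom's i'th input when dom is a Region and i != -1).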
+static Node* find_dominating_memory(PhaseIdealLoop* phase, Node* mem, Node* dom, int i) {
+ assert(dom->is_Region() || i == -1, "");
+ Node* m = mem;
+  while (phase->is_dominator(dom, phase->has_ctrl(m) ? phase->get_ctrl(m) : m->in(0))) {
+ if (m->is_Mem()) {
+ assert(m->as_Mem()->adr_type() == TypeRawPtr::BOTTOM, "");
+ m = m->in(MemNode::Memory);
+ } else if (m->is_MergeMem()) {
+ m = m->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
+ } else if (m->is_Phi()) {
+ if (m->in(0) == dom && i != -1) {
+ m = m->in(i);
+ break;
+ } else {
+ m = m->in(LoopNode::EntryControl);
+ }
+ } else if (m->is_Proj()) {
+ m = m->in(0);
+ } else if (m->is_SafePoint() || m->is_MemBar()) {
+ m = m->in(TypeFunc::Memory);
+ } else {
+#ifdef ASSERT
+ m->dump();
+#endif
+ ShouldNotReachHere();
+ }
+ }
+ return m;
+}
+
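+// Clone lb with new control, memory and (optionally) oop inputs, register the
+// clone and its projections with the loop optimizer, and clear the Similar
+// edge if it no longer dominates the new position.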
+static LoadBarrierNode* clone_load_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, Node* ctl, Node* mem, Node* oop_in) {
+ PhaseIterGVN &igvn = phase->igvn();
+ Compile* C = Compile::current();
+ Node* the_clone = lb->clone();
+ the_clone->set_req(LoadBarrierNode::Control, ctl);
+ the_clone->set_req(LoadBarrierNode::Memory, mem);
+ if (oop_in != NULL) {
+ the_clone->set_req(LoadBarrierNode::Oop, oop_in);
+ }
+
+ LoadBarrierNode* new_lb = the_clone->as_LoadBarrier();
+ igvn.register_new_node_with_optimizer(new_lb);
+ IdealLoopTree *loop = phase->get_loop(new_lb->in(0));
+ phase->set_ctrl(new_lb, new_lb->in(0));
+ phase->set_loop(new_lb, loop);
+ phase->set_idom(new_lb, new_lb->in(0), phase->dom_depth(new_lb->in(0))+1);
+ if (!loop->_child) {
+ loop->_body.push(new_lb);
+ }
+
+ Node* proj_ctl = new ProjNode(new_lb, LoadBarrierNode::Control);
+ igvn.register_new_node_with_optimizer(proj_ctl);
+ phase->set_ctrl(proj_ctl, proj_ctl->in(0));
+ phase->set_loop(proj_ctl, loop);
+ phase->set_idom(proj_ctl, new_lb, phase->dom_depth(new_lb)+1);
+ if (!loop->_child) {
+ loop->_body.push(proj_ctl);
+ }
+
+ Node* proj_oop = new ProjNode(new_lb, LoadBarrierNode::Oop);
+ phase->register_new_node(proj_oop, new_lb);
+
+ if (!new_lb->in(LoadBarrierNode::Similar)->is_top()) {
+ LoadBarrierNode* similar = new_lb->in(LoadBarrierNode::Similar)->in(0)->as_LoadBarrier();
+ if (!phase->is_dominator(similar, ctl)) {
+ igvn.replace_input_of(new_lb, LoadBarrierNode::Similar, C->top());
+ }
+ }
+
+ return new_lb;
+}
+
+static void replace_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, Node* new_val) {
+ PhaseIterGVN &igvn = phase->igvn();
+ Node* val = lb->proj_out(LoadBarrierNode::Oop);
+ igvn.replace_node(val, new_val);
+ phase->lazy_update(lb, lb->in(LoadBarrierNode::Control));
+ phase->lazy_replace(lb->proj_out(LoadBarrierNode::Control), lb->in(LoadBarrierNode::Control));
+}
+
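+// If the barrier's oop input is a phi, split the barrier through the phi:
+// clone one barrier above each input of the phi's region and merge the
+// healed oops with a new phi.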
+static bool split_barrier_thru_phi(PhaseIdealLoop* phase, LoadBarrierNode* lb) {
+ PhaseIterGVN &igvn = phase->igvn();
+ Compile* C = Compile::current();
+
+ if (lb->in(LoadBarrierNode::Oop)->is_Phi()) {
+ Node* oop_phi = lb->in(LoadBarrierNode::Oop);
+
+ if (oop_phi->in(2) == oop_phi) {
+ // Ignore phis with only one input
+ return false;
+ }
+
+ if (phase->is_dominator(phase->get_ctrl(lb->in(LoadBarrierNode::Address)),
+ oop_phi->in(0)) && phase->get_ctrl(lb->in(LoadBarrierNode::Address)) != oop_phi->in(0)) {
+ // That transformation may cause the Similar edge on dominated load barriers to be invalid
+ lb->fix_similar_in_uses(&igvn);
+
+ RegionNode* region = oop_phi->in(0)->as_Region();
+
+ int backedge = LoopNode::LoopBackControl;
+ if (region->is_Loop() && region->in(backedge)->is_Proj() && region->in(backedge)->in(0)->is_If()) {
+ Node* c = region->in(backedge)->in(0)->in(0);
+ assert(c->unique_ctrl_out() == region->in(backedge)->in(0), "");
+ Node* oop = lb->in(LoadBarrierNode::Oop)->in(backedge);
+ Node* oop_c = phase->has_ctrl(oop) ? phase->get_ctrl(oop) : oop;
+ if (!phase->is_dominator(oop_c, c)) {
+ return false;
+ }
+ }
+
+      // If the phi's backedge input is this barrier's own Oop projection,
+      // we have a self loop. Don't clone - it will be folded away later.
+ if (oop_phi->in(LoopNode::LoopBackControl) == lb->proj_out(LoadBarrierNode::Oop)) {
+ return false;
+ }
+
+ bool is_strip_mined = region->is_CountedLoop() && region->as_CountedLoop()->is_strip_mined();
+ Node *phi = oop_phi->clone();
+
+ for (uint i = 1; i < region->req(); i++) {
+ Node* ctrl = region->in(i);
+ if (ctrl != C->top()) {
+ assert(!phase->is_dominator(ctrl, region) || region->is_Loop(), "");
+
+ Node* mem = lb->in(LoadBarrierNode::Memory);
+ Node* m = find_dominating_memory(phase, mem, region, i);
+
+ if (region->is_Loop() && i == LoopNode::LoopBackControl && ctrl->is_Proj() && ctrl->in(0)->is_If()) {
+ ctrl = ctrl->in(0)->in(0);
+ } else if (region->is_Loop() && is_strip_mined) {
+ // If this is a strip mined loop, control must move above OuterStripMinedLoop
+ assert(i == LoopNode::EntryControl, "check");
+ assert(ctrl->is_OuterStripMinedLoop(), "sanity");
+ ctrl = ctrl->as_OuterStripMinedLoop()->in(LoopNode::EntryControl);
+ }
+
+ LoadBarrierNode* new_lb = clone_load_barrier(phase, lb, ctrl, m, lb->in(LoadBarrierNode::Oop)->in(i));
+ Node* out_ctrl = new_lb->proj_out(LoadBarrierNode::Control);
+
+ if (is_strip_mined && (i == LoopNode::EntryControl)) {
+ assert(region->in(i)->is_OuterStripMinedLoop(), "");
+ igvn.replace_input_of(region->in(i), i, out_ctrl);
+ } else if (ctrl == region->in(i)) {
+ igvn.replace_input_of(region, i, out_ctrl);
+ } else {
+ Node* iff = region->in(i)->in(0);
+ igvn.replace_input_of(iff, 0, out_ctrl);
+ phase->set_idom(iff, out_ctrl, phase->dom_depth(out_ctrl)+1);
+ }
+ phi->set_req(i, new_lb->proj_out(LoadBarrierNode::Oop));
+ }
+ }
+ phase->register_new_node(phi, region);
+ replace_barrier(phase, lb, phi);
+
+ if (region->is_Loop()) {
+      // A load barrier moved to the back edge of the loop may now have a
+      // safepoint on the path to the barrier on its Similar edge, so
+      // clear the Similar edge
+ igvn.replace_input_of(phi->in(LoopNode::LoopBackControl)->in(0), LoadBarrierNode::Similar, C->top());
+ Node* head = region->in(LoopNode::EntryControl);
+ phase->set_idom(region, head, phase->dom_depth(head)+1);
+ phase->recompute_dom_depth();
+ if (head->is_CountedLoop() && head->as_CountedLoop()->is_main_loop()) {
+ head->as_CountedLoop()->set_normal_loop();
+ }
+ }
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
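+// Hoist a loop invariant barrier (its oop and address are both defined
+// outside the loop) to just before the loop entry, moving above the outer
+// strip mined loop when there is one.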
+static bool move_out_of_loop(PhaseIdealLoop* phase, LoadBarrierNode* lb) {
+ PhaseIterGVN &igvn = phase->igvn();
+ IdealLoopTree *lb_loop = phase->get_loop(lb->in(0));
+ if (lb_loop != phase->ltree_root() && !lb_loop->_irreducible) {
+ Node* oop_ctrl = phase->get_ctrl(lb->in(LoadBarrierNode::Oop));
+ IdealLoopTree *oop_loop = phase->get_loop(oop_ctrl);
+ IdealLoopTree* adr_loop = phase->get_loop(phase->get_ctrl(lb->in(LoadBarrierNode::Address)));
+ if (!lb_loop->is_member(oop_loop) && !lb_loop->is_member(adr_loop)) {
+ // That transformation may cause the Similar edge on dominated load barriers to be invalid
+ lb->fix_similar_in_uses(&igvn);
+
+ Node* head = lb_loop->_head;
+ assert(head->is_Loop(), "");
+
+ if (phase->is_dominator(head, oop_ctrl)) {
+ assert(oop_ctrl->Opcode() == Op_CProj && oop_ctrl->in(0)->Opcode() == Op_NeverBranch, "");
+ assert(lb_loop->is_member(phase->get_loop(oop_ctrl->in(0)->in(0))), "");
+ return false;
+ }
+
+ if (head->is_CountedLoop()) {
+ CountedLoopNode* cloop = head->as_CountedLoop();
+ if (cloop->is_main_loop()) {
+ cloop->set_normal_loop();
+ }
+ // When we are moving barrier out of a counted loop,
+ // make sure we move it all the way out of the strip mined outer loop.
+ if (cloop->is_strip_mined()) {
+ head = cloop->outer_loop();
+ }
+ }
+
+ Node* mem = lb->in(LoadBarrierNode::Memory);
+ Node* m = find_dominating_memory(phase, mem, head, -1);
+
+ LoadBarrierNode* new_lb = clone_load_barrier(phase, lb, head->in(LoopNode::EntryControl), m, NULL);
+
+ assert(phase->idom(head) == head->in(LoopNode::EntryControl), "");
+ Node* proj_ctl = new_lb->proj_out(LoadBarrierNode::Control);
+ igvn.replace_input_of(head, LoopNode::EntryControl, proj_ctl);
+ phase->set_idom(head, proj_ctl, phase->dom_depth(proj_ctl) + 1);
+
+ replace_barrier(phase, lb, new_lb->proj_out(LoadBarrierNode::Oop));
+
+ phase->recompute_dom_depth();
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
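+// If this barrier and another barrier on the same oop sit on sibling
+// branches of a common dominator, replace both with a single barrier cloned
+// at the dominating point, provided only harmless control (projections and
+// uncommon trap ifs) lies on the paths to the LCA.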
+static bool common_barriers(PhaseIdealLoop* phase, LoadBarrierNode* lb) {
+ PhaseIterGVN &igvn = phase->igvn();
+ Node* in_val = lb->in(LoadBarrierNode::Oop);
+ for (DUIterator_Fast imax, i = in_val->fast_outs(imax); i < imax; i++) {
+ Node* u = in_val->fast_out(i);
+ if (u != lb && u->is_LoadBarrier() && u->as_LoadBarrier()->has_true_uses()) {
+ Node* this_ctrl = lb->in(LoadBarrierNode::Control);
+ Node* other_ctrl = u->in(LoadBarrierNode::Control);
+
+ Node* lca = phase->dom_lca(this_ctrl, other_ctrl);
+ bool ok = true;
+
+ Node* proj1 = NULL;
+ Node* proj2 = NULL;
+
+ while (this_ctrl != lca && ok) {
+ if (this_ctrl->in(0) != NULL &&
+ this_ctrl->in(0)->is_MultiBranch()) {
+ if (this_ctrl->in(0)->in(0) == lca) {
+ assert(proj1 == NULL, "");
+ assert(this_ctrl->is_Proj(), "");
+ proj1 = this_ctrl;
+ } else if (!(this_ctrl->in(0)->is_If() && this_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none))) {
+ ok = false;
+ }
+ }
+ this_ctrl = phase->idom(this_ctrl);
+ }
+ while (other_ctrl != lca && ok) {
+ if (other_ctrl->in(0) != NULL &&
+ other_ctrl->in(0)->is_MultiBranch()) {
+ if (other_ctrl->in(0)->in(0) == lca) {
+ assert(other_ctrl->is_Proj(), "");
+ assert(proj2 == NULL, "");
+ proj2 = other_ctrl;
+ } else if (!(other_ctrl->in(0)->is_If() && other_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none))) {
+ ok = false;
+ }
+ }
+ other_ctrl = phase->idom(other_ctrl);
+ }
+ assert(proj1 == NULL || proj2 == NULL || proj1->in(0) == proj2->in(0), "");
+ if (ok && proj1 && proj2 && proj1 != proj2 && proj1->in(0)->is_If()) {
+ // That transformation may cause the Similar edge on dominated load barriers to be invalid
+ lb->fix_similar_in_uses(&igvn);
+ u->as_LoadBarrier()->fix_similar_in_uses(&igvn);
+
+ Node* split = lca->unique_ctrl_out();
+ assert(split->in(0) == lca, "");
+
+ Node* mem = lb->in(LoadBarrierNode::Memory);
+ Node* m = find_dominating_memory(phase, mem, split, -1);
+ LoadBarrierNode* new_lb = clone_load_barrier(phase, lb, lca, m, NULL);
+
+ Node* proj_ctl = new_lb->proj_out(LoadBarrierNode::Control);
+ igvn.replace_input_of(split, 0, new_lb->proj_out(LoadBarrierNode::Control));
+ phase->set_idom(split, proj_ctl, phase->dom_depth(proj_ctl)+1);
+
+ Node* proj_oop = new_lb->proj_out(LoadBarrierNode::Oop);
+ replace_barrier(phase, lb, proj_oop);
+ replace_barrier(phase, u->as_LoadBarrier(), proj_oop);
+
+ phase->recompute_dom_depth();
+
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static void optimize_load_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, bool last_round) {
+ Compile* C = Compile::current();
+
+ if (!C->directive()->ZOptimizeLoadBarriersOption) {
+ return;
+ }
+
+ if (lb->has_true_uses()) {
+ if (replace_with_dominating_barrier(phase, lb, last_round)) {
+ return;
+ }
+
+ if (split_barrier_thru_phi(phase, lb)) {
+ return;
+ }
+
+ if (move_out_of_loop(phase, lb)) {
+ return;
+ }
+
+ if (common_barriers(phase, lb)) {
+ return;
+ }
+ }
+}
+
+void ZBarrierSetC2::loop_optimize_gc_barrier(PhaseIdealLoop* phase, Node* node, bool last_round) {
+ if (node->is_LoadBarrier()) {
+ optimize_load_barrier(phase, node->as_LoadBarrier(), last_round);
+ }
+}
+
+// == Verification ==
+
+#ifdef ASSERT
+
+static bool look_for_barrier(Node* n, bool post_parse, VectorSet& visited) {
+ if (visited.test_set(n->_idx)) {
+ return true;
+ }
+
+ for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+ Node* u = n->fast_out(i);
+ if (u->is_LoadBarrier()) {
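+      // A load barrier use is what we are looking for - accept it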
+ } else if ((u->is_Phi() || u->is_CMove()) && !post_parse) {
+ if (!look_for_barrier(u, post_parse, visited)) {
+ return false;
+ }
+ } else if (u->Opcode() == Op_EncodeP || u->Opcode() == Op_DecodeN) {
+ if (!look_for_barrier(u, post_parse, visited)) {
+ return false;
+ }
+ } else if (u->Opcode() != Op_SCMemProj) {
+ tty->print("bad use"); u->dump();
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void ZBarrierSetC2::verify_gc_barriers(bool post_parse) const {
+ ZBarrierSetC2State* s = state();
+ Compile* C = Compile::current();
+ ResourceMark rm;
+ VectorSet visited(Thread::current()->resource_area());
+ for (int i = 0; i < s->load_barrier_count(); i++) {
+ LoadBarrierNode* n = s->load_barrier_node(i);
+
+    // The barrier on the Similar edge, if it exists, must be on the same
+    // address, and this barrier must not be applied to the value from the
+    // same load; otherwise the value is not reloaded before it is used the
+    // second time.
+ assert(n->in(LoadBarrierNode::Similar)->is_top() ||
+ (n->in(LoadBarrierNode::Similar)->in(0)->is_LoadBarrier() &&
+ n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Address) == n->in(LoadBarrierNode::Address) &&
+ n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Oop) != n->in(LoadBarrierNode::Oop)),
+ "broken similar edge");
+
+ assert(post_parse || n->as_LoadBarrier()->has_true_uses(),
+ "found unneeded load barrier");
+
+    // Several load barrier nodes chained through their Similar edges would
+    // break the code that removes the barriers in final graph reshape.
+ assert(n->in(LoadBarrierNode::Similar)->is_top() ||
+ (n->in(LoadBarrierNode::Similar)->in(0)->is_LoadBarrier() &&
+ n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Similar)->is_top()),
+ "chain of Similar load barriers");
+
+ if (!n->in(LoadBarrierNode::Similar)->is_top()) {
+ ResourceMark rm;
+ Unique_Node_List wq;
+ Node* other = n->in(LoadBarrierNode::Similar)->in(0);
+ wq.push(n);
+ for (uint next = 0; next < wq.size(); ++next) {
+ Node *n = wq.at(next);
+ assert(n->is_CFG(), "");
+ assert(!n->is_SafePoint(), "");
+
+ if (n == other) {
+ continue;
+ }
+
+ if (n->is_Region()) {
+ for (uint i = 1; i < n->req(); i++) {
+ Node* m = n->in(i);
+ if (m != NULL) {
+ wq.push(m);
+ }
+ }
+ } else {
+ Node* m = n->in(0);
+ if (m != NULL) {
+ wq.push(m);
+ }
+ }
+ }
+ }
+
+ if (ZVerifyLoadBarriers) {
+ if ((n->is_Load() || n->is_LoadStore()) && n->bottom_type()->make_oopptr() != NULL) {
+ visited.Clear();
+ bool found = look_for_barrier(n, post_parse, visited);
+ if (!found) {
+ n->dump(1);
+ n->dump(-3);
+ stringStream ss;
+ C->method()->print_short_name(&ss);
+ tty->print_cr("-%s-", ss.as_string());
+ assert(found, "");
+ }
+ }
+ }
+ }
+}
+
+#endif
diff --git a/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp b/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp
new file mode 100644
index 00000000000..666c7c24e85
--- /dev/null
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_C2_ZBARRIERSETC2_HPP
+#define SHARE_GC_Z_C2_ZBARRIERSETC2_HPP
+
+#include "gc/shared/c2/barrierSetC2.hpp"
+#include "memory/allocation.hpp"
+#include "opto/node.hpp"
+#include "utilities/growableArray.hpp"
+
+class LoadBarrierNode : public MultiNode {
+private:
+ bool _weak;
+ bool _writeback; // Controls if the barrier writes the healed oop back to memory
+ // A swap on a memory location must never write back the healed oop
+  bool _oop_reload_allowed; // Controls if the barrier is allowed to reload the oop from memory
+                            // before healing; otherwise both the oop and the address must be
+                            // passed to the barrier
+
+ static bool is_dominator(PhaseIdealLoop* phase, bool linear_only, Node *d, Node *n);
+ void push_dominated_barriers(PhaseIterGVN* igvn) const;
+
+public:
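+  // Control, Memory and Oop are both inputs and outputs; Address and Similar
+  // are inputs only. Number_of_Outputs aliases Address, so the output tuple
+  // covers only the first three slots.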
+ enum {
+ Control,
+ Memory,
+ Oop,
+ Address,
+ Number_of_Outputs = Address,
+ Similar,
+ Number_of_Inputs
+ };
+
+ LoadBarrierNode(Compile* C,
+ Node* c,
+ Node* mem,
+ Node* val,
+ Node* adr,
+ bool weak,
+ bool writeback,
+ bool oop_reload_allowed);
+
+ virtual int Opcode() const;
+ virtual const Type *bottom_type() const;
+ virtual const Type *Value(PhaseGVN *phase) const;
+ virtual Node *Identity(PhaseGVN *phase);
+ virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+
+ LoadBarrierNode* has_dominating_barrier(PhaseIdealLoop* phase,
+ bool linear_only,
+ bool look_for_similar);
+
+ void fix_similar_in_uses(PhaseIterGVN* igvn);
+
+ bool has_true_uses() const;
+
+ bool can_be_eliminated() const {
+ return !in(Similar)->is_top();
+ }
+
+ bool is_weak() const {
+ return _weak;
+ }
+
+ bool is_writeback() const {
+ return _writeback;
+ }
+
+ bool oop_reload_allowed() const {
+ return _oop_reload_allowed;
+ }
+};
+
+class LoadBarrierSlowRegNode : public LoadPNode {
+public:
+ LoadBarrierSlowRegNode(Node *c,
+ Node *mem,
+ Node *adr,
+ const TypePtr *at,
+ const TypePtr* t,
+ MemOrd mo,
+ ControlDependency control_dependency = DependsOnlyOnTest)
+ : LoadPNode(c, mem, adr, at, t, mo, control_dependency) {}
+
+ virtual const char * name() {
+ return "LoadBarrierSlowRegNode";
+ }
+
+ virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
+ return NULL;
+ }
+
+ virtual int Opcode() const;
+};
+
+class LoadBarrierWeakSlowRegNode : public LoadPNode {
+public:
+ LoadBarrierWeakSlowRegNode(Node *c,
+ Node *mem,
+ Node *adr,
+ const TypePtr *at,
+ const TypePtr* t,
+ MemOrd mo,
+ ControlDependency control_dependency = DependsOnlyOnTest)
+ : LoadPNode(c, mem, adr, at, t, mo, control_dependency) {}
+
+ virtual const char * name() {
+ return "LoadBarrierWeakSlowRegNode";
+ }
+
+ virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
+ return NULL;
+ }
+
+ virtual int Opcode() const;
+};
+
+class ZBarrierSetC2State : public ResourceObj {
+private:
+ // List of load barrier nodes which need to be expanded before matching
+  GrowableArray<LoadBarrierNode*>* _load_barrier_nodes;
+
+public:
+ ZBarrierSetC2State(Arena* comp_arena);
+ int load_barrier_count() const;
+ void add_load_barrier_node(LoadBarrierNode* n);
+ void remove_load_barrier_node(LoadBarrierNode* n);
+ LoadBarrierNode* load_barrier_node(int idx) const;
+};
+
+class ZBarrierSetC2 : public BarrierSetC2 {
+private:
+ ZBarrierSetC2State* state() const;
+ Node* make_cas_loadbarrier(C2AtomicAccess& access) const;
+ Node* make_cmpx_loadbarrier(C2AtomicAccess& access) const;
+ void expand_loadbarrier_basic(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const;
+ void expand_loadbarrier_node(PhaseMacroExpand* phase, LoadBarrierNode* barrier) const;
+ void expand_loadbarrier_optimized(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const;
+ const TypeFunc* load_barrier_Type() const;
+
+protected:
+ virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const;
+ virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access,
+ Node* expected_val,
+ Node* new_val,
+ const Type* val_type) const;
+ virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access,
+ Node* expected_val,
+ Node* new_val,
+ const Type* value_type) const;
+ virtual Node* atomic_xchg_at_resolved(C2AtomicAccess& access,
+ Node* new_val,
+ const Type* val_type) const;
+
+public:
+ Node* load_barrier(GraphKit* kit,
+ Node* val,
+ Node* adr,
+ bool weak = false,
+ bool writeback = true,
+ bool oop_reload_allowed = true) const;
+
+ virtual void* create_barrier_state(Arena* comp_arena) const;
+ virtual bool is_gc_barrier_node(Node* node) const;
+ virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const { }
+ virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful) const;
+ virtual void add_users_to_worklist(Unique_Node_List* worklist) const;
+ virtual void enqueue_useful_gc_barrier(Unique_Node_List &worklist, Node* node) const;
+ virtual void register_potential_barrier_node(Node* node) const;
+ virtual void unregister_potential_barrier_node(Node* node) const;
+ virtual bool array_copy_requires_gc_barriers(BasicType type) const { return true; }
+ virtual Node* step_over_gc_barrier(Node* c) const { return c; }
+ // If the BarrierSetC2 state has kept macro nodes in its compilation unit state to be
+ // expanded later, then now is the time to do so.
+ virtual bool expand_macro_nodes(PhaseMacroExpand* macro) const;
+
+ static void find_dominating_barriers(PhaseIterGVN& igvn);
+ static void loop_optimize_gc_barrier(PhaseIdealLoop* phase, Node* node, bool last_round);
+
+#ifdef ASSERT
+ virtual void verify_gc_barriers(bool post_parse) const;
+#endif
+};
+
+#endif // SHARE_GC_Z_C2_ZBARRIERSETC2_HPP
diff --git a/src/hotspot/share/gc/z/vmStructs_z.cpp b/src/hotspot/share/gc/z/vmStructs_z.cpp
new file mode 100644
index 00000000000..5eacabfb0d9
--- /dev/null
+++ b/src/hotspot/share/gc/z/vmStructs_z.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/vmStructs_z.hpp"
+
+ZGlobalsForVMStructs::ZGlobalsForVMStructs() :
+ _ZGlobalPhase(&ZGlobalPhase),
+ _ZAddressGoodMask(&ZAddressGoodMask),
+ _ZAddressBadMask(&ZAddressBadMask),
+ _ZAddressWeakBadMask(&ZAddressWeakBadMask),
+ _ZObjectAlignmentSmallShift(&ZObjectAlignmentSmallShift),
+ _ZObjectAlignmentSmall(&ZObjectAlignmentSmall) {
+}
+
+ZGlobalsForVMStructs ZGlobalsForVMStructs::_instance;
+ZGlobalsForVMStructs* ZGlobalsForVMStructs::_instance_p = &ZGlobalsForVMStructs::_instance;
diff --git a/src/hotspot/share/gc/z/vmStructs_z.hpp b/src/hotspot/share/gc/z/vmStructs_z.hpp
new file mode 100644
index 00000000000..464f42f75a8
--- /dev/null
+++ b/src/hotspot/share/gc/z/vmStructs_z.hpp
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_Z_VMSTRUCTS_Z_HPP
+#define SHARE_VM_GC_Z_VMSTRUCTS_Z_HPP
+
+#include "gc/z/zAddressRangeMap.hpp"
+#include "gc/z/zCollectedHeap.hpp"
+#include "gc/z/zHeap.hpp"
+#include "gc/z/zPageAllocator.hpp"
+#include "gc/z/zPhysicalMemory.hpp"
+#include "utilities/macros.hpp"
+
+// Expose some ZGC globals to the SA agent.
+class ZGlobalsForVMStructs {
+ static ZGlobalsForVMStructs _instance;
+
+public:
+ static ZGlobalsForVMStructs* _instance_p;
+
+ ZGlobalsForVMStructs();
+
+ uint32_t* _ZGlobalPhase;
+
+ uintptr_t* _ZAddressGoodMask;
+ uintptr_t* _ZAddressBadMask;
+ uintptr_t* _ZAddressWeakBadMask;
+
+ const int* _ZObjectAlignmentSmallShift;
+ const int* _ZObjectAlignmentSmall;
+};
+
+typedef ZAddressRangeMap<ZPageTableEntry, ZPageSizeMinShift> ZAddressRangeMapForPageTable;
+
+#define VM_STRUCTS_ZGC(nonstatic_field, volatile_nonstatic_field, static_field) \
+ static_field(ZGlobalsForVMStructs, _instance_p, ZGlobalsForVMStructs*) \
+ nonstatic_field(ZGlobalsForVMStructs, _ZGlobalPhase, uint32_t*) \
+ nonstatic_field(ZGlobalsForVMStructs, _ZAddressGoodMask, uintptr_t*) \
+ nonstatic_field(ZGlobalsForVMStructs, _ZAddressBadMask, uintptr_t*) \
+ nonstatic_field(ZGlobalsForVMStructs, _ZAddressWeakBadMask, uintptr_t*) \
+ nonstatic_field(ZGlobalsForVMStructs, _ZObjectAlignmentSmallShift, const int*) \
+ nonstatic_field(ZGlobalsForVMStructs, _ZObjectAlignmentSmall, const int*) \
+ \
+ nonstatic_field(ZCollectedHeap, _heap, ZHeap) \
+ \
+ nonstatic_field(ZHeap, _page_allocator, ZPageAllocator) \
+ nonstatic_field(ZHeap, _pagetable, ZPageTable) \
+ \
+ nonstatic_field(ZPage, _type, const uint8_t) \
+ nonstatic_field(ZPage, _virtual, const ZVirtualMemory) \
+ nonstatic_field(ZPage, _forwarding, ZForwardingTable) \
+ \
+ nonstatic_field(ZPageAllocator, _physical, ZPhysicalMemoryManager) \
+ nonstatic_field(ZPageAllocator, _used, size_t) \
+ \
+ nonstatic_field(ZPageTable, _map, ZAddressRangeMapForPageTable) \
+ \
+ nonstatic_field(ZAddressRangeMapForPageTable, _map, ZPageTableEntry* const) \
+ \
+ nonstatic_field(ZVirtualMemory, _start, uintptr_t) \
+ nonstatic_field(ZVirtualMemory, _end, uintptr_t) \
+ \
+ nonstatic_field(ZForwardingTable, _table, ZForwardingTableEntry*) \
+ nonstatic_field(ZForwardingTable, _size, size_t) \
+ \
+ nonstatic_field(ZPhysicalMemoryManager, _max_capacity, const size_t) \
+ nonstatic_field(ZPhysicalMemoryManager, _capacity, size_t)
+
+#define VM_INT_CONSTANTS_ZGC(declare_constant, declare_constant_with_value) \
+ declare_constant(ZPhaseRelocate) \
+ declare_constant(ZPageTypeSmall) \
+ declare_constant(ZPageTypeMedium) \
+ declare_constant(ZPageTypeLarge) \
+ declare_constant(ZObjectAlignmentMediumShift) \
+ declare_constant(ZObjectAlignmentLargeShift)
+
+#define VM_LONG_CONSTANTS_ZGC(declare_constant) \
+ declare_constant(ZPageSizeSmallShift) \
+ declare_constant(ZPageSizeMediumShift) \
+ declare_constant(ZPageSizeMinShift) \
+ declare_constant(ZAddressOffsetShift) \
+ declare_constant(ZAddressOffsetBits) \
+ declare_constant(ZAddressOffsetMask) \
+ declare_constant(ZAddressSpaceStart)
+
+#define VM_TYPES_ZGC(declare_type, declare_toplevel_type, declare_integer_type) \
+ declare_toplevel_type(ZGlobalsForVMStructs) \
+ declare_type(ZCollectedHeap, CollectedHeap) \
+ declare_toplevel_type(ZHeap) \
+ declare_toplevel_type(ZPage) \
+ declare_toplevel_type(ZPageAllocator) \
+ declare_toplevel_type(ZPageTable) \
+ declare_toplevel_type(ZPageTableEntry) \
+ declare_toplevel_type(ZAddressRangeMapForPageTable) \
+ declare_toplevel_type(ZVirtualMemory) \
+ declare_toplevel_type(ZForwardingTable) \
+ declare_toplevel_type(ZForwardingTableEntry) \
+ declare_toplevel_type(ZPhysicalMemoryManager)
+
+#endif // SHARE_VM_GC_Z_VMSTRUCTS_Z_HPP
diff --git a/src/hotspot/share/gc/z/zAddress.cpp b/src/hotspot/share/gc/z/zAddress.cpp
new file mode 100644
index 00000000000..87200e18520
--- /dev/null
+++ b/src/hotspot/share/gc/z/zAddress.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "runtime/thread.hpp"
+
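+// Derive the bad masks from the good mask: the bad mask is the complement
+// of the good mask within the metadata bits, and the weak bad mask
+// additionally accepts remapped and finalizable oops as good.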
+void ZAddressMasks::set_good_mask(uintptr_t mask) {
+ ZAddressGoodMask = mask;
+ ZAddressBadMask = ZAddressGoodMask ^ ZAddressMetadataMask;
+ ZAddressWeakBadMask = (ZAddressGoodMask | ZAddressMetadataRemapped | ZAddressMetadataFinalizable) ^ ZAddressMetadataMask;
+}
+
+void ZAddressMasks::initialize() {
+ ZAddressMetadataMarked = ZAddressMetadataMarked0;
+ set_good_mask(ZAddressMetadataRemapped);
+}
+
+void ZAddressMasks::flip_to_marked() {
+ ZAddressMetadataMarked ^= (ZAddressMetadataMarked0 | ZAddressMetadataMarked1);
+ set_good_mask(ZAddressMetadataMarked);
+}
+
+void ZAddressMasks::flip_to_remapped() {
+ set_good_mask(ZAddressMetadataRemapped);
+}
diff --git a/src/hotspot/share/gc/z/zAddress.hpp b/src/hotspot/share/gc/z/zAddress.hpp
new file mode 100644
index 00000000000..b2131eeb1df
--- /dev/null
+++ b/src/hotspot/share/gc/z/zAddress.hpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZADDRESS_HPP
+#define SHARE_GC_Z_ZADDRESS_HPP
+
+#include "memory/allocation.hpp"
+
+class ZAddress : public AllStatic {
+public:
+ static bool is_null(uintptr_t value);
+ static bool is_bad(uintptr_t value);
+ static bool is_good(uintptr_t value);
+ static bool is_good_or_null(uintptr_t value);
+ static bool is_weak_bad(uintptr_t value);
+ static bool is_weak_good(uintptr_t value);
+ static bool is_weak_good_or_null(uintptr_t value);
+ static bool is_marked(uintptr_t value);
+ static bool is_finalizable(uintptr_t value);
+ static bool is_remapped(uintptr_t value);
+
+ static uintptr_t address(uintptr_t value);
+ static uintptr_t offset(uintptr_t value);
+ static uintptr_t good(uintptr_t value);
+ static uintptr_t good_or_null(uintptr_t value);
+ static uintptr_t finalizable_good(uintptr_t value);
+ static uintptr_t marked(uintptr_t value);
+ static uintptr_t marked0(uintptr_t value);
+ static uintptr_t marked1(uintptr_t value);
+ static uintptr_t remapped(uintptr_t value);
+ static uintptr_t remapped_or_null(uintptr_t value);
+};
+
+class ZAddressMasks : public AllStatic {
+ friend class ZAddressTest;
+
+private:
+ static void set_good_mask(uintptr_t mask);
+
+public:
+ static void initialize();
+ static void flip_to_marked();
+ static void flip_to_remapped();
+};
+
+#endif // SHARE_GC_Z_ZADDRESS_HPP
diff --git a/src/hotspot/share/gc/z/zAddress.inline.hpp b/src/hotspot/share/gc/z/zAddress.inline.hpp
new file mode 100644
index 00000000000..ab227447b37
--- /dev/null
+++ b/src/hotspot/share/gc/z/zAddress.inline.hpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZADDRESS_INLINE_HPP
+#define SHARE_GC_Z_ZADDRESS_INLINE_HPP
+
+#include "gc/z/zAddress.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "utilities/macros.hpp"
+#include OS_CPU_HEADER_INLINE(gc/z/zAddress)
+
+inline bool ZAddress::is_null(uintptr_t value) {
+ return value == 0;
+}
+
+inline bool ZAddress::is_bad(uintptr_t value) {
+ return value & ZAddressBadMask;
+}
+
+inline bool ZAddress::is_good(uintptr_t value) {
+ return !is_bad(value) && !is_null(value);
+}
+
+inline bool ZAddress::is_good_or_null(uintptr_t value) {
+ // Checking if an address is "not bad" is an optimized version of
+ // checking if it's "good or null", which eliminates an explicit
+ // null check. However, the implicit null check only checks that
+ // the mask bits are zero, not that the entire address is zero.
+ // This means that an address without mask bits would pass through
+ // the barrier as if it was null. This should be harmless as such
+ // addresses should never be passed through the barrier.
+ const bool result = !is_bad(value);
+ assert((is_good(value) || is_null(value)) == result, "Bad address");
+ return result;
+}
+
+inline bool ZAddress::is_weak_bad(uintptr_t value) {
+ return value & ZAddressWeakBadMask;
+}
+
+inline bool ZAddress::is_weak_good(uintptr_t value) {
+ return !is_weak_bad(value) && !is_null(value);
+}
+
+inline bool ZAddress::is_weak_good_or_null(uintptr_t value) {
+ return !is_weak_bad(value);
+}
+
+inline bool ZAddress::is_marked(uintptr_t value) {
+ return value & ZAddressMetadataMarked;
+}
+
+inline bool ZAddress::is_finalizable(uintptr_t value) {
+ return value & ZAddressMetadataFinalizable;
+}
+
+inline bool ZAddress::is_remapped(uintptr_t value) {
+ return value & ZAddressMetadataRemapped;
+}
+
+inline uintptr_t ZAddress::offset(uintptr_t value) {
+ return value & ZAddressOffsetMask;
+}
+
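+// Explanatory note: the coloring helpers below share one pattern: strip the
+// current metadata bits with offset(), OR in the desired color mask, and let
+// the platform-specific address() (pulled in via OS_CPU_HEADER_INLINE above)
+// produce the final pointer value.
+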
+inline uintptr_t ZAddress::good(uintptr_t value) {
+ return address(offset(value) | ZAddressGoodMask);
+}
+
+inline uintptr_t ZAddress::good_or_null(uintptr_t value) {
+ return is_null(value) ? 0 : good(value);
+}
+
+inline uintptr_t ZAddress::finalizable_good(uintptr_t value) {
+ return address(offset(value) | ZAddressMetadataFinalizable | ZAddressGoodMask);
+}
+
+inline uintptr_t ZAddress::marked(uintptr_t value) {
+ return address(offset(value) | ZAddressMetadataMarked);
+}
+
+inline uintptr_t ZAddress::marked0(uintptr_t value) {
+ return address(offset(value) | ZAddressMetadataMarked0);
+}
+
+inline uintptr_t ZAddress::marked1(uintptr_t value) {
+ return address(offset(value) | ZAddressMetadataMarked1);
+}
+
+inline uintptr_t ZAddress::remapped(uintptr_t value) {
+ return address(offset(value) | ZAddressMetadataRemapped);
+}
+
+inline uintptr_t ZAddress::remapped_or_null(uintptr_t value) {
+ return is_null(value) ? 0 : remapped(value);
+}
+
+#endif // SHARE_GC_Z_ZADDRESS_INLINE_HPP
diff --git a/src/hotspot/share/gc/z/zAddressRangeMap.hpp b/src/hotspot/share/gc/z/zAddressRangeMap.hpp
new file mode 100644
index 00000000000..0d441de626e
--- /dev/null
+++ b/src/hotspot/share/gc/z/zAddressRangeMap.hpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZADDRESSRANGEMAP_HPP
+#define SHARE_GC_Z_ZADDRESSRANGEMAP_HPP
+
+#include "memory/allocation.hpp"
+
+template <typename T, size_t AddressRangeShift>
+class ZAddressRangeMapIterator;
+
+template <typename T, size_t AddressRangeShift>
+class ZAddressRangeMap {
+ friend class VMStructs;
+ friend class ZAddressRangeMapIterator<T, AddressRangeShift>;
+
+private:
+ T* const _map;
+
+ size_t index_for_addr(uintptr_t addr) const;
+ size_t size() const;
+
+public:
+ ZAddressRangeMap();
+ ~ZAddressRangeMap();
+
+ T get(uintptr_t addr) const;
+ void put(uintptr_t addr, T value);
+};
+
+template <typename T, size_t AddressRangeShift>
+class ZAddressRangeMapIterator : public StackObj {
+public:
+ const ZAddressRangeMap<T, AddressRangeShift>* const _map;
+ size_t _next;
+
+public:
+ ZAddressRangeMapIterator(const ZAddressRangeMap<T, AddressRangeShift>* map);
+
+ bool next(T* value);
+};
+
+#endif // SHARE_GC_Z_ZADDRESSRANGEMAP_HPP
diff --git a/src/hotspot/share/gc/z/zAddressRangeMap.inline.hpp b/src/hotspot/share/gc/z/zAddressRangeMap.inline.hpp
new file mode 100644
index 00000000000..f091e0295dd
--- /dev/null
+++ b/src/hotspot/share/gc/z/zAddressRangeMap.inline.hpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZADDRESSRANGEMAP_INLINE_HPP
+#define SHARE_GC_Z_ZADDRESSRANGEMAP_INLINE_HPP
+
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zAddressRangeMap.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "memory/allocation.inline.hpp"
+
+template <typename T, size_t AddressRangeShift>
+ZAddressRangeMap<T, AddressRangeShift>::ZAddressRangeMap() :
+ _map(MmapArrayAllocator<T>::allocate(size(), mtGC)) {}
+
+template <typename T, size_t AddressRangeShift>
+ZAddressRangeMap<T, AddressRangeShift>::~ZAddressRangeMap() {
+ MmapArrayAllocator<T>::free(_map, size());
+}
+
+template <typename T, size_t AddressRangeShift>
+size_t ZAddressRangeMap<T, AddressRangeShift>::index_for_addr(uintptr_t addr) const {
+ assert(!ZAddress::is_null(addr), "Invalid address");
+
+ const size_t index = ZAddress::offset(addr) >> AddressRangeShift;
+ assert(index < size(), "Invalid index");
+
+ return index;
+}
+
+template <typename T, size_t AddressRangeShift>
+size_t ZAddressRangeMap<T, AddressRangeShift>::size() const {
+ return ZAddressOffsetMax >> AddressRangeShift;
+}
+
+template <typename T, size_t AddressRangeShift>
+T ZAddressRangeMap<T, AddressRangeShift>::get(uintptr_t addr) const {
+ const uintptr_t index = index_for_addr(addr);
+ return _map[index];
+}
+
+template <typename T, size_t AddressRangeShift>
+void ZAddressRangeMap<T, AddressRangeShift>::put(uintptr_t addr, T value) {
+ const uintptr_t index = index_for_addr(addr);
+ _map[index] = value;
+}
+
+template <typename T, size_t AddressRangeShift>
+inline ZAddressRangeMapIterator<T, AddressRangeShift>::ZAddressRangeMapIterator(const ZAddressRangeMap<T, AddressRangeShift>* map) :
+ _map(map),
+ _next(0) {}
+
+template <typename T, size_t AddressRangeShift>
+inline bool ZAddressRangeMapIterator<T, AddressRangeShift>::next(T* value) {
+ if (_next < _map->size()) {
+ *value = _map->_map[_next++];
+ return true;
+ }
+
+ // End of map
+ return false;
+}
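+
+// Explanatory note: the map is a flat array with one slot per
+// 2^AddressRangeShift bytes of heap address space, so get() and put() are
+// O(1). In this patch it is used (as ZAddressRangeMapForPageTable) to map a
+// heap address to its ZPageTableEntry.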
+
+#endif // SHARE_GC_Z_ZADDRESSRANGEMAP_INLINE_HPP
diff --git a/src/hotspot/share/gc/z/zAllocationFlags.hpp b/src/hotspot/share/gc/z/zAllocationFlags.hpp
new file mode 100644
index 00000000000..3116a0fd189
--- /dev/null
+++ b/src/hotspot/share/gc/z/zAllocationFlags.hpp
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZALLOCATIONFLAGS_HPP
+#define SHARE_GC_Z_ZALLOCATIONFLAGS_HPP
+
+#include "gc/z/zBitField.hpp"
+#include "memory/allocation.hpp"
+
+//
+// Allocation flags layout
+// -----------------------
+//
+// 7 4 3 2 1 0
+// +---+-+-+-+-+-+
+// |000|1|1|1|1|1|
+// +---+-+-+-+-+-+
+// | | | | | |
+// | | | | | * 0-0 Java Thread Flag (1-bit)
+// | | | | |
+// | | | | * 1-1 Worker Thread Flag (1-bit)
+// | | | |
+// | | | * 2-2 Non-Blocking Flag (1-bit)
+// | | |
+// | | * 3-3 Relocation Flag (1-bit)
+// | |
+// | * 4-4 No Reserve Flag (1-bit)
+// |
+// * 7-5 Unused (3-bits)
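+//
+// For example (illustrative): _flags == 0x05 (binary 00000101) has the
+// Java Thread and Non-Blocking flags set and all other flags clear.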
+//
+
+class ZAllocationFlags {
+private:
+ typedef ZBitField<uint8_t, bool, 0, 1> field_java_thread;
+ typedef ZBitField<uint8_t, bool, 1, 1> field_worker_thread;
+ typedef ZBitField<uint8_t, bool, 2, 1> field_non_blocking;
+ typedef ZBitField<uint8_t, bool, 3, 1> field_relocation;
+ typedef ZBitField<uint8_t, bool, 4, 1> field_no_reserve;
+
+ uint8_t _flags;
+
+public:
+ ZAllocationFlags() :
+ _flags(0) {}
+
+ void set_java_thread() {
+ _flags |= field_java_thread::encode(true);
+ }
+
+ void set_worker_thread() {
+ _flags |= field_worker_thread::encode(true);
+ }
+
+ void set_non_blocking() {
+ _flags |= field_non_blocking::encode(true);
+ }
+
+ void set_relocation() {
+ _flags |= field_relocation::encode(true);
+ }
+
+ void set_no_reserve() {
+ _flags |= field_no_reserve::encode(true);
+ }
+
+ bool java_thread() const {
+ return field_java_thread::decode(_flags);
+ }
+
+ bool worker_thread() const {
+ return field_worker_thread::decode(_flags);
+ }
+
+ bool non_blocking() const {
+ return field_non_blocking::decode(_flags);
+ }
+
+ bool relocation() const {
+ return field_relocation::decode(_flags);
+ }
+
+ bool no_reserve() const {
+ return field_no_reserve::decode(_flags);
+ }
+};
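+
+// Example usage (illustrative sketch; real call sites set other combinations):
+//
+// ZAllocationFlags flags;
+// flags.set_java_thread();
+// flags.set_non_blocking();
+// assert(flags.java_thread(), "Should be set");
+// assert(!flags.worker_thread(), "Should not be set");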
+
+#endif // SHARE_GC_Z_ZALLOCATIONFLAGS_HPP
diff --git a/src/hotspot/share/gc/z/zArguments.cpp b/src/hotspot/share/gc/z/zArguments.cpp
new file mode 100644
index 00000000000..19fd2898e20
--- /dev/null
+++ b/src/hotspot/share/gc/z/zArguments.cpp
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zArguments.hpp"
+#include "gc/z/zCollectedHeap.hpp"
+#include "gc/z/zCollectorPolicy.hpp"
+#include "gc/z/zWorkers.hpp"
+#include "gc/shared/gcArguments.inline.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/globals_extension.hpp"
+
+size_t ZArguments::conservative_max_heap_alignment() {
+ return 0;
+}
+
+void ZArguments::initialize() {
+ GCArguments::initialize();
+
+ // Enable NUMA by default
+ if (FLAG_IS_DEFAULT(UseNUMA)) {
+ FLAG_SET_DEFAULT(UseNUMA, true);
+ }
+
+ // Disable biased locking by default
+ if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
+ FLAG_SET_DEFAULT(UseBiasedLocking, false);
+ }
+
+ // Select number of parallel threads
+ if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
+ FLAG_SET_DEFAULT(ParallelGCThreads, ZWorkers::calculate_nparallel());
+ }
+
+ if (ParallelGCThreads == 0) {
+ vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ParallelGCThreads=0");
+ }
+
+ // Select number of concurrent threads
+ if (FLAG_IS_DEFAULT(ConcGCThreads)) {
+ FLAG_SET_DEFAULT(ConcGCThreads, ZWorkers::calculate_nconcurrent());
+ }
+
+ if (ConcGCThreads == 0) {
+ vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ConcGCThreads=0");
+ }
+
+#ifdef COMPILER2
+ // Enable loop strip mining by default
+ if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) {
+ FLAG_SET_DEFAULT(UseCountedLoopSafepoints, true);
+ if (FLAG_IS_DEFAULT(LoopStripMiningIter)) {
+ FLAG_SET_DEFAULT(LoopStripMiningIter, 1000);
+ }
+ }
+#endif
+
+ // To avoid asserts in set_active_workers()
+ FLAG_SET_DEFAULT(UseDynamicNumberOfGCThreads, true);
+
+ // CompressedOops/UseCompressedClassPointers not supported
+ FLAG_SET_DEFAULT(UseCompressedOops, false);
+ FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
+
+ // ClassUnloading not (yet) supported
+ FLAG_SET_DEFAULT(ClassUnloading, false);
+ FLAG_SET_DEFAULT(ClassUnloadingWithConcurrentMark, false);
+
+ // Verification before startup and after exit not (yet) supported
+ FLAG_SET_DEFAULT(VerifyDuringStartup, false);
+ FLAG_SET_DEFAULT(VerifyBeforeExit, false);
+
+ // Verification of stacks not (yet) supported, for the same reason
+ // we need fixup_partial_loads
+ DEBUG_ONLY(FLAG_SET_DEFAULT(VerifyStack, false));
+
+ // JVMCI not (yet) supported
+ if (EnableJVMCI) {
+ vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:+EnableJVMCI");
+ }
+}
+
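+// Note: ZGC is an experimental feature at this point, so enabling it
+// typically requires: java -XX:+UnlockExperimentalVMOptions -XX:+UseZGC ...
+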
+CollectedHeap* ZArguments::create_heap() {
+ return create_heap_with_policy<ZCollectedHeap, ZCollectorPolicy>();
+}
diff --git a/src/hotspot/share/gc/z/zArguments.hpp b/src/hotspot/share/gc/z/zArguments.hpp
new file mode 100644
index 00000000000..f85b1471a2b
--- /dev/null
+++ b/src/hotspot/share/gc/z/zArguments.hpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZARGUMENTS_HPP
+#define SHARE_GC_Z_ZARGUMENTS_HPP
+
+#include "gc/shared/gcArguments.hpp"
+
+class CollectedHeap;
+
+class ZArguments : public GCArguments {
+public:
+ virtual void initialize();
+ virtual size_t conservative_max_heap_alignment();
+ virtual CollectedHeap* create_heap();
+};
+
+#endif // SHARE_GC_Z_ZARGUMENTS_HPP
diff --git a/src/hotspot/share/gc/z/zArray.hpp b/src/hotspot/share/gc/z/zArray.hpp
new file mode 100644
index 00000000000..d5e6e6f0fee
--- /dev/null
+++ b/src/hotspot/share/gc/z/zArray.hpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZARRAY_HPP
+#define SHARE_GC_Z_ZARRAY_HPP
+
+#include "memory/allocation.hpp"
+
+template <typename T>
+class ZArray {
+private:
+ static const size_t initial_capacity = 32;
+
+ T* _array;
+ size_t _size;
+ size_t _capacity;
+
+ // Copy and assignment are not allowed
+ ZArray(const ZArray& array);
+ ZArray& operator=(const ZArray& array);
+
+ void expand(size_t new_capacity);
+
+public:
+ ZArray();
+ ~ZArray();
+
+ size_t size() const;
+ bool is_empty() const;
+
+ T at(size_t index) const;
+
+ void add(T value);
+ void clear();
+};
+
+template <typename T, bool parallel>
+class ZArrayIteratorImpl : public StackObj {
+private:
+ ZArray<T>* const _array;
+ size_t _next;
+
+public:
+ ZArrayIteratorImpl(ZArray<T>* array);
+
+ bool next(T* elem);
+};
+
+// Iterator types
+#define ZARRAY_SERIAL false
+#define ZARRAY_PARALLEL true
+
+template <typename T>
+class ZArrayIterator : public ZArrayIteratorImpl<T, ZARRAY_SERIAL> {
+public:
+ ZArrayIterator(ZArray<T>* array) :
+ ZArrayIteratorImpl<T, ZARRAY_SERIAL>(array) {}
+};
+
+template <typename T>
+class ZArrayParallelIterator : public ZArrayIteratorImpl<T, ZARRAY_PARALLEL> {
+public:
+ ZArrayParallelIterator(ZArray<T>* array) :
+ ZArrayIteratorImpl<T, ZARRAY_PARALLEL>(array) {}
+};
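+
+// Explanatory note: ZArrayIterator is for single-threaded walks, while
+// ZArrayParallelIterator lets multiple GC workers claim indices from the same
+// array; the parallel variant is expected to claim _next atomically (hence
+// the runtime/atomic.hpp include in zArray.inline.hpp).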
+
+#endif // SHARE_GC_Z_ZARRAY_HPP
diff --git a/src/hotspot/share/gc/z/zArray.inline.hpp b/src/hotspot/share/gc/z/zArray.inline.hpp
new file mode 100644
index 00000000000..eb92b6be8da
--- /dev/null
+++ b/src/hotspot/share/gc/z/zArray.inline.hpp
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZARRAY_INLINE_HPP
+#define SHARE_GC_Z_ZARRAY_INLINE_HPP
+
+#include "gc/z/zArray.hpp"
+#include "memory/allocation.inline.hpp"
+#include "runtime/atomic.hpp"
+
+template <typename T>
+inline ZArray<T>::ZArray() :
+ _array(NULL),
+ _size(0),
+ _capacity(0) {}
+
+template <typename T>
+inline ZArray<T>::~ZArray() {
+ if (_array != NULL) {
+ FREE_C_HEAP_ARRAY(T, _array);
+ }
+}
+
+template <typename T>