commit dcaa7f90ba by Phil Race, 2017-11-27 10:35:05 -08:00
481 changed files with 15633 additions and 6044 deletions

View file

@ -456,3 +456,5 @@ b87d7b5d5dedc1185e5929470f945b7378cdb3ad jdk-10+27
a6e591e12f122768f675428e1e5a838fd0e9c7ec jdk-10+29
8fee80b92e65149f7414250fd5e34b6f35d417b4 jdk-10+30
e6278add9ff28fab70fe1cc4c1d65f7363dc9445 jdk-10+31
a2008587c13fa05fa2dbfcb09fe987576fbedfd1 jdk-10+32
bbd692ad4fa300ecca7939ffbe3b1d5e52a28cc6 jdk-10+33

View file

@ -871,9 +871,9 @@ test-support/</code></pre>
<p>When building for distribution, <code>zipped</code> is a good solution. Binaries built with <code>internal</code> is suitable for use by developers, since they facilitate debugging, but should be stripped before distributed to end users.</p>
<h3 id="autoconf-details">Autoconf Details</h3>
<p>The <code>configure</code> script is based on the autoconf framework, but in some details deviate from a normal autoconf <code>configure</code> script.</p>
-<p>The <code>configure</code> script in the top level directory of OpenJDK is just a thin wrapper that calls <code>common/autoconf/configure</code>. This in turn provides functionality that is not easily expressed in the normal Autoconf framework, and then calls into the core of the <code>configure</code> script, which is the <code>common/autoconf/generated-configure.sh</code> file.</p>
+<p>The <code>configure</code> script in the top level directory of OpenJDK is just a thin wrapper that calls <code>make/autoconf/configure</code>. This in turn provides functionality that is not easily expressed in the normal Autoconf framework, and then calls into the core of the <code>configure</code> script, which is the <code>make/autoconf/generated-configure.sh</code> file.</p>
<p>As the name implies, this file is generated by Autoconf. It is checked in after regeneration, to alleviate the common user to have to install Autoconf.</p>
-<p>The build system will detect if the Autoconf source files have changed, and will trigger a regeneration of <code>common/autoconf/generated-configure.sh</code> if needed. You can also manually request such an update by <code>bash common/autoconf/autogen.sh</code>.</p>
+<p>The build system will detect if the Autoconf source files have changed, and will trigger a regeneration of <code>make/autoconf/generated-configure.sh</code> if needed. You can also manually request such an update by <code>bash make/autoconf/autogen.sh</code>.</p>
<p>If you make changes to the build system that requires a re-generation, note the following:</p>
<ul>
<li><p>You must use <em>exactly</em> version 2.69 of autoconf for your patch to be accepted. This is to avoid spurious changes in the generated file. Note that Ubuntu 16.04 ships a patched version of autoconf which claims to be 2.69, but is not.</p></li>
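In practice, the regeneration step described above amounts to running the checked-in helper script with the pinned Autoconf version. A minimal sketch, assuming the new `make/autoconf` layout introduced by this change and an `autoconf` 2.69 binary on the `PATH`:

```bash
# Verify the pinned Autoconf version before regenerating (must be exactly 2.69).
autoconf --version | head -n 1

# Re-create make/autoconf/generated-configure.sh from the .m4 sources,
# then include the regenerated file in the same patch.
bash make/autoconf/autogen.sh
```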

View file

@ -1660,18 +1660,18 @@ The `configure` script is based on the autoconf framework, but in some details
deviate from a normal autoconf `configure` script.
The `configure` script in the top level directory of OpenJDK is just a thin
-wrapper that calls `common/autoconf/configure`. This in turn provides
+wrapper that calls `make/autoconf/configure`. This in turn provides
functionality that is not easily expressed in the normal Autoconf framework,
and then calls into the core of the `configure` script, which is the
-`common/autoconf/generated-configure.sh` file.
+`make/autoconf/generated-configure.sh` file.
As the name implies, this file is generated by Autoconf. It is checked in after
regeneration, to alleviate the common user to have to install Autoconf.
The build system will detect if the Autoconf source files have changed, and
-will trigger a regeneration of `common/autoconf/generated-configure.sh` if
+will trigger a regeneration of `make/autoconf/generated-configure.sh` if
needed. You can also manually request such an update by `bash
-common/autoconf/autogen.sh`.
+make/autoconf/autogen.sh`.
If you make changes to the build system that requires a re-generation, note the
following:

View file

@ -177,7 +177,7 @@ ifneq ($(filter product-bundles, $(MAKECMDGOALS)), )
# Create special filter rules when dealing with unzipped .dSYM directories on
# macosx
ifeq ($(OPENJDK_TARGET_OS), macosx)
-ifeq ($(ZIP_DEBUGINFO_FILES), false)
+ifeq ($(ZIP_EXTERNAL_DEBUG_SYMBOLS), false)
JDK_SYMBOLS_EXCLUDE_PATTERN := $(addprefix %, \
$(call containing, .dSYM/, $(patsubst $(JDK_IMAGE_DIR)/%, %, $(ALL_JDK_FILES))))
endif
@ -212,7 +212,7 @@ ifneq ($(filter product-bundles, $(MAKECMDGOALS)), )
# Create special filter rules when dealing with unzipped .dSYM directories on
# macosx
ifeq ($(OPENJDK_TARGET_OS), macosx)
-ifeq ($(ZIP_DEBUGINFO_FILES), false)
+ifeq ($(ZIP_EXTERNAL_DEBUG_SYMBOLS), false)
JRE_SYMBOLS_EXCLUDE_PATTERN := $(addprefix %, \
$(call containing, .dSYM/, $(patsubst $(JRE_IMAGE_DIR)/%, %, $(ALL_JRE_FILES))))
endif

View file

@ -329,7 +329,7 @@ else # HAS_SPEC=true
$(call PrintFailureReports)
$(call PrintBuildLogFailures)
$(call ReportProfileTimes)
-$(PRINTF) "Hint: See common/doc/building.html#troubleshooting for assistance.\n\n"
+$(PRINTF) "Hint: See doc/building.html#troubleshooting for assistance.\n\n"
ifneq ($(COMPARE_BUILD), )
$(call CleanupCompareBuild)
endif

View file

@ -1049,7 +1049,7 @@ ALL_TARGETS += default jdk images docs bundles all
# file.
CLEAN_DIRS += hotspot jdk bootcycle-build test buildtools support \
-images make-support test-make bundles buildjdk
+images make-support test-make bundles buildjdk test-results test-support
CLEAN_DIR_TARGETS := $(addprefix clean-, $(CLEAN_DIRS))
CLEAN_SUPPORT_DIRS += demos
CLEAN_SUPPORT_DIR_TARGETS := $(addprefix clean-, $(CLEAN_SUPPORT_DIRS))
@ -1094,6 +1094,8 @@ $(CLEAN_MODULE_PHASE_TARGETS):
# while classes and touch files end up in jdk.
clean-support: clean-jdk
clean-test: clean-test-results clean-test-support
# Remove everything, including configure configuration. If the output
# directory was created by configure and now becomes empty, remove it as well.
dist-clean: clean
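The `test-results` and `test-support` entries added to `CLEAN_DIRS` above, plus the new `clean-test` target, make it possible to wipe only the jtreg output. A usage sketch, assuming the top-level Makefile forwards the target as it does for the other `clean-*` targets and that the default output directory layout is used:

```bash
# Remove only the test output produced by run-test, keeping the built images.
make clean-test

# Roughly the same effect done by hand (assumed default layout build/<config>/).
rm -rf build/*/test-results build/*/test-support
```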

View file

@ -32,15 +32,17 @@ include FindTests.gmk
# We will always run multiple tests serially
.NOTPARALLEL:
# Directories to find jtreg tests relative to
JTREG_TEST_TOPDIRS := $(TOPDIR) $(JTREG_TESTROOTS)
# Hook to include the corresponding custom file, if present.
$(eval $(call IncludeCustomExtension, RunTests.gmk))
TEST_RESULTS_DIR := $(OUTPUTDIR)/test-results
TEST_SUPPORT_DIR := $(OUTPUTDIR)/test-support
ifeq ($(CUSTOM_ROOT), )
JTREG_TOPDIR := $(TOPDIR)
else
JTREG_TOPDIR := $(CUSTOM_ROOT)
endif
################################################################################
# Parse control variables
@ -87,6 +89,11 @@ hotspot_JTREG_ASSERT := false
hotspot_JTREG_NATIVEPATH := $(TEST_IMAGE_DIR)/hotspot/jtreg/native
jdk_JTREG_NATIVEPATH := $(TEST_IMAGE_DIR)/jdk/jtreg/native
jdk_JTREG_PROBLEM_LIST += $(TOPDIR)/test/jdk/ProblemList.txt
jaxp_JTREG_PROBLEM_LIST += $(TOPDIR)/test/jaxp/ProblemList.txt
langtools_JTREG_PROBLEM_LIST += $(TOPDIR)/test/langtools/ProblemList.txt
nashorn_JTREG_PROBLEM_LIST += $(TOPDIR)/test/nashorn/ProblemList.txt
hotspot_JTREG_PROBLEM_LIST += $(TOPDIR)/test/hotspot/jtreg/ProblemList.txt
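The per-component `*_JTREG_PROBLEM_LIST` variables added above are turned into jtreg exclude lists further down in this file (see the `-exclude:` wiring in the jtreg options). A hedged usage sketch; the extra exclude file and the `JTREG_OPTIONS` pass-through usage are illustrative assumptions, not part of this change:

```bash
# Run the jdk tier1 tests; entries in test/jdk/ProblemList.txt are excluded
# automatically via the new jdk_JTREG_PROBLEM_LIST hook.
make run-test TEST=jdk:tier1

# Hypothetical: layer an additional local exclude list on top by passing a raw
# jtreg option through JTREG_OPTIONS (forwarded verbatim to jtreg below).
make run-test TEST=jdk:tier1 JTREG_OPTIONS=-exclude:/tmp/my-extra-ProblemList.txt
```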
################################################################################
# Parse test selection
@ -119,12 +126,46 @@ define ParseGtestTestSelection
)
endef
# Helper function that removes the TOPDIR part
CleanupJtregPath = \
$(strip $(patsubst %/, %, $(subst $(JTREG_TOPDIR)/,, $1)))
# Take a partial Jtreg root path and return a full, absolute path to that Jtreg
# root. Also support having "hotspot" as an alias for "hotspot/jtreg".
ExpandJtregRoot = \
$(call CleanupJtregPath, $(wildcard \
$(if $(filter /%, $1), \
$(if $(wildcard $(strip $1)/TEST.ROOT), \
$1 \
) \
, \
$(filter $(addprefix %, $1), $(JTREG_TESTROOTS) $(addsuffix /, $(JTREG_TESTROOTS))) \
$(filter $(addprefix %, $(strip $1)/jtreg), $(JTREG_TESTROOTS) $(addsuffix /, $(JTREG_TESTROOTS))) \
) \
))
# Take a partial Jtreg test path and return a full, absolute path to that Jtreg
# test. Also support having "hotspot" as an alias for "hotspot/jtreg".
ExpandJtregPath = \
$(if $(call ExpandJtregRoot, $1), \
$(call ExpandJtregRoot, $1) \
, \
$(call CleanupJtregPath, $(wildcard \
$(if $(filter /%, $1), \
$1 \
, \
$(addsuffix /$(strip $1), $(JTREG_TESTROOTS) $(TEST_BASEDIRS)) \
$(addsuffix $(strip $(patsubst hotspot/%, /hotspot/jtreg/%, $1)), $(JTREG_TESTROOTS) $(TEST_BASEDIRS)) \
) \
)) \
)
# Helper function to determine if a test specification is a Jtreg test
#
# It is a Jtreg test if it optionally begins with jtreg:, and then is either
# an unspecified group name (possibly prefixed by :), or a group in a
-# specified test/<component> directory, or a path to a test or test directory,
-# either absolute or relative to any of the JTREG_TEST_TOPDIRS.
+# specified test root, or a path to a test or test directory,
+# either absolute or relative to any of the TEST_BASEDIRS or test roots.
define ParseJtregTestSelection
$(eval TEST_NAME := $(strip $(patsubst jtreg:%, %, $1))) \
$(if $(or $(findstring :, $(TEST_NAME)), $(findstring /, $(TEST_NAME))), , \
@ -132,29 +173,26 @@ define ParseJtregTestSelection
) \
$(if $(findstring :, $(TEST_NAME)), \
$(if $(filter :%, $(TEST_NAME)), \
-$(foreach root, $(JTREG_TESTROOTS), \
-$(if $(filter $(patsubst :%, %, $(TEST_NAME)), \
-$($(root)_JTREG_TEST_GROUPS)), \
-jtreg:$(root):$(patsubst :%,%,$(TEST_NAME)) \
-) \
-) \
+$(eval TEST_GROUP := $(patsubst :%, %, $(TEST_NAME))) \
+$(eval TEST_ROOTS := $(JTREG_TESTROOTS)) \
, \
-$(eval ROOT_PART := $(word 1, $(subst :, $(SPACE), $(TEST_NAME)))) \
-$(eval ROOT := $(filter $(addprefix %, $(ROOT_PART)), $(JTREG_TESTROOTS))) \
-$(eval GROUP := $(word 2, $(subst :, $(SPACE), $(TEST_NAME)))) \
-$(foreach root, $(ROOT), \
-$(if $(filter $(GROUP), $($(root)_JTREG_TEST_GROUPS)), \
-jtreg:$(root):$(GROUP) \
+$(eval TEST_PATH := $(word 1, $(subst :, $(SPACE), $(TEST_NAME)))) \
+$(eval TEST_GROUP := $(word 2, $(subst :, $(SPACE), $(TEST_NAME)))) \
+$(eval TEST_ROOTS := $(call ExpandJtregRoot, $(TEST_PATH))) \
+) \
+$(foreach test_root, $(TEST_ROOTS), \
+$(if $(filter /%, $(test_root)), \
+jtreg:$(test_root):$(TEST_GROUP) \
+, \
+$(if $(filter $(TEST_GROUP), $($(JTREG_TOPDIR)/$(test_root)_JTREG_TEST_GROUPS)), \
+jtreg:$(test_root):$(TEST_GROUP) \
) \
) \
) \
, \
-$(if $(filter /%, $(TEST_NAME)), \
-$(if $(wildcard $(TEST_NAME)), \
-jtreg:$(TEST_NAME) \
-) \
-, \
-$(addprefix jtreg:, $(wildcard $(addsuffix /$(TEST_NAME), $(JTREG_TEST_TOPDIRS)))) \
+$(eval TEST_PATHS := $(call ExpandJtregPath, $(TEST_NAME))) \
+$(foreach test_path, $(TEST_PATHS), \
+jtreg:$(test_path) \
) \
)
endef
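The rewritten selection logic above, together with the new `ExpandJtregRoot`/`ExpandJtregPath` helpers, accepts both test-root-relative and `TEST_BASEDIRS`-relative specifications, and treats `hotspot` as an alias for `hotspot/jtreg`. A few illustrative invocations (a sketch based on the comments above; the group and test names are assumptions taken from the usual TEST.groups layout):

```bash
# Run a named group in a given test root ("hotspot" expands to hotspot/jtreg).
make run-test TEST=hotspot:tier1

# Run a single test directory, resolved against TEST_BASEDIRS (e.g. $(TOPDIR)/test).
make run-test TEST=jdk/java/lang/invoke

# Absolute paths and an explicit jtreg: prefix are also accepted.
make run-test TEST=jtreg:/path/to/repo/test/langtools/tools/javac
```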
@ -162,7 +200,7 @@ endef
ifeq ($(TEST), )
$(info No test selection given in TEST!)
$(info Please use e.g. 'run-test TEST=tier1' or 'run-test-tier1')
-$(info See common/doc/testing.[md|html] for help)
+$(info See doc/testing.[md|html] for help)
$(error Cannot continue)
endif
@ -185,7 +223,7 @@ $(foreach test, $(TEST), \
ifneq ($(UNKNOWN_TEST), )
$(info Unknown test selection: '$(UNKNOWN_TEST)')
-$(info See common/doc/testing.[md|html] for help)
+$(info See doc/testing.[md|html] for help)
$(error Cannot continue)
endif
@ -299,8 +337,17 @@ define SetupRunJtregTestBody
$1_TEST_SUPPORT_DIR := $$(TEST_SUPPORT_DIR)/$1
$1_TEST_NAME := $$(strip $$(patsubst jtreg:%, %, $$($1_TEST)))
-$1_COMPONENT := $$(firstword $$(subst /, $$(SPACE), \
-$$(patsubst test/%, %, $$($1_TEST_NAME))))
+$1_COMPONENT := \
+$$(strip $$(foreach root, $$(JTREG_TESTROOTS), \
+$$(if $$(filter $$(root)%, $$(JTREG_TOPDIR)/$$($1_TEST_NAME)), \
+$$(lastword $$(subst /, $$(SPACE), $$(root))) \
+) \
+))
+# This will work only as long as just hotspot has the additional "jtreg" directory
+ifeq ($$($1_COMPONENT), jtreg)
+$1_COMPONENT := hotspot
+endif
ifeq ($$(JT_HOME), )
$$(info Error: jtreg framework is not found.)
@ -317,6 +364,7 @@ define SetupRunJtregTestBody
$$(eval $$(call SetJtregValue,$1,JTREG_MAX_MEM,512m))
$$(eval $$(call SetJtregValue,$1,JTREG_NATIVEPATH))
$$(eval $$(call SetJtregValue,$1,JTREG_BASIC_OPTIONS))
$$(eval $$(call SetJtregValue,$1,JTREG_PROBLEM_LIST))
ifneq ($(TEST_JOBS), 0)
# User has specified TEST_JOBS, use that as fallback default
@ -326,11 +374,6 @@ define SetupRunJtregTestBody
$$(eval $$(call SetJtregValue,$1,JTREG_JOBS,$$(JOBS)))
endif
ifeq ($$(shell $$(EXPR) $$($1_JTREG_JOBS) \> 50), 1)
# Until CODETOOLS-7901892 is fixed, JTreg cannot handle more than 50 jobs
$1_JTREG_JOBS := 50
endif
# Make sure MaxRAMPercentage is high enough to not cause OOM or swapping since
# we may end up with a lot of JVM's
$1_JTREG_MAX_RAM_PERCENTAGE := $$(shell $$(EXPR) 25 / $$($1_JTREG_JOBS))
@ -370,11 +413,18 @@ define SetupRunJtregTestBody
$1_JTREG_BASIC_OPTIONS += -nativepath:$$($1_JTREG_NATIVEPATH)
endif
ifneq ($$($1_JTREG_PROBLEM_LIST), )
$1_JTREG_BASIC_OPTIONS += $$(addprefix -exclude:, $$($1_JTREG_PROBLEM_LIST))
endif
ifneq ($$(JIB_JAR), )
$1_JTREG_BASIC_OPTIONS += -cpa:$$(JIB_JAR)
endif
-run-test-$1:
+clean-workdir-$1:
$$(RM) -r $$($1_TEST_SUPPORT_DIR)
run-test-$1: clean-workdir-$1
$$(call LogWarn)
$$(call LogWarn, Running test '$$($1_TEST)')
$$(call MakeDir, $$($1_TEST_RESULTS_DIR) $$($1_TEST_SUPPORT_DIR))
@ -383,7 +433,7 @@ define SetupRunJtregTestBody
-Dprogram=jtreg -jar $$(JT_HOME)/lib/jtreg.jar \
$$($1_JTREG_BASIC_OPTIONS) \
-testjdk:$$(JDK_IMAGE_DIR) \
--dir:$$(TOPDIR) \
+-dir:$$(JTREG_TOPDIR) \
-reportDir:$$($1_TEST_RESULTS_DIR) \
-workDir:$$($1_TEST_SUPPORT_DIR) \
$$(JTREG_OPTIONS) \
@ -481,12 +531,19 @@ run-test: $(TARGETS)
$(foreach test, $(TESTS_TO_RUN), \
$(eval TEST_ID := $(shell $(ECHO) $(strip $(test)) | \
$(TR) -cs '[a-z][A-Z][0-9]\n' '_')) \
$(eval NAME_PATTERN := $(shell $(ECHO) $(test) | $(TR) -c \\n _)) \
$(if $(filter __________________________________________________%, $(NAME_PATTERN)), \
$(eval TEST_NAME := ) \
$(PRINTF) "%2s %-49s\n" " " "$(test)" $(NEWLINE) \
, \
$(eval TEST_NAME := $(test)) \
) \
$(if $(filter $($(TEST_ID)_PASSED), $($(TEST_ID)_TOTAL)), \
-$(PRINTF) "%2s %-49s %5d %5d %5d %5d %2s\n" " " "$(test)" \
+$(PRINTF) "%2s %-49s %5d %5d %5d %5d %2s\n" " " "$(TEST_NAME)" \
$($(TEST_ID)_TOTAL) $($(TEST_ID)_PASSED) $($(TEST_ID)_FAILED) \
$($(TEST_ID)_ERROR) " " $(NEWLINE) \
, \
-$(PRINTF) "%2s %-49s %5d %5d %5d %5d %2s\n" ">>" "$(test)" \
+$(PRINTF) "%2s %-49s %5d %5d %5d %5d %2s\n" ">>" "$(TEST_NAME)" \
$($(TEST_ID)_TOTAL) $($(TEST_ID)_PASSED) $($(TEST_ID)_FAILED) \
$($(TEST_ID)_ERROR) "<<" $(NEWLINE) \
$(eval TEST_FAILURE := true) \

View file

@ -90,13 +90,13 @@ check_autoconf_timestamps() {
check_hg_updates() {
if test "x`which hg 2> /dev/null | grep -v '^no hg in'`" != x; then
-conf_updated_autoconf_files=`cd $conf_script_dir && hg status -mard 2> /dev/null | grep autoconf`
+conf_updated_autoconf_files=`cd $conf_script_dir && hg status -mard . 2> /dev/null`
if test "x$conf_updated_autoconf_files" != x; then
echo "Configure source code has been updated, checking time stamps"
check_autoconf_timestamps
elif test "x$CUSTOM_CONFIG_DIR" != x; then
# If custom source configure is available, make sure it is up-to-date as well.
-conf_custom_updated_autoconf_files=`cd $CUSTOM_CONFIG_DIR && hg status -mard 2> /dev/null | grep autoconf`
+conf_custom_updated_autoconf_files=`cd $CUSTOM_CONFIG_DIR && hg status -mard . 2> /dev/null`
if test "x$conf_custom_updated_autoconf_files" != x; then
echo "Configure custom source code has been updated, checking time stamps"
check_autoconf_timestamps
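For readers unfamiliar with the Mercurial flags used above: `-mard` limits `hg status` to modified, added, removed and deleted files, and the trailing `.` restricts it to the current directory, which is presumably why the old `grep autoconf` filter is no longer needed once the sources live under `make/autoconf`. A small sketch of the check in isolation:

```bash
# Report only modified/added/removed/deleted files under the configure sources;
# any output means generated-configure.sh may need to be regenerated.
cd make/autoconf && hg status -mard . 2> /dev/null
```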

View file

@ -656,7 +656,6 @@ BUILD_FAILURE_HANDLER
ENABLE_INTREE_EC
VALID_JVM_FEATURES
JVM_FEATURES_custom
JVM_FEATURES_zeroshark
JVM_FEATURES_zero
JVM_FEATURES_minimal
JVM_FEATURES_core
@ -676,10 +675,6 @@ PNG_LIBS
PNG_CFLAGS
USE_EXTERNAL_LIBGIF
USE_EXTERNAL_LIBJPEG
LLVM_LIBS
LLVM_LDFLAGS
LLVM_CFLAGS
LLVM_CONFIG
LIBFFI_LIB_FILE
ENABLE_LIBFFI_BUNDLING
LIBFFI_LIBS
@ -1993,6 +1988,7 @@ Optional Features:
--enable-cds[=yes/no] enable class data sharing feature in non-minimal VM.
Default is yes.
--disable-hotspot-gtest Disables building of the Hotspot unit tests
[enabled]
--disable-freetype-bundling
disable bundling of the freetype library with the
build result [enabled on Windows or when using
@ -2033,8 +2029,7 @@ Optional Packages:
--with-debug-level set the debug level (release, fastdebug, slowdebug,
optimized) [release]
--with-jvm-variants JVM variants (separated by commas) to build
-(server,client,minimal,core,zero,zeroshark,custom)
-[server]
+(server,client,minimal,core,zero,custom) [server]
--with-cpu-port specify sources to use for Hotspot 64-bit ARM port
(arm64,aarch64) [aarch64]
--with-devkit use this devkit for compilers, tools and resources
--with-devkit use this devkit for compilers, tools and resources --with-devkit use this devkit for compilers, tools and resources
@ -4272,12 +4267,12 @@ pkgadd_help() {
#
# All valid JVM features, regardless of platform
-VALID_JVM_FEATURES="compiler1 compiler2 zero shark minimal dtrace jvmti jvmci \
+VALID_JVM_FEATURES="compiler1 compiler2 zero minimal dtrace jvmti jvmci \
graal vm-structs jni-check services management all-gcs nmt cds \
static-build link-time-opt aot"
# All valid JVM variants
-VALID_JVM_VARIANTS="server client minimal core zero zeroshark custom"
+VALID_JVM_VARIANTS="server client minimal core zero custom"
############################################################################### ###############################################################################
# Check if the specified JVM variant should be built. To be used in shell if
@ -4308,7 +4303,6 @@ VALID_JVM_VARIANTS="server client minimal core zero zeroshark custom"
# minimal: reduced form of client with optional features stripped out
# core: normal interpreter only, no compiler
# zero: C++ based interpreter only, no compiler
# zeroshark: C++ based interpreter, and a llvm-based compiler
# custom: baseline JVM with no default features
#
@ -4808,11 +4802,6 @@ VALID_JVM_VARIANTS="server client minimal core zero zeroshark custom"
################################################################################ ################################################################################
################################################################################
# Setup llvm (Low-Level VM)
################################################################################
################################################################################ ################################################################################
# Setup various libraries, typically small system libraries # Setup various libraries, typically small system libraries
################################################################################ ################################################################################
@ -5166,7 +5155,7 @@ VS_SDK_PLATFORM_NAME_2013=
#CUSTOM_AUTOCONF_INCLUDE
# Do not change or remove the following line, it is needed for consistency checks:
-DATE_WHEN_GENERATED=1509128484
+DATE_WHEN_GENERATED=1511359342
############################################################################### ###############################################################################
#
@ -17069,7 +17058,7 @@ $as_echo "$as_me: Unknown variant(s) specified: $INVALID_VARIANTS" >&6;}
-if [[ " $JVM_VARIANTS " =~ " zero " ]] || [[ " $JVM_VARIANTS " =~ " zeroshark " ]] ; then
+if [[ " $JVM_VARIANTS " =~ " zero " ]] ; then
# zero behaves as a platform and rewrites these values. This is really weird. :(
# We are guaranteed that we do not build any other variants when building zero.
HOTSPOT_TARGET_CPU=zero
@ -25114,7 +25103,7 @@ fi
# Should we build the serviceability agent (SA)?
INCLUDE_SA=true
-if [[ " $JVM_VARIANTS " =~ " zero " ]] || [[ " $JVM_VARIANTS " =~ " zeroshark " ]] ; then
+if [[ " $JVM_VARIANTS " =~ " zero " ]] ; then
INCLUDE_SA=false
fi
if test "x$OPENJDK_TARGET_OS" = xaix ; then
@ -51971,7 +51960,7 @@ $as_echo "$as_me: GCC >= 6 detected; adding ${NO_DELETE_NULL_POINTER_CHECKS_CFLA
fi
-if ! [[ " $JVM_VARIANTS " =~ " zero " ]] && ! [[ " $JVM_VARIANTS " =~ " zeroshark " ]] ; then
+if ! [[ " $JVM_VARIANTS " =~ " zero " ]] ; then
# Non-zero builds have stricter warnings
JVM_CFLAGS="$JVM_CFLAGS -Wreturn-type -Wundef -Wformat=2"
else
@ -52852,7 +52841,7 @@ $as_echo "$as_me: GCC >= 6 detected; adding ${NO_DELETE_NULL_POINTER_CHECKS_CFLA
fi
-if ! [[ " $JVM_VARIANTS " =~ " zero " ]] && ! [[ " $JVM_VARIANTS " =~ " zeroshark " ]] ; then
+if ! [[ " $JVM_VARIANTS " =~ " zero " ]] ; then
# Non-zero builds have stricter warnings
OPENJDK_BUILD_JVM_CFLAGS="$OPENJDK_BUILD_JVM_CFLAGS -Wreturn-type -Wundef -Wformat=2"
else
@ -54091,13 +54080,13 @@ if test "${with_native_debug_symbols+set}" = set; then :
else
if test "x$OPENJDK_TARGET_OS" = xaix; then
-# AIX doesn't support 'zipped' so use 'internal' as default
+# AIX doesn't support 'external' so use 'internal' as default
with_native_debug_symbols="internal"
else
if test "x$STATIC_BUILD" = xtrue; then
with_native_debug_symbols="none"
else
-with_native_debug_symbols="zipped"
+with_native_debug_symbols="external"
fi
fi
@ -54613,7 +54602,7 @@ $as_echo "yes" >&6; }
fi
# Check if ffi is needed
-if [[ " $JVM_VARIANTS " =~ " zero " ]] || [[ " $JVM_VARIANTS " =~ " zeroshark " ]] ; then
+if [[ " $JVM_VARIANTS " =~ " zero " ]] ; then
NEEDS_LIB_FFI=true
else
NEEDS_LIB_FFI=false
@ -54686,8 +54675,7 @@ $as_echo "$has_static_libstdcxx" >&6; }
# If dynamic wasn't requested, go with static unless it isn't available.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to link with libstdc++" >&5
$as_echo_n "checking how to link with libstdc++... " >&6; }
-if test "x$with_stdc__lib" = xdynamic || test "x$has_static_libstdcxx" = xno \
-|| [[ " $JVM_VARIANTS " =~ " zeroshark " ]] ; then
+if test "x$with_stdc__lib" = xdynamic || test "x$has_static_libstdcxx" = xno ; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: dynamic" >&5
$as_echo "dynamic" >&6; }
else
@ -65169,94 +65157,6 @@ $as_echo "${LIBFFI_LIB_FILE}" >&6; }
if [[ " $JVM_VARIANTS " =~ " zeroshark " ]] ; then
# Extract the first word of "llvm-config", so it can be a program name with args.
set dummy llvm-config; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
if ${ac_cv_prog_LLVM_CONFIG+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$LLVM_CONFIG"; then
ac_cv_prog_LLVM_CONFIG="$LLVM_CONFIG" # Let the user override the test.
else
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_LLVM_CONFIG="llvm-config"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
fi
done
done
IFS=$as_save_IFS
fi
fi
LLVM_CONFIG=$ac_cv_prog_LLVM_CONFIG
if test -n "$LLVM_CONFIG"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $LLVM_CONFIG" >&5
$as_echo "$LLVM_CONFIG" >&6; }
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
fi
if test "x$LLVM_CONFIG" != xllvm-config; then
as_fn_error $? "llvm-config not found in $PATH." "$LINENO" 5
fi
llvm_components="jit mcjit engine nativecodegen native"
unset LLVM_CFLAGS
for flag in $("$LLVM_CONFIG" --cxxflags); do
if echo "${flag}" | grep -q '^-[ID]'; then
if test "${flag}" != "-D_DEBUG" ; then
if test "${LLVM_CFLAGS}" != "" ; then
LLVM_CFLAGS="${LLVM_CFLAGS} "
fi
LLVM_CFLAGS="${LLVM_CFLAGS}${flag}"
fi
fi
done
llvm_version=$("${LLVM_CONFIG}" --version | $SED 's/\.//; s/svn.*//')
LLVM_CFLAGS="${LLVM_CFLAGS} -DSHARK_LLVM_VERSION=${llvm_version}"
unset LLVM_LDFLAGS
for flag in $("${LLVM_CONFIG}" --ldflags); do
if echo "${flag}" | grep -q '^-L'; then
if test "${LLVM_LDFLAGS}" != ""; then
LLVM_LDFLAGS="${LLVM_LDFLAGS} "
fi
LLVM_LDFLAGS="${LLVM_LDFLAGS}${flag}"
fi
done
unset LLVM_LIBS
for flag in $("${LLVM_CONFIG}" --libs ${llvm_components}); do
if echo "${flag}" | grep -q '^-l'; then
if test "${LLVM_LIBS}" != ""; then
LLVM_LIBS="${LLVM_LIBS} "
fi
LLVM_LIBS="${LLVM_LIBS}${flag}"
fi
done
# Due to https://llvm.org/bugs/show_bug.cgi?id=16902, llvm does not
# always properly detect -ltinfo
LLVM_LIBS="${LLVM_LIBS} -ltinfo"
fi
# Check whether --with-libjpeg was given.
if test "${with_libjpeg+set}" = set; then :
@ -66053,7 +65953,6 @@ $as_echo "no, not found at $STLPORT_LIB" >&6; }
# Hotspot setup depends on lib checks.
@ -66124,15 +66023,9 @@ $as_echo "$JVM_FEATURES" >&6; }
fi
fi
-if ! [[ " $JVM_VARIANTS " =~ " zero " ]] && ! [[ " $JVM_VARIANTS " =~ " zeroshark " ]] ; then
+if ! [[ " $JVM_VARIANTS " =~ " zero " ]] ; then
if [[ " $JVM_FEATURES " =~ " zero " ]] ; then
-as_fn_error $? "To enable zero/zeroshark, you must use --with-jvm-variants=zero/zeroshark" "$LINENO" 5
+as_fn_error $? "To enable zero, you must use --with-jvm-variants=zero" "$LINENO" 5
fi
fi
if ! [[ " $JVM_VARIANTS " =~ " zeroshark " ]] ; then
if [[ " $JVM_FEATURES " =~ " shark " ]] ; then
as_fn_error $? "To enable shark, you must use --with-jvm-variants=zeroshark" "$LINENO" 5
fi
fi
@ -66216,7 +66109,6 @@ $as_echo "no" >&6; }
JVM_FEATURES_core="$NON_MINIMAL_FEATURES $JVM_FEATURES"
JVM_FEATURES_minimal="compiler1 minimal $JVM_FEATURES $JVM_FEATURES_link_time_opt"
JVM_FEATURES_zero="zero $NON_MINIMAL_FEATURES $JVM_FEATURES"
JVM_FEATURES_zeroshark="zero shark $NON_MINIMAL_FEATURES $JVM_FEATURES"
JVM_FEATURES_custom="$JVM_FEATURES"
@ -66226,7 +66118,6 @@ $as_echo "no" >&6; }
# Used for verification of Makefiles by check-jvm-feature
@ -68104,7 +67995,6 @@ $as_echo "$OUTPUT_DIR_IS_LOCAL" >&6; }
JVM_FEATURES_core="$($ECHO $($PRINTF '%s\n' $JVM_FEATURES_core | $SORT -u))"
JVM_FEATURES_minimal="$($ECHO $($PRINTF '%s\n' $JVM_FEATURES_minimal | $SORT -u))"
JVM_FEATURES_zero="$($ECHO $($PRINTF '%s\n' $JVM_FEATURES_zero | $SORT -u))"
JVM_FEATURES_zeroshark="$($ECHO $($PRINTF '%s\n' $JVM_FEATURES_zeroshark | $SORT -u))"
JVM_FEATURES_custom="$($ECHO $($PRINTF '%s\n' $JVM_FEATURES_custom | $SORT -u))"
# Validate features

View file

@ -476,7 +476,7 @@ AC_DEFUN([SETUP_HOTSPOT_TARGET_CPU_PORT],
AC_DEFUN_ONCE([HOTSPOT_ENABLE_DISABLE_GTEST],
[
AC_ARG_ENABLE([hotspot-gtest], [AS_HELP_STRING([--disable-hotspot-gtest],
-[Disables building of the Hotspot unit tests])])
+[Disables building of the Hotspot unit tests @<:@enabled@:>@])])
if test -e "${TOPDIR}/test/hotspot/gtest"; then
GTEST_DIR_EXISTS="true"

View file

@ -296,13 +296,13 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_DEBUG_SYMBOLS],
],
[
if test "x$OPENJDK_TARGET_OS" = xaix; then
-# AIX doesn't support 'zipped' so use 'internal' as default
+# AIX doesn't support 'external' so use 'internal' as default
with_native_debug_symbols="internal"
else
if test "x$STATIC_BUILD" = xtrue; then
with_native_debug_symbols="none"
else
-with_native_debug_symbols="zipped"
+with_native_debug_symbols="external"
fi
fi
])
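The hunks above change the default for `--with-native-debug-symbols` from `zipped` to `external` (except on AIX and for static builds). A hedged configure sketch; the accepted values appear in the surrounding configure code, while the exact output file names are an assumption about the usual platform conventions:

```bash
# Default after this change: debug info kept in separate, unzipped files next
# to the binaries (e.g. .debuginfo files on Linux, .dSYM bundles on macOS).
bash configure

# Previous default, still selectable explicitly: separate debug info, zipped.
bash configure --with-native-debug-symbols=zipped

# Other accepted values from the same configure logic:
bash configure --with-native-debug-symbols=internal   # symbols kept inside the binaries
bash configure --with-native-debug-symbols=none
```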

View file

@ -29,6 +29,9 @@ _FIND_TESTS_GMK := 1
# Hook to include the corresponding custom file, if present.
$(eval $(call IncludeCustomExtension, common/FindTests.gmk))
# TEST_BASEDIRS might have been set by a custom extension
TEST_BASEDIRS += $(TOPDIR)/test $(TOPDIR)
# JTREG_TESTROOTS might have been set by a custom extension
JTREG_TESTROOTS += $(addprefix $(TOPDIR)/test/, hotspot/jtreg jdk langtools nashorn jaxp)

View file

@ -328,8 +328,9 @@ $(MODULE_DEPS_MAKEFILE): $(MODULE_INFOS) \
$(NAWK) -v MODULE=$(call GetModuleNameFromModuleInfo, $m) '\
BEGIN { if (MODULE != "java.base") printf(" java.base"); } \
/^ *requires/ { sub(/;/, ""); \
-sub(/requires/, ""); \
-sub(/transitive/, ""); \
+sub(/requires /, " "); \
+sub(/ static /, " "); \
+sub(/ transitive /, " "); \
sub(/\/\/.*/, ""); \
sub(/\/\*.*\*\//, ""); \
gsub(/^ +\*.*/, ""); \
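The tightened substitutions above let the module-dependency scraper cope with `requires static` and `requires transitive` clauses in `module-info.java`. A standalone sketch of the same idea (simplified; the real rule above also strips comments and handles the java.base special case):

```bash
# Feed sample requires clauses through an awk program equivalent in spirit to
# the substitutions above and print the extracted module names.
printf '%s\n' \
    'requires java.xml;' \
    'requires transitive java.sql;' \
    'requires static jdk.unsupported;' | \
  awk '/^ *requires/ { sub(/;/, ""); sub(/requires /, " ");
       sub(/ static /, " "); sub(/ transitive /, " "); printf("%s", $0) }'
# Expected output (one line):  java.xml java.sql jdk.unsupported
```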

View file

@ -203,7 +203,7 @@ var getJibProfiles = function (input) {
data.src_bundle_excludes = "./build webrev* */webrev* */*/webrev* */*/*/webrev* .hg */.hg */*/.hg */*/*/.hg";
// Include list to use when creating a minimal jib source bundle which
// contains just the jib configuration files.
-data.conf_bundle_includes = "*/conf/jib-profiles.* common/autoconf/version-numbers"
+data.conf_bundle_includes = "*/conf/jib-profiles.* make/autoconf/version-numbers"
// Define some common values
var common = getJibProfilesCommon(input, data);
@ -1043,7 +1043,7 @@ var concatObjects = function (o1, o2) {
/**
* Constructs the numeric version string from reading the
-* common/autoconf/version-numbers file and removing all trailing ".0".
+* make/autoconf/version-numbers file and removing all trailing ".0".
*
* @param major Override major version
* @param minor Override minor version
@ -1080,17 +1080,17 @@ var versionArgs = function(input, common) {
return args;
}
-// Properties representation of the common/autoconf/version-numbers file. Lazily
+// Properties representation of the make/autoconf/version-numbers file. Lazily
// initiated by the function below.
var version_numbers;
/**
-* Read the common/autoconf/version-numbers file into a Properties object.
+* Read the make/autoconf/version-numbers file into a Properties object.
*
* @returns {java.utilProperties}
*/
var getVersionNumbers = function () {
-// Read version information from common/autoconf/version-numbers
+// Read version information from make/autoconf/version-numbers
if (version_numbers == null) {
version_numbers = new java.util.Properties();
var stream = new java.io.FileInputStream(__DIR__ + "/../autoconf/version-numbers");

File diff suppressed because it is too large.

View file

@ -112,7 +112,7 @@ JVM_LIBS += \
#
# These files and directories are always excluded
-JVM_EXCLUDE_FILES += jsig.c jvmtiEnvRecommended.cpp jvmtiEnvStub.cpp args.cc
+JVM_EXCLUDE_FILES += jvmtiEnvRecommended.cpp jvmtiEnvStub.cpp args.cc
JVM_EXCLUDES += adlc
# Needed by vm_version.cpp # Needed by vm_version.cpp

View file

@ -81,7 +81,7 @@ ifneq ($(OPENJDK_TARGET_OS), windows)
$(error Unknown target OS $(OPENJDK_TARGET_OS) in CompileLibjsig.gmk)
endif
-LIBJSIG_SRC_FILE := $(TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS)/native/libjsig/jsig.c
+LIBJSIG_SRC_DIR := $(TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS)/native/libjsig
LIBJSIG_MAPFILE := $(wildcard $(TOPDIR)/make/mapfiles/libjsig/mapfile-vers-$(OPENJDK_TARGET_OS))
LIBJSIG_OUTPUTDIR := $(HOTSPOT_OUTPUTDIR)/libjsig
@ -91,7 +91,7 @@ ifneq ($(OPENJDK_TARGET_OS), windows)
$(eval $(call SetupNativeCompilation, BUILD_LIBJSIG, \
LIBRARY := jsig, \
-EXTRA_FILES := $(LIBJSIG_SRC_FILE), \
+SRC := $(LIBJSIG_SRC_DIR), \
OUTPUT_DIR := $(LIB_OUTPUTDIR), \
LANG := C, \
CFLAGS := $(LIBJSIG_CFLAGS) $(LIBJSIG_CPU_FLAGS), \

View file

@ -153,7 +153,7 @@ ifeq ($(call check-jvm-feature, link-time-opt), true)
# like the old build, but it's probably not right.
JVM_OPTIMIZATION :=
JVM_CFLAGS_FEATURES += -O3 -flto
-JVM_LDFLAGS_FEATURES += -O3 -flto -fwhole-program -fno-strict-aliasing
+JVM_LDFLAGS_FEATURES += -O3 -flto -fuse-linker-plugin -fno-strict-aliasing
endif
ifeq ($(call check-jvm-feature, minimal), true)
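For context on the LTO flag swap above: `-fwhole-program` asserts that the link covers the whole program, while `-fuse-linker-plugin` lets GCC's link-time optimizer learn symbol usage through the linker plugin instead. A hedged sketch with plain GCC (not the actual Hotspot link line):

```bash
# Old style: whole-program assumption at link time.
gcc -O3 -flto -fwhole-program -o demo demo.c util.c

# New style: rely on the linker plugin (requires a plugin-capable linker and a
# GCC built with plugin support).
gcc -O3 -flto -fuse-linker-plugin -o demo demo.c util.c
```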

View file

@ -278,7 +278,7 @@
<arg value="${javadoc.option}"/> <arg value="${javadoc.option}"/>
<fileset dir="${nashorn.module.src.dir}" includes="**/*.java"/> <fileset dir="${nashorn.module.src.dir}" includes="**/*.java"/>
<fileset dir="${dynalink.module.src.dir}" includes="**/*.java"/> <fileset dir="${dynalink.module.src.dir}" includes="**/*.java"/>
<link href="http://docs.oracle.com/javase/8/docs/api/"/> <link offline="true" href="${javadoc.base.url}" packagelistLoc="${javadoc.pkg.list}"/>
</javadoc> </javadoc>
</target> </target>
@ -296,7 +296,7 @@
<arg value="."/> <arg value="."/>
<arg value="${javadoc.option}"/> <arg value="${javadoc.option}"/>
<fileset dir="${nashorn.module.src.dir}" includes="jdk/nashorn/api/**/*.java"/> <fileset dir="${nashorn.module.src.dir}" includes="jdk/nashorn/api/**/*.java"/>
<link href="http://docs.oracle.com/javase/8/docs/api/"/> <link offline="true" href="${javadoc.base.url}" packagelistLoc="${javadoc.pkg.list}"/>
</javadoc> </javadoc>
</target> </target>
@ -314,7 +314,7 @@
<arg value="."/> <arg value="."/>
<arg value="${javadoc.option}"/> <arg value="${javadoc.option}"/>
<fileset dir="${dynalink.module.src.dir}" includes="**/*.java"/> <fileset dir="${dynalink.module.src.dir}" includes="**/*.java"/>
<link href="http://docs.oracle.com/javase/8/docs/api/"/> <link offline="true" href="${javadoc.base.url}" packagelistLoc="${javadoc.pkg.list}"/>
</javadoc> </javadoc>
</target> </target>
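The `<link offline="true" ...>` form corresponds to javadoc's `-linkoffline` option: instead of fetching the package list from the remote URL at build time, it reads a local `package-list` file (the new `make/nashorn/package-list` added below, pointed to by the `javadoc.pkg.list` property). A rough stand-alone equivalent, with hypothetical source paths:

```bash
# Hypothetical command-line equivalent of the Ant <link offline="true" ...> element:
javadoc -d build/nashorn/javadoc \
    -linkoffline https://docs.oracle.com/javase/9/docs/api/ make/nashorn \
    $(find src/jdk.scripting.nashorn/share/classes -name '*.java')
```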

make/nashorn/package-list (new file, 315 lines)
View file

@ -0,0 +1,315 @@
com.sun.jarsigner
com.sun.java.accessibility.util
com.sun.javadoc
com.sun.jdi
com.sun.jdi.connect
com.sun.jdi.connect.spi
com.sun.jdi.event
com.sun.jdi.request
com.sun.management
com.sun.net.httpserver
com.sun.net.httpserver.spi
com.sun.nio.sctp
com.sun.security.auth
com.sun.security.auth.callback
com.sun.security.auth.login
com.sun.security.auth.module
com.sun.security.jgss
com.sun.source.doctree
com.sun.source.tree
com.sun.source.util
com.sun.tools.attach
com.sun.tools.attach.spi
com.sun.tools.doclets
com.sun.tools.doclets.standard
com.sun.tools.javac
com.sun.tools.javadoc
com.sun.tools.jconsole
java.applet
java.awt
java.awt.color
java.awt.datatransfer
java.awt.desktop
java.awt.dnd
java.awt.event
java.awt.font
java.awt.geom
java.awt.im
java.awt.im.spi
java.awt.image
java.awt.image.renderable
java.awt.print
java.beans
java.beans.beancontext
java.io
java.lang
java.lang.annotation
java.lang.instrument
java.lang.invoke
java.lang.management
java.lang.module
java.lang.ref
java.lang.reflect
java.math
java.net
java.net.spi
java.nio
java.nio.channels
java.nio.channels.spi
java.nio.charset
java.nio.charset.spi
java.nio.file
java.nio.file.attribute
java.nio.file.spi
java.rmi
java.rmi.activation
java.rmi.dgc
java.rmi.registry
java.rmi.server
java.security
java.security.acl
java.security.cert
java.security.interfaces
java.security.spec
java.sql
java.text
java.text.spi
java.time
java.time.chrono
java.time.format
java.time.temporal
java.time.zone
java.util
java.util.concurrent
java.util.concurrent.atomic
java.util.concurrent.locks
java.util.function
java.util.jar
java.util.logging
java.util.prefs
java.util.regex
java.util.spi
java.util.stream
java.util.zip
javafx.animation
javafx.application
javafx.beans
javafx.beans.binding
javafx.beans.property
javafx.beans.property.adapter
javafx.beans.value
javafx.collections
javafx.collections.transformation
javafx.concurrent
javafx.css
javafx.css.converter
javafx.embed.swing
javafx.event
javafx.fxml
javafx.geometry
javafx.print
javafx.scene
javafx.scene.canvas
javafx.scene.chart
javafx.scene.control
javafx.scene.control.cell
javafx.scene.control.skin
javafx.scene.effect
javafx.scene.image
javafx.scene.input
javafx.scene.layout
javafx.scene.media
javafx.scene.paint
javafx.scene.shape
javafx.scene.text
javafx.scene.transform
javafx.scene.web
javafx.stage
javafx.util
javafx.util.converter
javax.accessibility
javax.activation
javax.activity
javax.annotation
javax.annotation.processing
javax.crypto
javax.crypto.interfaces
javax.crypto.spec
javax.imageio
javax.imageio.event
javax.imageio.metadata
javax.imageio.plugins.bmp
javax.imageio.plugins.jpeg
javax.imageio.plugins.tiff
javax.imageio.spi
javax.imageio.stream
javax.jnlp
javax.jws
javax.jws.soap
javax.lang.model
javax.lang.model.element
javax.lang.model.type
javax.lang.model.util
javax.management
javax.management.loading
javax.management.modelmbean
javax.management.monitor
javax.management.openmbean
javax.management.relation
javax.management.remote
javax.management.remote.rmi
javax.management.timer
javax.naming
javax.naming.directory
javax.naming.event
javax.naming.ldap
javax.naming.spi
javax.net
javax.net.ssl
javax.print
javax.print.attribute
javax.print.attribute.standard
javax.print.event
javax.rmi
javax.rmi.CORBA
javax.rmi.ssl
javax.script
javax.security.auth
javax.security.auth.callback
javax.security.auth.kerberos
javax.security.auth.login
javax.security.auth.spi
javax.security.auth.x500
javax.security.cert
javax.security.sasl
javax.smartcardio
javax.sound.midi
javax.sound.midi.spi
javax.sound.sampled
javax.sound.sampled.spi
javax.sql
javax.sql.rowset
javax.sql.rowset.serial
javax.sql.rowset.spi
javax.swing
javax.swing.border
javax.swing.colorchooser
javax.swing.event
javax.swing.filechooser
javax.swing.plaf
javax.swing.plaf.basic
javax.swing.plaf.metal
javax.swing.plaf.multi
javax.swing.plaf.nimbus
javax.swing.plaf.synth
javax.swing.table
javax.swing.text
javax.swing.text.html
javax.swing.text.html.parser
javax.swing.text.rtf
javax.swing.tree
javax.swing.undo
javax.tools
javax.transaction
javax.transaction.xa
javax.xml
javax.xml.bind
javax.xml.bind.annotation
javax.xml.bind.annotation.adapters
javax.xml.bind.attachment
javax.xml.bind.helpers
javax.xml.bind.util
javax.xml.catalog
javax.xml.crypto
javax.xml.crypto.dom
javax.xml.crypto.dsig
javax.xml.crypto.dsig.dom
javax.xml.crypto.dsig.keyinfo
javax.xml.crypto.dsig.spec
javax.xml.datatype
javax.xml.namespace
javax.xml.parsers
javax.xml.soap
javax.xml.stream
javax.xml.stream.events
javax.xml.stream.util
javax.xml.transform
javax.xml.transform.dom
javax.xml.transform.sax
javax.xml.transform.stax
javax.xml.transform.stream
javax.xml.validation
javax.xml.ws
javax.xml.ws.handler
javax.xml.ws.handler.soap
javax.xml.ws.http
javax.xml.ws.soap
javax.xml.ws.spi
javax.xml.ws.spi.http
javax.xml.ws.wsaddressing
javax.xml.xpath
jdk.dynalink
jdk.dynalink.beans
jdk.dynalink.linker
jdk.dynalink.linker.support
jdk.dynalink.support
jdk.incubator.http
jdk.javadoc.doclet
jdk.jfr
jdk.jfr.consumer
jdk.jshell
jdk.jshell.execution
jdk.jshell.spi
jdk.jshell.tool
jdk.management.cmm
jdk.management.jfr
jdk.management.resource
jdk.nashorn.api.scripting
jdk.nashorn.api.tree
jdk.net
jdk.packager.services
jdk.security.jarsigner
netscape.javascript
org.ietf.jgss
org.omg.CORBA
org.omg.CORBA_2_3
org.omg.CORBA_2_3.portable
org.omg.CORBA.DynAnyPackage
org.omg.CORBA.ORBPackage
org.omg.CORBA.portable
org.omg.CORBA.TypeCodePackage
org.omg.CosNaming
org.omg.CosNaming.NamingContextExtPackage
org.omg.CosNaming.NamingContextPackage
org.omg.Dynamic
org.omg.DynamicAny
org.omg.DynamicAny.DynAnyFactoryPackage
org.omg.DynamicAny.DynAnyPackage
org.omg.IOP
org.omg.IOP.CodecFactoryPackage
org.omg.IOP.CodecPackage
org.omg.Messaging
org.omg.PortableInterceptor
org.omg.PortableInterceptor.ORBInitInfoPackage
org.omg.PortableServer
org.omg.PortableServer.CurrentPackage
org.omg.PortableServer.POAManagerPackage
org.omg.PortableServer.POAPackage
org.omg.PortableServer.portable
org.omg.PortableServer.ServantLocatorPackage
org.omg.SendingContext
org.omg.stub.java.rmi
org.w3c.dom
org.w3c.dom.bootstrap
org.w3c.dom.css
org.w3c.dom.events
org.w3c.dom.html
org.w3c.dom.ls
org.w3c.dom.ranges
org.w3c.dom.stylesheets
org.w3c.dom.traversal
org.w3c.dom.views
org.w3c.dom.xpath
org.xml.sax
org.xml.sax.ext
org.xml.sax.helpers

View file

@ -33,6 +33,12 @@ jdk.jline.src.dir=src/jdk.internal.le/share/classes
# source and target levels
build.compiler=modern
jdk.build.dir=build
nashorn.make.dir=make/nashorn
javadoc.base.url=https://docs.oracle.com/javase/9/docs/api/
javadoc.pkg.list=make/nashorn
javadoc.option=\
-tag "implSpec:a:Implementation Requirements:" \
-tag "implNote:a:Implementation Note:" \
@ -43,9 +49,6 @@ nashorn.version=0.1
nashorn.fullversion=0.1
nashorn.product.name=Oracle Nashorn
jdk.build.dir=build
nashorn.make.dir=make/nashorn
# This directory is removed when the project is cleaned:
build.dir=${jdk.build.dir}/nashorn
build.classes.dir=${build.dir}/classes

View file

@ -60,6 +60,7 @@ BUILD_HOTSPOT_JTREG_NATIVE_SRC += \
$(TOPDIR)/test/hotspot/jtreg/runtime/SameObject \
$(TOPDIR)/test/hotspot/jtreg/runtime/BoolReturn \
$(TOPDIR)/test/hotspot/jtreg/runtime/noClassDefFoundMsg \
$(TOPDIR)/test/hotspot/jtreg/runtime/handshake \
$(TOPDIR)/test/hotspot/jtreg/runtime/RedefineTests \
$(TOPDIR)/test/hotspot/jtreg/compiler/floatingpoint/ \
$(TOPDIR)/test/hotspot/jtreg/compiler/calls \
@ -108,6 +109,7 @@ ifeq ($(TOOLCHAIN_TYPE), solstudio)
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libMAAThreadStart := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libAllowedFunctions := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libRedefineDoubleDelete := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libHandshakeTransitionTest := -lc
endif
ifeq ($(OPENJDK_TARGET_OS), linux)

View file

@ -1,5 +1,5 @@
/*
-* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -79,6 +79,8 @@ define_pd_global(bool, CompactStrings, true);
// Clear short arrays bigger than one word in an arch-specific way
define_pd_global(intx, InitArrayShortSize, BytesPerLong);
define_pd_global(bool, ThreadLocalHandshakes, false);
#if defined(COMPILER1) || defined(COMPILER2)
define_pd_global(intx, InlineSmallCode, 1000);
#endif

View file

@ -3228,6 +3228,102 @@ void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len,
mvnw(crc, crc);
}
void MacroAssembler::kernel_crc32c_using_crc32c(Register crc, Register buf,
Register len, Register tmp0, Register tmp1, Register tmp2,
Register tmp3) {
Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit;
assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3);
subs(len, len, 128);
br(Assembler::GE, CRC_by64_pre);
BIND(CRC_less64);
adds(len, len, 128-32);
br(Assembler::GE, CRC_by32_loop);
BIND(CRC_less32);
adds(len, len, 32-4);
br(Assembler::GE, CRC_by4_loop);
adds(len, len, 4);
br(Assembler::GT, CRC_by1_loop);
b(L_exit);
BIND(CRC_by32_loop);
ldp(tmp0, tmp1, Address(post(buf, 16)));
subs(len, len, 32);
crc32cx(crc, crc, tmp0);
ldr(tmp2, Address(post(buf, 8)));
crc32cx(crc, crc, tmp1);
ldr(tmp3, Address(post(buf, 8)));
crc32cx(crc, crc, tmp2);
crc32cx(crc, crc, tmp3);
br(Assembler::GE, CRC_by32_loop);
cmn(len, 32);
br(Assembler::NE, CRC_less32);
b(L_exit);
BIND(CRC_by4_loop);
ldrw(tmp0, Address(post(buf, 4)));
subs(len, len, 4);
crc32cw(crc, crc, tmp0);
br(Assembler::GE, CRC_by4_loop);
adds(len, len, 4);
br(Assembler::LE, L_exit);
BIND(CRC_by1_loop);
ldrb(tmp0, Address(post(buf, 1)));
subs(len, len, 1);
crc32cb(crc, crc, tmp0);
br(Assembler::GT, CRC_by1_loop);
b(L_exit);
BIND(CRC_by64_pre);
sub(buf, buf, 8);
ldp(tmp0, tmp1, Address(buf, 8));
crc32cx(crc, crc, tmp0);
ldr(tmp2, Address(buf, 24));
crc32cx(crc, crc, tmp1);
ldr(tmp3, Address(buf, 32));
crc32cx(crc, crc, tmp2);
ldr(tmp0, Address(buf, 40));
crc32cx(crc, crc, tmp3);
ldr(tmp1, Address(buf, 48));
crc32cx(crc, crc, tmp0);
ldr(tmp2, Address(buf, 56));
crc32cx(crc, crc, tmp1);
ldr(tmp3, Address(pre(buf, 64)));
b(CRC_by64_loop);
align(CodeEntryAlignment);
BIND(CRC_by64_loop);
subs(len, len, 64);
crc32cx(crc, crc, tmp2);
ldr(tmp0, Address(buf, 8));
crc32cx(crc, crc, tmp3);
ldr(tmp1, Address(buf, 16));
crc32cx(crc, crc, tmp0);
ldr(tmp2, Address(buf, 24));
crc32cx(crc, crc, tmp1);
ldr(tmp3, Address(buf, 32));
crc32cx(crc, crc, tmp2);
ldr(tmp0, Address(buf, 40));
crc32cx(crc, crc, tmp3);
ldr(tmp1, Address(buf, 48));
crc32cx(crc, crc, tmp0);
ldr(tmp2, Address(buf, 56));
crc32cx(crc, crc, tmp1);
ldr(tmp3, Address(pre(buf, 64)));
br(Assembler::GE, CRC_by64_loop);
// post-loop
crc32cx(crc, crc, tmp2);
crc32cx(crc, crc, tmp3);
sub(len, len, 64);
add(buf, buf, 8);
cmn(len, 128);
br(Assembler::NE, CRC_less64);
BIND(L_exit);
}
/** /**
* @param crc register containing existing CRC (32-bit)
* @param buf register pointing to input byte buffer (byte*)
@ -3238,55 +3334,10 @@ void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len,
void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len,
Register table0, Register table1, Register table2, Register table3,
Register tmp, Register tmp2, Register tmp3) {
-Label L_exit;
+kernel_crc32c_using_crc32c(crc, buf, len, table0, table1, table2, table3);
Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop;
subs(len, len, 64);
br(Assembler::GE, CRC_by64_loop);
adds(len, len, 64-4);
br(Assembler::GE, CRC_by4_loop);
adds(len, len, 4);
br(Assembler::GT, CRC_by1_loop);
b(L_exit);
BIND(CRC_by4_loop);
ldrw(tmp, Address(post(buf, 4)));
subs(len, len, 4);
crc32cw(crc, crc, tmp);
br(Assembler::GE, CRC_by4_loop);
adds(len, len, 4);
br(Assembler::LE, L_exit);
BIND(CRC_by1_loop);
ldrb(tmp, Address(post(buf, 1)));
subs(len, len, 1);
crc32cb(crc, crc, tmp);
br(Assembler::GT, CRC_by1_loop);
b(L_exit);
align(CodeEntryAlignment);
BIND(CRC_by64_loop);
subs(len, len, 64);
ldp(tmp, tmp3, Address(post(buf, 16)));
crc32cx(crc, crc, tmp);
crc32cx(crc, crc, tmp3);
ldp(tmp, tmp3, Address(post(buf, 16)));
crc32cx(crc, crc, tmp);
crc32cx(crc, crc, tmp3);
ldp(tmp, tmp3, Address(post(buf, 16)));
crc32cx(crc, crc, tmp);
crc32cx(crc, crc, tmp3);
ldp(tmp, tmp3, Address(post(buf, 16)));
crc32cx(crc, crc, tmp);
crc32cx(crc, crc, tmp3);
br(Assembler::GE, CRC_by64_loop);
adds(len, len, 64-4);
br(Assembler::GE, CRC_by4_loop);
adds(len, len, 4);
br(Assembler::GT, CRC_by1_loop);
BIND(L_exit);
return;
} }
SkipIfEqual::SkipIfEqual( SkipIfEqual::SkipIfEqual(
MacroAssembler* masm, const bool* flag_addr, bool value) { MacroAssembler* masm, const bool* flag_addr, bool value) {
_masm = masm; _masm = masm;


@ -1264,6 +1264,9 @@ private:
void kernel_crc32_using_crc32(Register crc, Register buf, void kernel_crc32_using_crc32(Register crc, Register buf,
Register len, Register tmp0, Register tmp1, Register tmp2, Register len, Register tmp0, Register tmp1, Register tmp2,
Register tmp3); Register tmp3);
void kernel_crc32c_using_crc32c(Register crc, Register buf,
Register len, Register tmp0, Register tmp1, Register tmp2,
Register tmp3);
public: public:
void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z,
Register zlen, Register tmp1, Register tmp2, Register tmp3, Register zlen, Register tmp1, Register tmp2, Register tmp3,


@ -447,7 +447,8 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
} }
address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
int step) { int step,
address continuation) {
address entry = __ pc(); address entry = __ pc();
__ restore_bcp(); __ restore_bcp();
__ restore_locals(); __ restore_locals();
@ -505,7 +506,11 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
__ bind(L); __ bind(L);
} }
if (continuation == NULL) {
__ dispatch_next(state, step); __ dispatch_next(state, step);
} else {
__ jump_to_entry(continuation);
}
return entry; return entry;
} }
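The new continuation argument lets a deopt entry either fall back into normal bytecode dispatch or jump straight to a caller-chosen entry point. Reduced to its control flow, with toy function pointers rather than HotSpot types, the tail of the generated entry behaves like:

    #include <cstddef>

    // Illustrative only: stand-ins for dispatch_next() and jump_to_entry().
    void deopt_resume(const void* continuation,
                      void (*dispatch_next)(),
                      void (*jump_to_entry)(const void*)) {
      if (continuation == NULL) {
        dispatch_next();              // resume the interpreter dispatch loop
      } else {
        jump_to_entry(continuation);  // resume at the supplied continuation
      }
    }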


@ -2195,13 +2195,6 @@ void TemplateTable::_return(TosState state)
__ bind(skip_register_finalizer); __ bind(skip_register_finalizer);
} }
// Explicitly reset last_sp, for handling special case in TemplateInterpreter::deopt_reexecute_entry
#ifdef ASSERT
if (state == vtos) {
__ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
}
#endif
// Issue a StoreStore barrier after all stores but before return // Issue a StoreStore barrier after all stores but before return
// from any constructor for any class with a final field. We don't // from any constructor for any class with a final field. We don't
// know if this is a finalizer, so we always do so. // know if this is a finalizer, so we always do so.


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -79,6 +79,8 @@ define_pd_global(bool, CompactStrings, false);
define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong); define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
define_pd_global(bool, ThreadLocalHandshakes, false);
#define ARCH_FLAGS(develop, \ #define ARCH_FLAGS(develop, \
product, \ product, \
diagnostic, \ diagnostic, \


@ -314,7 +314,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
} }
address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) { address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) {
address entry = __ pc(); address entry = __ pc();
__ interp_verify_oop(R0_tos, state, __FILE__, __LINE__); __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
@ -343,7 +343,11 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
__ bind(L); __ bind(L);
} }
if (continuation == NULL) {
__ dispatch_next(state, step); __ dispatch_next(state, step);
} else {
__ jump_to_entry(continuation);
}
return entry; return entry;
} }


@ -2844,19 +2844,6 @@ void TemplateTable::_return(TosState state) {
__ bind(skip_register_finalizer); __ bind(skip_register_finalizer);
} }
// Explicitly reset last_sp, for handling special case in TemplateInterpreter::deopt_reexecute_entry
#ifdef ASSERT
if (state == vtos) {
#ifndef AARCH64
__ mov(Rtemp, 0);
__ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
#else
__ restore_sp_after_call(Rtemp);
__ restore_stack_top();
#endif
}
#endif
// Narrow result if state is itos but result type is smaller. // Narrow result if state is itos but result type is smaller.
// Need to narrow in the return bytecode rather than in generate_return_entry // Need to narrow in the return bytecode rather than in generate_return_entry
// since compiled code callers expect the result to already be narrowed. // since compiled code callers expect the result to already be narrowed.


@ -83,6 +83,8 @@ define_pd_global(bool, CompactStrings, true);
// 2x unrolled loop is shorter with more than 9 HeapWords. // 2x unrolled loop is shorter with more than 9 HeapWords.
define_pd_global(intx, InitArrayShortSize, 9*BytesPerLong); define_pd_global(intx, InitArrayShortSize, 9*BytesPerLong);
define_pd_global(bool, ThreadLocalHandshakes, false);
// Platform dependent flag handling: flags only defined on this platform. // Platform dependent flag handling: flags only defined on this platform.
#define ARCH_FLAGS(develop, \ #define ARCH_FLAGS(develop, \
product, \ product, \


@ -694,7 +694,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
return entry; return entry;
} }
address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) { address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) {
address entry = __ pc(); address entry = __ pc();
// If state != vtos, we're returning from a native method, which put its result // If state != vtos, we're returning from a native method, which put its result
// into the result register. So move the value out of the return register back // into the result register. So move the value out of the return register back
@ -721,7 +721,11 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
__ check_and_forward_exception(R11_scratch1, R12_scratch2); __ check_and_forward_exception(R11_scratch1, R12_scratch2);
// Start executing bytecodes. // Start executing bytecodes.
if (continuation == NULL) {
__ dispatch_next(state, step); __ dispatch_next(state, step);
} else {
__ jump_to_entry(continuation, R11_scratch1);
}
return entry; return entry;
} }


@ -85,6 +85,8 @@ define_pd_global(bool, CompactStrings, true);
// 8146801 (Short Array Allocation): No performance work done here yet. // 8146801 (Short Array Allocation): No performance work done here yet.
define_pd_global(intx, InitArrayShortSize, 1*BytesPerLong); define_pd_global(intx, InitArrayShortSize, 1*BytesPerLong);
define_pd_global(bool, ThreadLocalHandshakes, false);
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint, writeable) \ #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint, writeable) \
\ \
/* Reoptimize code-sequences of calls at runtime, e.g. replace an */ \ /* Reoptimize code-sequences of calls at runtime, e.g. replace an */ \


@ -687,7 +687,8 @@ address TemplateInterpreterGenerator::generate_return_entry_for (TosState state,
} }
address TemplateInterpreterGenerator::generate_deopt_entry_for (TosState state, address TemplateInterpreterGenerator::generate_deopt_entry_for (TosState state,
int step) { int step,
address continuation) {
address entry = __ pc(); address entry = __ pc();
BLOCK_COMMENT("deopt_entry {"); BLOCK_COMMENT("deopt_entry {");
@ -710,7 +711,11 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for (TosState state,
__ should_not_reach_here(); __ should_not_reach_here();
__ bind(L); __ bind(L);
} }
if (continuation == NULL) {
__ dispatch_next(state, step); __ dispatch_next(state, step);
} else {
__ jump_to_entry(continuation, Z_R1_scratch);
}
BLOCK_COMMENT("} deopt_entry"); BLOCK_COMMENT("} deopt_entry");


@ -35,6 +35,7 @@
#include "gc/shared/collectedHeap.hpp" #include "gc/shared/collectedHeap.hpp"
#include "nativeInst_sparc.hpp" #include "nativeInst_sparc.hpp"
#include "oops/objArrayKlass.hpp" #include "oops/objArrayKlass.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/sharedRuntime.hpp" #include "runtime/sharedRuntime.hpp"
#define __ _masm-> #define __ _masm->
@ -1415,7 +1416,11 @@ void LIR_Assembler::return_op(LIR_Opr result) {
if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) { if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
__ reserved_stack_check(); __ reserved_stack_check();
} }
if (SafepointMechanism::uses_thread_local_poll()) {
__ ld_ptr(Address(G2_thread, Thread::polling_page_offset()), L0);
} else {
__ set((intptr_t)os::get_polling_page(), L0); __ set((intptr_t)os::get_polling_page(), L0);
}
__ relocate(relocInfo::poll_return_type); __ relocate(relocInfo::poll_return_type);
__ ld_ptr(L0, 0, G0); __ ld_ptr(L0, 0, G0);
__ ret(); __ ret();
@ -1424,11 +1429,16 @@ void LIR_Assembler::return_op(LIR_Opr result) {
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) { int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
if (SafepointMechanism::uses_thread_local_poll()) {
__ ld_ptr(Address(G2_thread, Thread::polling_page_offset()), tmp->as_register());
} else {
__ set((intptr_t)os::get_polling_page(), tmp->as_register()); __ set((intptr_t)os::get_polling_page(), tmp->as_register());
}
if (info != NULL) { if (info != NULL) {
add_debug_info_for_branch(info); add_debug_info_for_branch(info);
} }
int offset = __ offset(); int offset = __ offset();
__ relocate(relocInfo::poll_type); __ relocate(relocInfo::poll_type);
__ ld_ptr(tmp->as_register(), 0, G0); __ ld_ptr(tmp->as_register(), 0, G0);
return offset; return offset;


@ -33,6 +33,7 @@
#include "ci/ciArray.hpp" #include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp" #include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp" #include "ci/ciTypeArrayKlass.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp" #include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp" #include "runtime/stubRoutines.hpp"
#include "vmreg_sparc.inline.hpp" #include "vmreg_sparc.inline.hpp"
@ -1304,7 +1305,7 @@ void LIRGenerator::do_If(If* x) {
if (x->is_safepoint()) { if (x->is_safepoint()) {
// increment backedge counter if needed // increment backedge counter if needed
increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci()); increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
__ safepoint(new_register(T_INT), state_for(x, x->state_before())); __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
} }
__ cmp(lir_cond(cond), left, right); __ cmp(lir_cond(cond), left, right);


@ -52,4 +52,7 @@ const bool CCallingConventionRequiresIntsAsLongs = true;
#define SUPPORT_RESERVED_STACK_AREA #define SUPPORT_RESERVED_STACK_AREA
#endif #endif
// SPARC has implemented local polling
#define THREAD_LOCAL_POLL
#endif // CPU_SPARC_VM_GLOBALDEFINITIONS_SPARC_HPP #endif // CPU_SPARC_VM_GLOBALDEFINITIONS_SPARC_HPP


@ -87,6 +87,8 @@ define_pd_global(bool, CompactStrings, true);
define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong); define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
define_pd_global(bool, ThreadLocalHandshakes, true);
#define ARCH_FLAGS(develop, \ #define ARCH_FLAGS(develop, \
product, \ product, \
diagnostic, \ diagnostic, \


@ -36,6 +36,7 @@
#include "prims/jvmtiThreadState.hpp" #include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp" #include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp" #include "runtime/biasedLocking.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp" #include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp" #include "runtime/thread.inline.hpp"
#include "utilities/align.hpp" #include "utilities/align.hpp"
@ -95,12 +96,11 @@ void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) {
else delayed()->nop(); else delayed()->nop();
} }
void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr, bool generate_poll) {
void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr) {
// %%%% consider branching to a single shared dispatch stub (for each bcp_incr) // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
assert_not_delayed(); assert_not_delayed();
ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode
dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr); dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr, true, generate_poll);
} }
@ -261,15 +261,34 @@ void InterpreterMacroAssembler::dispatch_only(TosState state) {
// common code to dispatch and dispatch_only // common code to dispatch and dispatch_only
// dispatch value in Lbyte_code and increment Lbcp // dispatch value in Lbyte_code and increment Lbcp
void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify) { void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify, bool generate_poll) {
verify_FPU(1, state); verify_FPU(1, state);
// %%%%% maybe implement +VerifyActivationFrameSize here // %%%%% maybe implement +VerifyActivationFrameSize here
//verify_thread(); //too slow; we will just verify on method entry & exit //verify_thread(); //too slow; we will just verify on method entry & exit
if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__); if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
// dispatch table to use // dispatch table to use
AddressLiteral tbl(table); AddressLiteral tbl(table);
sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize Label dispatch;
if (SafepointMechanism::uses_thread_local_poll() && generate_poll) {
AddressLiteral sfpt_tbl(Interpreter::safept_table(state));
Label no_safepoint;
if (tbl.value() != sfpt_tbl.value()) {
ldx(Address(G2_thread, Thread::polling_page_offset()), G3_scratch, 0);
// Armed page has poll_bit set; if the poll bit is cleared, just continue.
and3(G3_scratch, SafepointMechanism::poll_bit(), G3_scratch);
br_null_short(G3_scratch, Assembler::pt, no_safepoint);
set(sfpt_tbl, G3_scratch);
ba_short(dispatch);
}
bind(no_safepoint);
}
set(tbl, G3_scratch); // compute addr of table set(tbl, G3_scratch); // compute addr of table
bind(dispatch);
sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
ld_ptr(G3_scratch, Lbyte_code, G3_scratch); // get entry addr ld_ptr(G3_scratch, Lbyte_code, G3_scratch); // get entry addr
jmp( G3_scratch, 0 ); jmp( G3_scratch, 0 );
if (bcp_incr != 0) delayed()->inc(Lbcp, bcp_incr); if (bcp_incr != 0) delayed()->inc(Lbcp, bcp_incr);


@ -98,7 +98,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
void dispatch_epilog(TosState state, int step = 0); void dispatch_epilog(TosState state, int step = 0);
void dispatch_only(TosState state); void dispatch_only(TosState state);
void dispatch_normal(TosState state); void dispatch_normal(TosState state);
void dispatch_next(TosState state, int step = 0); void dispatch_next(TosState state, int step = 0, bool generate_poll = false);
void dispatch_next_noverify_oop(TosState state, int step = 0); void dispatch_next_noverify_oop(TosState state, int step = 0);
void dispatch_via (TosState state, address* table); void dispatch_via (TosState state, address* table);
@ -113,7 +113,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
bool install_monitor_exception = true); bool install_monitor_exception = true);
protected: protected:
void dispatch_Lbyte_code(TosState state, address* table, int bcp_incr = 0, bool verify = true); void dispatch_Lbyte_code(TosState state, address* table, int bcp_incr = 0, bool verify = true, bool generate_poll = false);
public: public:
// Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls // Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls


@ -37,6 +37,8 @@
#include "runtime/interfaceSupport.hpp" #include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp" #include "runtime/objectMonitor.hpp"
#include "runtime/os.inline.hpp" #include "runtime/os.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp" #include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp" #include "runtime/stubRoutines.hpp"
#include "utilities/align.hpp" #include "utilities/align.hpp"
@ -236,6 +238,20 @@ void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register t
} }
void MacroAssembler::safepoint_poll(Label& slow_path, bool a, Register thread_reg, Register temp_reg) {
if (SafepointMechanism::uses_thread_local_poll()) {
ldx(Address(thread_reg, Thread::polling_page_offset()), temp_reg, 0);
// Armed page has poll bit set.
and3(temp_reg, SafepointMechanism::poll_bit(), temp_reg);
br_notnull(temp_reg, a, Assembler::pn, slow_path);
} else {
AddressLiteral sync_state(SafepointSynchronize::address_of_state());
load_contents(sync_state, temp_reg);
cmp(temp_reg, SafepointSynchronize::_not_synchronized);
br(Assembler::notEqual, a, Assembler::pn, slow_path);
}
}
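Both polling flavors boil down to a single conditional: the thread-local scheme tests a poll bit in the thread's own polling word, while the global scheme compares the shared safepoint state. A compact C++ model of that decision, using a toy Thread type and a kPollBit constant that stand in for Thread::polling_page_offset() and SafepointMechanism::poll_bit(), might look like:

    #include <atomic>
    #include <cstdint>

    struct ToyThread { std::atomic<uintptr_t> polling_word{0}; };
    constexpr uintptr_t kPollBit = 1;          // assumed poll-bit value
    std::atomic<bool> g_safepoint_pending{false};

    // True when the generated poll should branch to the slow path.
    bool should_take_slow_path(const ToyThread& self, bool thread_local_poll) {
      if (thread_local_poll) {
        // Armed thread: the VM set the poll bit in this thread's polling word.
        return (self.polling_word.load(std::memory_order_relaxed) & kPollBit) != 0;
      }
      // Global scheme: every thread tests the same safepoint state.
      return g_safepoint_pending.load(std::memory_order_relaxed);
    }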
void MacroAssembler::enter() { void MacroAssembler::enter() {
Unimplemented(); Unimplemented();


@ -986,6 +986,8 @@ public:
// Support for serializing memory accesses between threads // Support for serializing memory accesses between threads
void serialize_memory(Register thread, Register tmp1, Register tmp2); void serialize_memory(Register thread, Register tmp1, Register tmp2);
void safepoint_poll(Label& slow_path, bool a, Register thread_reg, Register temp_reg);
// Stack frame creation/removal // Stack frame creation/removal
void enter(); void enter();
void leave(); void leave();


@ -2359,7 +2359,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Block, if necessary, before resuming in _thread_in_Java state. // Block, if necessary, before resuming in _thread_in_Java state.
// In order for GC to work, don't clear the last_Java_sp until after blocking. // In order for GC to work, don't clear the last_Java_sp until after blocking.
{ Label no_block; { Label no_block;
AddressLiteral sync_state(SafepointSynchronize::address_of_state());
// Switch thread to "native transition" state before reading the synchronization state. // Switch thread to "native transition" state before reading the synchronization state.
// This additional state is necessary because reading and testing the synchronization // This additional state is necessary because reading and testing the synchronization
@ -2382,12 +2381,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ serialize_memory(G2_thread, G1_scratch, G3_scratch); __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
} }
} }
__ load_contents(sync_state, G3_scratch);
__ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
Label L; Label L;
Address suspend_state(G2_thread, JavaThread::suspend_flags_offset()); Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
__ br(Assembler::notEqual, false, Assembler::pn, L); __ safepoint_poll(L, false, G2_thread, G3_scratch);
__ delayed()->ld(suspend_state, G3_scratch); __ delayed()->ld(suspend_state, G3_scratch);
__ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block); __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
__ bind(L); __ bind(L);
@ -3118,7 +3115,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
} else { } else {
// Make it look like we were called via the poll // Make it look like we were called via the poll
// so that frame constructor always sees a valid return address // so that frame constructor always sees a valid return address
__ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7); __ ld_ptr(Address(G2_thread, JavaThread::saved_exception_pc_offset()), O7);
__ sub(O7, frame::pc_return_offset, O7); __ sub(O7, frame::pc_return_offset, O7);
} }
@ -3127,6 +3124,15 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
// setup last_Java_sp (blows G4) // setup last_Java_sp (blows G4)
__ set_last_Java_frame(SP, noreg); __ set_last_Java_frame(SP, noreg);
Register saved_O7 = O7->after_save();
if (!cause_return && SafepointMechanism::uses_thread_local_poll()) {
// Keep a copy of the return pc in L0 to detect if it gets modified
__ mov(saved_O7, L0);
// Adjust and keep a copy of our npc saved by the signal handler
__ ld_ptr(Address(G2_thread, JavaThread::saved_exception_npc_offset()), L1);
__ sub(L1, frame::pc_return_offset, L1);
}
// call into the runtime to handle illegal instructions exception // call into the runtime to handle illegal instructions exception
// Do not use call_VM_leaf, because we need to make a GC map at this call site. // Do not use call_VM_leaf, because we need to make a GC map at this call site.
__ mov(G2_thread, O0); __ mov(G2_thread, O0);
@ -3150,6 +3156,12 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
__ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1); __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
__ br_notnull_short(O1, Assembler::pn, pending); __ br_notnull_short(O1, Assembler::pn, pending);
if (!cause_return && SafepointMechanism::uses_thread_local_poll()) {
// If nobody modified our return pc then we must return to the npc which we saved in L1
__ cmp(saved_O7, L0);
__ movcc(Assembler::equal, false, Assembler::ptr_cc, L1, saved_O7);
}
RegisterSaver::restore_live_registers(masm); RegisterSaver::restore_live_registers(masm);
// We are back to the original state on entry and ready to go. // We are back to the original state on entry and ready to go.


@ -1206,7 +1206,11 @@ void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
Compile* C = ra_->C; Compile* C = ra_->C;
if(do_polling() && ra_->C->is_method_compilation()) { if(do_polling() && ra_->C->is_method_compilation()) {
if (SafepointMechanism::uses_global_page_poll()) {
st->print("SETHI #PollAddr,L0\t! Load Polling address\n\t"); st->print("SETHI #PollAddr,L0\t! Load Polling address\n\t");
} else {
st->print("LDX [R_G2 + #poll_offset],L0\t! Load local polling address\n\t");
}
st->print("LDX [L0],G0\t!Poll for Safepointing\n\t"); st->print("LDX [L0],G0\t!Poll for Safepointing\n\t");
} }
@ -1233,8 +1237,12 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
// If this does safepoint polling, then do it here // If this does safepoint polling, then do it here
if(do_polling() && ra_->C->is_method_compilation()) { if(do_polling() && ra_->C->is_method_compilation()) {
if (SafepointMechanism::uses_thread_local_poll()) {
__ ld_ptr(Address(G2_thread, Thread::polling_page_offset()), L0);
} else {
AddressLiteral polling_page(os::get_polling_page()); AddressLiteral polling_page(os::get_polling_page());
__ sethi(polling_page, L0); __ sethi(polling_page, L0);
}
__ relocate(relocInfo::poll_return_type); __ relocate(relocInfo::poll_return_type);
__ ld_ptr(L0, 0, G0); __ ld_ptr(L0, 0, G0);
} }
@ -1266,6 +1274,7 @@ const Pipeline * MachEpilogNode::pipeline() const {
} }
int MachEpilogNode::safepoint_offset() const { int MachEpilogNode::safepoint_offset() const {
assert(SafepointMechanism::uses_global_page_poll(), "sanity");
assert( do_polling(), "no return for this epilog node"); assert( do_polling(), "no return for this epilog node");
return MacroAssembler::insts_for_sethi(os::get_polling_page()) * BytesPerInstWord; return MacroAssembler::insts_for_sethi(os::get_polling_page()) * BytesPerInstWord;
} }


@ -313,7 +313,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
} }
address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) { address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) {
address entry = __ pc(); address entry = __ pc();
__ get_constant_pool_cache(LcpoolCache); // load LcpoolCache __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
#if INCLUDE_JVMCI #if INCLUDE_JVMCI
@ -350,7 +350,11 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
__ should_not_reach_here(); __ should_not_reach_here();
__ bind(L); __ bind(L);
} }
if (continuation == NULL) {
__ dispatch_next(state, step); __ dispatch_next(state, step);
} else {
__ jump_to_entry(continuation);
}
return entry; return entry;
} }
@ -912,10 +916,8 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
Label L_slow_path; Label L_slow_path;
// If we need a safepoint check, generate full interpreter entry. // If we need a safepoint check, generate full interpreter entry.
ExternalAddress state(SafepointSynchronize::address_of_state()); __ safepoint_poll(L_slow_path, false, G2_thread, O2);
__ set(ExternalAddress(SafepointSynchronize::address_of_state()), O2); __ delayed()->nop();
__ set(SafepointSynchronize::_not_synchronized, O3);
__ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pt, L_slow_path);
// Load parameters // Load parameters
const Register crc = O0; // initial crc const Register crc = O0; // initial crc
@ -956,10 +958,9 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
Label L_slow_path; Label L_slow_path;
// If we need a safepoint check, generate full interpreter entry. // If we need a safepoint check, generate full interpreter entry.
ExternalAddress state(SafepointSynchronize::address_of_state());
__ set(ExternalAddress(SafepointSynchronize::address_of_state()), O2); __ safepoint_poll(L_slow_path, false, G2_thread, O2);
__ set(SafepointSynchronize::_not_synchronized, O3); __ delayed()->nop();
__ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pt, L_slow_path);
// Load parameters from the stack // Load parameters from the stack
const Register crc = O0; // initial crc const Register crc = O0; // initial crc
@ -1397,7 +1398,6 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// Block, if necessary, before resuming in _thread_in_Java state. // Block, if necessary, before resuming in _thread_in_Java state.
// In order for GC to work, don't clear the last_Java_sp until after blocking. // In order for GC to work, don't clear the last_Java_sp until after blocking.
{ Label no_block; { Label no_block;
AddressLiteral sync_state(SafepointSynchronize::address_of_state());
// Switch thread to "native transition" state before reading the synchronization state. // Switch thread to "native transition" state before reading the synchronization state.
// This additional state is necessary because reading and testing the synchronization // This additional state is necessary because reading and testing the synchronization
@ -1420,11 +1420,9 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
__ serialize_memory(G2_thread, G1_scratch, G3_scratch); __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
} }
} }
__ load_contents(sync_state, G3_scratch);
__ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
Label L; Label L;
__ br(Assembler::notEqual, false, Assembler::pn, L); __ safepoint_poll(L, false, G2_thread, G3_scratch);
__ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch); __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
__ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block); __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
__ bind(L); __ bind(L);


@ -1499,7 +1499,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// Push returnAddress for "ret" on stack // Push returnAddress for "ret" on stack
__ push_ptr(Otos_i); __ push_ptr(Otos_i);
// And away we go! // And away we go!
__ dispatch_next(vtos); __ dispatch_next(vtos, 0, true);
return; return;
} }
@ -1607,7 +1607,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// continue with bytecode @ target // continue with bytecode @ target
// %%%%% Like Intel, could speed things up by moving bytecode fetch to code above, // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
// %%%%% and changing dispatch_next to dispatch_only // %%%%% and changing dispatch_next to dispatch_only
__ dispatch_next(vtos); __ dispatch_next(vtos, 0, true);
} }
@ -1676,7 +1676,7 @@ void TemplateTable::ret() {
__ ld_ptr(Lmethod, Method::const_offset(), G3_scratch); __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
__ add(G3_scratch, Otos_i, G3_scratch); __ add(G3_scratch, Otos_i, G3_scratch);
__ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp); __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
__ dispatch_next(vtos); __ dispatch_next(vtos, 0, true);
} }
@ -1691,7 +1691,7 @@ void TemplateTable::wide_ret() {
__ ld_ptr(Lmethod, Method::const_offset(), G3_scratch); __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
__ add(G3_scratch, Otos_i, G3_scratch); __ add(G3_scratch, Otos_i, G3_scratch);
__ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp); __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
__ dispatch_next(vtos); __ dispatch_next(vtos, 0, true);
} }
@ -1727,7 +1727,7 @@ void TemplateTable::tableswitch() {
// continue execution // continue execution
__ bind(continue_execution); __ bind(continue_execution);
__ add(Lbcp, O2, Lbcp); __ add(Lbcp, O2, Lbcp);
__ dispatch_next(vtos); __ dispatch_next(vtos, 0, true);
} }
@ -1779,7 +1779,7 @@ void TemplateTable::fast_linearswitch() {
__ bind(continue_execution); __ bind(continue_execution);
} }
__ add(Lbcp, O4, Lbcp); __ add(Lbcp, O4, Lbcp);
__ dispatch_next(vtos); __ dispatch_next(vtos, 0, true);
} }
@ -1888,7 +1888,7 @@ void TemplateTable::fast_binaryswitch() {
__ bind(continue_execution); __ bind(continue_execution);
__ add( Lbcp, Rj, Lbcp ); __ add( Lbcp, Rj, Lbcp );
__ dispatch_next( vtos ); __ dispatch_next(vtos, 0, true);
} }
@ -1914,6 +1914,18 @@ void TemplateTable::_return(TosState state) {
__ bind(skip_register_finalizer); __ bind(skip_register_finalizer);
} }
if (SafepointMechanism::uses_thread_local_poll() && _desc->bytecode() != Bytecodes::_return_register_finalizer) {
Label no_safepoint;
__ ldx(Address(G2_thread, Thread::polling_page_offset()), G3_scratch, 0);
__ btst(SafepointMechanism::poll_bit(), G3_scratch);
__ br(Assembler::zero, false, Assembler::pt, no_safepoint);
__ delayed()->nop();
__ push(state);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint));
__ pop(state);
__ bind(no_safepoint);
}
// Narrow result if state is itos but result type is smaller. // Narrow result if state is itos but result type is smaller.
// Need to narrow in the return bytecode rather than in generate_return_entry // Need to narrow in the return bytecode rather than in generate_return_entry
// since compiled code callers expect the result to already be narrowed. // since compiled code callers expect the result to already be narrowed.
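At a return bytecode the interpreter now polls explicitly: if the thread is armed it preserves the value being returned, calls into the runtime, and then continues with the normal return sequence. In pseudo-C++, with hypothetical save/restore and runtime-call helpers rather than HotSpot API:

    // Hypothetical helpers: save_tos/restore_tos model push(state)/pop(state),
    // at_safepoint models the call into InterpreterRuntime::at_safepoint.
    void return_poll(bool poll_bit_armed,
                     void (*save_tos)(), void (*restore_tos)(),
                     void (*at_safepoint)()) {
      if (poll_bit_armed) {
        save_tos();        // keep the return value live across the runtime call
        at_safepoint();    // let the VM run the handshake / safepoint operation
        restore_tos();
      }
      // fall through to the normal return sequence
    }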


@ -526,6 +526,16 @@ void LIR_Assembler::return_op(LIR_Opr result) {
// Note: we do not need to round double result; float result has the right precision // Note: we do not need to round double result; float result has the right precision
// the poll sets the condition code, but no data registers // the poll sets the condition code, but no data registers
if (SafepointMechanism::uses_thread_local_poll()) {
#ifdef _LP64
__ movptr(rscratch1, Address(r15_thread, Thread::polling_page_offset()));
__ relocate(relocInfo::poll_return_type);
__ testl(rax, Address(rscratch1, 0));
#else
ShouldNotReachHere();
#endif
} else {
AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_return_type); AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_return_type);
if (Assembler::is_polling_page_far()) { if (Assembler::is_polling_page_far()) {
@ -535,14 +545,28 @@ void LIR_Assembler::return_op(LIR_Opr result) {
} else { } else {
__ testl(rax, polling_page); __ testl(rax, polling_page);
} }
}
__ ret(0); __ ret(0);
} }
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) { int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_type);
guarantee(info != NULL, "Shouldn't be NULL"); guarantee(info != NULL, "Shouldn't be NULL");
int offset = __ offset(); int offset = __ offset();
if (SafepointMechanism::uses_thread_local_poll()) {
#ifdef _LP64
__ movptr(rscratch1, Address(r15_thread, Thread::polling_page_offset()));
add_debug_info_for_branch(info);
__ relocate(relocInfo::poll_type);
address pre_pc = __ pc();
__ testl(rax, Address(rscratch1, 0));
address post_pc = __ pc();
guarantee(pointer_delta(post_pc, pre_pc, 1) == 3, "must be exact length");
#else
ShouldNotReachHere();
#endif
} else {
AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_type);
if (Assembler::is_polling_page_far()) { if (Assembler::is_polling_page_far()) {
__ lea(rscratch1, polling_page); __ lea(rscratch1, polling_page);
offset = __ offset(); offset = __ offset();
@ -553,6 +577,7 @@ int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
add_debug_info_for_branch(info); add_debug_info_for_branch(info);
__ testl(rax, polling_page); __ testl(rax, polling_page);
} }
}
return offset; return offset;
} }


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -65,4 +65,9 @@ const bool CCallingConventionRequiresIntsAsLongs = false;
#define SUPPORT_RESERVED_STACK_AREA #define SUPPORT_RESERVED_STACK_AREA
#endif #endif
#ifdef _LP64
// X64 has implemented local polling
#define THREAD_LOCAL_POLL
#endif
#endif // CPU_X86_VM_GLOBALDEFINITIONS_X86_HPP #endif // CPU_X86_VM_GLOBALDEFINITIONS_X86_HPP


@ -97,6 +97,12 @@ define_pd_global(bool, PreserveFramePointer, false);
define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong); define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
#ifdef _LP64
define_pd_global(bool, ThreadLocalHandshakes, true);
#else
define_pd_global(bool, ThreadLocalHandshakes, false);
#endif
#define ARCH_FLAGS(develop, \ #define ARCH_FLAGS(develop, \
product, \ product, \
diagnostic, \ diagnostic, \


@ -35,6 +35,7 @@
#include "prims/jvmtiThreadState.hpp" #include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp" #include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp" #include "runtime/biasedLocking.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp" #include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp" #include "runtime/thread.inline.hpp"
@ -809,7 +810,8 @@ void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
void InterpreterMacroAssembler::dispatch_base(TosState state, void InterpreterMacroAssembler::dispatch_base(TosState state,
address* table, address* table,
bool verifyoop) { bool verifyoop,
bool generate_poll) {
verify_FPU(1, state); verify_FPU(1, state);
if (VerifyActivationFrameSize) { if (VerifyActivationFrameSize) {
Label L; Label L;
@ -827,8 +829,24 @@ void InterpreterMacroAssembler::dispatch_base(TosState state,
verify_oop(rax, state); verify_oop(rax, state);
} }
#ifdef _LP64 #ifdef _LP64
Label no_safepoint, dispatch;
address* const safepoint_table = Interpreter::safept_table(state);
if (SafepointMechanism::uses_thread_local_poll() && table != safepoint_table && generate_poll) {
NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
testb(Address(r15_thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
jccb(Assembler::zero, no_safepoint);
lea(rscratch1, ExternalAddress((address)safepoint_table));
jmpb(dispatch);
}
bind(no_safepoint);
lea(rscratch1, ExternalAddress((address)table)); lea(rscratch1, ExternalAddress((address)table));
bind(dispatch);
jmp(Address(rscratch1, rbx, Address::times_8)); jmp(Address(rscratch1, rbx, Address::times_8));
#else #else
Address index(noreg, rbx, Address::times_ptr); Address index(noreg, rbx, Address::times_ptr);
ExternalAddress tbl((address)table); ExternalAddress tbl((address)table);
@ -837,8 +855,8 @@ void InterpreterMacroAssembler::dispatch_base(TosState state,
#endif // _LP64 #endif // _LP64
} }
void InterpreterMacroAssembler::dispatch_only(TosState state) { void InterpreterMacroAssembler::dispatch_only(TosState state, bool generate_poll) {
dispatch_base(state, Interpreter::dispatch_table(state)); dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll);
} }
void InterpreterMacroAssembler::dispatch_only_normal(TosState state) { void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {
@ -850,12 +868,12 @@ void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) {
} }
void InterpreterMacroAssembler::dispatch_next(TosState state, int step) { void InterpreterMacroAssembler::dispatch_next(TosState state, int step, bool generate_poll) {
// load next bytecode (load before advancing _bcp_register to prevent AGI) // load next bytecode (load before advancing _bcp_register to prevent AGI)
load_unsigned_byte(rbx, Address(_bcp_register, step)); load_unsigned_byte(rbx, Address(_bcp_register, step));
// advance _bcp_register // advance _bcp_register
increment(_bcp_register, step); increment(_bcp_register, step);
dispatch_base(state, Interpreter::dispatch_table(state)); dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll);
} }
void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) { void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
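When generate_poll is set, dispatch_base chooses which dispatch table to index: an armed thread is routed through the safepoint table so the next bytecode first traps into the safepoint handler. A toy version of that selection logic, with illustrative types only, is:

    using Entry = void (*)();   // toy stand-in for a dispatch-table entry

    Entry pick_dispatch_entry(bool generate_poll, bool poll_bit_armed,
                              Entry* normal_table, Entry* safepoint_table,
                              unsigned bytecode) {
      Entry* table = normal_table;
      if (generate_poll && normal_table != safepoint_table && poll_bit_armed) {
        table = safepoint_table;   // armed: go via Interpreter::safept_table(state)
      }
      // same indexed jump as jmp(Address(rscratch1, rbx, Address::times_8))
      return table[bytecode];
    }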


@ -49,7 +49,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
bool check_exceptions); bool check_exceptions);
// base routine for all dispatches // base routine for all dispatches
void dispatch_base(TosState state, address* table, bool verifyoop = true); void dispatch_base(TosState state, address* table, bool verifyoop = true, bool generate_poll = false);
public: public:
InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code), InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code),
@ -184,12 +184,12 @@ class InterpreterMacroAssembler: public MacroAssembler {
void dispatch_prolog(TosState state, int step = 0); void dispatch_prolog(TosState state, int step = 0);
void dispatch_epilog(TosState state, int step = 0); void dispatch_epilog(TosState state, int step = 0);
// dispatch via rbx (assume rbx is loaded already) // dispatch via rbx (assume rbx is loaded already)
void dispatch_only(TosState state); void dispatch_only(TosState state, bool generate_poll = false);
// dispatch normal table via rbx (assume rbx is loaded already) // dispatch normal table via rbx (assume rbx is loaded already)
void dispatch_only_normal(TosState state); void dispatch_only_normal(TosState state);
void dispatch_only_noverify(TosState state); void dispatch_only_noverify(TosState state);
// load rbx from [_bcp_register + step] and dispatch via rbx // load rbx from [_bcp_register + step] and dispatch via rbx
void dispatch_next(TosState state, int step = 0); void dispatch_next(TosState state, int step = 0, bool generate_poll = false);
// load rbx from [_bcp_register] and dispatch via rbx and table // load rbx from [_bcp_register] and dispatch via rbx and table
void dispatch_via (TosState state, address* table); void dispatch_via (TosState state, address* table);


@ -38,6 +38,8 @@
#include "runtime/interfaceSupport.hpp" #include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp" #include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp" #include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp" #include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp" #include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp" #include "runtime/thread.hpp"
@ -3759,6 +3761,25 @@ void MacroAssembler::serialize_memory(Register thread, Register tmp) {
movl(as_Address(ArrayAddress(page, index)), tmp); movl(as_Address(ArrayAddress(page, index)), tmp);
} }
#ifdef _LP64
void MacroAssembler::safepoint_poll(Label& slow_path, Register thread_reg, Register temp_reg) {
if (SafepointMechanism::uses_thread_local_poll()) {
testb(Address(r15_thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
jcc(Assembler::notZero, slow_path); // handshake bit set implies poll
} else {
cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
jcc(Assembler::notEqual, slow_path);
}
}
#else
void MacroAssembler::safepoint_poll(Label& slow_path) {
cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
jcc(Assembler::notEqual, slow_path);
}
#endif
// Calls to C land // Calls to C land
// //
// When entering C land, the rbp, & rsp of the last Java frame have to be recorded // When entering C land, the rbp, & rsp of the last Java frame have to be recorded


@ -656,6 +656,12 @@ class MacroAssembler: public Assembler {
// Support for serializing memory accesses between threads // Support for serializing memory accesses between threads
void serialize_memory(Register thread, Register tmp); void serialize_memory(Register thread, Register tmp);
#ifdef _LP64
void safepoint_poll(Label& slow_path, Register thread_reg, Register temp_reg);
#else
void safepoint_poll(Label& slow_path);
#endif
void verify_tlab(); void verify_tlab();
// Biased locking support // Biased locking support


@ -29,6 +29,7 @@
#include "memory/allocation.hpp" #include "memory/allocation.hpp"
#include "runtime/icache.hpp" #include "runtime/icache.hpp"
#include "runtime/os.hpp" #include "runtime/os.hpp"
#include "runtime/safepointMechanism.hpp"
// We have interfaces for the following instructions: // We have interfaces for the following instructions:
// - NativeInstruction // - NativeInstruction
@ -678,6 +679,7 @@ class NativeTstRegMem: public NativeInstruction {
enum Intel_specific_constants { enum Intel_specific_constants {
instruction_rex_prefix_mask = 0xF0, instruction_rex_prefix_mask = 0xF0,
instruction_rex_prefix = Assembler::REX, instruction_rex_prefix = Assembler::REX,
instruction_rex_b_prefix = Assembler::REX_B,
instruction_code_memXregl = 0x85, instruction_code_memXregl = 0x85,
modrm_mask = 0x38, // select reg from the ModRM byte modrm_mask = 0x38, // select reg from the ModRM byte
modrm_reg = 0x00 // rax modrm_reg = 0x00 // rax
@ -703,6 +705,16 @@ inline bool NativeInstruction::is_cond_jump() { return (int_at(0) & 0xF0FF) =
(ubyte_at(0) & 0xF0) == 0x70; /* short jump */ } (ubyte_at(0) & 0xF0) == 0x70; /* short jump */ }
inline bool NativeInstruction::is_safepoint_poll() { inline bool NativeInstruction::is_safepoint_poll() {
#ifdef AMD64 #ifdef AMD64
if (SafepointMechanism::uses_thread_local_poll()) {
// We know that the poll must have a REX_B prefix since we enforce its source to be
// a rex-register and the destination to be rax.
const bool has_rex_prefix = ubyte_at(0) == NativeTstRegMem::instruction_rex_b_prefix;
const bool is_test_opcode = ubyte_at(1) == NativeTstRegMem::instruction_code_memXregl;
const bool is_rax_target = (ubyte_at(2) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg;
if (has_rex_prefix && is_test_opcode && is_rax_target) {
return true;
}
}
// Try decoding a near safepoint first: // Try decoding a near safepoint first:
if (ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl && if (ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
ubyte_at(1) == 0x05) { // 00 rax 101 ubyte_at(1) == 0x05) { // 00 rax 101
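On x86_64 the thread-local poll is emitted so it can be recognized as a fixed three-byte sequence: a REX.B prefix, the 0x85 TEST r/m32,r32 opcode, and a ModRM byte whose reg field selects rax (for example 41 85 07 for test dword ptr [r15], eax). The recognizer above reduces to a byte-pattern check like this sketch; the constants mirror NativeTstRegMem, while the function name is illustrative:

    #include <cstdint>

    bool looks_like_thread_local_poll(const uint8_t* insn) {
      const uint8_t kRexBPrefix   = 0x41;  // instruction_rex_b_prefix (REX.B)
      const uint8_t kTestOpcode   = 0x85;  // instruction_code_memXregl
      const uint8_t kModRMRegMask = 0x38;  // modrm_mask: bits 5..3 = reg field
      return insn[0] == kRexBPrefix &&
             insn[1] == kTestOpcode &&
             (insn[2] & kModRMRegMask) == 0x00;  // reg == 000b, i.e. eax/rax
    }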


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,7 @@
#include "oops/klass.inline.hpp" #include "oops/klass.inline.hpp"
#include "oops/oop.inline.hpp" #include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp" #include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) { void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
@ -183,9 +184,12 @@ void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffe
typedef Assembler::WhichOperand WhichOperand; typedef Assembler::WhichOperand WhichOperand;
WhichOperand which = (WhichOperand) format(); WhichOperand which = (WhichOperand) format();
#if !INCLUDE_JVMCI #if !INCLUDE_JVMCI
if (SafepointMechanism::uses_global_page_poll()) {
assert((which == Assembler::disp32_operand) == !Assembler::is_polling_page_far(), "format not set correctly"); assert((which == Assembler::disp32_operand) == !Assembler::is_polling_page_far(), "format not set correctly");
}
#endif #endif
if (which == Assembler::disp32_operand) { if (which == Assembler::disp32_operand) {
assert(SafepointMechanism::uses_global_page_poll(), "should only have generated such a poll if global polling enabled");
address orig_addr = old_addr_for(addr(), src, dest); address orig_addr = old_addr_for(addr(), src, dest);
NativeInstruction* oni = nativeInstruction_at(orig_addr); NativeInstruction* oni = nativeInstruction_at(orig_addr);
int32_t* orig_disp = (int32_t*) Assembler::locate_operand(orig_addr, which); int32_t* orig_disp = (int32_t*) Assembler::locate_operand(orig_addr, which);


@ -30,6 +30,7 @@
#include "asm/macroAssembler.inline.hpp" #include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp" #include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp" #include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp" #include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp" #include "interpreter/interpreter.hpp"
#include "logging/log.hpp" #include "logging/log.hpp"
@ -2474,15 +2475,13 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// check for safepoint operation in progress and/or pending suspend requests // check for safepoint operation in progress and/or pending suspend requests
{ {
Label Continue; Label Continue;
Label slow_path;
__ cmp32(ExternalAddress((address)SafepointSynchronize::address_of_state()), __ safepoint_poll(slow_path, r15_thread, rscratch1);
SafepointSynchronize::_not_synchronized);
Label L;
__ jcc(Assembler::notEqual, L);
__ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0); __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
__ jcc(Assembler::equal, Continue); __ jcc(Assembler::equal, Continue);
__ bind(L); __ bind(slow_path);
// Don't use call_VM as it will see a possible pending exception and forward it // Don't use call_VM as it will see a possible pending exception and forward it
// and never return here preventing us from clearing _last_native_pc down below. // and never return here preventing us from clearing _last_native_pc down below.
@ -3355,9 +3354,11 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
// sees an invalid pc. // sees an invalid pc.
if (!cause_return) { if (!cause_return) {
// overwrite the dummy value we pushed on entry // Get the return pc saved by the signal handler and stash it in its appropriate place on the stack.
__ movptr(c_rarg0, Address(r15_thread, JavaThread::saved_exception_pc_offset())); // Additionally, rbx is a callee saved register and we can look at it later to determine
__ movptr(Address(rbp, wordSize), c_rarg0); // if someone changed the return address for us!
__ movptr(rbx, Address(r15_thread, JavaThread::saved_exception_pc_offset()));
__ movptr(Address(rbp, wordSize), rbx);
} }
// Do the call // Do the call
@ -3387,11 +3388,38 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
// No exception case // No exception case
__ bind(noException); __ bind(noException);
Label no_adjust, bail;
if (SafepointMechanism::uses_thread_local_poll() && !cause_return) {
// If our stashed return pc was modified by the runtime we avoid touching it
__ cmpptr(rbx, Address(rbp, wordSize));
__ jccb(Assembler::notEqual, no_adjust);
#ifdef ASSERT
// Verify the correct encoding of the poll we're about to skip.
// See NativeInstruction::is_safepoint_poll()
__ cmpb(Address(rbx, 0), NativeTstRegMem::instruction_rex_b_prefix);
__ jcc(Assembler::notEqual, bail);
__ cmpb(Address(rbx, 1), NativeTstRegMem::instruction_code_memXregl);
__ jcc(Assembler::notEqual, bail);
// Mask out the modrm bits
__ testb(Address(rbx, 2), NativeTstRegMem::modrm_mask);
// rax encodes to 0, so if the bits are nonzero it's incorrect
__ jcc(Assembler::notZero, bail);
#endif
// Adjust return pc forward to step over the safepoint poll instruction
__ addptr(Address(rbp, wordSize), 3);
}
__ bind(no_adjust);
// Normal exit, restore registers and exit. // Normal exit, restore registers and exit.
RegisterSaver::restore_live_registers(masm, save_vectors); RegisterSaver::restore_live_registers(masm, save_vectors);
__ ret(0); __ ret(0);
#ifdef ASSERT
__ bind(bail);
__ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
#endif
// Make sure all code is generated // Make sure all code is generated
masm->flush(); masm->flush();


@ -237,7 +237,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
} }
address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) { address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) {
address entry = __ pc(); address entry = __ pc();
#ifndef _LP64 #ifndef _LP64
@ -291,7 +291,11 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
__ should_not_reach_here(); __ should_not_reach_here();
__ bind(L); __ bind(L);
} }
if (continuation == NULL) {
__ dispatch_next(state, step); __ dispatch_next(state, step);
} else {
__ jump_to_entry(continuation);
}
return entry; return entry;
} }
@ -1141,14 +1145,17 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// check for safepoint operation in progress and/or pending suspend requests // check for safepoint operation in progress and/or pending suspend requests
{ {
Label Continue; Label Continue;
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), Label slow_path;
SafepointSynchronize::_not_synchronized);
#ifndef _LP64
__ safepoint_poll(slow_path);
#else
__ safepoint_poll(slow_path, r15_thread, rscratch1);
#endif
Label L;
__ jcc(Assembler::notEqual, L);
__ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0); __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
__ jcc(Assembler::equal, Continue); __ jcc(Assembler::equal, Continue);
__ bind(L); __ bind(slow_path);
// Don't use call_VM as it will see a possible pending exception // Don't use call_VM as it will see a possible pending exception
// and forward it and never return here preventing us from // and forward it and never return here preventing us from


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -190,11 +190,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
// c_rarg1: scratch (rsi on non-Win64, rdx on Win64) // c_rarg1: scratch (rsi on non-Win64, rdx on Win64)
Label slow_path; Label slow_path;
// If we need a safepoint check, generate full interpreter entry. __ safepoint_poll(slow_path, r15_thread, rscratch1);
ExternalAddress state(SafepointSynchronize::address_of_state());
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
__ jcc(Assembler::notEqual, slow_path);
// We don't generate local frame and don't align stack because // We don't generate local frame and don't align stack because
// we call stub code and there is no safepoint on this path. // we call stub code and there is no safepoint on this path.
@ -240,11 +236,7 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
// r13: senderSP must preserved for slow path, set SP to it on fast path // r13: senderSP must preserved for slow path, set SP to it on fast path
Label slow_path; Label slow_path;
// If we need a safepoint check, generate full interpreter entry. __ safepoint_poll(slow_path, r15_thread, rscratch1);
ExternalAddress state(SafepointSynchronize::address_of_state());
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
__ jcc(Assembler::notEqual, slow_path);
// We don't generate local frame and don't align stack because // We don't generate local frame and don't align stack because
// we call stub code and there is no safepoint on this path. // we call stub code and there is no safepoint on this path.


@ -2084,7 +2084,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
  __ addptr(rbcp, rdx);
  // jsr returns atos that is not an oop
  __ push_i(rax);
- __ dispatch_only(vtos);
+ __ dispatch_only(vtos, true);
  return;
  }
@ -2203,7 +2203,7 @@
  // rax: return bci for jsr's, unused otherwise
  // rbx: target bytecode
  // r13: target bcp
- __ dispatch_only(vtos);
+ __ dispatch_only(vtos, true);
  if (UseLoopCounter) {
  if (ProfileInterpreter) {
@ -2332,7 +2332,7 @@ void TemplateTable::ret() {
  __ movptr(rbcp, Address(rax, Method::const_offset()));
  __ lea(rbcp, Address(rbcp, rbx, Address::times_1,
                       ConstMethod::codes_offset()));
- __ dispatch_next(vtos);
+ __ dispatch_next(vtos, 0, true);
  }
  void TemplateTable::wide_ret() {
@ -2343,7 +2343,7 @@ void TemplateTable::wide_ret() {
  __ get_method(rax);
  __ movptr(rbcp, Address(rax, Method::const_offset()));
  __ lea(rbcp, Address(rbcp, rbx, Address::times_1, ConstMethod::codes_offset()));
- __ dispatch_next(vtos);
+ __ dispatch_next(vtos, 0, true);
  }
  void TemplateTable::tableswitch() {
@ -2373,7 +2373,7 @@ void TemplateTable::tableswitch() {
  LP64_ONLY(__ movl2ptr(rdx, rdx));
  __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
  __ addptr(rbcp, rdx);
- __ dispatch_only(vtos);
+ __ dispatch_only(vtos, true);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(rax);
@ -2421,7 +2421,7 @@ void TemplateTable::fast_linearswitch() {
  __ movl2ptr(rdx, rdx);
  __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
  __ addptr(rbcp, rdx);
- __ dispatch_only(vtos);
+ __ dispatch_only(vtos, true);
  }
  void TemplateTable::fast_binaryswitch() {
@ -2525,7 +2525,7 @@ void TemplateTable::fast_binaryswitch() {
  __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
  __ addptr(rbcp, j);
- __ dispatch_only(vtos);
+ __ dispatch_only(vtos, true);
  // default case -> j = default offset
  __ bind(default_case);
@ -2539,7 +2539,7 @@ void TemplateTable::fast_binaryswitch() {
  __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
  __ addptr(rbcp, j);
- __ dispatch_only(vtos);
+ __ dispatch_only(vtos, true);
  }
  void TemplateTable::_return(TosState state) {
@ -2563,10 +2563,17 @@ void TemplateTable::_return(TosState state) {
  __ bind(skip_register_finalizer);
  }
- // Explicitly reset last_sp, for handling special case in TemplateInterpreter::deopt_reexecute_entry
-#ifdef ASSERT
- if (state == vtos) {
-   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
+#ifdef _LP64
+ if (SafepointMechanism::uses_thread_local_poll() && _desc->bytecode() != Bytecodes::_return_register_finalizer) {
+   Label no_safepoint;
+   NOT_PRODUCT(__ block_comment("Thread-local Safepoint poll"));
+   __ testb(Address(r15_thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
+   __ jcc(Assembler::zero, no_safepoint);
+   __ push(state);
+   __ call_VM(noreg, CAST_FROM_FN_PTR(address,
+                                      InterpreterRuntime::at_safepoint));
+   __ pop(state);
+   __ bind(no_safepoint);
  }
#endif
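The new block above arms the return bytecodes with a poll of the thread's own polling word. A tiny stand-alone model of that check, with invented names (ThreadModel, poll_bit) used purely for illustration:

#include <cstdint>

// Toy model, not HotSpot code: the testb/jcc pair emitted above is the machine
// form of this predicate -- true when the thread's polling word has the poll
// bit set, i.e. a safepoint or handshake has been armed for this thread.
struct ThreadModel { uintptr_t polling_word; };
static const uintptr_t poll_bit = 1;

static bool should_take_slow_path(const ThreadModel& t) {
  return (t.polling_word & poll_bit) != 0;
}

int main() {
  ThreadModel t = { 0 };
  bool before = should_take_slow_path(t);   // false: fall through to the normal return
  t.polling_word |= poll_bit;               // the VM arms this thread
  bool after = should_take_slow_path(t);    // true: call InterpreterRuntime::at_safepoint
  return (!before && after) ? 0 : 1;
}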


@ -317,6 +317,18 @@ reg_class ptr_rsp_reg(RSP, RSP_H);
  // Singleton class for TLS pointer
  reg_class ptr_r15_reg(R15, R15_H);
+ // The registers which can be used for
+ // a thread local safepoint poll
+ // * R12 is reserved for heap base
+ // * R13 cannot be encoded for addressing without an offset byte
+ // * R15 is reserved for the JavaThread
+ reg_class ptr_rex_reg(R8, R8_H,
+                       R9, R9_H,
+                       R10, R10_H,
+                       R11, R11_H,
+                       R14, R14_H);
  // Class for all long registers (excluding RSP)
  reg_class long_reg_with_rbp(RAX, RAX_H,
                              RDX, RDX_H,
@ -566,7 +578,7 @@ int MachCallRuntimeNode::ret_addr_offset() {
  // it does if the polling page is more than disp32 away.
  bool SafePointNode::needs_polling_address_input()
  {
-   return Assembler::is_polling_page_far();
+   return SafepointMechanism::uses_thread_local_poll() || Assembler::is_polling_page_far();
  }
  //
@ -938,7 +950,11 @@ void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const
  st->print_cr("popq rbp");
  if (do_polling() && C->is_method_compilation()) {
    st->print("\t");
-   if (Assembler::is_polling_page_far()) {
+   if (SafepointMechanism::uses_thread_local_poll()) {
+     st->print_cr("movq rscratch1, poll_offset[r15_thread] #polling_page_address\n\t"
+                  "testl rax, [rscratch1]\t"
+                  "# Safepoint: poll for GC");
+   } else if (Assembler::is_polling_page_far()) {
      st->print_cr("movq rscratch1, #polling_page_address\n\t"
                   "testl rax, [rscratch1]\t"
                   "# Safepoint: poll for GC");
@ -989,6 +1005,11 @@ void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
  if (do_polling() && C->is_method_compilation()) {
    MacroAssembler _masm(&cbuf);
+   if (SafepointMechanism::uses_thread_local_poll()) {
+     __ movq(rscratch1, Address(r15_thread, Thread::polling_page_offset()));
+     __ relocate(relocInfo::poll_return_type);
+     __ testl(rax, Address(rscratch1, 0));
+   } else {
    AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_return_type);
    if (Assembler::is_polling_page_far()) {
      __ lea(rscratch1, polling_page);
@ -998,6 +1019,7 @@ void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
      __ testl(rax, polling_page);
    }
  }
+ }
  }
  uint MachEpilogNode::size(PhaseRegAlloc* ra_) const
@ -3511,6 +3533,16 @@ operand r15_RegP()
  interface(REG_INTER);
  %}
+ operand rex_RegP()
+ %{
+   constraint(ALLOC_IN_RC(ptr_rex_reg));
+   match(RegP);
+   match(rRegP);
+   format %{ %}
+   interface(REG_INTER);
+ %}
  operand rRegL()
  %{
  constraint(ALLOC_IN_RC(long_reg));
@ -12060,7 +12092,7 @@ instruct cmpFastUnlock(rFlagsReg cr, rRegP object, rax_RegP box, rRegP tmp) %{
  // Safepoint Instructions
  instruct safePoint_poll(rFlagsReg cr)
  %{
-   predicate(!Assembler::is_polling_page_far());
+   predicate(!Assembler::is_polling_page_far() && SafepointMechanism::uses_global_page_poll());
    match(SafePoint);
    effect(KILL cr);
@ -12076,7 +12108,7 @@ instruct safePoint_poll(rFlagsReg cr)
  instruct safePoint_poll_far(rFlagsReg cr, rRegP poll)
  %{
-   predicate(Assembler::is_polling_page_far());
+   predicate(Assembler::is_polling_page_far() && SafepointMechanism::uses_global_page_poll());
    match(SafePoint poll);
    effect(KILL cr, USE poll);
@ -12090,6 +12122,26 @@ instruct safePoint_poll_far(rFlagsReg cr, rRegP poll)
    ins_pipe(ialu_reg_mem);
  %}
+ instruct safePoint_poll_tls(rFlagsReg cr, rex_RegP poll)
+ %{
+   predicate(SafepointMechanism::uses_thread_local_poll());
+   match(SafePoint poll);
+   effect(KILL cr, USE poll);
+   format %{ "testl rax, [$poll]\t"
+             "# Safepoint: poll for GC" %}
+   ins_cost(125);
+   size(3); /* setting an explicit size will cause debug builds to assert if size is incorrect */
+   ins_encode %{
+     __ relocate(relocInfo::poll_type);
+     address pre_pc = __ pc();
+     __ testl(rax, Address($poll$$Register, 0));
+     address post_pc = __ pc();
+     guarantee(pre_pc[0] == 0x41 && pre_pc[1] == 0x85, "must emit #rex test-ax [reg]");
+   %}
+   ins_pipe(ialu_reg_mem);
+ %}
  // ============================================================================
  // Procedure Call/Return Instructions
  // Call Java Static Instruction
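The size(3) and the 0x41/0x85 guarantee in safePoint_poll_tls only hold because the poll register class is restricted as commented above. A small illustration (not from the patch) of that encoding assumption: testl of eax against memory addressed through R8-R11 or R14 is always REX.B + opcode + ModRM, i.e. exactly three bytes, whereas R12 would need a SIB byte and R13 a displacement byte:

#include <cstdint>
#include <cstdio>

int main() {
  // ModRM = mod(00) | reg(EAX = 000) | r/m(low three bits of the base register).
  const int regs[] = { 8, 9, 10, 11, 14 };              // the ptr_rex_reg class
  for (int i = 0; i < 5; i++) {
    uint8_t rex   = 0x41;                               // REX.B: base register is R8..R15
    uint8_t op    = 0x85;                               // TEST r/m32, r32
    uint8_t modrm = (uint8_t)((0 << 6) | (0 << 3) | (regs[i] & 7));
    printf("test eax, [r%d] -> %02x %02x %02x (3 bytes)\n", regs[i], rex, op, modrm);
  }
  return 0;
}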


@ -379,7 +379,7 @@ int CppInterpreter::native_entry(Method* method, intptr_t UNUSED, TRAPS) {
  // Handle safepoint operations, pending suspend requests,
  // and pending asynchronous exceptions.
- if (SafepointSynchronize::do_call_back() ||
+ if (SafepointMechanism::poll(thread) ||
      thread->has_special_condition_for_native_trans()) {
    JavaThread::check_special_condition_for_native_trans(thread);
    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops());
@ -511,7 +511,7 @@ int CppInterpreter::accessor_entry(Method* method, intptr_t UNUSED, TRAPS) {
  intptr_t *locals = stack->sp();
  // Drop into the slow path if we need a safepoint check
- if (SafepointSynchronize::do_call_back()) {
+ if (SafepointMechanism::poll(THREAD)) {
    return normal_entry(method, 0, THREAD);
  }
@ -643,7 +643,7 @@ int CppInterpreter::empty_entry(Method* method, intptr_t UNUSED, TRAPS) {
  ZeroStack *stack = thread->zero_stack();
  // Drop into the slow path if we need a safepoint check
- if (SafepointSynchronize::do_call_back()) {
+ if (SafepointMechanism::poll(THREAD)) {
    return normal_entry(method, 0, THREAD);
  }


@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@ -81,6 +81,8 @@ define_pd_global(bool, PreserveFramePointer, false);
  // No performance work done here yet.
  define_pd_global(bool, CompactStrings, false);
+ define_pd_global(bool, ThreadLocalHandshakes, false);
  #define ARCH_FLAGS(develop, \
                     product, \
                     diagnostic, \


@ -3477,75 +3477,6 @@ jint os::init_2(void) {
  LoadedLibraries::print(tty);
  }
const int page_size = Aix::page_size();
const int map_size = page_size;
address map_address = (address) MAP_FAILED;
const int prot = PROT_READ;
const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
// Use optimized addresses for the polling page,
// e.g. map it to a special 32-bit address.
if (OptimizePollingPageLocation) {
// architecture-specific list of address wishes:
address address_wishes[] = {
// AIX: addresses lower than 0x30000000 don't seem to work on AIX.
// PPC64: all address wishes are non-negative 32 bit values where
// the lower 16 bits are all zero. we can load these addresses
// with a single ppc_lis instruction.
(address) 0x30000000, (address) 0x31000000,
(address) 0x32000000, (address) 0x33000000,
(address) 0x40000000, (address) 0x41000000,
(address) 0x42000000, (address) 0x43000000,
(address) 0x50000000, (address) 0x51000000,
(address) 0x52000000, (address) 0x53000000,
(address) 0x60000000, (address) 0x61000000,
(address) 0x62000000, (address) 0x63000000
};
int address_wishes_length = sizeof(address_wishes)/sizeof(address);
// iterate over the list of address wishes:
for (int i=0; i<address_wishes_length; i++) {
// Try to map with current address wish.
// AIX: AIX needs MAP_FIXED if we provide an address and mmap will
// fail if the address is already mapped.
map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
map_size, prot,
flags | MAP_FIXED,
-1, 0);
trcVerbose("SafePoint Polling Page address: %p (wish) => %p",
address_wishes[i], map_address + (ssize_t)page_size);
if (map_address + (ssize_t)page_size == address_wishes[i]) {
// Map succeeded and map_address is at wished address, exit loop.
break;
}
if (map_address != (address) MAP_FAILED) {
// Map succeeded, but polling_page is not at wished address, unmap and continue.
::munmap(map_address, map_size);
map_address = (address) MAP_FAILED;
}
// Map failed, continue loop.
}
} // end OptimizePollingPageLocation
if (map_address == (address) MAP_FAILED) {
map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
}
guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
os::set_polling_page(map_address);
if (!UseMembar) {
address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
os::set_memory_serialize_page(mem_serialize_page);
trcVerbose("Memory Serialize Page address: %p - %p, size %IX (%IB)",
mem_serialize_page, mem_serialize_page + Aix::page_size(),
Aix::page_size(), Aix::page_size());
}
  // initialize suspend/resume support - must do this before signal_sets_init()
  if (SR_initialize() != 0) {
    perror("SR_initialize failed");
@ -3614,6 +3545,14 @@ void os::make_polling_page_readable(void) {
  };
  int os::active_processor_count() {
// User has overridden the number of active processors
if (ActiveProcessorCount > 0) {
log_trace(os)("active_processor_count: "
"active processor count set by user : %d",
ActiveProcessorCount);
return ActiveProcessorCount;
}
  int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
  assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
  return online_cpus;


@ -0,0 +1,85 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "logging/log.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointMechanism.hpp"
#include <sys/mman.h>
void SafepointMechanism::pd_initialize() {
char* map_address = (char*)MAP_FAILED;
const size_t page_size = os::vm_page_size();
// Use optimized addresses for the polling page,
// e.g. map it to a special 32-bit address.
if (OptimizePollingPageLocation) {
// architecture-specific list of address wishes:
char* address_wishes[] = {
// AIX: addresses lower than 0x30000000 don't seem to work on AIX.
// PPC64: all address wishes are non-negative 32 bit values where
// the lower 16 bits are all zero. we can load these addresses
// with a single ppc_lis instruction.
(char*) 0x30000000, (char*) 0x31000000,
(char*) 0x32000000, (char*) 0x33000000,
(char*) 0x40000000, (char*) 0x41000000,
(char*) 0x42000000, (char*) 0x43000000,
(char*) 0x50000000, (char*) 0x51000000,
(char*) 0x52000000, (char*) 0x53000000,
(char*) 0x60000000, (char*) 0x61000000,
(char*) 0x62000000, (char*) 0x63000000
};
int address_wishes_length = sizeof(address_wishes)/sizeof(char*);
// iterate over the list of address wishes:
for (int i = 0; i < address_wishes_length; i++) {
// Try to map with current address wish.
// AIX: AIX needs MAP_FIXED if we provide an address and mmap will
// fail if the address is already mapped.
map_address = (char*) ::mmap(address_wishes[i] - (ssize_t)page_size,
page_size, PROT_READ,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
-1, 0);
log_debug(os)("SafePoint Polling Page address: %p (wish) => %p",
address_wishes[i], map_address + (ssize_t)page_size);
if (map_address + (ssize_t)page_size == address_wishes[i]) {
// Map succeeded and map_address is at wished address, exit loop.
break;
}
if (map_address != (char*)MAP_FAILED) {
// Map succeeded, but polling_page is not at wished address, unmap and continue.
::munmap(map_address, page_size);
map_address = (char*)MAP_FAILED;
}
// Map failed, continue loop.
}
}
if (map_address == (char*)MAP_FAILED) {
map_address = os::reserve_memory(page_size, NULL, page_size);
}
guarantee(map_address != (char*)MAP_FAILED, "SafepointMechanism::pd_initialize: failed to allocate polling page");
os::set_polling_page((address)(map_address));
}


@ -3391,20 +3391,6 @@ jint os::init_2(void) {
  os::Posix::init_2();
// Allocate a single page and mark it as readable for safepoint polling
address polling_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
guarantee(polling_page != MAP_FAILED, "os::init_2: failed to allocate polling page");
os::set_polling_page(polling_page);
log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page));
if (!UseMembar) {
address mem_serialize_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
guarantee(mem_serialize_page != MAP_FAILED, "mmap Failed for memory serialize page");
os::set_memory_serialize_page(mem_serialize_page);
log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page));
}
  // initialize suspend/resume support - must do this before signal_sets_init()
  if (SR_initialize() != 0) {
    perror("SR_initialize failed");
@ -3491,6 +3477,14 @@ void os::make_polling_page_readable(void) {
  }
  int os::active_processor_count() {
// User has overridden the number of active processors
if (ActiveProcessorCount > 0) {
log_trace(os)("active_processor_count: "
"active processor count set by user : %d",
ActiveProcessorCount);
return ActiveProcessorCount;
}
  return _processor_count;
  }


@ -59,6 +59,9 @@
  product(bool, UseSHM, false,                                          \
          "Use SYSV shared memory for large pages")                     \
                                                                        \
+ product(bool, UseContainerSupport, true,                              \
+         "Enable detection and runtime container configuration support") \
+                                                                       \
  diagnostic(bool, UseCpuAllocPath, false,                              \
             "Use CPU_ALLOC code path in os::active_processor_count ")


@ -0,0 +1,594 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include <string.h>
#include <math.h>
#include <errno.h>
#include "utilities/globalDefinitions.hpp"
#include "memory/allocation.hpp"
#include "runtime/os.hpp"
#include "logging/log.hpp"
#include "osContainer_linux.hpp"
/*
* Warning: Some linux distros use 0x7FFFFFFFFFFFF000
* and others use 0x7FFFFFFFFFFFFFFF for unlimited.
*/
#define UNLIMITED_MEM CONST64(0x7FFFFFFFFFFFF000)
#define PER_CPU_SHARES 1024
bool OSContainer::_is_initialized = false;
bool OSContainer::_is_containerized = false;
class CgroupSubsystem: CHeapObj<mtInternal> {
friend class OSContainer;
private:
/* mountinfo contents */
char *_root;
char *_mount_point;
/* Constructed subsystem directory */
char *_path;
public:
CgroupSubsystem(char *root, char *mountpoint) {
_root = os::strdup(root);
_mount_point = os::strdup(mountpoint);
_path = NULL;
}
/*
* Set directory to subsystem specific files based
* on the contents of the mountinfo and cgroup files.
*/
void set_subsystem_path(char *cgroup_path) {
char buf[MAXPATHLEN+1];
if (_root != NULL && cgroup_path != NULL) {
if (strcmp(_root, "/") == 0) {
int buflen;
strncpy(buf, _mount_point, MAXPATHLEN);
buf[MAXPATHLEN-1] = '\0';
if (strcmp(cgroup_path,"/") != 0) {
buflen = strlen(buf);
if ((buflen + strlen(cgroup_path)) > (MAXPATHLEN-1)) {
return;
}
strncat(buf, cgroup_path, MAXPATHLEN-buflen);
buf[MAXPATHLEN-1] = '\0';
}
_path = os::strdup(buf);
} else {
if (strcmp(_root, cgroup_path) == 0) {
strncpy(buf, _mount_point, MAXPATHLEN);
buf[MAXPATHLEN-1] = '\0';
_path = os::strdup(buf);
} else {
char *p = strstr(_root, cgroup_path);
if (p != NULL && p == _root) {
if (strlen(cgroup_path) > strlen(_root)) {
int buflen;
strncpy(buf, _mount_point, MAXPATHLEN);
buf[MAXPATHLEN-1] = '\0';
buflen = strlen(buf);
if ((buflen + strlen(cgroup_path)) > (MAXPATHLEN-1)) {
return;
}
strncat(buf, cgroup_path + strlen(_root), MAXPATHLEN-buflen);
buf[MAXPATHLEN-1] = '\0';
_path = os::strdup(buf);
}
}
}
}
}
}
char *subsystem_path() { return _path; }
};
CgroupSubsystem* memory = NULL;
CgroupSubsystem* cpuset = NULL;
CgroupSubsystem* cpu = NULL;
CgroupSubsystem* cpuacct = NULL;
typedef char * cptr;
PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
template <typename T> int subsystem_file_contents(CgroupSubsystem* c,
const char *filename,
const char *scan_fmt,
T returnval) {
FILE *fp = NULL;
char *p;
char file[MAXPATHLEN+1];
char buf[MAXPATHLEN+1];
if (c != NULL && c->subsystem_path() != NULL) {
strncpy(file, c->subsystem_path(), MAXPATHLEN);
file[MAXPATHLEN-1] = '\0';
int filelen = strlen(file);
if ((filelen + strlen(filename)) > (MAXPATHLEN-1)) {
log_debug(os, container)("File path too long %s, %s", file, filename);
return OSCONTAINER_ERROR;
}
strncat(file, filename, MAXPATHLEN-filelen);
log_trace(os, container)("Path to %s is %s", filename, file);
fp = fopen(file, "r");
if (fp != NULL) {
p = fgets(buf, MAXPATHLEN, fp);
if (p != NULL) {
int matched = sscanf(p, scan_fmt, returnval);
if (matched == 1) {
fclose(fp);
return 0;
} else {
log_debug(os, container)("Type %s not found in file %s",
scan_fmt , file);
}
} else {
log_debug(os, container)("Empty file %s", file);
}
} else {
log_debug(os, container)("Open of file %s failed, %s", file,
os::strerror(errno));
}
}
if (fp != NULL)
fclose(fp);
return OSCONTAINER_ERROR;
}
PRAGMA_DIAG_POP
#define GET_CONTAINER_INFO(return_type, subsystem, filename, \
logstring, scan_fmt, variable) \
return_type variable; \
{ \
int err; \
err = subsystem_file_contents(subsystem, \
filename, \
scan_fmt, \
&variable); \
if (err != 0) \
return (return_type) OSCONTAINER_ERROR; \
\
log_trace(os, container)(logstring, variable); \
}
#define GET_CONTAINER_INFO_CPTR(return_type, subsystem, filename, \
logstring, scan_fmt, variable, bufsize) \
char variable[bufsize]; \
{ \
int err; \
err = subsystem_file_contents(subsystem, \
filename, \
scan_fmt, \
variable); \
if (err != 0) \
return (return_type) NULL; \
\
log_trace(os, container)(logstring, variable); \
}
/* init
*
* Initialize the container support and determine if
* we are running under cgroup control.
*/
void OSContainer::init() {
int mountid;
int parentid;
int major;
int minor;
FILE *mntinfo = NULL;
FILE *cgroup = NULL;
char buf[MAXPATHLEN+1];
char tmproot[MAXPATHLEN+1];
char tmpmount[MAXPATHLEN+1];
char tmpbase[MAXPATHLEN+1];
char *p;
jlong mem_limit;
assert(!_is_initialized, "Initializing OSContainer more than once");
_is_initialized = true;
_is_containerized = false;
log_trace(os, container)("OSContainer::init: Initializing Container Support");
if (!UseContainerSupport) {
log_trace(os, container)("Container Support not enabled");
return;
}
/*
* Find the cgroup mount point for memory and cpuset
* by reading /proc/self/mountinfo
*
* Example for docker:
* 219 214 0:29 /docker/7208cebd00fa5f2e342b1094f7bed87fa25661471a4637118e65f1c995be8a34 /sys/fs/cgroup/memory ro,nosuid,nodev,noexec,relatime - cgroup cgroup rw,memory
*
* Example for host:
* 34 28 0:29 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,memory
*/
mntinfo = fopen("/proc/self/mountinfo", "r");
if (mntinfo == NULL) {
log_debug(os, container)("Can't open /proc/self/mountinfo, %s",
os::strerror(errno));
return;
}
while ( (p = fgets(buf, MAXPATHLEN, mntinfo)) != NULL) {
// Look for the filesystem type and see if it's cgroup
char fstype[MAXPATHLEN+1];
fstype[0] = '\0';
char *s = strstr(p, " - ");
if (s != NULL &&
sscanf(s, " - %s", fstype) == 1 &&
strcmp(fstype, "cgroup") == 0) {
if (strstr(p, "memory") != NULL) {
int matched = sscanf(p, "%d %d %d:%d %s %s",
&mountid,
&parentid,
&major,
&minor,
tmproot,
tmpmount);
if (matched == 6) {
memory = new CgroupSubsystem(tmproot, tmpmount);
}
else
log_debug(os, container)("Incompatible str containing cgroup and memory: %s", p);
} else if (strstr(p, "cpuset") != NULL) {
int matched = sscanf(p, "%d %d %d:%d %s %s",
&mountid,
&parentid,
&major,
&minor,
tmproot,
tmpmount);
if (matched == 6) {
cpuset = new CgroupSubsystem(tmproot, tmpmount);
}
else {
log_debug(os, container)("Incompatible str containing cgroup and cpuset: %s", p);
}
} else if (strstr(p, "cpu,cpuacct") != NULL) {
int matched = sscanf(p, "%d %d %d:%d %s %s",
&mountid,
&parentid,
&major,
&minor,
tmproot,
tmpmount);
if (matched == 6) {
cpu = new CgroupSubsystem(tmproot, tmpmount);
cpuacct = new CgroupSubsystem(tmproot, tmpmount);
}
else {
log_debug(os, container)("Incompatible str containing cgroup and cpu,cpuacct: %s", p);
}
} else if (strstr(p, "cpuacct") != NULL) {
int matched = sscanf(p, "%d %d %d:%d %s %s",
&mountid,
&parentid,
&major,
&minor,
tmproot,
tmpmount);
if (matched == 6) {
cpuacct = new CgroupSubsystem(tmproot, tmpmount);
}
else {
log_debug(os, container)("Incompatible str containing cgroup and cpuacct: %s", p);
}
} else if (strstr(p, "cpu") != NULL) {
int matched = sscanf(p, "%d %d %d:%d %s %s",
&mountid,
&parentid,
&major,
&minor,
tmproot,
tmpmount);
if (matched == 6) {
cpu = new CgroupSubsystem(tmproot, tmpmount);
}
else {
log_debug(os, container)("Incompatible str containing cgroup and cpu: %s", p);
}
}
}
}
if (mntinfo != NULL) fclose(mntinfo);
/*
* Read /proc/self/cgroup and map host mount point to
* local one via /proc/self/mountinfo content above
*
* Docker example:
* 5:memory:/docker/6558aed8fc662b194323ceab5b964f69cf36b3e8af877a14b80256e93aecb044
*
* Host example:
* 5:memory:/user.slice
*
* Construct a path to the process specific memory and cpuset
* cgroup directory.
*
* For a container running under Docker from memory example above
* the paths would be:
*
* /sys/fs/cgroup/memory
*
* For a Host from memory example above the path would be:
*
* /sys/fs/cgroup/memory/user.slice
*
*/
cgroup = fopen("/proc/self/cgroup", "r");
if (cgroup == NULL) {
log_debug(os, container)("Can't open /proc/self/cgroup, %s",
os::strerror(errno));
return;
}
while ( (p = fgets(buf, MAXPATHLEN, cgroup)) != NULL) {
int cgno;
int matched;
char *controller;
char *base;
/* Skip cgroup number */
strsep(&p, ":");
/* Get controller and base */
controller = strsep(&p, ":");
base = strsep(&p, "\n");
if (controller != NULL) {
if (strstr(controller, "memory") != NULL) {
memory->set_subsystem_path(base);
} else if (strstr(controller, "cpuset") != NULL) {
cpuset->set_subsystem_path(base);
} else if (strstr(controller, "cpu,cpuacct") != NULL) {
cpu->set_subsystem_path(base);
cpuacct->set_subsystem_path(base);
} else if (strstr(controller, "cpuacct") != NULL) {
cpuacct->set_subsystem_path(base);
} else if (strstr(controller, "cpu") != NULL) {
cpu->set_subsystem_path(base);
}
}
}
if (cgroup != NULL) fclose(cgroup);
if (memory == NULL || cpuset == NULL || cpu == NULL) {
log_debug(os, container)("Required cgroup subsystems not found");
return;
}
// We need to update the amount of physical memory now that
// command line arguments have been processed.
if ((mem_limit = memory_limit_in_bytes()) > 0) {
os::Linux::set_physical_memory(mem_limit);
}
_is_containerized = true;
}
char * OSContainer::container_type() {
if (is_containerized()) {
return (char *)"cgroupv1";
} else {
return NULL;
}
}
/* memory_limit_in_bytes
*
* Return the limit of available memory for this process.
*
* return:
* memory limit in bytes or
* -1 for unlimited
* OSCONTAINER_ERROR for not supported
*/
jlong OSContainer::memory_limit_in_bytes() {
GET_CONTAINER_INFO(jlong, memory, "/memory.limit_in_bytes",
"Memory Limit is: " JLONG_FORMAT, JLONG_FORMAT, memlimit);
if (memlimit >= UNLIMITED_MEM) {
log_trace(os, container)("Memory Limit is: Unlimited");
return (jlong)-1;
}
else {
return memlimit;
}
}
jlong OSContainer::memory_and_swap_limit_in_bytes() {
GET_CONTAINER_INFO(jlong, memory, "/memory.memsw.limit_in_bytes",
"Memory and Swap Limit is: " JLONG_FORMAT, JLONG_FORMAT, memswlimit);
if (memswlimit >= UNLIMITED_MEM) {
log_trace(os, container)("Memory and Swap Limit is: Unlimited");
return (jlong)-1;
} else {
return memswlimit;
}
}
jlong OSContainer::memory_soft_limit_in_bytes() {
GET_CONTAINER_INFO(jlong, memory, "/memory.soft_limit_in_bytes",
"Memory Soft Limit is: " JLONG_FORMAT, JLONG_FORMAT, memsoftlimit);
if (memsoftlimit >= UNLIMITED_MEM) {
log_trace(os, container)("Memory Soft Limit is: Unlimited");
return (jlong)-1;
} else {
return memsoftlimit;
}
}
/* memory_usage_in_bytes
*
* Return the amount of used memory for this process.
*
* return:
* memory usage in bytes or
* -1 for unlimited
* OSCONTAINER_ERROR for not supported
*/
jlong OSContainer::memory_usage_in_bytes() {
GET_CONTAINER_INFO(jlong, memory, "/memory.usage_in_bytes",
"Memory Usage is: " JLONG_FORMAT, JLONG_FORMAT, memusage);
return memusage;
}
/* memory_max_usage_in_bytes
*
* Return the maximum amount of used memory for this process.
*
* return:
* max memory usage in bytes or
* OSCONTAINER_ERROR for not supported
*/
jlong OSContainer::memory_max_usage_in_bytes() {
GET_CONTAINER_INFO(jlong, memory, "/memory.max_usage_in_bytes",
"Maximum Memory Usage is: " JLONG_FORMAT, JLONG_FORMAT, memmaxusage);
return memmaxusage;
}
/* active_processor_count
*
* Calculate an appropriate number of active processors for the
* VM to use based on these three cgroup options.
*
* cpu affinity
* cpu quota & cpu period
* cpu shares
*
* Algorithm:
*
* Determine the number of available CPUs from sched_getaffinity
*
* If user specified a quota (quota != -1), calculate the number of
* required CPUs by dividing quota by period.
*
* If shares are in effect (shares != -1), calculate the number
* of cpus required for the shares by dividing the share value
* by PER_CPU_SHARES.
*
* All results of division are rounded up to the next whole number.
*
* Return the smaller number from the three different settings.
*
* return:
* number of cpus
* OSCONTAINER_ERROR if failure occured during extract of cpuset info
*/
int OSContainer::active_processor_count() {
int cpu_count, share_count, quota_count;
int share, quota, period;
int result;
cpu_count = os::Linux::active_processor_count();
share = cpu_shares();
if (share > -1) {
share_count = ceilf((float)share / (float)PER_CPU_SHARES);
log_trace(os, container)("cpu_share count: %d", share_count);
} else {
share_count = cpu_count;
}
quota = cpu_quota();
period = cpu_period();
if (quota > -1 && period > 0) {
quota_count = ceilf((float)quota / (float)period);
log_trace(os, container)("quota_count: %d", quota_count);
} else {
quota_count = cpu_count;
}
result = MIN2(cpu_count, MIN2(share_count, quota_count));
log_trace(os, container)("OSContainer::active_processor_count: %d", result);
return result;
}
char * OSContainer::cpu_cpuset_cpus() {
GET_CONTAINER_INFO_CPTR(cptr, cpuset, "/cpuset.cpus",
"cpuset.cpus is: %s", "%1023s", cpus, 1024);
return os::strdup(cpus);
}
char * OSContainer::cpu_cpuset_memory_nodes() {
GET_CONTAINER_INFO_CPTR(cptr, cpuset, "/cpuset.mems",
"cpuset.mems is: %s", "%1023s", mems, 1024);
return os::strdup(mems);
}
/* cpu_quota
*
* Return the number of milliseconds per period
* process is guaranteed to run.
*
* return:
* quota time in milliseconds
* -1 for no quota
* OSCONTAINER_ERROR for not supported
*/
int OSContainer::cpu_quota() {
GET_CONTAINER_INFO(int, cpu, "/cpu.cfs_quota_us",
"CPU Quota is: %d", "%d", quota);
return quota;
}
int OSContainer::cpu_period() {
GET_CONTAINER_INFO(int, cpu, "/cpu.cfs_period_us",
"CPU Period is: %d", "%d", period);
return period;
}
/* cpu_shares
*
* Return the amount of cpu shares available to the process
*
* return:
* Share number (typically a number relative to 1024)
* (2048 typically expresses 2 CPUs worth of processing)
* -1 for no share setup
* OSCONTAINER_ERROR for not supported
*/
int OSContainer::cpu_shares() {
GET_CONTAINER_INFO(int, cpu, "/cpu.shares",
"CPU Shares is: %d", "%d", shares);
// Convert 1024 to no shares setup
if (shares == 1024) return -1;
return shares;
}
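A worked example of the arithmetic described above active_processor_count(): quota/period and shares/PER_CPU_SHARES are rounded up, and the smallest of the three values wins. The sample numbers below are made up for illustration:

#include <algorithm>
#include <cmath>
#include <cstdio>

static int container_cpu_count(int affinity_cpus, int quota, int period, int shares) {
  const int per_cpu_shares = 1024;                      // PER_CPU_SHARES above
  int quota_count = (quota > -1 && period > 0)
      ? (int)ceilf((float)quota / (float)period) : affinity_cpus;
  int share_count = (shares > -1)
      ? (int)ceilf((float)shares / (float)per_cpu_shares) : affinity_cpus;
  return std::min(affinity_cpus, std::min(quota_count, share_count));
}

int main() {
  // --cpus=1.5 (quota 150000us, period 100000us) on an eight-CPU host: ceil(1.5) -> 2
  printf("%d\n", container_cpu_count(8, 150000, 100000, -1));
  // --cpu-shares=4096 and no quota: ceil(4096 / 1024) -> 4
  printf("%d\n", container_cpu_count(8, -1, 100000, 4096));
  // no cgroup limits at all: the sched_getaffinity count (8) is used
  printf("%d\n", container_cpu_count(8, -1, -1, -1));
  return 0;
}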


@ -0,0 +1,68 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_LINUX_VM_OSCONTAINER_LINUX_HPP
#define OS_LINUX_VM_OSCONTAINER_LINUX_HPP
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "memory/allocation.hpp"
#define OSCONTAINER_ERROR (-2)
class OSContainer: AllStatic {
private:
static bool _is_initialized;
static bool _is_containerized;
public:
static void init();
static inline bool is_containerized();
static char * container_type();
static jlong memory_limit_in_bytes();
static jlong memory_and_swap_limit_in_bytes();
static jlong memory_soft_limit_in_bytes();
static jlong memory_usage_in_bytes();
static jlong memory_max_usage_in_bytes();
static int active_processor_count();
static char * cpu_cpuset_cpus();
static char * cpu_cpuset_memory_nodes();
static int cpu_quota();
static int cpu_period();
static int cpu_shares();
};
inline bool OSContainer::is_containerized() {
assert(_is_initialized, "OSContainer not initialized");
return _is_containerized;
}
#endif // OS_LINUX_VM_OSCONTAINER_LINUX_HPP
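A hypothetical caller, shown only to illustrate the API surface declared above; the real consumers are os::physical_memory(), os::available_memory() and os::active_processor_count() in os_linux.cpp below:

#include "osContainer_linux.hpp"

// Sketch: query the container limits once the subsystem has been initialized.
static void log_container_limits() {
  OSContainer::init();                                       // normally done once during VM startup
  if (!OSContainer::is_containerized()) {
    return;                                                  // host values apply
  }
  jlong mem_limit  = OSContainer::memory_limit_in_bytes();   // -1 means unlimited
  int active_cpus  = OSContainer::active_processor_count();  // min of affinity, quota, shares
  // OSCONTAINER_ERROR (-2) from either call means a cgroup file could not be read.
}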


@ -38,6 +38,7 @@
#include "oops/oop.inline.hpp" #include "oops/oop.inline.hpp"
#include "os_linux.inline.hpp" #include "os_linux.inline.hpp"
#include "os_share_linux.hpp" #include "os_share_linux.hpp"
#include "osContainer_linux.hpp"
#include "prims/jniFastGetField.hpp" #include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp" #include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp" #include "runtime/arguments.hpp"
@ -171,13 +172,52 @@ julong os::available_memory() {
  julong os::Linux::available_memory() {
    // values in struct sysinfo are "unsigned long"
    struct sysinfo si;
-   sysinfo(&si);
-   return (julong)si.freeram * si.mem_unit;
+   julong avail_mem;
+   if (OSContainer::is_containerized()) {
+     jlong mem_limit, mem_usage;
+     if ((mem_limit = OSContainer::memory_limit_in_bytes()) > 0) {
+       if ((mem_usage = OSContainer::memory_usage_in_bytes()) > 0) {
+         if (mem_limit > mem_usage) {
+           avail_mem = (julong)mem_limit - (julong)mem_usage;
+         } else {
+           avail_mem = 0;
+         }
+         log_trace(os)("available container memory: " JULONG_FORMAT, avail_mem);
+         return avail_mem;
+       } else {
+         log_debug(os,container)("container memory usage call failed: " JLONG_FORMAT, mem_usage);
+       }
+     } else {
+       log_debug(os,container)("container memory unlimited or failed: " JLONG_FORMAT, mem_limit);
+     }
+   }
+   sysinfo(&si);
+   avail_mem = (julong)si.freeram * si.mem_unit;
+   log_trace(os)("available memory: " JULONG_FORMAT, avail_mem);
+   return avail_mem;
  }
  julong os::physical_memory() {
-   return Linux::physical_memory();
+   if (OSContainer::is_containerized()) {
+     jlong mem_limit;
+     if ((mem_limit = OSContainer::memory_limit_in_bytes()) > 0) {
+       log_trace(os)("total container memory: " JLONG_FORMAT, mem_limit);
+       return (julong)mem_limit;
+     } else {
+       if (mem_limit == OSCONTAINER_ERROR) {
+         log_debug(os,container)("container memory limit call failed");
+       }
+       if (mem_limit == -1) {
+         log_debug(os,container)("container memory unlimited, using host value");
+       }
+     }
+   }
+   jlong phys_mem = Linux::physical_memory();
+   log_trace(os)("total system memory: " JLONG_FORMAT, phys_mem);
+   return phys_mem;
  }
  // Return true if user is running as root.
@ -1950,6 +1990,8 @@ void os::print_os_info(outputStream* st) {
  os::Posix::print_load_average(st);
  os::Linux::print_full_memory_info(st);
+ os::Linux::print_container_info(st);
  }
  // Try to identify popular distros.
@ -2087,6 +2129,66 @@ void os::Linux::print_full_memory_info(outputStream* st) {
  st->cr();
  }
void os::Linux::print_container_info(outputStream* st) {
if (OSContainer::is_containerized()) {
st->print("container (cgroup) information:\n");
char *p = OSContainer::container_type();
if (p == NULL)
st->print("container_type() failed\n");
else {
st->print("container_type: %s\n", p);
}
p = OSContainer::cpu_cpuset_cpus();
if (p == NULL)
st->print("cpu_cpuset_cpus() failed\n");
else {
st->print("cpu_cpuset_cpus: %s\n", p);
free(p);
}
p = OSContainer::cpu_cpuset_memory_nodes();
if (p < 0)
st->print("cpu_memory_nodes() failed\n");
else {
st->print("cpu_memory_nodes: %s\n", p);
free(p);
}
int i = OSContainer::active_processor_count();
if (i < 0)
st->print("active_processor_count() failed\n");
else
st->print("active_processor_count: %d\n", i);
i = OSContainer::cpu_quota();
st->print("cpu_quota: %d\n", i);
i = OSContainer::cpu_period();
st->print("cpu_period: %d\n", i);
i = OSContainer::cpu_shares();
st->print("cpu_shares: %d\n", i);
jlong j = OSContainer::memory_limit_in_bytes();
st->print("memory_limit_in_bytes: " JLONG_FORMAT "\n", j);
j = OSContainer::memory_and_swap_limit_in_bytes();
st->print("memory_and_swap_limit_in_bytes: " JLONG_FORMAT "\n", j);
j = OSContainer::memory_soft_limit_in_bytes();
st->print("memory_soft_limit_in_bytes: " JLONG_FORMAT "\n", j);
j = OSContainer::OSContainer::memory_usage_in_bytes();
st->print("memory_usage_in_bytes: " JLONG_FORMAT "\n", j);
j = OSContainer::OSContainer::memory_max_usage_in_bytes();
st->print("memory_max_usage_in_bytes: " JLONG_FORMAT "\n", j);
st->cr();
}
}
  void os::print_memory_info(outputStream* st) {
  st->print("Memory:");
@ -4798,6 +4900,10 @@ extern "C" {
  }
  }
+ void os::pd_init_container_support() {
+   OSContainer::init();
+ }
  // this is called _after_ the global arguments have been parsed
  jint os::init_2(void) {
@ -4805,20 +4911,6 @@ jint os::init_2(void) {
  Linux::fast_thread_clock_init();
// Allocate a single page and mark it as readable for safepoint polling
address polling_page = (address) ::mmap(NULL, Linux::page_size(), PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
guarantee(polling_page != MAP_FAILED, "os::init_2: failed to allocate polling page");
os::set_polling_page(polling_page);
log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page));
if (!UseMembar) {
address mem_serialize_page = (address) ::mmap(NULL, Linux::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
guarantee(mem_serialize_page != MAP_FAILED, "mmap Failed for memory serialize page");
os::set_memory_serialize_page(mem_serialize_page);
log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page));
}
  // initialize suspend/resume support - must do this before signal_sets_init()
  if (SR_initialize() != 0) {
    perror("SR_initialize failed");
@ -4960,12 +5052,12 @@ static int _cpu_count(const cpu_set_t* cpus) {
  // dynamic check - see 6515172 for details.
  // If anything goes wrong we fallback to returning the number of online
  // processors - which can be greater than the number available to the process.
- int os::active_processor_count() {
+ int os::Linux::active_processor_count() {
  cpu_set_t cpus; // can represent at most 1024 (CPU_SETSIZE) processors
  cpu_set_t* cpus_p = &cpus;
  int cpus_size = sizeof(cpu_set_t);
- int configured_cpus = processor_count(); // upper bound on available cpus
+ int configured_cpus = os::processor_count(); // upper bound on available cpus
  int cpu_count = 0;
  // old build platforms may not support dynamic cpu sets
@ -5028,10 +5120,44 @@ int os::active_processor_count() {
  CPU_FREE(cpus_p);
  }
- assert(cpu_count > 0 && cpu_count <= processor_count(), "sanity check");
+ assert(cpu_count > 0 && cpu_count <= os::processor_count(), "sanity check");
  return cpu_count;
  }
// Determine the active processor count from one of
// three different sources:
//
// 1. User option -XX:ActiveProcessorCount
// 2. kernel os calls (sched_getaffinity or sysconf(_SC_NPROCESSORS_ONLN)
// 3. extracted from cgroup cpu subsystem (shares and quotas)
//
// Option 1, if specified, will always override.
// If the cgroup subsystem is active and configured, we
// will return the min of the cgroup and option 2 results.
// This is required since tools, such as numactl, that
// alter cpu affinity do not update cgroup subsystem
// cpuset configuration files.
int os::active_processor_count() {
// User has overridden the number of active processors
if (ActiveProcessorCount > 0) {
log_trace(os)("active_processor_count: "
"active processor count set by user : %d",
ActiveProcessorCount);
return ActiveProcessorCount;
}
int active_cpus;
if (OSContainer::is_containerized()) {
active_cpus = OSContainer::active_processor_count();
log_trace(os)("active_processor_count: determined by OSContainer: %d",
active_cpus);
} else {
active_cpus = os::Linux::active_processor_count();
}
return active_cpus;
}
  void os::set_native_thread_name(const char *name) {
  if (Linux::_pthread_setname_np) {
    char buf [16]; // according to glibc manpage, 16 chars incl. '/0'
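The container-aware available_memory() added above reports limit minus usage, clamped at zero, and falls back to the host's sysinfo() numbers when the limit is unlimited or unreadable. A compact stand-alone model with invented sample values:

#include <cstdio>

typedef long long jlong_t;            // stands in for jlong in this sketch
typedef unsigned long long julong_t;  // stands in for julong

static julong_t available_memory(jlong_t mem_limit, jlong_t mem_usage, julong_t host_free) {
  if (mem_limit > 0 && mem_usage > 0) {
    return mem_limit > mem_usage ? (julong_t)(mem_limit - mem_usage) : 0;
  }
  return host_free;                   // unlimited (-1) or error (-2): use the sysinfo() value
}

int main() {
  // 2 GiB limit with 1.5 GiB in use -> 512 MiB reported as available
  printf("%llu\n", available_memory(2LL << 30, 1536LL << 20, 16ULL << 30));
  // no limit -> the 16 GiB host value is reported
  printf("%llu\n", available_memory(-1, -1, 16ULL << 30));
  return 0;
}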


@ -32,6 +32,7 @@ static bool zero_page_read_protected() { return true; }
  class Linux {
  friend class os;
+ friend class OSContainer;
  friend class TestReserveMemorySpecial;
  static bool libjsig_is_loaded; // libjsig that interposes sigaction(),
@ -75,6 +76,9 @@ class Linux {
  static julong available_memory();
  static julong physical_memory() { return _physical_memory; }
+ static void set_physical_memory(julong phys_mem) { _physical_memory = phys_mem; }
+ static int active_processor_count();
  static void initialize_system_info();
  static int commit_memory_impl(char* addr, size_t bytes, bool exec);
@ -106,6 +110,7 @@ class Linux {
  static bool release_memory_special_huge_tlbfs(char* base, size_t bytes);
  static void print_full_memory_info(outputStream* st);
+ static void print_container_info(outputStream* st);
  static void print_distro_info(outputStream* st);
  static void print_libversion_info(outputStream* st);


@ -290,6 +290,14 @@ void os::Solaris::initialize_system_info() {
  }
  int os::active_processor_count() {
+   // User has overridden the number of active processors
+   if (ActiveProcessorCount > 0) {
+     log_trace(os)("active_processor_count: "
+                   "active processor count set by user : %d",
+                   ActiveProcessorCount);
+     return ActiveProcessorCount;
+   }
  int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
  pid_t pid = getpid();
  psetid_t pset = PS_NONE;
@ -2190,10 +2198,6 @@ int os::signal_wait() {
  static int page_size = -1;
- // The mmap MAP_ALIGN flag is supported on Solaris 9 and later. init_2() will
- // clear this var if support is not available.
- static bool has_map_align = true;
  int os::vm_page_size() {
  assert(page_size != -1, "must call os::init");
  return page_size;
@ -2560,7 +2564,7 @@ char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes,
  if (fixed) {
    flags |= MAP_FIXED;
- } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
+ } else if (alignment_hint > (size_t) vm_page_size()) {
    flags |= MAP_ALIGN;
    addr = (char*) alignment_hint;
  }
@ -4222,28 +4226,6 @@ jint os::init_2(void) {
  // try to enable extended file IO ASAP, see 6431278
  os::Solaris::try_enable_extended_io();
// Allocate a single page and mark it as readable for safepoint polling. Also
// use this first mmap call to check support for MAP_ALIGN.
address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
page_size,
MAP_PRIVATE | MAP_ALIGN,
PROT_READ);
if (polling_page == NULL) {
has_map_align = false;
polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
PROT_READ);
}
os::set_polling_page(polling_page);
log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page));
if (!UseMembar) {
address mem_serialize_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE);
guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
os::set_memory_serialize_page(mem_serialize_page);
log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page));
}
  // Check and sets minimum stack sizes against command line options
  if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
    return JNI_ERR;


@ -723,6 +723,14 @@ bool os::has_allocatable_memory_limit(julong* limit) {
  }
  int os::active_processor_count() {
+   // User has overridden the number of active processors
+   if (ActiveProcessorCount > 0) {
+     log_trace(os)("active_processor_count: "
+                   "active processor count set by user : %d",
+                   ActiveProcessorCount);
+     return ActiveProcessorCount;
+   }
  DWORD_PTR lpProcessAffinityMask = 0;
  DWORD_PTR lpSystemAffinityMask = 0;
  int proc_count = processor_count();
@ -2487,6 +2495,20 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  } // /EXCEPTION_ACCESS_VIOLATION
  // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if (exception_code == EXCEPTION_IN_PAGE_ERROR) {
CompiledMethod* nm = NULL;
JavaThread* thread = (JavaThread*)t;
if (in_java) {
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
}
if ((thread->thread_state() == _thread_in_vm &&
thread->doing_unsafe_access()) ||
(nm != NULL && nm->has_unsafe_access())) {
return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, (address)Assembler::locate_next_instruction(pc)));
}
}
  if (in_java) {
  switch (exception_code) {
  case EXCEPTION_INT_DIVIDE_BY_ZERO:
@ -3911,27 +3933,6 @@ static jint initSock();
  // this is called _after_ the global arguments have been parsed
  jint os::init_2(void) {
// Allocate a single page and mark it as readable for safepoint polling
address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
guarantee(polling_page != NULL, "Reserve Failed for polling page");
address return_page = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
guarantee(return_page != NULL, "Commit Failed for polling page");
os::set_polling_page(polling_page);
log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page));
if (!UseMembar) {
address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page");
return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
guarantee(return_page != NULL, "Commit Failed for memory serialize page");
os::set_memory_serialize_page(mem_serialize_page);
log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page));
}
  // Setup Windows Exceptions
  // for debugging float code generation bugs


@ -1,5 +1,5 @@
  /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@ -68,7 +68,7 @@ public:
  address o_reg_temps(int i) { return (address)&_o_reg_temps[i]; }
  #endif
- static int saved_exception_npc_offset_in_bytes() { return offset_of(JavaThread,_saved_exception_npc); }
+ static ByteSize saved_exception_npc_offset() { return byte_offset_of(JavaThread,_saved_exception_npc); }
  address saved_exception_npc() { return _saved_exception_npc; }
  void set_saved_exception_npc(address a) { _saved_exception_npc = a; }


@ -447,7 +447,7 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
  // a fault inside compiled code, the interpreter, or a stub
  // Support Safepoint Polling
- if ( sig == SIGSEGV && (address)info->si_addr == os::get_polling_page() ) {
+ if (sig == SIGSEGV && os::is_poll_address((address)info->si_addr)) {
    stub = SharedRuntime::get_poll_stub(pc);
  }


@ -63,7 +63,7 @@ public:
  static int o_reg_temps_offset_in_bytes() { return offset_of(JavaThread, _o_reg_temps); }
- static int saved_exception_npc_offset_in_bytes() { return offset_of(JavaThread,_saved_exception_npc); }
+ static ByteSize saved_exception_npc_offset() { return byte_offset_of(JavaThread,_saved_exception_npc); }
  address saved_exception_npc() { return _saved_exception_npc; }
  void set_saved_exception_npc(address a) { _saved_exception_npc = a; }


@ -1,5 +1,5 @@
  /*
- * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@ -187,10 +187,6 @@ private:
  }
  }
- void ensure_metadata_alive(ciMetadata* m) {
-   _factory->ensure_metadata_alive(m);
- }
  ciInstance* get_instance(oop o) {
  if (o == NULL) return NULL;
  return get_object(o)->as_instance();


@ -34,12 +34,36 @@
#include "oops/oop.inline.hpp" #include "oops/oop.inline.hpp"
#include "oops/fieldStreams.hpp" #include "oops/fieldStreams.hpp"
#include "runtime/fieldDescriptor.hpp" #include "runtime/fieldDescriptor.hpp"
#if INCLUDE_ALL_GCS
# include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif
// ciInstanceKlass // ciInstanceKlass
// //
// This class represents a Klass* in the HotSpot virtual machine // This class represents a Klass* in the HotSpot virtual machine
// whose Klass part in an InstanceKlass. // whose Klass part in an InstanceKlass.
// ------------------------------------------------------------------
// ensure_metadata_alive
//
// Ensure that the metadata wrapped by the ciMetadata is kept alive by GC.
// This is primarily useful for metadata which is considered as weak roots
// by the GC but need to be strong roots if reachable from a current compilation.
// InstanceKlass are created for both weak and strong metadata. Ensuring this metadata
// alive covers the cases where there are weak roots without performance cost.
//
static void ensure_metadata_alive(oop metadata_holder) {
#if INCLUDE_ALL_GCS
if (!UseG1GC) {
return;
}
if (metadata_holder != NULL) {
G1SATBCardTableModRefBS::enqueue(metadata_holder);
}
#endif
}
  // ------------------------------------------------------------------
  // ciInstanceKlass::ciInstanceKlass
  //
@ -64,6 +88,18 @@ ciInstanceKlass::ciInstanceKlass(Klass* k) :
_has_injected_fields = -1; _has_injected_fields = -1;
_implementor = NULL; // we will fill these lazily _implementor = NULL; // we will fill these lazily
oop holder = ik->klass_holder();
ensure_metadata_alive(holder);
if (ik->is_anonymous()) {
// Though ciInstanceKlass records class loader oop, it's not enough to keep
// VM anonymous classes alive (loader == NULL). Klass holder should be used instead.
// It is enough to record a ciObject, since cached elements are never removed
// during ciObjectFactory lifetime. ciObjectFactory itself is created for
// every compilation and lives for the whole duration of the compilation.
assert(holder != NULL, "holder of anonymous class is the mirror which is never null");
(void)CURRENT_ENV->get_object(holder);
}
Thread *thread = Thread::current(); Thread *thread = Thread::current();
if (ciObjectFactory::is_initialized()) { if (ciObjectFactory::is_initialized()) {
_loader = JNIHandles::make_local(thread, ik->class_loader()); _loader = JNIHandles::make_local(thread, ik->class_loader());

View file

@ -188,7 +188,6 @@ void ciReceiverTypeData::translate_receiver_data_from(const ProfileData* data) {
Klass* k = data->as_ReceiverTypeData()->receiver(row); Klass* k = data->as_ReceiverTypeData()->receiver(row);
if (k != NULL) { if (k != NULL) {
ciKlass* klass = CURRENT_ENV->get_klass(k); ciKlass* klass = CURRENT_ENV->get_klass(k);
CURRENT_ENV->ensure_metadata_alive(klass);
set_receiver(row, klass); set_receiver(row, klass);
} }
} }
@ -210,7 +209,6 @@ void ciReturnTypeEntry::translate_type_data_from(const ReturnTypeEntry* ret) {
void ciSpeculativeTrapData::translate_from(const ProfileData* data) { void ciSpeculativeTrapData::translate_from(const ProfileData* data) {
Method* m = data->as_SpeculativeTrapData()->method(); Method* m = data->as_SpeculativeTrapData()->method();
ciMethod* ci_m = CURRENT_ENV->get_method(m); ciMethod* ci_m = CURRENT_ENV->get_method(m);
CURRENT_ENV->ensure_metadata_alive(ci_m);
set_method(ci_m); set_method(ci_m);
} }

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -70,7 +70,6 @@ protected:
Klass* v = TypeEntries::valid_klass(k); Klass* v = TypeEntries::valid_klass(k);
if (v != NULL) { if (v != NULL) {
ciKlass* klass = CURRENT_ENV->get_klass(v); ciKlass* klass = CURRENT_ENV->get_klass(v);
CURRENT_ENV->ensure_metadata_alive(klass);
return with_status(klass, k); return with_status(klass, k);
} }
return with_status(NULL, k); return with_status(NULL, k);

View file

@ -47,9 +47,6 @@
#include "oops/oop.inline.hpp" #include "oops/oop.inline.hpp"
#include "runtime/fieldType.hpp" #include "runtime/fieldType.hpp"
#include "utilities/macros.hpp" #include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
# include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif
// ciObjectFactory // ciObjectFactory
// //
@ -363,19 +360,6 @@ ciObject* ciObjectFactory::create_new_object(oop o) {
ciMetadata* ciObjectFactory::create_new_metadata(Metadata* o) { ciMetadata* ciObjectFactory::create_new_metadata(Metadata* o) {
EXCEPTION_CONTEXT; EXCEPTION_CONTEXT;
// Keep metadata from being unloaded by keeping its holder alive.
if (_initialized && o->is_klass()) {
Klass* holder = ((Klass*)o);
if (holder->is_instance_klass() && InstanceKlass::cast(holder)->is_anonymous()) {
// Though ciInstanceKlass records class loader oop, it's not enough to keep
// VM anonymous classes alive (loader == NULL). Klass holder should be used instead.
// It is enough to record a ciObject, since cached elements are never removed
// during ciObjectFactory lifetime. ciObjectFactory itself is created for
// every compilation and lives for the whole duration of the compilation.
ciObject* h = get(holder->klass_holder());
}
}
if (o->is_klass()) { if (o->is_klass()) {
Klass* k = (Klass*)o; Klass* k = (Klass*)o;
if (k->is_instance_klass()) { if (k->is_instance_klass()) {
@ -401,38 +385,6 @@ ciMetadata* ciObjectFactory::create_new_metadata(Metadata* o) {
return NULL; return NULL;
} }
// ------------------------------------------------------------------
// ciObjectFactory::ensure_metadata_alive
//
// Ensure that the metadata wrapped by the ciMetadata is kept alive by GC.
// This is primarily useful for metadata which is considered as weak roots
// by the GC but need to be strong roots if reachable from a current compilation.
//
void ciObjectFactory::ensure_metadata_alive(ciMetadata* m) {
ASSERT_IN_VM; // We're handling raw oops here.
#if INCLUDE_ALL_GCS
if (!UseG1GC) {
return;
}
Klass* metadata_owner_klass;
if (m->is_klass()) {
metadata_owner_klass = m->as_klass()->get_Klass();
} else if (m->is_method()) {
metadata_owner_klass = m->as_method()->get_Method()->constants()->pool_holder();
} else {
fatal("Not implemented for other types of metadata");
return;
}
oop metadata_holder = metadata_owner_klass->klass_holder();
if (metadata_holder != NULL) {
G1SATBCardTableModRefBS::enqueue(metadata_holder);
}
#endif
}
//------------------------------------------------------------------ //------------------------------------------------------------------
// ciObjectFactory::get_unloaded_method // ciObjectFactory::get_unloaded_method
// //

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -73,8 +73,6 @@ private:
ciObject* create_new_object(oop o); ciObject* create_new_object(oop o);
ciMetadata* create_new_metadata(Metadata* o); ciMetadata* create_new_metadata(Metadata* o);
void ensure_metadata_alive(ciMetadata* m);
static bool is_equal(NonPermObject* p, oop key) { static bool is_equal(NonPermObject* p, oop key) {
return p->object()->get_oop() == key; return p->object()->get_oop() == key;
} }

View file

@ -0,0 +1,201 @@
/*
* Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/cms/cmsArguments.hpp"
#include "gc/cms/cmsCollectorPolicy.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/shared/gcArguments.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/defaultStream.hpp"
size_t CMSArguments::conservative_max_heap_alignment() {
return GenCollectedHeap::conservative_max_heap_alignment();
}
void CMSArguments::set_parnew_gc_flags() {
assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC && !UseG1GC,
"control point invariant");
assert(UseConcMarkSweepGC, "CMS is expected to be on here");
if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
FLAG_SET_DEFAULT(ParallelGCThreads, Abstract_VM_Version::parallel_worker_threads());
assert(ParallelGCThreads > 0, "We should always have at least one thread by default");
} else if (ParallelGCThreads == 0) {
jio_fprintf(defaultStream::error_stream(),
"The ParNew GC can not be combined with -XX:ParallelGCThreads=0\n");
vm_exit(1);
}
// By default YoungPLABSize and OldPLABSize are set to 4096 and 1024 respectively,
// these settings are default for Parallel Scavenger. For ParNew+Tenured configuration
// we set them to 1024 and 1024.
// See CR 6362902.
if (FLAG_IS_DEFAULT(YoungPLABSize)) {
FLAG_SET_DEFAULT(YoungPLABSize, (intx)1024);
}
if (FLAG_IS_DEFAULT(OldPLABSize)) {
FLAG_SET_DEFAULT(OldPLABSize, (intx)1024);
}
// When using compressed oops, we use local overflow stacks,
// rather than using a global overflow list chained through
// the klass word of the object's pre-image.
if (UseCompressedOops && !ParGCUseLocalOverflow) {
if (!FLAG_IS_DEFAULT(ParGCUseLocalOverflow)) {
warning("Forcing +ParGCUseLocalOverflow: needed if using compressed references");
}
FLAG_SET_DEFAULT(ParGCUseLocalOverflow, true);
}
assert(ParGCUseLocalOverflow || !UseCompressedOops, "Error");
}
// Adjust some sizes to suit CMS and/or ParNew needs; these work well on
// sparc/solaris for certain applications, but would gain from
// further optimization and tuning efforts, and would almost
// certainly gain from analysis of platform and environment.
void CMSArguments::initialize_flags() {
GCArguments::initialize_flags();
assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC, "Error");
assert(UseConcMarkSweepGC, "CMS is expected to be on here");
// Set CMS global values
CompactibleFreeListSpace::set_cms_values();
// Turn off AdaptiveSizePolicy by default for cms until it is complete.
disable_adaptive_size_policy("UseConcMarkSweepGC");
set_parnew_gc_flags();
size_t max_heap = align_down(MaxHeapSize,
CardTableRS::ct_max_alignment_constraint());
// Now make adjustments for CMS
intx tenuring_default = (intx)6;
size_t young_gen_per_worker = CMSYoungGenPerWorker;
// Preferred young gen size for "short" pauses:
// upper bound depends on # of threads and NewRatio.
const size_t preferred_max_new_size_unaligned =
MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * ParallelGCThreads));
size_t preferred_max_new_size =
align_up(preferred_max_new_size_unaligned, os::vm_page_size());
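  // (Illustrative numbers only, not from this change: with max_heap = 4G,
  //  NewRatio = 2, ParallelGCThreads = 4 and CMSYoungGenPerWorker = 64M,
  //  the first term is ~1365M while the second is 256M before word-size
  //  scaling, so the per-worker term bounds the preferred young gen size.)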
// Unless explicitly requested otherwise, size young gen
// for "short" pauses ~ CMSYoungGenPerWorker*ParallelGCThreads
// If either MaxNewSize or NewRatio is set on the command line,
// assume the user is trying to set the size of the young gen.
if (FLAG_IS_DEFAULT(MaxNewSize) && FLAG_IS_DEFAULT(NewRatio)) {
// Set MaxNewSize to our calculated preferred_max_new_size unless
// NewSize was set on the command line and it is larger than
// preferred_max_new_size.
if (!FLAG_IS_DEFAULT(NewSize)) { // NewSize explicitly set at command-line
FLAG_SET_ERGO(size_t, MaxNewSize, MAX2(NewSize, preferred_max_new_size));
} else {
FLAG_SET_ERGO(size_t, MaxNewSize, preferred_max_new_size);
}
log_trace(gc, heap)("CMS ergo set MaxNewSize: " SIZE_FORMAT, MaxNewSize);
// Code along this path potentially sets NewSize and OldSize
log_trace(gc, heap)("CMS set min_heap_size: " SIZE_FORMAT " initial_heap_size: " SIZE_FORMAT " max_heap: " SIZE_FORMAT,
Arguments::min_heap_size(), InitialHeapSize, max_heap);
size_t min_new = preferred_max_new_size;
if (FLAG_IS_CMDLINE(NewSize)) {
min_new = NewSize;
}
if (max_heap > min_new && Arguments::min_heap_size() > min_new) {
// Unless explicitly requested otherwise, make young gen
// at least min_new, and at most preferred_max_new_size.
if (FLAG_IS_DEFAULT(NewSize)) {
FLAG_SET_ERGO(size_t, NewSize, MAX2(NewSize, min_new));
FLAG_SET_ERGO(size_t, NewSize, MIN2(preferred_max_new_size, NewSize));
log_trace(gc, heap)("CMS ergo set NewSize: " SIZE_FORMAT, NewSize);
}
// Unless explicitly requested otherwise, size old gen
// so it's NewRatio x of NewSize.
if (FLAG_IS_DEFAULT(OldSize)) {
if (max_heap > NewSize) {
FLAG_SET_ERGO(size_t, OldSize, MIN2(NewRatio*NewSize, max_heap - NewSize));
log_trace(gc, heap)("CMS ergo set OldSize: " SIZE_FORMAT, OldSize);
}
}
}
}
// Unless explicitly requested otherwise, definitely
// promote all objects surviving "tenuring_default" scavenges.
if (FLAG_IS_DEFAULT(MaxTenuringThreshold) &&
FLAG_IS_DEFAULT(SurvivorRatio)) {
FLAG_SET_ERGO(uintx, MaxTenuringThreshold, tenuring_default);
}
// If we decided above (or user explicitly requested)
// `promote all' (via MaxTenuringThreshold := 0),
// prefer minuscule survivor spaces so as not to waste
// space for (non-existent) survivors
if (FLAG_IS_DEFAULT(SurvivorRatio) && MaxTenuringThreshold == 0) {
FLAG_SET_ERGO(uintx, SurvivorRatio, MAX2((uintx)1024, SurvivorRatio));
}
// OldPLABSize is interpreted in CMS as not the size of the PLAB in words,
// but rather the number of free blocks of a given size that are used when
// replenishing the local per-worker free list caches.
if (FLAG_IS_DEFAULT(OldPLABSize)) {
if (!FLAG_IS_DEFAULT(ResizeOldPLAB) && !ResizeOldPLAB) {
// OldPLAB sizing manually turned off: Use a larger default setting,
// unless it was manually specified. This is because a too-low value
// will slow down scavenges.
FLAG_SET_ERGO(size_t, OldPLABSize, CompactibleFreeListSpaceLAB::_default_static_old_plab_size); // default value before 6631166
} else {
FLAG_SET_DEFAULT(OldPLABSize, CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size); // old CMSParPromoteBlocksToClaim default
}
}
// If either of the static initialization defaults have changed, note this
// modification.
if (!FLAG_IS_DEFAULT(OldPLABSize) || !FLAG_IS_DEFAULT(OldPLABWeight)) {
CompactibleFreeListSpaceLAB::modify_initialization(OldPLABSize, OldPLABWeight);
}
log_trace(gc)("MarkStackSize: %uk MarkStackSizeMax: %uk", (unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));
}
void CMSArguments::disable_adaptive_size_policy(const char* collector_name) {
if (UseAdaptiveSizePolicy) {
if (FLAG_IS_CMDLINE(UseAdaptiveSizePolicy)) {
warning("Disabling UseAdaptiveSizePolicy; it is incompatible with %s.",
collector_name);
}
FLAG_SET_DEFAULT(UseAdaptiveSizePolicy, false);
}
}
CollectedHeap* CMSArguments::create_heap() {
return create_heap_with_policy<CMSHeap, ConcurrentMarkSweepPolicy>();
}

View file

@ -0,0 +1,42 @@
/*
* Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_CMS_CMSARGUMENTS_HPP
#define SHARE_GC_CMS_CMSARGUMENTS_HPP
#include "gc/shared/gcArguments.hpp"
class CollectedHeap;
class CMSArguments : public GCArguments {
private:
void disable_adaptive_size_policy(const char* collector_name);
void set_parnew_gc_flags();
public:
virtual void initialize_flags();
virtual size_t conservative_max_heap_alignment();
virtual CollectedHeap* create_heap();
};
#endif // SHARE_GC_CMS_CMSARGUMENTS_HPP

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -247,8 +247,8 @@ public:
_g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {} _g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {}
void work(uint worker_id) { void work(uint worker_id) {
ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size); ParKnownGarbageHRClosure par_known_garbage_cl(_hrSorted, _chunk_size);
_g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer); _g1->heap_region_par_iterate_from_worker_offset(&par_known_garbage_cl, &_hrclaimer, worker_id);
} }
}; };

View file

@ -0,0 +1,99 @@
/*
* Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/gcArguments.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/vm_version.hpp"
size_t G1Arguments::conservative_max_heap_alignment() {
return HeapRegion::max_region_size();
}
void G1Arguments::initialize_flags() {
GCArguments::initialize_flags();
assert(UseG1GC, "Error");
#if defined(COMPILER1) || INCLUDE_JVMCI
FastTLABRefill = false;
#endif
FLAG_SET_DEFAULT(ParallelGCThreads, Abstract_VM_Version::parallel_worker_threads());
if (ParallelGCThreads == 0) {
assert(!FLAG_IS_DEFAULT(ParallelGCThreads), "The default value for ParallelGCThreads should not be 0.");
vm_exit_during_initialization("The flag -XX:+UseG1GC can not be combined with -XX:ParallelGCThreads=0", NULL);
}
#if INCLUDE_ALL_GCS
if (FLAG_IS_DEFAULT(G1ConcRefinementThreads)) {
FLAG_SET_ERGO(uint, G1ConcRefinementThreads, ParallelGCThreads);
}
#endif
// MarkStackSize will be set (if it hasn't been set by the user)
// when concurrent marking is initialized.
// Its value will be based upon the number of parallel marking threads.
// But we do set the maximum mark stack size here.
if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
FLAG_SET_DEFAULT(MarkStackSizeMax, 128 * TASKQUEUE_SIZE);
}
if (FLAG_IS_DEFAULT(GCTimeRatio) || GCTimeRatio == 0) {
// In G1, we want the default GC overhead goal to be higher than
// it is for PS, or the heap might be expanded too aggressively.
// We set it here to ~8%.
FLAG_SET_DEFAULT(GCTimeRatio, 12);
}
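  // (Worked out: the GC time goal is roughly 1 / (1 + GCTimeRatio), so
  //  GCTimeRatio = 12 gives about 1/13 = ~7.7%, i.e. the ~8% noted above.)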
// Below, we might need to calculate the pause time interval based on
// the pause target. When we do so we are going to give G1 maximum
// flexibility and allow it to do pauses when it needs to. So, we'll
// arrange that the pause interval to be pause time target + 1 to
// ensure that a) the pause time target is maximized with respect to
// the pause interval and b) we maintain the invariant that pause
// time target < pause interval. If the user does not want this
// maximum flexibility, they will have to set the pause interval
// explicitly.
if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
// The default pause time target in G1 is 200ms
FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
}
// Then, if the interval parameter was not set, set it according to
// the pause time target (this will also deal with the case when the
// pause time target is the default value).
if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
}
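  // (With the defaults above, MaxGCPauseMillis = 200 yields
  //  GCPauseIntervalMillis = 201, preserving target < interval.)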
log_trace(gc)("MarkStackSize: %uk MarkStackSizeMax: %uk", (unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));
}
CollectedHeap* G1Arguments::create_heap() {
return create_heap_with_policy<G1CollectedHeap, G1CollectorPolicy>();
}

View file

@ -0,0 +1,39 @@
/*
* Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_G1_G1ARGUMENTS_HPP
#define SHARE_GC_G1_G1ARGUMENTS_HPP
#include "gc/shared/gcArguments.hpp"
class CollectedHeap;
class G1Arguments : public GCArguments {
public:
virtual void initialize_flags();
virtual size_t conservative_max_heap_alignment();
virtual CollectedHeap* create_heap();
};
#endif // SHARE_GC_G1_G1ARGUMENTS_HPP

View file

@ -314,7 +314,7 @@ public:
G1CollectedHeap* g1h = G1CollectedHeap::heap(); G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1ConcurrentMark* cm = g1h->concurrent_mark(); G1ConcurrentMark* cm = g1h->concurrent_mark();
G1CreateLiveDataClosure cl(g1h, cm, cm->next_mark_bitmap(), _live_data); G1CreateLiveDataClosure cl(g1h, cm, cm->next_mark_bitmap(), _live_data);
g1h->heap_region_par_iterate(&cl, worker_id, &_hr_claimer); g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
} }
}; };
@ -381,7 +381,7 @@ public:
void work(uint worker_id) { void work(uint worker_id) {
G1FinalizeCardLiveDataClosure cl(G1CollectedHeap::heap(), _bitmap, _live_data); G1FinalizeCardLiveDataClosure cl(G1CollectedHeap::heap(), _bitmap, _live_data);
G1CollectedHeap::heap()->heap_region_par_iterate(&cl, worker_id, &_hr_claimer); G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
} }
}; };
@ -560,7 +560,7 @@ public:
_mark_bitmap, _mark_bitmap,
_act_live_data, _act_live_data,
&_exp_live_data); &_exp_live_data);
_g1h->heap_region_par_iterate(&cl, worker_id, &_hr_claimer); _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
Atomic::add(cl.failures(), &_failures); Atomic::add(cl.failures(), &_failures);
} }

View file

@ -38,6 +38,7 @@
#include "gc/g1/g1ConcurrentRefine.hpp" #include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp" #include "gc/g1/g1ConcurrentRefineThread.hpp"
#include "gc/g1/g1EvacStats.inline.hpp" #include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1FullCollector.hpp"
#include "gc/g1/g1FullGCScope.hpp" #include "gc/g1/g1FullGCScope.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp" #include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HeapSizingPolicy.hpp" #include "gc/g1/g1HeapSizingPolicy.hpp"
@ -48,10 +49,9 @@
#include "gc/g1/g1ParScanThreadState.inline.hpp" #include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1Policy.hpp" #include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp" #include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "gc/g1/g1RemSet.inline.hpp" #include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1RootClosures.hpp" #include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1RootProcessor.hpp" #include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/g1SerialFullCollector.hpp"
#include "gc/g1/g1StringDedup.hpp" #include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1YCTypes.hpp" #include "gc/g1/g1YCTypes.hpp"
#include "gc/g1/g1YoungRemSetSamplingThread.hpp" #include "gc/g1/g1YoungRemSetSamplingThread.hpp"
@ -143,6 +143,12 @@ void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_region
reset_from_card_cache(start_idx, num_regions); reset_from_card_cache(start_idx, num_regions);
} }
HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
MemRegion mr) {
return new HeapRegion(hrs_index, bot(), mr);
}
// Private methods. // Private methods.
HeapRegion* HeapRegion*
@ -1155,7 +1161,6 @@ void G1CollectedHeap::prepare_heap_for_mutators() {
void G1CollectedHeap::abort_refinement() { void G1CollectedHeap::abort_refinement() {
if (_hot_card_cache->use_cache()) { if (_hot_card_cache->use_cache()) {
_hot_card_cache->reset_card_counts();
_hot_card_cache->reset_hot_cache(); _hot_card_cache->reset_hot_cache();
} }
@ -1199,6 +1204,10 @@ void G1CollectedHeap::verify_after_full_collection() {
} }
void G1CollectedHeap::print_heap_after_full_collection(G1HeapTransition* heap_transition) { void G1CollectedHeap::print_heap_after_full_collection(G1HeapTransition* heap_transition) {
// Post collection logging.
// We should do this after we potentially resize the heap so
// that all the COMMIT / UNCOMMIT events are generated before
// the compaction events.
print_hrm_post_compaction(); print_hrm_post_compaction();
heap_transition->print(); heap_transition->print();
print_heap_after_gc(); print_heap_after_gc();
@ -1221,23 +1230,18 @@ void G1CollectedHeap::do_full_collection_inner(G1FullGCScope* scope) {
gc_prologue(true); gc_prologue(true);
prepare_heap_for_full_collection(); prepare_heap_for_full_collection();
G1SerialFullCollector serial(scope, ref_processor_stw()); G1FullCollector collector(scope, ref_processor_stw(), concurrent_mark()->next_mark_bitmap(), workers()->active_workers());
serial.prepare_collection(); collector.prepare_collection();
serial.collect(); collector.collect();
serial.complete_collection(); collector.complete_collection();
prepare_heap_for_mutators(); prepare_heap_for_mutators();
g1_policy()->record_full_collection_end(); g1_policy()->record_full_collection_end();
gc_epilogue(true); gc_epilogue(true);
// Post collection verification.
verify_after_full_collection(); verify_after_full_collection();
// Post collection logging.
// We should do this after we potentially resize the heap so
// that all the COMMIT / UNCOMMIT events are generated before
// the compaction events.
print_heap_after_full_collection(scope->heap_transition()); print_heap_after_full_collection(scope->heap_transition());
} }
@ -1269,10 +1273,10 @@ void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
} }
void G1CollectedHeap::resize_if_necessary_after_full_collection() { void G1CollectedHeap::resize_if_necessary_after_full_collection() {
// Include bytes that will be pre-allocated to support collections, as "used". // Capacity, free and used after the GC counted as full regions to
const size_t used_after_gc = used(); // include the waste in the following calculations.
const size_t capacity_after_gc = capacity(); const size_t capacity_after_gc = capacity();
const size_t free_after_gc = capacity_after_gc - used_after_gc; const size_t used_after_gc = capacity_after_gc - unused_committed_regions_in_bytes();
// This is enforced in arguments.cpp. // This is enforced in arguments.cpp.
assert(MinHeapFreeRatio <= MaxHeapFreeRatio, assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
@ -1326,8 +1330,9 @@ void G1CollectedHeap::resize_if_necessary_after_full_collection() {
size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity after Full GC). " log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity after Full GC). "
"Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)", "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
capacity_after_gc, used_after_gc, minimum_desired_capacity, MinHeapFreeRatio); "min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
capacity_after_gc, used_after_gc, used(), minimum_desired_capacity, MinHeapFreeRatio);
expand(expand_bytes, _workers); expand(expand_bytes, _workers);
@ -1337,8 +1342,9 @@ void G1CollectedHeap::resize_if_necessary_after_full_collection() {
size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity; size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity after Full GC). " log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity after Full GC). "
"Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)", "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
capacity_after_gc, used_after_gc, minimum_desired_capacity, MinHeapFreeRatio); "maximum_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
capacity_after_gc, used_after_gc, used(), maximum_desired_capacity, MaxHeapFreeRatio);
shrink(shrink_bytes); shrink(shrink_bytes);
} }
@ -1959,6 +1965,10 @@ size_t G1CollectedHeap::capacity() const {
return _hrm.length() * HeapRegion::GrainBytes; return _hrm.length() * HeapRegion::GrainBytes;
} }
size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
return _hrm.total_free_bytes();
}
void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) { void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
hr->reset_gc_time_stamp(); hr->reset_gc_time_stamp();
} }
@ -2262,10 +2272,15 @@ void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
_hrm.iterate(cl); _hrm.iterate(cl);
} }
void G1CollectedHeap::heap_region_par_iterate(HeapRegionClosure* cl, void G1CollectedHeap::heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
uint worker_id, HeapRegionClaimer *hrclaimer,
uint worker_id) const {
_hrm.par_iterate(cl, hrclaimer, hrclaimer->offset_for_worker(worker_id));
}
void G1CollectedHeap::heap_region_par_iterate_from_start(HeapRegionClosure* cl,
HeapRegionClaimer *hrclaimer) const { HeapRegionClaimer *hrclaimer) const {
_hrm.par_iterate(cl, worker_id, hrclaimer); _hrm.par_iterate(cl, hrclaimer, 0);
} }
void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
@ -2276,14 +2291,6 @@ void G1CollectedHeap::collection_set_iterate_from(HeapRegionClosure *cl, uint wo
_collection_set.iterate_from(cl, worker_id, workers()->active_workers()); _collection_set.iterate_from(cl, worker_id, workers()->active_workers());
} }
HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
HeapRegion* result = _hrm.next_region_in_heap(from);
while (result != NULL && result->is_pinned()) {
result = _hrm.next_region_in_heap(result);
}
return result;
}
HeapWord* G1CollectedHeap::block_start(const void* addr) const { HeapWord* G1CollectedHeap::block_start(const void* addr) const {
HeapRegion* hr = heap_region_containing(addr); HeapRegion* hr = heap_region_containing(addr);
return hr->block_start(addr); return hr->block_start(addr);
@ -2375,7 +2382,7 @@ bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
switch (vo) { switch (vo) {
case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr); case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr); case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked() && !hr->is_archive(); case VerifyOption_G1UseFullMarking: return is_obj_dead_full(obj, hr);
default: ShouldNotReachHere(); default: ShouldNotReachHere();
} }
return false; // keep some compilers happy return false; // keep some compilers happy
@ -2386,10 +2393,7 @@ bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
switch (vo) { switch (vo) {
case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj); case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
case VerifyOption_G1UseNextMarking: return is_obj_ill(obj); case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
case VerifyOption_G1UseMarkWord: { case VerifyOption_G1UseFullMarking: return is_obj_dead_full(obj);
HeapRegion* hr = _hrm.addr_to_region((HeapWord*)obj);
return !obj->is_gc_marked() && !hr->is_archive();
}
default: ShouldNotReachHere(); default: ShouldNotReachHere();
} }
return false; // keep some compilers happy return false; // keep some compilers happy

View file

@ -1046,6 +1046,7 @@ public:
// The Concurrent Marking reference processor... // The Concurrent Marking reference processor...
ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; } ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
size_t unused_committed_regions_in_bytes() const;
virtual size_t capacity() const; virtual size_t capacity() const;
virtual size_t used() const; virtual size_t used() const;
// This should be called when we're not holding the heap lock. The // This should be called when we're not holding the heap lock. The
@ -1181,6 +1182,8 @@ public:
return barrier_set_cast<G1SATBCardTableLoggingModRefBS>(barrier_set()); return barrier_set_cast<G1SATBCardTableLoggingModRefBS>(barrier_set());
} }
G1HotCardCache* g1_hot_card_cache() const { return _hot_card_cache; }
// Iteration functions. // Iteration functions.
// Iterate over all objects, calling "cl.do_object" on each. // Iterate over all objects, calling "cl.do_object" on each.
@ -1207,14 +1210,17 @@ public:
inline HeapWord* bottom_addr_for_region(uint index) const; inline HeapWord* bottom_addr_for_region(uint index) const;
// Iterate over the heap regions in parallel. Assumes that this will be called // Two functions to iterate over the heap regions in parallel. Threads
// in parallel by a number of worker threads with distinct worker ids // compete using the HeapRegionClaimer to claim the regions before
// in the range passed to the HeapRegionClaimer. Applies "blk->doHeapRegion" // applying the closure on them.
// to each of the regions, by attempting to claim the region using the // The _from_worker_offset version uses the HeapRegionClaimer and
// HeapRegionClaimer and, if successful, applying the closure to the claimed // the worker id to calculate a start offset to prevent all workers from
// region. // starting at the same point.
void heap_region_par_iterate(HeapRegionClosure* cl, void heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
uint worker_id, HeapRegionClaimer* hrclaimer,
uint worker_id) const;
void heap_region_par_iterate_from_start(HeapRegionClosure* cl,
HeapRegionClaimer* hrclaimer) const; HeapRegionClaimer* hrclaimer) const;
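  // (Sketch of the assumed offset calculation, for illustration only; the
  //  real HeapRegionClaimer::offset_for_worker may differ in detail:
  //    offset_for_worker(worker_id) == _n_regions * worker_id / _n_workers
  //  so that workers begin their claiming pass at evenly spaced regions.)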
// Iterate over the regions (if any) in the current collection set. // Iterate over the regions (if any) in the current collection set.
@ -1226,8 +1232,6 @@ public:
// collection set regions. // collection set regions.
void collection_set_iterate_from(HeapRegionClosure *blk, uint worker_id); void collection_set_iterate_from(HeapRegionClosure *blk, uint worker_id);
HeapRegion* next_compaction_region(const HeapRegion* from) const;
// Returns the HeapRegion that contains addr. addr must not be NULL. // Returns the HeapRegion that contains addr. addr must not be NULL.
template <class T> template <class T>
inline HeapRegion* heap_region_containing(const T addr) const; inline HeapRegion* heap_region_containing(const T addr) const;
@ -1391,6 +1395,9 @@ public:
inline bool is_obj_ill(const oop obj) const; inline bool is_obj_ill(const oop obj) const;
inline bool is_obj_dead_full(const oop obj, const HeapRegion* hr) const;
inline bool is_obj_dead_full(const oop obj) const;
G1ConcurrentMark* concurrent_mark() const { return _cm; } G1ConcurrentMark* concurrent_mark() const { return _cm; }
// Refinement // Refinement
@ -1437,7 +1444,7 @@ public:
// vo == UsePrevMarking -> use "prev" marking information, // vo == UsePrevMarking -> use "prev" marking information,
// vo == UseNextMarking -> use "next" marking information // vo == UseNextMarking -> use "next" marking information
// vo == UseMarkWord -> use the mark word in the object header // vo == UseFullMarking -> use "next" marking bitmap but no TAMS
// //
// NOTE: Only the "prev" marking information is guaranteed to be // NOTE: Only the "prev" marking information is guaranteed to be
// consistent most of the time, so most calls to this should use // consistent most of the time, so most calls to this should use
@ -1446,7 +1453,7 @@ public:
// vo == UseNextMarking, which is to verify the "next" marking // vo == UseNextMarking, which is to verify the "next" marking
// information at the end of remark. // information at the end of remark.
// Currently there is only one place where this is called with // Currently there is only one place where this is called with
// vo == UseMarkWord, which is to verify the marking during a // vo == UseFullMarking, which is to verify the marking during a
// full GC. // full GC.
void verify(VerifyOption vo); void verify(VerifyOption vo);

View file

@ -264,6 +264,14 @@ inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
return is_obj_ill(obj, heap_region_containing(obj)); return is_obj_ill(obj, heap_region_containing(obj));
} }
inline bool G1CollectedHeap::is_obj_dead_full(const oop obj, const HeapRegion* hr) const {
return !isMarkedNext(obj) && !hr->is_archive();
}
inline bool G1CollectedHeap::is_obj_dead_full(const oop obj) const {
return is_obj_dead_full(obj, heap_region_containing(obj));
}
inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) { inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object"); assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
_humongous_reclaim_candidates.set_candidate(region, value); _humongous_reclaim_candidates.set_candidate(region, value);

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -37,11 +37,6 @@ bool G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
return false; return false;
} }
HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
MemRegion mr) {
return new HeapRegion(hrs_index, bot(), mr);
}
G1Policy* G1CollectedHeap::create_g1_policy(STWGCTimer* gc_timer) { G1Policy* G1CollectedHeap::create_g1_policy(STWGCTimer* gc_timer) {
return new G1DefaultPolicy(gc_timer); return new G1DefaultPolicy(gc_timer);
} }

View file

@ -634,7 +634,7 @@ public:
void work(uint worker_id) { void work(uint worker_id) {
SuspendibleThreadSetJoiner sts_join(_suspendible); SuspendibleThreadSetJoiner sts_join(_suspendible);
G1CollectedHeap::heap()->heap_region_par_iterate(&_cl, worker_id, &_hr_claimer); G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
} }
bool is_complete() { bool is_complete() {
@ -1140,7 +1140,7 @@ public:
HRRSCleanupTask hrrs_cleanup_task; HRRSCleanupTask hrrs_cleanup_task;
G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list, G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
&hrrs_cleanup_task); &hrrs_cleanup_task);
_g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer); _g1h->heap_region_par_iterate_from_worker_offset(&g1_note_end, &_hrclaimer, worker_id);
assert(g1_note_end.complete(), "Shouldn't have yielded!"); assert(g1_note_end.complete(), "Shouldn't have yielded!");
// Now update the lists // Now update the lists

View file

@ -25,6 +25,7 @@
#include "precompiled.hpp" #include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp" #include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
#include "gc/g1/heapRegion.hpp"
#include "memory/virtualspace.hpp" #include "memory/virtualspace.hpp"
void G1CMBitMap::print_on_error(outputStream* st, const char* prefix) const { void G1CMBitMap::print_on_error(outputStream* st, const char* prefix) const {
@ -65,3 +66,10 @@ void G1CMBitMap::clear_range(MemRegion mr) {
_bm.at_put_range(addr_to_offset(intersection.start()), _bm.at_put_range(addr_to_offset(intersection.start()),
addr_to_offset(intersection.end()), false); addr_to_offset(intersection.end()), false);
} }
void G1CMBitMap::clear_region(HeapRegion* region) {
if (!region->is_empty()) {
MemRegion mr(region->bottom(), region->top());
clear_range(mr);
}
}

View file

@ -28,6 +28,7 @@
#include "gc/g1/g1RegionToSpaceMapper.hpp" #include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "memory/allocation.hpp" #include "memory/allocation.hpp"
#include "memory/memRegion.hpp" #include "memory/memRegion.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/bitMap.hpp" #include "utilities/bitMap.hpp"
#include "utilities/globalDefinitions.hpp" #include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp" #include "utilities/macros.hpp"
@ -35,6 +36,7 @@
class G1CMBitMap; class G1CMBitMap;
class G1CMTask; class G1CMTask;
class G1ConcurrentMark; class G1ConcurrentMark;
class HeapRegion;
// Closure for iteration over bitmaps // Closure for iteration over bitmaps
class G1CMBitMapClosure VALUE_OBJ_CLASS_SPEC { class G1CMBitMapClosure VALUE_OBJ_CLASS_SPEC {
@ -96,6 +98,7 @@ public:
void initialize(MemRegion heap, G1RegionToSpaceMapper* storage); void initialize(MemRegion heap, G1RegionToSpaceMapper* storage);
// Read marks // Read marks
bool is_marked(oop obj) const;
bool is_marked(HeapWord* addr) const { bool is_marked(HeapWord* addr) const {
assert(_covered.contains(addr), assert(_covered.contains(addr),
"Address " PTR_FORMAT " is outside underlying space from " PTR_FORMAT " to " PTR_FORMAT, "Address " PTR_FORMAT " is outside underlying space from " PTR_FORMAT " to " PTR_FORMAT,
@ -120,9 +123,12 @@ public:
// Write marks. // Write marks.
inline void mark(HeapWord* addr); inline void mark(HeapWord* addr);
inline void clear(HeapWord* addr); inline void clear(HeapWord* addr);
inline void clear(oop obj);
inline bool par_mark(HeapWord* addr); inline bool par_mark(HeapWord* addr);
inline bool par_mark(oop obj);
void clear_range(MemRegion mr); void clear_range(MemRegion mr);
void clear_region(HeapRegion* hr);
}; };
#endif // SHARE_VM_GC_G1_G1CONCURRENTMARKBITMAP_HPP #endif // SHARE_VM_GC_G1_G1CONCURRENTMARKBITMAP_HPP

View file

@ -83,4 +83,16 @@ inline bool G1CMBitMap::par_mark(HeapWord* addr) {
return _bm.par_set_bit(addr_to_offset(addr)); return _bm.par_set_bit(addr_to_offset(addr));
} }
inline bool G1CMBitMap::par_mark(oop obj) {
return par_mark((HeapWord*) obj);
}
inline bool G1CMBitMap::is_marked(oop obj) const{
return is_marked((HeapWord*) obj);
}
inline void G1CMBitMap::clear(oop obj) {
clear((HeapWord*) obj);
}
#endif // SHARE_VM_GC_G1_G1CONCURRENTMARKBITMAP_INLINE_HPP #endif // SHARE_VM_GC_G1_G1CONCURRENTMARKBITMAP_INLINE_HPP
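A brief usage sketch of the new oop-based overloads; the bitmap accessor, object and work queue below are illustrative assumptions, not taken from this change:

  G1CMBitMap* bitmap = collector->mark_bitmap();   // e.g. the full-GC marking bitmap
  oop obj = ...;                                   // some heap object
  if (bitmap->par_mark(obj)) {                     // true only for the first thread to mark it
    _oop_stack.push(obj);                          // hypothetical work queue
  }
  if (bitmap->is_marked(obj)) {
    // obj is treated as live by this marking
  }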

View file

@ -0,0 +1,245 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1FullCollector.hpp"
#include "gc/g1/g1FullGCAdjustTask.hpp"
#include "gc/g1/g1FullGCCompactTask.hpp"
#include "gc/g1/g1FullGCMarker.inline.hpp"
#include "gc/g1/g1FullGCMarkTask.hpp"
#include "gc/g1/g1FullGCPrepareTask.hpp"
#include "gc/g1/g1FullGCReferenceProcessorExecutor.hpp"
#include "gc/g1/g1FullGCScope.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "logging/log.hpp"
#include "runtime/biasedLocking.hpp"
#include "utilities/debug.hpp"
static void clear_and_activate_derived_pointers() {
#if COMPILER2_OR_JVMCI
DerivedPointerTable::clear();
#endif
}
static void deactivate_derived_pointers() {
#if COMPILER2_OR_JVMCI
DerivedPointerTable::set_active(false);
#endif
}
static void update_derived_pointers() {
#if COMPILER2_OR_JVMCI
DerivedPointerTable::update_pointers();
#endif
}
G1FullCollector::G1FullCollector(G1FullGCScope* scope,
ReferenceProcessor* reference_processor,
G1CMBitMap* bitmap,
uint workers) :
_scope(scope),
_num_workers(workers),
_mark_bitmap(bitmap),
_oop_queue_set(_num_workers),
_array_queue_set(_num_workers),
_preserved_marks_set(true),
_reference_processor(reference_processor),
_serial_compaction_point(),
_is_alive(_mark_bitmap),
_is_alive_mutator(_reference_processor, &_is_alive) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
_preserved_marks_set.init(_num_workers);
_markers = NEW_C_HEAP_ARRAY(G1FullGCMarker*, _num_workers, mtGC);
_compaction_points = NEW_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _num_workers, mtGC);
for (uint i = 0; i < _num_workers; i++) {
_markers[i] = new G1FullGCMarker(i, _preserved_marks_set.get(i), mark_bitmap());
_compaction_points[i] = new G1FullGCCompactionPoint();
_oop_queue_set.register_queue(i, marker(i)->oop_stack());
_array_queue_set.register_queue(i, marker(i)->objarray_stack());
}
}
G1FullCollector::~G1FullCollector() {
for (uint i = 0; i < _num_workers; i++) {
delete _markers[i];
delete _compaction_points[i];
}
FREE_C_HEAP_ARRAY(G1FullGCMarker*, _markers);
FREE_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _compaction_points);
}
void G1FullCollector::prepare_collection() {
_reference_processor->enable_discovery();
_reference_processor->setup_policy(scope()->should_clear_soft_refs());
// When collecting the permanent generation Method*s may be moving,
// so we either have to flush all bcp data or convert it into bci.
CodeCache::gc_prologue();
// We should save the marks of the currently locked biased monitors.
// The marking doesn't preserve the marks of biased objects.
BiasedLocking::preserve_marks();
// Clear and activate derived pointer collection.
clear_and_activate_derived_pointers();
}
void G1FullCollector::collect() {
phase1_mark_live_objects();
verify_after_marking();
// Don't add any more derived pointers during later phases
deactivate_derived_pointers();
phase2_prepare_compaction();
phase3_adjust_pointers();
phase4_do_compaction();
}
void G1FullCollector::complete_collection() {
// Restore all marks.
restore_marks();
// When the pointers have been adjusted and moved, we can
// update the derived pointer table.
update_derived_pointers();
BiasedLocking::restore_marks();
CodeCache::gc_epilogue();
JvmtiExport::gc_epilogue();
}
void G1FullCollector::phase1_mark_live_objects() {
// Recursively traverse all live objects and mark them.
GCTraceTime(Info, gc, phases) info("Phase 1: Mark live objects", scope()->timer());
// Do the actual marking.
G1FullGCMarkTask marking_task(this);
run_task(&marking_task);
// Process references discovered during marking.
G1FullGCReferenceProcessingExecutor reference_processing(this);
reference_processing.execute(scope()->timer(), scope()->tracer());
// Weak oops cleanup.
{
GCTraceTime(Debug, gc, phases) trace("Phase 1: Weak Processing", scope()->timer());
WeakProcessor::weak_oops_do(&_is_alive, &do_nothing_cl);
}
// Class unloading and cleanup.
if (ClassUnloading) {
GCTraceTime(Debug, gc, phases) debug("Phase 1: Class Unloading and Cleanup", scope()->timer());
// Unload classes and purge the SystemDictionary.
bool purged_class = SystemDictionary::do_unloading(&_is_alive, scope()->timer());
G1CollectedHeap::heap()->complete_cleaning(&_is_alive, purged_class);
} else {
GCTraceTime(Debug, gc, phases) debug("Phase 1: String and Symbol Tables Cleanup", scope()->timer());
// If no class unloading just clean out strings and symbols.
G1CollectedHeap::heap()->partial_cleaning(&_is_alive, true, true, G1StringDedup::is_enabled());
}
scope()->tracer()->report_object_count_after_gc(&_is_alive);
}
void G1FullCollector::prepare_compaction_common() {
G1FullGCPrepareTask task(this);
run_task(&task);
// Prepare a serial compaction fallback when no regions were freed, to avoid an OOM while there is still memory left.
if (!task.has_freed_regions()) {
task.prepare_serial_compaction();
}
}
void G1FullCollector::phase2_prepare_compaction() {
GCTraceTime(Info, gc, phases) info("Phase 2: Prepare for compaction", scope()->timer());
prepare_compaction_ext(); // Will call prepare_compaction_common() above.
}
void G1FullCollector::phase3_adjust_pointers() {
// Adjust the pointers to reflect the new locations
GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers and remembered sets", scope()->timer());
G1FullGCAdjustTask task(this);
run_task(&task);
}
void G1FullCollector::phase4_do_compaction() {
// Compact the heap using the compaction queues created in phase 2.
GCTraceTime(Info, gc, phases) info("Phase 4: Compact heap", scope()->timer());
G1FullGCCompactTask task(this);
run_task(&task);
// Serial compaction fallback to avoid OOM when there are very few free regions.
if (serial_compaction_point()->has_regions()) {
task.serial_compaction();
}
}
void G1FullCollector::restore_marks() {
SharedRestorePreservedMarksTaskExecutor task_executor(G1CollectedHeap::heap()->workers());
_preserved_marks_set.restore(&task_executor);
_preserved_marks_set.reclaim();
}
void G1FullCollector::run_task(AbstractGangTask* task) {
G1CollectedHeap::heap()->workers()->run_task(task, _num_workers);
}
void G1FullCollector::verify_after_marking() {
if (!VerifyDuringGC) {
// Only do verification if VerifyDuringGC is set.
return;
}
HandleMark hm; // handle scope
#if COMPILER2_OR_JVMCI
DerivedPointerTableDeactivate dpt_deact;
#endif
G1CollectedHeap::heap()->prepare_for_verify();
// Note: we can verify only the heap here. When an object is
// marked, the previous value of the mark word (including
// identity hash values, ages, etc) is preserved, and the mark
// word is set to markOop::marked_value - effectively removing
// any hash values from the mark word. These hash values are
// used when verifying the dictionaries and so removing them
// from the mark word can make verification of the dictionaries
// fail. At the end of the GC, the original mark word values
// (including hash values) are restored to the appropriate
// objects.
GCTraceTime(Info, gc, verify)("During GC (full)");
G1CollectedHeap::heap()->verify(VerifyOption_G1UseFullMarking);
}

View file

@ -0,0 +1,98 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_G1_G1FULLCOLLECTOR_HPP
#define SHARE_GC_G1_G1FULLCOLLECTOR_HPP
#include "gc/g1/g1FullGCCompactionPoint.hpp"
#include "gc/g1/g1FullGCMarker.hpp"
#include "gc/g1/g1FullGCOopClosures.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/taskqueue.hpp"
#include "memory/allocation.hpp"
class AbstractGangTask;
class G1CMBitMap;
class G1FullGCMarker;
class G1FullGCScope;
class G1FullGCCompactionPoint;
class ReferenceProcessor;
// The G1FullCollector holds data associated with the current Full GC.
class G1FullCollector : StackObj {
G1FullGCScope* _scope;
uint _num_workers;
G1FullGCMarker** _markers;
G1FullGCCompactionPoint** _compaction_points;
G1CMBitMap* _mark_bitmap;
OopQueueSet _oop_queue_set;
ObjArrayTaskQueueSet _array_queue_set;
PreservedMarksSet _preserved_marks_set;
ReferenceProcessor* _reference_processor;
G1FullGCCompactionPoint _serial_compaction_point;
G1IsAliveClosure _is_alive;
ReferenceProcessorIsAliveMutator _is_alive_mutator;
public:
G1FullCollector(G1FullGCScope* scope,
ReferenceProcessor* reference_processor,
G1CMBitMap* mark_bitmap,
uint workers);
~G1FullCollector();
void prepare_collection();
void collect();
void complete_collection();
G1FullGCScope* scope() { return _scope; }
uint workers() { return _num_workers; }
G1FullGCMarker* marker(uint id) { return _markers[id]; }
G1FullGCCompactionPoint* compaction_point(uint id) { return _compaction_points[id]; }
G1CMBitMap* mark_bitmap() { return _mark_bitmap; }
OopQueueSet* oop_queue_set() { return &_oop_queue_set; }
ObjArrayTaskQueueSet* array_queue_set() { return &_array_queue_set; }
PreservedMarksSet* preserved_mark_set() { return &_preserved_marks_set; }
ReferenceProcessor* reference_processor() { return _reference_processor; }
G1FullGCCompactionPoint* serial_compaction_point() { return &_serial_compaction_point; }
private:
void phase1_mark_live_objects();
void phase2_prepare_compaction();
void phase3_adjust_pointers();
void phase4_do_compaction();
void restore_marks();
void verify_after_marking();
void run_task(AbstractGangTask* task);
// Prepare compaction extension support.
void prepare_compaction_ext();
void prepare_compaction_common();
};
#endif // SHARE_GC_G1_G1FULLCOLLECTOR_HPP

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -23,9 +23,8 @@
*/ */
#include "precompiled.hpp" #include "precompiled.hpp"
#include "gc/g1/g1MarkSweep.hpp" #include "gc/g1/g1FullCollector.hpp"
void G1MarkSweep::prepare_compaction() { void G1FullCollector::prepare_compaction_ext() {
G1PrepareCompactClosure blk; prepare_compaction_common();
G1MarkSweep::prepare_compaction_work(&blk);
} }

View file

@ -0,0 +1,119 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
#include "gc/g1/g1FullCollector.hpp"
#include "gc/g1/g1FullGCAdjustTask.hpp"
#include "gc/g1/g1FullGCCompactionPoint.hpp"
#include "gc/g1/g1FullGCMarker.hpp"
#include "gc/g1/g1FullGCOopClosures.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "logging/log.hpp"
#include "utilities/ticks.inline.hpp"
class G1AdjustLiveClosure : public StackObj {
G1AdjustAndRebuildClosure* _adjust_closure;
public:
G1AdjustLiveClosure(G1AdjustAndRebuildClosure* cl) :
_adjust_closure(cl) { }
size_t apply(oop object) {
_adjust_closure->update_compaction_delta(object);
return object->oop_iterate_size(_adjust_closure);
}
};
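// Adjusts references in the live objects of one heap region. For a humongous region,
// only the slice of the (single) humongous object that lies in this region is processed.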
class G1AdjustRegionClosure : public HeapRegionClosure {
G1CMBitMap* _bitmap;
uint _worker_id;
public:
G1AdjustRegionClosure(G1CMBitMap* bitmap, uint worker_id) :
_bitmap(bitmap),
_worker_id(worker_id) { }
bool doHeapRegion(HeapRegion* r) {
G1AdjustAndRebuildClosure cl(_worker_id);
if (r->is_humongous()) {
oop obj = oop(r->humongous_start_region()->bottom());
cl.update_compaction_delta(obj);
obj->oop_iterate(&cl, MemRegion(r->bottom(), r->top()));
} else if (r->is_open_archive()) {
// Only adjust the open archive regions, the closed ones
// never change.
G1AdjustLiveClosure adjust(&cl);
r->apply_to_marked_objects(_bitmap, &adjust);
// Open archive regions will not be compacted and the marking information is
// no longer needed. Clear it here to avoid having to do it later.
_bitmap->clear_region(r);
} else {
G1AdjustLiveClosure adjust(&cl);
r->apply_to_marked_objects(_bitmap, &adjust);
}
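// Returning false continues the iteration over the remaining regions.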
return false;
}
};
G1FullGCAdjustTask::G1FullGCAdjustTask(G1FullCollector* collector) :
G1FullGCTask("G1 Adjust and Rebuild", collector),
_root_processor(G1CollectedHeap::heap(), collector->workers()),
_hrclaimer(collector->workers()),
_adjust(),
_adjust_string_dedup(NULL, &_adjust, G1StringDedup::is_enabled()) {
// Need cleared claim bits for the roots processing
ClassLoaderDataGraph::clear_claimed_marks();
}
void G1FullGCAdjustTask::work(uint worker_id) {
Ticks start = Ticks::now();
ResourceMark rm;
// Adjust preserved marks first since they are not balanced.
G1FullGCMarker* marker = collector()->marker(worker_id);
marker->preserved_stack()->adjust_during_full_gc();
// Adjust the weak_roots.
CLDToOopClosure adjust_cld(&_adjust);
CodeBlobToOopClosure adjust_code(&_adjust, CodeBlobToOopClosure::FixRelocations);
_root_processor.process_full_gc_weak_roots(&_adjust);
// Needs to be last, process_all_roots calls all_tasks_completed(...).
_root_processor.process_all_roots(
&_adjust,
&adjust_cld,
&adjust_code);
// Adjust string dedup if enabled.
if (G1StringDedup::is_enabled()) {
G1StringDedup::parallel_unlink(&_adjust_string_dedup, worker_id);
}
// Now adjust pointers region by region
G1AdjustRegionClosure blk(collector()->mark_bitmap(), worker_id);
G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&blk, &_hrclaimer, worker_id);
log_task("Adjust and Rebuild task", worker_id, start);
}

View file

@@ -22,28 +22,27 @@
  *
  */
 
-#ifndef SHARE_VM_GC_G1_G1SERIALCOLLECTOR_HPP
-#define SHARE_VM_GC_G1_G1SERIALCOLLECTOR_HPP
+#ifndef SHARE_GC_G1_G1FULLGCADJUSTTASK_HPP
+#define SHARE_GC_G1_G1FULLGCADJUSTTASK_HPP
 
-#include "memory/allocation.hpp"
+#include "gc/g1/g1FullGCOopClosures.hpp"
+#include "gc/g1/g1FullGCTask.hpp"
+#include "gc/g1/g1RootProcessor.hpp"
+#include "gc/g1/g1StringDedup.hpp"
+#include "gc/g1/heapRegionManager.hpp"
+#include "utilities/ticks.hpp"
 
-class G1FullGCScope;
-class ReferenceProcessor;
+class G1CollectedHeap;
 
-class G1SerialFullCollector : StackObj {
-  G1FullGCScope* _scope;
-  ReferenceProcessor* _reference_processor;
-  ReferenceProcessorIsAliveMutator _is_alive_mutator;
-  ReferenceProcessorMTDiscoveryMutator _mt_discovery_mutator;
-
-  void rebuild_remembered_sets();
+class G1FullGCAdjustTask : public G1FullGCTask {
+  G1RootProcessor _root_processor;
+  HeapRegionClaimer _hrclaimer;
+  G1AdjustClosure _adjust;
+  G1StringDedupUnlinkOrOopsDoClosure _adjust_string_dedup;
 
 public:
-  G1SerialFullCollector(G1FullGCScope* scope, ReferenceProcessor* reference_processor);
-
-  void prepare_collection();
-  void collect();
-  void complete_collection();
+  G1FullGCAdjustTask(G1FullCollector* collector);
+  void work(uint worker_id);
 };
 
-#endif // SHARE_VM_GC_G1_G1SERIALCOLLECTOR_HPP
+#endif // SHARE_GC_G1_G1FULLGCADJUSTTASK_HPP

Some files were not shown because too many files have changed in this diff.