Mirror of https://github.com/openjdk/jdk.git (synced 2025-08-25 13:54:38 +02:00)

Merge commit 7e65ec970c
526 changed files with 3787 additions and 3957 deletions
@@ -64,8 +64,8 @@ public class DefNewGeneration extends Generation {
 }

 // Accessing spaces
-public EdenSpace eden() {
-return (EdenSpace) VMObjectFactory.newObject(EdenSpace.class, edenSpaceField.getValue(addr));
+public ContiguousSpace eden() {
+return (ContiguousSpace) VMObjectFactory.newObject(ContiguousSpace.class, edenSpaceField.getValue(addr));
 }

 public ContiguousSpace from() {
@@ -219,7 +219,7 @@ public class OopUtilities implements /* imports */ JVMTIThreadState {
 if (threadNameField == null) {
 SystemDictionary sysDict = VM.getVM().getSystemDictionary();
 InstanceKlass k = sysDict.getThreadKlass();
-threadNameField = (OopField) k.findField("name", "[C");
+threadNameField = (OopField) k.findField("name", "Ljava/lang/String;");
 threadGroupField = (OopField) k.findField("group", "Ljava/lang/ThreadGroup;");
 threadEETopField = (LongField) k.findField("eetop", "J");
 threadTIDField = (LongField) k.findField("tid", "J");
@@ -258,7 +258,7 @@ public class OopUtilities implements /* imports */ JVMTIThreadState {

 public static String threadOopGetName(Oop threadOop) {
 initThreadFields();
-return charArrayToString((TypeArray) threadNameField.getValue(threadOop));
+return stringOopToString(threadNameField.getValue(threadOop));
 }

 /** May return null if, e.g., thread was not started */
@@ -40,6 +40,8 @@ AGENT_DIR = $(GAMMADIR)/agent

 include $(GAMMADIR)/make/sa.files

+-include $(HS_ALT_MAKE)/bsd/makefiles/sa.make
+
 TOPDIR = $(shell echo `pwd`)
 GENERATED = $(TOPDIR)/../generated

@@ -214,7 +214,7 @@ ifeq ($(USE_CLANG), true)
 WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body
 endif

-WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value -Wformat=2
+WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value -Wformat=2 -Wreturn-type

 ifeq ($(USE_CLANG),)
 # Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
@ -1,54 +0,0 @@
|
||||||
#
|
|
||||||
# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
|
|
||||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
|
||||||
#
|
|
||||||
# This code is free software; you can redistribute it and/or modify it
|
|
||||||
# under the terms of the GNU General Public License version 2 only, as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# This code is distributed in the hope that it will be useful, but WITHOUT
|
|
||||||
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
||||||
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
||||||
# version 2 for more details (a copy is included in the LICENSE file that
|
|
||||||
# accompanied this code).
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU General Public License version
|
|
||||||
# 2 along with this work; if not, write to the Free Software Foundation,
|
|
||||||
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
||||||
#
|
|
||||||
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
|
||||||
# or visit www.oracle.com if you need additional information or have any
|
|
||||||
# questions.
|
|
||||||
#
|
|
||||||
#
|
|
||||||
|
|
||||||
# Rules to build add_gnu_debuglink, used by vm.make on Solaris
|
|
||||||
|
|
||||||
# Allow $(ADD_GNU_DEBUGLINK) to be called from any directory.
|
|
||||||
# We don't set or use the GENERATED macro to avoid affecting
|
|
||||||
# other HotSpot Makefiles.
|
|
||||||
TOPDIR = $(shell echo `pwd`)
|
|
||||||
ADD_GNU_DEBUGLINK = $(TOPDIR)/../generated/add_gnu_debuglink
|
|
||||||
|
|
||||||
ADD_GNU_DEBUGLINK_DIR = $(GAMMADIR)/src/os/solaris/add_gnu_debuglink
|
|
||||||
ADD_GNU_DEBUGLINK_SRC = $(ADD_GNU_DEBUGLINK_DIR)/add_gnu_debuglink.c
|
|
||||||
ADD_GNU_DEBUGLINK_FLAGS =
|
|
||||||
LIBS_ADD_GNU_DEBUGLINK += -lelf
|
|
||||||
|
|
||||||
ifeq ("${Platform_compiler}", "sparcWorks")
|
|
||||||
# Enable the following ADD_GNU_DEBUGLINK_FLAGS addition if you need to
|
|
||||||
# compare the built ELF objects.
|
|
||||||
#
|
|
||||||
# The -g option makes static data global and the "-W0,-noglobal"
|
|
||||||
# option tells the compiler to not globalize static data using a unique
|
|
||||||
# globalization prefix. Instead force the use of a static globalization
|
|
||||||
# prefix based on the source filepath so the objects from two identical
|
|
||||||
# compilations are the same.
|
|
||||||
#
|
|
||||||
# Note: The blog says to use "-W0,-xglobalstatic", but that doesn't
|
|
||||||
# seem to work. I got "-W0,-noglobal" from Kelly and that works.
|
|
||||||
#ADD_GNU_DEBUGLINK_FLAGS += -W0,-noglobal
|
|
||||||
endif # Platform_compiler == sparcWorks
|
|
||||||
|
|
||||||
$(ADD_GNU_DEBUGLINK): $(ADD_GNU_DEBUGLINK_SRC)
|
|
||||||
$(CC) -g -o $@ $< $(ADD_GNU_DEBUGLINK_FLAGS) $(LIBS_ADD_GNU_DEBUGLINK)
|
|
|
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -138,6 +138,55 @@ ifeq ($(JDK6_OR_EARLIER),0)
 OBJCOPY=$(shell test -x $(ALT_OBJCOPY) && echo $(ALT_OBJCOPY))
 endif

+ifneq ($(OBJCOPY),)
+# OBJCOPY version check:
+# - version number is last blank separate word on first line
+# - version number formats that have been seen:
+# - <major>.<minor>
+# - <major>.<minor>.<micro>
+#
+# Full Debug Symbols on Solaris needs version 2.21.1 or newer.
+#
+OBJCOPY_VERS_CHK := $(shell \
+$(OBJCOPY) --version \
+| sed -n \
+-e 's/.* //' \
+-e '/^[01]\./b bad' \
+-e '/^2\./{' \
+-e ' s/^2\.//' \
+-e ' /^[0-9]$$/b bad' \
+-e ' /^[0-9]\./b bad' \
+-e ' /^1[0-9]$$/b bad' \
+-e ' /^1[0-9]\./b bad' \
+-e ' /^20\./b bad' \
+-e ' /^21\.0$$/b bad' \
+-e ' /^21\.0\./b bad' \
+-e '}' \
+-e ':good' \
+-e 's/.*/VALID_VERSION/p' \
+-e 'q' \
+-e ':bad' \
+-e 's/.*/BAD_VERSION/p' \
+-e 'q' \
+)
+ifeq ($(OBJCOPY_VERS_CHK),BAD_VERSION)
+_JUNK_ := $(shell \
+echo >&2 "WARNING: $(OBJCOPY) --version info:"; \
+$(OBJCOPY) --version | sed -n -e 's/^/WARNING: /p' -e 'q' >&2; \
+echo >&2 "WARNING: an objcopy version of 2.21.1 or newer" \
+"is needed to create valid .debuginfo files."; \
+echo >&2 "WARNING: ignoring above objcopy command."; \
+echo >&2 "WARNING: patch 149063-01 or newer contains the" \
+"correct Solaris 10 SPARC version."; \
+echo >&2 "WARNING: patch 149064-01 or newer contains the" \
+"correct Solaris 10 X86 version."; \
+echo >&2 "WARNING: Solaris 11 Update 1 contains the" \
+"correct version."; \
+)
+OBJCOPY=
+endif
+endif
+
 ifeq ($(OBJCOPY),)
 $(eval $(call print_info, "no objcopy cmd found so cannot create .debuginfo files."))
 ENABLE_FULL_DEBUG_SYMBOLS=0
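The added sed pipeline only has to answer one question: does the version token at the end of the first "objcopy --version" line meet the 2.21.1 minimum named in the comment above? A minimal standalone sketch of that check, written in C++ purely for illustration (the helper name and the sample version string are invented; only the 2.21.1 threshold comes from the makefile comment, and this does not try to reproduce the sed script's exact edge cases):

#include <cstdio>
#include <string>

// Parse the trailing "major.minor[.micro]" token of an objcopy --version line
// and compare it against the 2.21.1 minimum documented in the makefile comment.
static bool objcopy_is_new_enough(const std::string& first_line) {
  std::string::size_type pos = first_line.find_last_of(' ');
  std::string ver = (pos == std::string::npos) ? first_line : first_line.substr(pos + 1);
  int major = 0, minor = 0, micro = 0;
  if (std::sscanf(ver.c_str(), "%d.%d.%d", &major, &minor, &micro) < 2) {
    return false;                      // unparsable -> treat as BAD_VERSION
  }
  if (major != 2)  return major > 2;
  if (minor != 21) return minor > 21;
  return micro >= 1;                   // 2.21.1 or newer
}

int main() {
  const char* sample = "GNU objcopy (GNU Binutils) 2.21.1";   // invented sample input
  std::printf("%s\n", objcopy_is_new_enough(sample) ? "VALID_VERSION" : "BAD_VERSION");
  return 0;
}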
@@ -101,25 +101,16 @@ XLIBJVM_DB_DIZ = $(XLIBJVM_DIR)/$(LIBJVM_DB_DIZ)
 XLIBJVM_DTRACE_DEBUGINFO = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE_DEBUGINFO)
 XLIBJVM_DTRACE_DIZ = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE_DIZ)

-$(XLIBJVM_DB): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS).h $(LIBJVM_DB_MAPFILE)
+$(XLIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS).h $(LIBJVM_DB_MAPFILE)
 @echo $(LOG_INFO) Making $@
 $(QUIETLY) mkdir -p $(XLIBJVM_DIR) ; \
 $(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. -I$(GENERATED) \
 $(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -lc
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-# gobjcopy crashes on "empty" section headers with the SHF_ALLOC flag set.
-# Clear the SHF_ALLOC flag (if set) from empty section headers.
-# An empty section header has sh_addr == 0 and sh_size == 0.
-# This problem has only been seen on Solaris X64, but we call this tool
-# on all Solaris builds just in case.
-$(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@
 $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(XLIBJVM_DB_DEBUGINFO)
-# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections.
-# Use $(ADD_GNU_DEBUGLINK) until a fixed $(OBJCOPY) is available.
-# $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB) ;
 # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR) is not
 # in the link name:
-( cd $(XLIBJVM_DIR) && $(ADD_GNU_DEBUGLINK) $(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB) )
+( cd $(XLIBJVM_DIR) && $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB) )
 ifeq ($(STRIP_POLICY),all_strip)
 $(QUIETLY) $(STRIP) $@
 else
@@ -136,20 +127,16 @@ ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
 endif
 endif

-$(XLIBJVM_DTRACE): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
+$(XLIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
 @echo $(LOG_INFO) Making $@
 $(QUIETLY) mkdir -p $(XLIBJVM_DIR) ; \
 $(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. \
 $(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-# Clear the SHF_ALLOC flag (if set) from empty section headers.
-$(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@
 $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(XLIBJVM_DTRACE_DEBUGINFO)
-# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections.
-# $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE) ;
 # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR) is not
 # in the link name:
-( cd $(XLIBJVM_DIR) && $(ADD_GNU_DEBUGLINK) $(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE) )
+( cd $(XLIBJVM_DIR) && $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE) )
 ifeq ($(STRIP_POLICY),all_strip)
 $(QUIETLY) $(STRIP) $@
 else
@@ -206,17 +193,13 @@ $(JVMOFFS).cpp: $(GENOFFS) $(JVMOFFS).h $(JVMOFFS)Index.h
 $(JVMOFFS.o): $(JVMOFFS).h $(JVMOFFS).cpp
 $(QUIETLY) $(CXX) -c -I. -o $@ $(ARCHFLAG) -D$(TYPE) $(JVMOFFS).cpp

-$(LIBJVM_DB): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS.o) $(XLIBJVM_DB) $(LIBJVM_DB_MAPFILE)
+$(LIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS.o) $(XLIBJVM_DB) $(LIBJVM_DB_MAPFILE)
 @echo $(LOG_INFO) Making $@
 $(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. -I$(GENERATED) \
 $(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -lc
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-# Clear the SHF_ALLOC flag (if set) from empty section headers.
-$(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@
 $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DB_DEBUGINFO)
-# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections.
-# $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $@
-$(QUIETLY) $(ADD_GNU_DEBUGLINK) $(LIBJVM_DB_DEBUGINFO) $@
+$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $@
 ifeq ($(STRIP_POLICY),all_strip)
 $(QUIETLY) $(STRIP) $@
 else
@@ -231,17 +214,13 @@ ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
 endif
 endif

-$(LIBJVM_DTRACE): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(XLIBJVM_DTRACE) $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
+$(LIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(XLIBJVM_DTRACE) $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
 @echo $(LOG_INFO) Making $@
 $(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. \
 $(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-# Clear the SHF_ALLOC flag (if set) from empty section headers.
-$(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@
 $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DTRACE_DEBUGINFO)
-# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections.
-# $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $@
-$(QUIETLY) $(ADD_GNU_DEBUGLINK) $(LIBJVM_DTRACE_DEBUGINFO) $@
+$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $@
 ifeq ($(STRIP_POLICY),all_strip)
 $(QUIETLY) $(STRIP) $@
 else
@ -1,54 +0,0 @@
|
||||||
#
|
|
||||||
# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
|
|
||||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
|
||||||
#
|
|
||||||
# This code is free software; you can redistribute it and/or modify it
|
|
||||||
# under the terms of the GNU General Public License version 2 only, as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# This code is distributed in the hope that it will be useful, but WITHOUT
|
|
||||||
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
||||||
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
||||||
# version 2 for more details (a copy is included in the LICENSE file that
|
|
||||||
# accompanied this code).
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU General Public License version
|
|
||||||
# 2 along with this work; if not, write to the Free Software Foundation,
|
|
||||||
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
||||||
#
|
|
||||||
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
|
||||||
# or visit www.oracle.com if you need additional information or have any
|
|
||||||
# questions.
|
|
||||||
#
|
|
||||||
#
|
|
||||||
|
|
||||||
# Rules to build fix_empty_sec_hdr_flags, used by vm.make on Solaris
|
|
||||||
|
|
||||||
# Allow $(FIX_EMPTY_SEC_HDR_FLAGS) to be called from any directory.
|
|
||||||
# We don't set or use the GENERATED macro to avoid affecting
|
|
||||||
# other HotSpot Makefiles.
|
|
||||||
TOPDIR = $(shell echo `pwd`)
|
|
||||||
FIX_EMPTY_SEC_HDR_FLAGS = $(TOPDIR)/../generated/fix_empty_sec_hdr_flags
|
|
||||||
|
|
||||||
FIX_EMPTY_SEC_HDR_FLAGS_DIR = $(GAMMADIR)/src/os/solaris/fix_empty_sec_hdr_flags
|
|
||||||
FIX_EMPTY_SEC_HDR_FLAGS_SRC = $(FIX_EMPTY_SEC_HDR_FLAGS_DIR)/fix_empty_sec_hdr_flags.c
|
|
||||||
FIX_EMPTY_SEC_HDR_FLAGS_FLAGS =
|
|
||||||
LIBS_FIX_EMPTY_SEC_HDR_FLAGS += -lelf
|
|
||||||
|
|
||||||
ifeq ("${Platform_compiler}", "sparcWorks")
|
|
||||||
# Enable the following FIX_EMPTY_SEC_HDR_FLAGS_FLAGS addition if you need to
|
|
||||||
# compare the built ELF objects.
|
|
||||||
#
|
|
||||||
# The -g option makes static data global and the "-W0,-noglobal"
|
|
||||||
# option tells the compiler to not globalize static data using a unique
|
|
||||||
# globalization prefix. Instead force the use of a static globalization
|
|
||||||
# prefix based on the source filepath so the objects from two identical
|
|
||||||
# compilations are the same.
|
|
||||||
#
|
|
||||||
# Note: The blog says to use "-W0,-xglobalstatic", but that doesn't
|
|
||||||
# seem to work. I got "-W0,-noglobal" from Kelly and that works.
|
|
||||||
#FIX_EMPTY_SEC_HDR_FLAGS_FLAGS += -W0,-noglobal
|
|
||||||
endif # Platform_compiler == sparcWorks
|
|
||||||
|
|
||||||
$(FIX_EMPTY_SEC_HDR_FLAGS): $(FIX_EMPTY_SEC_HDR_FLAGS_SRC)
|
|
||||||
$(CC) -g -o $@ $< $(FIX_EMPTY_SEC_HDR_FLAGS_FLAGS) $(LIBS_FIX_EMPTY_SEC_HDR_FLAGS)
|
|
|
@@ -47,22 +47,13 @@ else
 LFLAGS_JSIG += -mt -xnolib
 endif

-$(LIBJSIG): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE)
+$(LIBJSIG): $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE)
 @echo $(LOG_INFO) Making signal interposition lib...
 $(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
 $(LFLAGS_JSIG) -o $@ $(JSIGSRCDIR)/jsig.c -ldl
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-# gobjcopy crashes on "empty" section headers with the SHF_ALLOC flag set.
-# Clear the SHF_ALLOC flag (if set) from empty section headers.
-# An empty section header has sh_addr == 0 and sh_size == 0.
-# This problem has only been seen on Solaris X64, but we call this tool
-# on all Solaris builds just in case.
-$(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@
 $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJSIG_DEBUGINFO)
-# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections.
-# Use $(ADD_GNU_DEBUGLINK) until a fixed $(OBJCOPY) is available.
-# $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJSIG_DEBUGINFO) $@
-$(QUIETLY) $(ADD_GNU_DEBUGLINK) $(LIBJSIG_DEBUGINFO) $@
+$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJSIG_DEBUGINFO) $@
 ifeq ($(STRIP_POLICY),all_strip)
 $(QUIETLY) $(STRIP) $@
 else
@@ -90,7 +90,7 @@ $(shell uname -r -v \
 #SOLARIS_11_B159_OR_LATER=-DSOLARIS_11_B159_OR_LATER


-$(LIBSAPROC): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(SASRCFILES) $(SADISOBJ) $(SAMAPFILE)
+$(LIBSAPROC): $(SASRCFILES) $(SADISOBJ) $(SAMAPFILE)
 $(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \
 echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
 exit 1; \
@@ -121,17 +121,8 @@ $(SADISOBJ): $(SADISSRCFILES)
 -c -o $(SADISOBJ)

 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-# gobjcopy crashes on "empty" section headers with the SHF_ALLOC flag set.
-# Clear the SHF_ALLOC flag (if set) from empty section headers.
-# An empty section header has sh_addr == 0 and sh_size == 0.
-# This problem has only been seen on Solaris X64, but we call this tool
-# on all Solaris builds just in case.
-$(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@
 $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBSAPROC_DEBUGINFO)
-# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections.
-# Use $(ADD_GNU_DEBUGLINK) until a fixed $(OBJCOPY) is available.
-# $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBSAPROC_DEBUGINFO) $@
-$(QUIETLY) $(ADD_GNU_DEBUGLINK) $(LIBSAPROC_DEBUGINFO) $@
+$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBSAPROC_DEBUGINFO) $@
 ifeq ($(STRIP_POLICY),all_strip)
 $(QUIETLY) $(STRIP) $@
 else
@@ -154,14 +154,6 @@ JDK_LIBDIR = $(JAVA_HOME)/jre/lib/$(LIBARCH)
 # jvm_db & dtrace
 include $(MAKEFILES_DIR)/dtrace.make

-#----------------------------------------------------------------------
-# add_gnu_debuglink tool
-include $(MAKEFILES_DIR)/add_gnu_debuglink.make
-
-#----------------------------------------------------------------------
-# fix_empty_sec_hdr_flags tool
-include $(MAKEFILES_DIR)/fix_empty_sec_hdr_flags.make
-
 #----------------------------------------------------------------------
 # JVM

@@ -302,7 +294,7 @@ else
 LINK_VM = $(LINK_LIB.CXX)
 endif
 # making the library:
-$(LIBJVM): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(LIBJVM.o) $(LIBJVM_MAPFILE)
+$(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE)
 ifeq ($(filter -sbfast -xsbfast, $(CFLAGS_BROWSE)),)
 @echo $(LOG_INFO) Linking vm...
 $(QUIETLY) $(LINK_LIB.CXX/PRE_HOOK)
@@ -310,17 +302,8 @@ ifeq ($(filter -sbfast -xsbfast, $(CFLAGS_BROWSE)),)
 $(QUIETLY) $(LINK_LIB.CXX/POST_HOOK)
 $(QUIETLY) rm -f $@.1 && ln -s $@ $@.1
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-# gobjcopy crashes on "empty" section headers with the SHF_ALLOC flag set.
-# Clear the SHF_ALLOC flag (if set) from empty section headers.
-# An empty section header has sh_addr == 0 and sh_size == 0.
-# This problem has only been seen on Solaris X64, but we call this tool
-# on all Solaris builds just in case.
-$(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@
 $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DEBUGINFO)
-# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections.
-# Use $(ADD_GNU_DEBUGLINK) until a fixed $(OBJCOPY) is available.
-# $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DEBUGINFO) $@
-$(QUIETLY) $(ADD_GNU_DEBUGLINK) $(LIBJVM_DEBUGINFO) $@
+$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DEBUGINFO) $@
 ifeq ($(STRIP_POLICY),all_strip)
 $(QUIETLY) $(STRIP) $@
 else
@@ -122,7 +122,7 @@ SA_LFLAGS = $(SA_LD_FLAGS) -nologo -subsystem:console -machine:$(MACHINE)
 SA_LFLAGS = $(SA_LFLAGS) -map -debug
 !endif
 !if "$(BUILDARCH)" == "i486"
-SA_LFLAGS = $(SAFESEH_FLAG) $(SA_LFLAGS)
+SA_LFLAGS = /SAFESEH $(SA_LFLAGS)
 !endif

 SA_CFLAGS = $(SA_CFLAGS) $(MP_FLAG)
@@ -89,19 +89,24 @@ AGCT_EXPORT=/export:AsyncGetCallTrace

 # If you modify exports below please do the corresponding changes in
 # src/share/tools/ProjectCreator/WinGammaPlatformVC7.java
-LD_FLAGS=$(LD_FLAGS) $(STACK_SIZE) /subsystem:windows /dll /base:0x8000000 \
-/export:JNI_GetDefaultJavaVMInitArgs \
+!if "$(BUILDARCH)" == "amd64"
+EXPORT_LIST=
+!else
+EXPORT_LIST=/export:JNI_GetDefaultJavaVMInitArgs \
 /export:JNI_CreateJavaVM \
 /export:JVM_FindClassFromBootLoader \
 /export:JNI_GetCreatedJavaVMs \
 /export:jio_snprintf \
 /export:jio_printf \
 /export:jio_fprintf \
 /export:jio_vfprintf \
 /export:jio_vsnprintf \
 $(AGCT_EXPORT) \
 /export:JVM_GetVersionInfo \
 /export:JVM_InitAgentProperties
+!endif

+LD_FLAGS=$(LD_FLAGS) $(STACK_SIZE) /subsystem:windows /dll /base:0x8000000 $(EXPORT_LIST)
+
 CXX_INCLUDE_DIRS=/I "..\generated"

@@ -27,6 +27,7 @@
 #define CPU_PPC_VM_MACROASSEMBLER_PPC_HPP

 #include "asm/assembler.hpp"
+#include "utilities/macros.hpp"

 // MacroAssembler extends Assembler by a few frequently used macros.

@@ -3513,7 +3513,7 @@ void TemplateTable::_new() {
 Rtags = R3_ARG1,
 Rindex = R5_ARG3;

-const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
+const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc();

 // --------------------------------------------------------------------------
 // Check if fast case is possible.
@@ -2734,12 +2734,12 @@ void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg,
 // box->dhw disposition - post-conditions at DONE_LABEL.
 // - Successful inflated lock: box->dhw != 0.
 // Any non-zero value suffices.
-// Consider G2_thread, rsp, boxReg, or unused_mark()
+// Consider G2_thread, rsp, boxReg, or markOopDesc::unused_mark()
 // - Successful Stack-lock: box->dhw == mark.
 // box->dhw must contain the displaced mark word value
 // - Failure -- icc.ZFlag == 0 and box->dhw is undefined.
 // The slow-path fast_enter() and slow_enter() operators
-// are responsible for setting box->dhw = NonZero (typically ::unused_mark).
+// are responsible for setting box->dhw = NonZero (typically markOopDesc::unused_mark()).
 // - Biased: box->dhw is undefined
 //
 // SPARC refworkload performance - specifically jetstream and scimark - are
@@ -2855,7 +2855,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
 // If m->owner != null goto IsLocked
 // Pessimistic form: Test-and-CAS vs CAS
 // The optimistic form avoids RTS->RTO cache line upgrades.
-ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
+ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rscratch);
 andcc(Rscratch, Rscratch, G0);
 brx(Assembler::notZero, false, Assembler::pn, done);
 delayed()->nop();
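Throughout the SPARC and x86 locking paths this change replaces the hand-written "ObjectMonitor::..._offset_in_bytes() - 2" adjustments with the OM_OFFSET_NO_MONITOR_VALUE_TAG(field) macro. The underlying trick, spelled out in a comment block that is removed further down in this diff, is that an inflated-lock pointer carries the markOopDesc::monitor_value tag (2) in its low bits, and biasing each field offset by the tag value avoids having to mask the tag off first. The standalone sketch below illustrates only that arithmetic; the Monitor struct, the constant kMonitorTag, and the macro name OM_OFFSET_NO_TAG are invented for the example and are not part of the patch:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Toy stand-in for ObjectMonitor; the field layout is invented for illustration.
struct Monitor { void* owner; std::intptr_t recursions; };

const std::intptr_t kMonitorTag = 2;  // plays the role of markOopDesc::monitor_value

// Field offset pre-biased by the tag, so it can be added to a *tagged* pointer.
#define OM_OFFSET_NO_TAG(f) ((std::intptr_t)offsetof(Monitor, f) - kMonitorTag)

int main() {
  Monitor m{nullptr, 0};
  m.owner = &m;
  // Tagged pointer as it would appear in a mark word: Monitor* ORed with the tag.
  std::intptr_t tagged = (std::intptr_t)&m | kMonitorTag;
  // Adding the biased offset lands directly on m.owner without untagging,
  // which is the same effect the assembler gets from OM_OFFSET_NO_MONITOR_VALUE_TAG.
  void** owner_slot = (void**)(tagged + OM_OFFSET_NO_TAG(owner));
  std::printf("%s\n", (*owner_slot == &m) ? "reached m.owner through the tagged pointer"
                                          : "unexpected");
  return 0;
}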
@@ -2864,7 +2864,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,

 // Try to CAS m->owner from null to Self
 // Invariant: if we acquire the lock then _recursions should be 0.
-add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
+add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
 mov(G2_thread, Rscratch);
 cas_ptr(Rmark, G0, Rscratch);
 cmp(Rscratch, G0);
@@ -2948,7 +2948,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
 // Test-and-CAS vs CAS
 // Pessimistic form avoids futile (doomed) CAS attempts
 // The optimistic form avoids RTS->RTO cache line upgrades.
-ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
+ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rscratch);
 andcc(Rscratch, Rscratch, G0);
 brx(Assembler::notZero, false, Assembler::pn, done);
 delayed()->nop();
@@ -2957,13 +2957,13 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,

 // Try to CAS m->owner from null to Self
 // Invariant: if we acquire the lock then _recursions should be 0.
-add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
+add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
 mov(G2_thread, Rscratch);
 cas_ptr(Rmark, G0, Rscratch);
 cmp(Rscratch, G0);
 // ST box->displaced_header = NonZero.
 // Any non-zero value suffices:
-// unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
+// markOopDesc::unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
 st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
 // Intentional fall-through into done
 }
@@ -3031,30 +3031,30 @@ void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
 // Note that we use 1-0 locking by default for the inflated case. We
 // close the resultant (and rare) race by having contented threads in
 // monitorenter periodically poll _owner.
-ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
-ld_ptr(Rmark, ObjectMonitor::recursions_offset_in_bytes() - 2, Rbox);
+ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rscratch);
+ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions), Rbox);
 xor3(Rscratch, G2_thread, Rscratch);
 orcc(Rbox, Rscratch, Rbox);
 brx(Assembler::notZero, false, Assembler::pn, done);
 delayed()->
-ld_ptr(Rmark, ObjectMonitor::EntryList_offset_in_bytes() - 2, Rscratch);
-ld_ptr(Rmark, ObjectMonitor::cxq_offset_in_bytes() - 2, Rbox);
+ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList), Rscratch);
+ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq), Rbox);
 orcc(Rbox, Rscratch, G0);
 if (EmitSync & 65536) {
 Label LSucc ;
 brx(Assembler::notZero, false, Assembler::pn, LSucc);
 delayed()->nop();
 ba(done);
-delayed()->st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
+delayed()->st_ptr(G0, Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner));

 bind(LSucc);
-st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
+st_ptr(G0, Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner));
 if (os::is_MP()) { membar (StoreLoad); }
-ld_ptr(Rmark, ObjectMonitor::succ_offset_in_bytes() - 2, Rscratch);
+ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ), Rscratch);
 andcc(Rscratch, Rscratch, G0);
 brx(Assembler::notZero, false, Assembler::pt, done);
 delayed()->andcc(G0, G0, G0);
-add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
+add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
 mov(G2_thread, Rscratch);
 cas_ptr(Rmark, G0, Rscratch);
 // invert icc.zf and goto done
@@ -3066,7 +3066,7 @@ void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
 brx(Assembler::notZero, false, Assembler::pn, done);
 delayed()->nop();
 ba(done);
-delayed()->st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
+delayed()->st_ptr(G0, Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner));
 }

 bind (LStacked);
@@ -3196,7 +3196,7 @@ void MacroAssembler::eden_allocate(
 assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size");
 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");

-if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+if (!Universe::heap()->supports_inline_contig_alloc()) {
 // No allocation in the shared eden.
 ba(slow_case);
 delayed()->nop();
@@ -3331,7 +3331,7 @@ void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case
 assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */);
 Label do_refill, discard_tlab;

-if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+if (!Universe::heap()->supports_inline_contig_alloc()) {
 // No allocation in the shared eden.
 ba(slow_case);
 delayed()->nop();
@@ -4813,6 +4813,7 @@ class StubGenerator: public StubCodeGenerator {
 StubRoutines::_atomic_add_entry = generate_atomic_add();
 StubRoutines::_atomic_xchg_ptr_entry = StubRoutines::_atomic_xchg_entry;
 StubRoutines::_atomic_cmpxchg_ptr_entry = StubRoutines::_atomic_cmpxchg_entry;
+StubRoutines::_atomic_cmpxchg_byte_entry = ShouldNotCallThisStub();
 StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
 StubRoutines::_atomic_add_ptr_entry = StubRoutines::_atomic_add_entry;
 #endif // COMPILER2 !=> _LP64
@@ -3309,7 +3309,7 @@ void TemplateTable::_new() {
 // (creates a new TLAB, etc.)

 const bool allow_shared_alloc =
-Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
+Universe::heap()->supports_inline_contig_alloc();

 if(UseTLAB) {
 Register RoldTopValue = RallocatedObject;
@@ -1297,6 +1297,17 @@ void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg
 emit_operand(reg, adr);
 }

+// The 8-bit cmpxchg compares the value at adr with the contents of rax,
+// and stores reg into adr if so; otherwise, the value at adr is loaded into rax,.
+// The ZF is set if the compared values were equal, and cleared otherwise.
+void Assembler::cmpxchgb(Register reg, Address adr) { // cmpxchg
+InstructionMark im(this);
+prefix(adr, reg, true);
+emit_int8(0x0F);
+emit_int8((unsigned char)0xB0);
+emit_operand(reg, adr);
+}
+
 void Assembler::comisd(XMMRegister dst, Address src) {
 // NOTE: dbx seems to decode this as comiss even though the
 // 0x66 is there. Strangly ucomisd comes out correct
@@ -1006,6 +1006,7 @@ private:

 void cmpxchg8 (Address adr);

+void cmpxchgb(Register reg, Address adr);
 void cmpxchgl(Register reg, Address adr);

 void cmpxchgq(Register reg, Address adr);
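The new cmpxchgb emitter encodes CMPXCHG r/m8, r8 (opcode 0F B0), giving the JIT a byte-sized compare-and-exchange to pair with a lock prefix where atomicity is needed; the SPARC stub table above simply marks _atomic_cmpxchg_byte_entry as not callable. As a standalone illustration of the instruction's semantics only (this is not the HotSpot code; it uses portable std::atomic, which on x86 typically compiles down to a lock cmpxchg on a byte):

#include <atomic>
#include <cstdint>
#include <cstdio>

int main() {
  std::atomic<std::uint8_t> flag{0};  // the byte in memory, addressed by "adr"
  std::uint8_t expected = 0;          // plays the role of AL before CMPXCHG

  // Roughly: mov al, expected ; lock cmpxchg byte ptr [flag], 1
  // On success the byte is replaced and true is returned (ZF set);
  // on failure "expected" receives the current value (AL reloaded).
  bool swapped = flag.compare_exchange_strong(expected, std::uint8_t(1));

  std::printf("swapped=%d flag=%u expected=%u\n",
              swapped, unsigned(flag.load()), unsigned(expected));
  return 0;
}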
@@ -1450,8 +1450,7 @@ void MacroAssembler::rtm_retry_lock_on_abort(Register retry_count_Reg, Register
 void MacroAssembler::rtm_retry_lock_on_busy(Register retry_count_Reg, Register box_Reg,
 Register tmp_Reg, Register scr_Reg, Label& retryLabel) {
 Label SpinLoop, SpinExit, doneRetry;
-// Clean monitor_value bit to get valid pointer
-int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
+int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);

 testl(retry_count_Reg, retry_count_Reg);
 jccb(Assembler::zero, doneRetry);
|
||||||
// Use RTM for inflating locks
|
// Use RTM for inflating locks
|
||||||
// inputs: objReg (object to lock)
|
// inputs: objReg (object to lock)
|
||||||
// boxReg (on-stack box address (displaced header location) - KILLED)
|
// boxReg (on-stack box address (displaced header location) - KILLED)
|
||||||
// tmpReg (ObjectMonitor address + 2(monitor_value))
|
// tmpReg (ObjectMonitor address + markOopDesc::monitor_value)
|
||||||
void MacroAssembler::rtm_inflated_locking(Register objReg, Register boxReg, Register tmpReg,
|
void MacroAssembler::rtm_inflated_locking(Register objReg, Register boxReg, Register tmpReg,
|
||||||
Register scrReg, Register retry_on_busy_count_Reg,
|
Register scrReg, Register retry_on_busy_count_Reg,
|
||||||
Register retry_on_abort_count_Reg,
|
Register retry_on_abort_count_Reg,
|
||||||
|
@@ -1543,8 +1542,7 @@ void MacroAssembler::rtm_inflated_locking(Register objReg, Register boxReg, Regi
 assert(tmpReg == rax, "");
 assert(scrReg == rdx, "");
 Label L_rtm_retry, L_decrement_retry, L_on_abort;
-// Clean monitor_value bit to get valid pointer
-int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
+int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);

 // Without cast to int32_t a movptr will destroy r10 which is typically obj
 movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
@@ -1716,7 +1714,7 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
 atomic_incl(ExternalAddress((address)counters->total_entry_count_addr()), scrReg);
 }
 if (EmitSync & 1) {
-// set box->dhw = unused_mark (3)
+// set box->dhw = markOopDesc::unused_mark()
 // Force all sync thru slow-path: slow_enter() and slow_exit()
 movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
 cmpptr (rsp, (int32_t)NULL_WORD);
@@ -1769,7 +1767,7 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
 // at [FETCH], below, will never observe a biased encoding (*101b).
 // If this invariant is not held we risk exclusion (safety) failure.
 if (UseBiasedLocking && !UseOptoBiasInlining) {
-biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, counters);
+biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, counters);
 }

 #if INCLUDE_RTM_OPT
@@ -1811,7 +1809,7 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
 jmp(DONE_LABEL);

 bind(IsInflated);
-// The object is inflated. tmpReg contains pointer to ObjectMonitor* + 2(monitor_value)
+// The object is inflated. tmpReg contains pointer to ObjectMonitor* + markOopDesc::monitor_value

 #if INCLUDE_RTM_OPT
 // Use the same RTM locking code in 32- and 64-bit VM.
@@ -1823,25 +1821,10 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg

 #ifndef _LP64
 // The object is inflated.
-//
-// TODO-FIXME: eliminate the ugly use of manifest constants:
-// Use markOopDesc::monitor_value instead of "2".
-// use markOop::unused_mark() instead of "3".
-// The tmpReg value is an objectMonitor reference ORed with
-// markOopDesc::monitor_value (2). We can either convert tmpReg to an
-// objectmonitor pointer by masking off the "2" bit or we can just
-// use tmpReg as an objectmonitor pointer but bias the objectmonitor
-// field offsets with "-2" to compensate for and annul the low-order tag bit.
-//
-// I use the latter as it avoids AGI stalls.
-// As such, we write "mov r, [tmpReg+OFFSETOF(Owner)-2]"
-// instead of "mov r, [tmpReg+OFFSETOF(Owner)]".
-//
-#define OFFSET_SKEWED(f) ((ObjectMonitor::f ## _offset_in_bytes())-2)
-
 // boxReg refers to the on-stack BasicLock in the current frame.
 // We'd like to write:
-// set box->_displaced_header = markOop::unused_mark(). Any non-0 value suffices.
+// set box->_displaced_header = markOopDesc::unused_mark(). Any non-0 value suffices.
 // This is convenient but results a ST-before-CAS penalty. The following CAS suffers
 // additional latency as we have another ST in the store buffer that must drain.
@@ -1853,7 +1836,7 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
 if (os::is_MP()) {
 lock();
 }
-cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
+cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
 } else
 if ((EmitSync & 128) == 0) { // avoid ST-before-CAS
 movptr(scrReg, boxReg);
@@ -1862,7 +1845,7 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
 // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
 if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
 // prefetchw [eax + Offset(_owner)-2]
-prefetchw(Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+prefetchw(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
 }

 if ((EmitSync & 64) == 0) {
@@ -1871,7 +1854,7 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
 } else {
 // Can suffer RTS->RTO upgrades on shared or cold $ lines
 // Test-And-CAS instead of CAS
-movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)); // rax, = m->_owner
+movptr(tmpReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); // rax, = m->_owner
 testptr(tmpReg, tmpReg); // Locked ?
 jccb (Assembler::notZero, DONE_LABEL);
 }
@@ -1887,11 +1870,11 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
 if (os::is_MP()) {
 lock();
 }
-cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
+cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
 movptr(Address(scrReg, 0), 3); // box->_displaced_header = 3
 jccb (Assembler::notZero, DONE_LABEL);
 get_thread (scrReg); // beware: clobbers ICCs
-movptr(Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2), scrReg);
+movptr(Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), scrReg);
 xorptr(boxReg, boxReg); // set icc.ZFlag = 1 to indicate success

 // If the CAS fails we can either retry or pass control to the slow-path.
@@ -1908,7 +1891,7 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
 // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
 if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
 // prefetchw [eax + Offset(_owner)-2]
-prefetchw(Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+prefetchw(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
 }

 if ((EmitSync & 64) == 0) {
@@ -1916,7 +1899,7 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
 xorptr (tmpReg, tmpReg);
 } else {
 // Can suffer RTS->RTO upgrades on shared or cold $ lines
-movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)); // rax, = m->_owner
+movptr(tmpReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); // rax, = m->_owner
 testptr(tmpReg, tmpReg); // Locked ?
 jccb (Assembler::notZero, DONE_LABEL);
 }
@@ -1928,7 +1911,7 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
 if (os::is_MP()) {
 lock();
 }
-cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
+cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));

 // If the CAS fails we can either retry or pass control to the slow-path.
 // We use the latter tactic.
@@ -1951,7 +1934,7 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
 movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));

 movptr (boxReg, tmpReg);
-movptr (tmpReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
+movptr(tmpReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
 testptr(tmpReg, tmpReg);
 jccb (Assembler::notZero, DONE_LABEL);

@@ -1959,7 +1942,7 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
 if (os::is_MP()) {
 lock();
 }
-cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
+cmpxchgptr(r15_thread, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
 // Intentional fall-through into DONE_LABEL ...
 #endif // _LP64

@@ -2065,8 +2048,7 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
 #if INCLUDE_RTM_OPT
 if (use_rtm) {
 Label L_regular_inflated_unlock;
-// Clean monitor_value bit to get valid pointer
-int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
+int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);
 movptr(boxReg, Address(tmpReg, owner_offset));
 testptr(boxReg, boxReg);
 jccb(Assembler::notZero, L_regular_inflated_unlock);
@@ -2102,7 +2084,7 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
 get_thread (boxReg);
 if ((EmitSync & 4096) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
 // prefetchw [ebx + Offset(_owner)-2]
-prefetchw(Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+prefetchw(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
 }

 // Note that we could employ various encoding schemes to reduce
@ -2111,21 +2093,21 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
|
||||||
// In practice the chain of fetches doesn't seem to impact performance, however.
|
// In practice the chain of fetches doesn't seem to impact performance, however.
|
||||||
if ((EmitSync & 65536) == 0 && (EmitSync & 256)) {
|
if ((EmitSync & 65536) == 0 && (EmitSync & 256)) {
|
||||||
// Attempt to reduce branch density - AMD's branch predictor.
|
// Attempt to reduce branch density - AMD's branch predictor.
|
||||||
xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
|
xorptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
|
||||||
orptr(boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2));
|
orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
|
||||||
orptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2));
|
orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
|
||||||
orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2));
|
orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
|
||||||
jccb (Assembler::notZero, DONE_LABEL);
|
jccb (Assembler::notZero, DONE_LABEL);
|
||||||
movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD);
|
movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
|
||||||
jmpb (DONE_LABEL);
|
jmpb (DONE_LABEL);
|
||||||
} else {
|
} else {
|
||||||
xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
|
xorptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
|
||||||
orptr(boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2));
|
orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
|
||||||
jccb (Assembler::notZero, DONE_LABEL);
|
jccb (Assembler::notZero, DONE_LABEL);
|
||||||
movptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2));
|
movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
|
||||||
orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2));
|
orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
|
||||||
jccb (Assembler::notZero, CheckSucc);
|
jccb (Assembler::notZero, CheckSucc);
|
||||||
movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD);
|
movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
|
||||||
jmpb (DONE_LABEL);
|
jmpb (DONE_LABEL);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2143,7 +2125,7 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
|
||||||
|
|
||||||
// Optional pre-test ... it's safe to elide this
|
// Optional pre-test ... it's safe to elide this
|
||||||
if ((EmitSync & 16) == 0) {
|
if ((EmitSync & 16) == 0) {
|
||||||
cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD);
|
cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
|
||||||
jccb (Assembler::zero, LGoSlowPath);
|
jccb (Assembler::zero, LGoSlowPath);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2173,7 +2155,7 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
|
||||||
// We currently use (3), although it's likely that switching to (2)
|
// We currently use (3), although it's likely that switching to (2)
|
||||||
// is correct for the future.
|
// is correct for the future.
|
||||||
|
|
||||||
movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD);
|
movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
|
||||||
if (os::is_MP()) {
|
if (os::is_MP()) {
|
||||||
if (VM_Version::supports_sse2() && 1 == FenceInstruction) {
|
if (VM_Version::supports_sse2() && 1 == FenceInstruction) {
|
||||||
mfence();
|
mfence();
|
||||||
|
@ -2182,18 +2164,18 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Ratify _succ remains non-null
|
// Ratify _succ remains non-null
|
||||||
cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), 0);
|
cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), 0);
|
||||||
jccb (Assembler::notZero, LSuccess);
|
jccb (Assembler::notZero, LSuccess);
|
||||||
|
|
||||||
xorptr(boxReg, boxReg); // box is really EAX
|
xorptr(boxReg, boxReg); // box is really EAX
|
||||||
if (os::is_MP()) { lock(); }
|
if (os::is_MP()) { lock(); }
|
||||||
cmpxchgptr(rsp, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
|
cmpxchgptr(rsp, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
|
||||||
jccb (Assembler::notEqual, LSuccess);
|
jccb (Assembler::notEqual, LSuccess);
|
||||||
// Since we're low on registers we installed rsp as a placeholding in _owner.
|
// Since we're low on registers we installed rsp as a placeholding in _owner.
|
||||||
// Now install Self over rsp. This is safe as we're transitioning from
|
// Now install Self over rsp. This is safe as we're transitioning from
|
||||||
// non-null to non=null
|
// non-null to non=null
|
||||||
get_thread (boxReg);
|
get_thread (boxReg);
|
||||||
movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), boxReg);
|
movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), boxReg);
|
||||||
// Intentional fall-through into LGoSlowPath ...
|
// Intentional fall-through into LGoSlowPath ...
|
||||||
|
|
||||||
bind (LGoSlowPath);
|
bind (LGoSlowPath);
|
||||||
|
@ -2228,36 +2210,36 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
|
||||||
}
|
}
|
||||||
#else // _LP64
|
#else // _LP64
|
||||||
// It's inflated
|
// It's inflated
|
||||||
movptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
|
movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
|
||||||
xorptr(boxReg, r15_thread);
|
xorptr(boxReg, r15_thread);
|
||||||
orptr (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2));
|
orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
|
||||||
jccb (Assembler::notZero, DONE_LABEL);
|
jccb (Assembler::notZero, DONE_LABEL);
|
||||||
movptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2));
|
movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
|
||||||
orptr (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2));
|
orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
|
||||||
jccb (Assembler::notZero, CheckSucc);
|
jccb (Assembler::notZero, CheckSucc);
|
||||||
movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD);
|
movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), (int32_t)NULL_WORD);
|
||||||
jmpb (DONE_LABEL);
|
jmpb (DONE_LABEL);
|
||||||
|
|
||||||
if ((EmitSync & 65536) == 0) {
|
if ((EmitSync & 65536) == 0) {
|
||||||
Label LSuccess, LGoSlowPath ;
|
Label LSuccess, LGoSlowPath ;
|
||||||
bind (CheckSucc);
|
bind (CheckSucc);
|
||||||
cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD);
|
cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
|
||||||
jccb (Assembler::zero, LGoSlowPath);
|
jccb (Assembler::zero, LGoSlowPath);
|
||||||
|
|
||||||
// I'd much rather use lock:andl m->_owner, 0 as it's faster than the
|
// I'd much rather use lock:andl m->_owner, 0 as it's faster than the
|
||||||
// the explicit ST;MEMBAR combination, but masm doesn't currently support
|
// the explicit ST;MEMBAR combination, but masm doesn't currently support
|
||||||
// "ANDQ M,IMM". Don't use MFENCE here. lock:add to TOS, xchg, etc
|
// "ANDQ M,IMM". Don't use MFENCE here. lock:add to TOS, xchg, etc
|
||||||
// are all faster when the write buffer is populated.
|
// are all faster when the write buffer is populated.
|
||||||
movptr (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD);
|
movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), (int32_t)NULL_WORD);
|
||||||
if (os::is_MP()) {
|
if (os::is_MP()) {
|
||||||
lock (); addl (Address(rsp, 0), 0);
|
lock (); addl (Address(rsp, 0), 0);
|
||||||
}
|
}
|
||||||
cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD);
|
cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
|
||||||
jccb (Assembler::notZero, LSuccess);
|
jccb (Assembler::notZero, LSuccess);
|
||||||
|
|
||||||
movptr (boxReg, (int32_t)NULL_WORD); // box is really EAX
|
movptr (boxReg, (int32_t)NULL_WORD); // box is really EAX
|
||||||
if (os::is_MP()) { lock(); }
|
if (os::is_MP()) { lock(); }
|
||||||
cmpxchgptr(r15_thread, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
|
cmpxchgptr(r15_thread, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
|
||||||
jccb (Assembler::notEqual, LSuccess);
|
jccb (Assembler::notEqual, LSuccess);
|
||||||
// Intentional fall-through into slow-path
|
// Intentional fall-through into slow-path
|
||||||
|
|
||||||
|
@ -2964,7 +2946,7 @@ void MacroAssembler::eden_allocate(Register obj,
                                    Label& slow_case) {
   assert(obj == rax, "obj must be in rax, for cmpxchg");
   assert_different_registers(obj, var_size_in_bytes, t1);
-  if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+  if (!Universe::heap()->supports_inline_contig_alloc()) {
     jmp(slow_case);
   } else {
     Register end = t1;

@ -4437,7 +4419,7 @@ Register MacroAssembler::tlab_refill(Label& retry,
   assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx);
   Label do_refill, discard_tlab;

-  if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+  if (!Universe::heap()->supports_inline_contig_alloc()) {
     // No allocation in the shared eden.
     jmp(slow_case);
   }

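Both hunks above drop the CMSIncrementalMode special case, leaving only the question of whether the heap supports inline contiguous allocation. For readers unfamiliar with that fast path, here is a minimal pointer-bump sketch in portable C++; the names ErsatzEden, top and end are illustrative stand-ins, not the VM's own fields, and the real code is emitted as assembly by eden_allocate().

    #include <atomic>
    #include <cstddef>

    struct ErsatzEden {
      std::atomic<char*> top;   // current allocation pointer
      char*              end;   // end of the contiguous region
    };

    // Returns nullptr when the caller must take the slow path (GC / TLAB refill).
    inline void* pointer_bump_allocate(ErsatzEden& eden, size_t size) {
      char* old_top = eden.top.load(std::memory_order_relaxed);
      for (;;) {
        char* new_top = old_top + size;
        if (new_top > eden.end) return nullptr;          // exhausted: slow path
        // CAS publishes the new top; on failure old_top is reloaded and we retry.
        if (eden.top.compare_exchange_weak(old_top, new_top,
                                           std::memory_order_relaxed)) {
          return old_top;                                // start of the new object
        }
      }
    }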
@ -594,9 +594,35 @@ class StubGenerator: public StubCodeGenerator {
     return start;
   }

-  // Support for jint atomic::atomic_cmpxchg_long(jlong exchange_value,
-  //                                              volatile jlong* dest,
-  //                                              jlong compare_value)
+  // Support for jbyte atomic::atomic_cmpxchg(jbyte exchange_value, volatile jbyte* dest,
+  //                                          jbyte compare_value)
+  //
+  // Arguments :
+  //    c_rarg0: exchange_value
+  //    c_rarg1: dest
+  //    c_rarg2: compare_value
+  //
+  // Result:
+  //    if ( compare_value == *dest ) {
+  //       *dest = exchange_value
+  //       return compare_value;
+  //    else
+  //       return *dest;
+  address generate_atomic_cmpxchg_byte() {
+    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte");
+    address start = __ pc();
+
+    __ movsbq(rax, c_rarg2);
+    if ( os::is_MP() ) __ lock();
+    __ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
+    __ ret(0);
+
+    return start;
+  }
+
+  // Support for jlong atomic::atomic_cmpxchg(jlong exchange_value,
+  //                                          volatile jlong* dest,
+  //                                          jlong compare_value)
   // Arguments :
   //    c_rarg0: exchange_value
   //    c_rarg1: dest

@ -3894,6 +3920,7 @@ class StubGenerator: public StubCodeGenerator {
     StubRoutines::_atomic_xchg_entry          = generate_atomic_xchg();
     StubRoutines::_atomic_xchg_ptr_entry      = generate_atomic_xchg_ptr();
     StubRoutines::_atomic_cmpxchg_entry       = generate_atomic_cmpxchg();
+    StubRoutines::_atomic_cmpxchg_byte_entry  = generate_atomic_cmpxchg_byte();
     StubRoutines::_atomic_cmpxchg_long_entry  = generate_atomic_cmpxchg_long();
     StubRoutines::_atomic_add_entry           = generate_atomic_add();
     StubRoutines::_atomic_add_ptr_entry       = generate_atomic_add_ptr();

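The new stub's contract (store exchange_value only when *dest equals compare_value, and return the previous byte either way) can be restated in portable C++ for clarity. This helper is purely illustrative; the VM itself calls the generated stub or inline assembly.

    #include <atomic>
    #include <cstdint>

    // Returns the previous value of dest; stores exchange_value only when the
    // previous value equals compare_value, mirroring the stub's comment above.
    inline int8_t cmpxchg_byte(std::atomic<int8_t>& dest,
                               int8_t exchange_value,
                               int8_t compare_value) {
      int8_t expected = compare_value;
      dest.compare_exchange_strong(expected, exchange_value);
      return expected;   // old value: equals compare_value on success
    }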
@ -3214,7 +3214,7 @@ void TemplateTable::_new() {
   // (creates a new TLAB, etc.)

   const bool allow_shared_alloc =
-    Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
+    Universe::heap()->supports_inline_contig_alloc();

   const Register thread = rcx;
   if (UseTLAB || allow_shared_alloc) {

@ -3269,7 +3269,7 @@ void TemplateTable::_new() {
   // (creates a new TLAB, etc.)

   const bool allow_shared_alloc =
-    Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
+    Universe::heap()->supports_inline_contig_alloc();

   if (UseTLAB) {
     __ movptr(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));

@ -1210,6 +1210,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo


   Unimplemented();
+  return 0; // Mute compiler
 }

 #ifndef PRODUCT

@ -30,7 +30,9 @@

 int ZeroStack::suggest_size(Thread *thread) const {
   assert(needs_setup(), "already set up");
-  return align_size_down(abi_stack_available(thread) / 2, wordSize);
+  int abi_available = abi_stack_available(thread);
+  assert(abi_available >= 0, "available abi stack must be >= 0");
+  return align_size_down(abi_available / 2, wordSize);
 }

 void ZeroStack::handle_overflow(TRAPS) {

@ -48,9 +48,11 @@ inline void ZeroStack::overflow_check(int required_words, TRAPS) {
 // to use under normal circumstances.  Note that the returned
 // value can be negative.
 inline int ZeroStack::abi_stack_available(Thread *thread) const {
-  int stack_used = thread->stack_base() - (address) &stack_used;
+  guarantee(Thread::current() == thread, "should run in the same thread");
+  int stack_used = thread->stack_base() - (address) &stack_used
+    + (StackYellowPages+StackRedPages+StackShadowPages) * os::vm_page_size();
   int stack_free = thread->stack_size() - stack_used;
-  return stack_free - shadow_pages_size();
+  return stack_free;
 }

 #endif // CPU_ZERO_VM_STACK_ZERO_INLINE_HPP

@ -207,6 +207,7 @@ class StubGenerator: public StubCodeGenerator {
     StubRoutines::_atomic_xchg_ptr_entry = ShouldNotCallThisStub();
     StubRoutines::_atomic_cmpxchg_entry = ShouldNotCallThisStub();
     StubRoutines::_atomic_cmpxchg_ptr_entry = ShouldNotCallThisStub();
+    StubRoutines::_atomic_cmpxchg_byte_entry = ShouldNotCallThisStub();
     StubRoutines::_atomic_cmpxchg_long_entry = ShouldNotCallThisStub();
     StubRoutines::_atomic_add_entry = ShouldNotCallThisStub();
     StubRoutines::_atomic_add_ptr_entry = ShouldNotCallThisStub();

@ -107,6 +107,12 @@
 #include <sys/vminfo.h>
 #include <sys/wait.h>

+// If RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
+// getrusage() is prepared to handle the associated failure.
+#ifndef RUSAGE_THREAD
+#define RUSAGE_THREAD (1)               /* only the calling thread */
+#endif
+
 // Add missing declarations (should be in procinfo.h but isn't until AIX 6.1).
 #if !defined(_AIXVERSION_610)
 extern "C" {

@ -1065,15 +1071,19 @@ jlong os::elapsed_frequency() {
   return (1000 * 1000);
 }

-// For now, we say that linux does not support vtime. I have no idea
-// whether it can actually be made to (DLD, 9/13/05).
+bool os::supports_vtime() { return true; }

-bool os::supports_vtime() { return false; }
 bool os::enable_vtime()   { return false; }
 bool os::vtime_enabled()  { return false; }

 double os::elapsedVTime() {
-  // better than nothing, but not much
-  return elapsedTime();
+  struct rusage usage;
+  int retval = getrusage(RUSAGE_THREAD, &usage);
+  if (retval == 0) {
+    return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
+  } else {
+    // better than nothing, but not much
+    return elapsedTime();
+  }
 }

 jlong os::javaTimeMillis() {

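A standalone illustration of the same per-thread CPU-time query, assuming a libc that provides getrusage() with RUSAGE_THREAD (which is exactly what the fallback RUSAGE_THREAD definition earlier in this change guards against missing):

    #include <sys/resource.h>

    // Returns the calling thread's consumed user+system CPU time in seconds,
    // or a negative value when the query is not supported.
    static double thread_cpu_seconds() {
      struct rusage usage;
      if (getrusage(RUSAGE_THREAD, &usage) != 0) {
        return -1.0;                       // caller can fall back to wall-clock time
      }
      return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec +
             (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
    }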
@ -422,7 +422,7 @@ static char* get_user_name_slow(int vmid, TRAPS) {
 // return the name of the user that owns the JVM indicated by the given vmid.
 //
 static char* get_user_name(int vmid, TRAPS) {
-  return get_user_name_slow(vmid, CHECK_NULL);
+  return get_user_name_slow(vmid, THREAD);
 }

 // return the file name of the backing store file for the named

@ -422,7 +422,7 @@ static char* get_user_name_slow(int vmid, TRAPS) {
 // return the name of the user that owns the JVM indicated by the given vmid.
 //
 static char* get_user_name(int vmid, TRAPS) {
-  return get_user_name_slow(vmid, CHECK_NULL);
+  return get_user_name_slow(vmid, THREAD);
 }

 // return the file name of the backing store file for the named

@ -68,6 +68,7 @@
 #include "utilities/events.hpp"
 #include "utilities/elfFile.hpp"
 #include "utilities/growableArray.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/vmError.hpp"

 // put OS-includes here

@ -422,7 +422,7 @@ static char* get_user_name_slow(int vmid, TRAPS) {
 // return the name of the user that owns the JVM indicated by the given vmid.
 //
 static char* get_user_name(int vmid, TRAPS) {
-  return get_user_name_slow(vmid, CHECK_NULL);
+  return get_user_name_slow(vmid, THREAD);
 }

 // return the file name of the backing store file for the named

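Why CHECK_NULL is replaced with THREAD in the hunks above: the CHECK-style macros append a pending-exception test after the call, so inside a return statement that test is unreachable. A hypothetical analogue (not the real exceptions.hpp macros) makes the problem visible:

    // The injected "if (...) return nullptr" can never run once the return
    // statement has already taken the call's value; passing the plain thread
    // argument instead lets the caller perform the check.
    static bool g_pending = false;
    #define ARG_AND_CHECK  g_pending); if (g_pending) return nullptr; ((void)0

    static const char* lookup_slow(int id, bool& pending) { (void)id; pending = false; return "user"; }

    static const char* lookup(int id) {
      return lookup_slow(id, ARG_AND_CHECK);   // dead code after the return
    }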
@ -1,285 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
|
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
|
||||||
*
|
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
|
||||||
* under the terms of the GNU General Public License version 2 only, as
|
|
||||||
* published by the Free Software Foundation.
|
|
||||||
*
|
|
||||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
|
||||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
||||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
||||||
* version 2 for more details (a copy is included in the LICENSE file that
|
|
||||||
* accompanied this code).
|
|
||||||
*
|
|
||||||
* You should have received a copy of the GNU General Public License version
|
|
||||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
|
||||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
||||||
*
|
|
||||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
|
||||||
* or visit www.oracle.com if you need additional information or have any
|
|
||||||
* questions.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Name: add_gnu_debuglink.c
|
|
||||||
*
|
|
||||||
* Description: Add a ".gnu_debuglink" section that refers to the specified
|
|
||||||
* debug_info_path to the specified ELF object.
|
|
||||||
*
|
|
||||||
* This program is adapted from the example program shown on the
|
|
||||||
* elf(3elf) man page and from code from the Solaris compiler
|
|
||||||
* driver.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
* needed to define SHF_EXCLUDE
|
|
||||||
*/
|
|
||||||
#define ELF_TARGET_ALL
|
|
||||||
|
|
||||||
#include <fcntl.h>
|
|
||||||
#include <stdio.h>
|
|
||||||
#include <libelf.h>
|
|
||||||
#include <stdlib.h>
|
|
||||||
#include <string.h>
|
|
||||||
#include <unistd.h>
|
|
||||||
|
|
||||||
static void failure(void);
|
|
||||||
static unsigned int gnu_debuglink_crc32(unsigned int crc, unsigned char *buf,
|
|
||||||
size_t len);
|
|
||||||
|
|
||||||
void
|
|
||||||
main(int argc, char ** argv) {
|
|
||||||
/* new ELF section name */
|
|
||||||
static char SEC_NAME[] = ".gnu_debuglink";
|
|
||||||
|
|
||||||
unsigned char buffer[8 * 1024]; /* I/O buffer */
|
|
||||||
int buffer_len; /* buffer length */
|
|
||||||
char * debug_info_path; /* debug info path */
|
|
||||||
void * ehdr; /* ELF header */
|
|
||||||
Elf * elf; /* ELF descriptor */
|
|
||||||
char * elf_ident; /* ELF identity string */
|
|
||||||
char * elf_obj; /* elf_obj file */
|
|
||||||
int fd; /* descriptor for files */
|
|
||||||
unsigned int file_crc = 0; /* CRC for debug info file */
|
|
||||||
int is_elfclass64; /* is an ELFCLASS64 file? */
|
|
||||||
Elf_Data * link_dat; /* ELF data for new debug info link */
|
|
||||||
Elf_Data * name_dat; /* ELF data for new section name */
|
|
||||||
Elf_Scn * new_scn; /* new ELF section descriptor */
|
|
||||||
void * new_shdr; /* new ELF section header */
|
|
||||||
Elf_Scn * scn; /* ELF section descriptor */
|
|
||||||
void * shdr; /* ELF section header */
|
|
||||||
|
|
||||||
if (argc != 3) {
|
|
||||||
(void) fprintf(stderr, "Usage: %s debug_info_path elf_obj\n", argv[0]);
|
|
||||||
exit(2);
|
|
||||||
}
|
|
||||||
|
|
||||||
debug_info_path = argv[1]; /* save for later */
|
|
||||||
if ((fd = open(debug_info_path, O_RDONLY)) == -1) {
|
|
||||||
(void) fprintf(stderr, "%s: cannot open file.\n", debug_info_path);
|
|
||||||
exit(3);
|
|
||||||
}
|
|
||||||
|
|
||||||
(void) printf("Computing CRC for '%s'\n", debug_info_path);
|
|
||||||
(void) fflush(stdout);
|
|
||||||
/* compute CRC for the debug info file */
|
|
||||||
for (;;) {
|
|
||||||
int len = read(fd, buffer, sizeof buffer);
|
|
||||||
if (len <= 0) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
file_crc = gnu_debuglink_crc32(file_crc, buffer, len);
|
|
||||||
}
|
|
||||||
(void) close(fd);
|
|
||||||
|
|
||||||
/* open the elf_obj */
|
|
||||||
elf_obj = argv[2];
|
|
||||||
if ((fd = open(elf_obj, O_RDWR)) == -1) {
|
|
||||||
(void) fprintf(stderr, "%s: cannot open file.\n", elf_obj);
|
|
||||||
exit(4);
|
|
||||||
}
|
|
||||||
|
|
||||||
(void) printf("Opening '%s' for update\n", elf_obj);
|
|
||||||
(void) fflush(stdout);
|
|
||||||
(void) elf_version(EV_CURRENT); /* coordinate ELF versions */
|
|
||||||
|
|
||||||
/* obtain the ELF descriptors from the input file */
|
|
||||||
if ((elf = elf_begin(fd, ELF_C_RDWR, NULL)) == NULL) {
|
|
||||||
failure();
|
|
||||||
}
|
|
||||||
|
|
||||||
/* determine if ELFCLASS64 or not? */
|
|
||||||
elf_ident = elf_getident(elf, NULL);
|
|
||||||
is_elfclass64 = (elf_ident[EI_CLASS] == ELFCLASS64);
|
|
||||||
|
|
||||||
/* get the ELF header */
|
|
||||||
if (is_elfclass64) {
|
|
||||||
ehdr = elf64_getehdr(elf);
|
|
||||||
} else {
|
|
||||||
ehdr = elf32_getehdr(elf);
|
|
||||||
}
|
|
||||||
if (ehdr == NULL) {
|
|
||||||
failure();
|
|
||||||
}
|
|
||||||
|
|
||||||
/* get the ELF section descriptor */
|
|
||||||
if (is_elfclass64) {
|
|
||||||
scn = elf_getscn(elf, ((Elf64_Ehdr *) ehdr)->e_shstrndx);
|
|
||||||
} else {
|
|
||||||
scn = elf_getscn(elf, ((Elf32_Ehdr *) ehdr)->e_shstrndx);
|
|
||||||
}
|
|
||||||
if (scn == NULL) {
|
|
||||||
failure();
|
|
||||||
}
|
|
||||||
|
|
||||||
/* get the section header */
|
|
||||||
if (is_elfclass64) {
|
|
||||||
shdr = elf64_getshdr(scn);
|
|
||||||
} else {
|
|
||||||
shdr = elf32_getshdr(scn);
|
|
||||||
}
|
|
||||||
if (shdr == NULL) {
|
|
||||||
failure();
|
|
||||||
}
|
|
||||||
|
|
||||||
(void) printf("Adding ELF data for new section name\n");
|
|
||||||
(void) fflush(stdout);
|
|
||||||
name_dat = elf_newdata(scn);
|
|
||||||
name_dat->d_buf = (void *) SEC_NAME;
|
|
||||||
if (is_elfclass64) {
|
|
||||||
name_dat->d_off = ((Elf64_Shdr *) shdr)->sh_size + 1;
|
|
||||||
} else {
|
|
||||||
name_dat->d_off = ((Elf32_Shdr *) shdr)->sh_size + 1;
|
|
||||||
}
|
|
||||||
name_dat->d_align = 1;
|
|
||||||
name_dat->d_size = strlen(SEC_NAME) + 1;
|
|
||||||
|
|
||||||
new_scn = elf_newscn(elf);
|
|
||||||
|
|
||||||
if (is_elfclass64) {
|
|
||||||
new_shdr = elf64_getshdr(new_scn);
|
|
||||||
((Elf64_Shdr *) new_shdr)->sh_flags = SHF_EXCLUDE;
|
|
||||||
((Elf64_Shdr *) new_shdr)->sh_type = SHT_PROGBITS;
|
|
||||||
((Elf64_Shdr *) new_shdr)->sh_name = ((Elf64_Shdr *) shdr)->sh_size;
|
|
||||||
((Elf64_Shdr *) new_shdr)->sh_addralign = 1;
|
|
||||||
((Elf64_Shdr *) shdr)->sh_size += (strlen(SEC_NAME) + 1);
|
|
||||||
} else {
|
|
||||||
new_shdr = elf32_getshdr(new_scn);
|
|
||||||
((Elf32_Shdr *) new_shdr)->sh_flags = SHF_EXCLUDE;
|
|
||||||
((Elf32_Shdr *) new_shdr)->sh_type = SHT_PROGBITS;
|
|
||||||
((Elf32_Shdr *) new_shdr)->sh_name = ((Elf32_Shdr *) shdr)->sh_size;
|
|
||||||
((Elf32_Shdr *) new_shdr)->sh_addralign = 1;
|
|
||||||
((Elf32_Shdr *) shdr)->sh_size += (strlen(SEC_NAME) + 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
(void) printf("Adding ELF data for debug_info_path value\n");
|
|
||||||
(void) fflush(stdout);
|
|
||||||
(void) memset(buffer, 0, sizeof buffer);
|
|
||||||
buffer_len = strlen(debug_info_path) + 1; /* +1 for NUL */
|
|
||||||
(void) strncpy((char *) buffer, debug_info_path, buffer_len);
|
|
||||||
if (buffer_len % 4 != 0) {
|
|
||||||
/* not on a 4 byte boundary so pad to the next one */
|
|
||||||
buffer_len += (4 - buffer_len % 4);
|
|
||||||
}
|
|
||||||
/* save the CRC */
|
|
||||||
(void) memcpy(&buffer[buffer_len], &file_crc, sizeof file_crc);
|
|
||||||
buffer_len += sizeof file_crc;
|
|
||||||
|
|
||||||
link_dat = elf_newdata(new_scn);
|
|
||||||
link_dat->d_type = ELF_T_BYTE;
|
|
||||||
link_dat->d_size = buffer_len;
|
|
||||||
link_dat->d_buf = buffer;
|
|
||||||
link_dat->d_align = 1;
|
|
||||||
|
|
||||||
(void) printf("Saving updates to '%s'\n", elf_obj);
|
|
||||||
(void) fflush(stdout);
|
|
||||||
(void) elf_update(elf, ELF_C_NULL); /* recalc ELF memory structures */
|
|
||||||
(void) elf_update(elf, ELF_C_WRITE); /* write out changes to ELF obj */
|
|
||||||
(void) elf_end(elf); /* done with ELF obj */
|
|
||||||
(void) close(fd);
|
|
||||||
|
|
||||||
(void) printf("Done updating '%s'\n", elf_obj);
|
|
||||||
(void) fflush(stdout);
|
|
||||||
exit(0);
|
|
||||||
} /* end main */
|
|
||||||
|
|
||||||
|
|
||||||
static void
|
|
||||||
failure() {
|
|
||||||
(void) fprintf(stderr, "%s\n", elf_errmsg(elf_errno()));
|
|
||||||
exit(5);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The CRC used in gnu_debuglink, retrieved from
|
|
||||||
* http://sourceware.org/gdb/current/onlinedocs/gdb/Separate-Debug-Files.html#Separate-Debug-Files.
|
|
||||||
*/
|
|
||||||
|
|
||||||
static unsigned int
|
|
||||||
gnu_debuglink_crc32(unsigned int crc, unsigned char *buf, size_t len) {
|
|
||||||
static const unsigned int crc32_table[256] = {
|
|
||||||
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419,
|
|
||||||
0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4,
|
|
||||||
0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07,
|
|
||||||
0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
|
|
||||||
0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856,
|
|
||||||
0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
|
|
||||||
0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4,
|
|
||||||
0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
|
|
||||||
0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3,
|
|
||||||
0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a,
|
|
||||||
0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599,
|
|
||||||
0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
|
|
||||||
0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190,
|
|
||||||
0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f,
|
|
||||||
0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e,
|
|
||||||
0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
|
|
||||||
0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed,
|
|
||||||
0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
|
|
||||||
0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3,
|
|
||||||
0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
|
|
||||||
0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a,
|
|
||||||
0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5,
|
|
||||||
0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010,
|
|
||||||
0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
|
|
||||||
0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17,
|
|
||||||
0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6,
|
|
||||||
0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615,
|
|
||||||
0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
|
|
||||||
0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344,
|
|
||||||
0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
|
|
||||||
0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a,
|
|
||||||
0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
|
|
||||||
0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1,
|
|
||||||
0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c,
|
|
||||||
0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef,
|
|
||||||
0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
|
|
||||||
0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe,
|
|
||||||
0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31,
|
|
||||||
0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c,
|
|
||||||
0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
|
|
||||||
0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b,
|
|
||||||
0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
|
|
||||||
0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1,
|
|
||||||
0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
|
|
||||||
0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278,
|
|
||||||
0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7,
|
|
||||||
0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66,
|
|
||||||
0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
|
|
||||||
0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605,
|
|
||||||
0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8,
|
|
||||||
0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b,
|
|
||||||
0x2d02ef8d
|
|
||||||
};
|
|
||||||
|
|
||||||
unsigned char *end;
|
|
||||||
|
|
||||||
crc = ~crc & 0xffffffff;
|
|
||||||
for (end = buf + len; buf < end; ++buf) {
|
|
||||||
crc = crc32_table[(crc ^ *buf) & 0xff] ^ (crc >> 8);
|
|
||||||
}
|
|
||||||
return ~crc & 0xffffffff;
|
|
||||||
}
|
|
|
@ -1,181 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
|
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
|
||||||
*
|
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
|
||||||
* under the terms of the GNU General Public License version 2 only, as
|
|
||||||
* published by the Free Software Foundation.
|
|
||||||
*
|
|
||||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
|
||||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
||||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
||||||
* version 2 for more details (a copy is included in the LICENSE file that
|
|
||||||
* accompanied this code).
|
|
||||||
*
|
|
||||||
* You should have received a copy of the GNU General Public License version
|
|
||||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
|
||||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
||||||
*
|
|
||||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
|
||||||
* or visit www.oracle.com if you need additional information or have any
|
|
||||||
* questions.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Name: fix_empty_sec_hdr_flags.c
|
|
||||||
*
|
|
||||||
* Description: Remove the SHF_ALLOC flag from "empty" section headers.
|
|
||||||
* An "empty" section header has sh_addr == 0 and sh_size == 0.
|
|
||||||
*
|
|
||||||
* This program is adapted from the example program shown on the
|
|
||||||
* elf(3elf) man page and from code from the Solaris compiler
|
|
||||||
* driver.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#include <fcntl.h>
|
|
||||||
#include <stdio.h>
|
|
||||||
#include <libelf.h>
|
|
||||||
#include <stdlib.h>
|
|
||||||
#include <string.h>
|
|
||||||
#include <unistd.h>
|
|
||||||
|
|
||||||
static void failure(void);
|
|
||||||
|
|
||||||
void
|
|
||||||
main(int argc, char ** argv) {
|
|
||||||
void * ehdr; /* ELF header */
|
|
||||||
unsigned int i; /* section counter */
|
|
||||||
int fd; /* descriptor for file */
|
|
||||||
Elf * elf; /* ELF descriptor */
|
|
||||||
char * elf_ident; /* ELF identity string */
|
|
||||||
char * elf_obj; /* elf_obj file */
|
|
||||||
int fix_count; /* number of flags fixed */
|
|
||||||
int is_elfclass64; /* is an ELFCLASS64 file? */
|
|
||||||
Elf_Scn * scn; /* ELF section descriptor */
|
|
||||||
void * shdr; /* ELF section header */
|
|
||||||
Elf_Data * shstrtab; /* ELF section header string table */
|
|
||||||
|
|
||||||
if (argc != 2) {
|
|
||||||
(void) fprintf(stderr, "Usage: %s elf_obj\n", argv[0]);
|
|
||||||
exit(2);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* open the elf_obj */
|
|
||||||
elf_obj = argv[1];
|
|
||||||
if ((fd = open(elf_obj, O_RDWR)) == -1) {
|
|
||||||
(void) fprintf(stderr, "%s: cannot open file.\n", elf_obj);
|
|
||||||
exit(3);
|
|
||||||
}
|
|
||||||
|
|
||||||
(void) printf("Opening '%s' for update\n", elf_obj);
|
|
||||||
(void) fflush(stdout);
|
|
||||||
(void) elf_version(EV_CURRENT); /* coordinate ELF versions */
|
|
||||||
|
|
||||||
/* obtain the ELF descriptors from the input file */
|
|
||||||
if ((elf = elf_begin(fd, ELF_C_RDWR, NULL)) == NULL) {
|
|
||||||
failure();
|
|
||||||
}
|
|
||||||
|
|
||||||
/* determine if ELFCLASS64 or not? */
|
|
||||||
elf_ident = elf_getident(elf, NULL);
|
|
||||||
is_elfclass64 = (elf_ident[EI_CLASS] == ELFCLASS64);
|
|
||||||
|
|
||||||
/* get the ELF header */
|
|
||||||
if (is_elfclass64) {
|
|
||||||
ehdr = elf64_getehdr(elf);
|
|
||||||
} else {
|
|
||||||
ehdr = elf32_getehdr(elf);
|
|
||||||
}
|
|
||||||
if (ehdr == NULL) {
|
|
||||||
failure();
|
|
||||||
}
|
|
||||||
|
|
||||||
/* get the ELF section descriptor */
|
|
||||||
if (is_elfclass64) {
|
|
||||||
scn = elf_getscn(elf, ((Elf64_Ehdr *) ehdr)->e_shstrndx);
|
|
||||||
} else {
|
|
||||||
scn = elf_getscn(elf, ((Elf32_Ehdr *) ehdr)->e_shstrndx);
|
|
||||||
}
|
|
||||||
if (scn == NULL) {
|
|
||||||
failure();
|
|
||||||
}
|
|
||||||
|
|
||||||
/* get the section header string table */
|
|
||||||
shstrtab = elf_getdata(scn, NULL);
|
|
||||||
if (shstrtab == NULL) {
|
|
||||||
failure();
|
|
||||||
}
|
|
||||||
|
|
||||||
fix_count = 0;
|
|
||||||
|
|
||||||
/* traverse the sections of the input file */
|
|
||||||
for (i = 1, scn = NULL; scn = elf_nextscn(elf, scn); i++) {
|
|
||||||
int has_flag_set; /* is SHF_ALLOC flag set? */
|
|
||||||
int is_empty; /* is section empty? */
|
|
||||||
char * name; /* short hand pointer */
|
|
||||||
|
|
||||||
/* get the section header */
|
|
||||||
if (is_elfclass64) {
|
|
||||||
shdr = elf64_getshdr(scn);
|
|
||||||
} else {
|
|
||||||
shdr = elf32_getshdr(scn);
|
|
||||||
}
|
|
||||||
if (shdr == NULL) {
|
|
||||||
failure();
|
|
||||||
}
|
|
||||||
|
|
||||||
if (is_elfclass64) {
|
|
||||||
name = (char *)shstrtab->d_buf + ((Elf64_Shdr *) shdr)->sh_name;
|
|
||||||
} else {
|
|
||||||
name = (char *)shstrtab->d_buf + ((Elf32_Shdr *) shdr)->sh_name;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (is_elfclass64) {
|
|
||||||
has_flag_set = ((Elf64_Shdr *) shdr)->sh_flags & SHF_ALLOC;
|
|
||||||
is_empty = ((Elf64_Shdr *) shdr)->sh_addr == 0 &&
|
|
||||||
((Elf64_Shdr *) shdr)->sh_size == 0;
|
|
||||||
} else {
|
|
||||||
has_flag_set = ((Elf32_Shdr *) shdr)->sh_flags & SHF_ALLOC;
|
|
||||||
is_empty = ((Elf32_Shdr *) shdr)->sh_addr == 0 &&
|
|
||||||
((Elf32_Shdr *) shdr)->sh_size == 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (is_empty && has_flag_set) {
|
|
||||||
(void) printf("section[%u] '%s' is empty, "
|
|
||||||
"but SHF_ALLOC flag is set.\n", i, name);
|
|
||||||
(void) printf("Clearing the SHF_ALLOC flag.\n");
|
|
||||||
|
|
||||||
if (is_elfclass64) {
|
|
||||||
((Elf64_Shdr *) shdr)->sh_flags &= ~SHF_ALLOC;
|
|
||||||
} else {
|
|
||||||
((Elf32_Shdr *) shdr)->sh_flags &= ~SHF_ALLOC;
|
|
||||||
}
|
|
||||||
fix_count++;
|
|
||||||
}
|
|
||||||
} /* end for each ELF section */
|
|
||||||
|
|
||||||
if (fix_count > 0) {
|
|
||||||
(void) printf("Saving %d updates to '%s'\n", fix_count, elf_obj);
|
|
||||||
(void) fflush(stdout);
|
|
||||||
(void) elf_update(elf, ELF_C_NULL); /* recalc ELF memory structures */
|
|
||||||
(void) elf_update(elf, ELF_C_WRITE); /* write out changes to ELF obj */
|
|
||||||
} else {
|
|
||||||
(void) printf("No SHF_ALLOC flags needed to be cleared.\n");
|
|
||||||
}
|
|
||||||
|
|
||||||
(void) elf_end(elf); /* done with ELF obj */
|
|
||||||
(void) close(fd);
|
|
||||||
|
|
||||||
(void) printf("Done %s '%s'\n",
|
|
||||||
(fix_count > 0) ? "updating" : "with", elf_obj);
|
|
||||||
(void) fflush(stdout);
|
|
||||||
exit(0);
|
|
||||||
} /* end main */
|
|
||||||
|
|
||||||
|
|
||||||
static void
|
|
||||||
failure() {
|
|
||||||
(void) fprintf(stderr, "%s\n", elf_errmsg(elf_errno()));
|
|
||||||
exit(6);
|
|
||||||
}
|
|
|
@ -2601,7 +2601,10 @@ void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
   assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
   assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
   if (UseLargePages) {
-    Solaris::setup_large_pages(addr, bytes, alignment_hint);
+    size_t page_size = Solaris::page_size_for_alignment(alignment_hint);
+    if (page_size > (size_t) vm_page_size()) {
+      Solaris::setup_large_pages(addr, bytes, page_size);
+    }
   }
 }

@ -461,7 +461,7 @@ static char* get_user_name(int vmid, TRAPS) {
   // since the structured procfs and old procfs interfaces can't be
   // mixed, we attempt to find the file through a directory search.

-  return get_user_name_slow(vmid, CHECK_NULL);
+  return get_user_name_slow(vmid, THREAD);
 }

 // return the file name of the backing store file for the named

@ -88,6 +88,15 @@ inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* des
   return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
 }

+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
+                    : "=a" (exchange_value)
+                    : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+  return exchange_value;
+}
+
 inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value) {
   int mp = os::is_MP();

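Usage sketch only: one way the new jbyte overload of Atomic::cmpxchg could be used, for example to claim a one-byte flag. The names below are illustrative, not VM code.

    static volatile jbyte _claimed = 0;

    static bool try_claim() {
      // cmpxchg returns the previous value; 0 means we installed the 1 and now own the flag.
      return Atomic::cmpxchg((jbyte)1, &_claimed, (jbyte)0) == 0;
    }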
@ -88,6 +88,15 @@ inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* des
   return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
 }

+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
+                    : "=a" (exchange_value)
+                    : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+  return exchange_value;
+}
+
 inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value) {
   int mp = os::is_MP();

@ -31,6 +31,11 @@

 // Implementation of class OrderAccess.

+// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
+static inline void compiler_barrier() {
+  __asm__ volatile ("" : : : "memory");
+}
+
 inline void OrderAccess::loadload()   { acquire(); }
 inline void OrderAccess::storestore() { release(); }
 inline void OrderAccess::loadstore()  { acquire(); }

@ -46,9 +51,7 @@ inline void OrderAccess::acquire() {
 }

 inline void OrderAccess::release() {
-  // Avoid hitting the same cache-line from
-  // different threads.
-  volatile jint local_dummy = 0;
+  compiler_barrier();
 }

 inline void OrderAccess::fence() {

@ -62,34 +65,34 @@ inline void OrderAccess::fence() {
   }
 }

-inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { return *p; }
-inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { return *p; }
-inline jint     OrderAccess::load_acquire(volatile jint*    p) { return *p; }
-inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return Atomic::load(p); }
-inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { return *p; }
-inline jushort  OrderAccess::load_acquire(volatile jushort* p) { return *p; }
-inline juint    OrderAccess::load_acquire(volatile juint*   p) { return *p; }
-inline julong   OrderAccess::load_acquire(volatile julong*  p) { return Atomic::load((volatile jlong*)p); }
-inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { return *p; }
-inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { return jdouble_cast(Atomic::load((volatile jlong*)p)); }
+inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { jbyte   v = *p; compiler_barrier(); return v; }
+inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { jshort  v = *p; compiler_barrier(); return v; }
+inline jint     OrderAccess::load_acquire(volatile jint*    p) { jint    v = *p; compiler_barrier(); return v; }
+inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { jlong   v = Atomic::load(p); compiler_barrier(); return v; }
+inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { jubyte  v = *p; compiler_barrier(); return v; }
+inline jushort  OrderAccess::load_acquire(volatile jushort* p) { jushort v = *p; compiler_barrier(); return v; }
+inline juint    OrderAccess::load_acquire(volatile juint*   p) { juint   v = *p; compiler_barrier(); return v; }
+inline julong   OrderAccess::load_acquire(volatile julong*  p) { julong  v = Atomic::load((volatile jlong*)p); compiler_barrier(); return v; }
+inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { jfloat  v = *p; compiler_barrier(); return v; }
+inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { jdouble v = jdouble_cast(Atomic::load((volatile jlong*)p)); compiler_barrier(); return v; }

-inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { return *p; }
-inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { return *(void* volatile *)p; }
-inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; }
+inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { intptr_t v = *p; compiler_barrier(); return v; }
+inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { void* v = *(void* volatile *)p; compiler_barrier(); return v; }
+inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { void* v = *(void* const volatile *)p; compiler_barrier(); return v; }

-inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { *p = v; }
-inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { *p = v; }
-inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { *p = v; }
-inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { Atomic::store(v, p); }
-inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { *p = v; }
-inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
-inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { *p = v; }
-inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { Atomic::store((jlong)v, (volatile jlong*)p); }
-inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { *p = v; }
+inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { compiler_barrier(); *p = v; }
+inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { compiler_barrier(); *p = v; }
+inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { compiler_barrier(); *p = v; }
+inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { compiler_barrier(); Atomic::store(v, p); }
+inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { compiler_barrier(); *p = v; }
+inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { compiler_barrier(); *p = v; }
+inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { compiler_barrier(); *p = v; }
+inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { compiler_barrier(); Atomic::store((jlong)v, (volatile jlong*)p); }
+inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { compiler_barrier(); *p = v; }
 inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { release_store((volatile jlong *)p, jlong_cast(v)); }

-inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; }
-inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v) { *(void* volatile *)p = v; }
+inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { compiler_barrier(); *p = v; }
+inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v) { compiler_barrier(); *(void* volatile *)p = v; }

 inline void     OrderAccess::store_fence(jbyte*  p, jbyte  v) {
   __asm__ volatile ( "xchgb (%2),%0"

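The change above relies on the fact that, on x86, ordinary loads and stores already have acquire/release ordering in hardware, so the remaining job of load_acquire and release_store is to stop the compiler from reordering around them. A minimal, self-contained illustration of the publish/consume pattern that this compiler-only barrier protects (sketch only, not the VM header):

    static inline void compiler_only_barrier() {
      __asm__ volatile ("" : : : "memory");   // no instruction emitted, just an optimization fence
    }

    static int          payload;
    static volatile int ready;

    void publish(int v) {
      payload = v;
      compiler_only_barrier();   // keep the payload store before the flag store
      ready = 1;                 // release-style publication on x86 (TSO)
    }

    int consume() {
      while (ready == 0) { /* spin */ }
      compiler_only_barrier();   // keep the flag load before the payload load
      return payload;
    }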
@ -542,6 +542,7 @@ JVM_handle_linux_signal(int sig,
   err.report_and_die();

   ShouldNotReachHere();
+  return true; // Mute compiler
 }

 void os::Linux::init_thread_fpu_state(void) {

@ -68,6 +68,8 @@ inline void Atomic::dec_ptr(volatile void* dest) { (void)add_ptr(-1, dest);
 extern "C" {
   jint _Atomic_add(jint add_value, volatile jint* dest IS_MP_DECL());
   jint _Atomic_xchg(jint exchange_value, volatile jint* dest);
+  jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest,
+                             jbyte compare_value IS_MP_DECL());
   jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest,
                        jint compare_value IS_MP_DECL());
   jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest,

@ -82,6 +84,11 @@ inline jint Atomic::xchg (jint exchange_value, volatile jint*
   return _Atomic_xchg(exchange_value, dest);
 }

+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
+  return _Atomic_cmpxchg_byte(exchange_value, dest, compare_value IS_MP_ARG());
+}
+
 inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value) {
   return _Atomic_cmpxchg(exchange_value, dest, compare_value IS_MP_ARG());
 }

@ -217,6 +224,15 @@ extern "C" {
     return exchange_value;
   }


+  inline jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, int mp) {
+    __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
+                      : "=a" (exchange_value)
+                      : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                      : "cc", "memory");
+    return exchange_value;
+  }
+
   // This is the interface to the atomic instruction in solaris_i486.s.
   jlong _Atomic_cmpxchg_long_gcc(jlong exchange_value, volatile jlong* dest, jlong compare_value, int mp);

@ -76,6 +76,23 @@
       xchgl    (%ecx), %eax
       .end

+  // Support for jbyte Atomic::cmpxchg(jbyte exchange_value,
+  //                                   volatile jbyte *dest,
+  //                                   jbyte compare_value)
+  // An additional bool (os::is_MP()) is passed as the last argument.
+      .inline _Atomic_cmpxchg_byte,4
+      movb     8(%esp), %al              // compare_value
+      movb     0(%esp), %cl              // exchange_value
+      movl     4(%esp), %edx             // dest
+      cmp      $0, 12(%esp)              // MP test
+      jne      1f
+      cmpxchgb %cl, (%edx)
+      jmp      2f
+1:    lock
+      cmpxchgb %cl, (%edx)
+2:
+      .end
+
   // Support for jint Atomic::cmpxchg(jint exchange_value,
   //                                  volatile jint *dest,
   //                                  jint compare_value)

@ -77,6 +77,15 @@
       movq     %rdi, %rax
       .end

+  // Support for jbyte Atomic::cmpxchg(jbyte exchange_value,
+  //                                   volatile jbyte *dest,
+  //                                   jbyte compare_value)
+      .inline _Atomic_cmpxchg_byte,3
+      movb     %dl, %al                  // compare_value
+      lock
+      cmpxchgb %dil, (%rsi)
+      .end
+
   // Support for jint Atomic::cmpxchg(jint exchange_value,
   //                                  volatile jint *dest,
   //                                  jint compare_value)

@@ -123,6 +123,11 @@ inline jint Atomic::cmpxchg (jint exchange_value, volatile jint*
   return (*os::atomic_cmpxchg_func)(exchange_value, dest, compare_value);
 }

+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
+  return (*os::atomic_cmpxchg_byte_func)(exchange_value, dest, compare_value);
+}
+
 inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value) {
   return (*os::atomic_cmpxchg_long_func)(exchange_value, dest, compare_value);
 }

@@ -212,6 +217,19 @@ inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* des
   return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
 }

+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
+  // alternative for InterlockedCompareExchange
+  int mp = os::is_MP();
+  __asm {
+    mov edx, dest
+    mov cl, exchange_value
+    mov al, compare_value
+    LOCK_IF_MP(mp)
+    cmpxchg byte ptr [edx], cl
+  }
+}
+
 inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value) {
   // alternative for InterlockedCompareExchange
   int mp = os::is_MP();
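Aside (illustration only, not part of the changeset above): the platform hunks give the jbyte Atomic::cmpxchg a native cmpxchgb path. A rough stand-alone sketch of the alternative such specializations avoid, emulating a one-byte CAS with a 32-bit CAS on the containing word, follows; the helper name, the GCC __sync builtin, and the little-endian byte indexing are assumptions made only for this illustration.

    #include <cstdint>

    // Hypothetical helper (not from the patch): emulate a single-byte CAS with a
    // word-sized CAS on the aligned 32-bit word that contains the byte.
    inline uint8_t cmpxchg_byte_emulated(uint8_t exchange_value,
                                         volatile uint8_t* dest,
                                         uint8_t compare_value) {
      volatile uint32_t* aligned = reinterpret_cast<volatile uint32_t*>(
          reinterpret_cast<uintptr_t>(dest) & ~uintptr_t(3));
      const unsigned shift = (reinterpret_cast<uintptr_t>(dest) & 3) * 8;  // little-endian byte position
      for (;;) {
        uint32_t old_word = *aligned;
        uint8_t  old_byte = (old_word >> shift) & 0xff;
        if (old_byte != compare_value) return old_byte;       // the CAS would fail anyway
        uint32_t new_word = (old_word & ~(uint32_t(0xff) << shift))
                          | (uint32_t(exchange_value) << shift);
        if (__sync_val_compare_and_swap(aligned, old_word, new_word) == old_word) {
          return old_byte;                                    // swap succeeded
        }
        // some other byte of the word changed concurrently; retry
      }
    }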
@@ -220,6 +220,7 @@ void os::initialize_thread(Thread* thr) {
 typedef jint xchg_func_t (jint, volatile jint*);
 typedef intptr_t xchg_ptr_func_t (intptr_t, volatile intptr_t*);
 typedef jint cmpxchg_func_t (jint, volatile jint*, jint);
+typedef jbyte cmpxchg_byte_func_t (jbyte, volatile jbyte*, jbyte);
 typedef jlong cmpxchg_long_func_t (jlong, volatile jlong*, jlong);
 typedef jint add_func_t (jint, volatile jint*);
 typedef intptr_t add_ptr_func_t (intptr_t, volatile intptr_t*);

@@ -272,6 +273,23 @@ jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint
     *dest = exchange_value;
   return old_value;
 }

+jbyte os::atomic_cmpxchg_byte_bootstrap(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
+  // try to use the stub:
+  cmpxchg_byte_func_t* func = CAST_TO_FN_PTR(cmpxchg_byte_func_t*, StubRoutines::atomic_cmpxchg_byte_entry());
+
+  if (func != NULL) {
+    os::atomic_cmpxchg_byte_func = func;
+    return (*func)(exchange_value, dest, compare_value);
+  }
+  assert(Threads::number_of_threads() == 0, "for bootstrap only");
+
+  jbyte old_value = *dest;
+  if (old_value == compare_value)
+    *dest = exchange_value;
+  return old_value;
+}
+
 #endif // AMD64

 jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
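Aside (illustration only): os::atomic_cmpxchg_byte_bootstrap above appears to follow the bootstrap-function-pointer pattern visible in the hunk itself: the pointer starts at a plain C++ fallback and is switched to the generated stub once StubRoutines has produced it. A minimal stand-alone sketch of that pattern, with hypothetical names, is:

    #include <cassert>

    typedef int (*op_func_t)(int);

    static op_func_t generated_stub = nullptr;        // set once stub generation has run

    static int op_bootstrap(int x);                   // forward declaration
    static op_func_t op_func = op_bootstrap;          // all callers go through op_func

    static int op_fallback(int x) { return x + 1; }   // stands in for the C++ fallback

    static int op_bootstrap(int x) {
      if (generated_stub != nullptr) {
        op_func = generated_stub;                     // patch the pointer; never taken again
        return (*generated_stub)(x);
      }
      // only reached while still single-threaded, before stubs exist
      return op_fallback(x);
    }

    int main() {
      assert(op_func(41) == 42);                      // takes the fallback path here
      return 0;
    }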
@@ -321,6 +339,7 @@ intptr_t os::atomic_add_ptr_bootstrap(intptr_t add_value, volatile intptr_t* des
 xchg_func_t* os::atomic_xchg_func = os::atomic_xchg_bootstrap;
 xchg_ptr_func_t* os::atomic_xchg_ptr_func = os::atomic_xchg_ptr_bootstrap;
 cmpxchg_func_t* os::atomic_cmpxchg_func = os::atomic_cmpxchg_bootstrap;
+cmpxchg_byte_func_t* os::atomic_cmpxchg_byte_func = os::atomic_cmpxchg_byte_bootstrap;
 add_func_t* os::atomic_add_func = os::atomic_add_bootstrap;
 add_ptr_func_t* os::atomic_add_ptr_func = os::atomic_add_ptr_bootstrap;

@@ -635,7 +654,11 @@ void os::setup_fpu() {
 #ifndef PRODUCT
 void os::verify_stack_alignment() {
 #ifdef AMD64
-  assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
+  // The current_stack_pointer() calls generated get_previous_sp stub routine.
+  // Only enable the assert after the routine becomes available.
+  if (StubRoutines::code1() != NULL) {
+    assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
+  }
 #endif
 }
 #endif
@@ -33,6 +33,7 @@
   static intptr_t (*atomic_xchg_ptr_func) (intptr_t, volatile intptr_t*);

   static jint (*atomic_cmpxchg_func) (jint, volatile jint*, jint);
+  static jbyte (*atomic_cmpxchg_byte_func) (jbyte, volatile jbyte*, jbyte);
   static jlong (*atomic_cmpxchg_long_func) (jlong, volatile jlong*, jlong);

   static jint (*atomic_add_func) (jint, volatile jint*);

@@ -42,6 +43,7 @@
   static intptr_t atomic_xchg_ptr_bootstrap (intptr_t, volatile intptr_t*);

   static jint atomic_cmpxchg_bootstrap (jint, volatile jint*, jint);
+  static jbyte atomic_cmpxchg_byte_bootstrap(jbyte, volatile jbyte*, jbyte);
 #else

   static jlong (*atomic_cmpxchg_long_func) (jlong, volatile jlong*, jlong);
@@ -512,7 +512,9 @@ abstract class GenericDebugConfig extends BuildConfig {
 abstract class GenericDebugNonKernelConfig extends GenericDebugConfig {
     protected void init(Vector includes, Vector defines) {
         super.init(includes, defines);
-        getCI().getAdditionalNonKernelLinkerFlags(getV("LinkerFlags"));
+        if (get("PlatformName").equals("Win32")) {
+            getCI().getAdditionalNonKernelLinkerFlags(getV("LinkerFlags"));
+        }
    }
 }

@@ -401,16 +401,18 @@ class CompilerInterfaceVC10 extends CompilerInterface {
     Vector getBaseLinkerFlags(String outDir, String outDll, String platformName) {
         Vector rv = new Vector();

-        addAttr(rv, "AdditionalOptions",
-                "/export:JNI_GetDefaultJavaVMInitArgs " +
-                "/export:JNI_CreateJavaVM " +
-                "/export:JVM_FindClassFromBootLoader "+
-                "/export:JNI_GetCreatedJavaVMs "+
-                "/export:jio_snprintf /export:jio_printf "+
-                "/export:jio_fprintf /export:jio_vfprintf "+
-                "/export:jio_vsnprintf "+
-                "/export:JVM_GetVersionInfo "+
-                "/export:JVM_InitAgentProperties");
+        if(platformName.equals("Win32")) {
+            addAttr(rv, "AdditionalOptions",
+                    "/export:JNI_GetDefaultJavaVMInitArgs " +
+                    "/export:JNI_CreateJavaVM " +
+                    "/export:JVM_FindClassFromBootLoader "+
+                    "/export:JNI_GetCreatedJavaVMs "+
+                    "/export:jio_snprintf /export:jio_printf "+
+                    "/export:jio_fprintf /export:jio_vfprintf "+
+                    "/export:jio_vsnprintf "+
+                    "/export:JVM_GetVersionInfo "+
+                    "/export:JVM_InitAgentProperties");
+        }
         addAttr(rv, "AdditionalDependencies", "kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;Wsock32.lib;winmm.lib;psapi.lib;version.lib");
         addAttr(rv, "OutputFile", outDll);
         addAttr(rv, "SuppressStartupBanner", "true");
@@ -7,7 +7,6 @@
 -Xbootclasspath/p:<directories and zip/jar files separated by ;>
                   prepend in front of bootstrap class path
 -Xnoclassgc       disable class garbage collection
--Xincgc           enable incremental garbage collection
 -Xloggc:<file>    log GC status to a file with time stamps
 -Xbatch           disable background compilation
 -Xms<size>        set initial Java heap size
@@ -53,6 +53,7 @@
 #include "runtime/reflection.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/thread.inline.hpp"
+#include "trace/tracing.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/macros.hpp"
 #ifdef COMPILER1

@@ -1141,6 +1142,16 @@ void ciEnv::record_failure(const char* reason) {
   }
 }

+void ciEnv::report_failure(const char* reason) {
+  // Create and fire JFR event
+  EventCompilerFailure event;
+  if (event.should_commit()) {
+    event.set_compileID(compile_id());
+    event.set_failure(reason);
+    event.commit();
+  }
+}
+
 // ------------------------------------------------------------------
 // ciEnv::record_method_not_compilable()
 void ciEnv::record_method_not_compilable(const char* reason, bool all_tiers) {

@@ -450,7 +450,8 @@ public:
   // Check for changes to the system dictionary during compilation
   bool system_dictionary_modification_counter_changed();

-  void record_failure(const char* reason);
+  void record_failure(const char* reason);      // Record failure and report later
+  void report_failure(const char* reason);      // Report failure immediately
   void record_method_not_compilable(const char* reason, bool all_tiers = true);
   void record_out_of_memory_failure();
@@ -68,7 +68,10 @@
 // ciMethod::ciMethod
 //
 // Loaded method.
-ciMethod::ciMethod(methodHandle h_m) : ciMetadata(h_m()) {
+ciMethod::ciMethod(methodHandle h_m, ciInstanceKlass* holder) :
+  ciMetadata(h_m()),
+  _holder(holder)
+{
   assert(h_m() != NULL, "no null method");

   // These fields are always filled in in loaded methods.

@@ -124,7 +127,6 @@ ciMethod::ciMethod(methodHandle h_m) : ciMetadata(h_m()) {
   // generating _signature may allow GC and therefore move m.
   // These fields are always filled in.
   _name = env->get_symbol(h_m()->name());
-  _holder = env->get_instance_klass(h_m()->method_holder());
   ciSymbol* sig_symbol = env->get_symbol(h_m()->signature());
   constantPoolHandle cpool = h_m()->constants();
   _signature = new (env->arena()) ciSignature(_holder, cpool, sig_symbol);

@@ -91,7 +91,7 @@ class ciMethod : public ciMetadata {
   BCEscapeAnalyzer* _bcea;
 #endif

-  ciMethod(methodHandle h_m);
+  ciMethod(methodHandle h_m, ciInstanceKlass* holder);
   ciMethod(ciInstanceKlass* holder, ciSymbol* name, ciSymbol* signature, ciInstanceKlass* accessor);

   Method* get_Method() const {
@@ -46,6 +46,7 @@
 #include "oops/oop.inline.hpp"
 #include "oops/oop.inline2.hpp"
 #include "runtime/fieldType.hpp"
+#include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
 # include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #endif

@@ -239,7 +240,7 @@ void ciObjectFactory::remove_symbols() {
 ciObject* ciObjectFactory::get(oop key) {
   ASSERT_IN_VM;

-  assert(key == NULL || Universe::heap()->is_in_reserved(key), "must be");
+  assert(Universe::heap()->is_in_reserved(key), "must be");

   NonPermObject* &bucket = find_non_perm(key);
   if (bucket != NULL) {

@@ -260,10 +261,10 @@ ciObject* ciObjectFactory::get(oop key) {
 }

 // ------------------------------------------------------------------
-// ciObjectFactory::get
+// ciObjectFactory::get_metadata
 //
-// Get the ciObject corresponding to some oop. If the ciObject has
-// already been created, it is returned. Otherwise, a new ciObject
+// Get the ciMetadata corresponding to some Metadata. If the ciMetadata has
+// already been created, it is returned. Otherwise, a new ciMetadata
 // is created.
 ciMetadata* ciObjectFactory::get_metadata(Metadata* key) {
   ASSERT_IN_VM;

@@ -290,9 +291,9 @@ ciMetadata* ciObjectFactory::get_metadata(Metadata* key) {
   }
 #endif
   if (!is_found_at(index, key, _ci_metadata)) {
-    // The ciObject does not yet exist. Create it and insert it
+    // The ciMetadata does not yet exist. Create it and insert it
     // into the cache.
-    ciMetadata* new_object = create_new_object(key);
+    ciMetadata* new_object = create_new_metadata(key);
     init_ident_of(new_object);
     assert(new_object->is_metadata(), "must be");
@@ -344,15 +345,28 @@ ciObject* ciObjectFactory::create_new_object(oop o) {
 }

 // ------------------------------------------------------------------
-// ciObjectFactory::create_new_object
+// ciObjectFactory::create_new_metadata
 //
-// Create a new ciObject from a Metadata*.
+// Create a new ciMetadata from a Metadata*.
 //
-// Implementation note: this functionality could be virtual behavior
-// of the oop itself. For now, we explicitly marshal the object.
-ciMetadata* ciObjectFactory::create_new_object(Metadata* o) {
+// Implementation note: in order to keep Metadata live, an auxiliary ciObject
+// is used, which points to it's holder.
+ciMetadata* ciObjectFactory::create_new_metadata(Metadata* o) {
   EXCEPTION_CONTEXT;

+  // Hold metadata from unloading by keeping it's holder alive.
+  if (_initialized && o->is_klass()) {
+    Klass* holder = ((Klass*)o);
+    if (holder->oop_is_instance() && InstanceKlass::cast(holder)->is_anonymous()) {
+      // Though ciInstanceKlass records class loader oop, it's not enough to keep
+      // VM anonymous classes alive (loader == NULL). Klass holder should be used instead.
+      // It is enough to record a ciObject, since cached elements are never removed
+      // during ciObjectFactory lifetime. ciObjectFactory itself is created for
+      // every compilation and lives for the whole duration of the compilation.
+      ciObject* h = get(holder->klass_holder());
+    }
+  }
+
   if (o->is_klass()) {
     KlassHandle h_k(THREAD, (Klass*)o);
     Klass* k = (Klass*)o;

@@ -365,14 +379,16 @@ ciMetadata* ciObjectFactory::create_new_object(Metadata* o) {
     }
   } else if (o->is_method()) {
     methodHandle h_m(THREAD, (Method*)o);
-    return new (arena()) ciMethod(h_m);
+    ciEnv *env = CURRENT_THREAD_ENV;
+    ciInstanceKlass* holder = env->get_instance_klass(h_m()->method_holder());
+    return new (arena()) ciMethod(h_m, holder);
   } else if (o->is_methodData()) {
     // Hold methodHandle alive - might not be necessary ???
     methodHandle h_m(THREAD, ((MethodData*)o)->method());
     return new (arena()) ciMethodData((MethodData*)o);
   }

-  // The oop is of some type not supported by the compiler interface.
+  // The Metadata* is of some type not supported by the compiler interface.
   ShouldNotReachHere();
   return NULL;
 }

@@ -701,7 +717,7 @@ static ciObjectFactory::NonPermObject* emptyBucket = NULL;
 // If there is no entry in the cache corresponding to this oop, return
 // the null tail of the bucket into which the oop should be inserted.
 ciObjectFactory::NonPermObject* &ciObjectFactory::find_non_perm(oop key) {
-  assert(Universe::heap()->is_in_reserved_or_null(key), "must be");
+  assert(Universe::heap()->is_in_reserved(key), "must be");
   ciMetadata* klass = get_metadata(key->klass());
   NonPermObject* *bp = &_non_perm_bucket[(unsigned) klass->hash() % NON_PERM_BUCKETS];
   for (NonPermObject* p; (p = (*bp)) != NULL; bp = &p->next()) {

@@ -73,7 +73,7 @@ private:
   void insert(int index, ciMetadata* obj, GrowableArray<ciMetadata*>* objects);

   ciObject* create_new_object(oop o);
-  ciMetadata* create_new_object(Metadata* o);
+  ciMetadata* create_new_metadata(Metadata* o);

   void ensure_metadata_alive(ciMetadata* m);
@@ -332,7 +332,7 @@ class CompileReplay : public StackObj {
   // Lookup a klass
   Klass* resolve_klass(const char* klass, TRAPS) {
     Symbol* klass_name = SymbolTable::lookup(klass, (int)strlen(klass), CHECK_NULL);
-    return SystemDictionary::resolve_or_fail(klass_name, _loader, _protection_domain, true, CHECK_NULL);
+    return SystemDictionary::resolve_or_fail(klass_name, _loader, _protection_domain, true, THREAD);
   }

   // Parse the standard tuple of <klass> <name> <signature>

@@ -35,6 +35,7 @@
 #include "interpreter/bytecode.hpp"
 #include "interpreter/bytecodes.hpp"
 #include "memory/allocation.inline.hpp"
+#include "opto/compile.hpp"
 #include "runtime/deoptimization.hpp"
 #include "utilities/growableArray.hpp"

@@ -2646,7 +2647,7 @@ void ciTypeFlow::df_flow_types(Block* start,
   assert (!blk->has_pre_order(), "");
   blk->set_next_pre_order();

-  if (_next_pre_order >= MaxNodeLimit / 2) {
+  if (_next_pre_order >= (int)Compile::current()->max_node_limit() / 2) {
     // Too many basic blocks. Bail out.
     // This can happen when try/finally constructs are nested to depth N,
     // and there is O(2**N) cloning of jsr bodies. See bug 4697245!
@@ -31,9 +31,6 @@
 #include "classfile/javaClasses.hpp"
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
-#if INCLUDE_CDS
-#include "classfile/systemDictionaryShared.hpp"
-#endif
 #include "classfile/verificationType.hpp"
 #include "classfile/verifier.hpp"
 #include "classfile/vmSymbols.hpp"

@@ -63,7 +60,11 @@
 #include "services/threadService.hpp"
 #include "utilities/array.hpp"
 #include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/ostream.hpp"
+#if INCLUDE_CDS
+#include "classfile/systemDictionaryShared.hpp"
+#endif

 // We generally try to create the oops directly when parsing, rather than
 // allocating temporary data structures and copying the bytes twice. A

@@ -2059,7 +2060,7 @@ methodHandle ClassFileParser::parse_method(bool is_interface,
   u2** localvariable_table_start;
   u2* localvariable_type_table_length;
   u2** localvariable_type_table_start;
-  u2 method_parameters_length = 0;
+  int method_parameters_length = -1;
   u1* method_parameters_data = NULL;
   bool method_parameters_seen = false;
   bool parsed_code_attribute = false;

@@ -2278,7 +2279,8 @@ methodHandle ClassFileParser::parse_method(bool is_interface,
       }
       method_parameters_seen = true;
       method_parameters_length = cfs->get_u1_fast();
-      if (method_attribute_length != (method_parameters_length * 4u) + 1u) {
+      const u2 real_length = (method_parameters_length * 4u) + 1u;
+      if (method_attribute_length != real_length) {
         classfile_parse_error(
           "Invalid MethodParameters method attribute length %u in class file",
           method_attribute_length, CHECK_(nullHandle));

@@ -2288,7 +2290,7 @@ methodHandle ClassFileParser::parse_method(bool is_interface,
       cfs->skip_u2_fast(method_parameters_length);
       // ignore this attribute if it cannot be reflected
       if (!SystemDictionary::Parameter_klass_loaded())
-        method_parameters_length = 0;
+        method_parameters_length = -1;
     } else if (method_attribute_name == vmSymbols::tag_synthetic()) {
       if (method_attribute_length != 0) {
         classfile_parse_error(
@@ -3491,17 +3493,18 @@ void ClassFileParser::layout_fields(Handle class_loader,
           real_offset = next_nonstatic_oop_offset;
           next_nonstatic_oop_offset += heapOopSize;
         }
-        // Update oop maps
+
+        // Record this oop in the oop maps
         if( nonstatic_oop_map_count > 0 &&
            nonstatic_oop_offsets[nonstatic_oop_map_count - 1] ==
            real_offset -
            int(nonstatic_oop_counts[nonstatic_oop_map_count - 1]) *
            heapOopSize ) {
-          // Extend current oop map
+          // This oop is adjacent to the previous one, add to current oop map
           assert(nonstatic_oop_map_count - 1 < max_nonstatic_oop_maps, "range check");
           nonstatic_oop_counts[nonstatic_oop_map_count - 1] += 1;
         } else {
-          // Create new oop map
+          // This oop is not adjacent to the previous one, create new oop map
           assert(nonstatic_oop_map_count < max_nonstatic_oop_maps, "range check");
           nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset;
           nonstatic_oop_counts [nonstatic_oop_map_count] = 1;

@@ -3623,13 +3626,24 @@ void ClassFileParser::layout_fields(Handle class_loader,
           real_offset = next_nonstatic_padded_offset;
           next_nonstatic_padded_offset += heapOopSize;

-          // Create new oop map
-          assert(nonstatic_oop_map_count < max_nonstatic_oop_maps, "range check");
-          nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset;
-          nonstatic_oop_counts [nonstatic_oop_map_count] = 1;
-          nonstatic_oop_map_count += 1;
-          if( first_nonstatic_oop_offset == 0 ) { // Undefined
-            first_nonstatic_oop_offset = real_offset;
+          // Record this oop in the oop maps
+          if( nonstatic_oop_map_count > 0 &&
+             nonstatic_oop_offsets[nonstatic_oop_map_count - 1] ==
+             real_offset -
+             int(nonstatic_oop_counts[nonstatic_oop_map_count - 1]) *
+             heapOopSize ) {
+            // This oop is adjacent to the previous one, add to current oop map
+            assert(nonstatic_oop_map_count - 1 < max_nonstatic_oop_maps, "range check");
+            nonstatic_oop_counts[nonstatic_oop_map_count - 1] += 1;
+          } else {
+            // This oop is not adjacent to the previous one, create new oop map
+            assert(nonstatic_oop_map_count < max_nonstatic_oop_maps, "range check");
+            nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset;
+            nonstatic_oop_counts [nonstatic_oop_map_count] = 1;
+            nonstatic_oop_map_count += 1;
+            if( first_nonstatic_oop_offset == 0 ) { // Undefined
+              first_nonstatic_oop_offset = real_offset;
+            }
           }
           break;
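Aside (illustration only): both layout_fields hunks above apply the same coalescing rule when recording oops: an offset exactly heapOopSize past the end of the previous map entry extends that entry, anything else opens a new one. A small stand-alone sketch of the rule, with hypothetical names and an assumed 8-byte slot size, is:

    #include <cstdio>
    #include <initializer_list>
    #include <vector>

    struct OopMapEntry { int offset; int count; };

    const int kHeapOopSize = 8;  // assumed slot size for the example

    void record_oop(std::vector<OopMapEntry>& maps, int real_offset) {
      if (!maps.empty() &&
          maps.back().offset + maps.back().count * kHeapOopSize == real_offset) {
        maps.back().count += 1;             // adjacent to the previous oop: extend the entry
      } else {
        maps.push_back({real_offset, 1});   // not adjacent: start a new entry
      }
    }

    int main() {
      std::vector<OopMapEntry> maps;
      for (int off : {16, 24, 32, 64, 72}) record_oop(maps, off);
      for (const OopMapEntry& e : maps) std::printf("offset=%d count=%d\n", e.offset, e.count);
      // prints: offset=16 count=3 and offset=64 count=2
      return 0;
    }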
@@ -30,10 +30,6 @@
 #include "classfile/classLoaderData.inline.hpp"
 #include "classfile/imageFile.hpp"
 #include "classfile/javaClasses.hpp"
-#if INCLUDE_CDS
-#include "classfile/sharedPathsMiscInfo.hpp"
-#include "classfile/sharedClassUtil.hpp"
-#endif
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "compiler/compileBroker.hpp"

@@ -65,8 +61,13 @@
 #include "services/management.hpp"
 #include "services/threadService.hpp"
 #include "utilities/events.hpp"
-#include "utilities/hashtable.hpp"
 #include "utilities/hashtable.inline.hpp"
+#include "utilities/macros.hpp"
+#if INCLUDE_CDS
+#include "classfile/sharedPathsMiscInfo.hpp"
+#include "classfile/sharedClassUtil.hpp"
+#endif

 // Entry points in zip.dll for loading zip/jar file entries and image file entries

@@ -1212,7 +1213,7 @@ instanceKlassHandle ClassLoader::load_classfile(Symbol* h_name, TRAPS) {
     h = context.record_result(classpath_index, e, result, THREAD);
   } else {
     if (DumpSharedSpaces) {
-      tty->print_cr("Preload Error: Cannot find %s", class_name);
+      tty->print_cr("Preload Warning: Cannot find %s", class_name);
     }
   }
@@ -27,6 +27,7 @@

 #include "classfile/classFileParser.hpp"
 #include "runtime/perfData.hpp"
+#include "utilities/macros.hpp"

 // The VM class loader.
 #include <sys/stat.h>

@@ -65,9 +65,8 @@
 #include "utilities/growableArray.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ostream.hpp"
-
 #if INCLUDE_TRACE
 #include "trace/tracing.hpp"
 #endif

 ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;

@@ -472,7 +471,7 @@ void ClassLoaderData::free_deallocate_list() {
 // These anonymous class loaders are to contain classes used for JSR292
 ClassLoaderData* ClassLoaderData::anonymous_class_loader_data(oop loader, TRAPS) {
   // Add a new class loader data to the graph.
-  return ClassLoaderDataGraph::add(loader, true, CHECK_NULL);
+  return ClassLoaderDataGraph::add(loader, true, THREAD);
 }

 const char* ClassLoaderData::loader_name() {

@@ -978,4 +977,4 @@ void ClassLoaderDataGraph::class_unload_event(Klass* const k) {
   event.commit();
 }

-#endif /* INCLUDE_TRACE */
+#endif // INCLUDE_TRACE
@@ -31,8 +31,9 @@
 #include "memory/metaspaceCounters.hpp"
 #include "runtime/mutex.hpp"
 #include "utilities/growableArray.hpp"
+#include "utilities/macros.hpp"
 #if INCLUDE_TRACE
-# include "utilities/ticks.hpp"
+#include "utilities/ticks.hpp"
 #endif

 //

@@ -63,6 +63,9 @@ public:
                                  ClassPathEntry* new_entry) {
     ClassLoader::add_to_list(new_entry);
   }
+  static void append_boot_classpath(ClassPathEntry* new_entry) {
+    ClassLoader::add_to_list(new_entry);
+  }
   static void setup_search_paths() {}
 };
@@ -493,7 +493,7 @@ class MethodFamily : public ResourceObj {
 };

 Symbol* MethodFamily::generate_no_defaults_message(TRAPS) const {
-  return SymbolTable::new_symbol("No qualifying defaults found", CHECK_NULL);
+  return SymbolTable::new_symbol("No qualifying defaults found", THREAD);
 }

 Symbol* MethodFamily::generate_method_message(Symbol *klass_name, Method* method, TRAPS) const {

@@ -506,7 +506,7 @@ Symbol* MethodFamily::generate_method_message(Symbol *klass_name, Method* method
   ss.write((const char*)name->bytes(), name->utf8_length());
   ss.write((const char*)signature->bytes(), signature->utf8_length());
   ss.print(" is abstract");
-  return SymbolTable::new_symbol(ss.base(), (int)ss.size(), CHECK_NULL);
+  return SymbolTable::new_symbol(ss.base(), (int)ss.size(), THREAD);
 }

 Symbol* MethodFamily::generate_conflicts_message(GrowableArray<Method*>* methods, TRAPS) const {

@@ -521,7 +521,7 @@ Symbol* MethodFamily::generate_conflicts_message(GrowableArray<Method*>* methods
     ss.print(".");
     ss.write((const char*)name->bytes(), name->utf8_length());
   }
-  return SymbolTable::new_symbol(ss.base(), (int)ss.size(), CHECK_NULL);
+  return SymbolTable::new_symbol(ss.base(), (int)ss.size(), THREAD);
 }

@@ -223,7 +223,7 @@ void Dictionary::remove_classes_in_error_state() {
       }
       free_entry(probe);
       ResourceMark rm;
-      tty->print_cr("Removed error class: %s", ik->external_name());
+      tty->print_cr("Preload Warning: Removed error class: %s", ik->external_name());
       continue;
     }
@@ -41,6 +41,7 @@
 #include "oops/method.hpp"
 #include "oops/symbol.hpp"
 #include "oops/typeArrayOop.hpp"
+#include "prims/jvmtiRedefineClassesTrace.hpp"
 #include "runtime/fieldDescriptor.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/interfaceSupport.hpp"

@@ -944,7 +945,7 @@ void java_lang_Thread::compute_offsets() {
   assert(_group_offset == 0, "offsets should be initialized only once");

   Klass* k = SystemDictionary::Thread_klass();
-  compute_offset(_name_offset, k, vmSymbols::name_name(), vmSymbols::char_array_signature());
+  compute_offset(_name_offset, k, vmSymbols::name_name(), vmSymbols::string_signature());
   compute_offset(_group_offset, k, vmSymbols::group_name(), vmSymbols::threadgroup_signature());
   compute_offset(_contextClassLoader_offset, k, vmSymbols::contextClassLoader_name(), vmSymbols::classloader_signature());
   compute_offset(_inheritedAccessControlContext_offset, k, vmSymbols::inheritedAccessControlContext_name(), vmSymbols::accesscontrolcontext_signature());

@@ -974,15 +975,12 @@ void java_lang_Thread::set_thread(oop java_thread, JavaThread* thread) {
 }


-typeArrayOop java_lang_Thread::name(oop java_thread) {
-  oop name = java_thread->obj_field(_name_offset);
-  assert(name == NULL || (name->is_typeArray() && TypeArrayKlass::cast(name->klass())->element_type() == T_CHAR), "just checking");
-  return typeArrayOop(name);
+oop java_lang_Thread::name(oop java_thread) {
+  return java_thread->obj_field(_name_offset);
 }


-void java_lang_Thread::set_name(oop java_thread, typeArrayOop name) {
-  assert(java_thread->obj_field(_name_offset) == NULL, "name should be NULL");
+void java_lang_Thread::set_name(oop java_thread, oop name) {
   java_thread->obj_field_put(_name_offset, name);
 }
@@ -1952,7 +1950,7 @@ Handle java_lang_reflect_Method::create(TRAPS) {
   // This class is eagerly initialized during VM initialization, since we keep a refence
   // to one of the methods
   assert(InstanceKlass::cast(klass)->is_initialized(), "must be initialized");
-  return InstanceKlass::cast(klass)->allocate_instance_handle(CHECK_NH);
+  return InstanceKlass::cast(klass)->allocate_instance_handle(THREAD);
 }

 oop java_lang_reflect_Method::clazz(oop reflect) {

@@ -2130,7 +2128,7 @@ Handle java_lang_reflect_Constructor::create(TRAPS) {
   instanceKlassHandle klass (THREAD, k);
   // Ensure it is initialized
   klass->initialize(CHECK_NH);
-  return klass->allocate_instance_handle(CHECK_NH);
+  return klass->allocate_instance_handle(THREAD);
 }

 oop java_lang_reflect_Constructor::clazz(oop reflect) {

@@ -2270,7 +2268,7 @@ Handle java_lang_reflect_Field::create(TRAPS) {
   instanceKlassHandle klass (THREAD, k);
   // Ensure it is initialized
   klass->initialize(CHECK_NH);
-  return klass->allocate_instance_handle(CHECK_NH);
+  return klass->allocate_instance_handle(THREAD);
 }

 oop java_lang_reflect_Field::clazz(oop reflect) {

@@ -2397,7 +2395,7 @@ Handle java_lang_reflect_Parameter::create(TRAPS) {
   instanceKlassHandle klass (THREAD, k);
   // Ensure it is initialized
   klass->initialize(CHECK_NH);
-  return klass->allocate_instance_handle(CHECK_NH);
+  return klass->allocate_instance_handle(THREAD);
 }

 oop java_lang_reflect_Parameter::name(oop param) {

@@ -2447,7 +2445,7 @@ Handle sun_reflect_ConstantPool::create(TRAPS) {
   instanceKlassHandle klass (THREAD, k);
   // Ensure it is initialized
   klass->initialize(CHECK_NH);
-  return klass->allocate_instance_handle(CHECK_NH);
+  return klass->allocate_instance_handle(THREAD);
 }
@@ -2797,12 +2795,35 @@ Metadata* java_lang_invoke_MemberName::vmtarget(oop mname) {
   return (Metadata*)mname->address_field(_vmtarget_offset);
 }

+bool java_lang_invoke_MemberName::is_method(oop mname) {
+  assert(is_instance(mname), "must be MemberName");
+  return (flags(mname) & (MN_IS_METHOD | MN_IS_CONSTRUCTOR)) > 0;
+}
+
 #if INCLUDE_JVMTI
 // Can be executed on VM thread only
-void java_lang_invoke_MemberName::adjust_vmtarget(oop mname, Metadata* ref) {
-  assert((is_instance(mname) && (flags(mname) & (MN_IS_METHOD | MN_IS_CONSTRUCTOR)) > 0), "wrong type");
+void java_lang_invoke_MemberName::adjust_vmtarget(oop mname, Method* old_method,
+                                                  Method* new_method, bool* trace_name_printed) {
+  assert(is_method(mname), "wrong type");
   assert(Thread::current()->is_VM_thread(), "not VM thread");
-  mname->address_field_put(_vmtarget_offset, (address)ref);
+
+  Method* target = (Method*)mname->address_field(_vmtarget_offset);
+  if (target == old_method) {
+    mname->address_field_put(_vmtarget_offset, (address)new_method);
+
+    if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
+      if (!(*trace_name_printed)) {
+        // RC_TRACE_MESG macro has an embedded ResourceMark
+        RC_TRACE_MESG(("adjust: name=%s",
+                       old_method->method_holder()->external_name()));
+        *trace_name_printed = true;
+      }
+      // RC_TRACE macro has an embedded ResourceMark
+      RC_TRACE(0x00400000, ("MemberName method update: %s(%s)",
+                            new_method->name()->as_C_string(),
+                            new_method->signature()->as_C_string()));
+    }
+  }
 }
 #endif // INCLUDE_JVMTI
@@ -345,8 +345,8 @@ class java_lang_Thread : AllStatic {
   // Set JavaThread for instance
   static void set_thread(oop java_thread, JavaThread* thread);
   // Name
-  static typeArrayOop name(oop java_thread);
-  static void set_name(oop java_thread, typeArrayOop name);
+  static oop name(oop java_thread);
+  static void set_name(oop java_thread, oop name);
   // Priority
   static ThreadPriority priority(oop java_thread);
   static void set_priority(oop java_thread, ThreadPriority priority);

@@ -1100,7 +1100,8 @@ class java_lang_invoke_MemberName: AllStatic {
   static Metadata* vmtarget(oop mname);
   static void set_vmtarget(oop mname, Metadata* target);
 #if INCLUDE_JVMTI
-  static void adjust_vmtarget(oop mname, Metadata* target);
+  static void adjust_vmtarget(oop mname, Method* old_method, Method* new_method,
+                              bool* trace_name_printed);
 #endif // INCLUDE_JVMTI

   static intptr_t vmindex(oop mname);

@@ -1114,6 +1115,8 @@ class java_lang_invoke_MemberName: AllStatic {
     return obj != NULL && is_subclass(obj->klass());
   }

+  static bool is_method(oop obj);
+
   // Relevant integer codes (keep these in synch. with MethodHandleNatives.Constants):
   enum {
     MN_IS_METHOD = 0x00010000, // method (not constructor)
@@ -36,6 +36,7 @@
 #include "runtime/atomic.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "utilities/hashtable.inline.hpp"
+#include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/g1StringDedup.hpp"

@@ -235,7 +235,7 @@ Symbol* SymbolTable::lookup(const char* name, int len, TRAPS) {
   MutexLocker ml(SymbolTable_lock, THREAD);

   // Otherwise, add to symbol to table
-  return the_table()->basic_add(index, (u1*)name, len, hashValue, true, CHECK_NULL);
+  return the_table()->basic_add(index, (u1*)name, len, hashValue, true, THREAD);
 }

 Symbol* SymbolTable::lookup(const Symbol* sym, int begin, int end, TRAPS) {

@@ -274,7 +274,7 @@ Symbol* SymbolTable::lookup(const Symbol* sym, int begin, int end, TRAPS) {
   // Grab SymbolTable_lock first.
   MutexLocker ml(SymbolTable_lock, THREAD);

-  return the_table()->basic_add(index, (u1*)buffer, len, hashValue, true, CHECK_NULL);
+  return the_table()->basic_add(index, (u1*)buffer, len, hashValue, true, THREAD);
 }

 Symbol* SymbolTable::lookup_only(const char* name, int len,
@@ -31,10 +31,6 @@
 #include "classfile/resolutionErrors.hpp"
 #include "classfile/stringTable.hpp"
 #include "classfile/systemDictionary.hpp"
-#if INCLUDE_CDS
-#include "classfile/sharedClassUtil.hpp"
-#include "classfile/systemDictionaryShared.hpp"
-#endif
 #include "classfile/vmSymbols.hpp"
 #include "compiler/compileBroker.hpp"
 #include "interpreter/bytecodeStream.hpp"

@@ -65,9 +61,12 @@
 #include "services/threadService.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ticks.hpp"
+#if INCLUDE_CDS
+#include "classfile/sharedClassUtil.hpp"
+#include "classfile/systemDictionaryShared.hpp"
+#endif
 #if INCLUDE_TRACE
 #include "trace/tracing.hpp"
 #endif

 Dictionary* SystemDictionary::_dictionary = NULL;

@@ -123,7 +122,7 @@ void SystemDictionary::compute_java_system_loader(TRAPS) {

 ClassLoaderData* SystemDictionary::register_loader(Handle class_loader, TRAPS) {
   if (class_loader() == NULL) return ClassLoaderData::the_null_class_loader_data();
-  return ClassLoaderDataGraph::find_or_create(class_loader, CHECK_NULL);
+  return ClassLoaderDataGraph::find_or_create(class_loader, THREAD);
 }

 // ----------------------------------------------------------------------------

@@ -233,15 +232,15 @@ Klass* SystemDictionary::resolve_or_null(Symbol* class_name, Handle class_loader
                class_name->as_C_string(),
                class_loader.is_null() ? "null" : class_loader->klass()->name()->as_C_string()));
   if (FieldType::is_array(class_name)) {
-    return resolve_array_class_or_null(class_name, class_loader, protection_domain, CHECK_NULL);
+    return resolve_array_class_or_null(class_name, class_loader, protection_domain, THREAD);
   } else if (FieldType::is_obj(class_name)) {
     ResourceMark rm(THREAD);
     // Ignore wrapping L and ;.
     TempNewSymbol name = SymbolTable::new_symbol(class_name->as_C_string() + 1,
                                                  class_name->utf8_length() - 2, CHECK_NULL);
-    return resolve_instance_class_or_null(name, class_loader, protection_domain, CHECK_NULL);
+    return resolve_instance_class_or_null(name, class_loader, protection_domain, THREAD);
   } else {
-    return resolve_instance_class_or_null(class_name, class_loader, protection_domain, CHECK_NULL);
+    return resolve_instance_class_or_null(class_name, class_loader, protection_domain, THREAD);
   }
 }
@@ -2660,7 +2659,7 @@ void SystemDictionary::post_class_load_event(const Ticks& start_time,
                        class_loader->klass() : (Klass*)NULL);
     event.commit();
   }
-#endif /* INCLUDE_TRACE */
+#endif // INCLUDE_TRACE
 }

 #ifndef PRODUCT

@@ -289,7 +289,7 @@ class VerificationType VALUE_OBJ_CLASS_SPEC {
     if (is_reference() && from.is_reference()) {
       return is_reference_assignable_from(from, context,
                                           from_field_is_protected,
-                                          CHECK_false);
+                                          THREAD);
     } else {
       return false;
     }

@@ -1927,7 +1927,7 @@ Klass* ClassVerifier::load_class(Symbol* name, TRAPS) {

   return SystemDictionary::resolve_or_fail(
     name, Handle(THREAD, loader), Handle(THREAD, protection_domain),
-    true, CHECK_NULL);
+    true, THREAD);
 }

 bool ClassVerifier::is_protected_access(instanceKlassHandle this_class,
@@ -43,7 +43,7 @@
 #include "c1/c1_Runtime1.hpp"
 #endif

-unsigned int align_code_offset(int offset) {
+unsigned int CodeBlob::align_code_offset(int offset) {
   // align the size to CodeEntryAlignment
   return
     ((offset + (int)CodeHeap::header_size() + (CodeEntryAlignment-1)) & ~(CodeEntryAlignment-1))

@@ -83,6 +83,7 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
  public:
   // Returns the space needed for CodeBlob
   static unsigned int allocation_size(CodeBuffer* cb, int header_size);
+  static unsigned int align_code_offset(int offset);

   // Creation
   // a) simple CodeBlob

@@ -207,7 +208,7 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
   }
 };

+class WhiteBox;
 //----------------------------------------------------------------------------------------------------
 // BufferBlob: used to hold non-relocatable machine code such as the interpreter, stubroutines, etc.

@@ -215,6 +216,7 @@ class BufferBlob: public CodeBlob {
   friend class VMStructs;
   friend class AdapterBlob;
   friend class MethodHandlesAdapterBlob;
+  friend class WhiteBox;

  private:
   // Creation support
@@ -305,7 +305,7 @@ void CodeCache::add_heap(ReservedSpace rs, const char* name, size_t size_initial
   MemoryService::add_code_heap_memory_pool(heap, name);
 }
 
-CodeHeap* CodeCache::get_code_heap(CodeBlob* cb) {
+CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
   assert(cb != NULL, "CodeBlob is null");
   FOR_ALL_HEAPS(heap) {
     if ((*heap)->contains(cb)) {
@@ -77,6 +77,7 @@ class DepChange;
 class CodeCache : AllStatic {
   friend class VMStructs;
   friend class NMethodIterator;
+  friend class WhiteBox;
  private:
   // CodeHeaps of the cache
   static GrowableArray<CodeHeap*>* _heaps;
@@ -98,7 +99,7 @@ class CodeCache : AllStatic {
   static void initialize_heaps();                             // Initializes the CodeHeaps
   // Creates a new heap with the given name and size, containing CodeBlobs of the given type
   static void add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type);
-  static CodeHeap* get_code_heap(CodeBlob* cb);               // Returns the CodeHeap for the given CodeBlob
+  static CodeHeap* get_code_heap(const CodeBlob* cb);         // Returns the CodeHeap for the given CodeBlob
   static CodeHeap* get_code_heap(int code_blob_type);        // Returns the CodeHeap for the given CodeBlobType
   // Returns the name of the VM option to set the size of the corresponding CodeHeap
   static const char* get_code_heap_flag_name(int code_blob_type);
@@ -912,6 +912,8 @@ class ClassHierarchyWalker {
   bool is_witness(Klass* k) {
     if (doing_subtype_search()) {
       return Dependencies::is_concrete_klass(k);
+    } else if (!k->oop_is_instance()) {
+      return false; // no methods to find in an array type
     } else {
       Method* m = InstanceKlass::cast(k)->find_method(_name, _signature);
       if (m == NULL || !Dependencies::is_concrete_method(m)) return false;
@@ -1118,7 +1120,7 @@ Klass* ClassHierarchyWalker::find_witness_anywhere(Klass* context_type,
   Klass* chain; // scratch variable
 #define ADD_SUBCLASS_CHAIN(k) { \
     assert(chaini < CHAINMAX, "oob"); \
-    chain = InstanceKlass::cast(k)->subklass(); \
+    chain = k->subklass(); \
     if (chain != NULL) chains[chaini++] = chain; }
 
   // Look for non-abstract subclasses.
@@ -1129,35 +1131,37 @@ Klass* ClassHierarchyWalker::find_witness_anywhere(Klass* context_type,
     // (Their subclasses are additional indirect implementors.
     // See InstanceKlass::add_implementor.)
     // (Note: nof_implementors is always zero for non-interfaces.)
-    int nof_impls = InstanceKlass::cast(context_type)->nof_implementors();
-    if (nof_impls > 1) {
-      // Avoid this case: *I.m > { A.m, C }; B.m > C
-      // Here, I.m has 2 concrete implementations, but m appears unique
-      // as A.m, because the search misses B.m when checking C.
-      // The inherited method B.m was getting missed by the walker
-      // when interface 'I' was the starting point.
-      // %%% Until this is fixed more systematically, bail out.
-      // (Old CHA had the same limitation.)
-      return context_type;
-    }
-    if (nof_impls > 0) {
-      Klass* impl = InstanceKlass::cast(context_type)->implementor();
-      assert(impl != NULL, "just checking");
-      // If impl is the same as the context_type, then more than one
-      // implementor has seen. No exact info in this case.
-      if (impl == context_type) {
-        return context_type; // report an inexact witness to this sad affair
-      }
-      if (do_counts)
-        { NOT_PRODUCT(deps_find_witness_steps++); }
-      if (is_participant(impl)) {
-        if (!participants_hide_witnesses) {
-          ADD_SUBCLASS_CHAIN(impl);
-        }
-      } else if (is_witness(impl) && !ignore_witness(impl)) {
-        return impl;
-      } else {
-        ADD_SUBCLASS_CHAIN(impl);
-      }
-    }
+    if (top_level_call) {
+      int nof_impls = InstanceKlass::cast(context_type)->nof_implementors();
+      if (nof_impls > 1) {
+        // Avoid this case: *I.m > { A.m, C }; B.m > C
+        // Here, I.m has 2 concrete implementations, but m appears unique
+        // as A.m, because the search misses B.m when checking C.
+        // The inherited method B.m was getting missed by the walker
+        // when interface 'I' was the starting point.
+        // %%% Until this is fixed more systematically, bail out.
+        // (Old CHA had the same limitation.)
+        return context_type;
+      }
+      if (nof_impls > 0) {
+        Klass* impl = InstanceKlass::cast(context_type)->implementor();
+        assert(impl != NULL, "just checking");
+        // If impl is the same as the context_type, then more than one
+        // implementor has seen. No exact info in this case.
+        if (impl == context_type) {
+          return context_type; // report an inexact witness to this sad affair
+        }
+        if (do_counts)
+          { NOT_PRODUCT(deps_find_witness_steps++); }
+        if (is_participant(impl)) {
+          if (!participants_hide_witnesses) {
+            ADD_SUBCLASS_CHAIN(impl);
+          }
+        } else if (is_witness(impl) && !ignore_witness(impl)) {
+          return impl;
+        } else {
+          ADD_SUBCLASS_CHAIN(impl);
+        }
+      }
+    }
 
@@ -35,6 +35,7 @@
 #include "oops/method.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/nativeLookup.hpp"
+#include "prims/whitebox.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/atomic.inline.hpp"
 #include "runtime/compilationPolicy.hpp"
@@ -593,7 +594,7 @@ void CompileTask::log_task_done(CompileLog* log) {
  * Add a CompileTask to a CompileQueue.
  */
 void CompileQueue::add(CompileTask* task) {
-  assert(lock()->owned_by_self(), "must own lock");
+  assert(MethodCompileQueue_lock->owned_by_self(), "must own lock");
 
   task->set_next(NULL);
   task->set_prev(NULL);
@@ -624,7 +625,7 @@ void CompileQueue::add(CompileTask* task) {
   }
 
   // Notify CompilerThreads that a task is available.
-  lock()->notify_all();
+  MethodCompileQueue_lock->notify_all();
 }
 
 /**
@@ -634,7 +635,7 @@ void CompileQueue::add(CompileTask* task) {
  * compilation is disabled.
  */
 void CompileQueue::free_all() {
-  MutexLocker mu(lock());
+  MutexLocker mu(MethodCompileQueue_lock);
   CompileTask* next = _first;
 
   // Iterate over all tasks in the compile queue
@@ -652,14 +653,14 @@ void CompileQueue::free_all() {
   _first = NULL;
 
   // Wake up all threads that block on the queue.
-  lock()->notify_all();
+  MethodCompileQueue_lock->notify_all();
 }
 
 /**
  * Get the next CompileTask from a CompileQueue
  */
 CompileTask* CompileQueue::get() {
-  MutexLocker locker(lock());
+  MutexLocker locker(MethodCompileQueue_lock);
   // If _first is NULL we have no more compile jobs. There are two reasons for
   // having no compile jobs: First, we compiled everything we wanted. Second,
   // we ran out of code cache so compilation has been disabled. In the latter
@@ -680,7 +681,7 @@ CompileTask* CompileQueue::get() {
     // We need a timed wait here, since compiler threads can exit if compilation
     // is disabled forever. We use 5 seconds wait time; the exiting of compiler threads
     // is not critical and we do not want idle compiler threads to wake up too often.
-    lock()->wait(!Mutex::_no_safepoint_check_flag, 5*1000);
+    MethodCompileQueue_lock->wait(!Mutex::_no_safepoint_check_flag, 5*1000);
   }
 
   if (CompileBroker::is_compilation_disabled_forever()) {
@@ -700,7 +701,7 @@ CompileTask* CompileQueue::get() {
 // Clean & deallocate stale compile tasks.
 // Temporarily releases MethodCompileQueue lock.
 void CompileQueue::purge_stale_tasks() {
-  assert(lock()->owned_by_self(), "must own lock");
+  assert(MethodCompileQueue_lock->owned_by_self(), "must own lock");
   if (_first_stale != NULL) {
     // Stale tasks are purged when MCQ lock is released,
     // but _first_stale updates are protected by MCQ lock.
@@ -709,7 +710,7 @@ void CompileQueue::purge_stale_tasks() {
     CompileTask* head = _first_stale;
     _first_stale = NULL;
     {
-      MutexUnlocker ul(lock());
+      MutexUnlocker ul(MethodCompileQueue_lock);
       for (CompileTask* task = head; task != NULL; ) {
         CompileTask* next_task = task->next();
         CompileTaskWrapper ctw(task); // Frees the task
@@ -721,7 +722,7 @@ void CompileQueue::purge_stale_tasks() {
 }
 
 void CompileQueue::remove(CompileTask* task) {
-  assert(lock()->owned_by_self(), "must own lock");
+  assert(MethodCompileQueue_lock->owned_by_self(), "must own lock");
   if (task->prev() != NULL) {
     task->prev()->set_next(task->next());
   } else {
@@ -741,7 +742,7 @@ void CompileQueue::remove(CompileTask* task) {
 }
 
 void CompileQueue::remove_and_mark_stale(CompileTask* task) {
-  assert(lock()->owned_by_self(), "must own lock");
+  assert(MethodCompileQueue_lock->owned_by_self(), "must own lock");
   remove(task);
 
   // Enqueue the task for reclamation (should be done outside MCQ lock)
@@ -779,7 +780,7 @@ void CompileBroker::print_compile_queues(outputStream* st) {
 }
 
 void CompileQueue::print(outputStream* st) {
-  assert(lock()->owned_by_self(), "must own lock");
+  assert(MethodCompileQueue_lock->owned_by_self(), "must own lock");
   st->print_cr("Contents of %s", name());
   st->print_cr("----------------------------");
   CompileTask* task = _first;
@@ -1065,11 +1066,11 @@ void CompileBroker::init_compiler_sweeper_threads(int c1_compiler_count, int c2_
 #endif // !ZERO && !SHARK
   // Initialize the compilation queue
   if (c2_compiler_count > 0) {
-    _c2_compile_queue = new CompileQueue("C2 compile queue", MethodCompileQueue_lock);
+    _c2_compile_queue = new CompileQueue("C2 compile queue");
     _compilers[1]->set_num_compiler_threads(c2_compiler_count);
   }
   if (c1_compiler_count > 0) {
-    _c1_compile_queue = new CompileQueue("C1 compile queue", MethodCompileQueue_lock);
+    _c1_compile_queue = new CompileQueue("C1 compile queue");
    _compilers[0]->set_num_compiler_threads(c1_compiler_count);
   }
 
@@ -1213,7 +1214,7 @@ void CompileBroker::compile_method_base(methodHandle method,
 
   // Acquire our lock.
   {
-    MutexLocker locker(queue->lock(), thread);
+    MutexLocker locker(MethodCompileQueue_lock, thread);
 
     // Make sure the method has not slipped into the queues since
     // last we checked; note that those checks were "fast bail-outs".
@@ -1806,7 +1807,7 @@ void CompileBroker::init_compiler_thread_log() {
              os::file_separator(), thread_id, os::current_process_id());
   }
 
-  fp = fopen(file_name, "at");
+  fp = fopen(file_name, "wt");
   if (fp != NULL) {
     if (LogCompilation && Verbose) {
       tty->print_cr("Opening compilation log %s", file_name);
@@ -1963,6 +1964,12 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
     if (comp == NULL) {
       ci_env.record_method_not_compilable("no compiler", !TieredCompilation);
     } else {
+      if (WhiteBoxAPI && WhiteBox::compilation_locked) {
+        MonitorLockerEx locker(Compilation_lock, Mutex::_no_safepoint_check_flag);
+        while (WhiteBox::compilation_locked) {
+          locker.wait(Mutex::_no_safepoint_check_flag);
+        }
+      }
       comp->compile_method(&ci_env, target, osr_bci);
     }
 
@@ -1978,6 +1985,7 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
 
     if (ci_env.failing()) {
       task->set_failure_reason(ci_env.failure_reason());
+      ci_env.report_failure(ci_env.failure_reason());
       const char* retry_message = ci_env.retry_message();
       if (_compilation_log != NULL) {
         _compilation_log->log_failure(thread, task, ci_env.failure_reason(), retry_message);
@@ -195,7 +195,6 @@ class CompilerCounters : public CHeapObj<mtCompiler> {
 class CompileQueue : public CHeapObj<mtCompiler> {
  private:
   const char* _name;
-  Monitor*    _lock;
 
   CompileTask* _first;
   CompileTask* _last;
@@ -206,9 +205,8 @@ class CompileQueue : public CHeapObj<mtCompiler> {
 
   void purge_stale_tasks();
  public:
-  CompileQueue(const char* name, Monitor* lock) {
+  CompileQueue(const char* name) {
     _name = name;
-    _lock = lock;
     _first = NULL;
     _last = NULL;
     _size = 0;
@@ -216,7 +214,6 @@ class CompileQueue : public CHeapObj<mtCompiler> {
   }
 
   const char* name() const { return _name; }
-  Monitor*    lock() const { return _lock; }
 
   void add(CompileTask* task);
   void remove(CompileTask* task);
@@ -418,6 +415,7 @@ class CompileBroker: AllStatic {
     shutdown_compilaton = 2
   };
 
+  static jint get_compilation_activity_mode() { return _should_compile_new_jobs; }
   static bool should_compile_new_jobs() { return UseCompiler && (_should_compile_new_jobs == run_compilation); }
   static bool set_should_compile_new_jobs(jint new_state) {
     // Return success if the current caller set it
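Taken together, the CompileQueue hunks above drop the per-queue Monitor and have every queue use the single global MethodCompileQueue_lock directly; both the C1 and C2 queues were already being constructed with that same lock, so the stored pointer appears to have added indirection without adding flexibility. A rough standard-C++ sketch of the before/after shape (illustrative only, not HotSpot code; names and members are invented for the example):

#include <mutex>
#include <string>
#include <utility>

std::mutex method_compile_queue_lock;   // stand-in for the global MethodCompileQueue_lock

class CompileQueueBefore {
  std::string name_;
  std::mutex* lock_;                    // each queue used to carry its own lock pointer
 public:
  CompileQueueBefore(std::string name, std::mutex* lock)
      : name_(std::move(name)), lock_(lock) {}
  void add(/* task */) { std::lock_guard<std::mutex> g(*lock_); /* ... enqueue ... */ }
};

class CompileQueueAfter {
  std::string name_;                    // no lock member: all queues share the global lock
 public:
  explicit CompileQueueAfter(std::string name) : name_(std::move(name)) {}
  void add(/* task */) { std::lock_guard<std::mutex> g(method_compile_queue_lock); /* ... enqueue ... */ }
};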
@@ -56,8 +56,10 @@ CompileLog::CompileLog(const char* file_name, FILE* fp, intx thread_id)
 }
 
 CompileLog::~CompileLog() {
-  delete _out;
+  delete _out; // Close fd in fileStream::~fileStream()
   _out = NULL;
+  // Remove partial file after merging in CompileLog::finish_log_on_error
+  unlink(_file);
   FREE_C_HEAP_ARRAY(char, _identities, mtCompiler);
   FREE_C_HEAP_ARRAY(char, _file, mtCompiler);
 }
@@ -278,10 +280,9 @@ void CompileLog::finish_log_on_error(outputStream* file, char* buf, int buflen)
         }
         file->print_raw_cr("</compilation_log>");
         close(partial_fd);
-        unlink(partial_file);
       }
       CompileLog* next_log = log->_next;
-      delete log;
+      delete log; // Removes partial file
       log = next_log;
     }
     _first = NULL;
@@ -89,9 +89,3 @@ void ConcurrentMarkSweepPolicy::initialize_gc_policy_counters() {
     _gc_policy_counters = new GCPolicyCounters("Copy:CMS", 2, 3);
   }
 }
-
-// Returns true if the incremental mode is enabled.
-bool ConcurrentMarkSweepPolicy::has_soft_ended_eden()
-{
-  return CMSIncrementalMode;
-}
@@ -42,9 +42,6 @@ class ConcurrentMarkSweepPolicy : public GenCollectorPolicy {
   virtual void initialize_size_policy(size_t init_eden_size,
                                       size_t init_promo_size,
                                       size_t init_survivor_size);
-
-  // Returns true if the incremental mode is enabled.
-  virtual bool has_soft_ended_eden();
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSCOLLECTORPOLICY_HPP
@@ -2083,17 +2083,13 @@ bool CompactibleFreeListSpace::should_concurrent_collect() const {
 }
 
 // Support for compaction
-
 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
-  SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
+  scan_and_forward(this, cp);
   // Prepare_for_compaction() uses the space between live objects
   // so that later phase can skip dead space quickly. So verification
   // of the free lists doesn't work after.
 }
 
-#define obj_size(q) adjustObjectSize(oop(q)->size())
-#define adjust_obj_size(s) adjustObjectSize(s)
-
 void CompactibleFreeListSpace::adjust_pointers() {
   // In other versions of adjust_pointers(), a bail out
   // based on the amount of live data in the generation
@@ -2101,12 +2097,12 @@ void CompactibleFreeListSpace::adjust_pointers() {
   // Cannot test used() == 0 here because the free lists have already
   // been mangled by the compaction.
 
-  SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
+  scan_and_adjust_pointers(this);
   // See note about verification in prepare_for_compaction().
 }
 
 void CompactibleFreeListSpace::compact() {
-  SCAN_AND_COMPACT(obj_size);
+  scan_and_compact(this);
 }
 
 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
@@ -2629,7 +2625,7 @@ void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>*
   // Get the #blocks we want to claim
   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
   assert(n_blks > 0, "Error");
-  assert(ResizePLAB || n_blks == OldPLABSize, "Error");
+  assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
   // In some cases, when the application has a phase change,
   // there may be a sudden and sharp shift in the object survival
   // profile, and updating the counts at the end of a scavenge
@@ -73,6 +73,13 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   friend class CMSCollector;
   // Local alloc buffer for promotion into this space.
   friend class CFLS_LAB;
+  // Allow scan_and_* functions to call (private) overrides of the auxiliary functions on this class
+  template <typename SpaceType>
+  friend void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space);
+  template <typename SpaceType>
+  friend void CompactibleSpace::scan_and_compact(SpaceType* space);
+  template <typename SpaceType>
+  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
 
   // "Size" of chunks of work (executed during parallel remark phases
   // of CMS collection); this probably belongs in CMSCollector, although
@@ -288,6 +295,28 @@ class CompactibleFreeListSpace: public CompactibleSpace {
     _bt.freed(start, size);
   }
 
+  // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
+  // See comments for CompactibleSpace for more information.
+  inline HeapWord* scan_limit() const {
+    return end();
+  }
+
+  inline bool scanned_block_is_obj(const HeapWord* addr) const {
+    return CompactibleFreeListSpace::block_is_obj(addr); // Avoid virtual call
+  }
+
+  inline size_t scanned_block_size(const HeapWord* addr) const {
+    return CompactibleFreeListSpace::block_size(addr); // Avoid virtual call
+  }
+
+  inline size_t adjust_obj_size(size_t size) const {
+    return adjustObjectSize(size);
+  }
+
+  inline size_t obj_size(const HeapWord* addr) const {
+    return adjustObjectSize(oop(addr)->size());
+  }
+
 protected:
   // Reset the indexed free list to its initial empty condition.
   void resetIndexedFreeListArray();
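The two hunks above are part of replacing the old SCAN_AND_FORWARD / SCAN_AND_ADJUST_POINTERS / SCAN_AND_COMPACT macros with function templates on CompactibleSpace that call small per-space inline hooks such as scan_limit() and scanned_block_size(). A toy sketch of the pattern, not HotSpot code, showing how a walker templated over the space type reaches those hooks with static dispatch instead of virtual calls:

#include <cstddef>
#include <utility>
#include <vector>

struct Block { std::size_t size; bool live; };

// Generic walker: SpaceType supplies the hooks, resolved at compile time.
template <typename SpaceType>
std::size_t scan_live_words(SpaceType* space) {
  std::size_t live = 0;
  for (std::size_t i = 0; i < space->scan_limit(); ++i) {
    if (space->scanned_block_is_obj(i)) {
      live += space->scanned_block_size(i);   // inlined; no virtual dispatch
    }
  }
  return live;
}

class ToySpace {
  std::vector<Block> blocks_;
 public:
  explicit ToySpace(std::vector<Block> b) : blocks_(std::move(b)) {}
  // Hooks named after the ones added in the diff above; signatures are
  // simplified (indices instead of HeapWord* addresses).
  std::size_t scan_limit() const                        { return blocks_.size(); }
  bool        scanned_block_is_obj(std::size_t i) const { return blocks_[i].live; }
  std::size_t scanned_block_size(std::size_t i) const   { return blocks_[i].size; }
};

int main() {
  ToySpace s({{4, true}, {8, false}, {2, true}});
  return static_cast<int>(scan_live_words(&s));         // returns 6 live words
}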
(File diff suppressed because it is too large.)
@@ -356,7 +356,6 @@ class CMSStats VALUE_OBJ_CLASS_SPEC {
   size_t _gc0_promoted;           // bytes promoted per gc0
   double _cms_duration;
   double _cms_duration_pre_sweep; // time from initiation to start of sweep
-  double _cms_duration_per_mb;
   double _cms_period;
   size_t _cms_allocated;          // bytes of direct allocation per gc0 period
 
@@ -383,17 +382,7 @@ class CMSStats VALUE_OBJ_CLASS_SPEC {
 
   unsigned int _valid_bits;
 
-  unsigned int _icms_duty_cycle;  // icms duty cycle (0-100).
-
  protected:
-
-  // Return a duty cycle that avoids wild oscillations, by limiting the amount
-  // of change between old_duty_cycle and new_duty_cycle (the latter is treated
-  // as a recommended value).
-  static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
-                                             unsigned int new_duty_cycle);
-  unsigned int icms_update_duty_cycle_impl();
-
   // In support of adjusting of cms trigger ratios based on history
   // of concurrent mode failure.
   double cms_free_adjustment_factor(size_t free) const;
@@ -426,7 +415,6 @@ class CMSStats VALUE_OBJ_CLASS_SPEC {
   size_t gc0_promoted() const { return _gc0_promoted; }
   double cms_period() const { return _cms_period; }
   double cms_duration() const { return _cms_duration; }
-  double cms_duration_per_mb() const { return _cms_duration_per_mb; }
   size_t cms_allocated() const { return _cms_allocated; }
 
   size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;}
@@ -458,12 +446,6 @@ class CMSStats VALUE_OBJ_CLASS_SPEC {
 
   // End of higher level statistics.
 
-  // Returns the cms incremental mode duty cycle, as a percentage (0-100).
-  unsigned int icms_duty_cycle() const { return _icms_duty_cycle; }
-
-  // Update the duty cycle and return the new value.
-  unsigned int icms_update_duty_cycle();
-
   // Debugging.
   void print_on(outputStream* st) const PRODUCT_RETURN;
   void print() const { print_on(gclog_or_tty); }
@@ -626,7 +608,6 @@ class CMSCollector: public CHeapObj<mtGC> {
   GCHeapSummary _last_heap_summary;
   MetaspaceSummary _last_metaspace_summary;
 
-  void register_foreground_gc_start(GCCause::Cause cause);
   void register_gc_start(GCCause::Cause cause);
   void register_gc_end();
   void save_heap_summary();
@@ -713,8 +694,6 @@ class CMSCollector: public CHeapObj<mtGC> {
   int    _numYields;
   size_t _numDirtyCards;
   size_t _sweep_count;
-  // Number of full gc's since the last concurrent gc.
-  uint   _full_gcs_since_conc_gc;
 
   // Occupancy used for bootstrapping stats
   double _bootstrap_occupancy;
@@ -725,13 +704,6 @@ class CMSCollector: public CHeapObj<mtGC> {
   // Timing, allocation and promotion statistics, used for scheduling.
   CMSStats _stats;
 
-  // Allocation limits installed in the young gen, used only in
-  // CMSIncrementalMode. When an allocation in the young gen would cross one of
-  // these limits, the cms generation is notified and the cms thread is started
-  // or stopped, respectively.
-  HeapWord* _icms_start_limit;
-  HeapWord* _icms_stop_limit;
-
   enum CMS_op_type {
     CMS_op_checkpointRootsInitial,
     CMS_op_checkpointRootsFinal
@@ -785,14 +757,14 @@ class CMSCollector: public CHeapObj<mtGC> {
   NOT_PRODUCT(bool par_simulate_overflow();) // MT version
 
   // CMS work methods
-  void checkpointRootsInitialWork(bool asynch); // Initial checkpoint work
+  void checkpointRootsInitialWork(); // Initial checkpoint work
 
   // A return value of false indicates failure due to stack overflow
-  bool markFromRootsWork(bool asynch); // Concurrent marking work
+  bool markFromRootsWork(); // Concurrent marking work
 
  public: // FIX ME!!! only for testing
-  bool do_marking_st(bool asynch); // Single-threaded marking
-  bool do_marking_mt(bool asynch); // Multi-threaded marking
+  bool do_marking_st(); // Single-threaded marking
+  bool do_marking_mt(); // Multi-threaded marking
 
  private:
 
@@ -813,20 +785,19 @@ class CMSCollector: public CHeapObj<mtGC> {
   void reset_survivor_plab_arrays();
 
   // Final (second) checkpoint work
-  void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
-                                bool init_mark_was_synchronous);
+  void checkpointRootsFinalWork();
   // Work routine for parallel version of remark
   void do_remark_parallel();
   // Work routine for non-parallel version of remark
   void do_remark_non_parallel();
   // Reference processing work routine (during second checkpoint)
-  void refProcessingWork(bool asynch, bool clear_all_soft_refs);
+  void refProcessingWork();
 
   // Concurrent sweeping work
-  void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);
+  void sweepWork(ConcurrentMarkSweepGeneration* gen);
 
   // (Concurrent) resetting of support data structures
-  void reset(bool asynch);
+  void reset(bool concurrent);
 
   // Clear _expansion_cause fields of constituent generations
   void clear_expansion_cause();
@@ -835,22 +806,10 @@ class CMSCollector: public CHeapObj<mtGC> {
   // used regions of each generation to limit the extent of sweep
   void save_sweep_limits();
 
-  // A work method used by foreground collection to determine
-  // what type of collection (compacting or not, continuing or fresh)
-  // it should do.
-  void decide_foreground_collection_type(bool clear_all_soft_refs,
-    bool* should_compact, bool* should_start_over);
-
   // A work method used by the foreground collector to do
   // a mark-sweep-compact.
   void do_compaction_work(bool clear_all_soft_refs);
 
-  // A work method used by the foreground collector to do
-  // a mark-sweep, after taking over from a possibly on-going
-  // concurrent mark-sweep collection.
-  void do_mark_sweep_work(bool clear_all_soft_refs,
-    CollectorState first_state, bool should_start_over);
-
   // Work methods for reporting concurrent mode interruption or failure
   bool is_external_interruption();
   void report_concurrent_mode_interruption();
@@ -867,10 +826,6 @@ class CMSCollector: public CHeapObj<mtGC> {
   // collector.
   bool waitForForegroundGC();
 
-  // Incremental mode triggering: recompute the icms duty cycle and set the
-  // allocation limits in the young gen.
-  void icms_update_allocation_limits();
-
   size_t block_size_using_printezis_bits(HeapWord* addr) const;
   size_t block_size_if_printezis_bits(HeapWord* addr) const;
   HeapWord* next_card_start_after_block(HeapWord* addr) const;
@@ -897,15 +852,13 @@ class CMSCollector: public CHeapObj<mtGC> {
   // Locking checks
   NOT_PRODUCT(static bool have_cms_token();)
 
-  // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
   bool shouldConcurrentCollect();
 
   void collect(bool full,
                bool clear_all_soft_refs,
                size_t size,
               bool tlab);
-  void collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause);
-  void collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause);
+  void collect_in_background(GCCause::Cause cause);
 
   // In support of ExplicitGCInvokesConcurrent
   static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause);
@@ -928,9 +881,6 @@ class CMSCollector: public CHeapObj<mtGC> {
   void promoted(bool par, HeapWord* start,
                 bool is_obj_array, size_t obj_size);
 
-  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
-                                     size_t word_size);
-
   void getFreelistLocks() const;
   void releaseFreelistLocks() const;
   bool haveFreelistLocks() const;
@@ -960,18 +910,16 @@ class CMSCollector: public CHeapObj<mtGC> {
   void directAllocated(HeapWord* start, size_t size);
 
   // Main CMS steps and related support
-  void checkpointRootsInitial(bool asynch);
-  bool markFromRoots(bool asynch); // a return value of false indicates failure
+  void checkpointRootsInitial();
+  bool markFromRoots();            // a return value of false indicates failure
                                    // due to stack overflow
   void preclean();
-  void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
-                            bool init_mark_was_synchronous);
-  void sweep(bool asynch);
+  void checkpointRootsFinal();
+  void sweep();
 
   // Check that the currently executing thread is the expected
   // one (foreground collector or background collector).
   static void check_correct_thread_executing() PRODUCT_RETURN;
-  // XXXPERM void print_statistics() PRODUCT_RETURN;
 
   bool is_cms_reachable(HeapWord* addr);
 
@@ -1001,14 +949,6 @@ class CMSCollector: public CHeapObj<mtGC> {
   // Timers/stats for gc scheduling and incremental mode pacing.
   CMSStats& stats() { return _stats; }
 
-  // Convenience methods that check whether CMSIncrementalMode is enabled and
-  // forward to the corresponding methods in ConcurrentMarkSweepThread.
-  static void start_icms();
-  static void stop_icms();    // Called at the end of the cms cycle.
-  static void disable_icms(); // Called before a foreground collection.
-  static void enable_icms();  // Called after a foreground collection.
-  void icms_wait();           // Called at yield points.
-
   // Adaptive size policy
   AdaptiveSizePolicy* size_policy();
 
@@ -1100,15 +1040,6 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
   // In support of MinChunkSize being larger than min object size
   const double _dilatation_factor;
 
-  enum CollectionTypes {
-    Concurrent_collection_type     = 0,
-    MS_foreground_collection_type  = 1,
-    MSC_foreground_collection_type = 2,
-    Unknown_collection_type        = 3
-  };
-
-  CollectionTypes _debug_collection_type;
-
   // True if a compacting collection was done.
   bool _did_compact;
   bool did_compact() { return _did_compact; }
@@ -1192,7 +1123,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
   // hack to allow the collection of the younger gen first if the flag is
   // set.
   virtual bool full_collects_younger_generations() const {
-    return UseCMSCompactAtFullCollection && !ScavengeBeforeFullGC;
+    return !ScavengeBeforeFullGC;
   }
 
   void space_iterate(SpaceClosure* blk, bool usedOnly = false);
@@ -1211,9 +1142,6 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
     return allocate(size, tlab);
   }
 
-  // Incremental mode triggering.
-  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
-                                     size_t word_size);
 
   // Used by CMSStats to track direct allocation. The value is sampled and
   // reset after each young gen collection.
@@ -1338,9 +1266,6 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
   // Resize the generation after a non-compacting
   // collection.
   void compute_new_size_free_list();
-
-  CollectionTypes debug_collection_type() { return _debug_collection_type; }
-  void rotate_debug_collection_type();
 };
 
 //
@@ -1387,7 +1312,6 @@ class Par_MarkFromRootsClosure: public BitMapClosure {
   CMSBitMap*    _mut;
   OopTaskQueue* _work_queue;
   CMSMarkStack* _overflow_stack;
-  bool          _yield;
   int           _skip_bits;
   HeapWord*     _finger;
   HeapWord*     _threshold;
@@ -1397,8 +1321,7 @@ class Par_MarkFromRootsClosure: public BitMapClosure {
                           MemRegion span,
                           CMSBitMap* bit_map,
                           OopTaskQueue* work_queue,
-                          CMSMarkStack* overflow_stack,
-                          bool should_yield);
+                          CMSMarkStack* overflow_stack);
   bool do_bit(size_t offset);
   inline void do_yield_check();
 
|
@ -234,36 +234,6 @@ inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
inline void CMSCollector::start_icms() {
|
|
||||||
if (CMSIncrementalMode) {
|
|
||||||
ConcurrentMarkSweepThread::start_icms();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
inline void CMSCollector::stop_icms() {
|
|
||||||
if (CMSIncrementalMode) {
|
|
||||||
ConcurrentMarkSweepThread::stop_icms();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
inline void CMSCollector::disable_icms() {
|
|
||||||
if (CMSIncrementalMode) {
|
|
||||||
ConcurrentMarkSweepThread::disable_icms();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
inline void CMSCollector::enable_icms() {
|
|
||||||
if (CMSIncrementalMode) {
|
|
||||||
ConcurrentMarkSweepThread::enable_icms();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
inline void CMSCollector::icms_wait() {
|
|
||||||
if (CMSIncrementalMode) {
|
|
||||||
cmsThread()->icms_wait();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
inline void CMSCollector::save_sweep_limits() {
|
inline void CMSCollector::save_sweep_limits() {
|
||||||
_cmsGen->save_sweep_limit();
|
_cmsGen->save_sweep_limit();
|
||||||
}
|
}
|
||||||
|
@ -363,12 +333,6 @@ inline void CMSStats::record_cms_end() {
|
||||||
_cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
|
_cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
|
||||||
cur_duration, _cms_alpha);
|
cur_duration, _cms_alpha);
|
||||||
|
|
||||||
// Avoid division by 0.
|
|
||||||
const size_t cms_used_mb = MAX2(_cms_used_at_cms_begin / M, (size_t)1);
|
|
||||||
_cms_duration_per_mb = AdaptiveWeightedAverage::exp_avg(_cms_duration_per_mb,
|
|
||||||
cur_duration / cms_used_mb,
|
|
||||||
_cms_alpha);
|
|
||||||
|
|
||||||
_cms_end_time.update();
|
_cms_end_time.update();
|
||||||
_cms_alpha = _saved_alpha;
|
_cms_alpha = _saved_alpha;
|
||||||
_allow_duty_cycle_reduction = true;
|
_allow_duty_cycle_reduction = true;
|
||||||
|
@ -400,15 +364,6 @@ inline double CMSStats::cms_consumption_rate() const {
|
||||||
return (gc0_promoted() + cms_allocated()) / gc0_period();
|
return (gc0_promoted() + cms_allocated()) / gc0_period();
|
||||||
}
|
}
|
||||||
|
|
||||||
inline unsigned int CMSStats::icms_update_duty_cycle() {
|
|
||||||
// Update the duty cycle only if pacing is enabled and the stats are valid
|
|
||||||
// (after at least one young gen gc and one cms cycle have completed).
|
|
||||||
if (CMSIncrementalPacing && valid()) {
|
|
||||||
return icms_update_duty_cycle_impl();
|
|
||||||
}
|
|
||||||
return _icms_duty_cycle;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
|
inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
|
||||||
cmsSpace()->save_sweep_limit();
|
cmsSpace()->save_sweep_limit();
|
||||||
}
|
}
|
||||||
|
@ -443,8 +398,7 @@ inline void MarkFromRootsClosure::do_yield_check() {
|
||||||
|
|
||||||
inline void Par_MarkFromRootsClosure::do_yield_check() {
|
inline void Par_MarkFromRootsClosure::do_yield_check() {
|
||||||
if (ConcurrentMarkSweepThread::should_yield() &&
|
if (ConcurrentMarkSweepThread::should_yield() &&
|
||||||
!_collector->foregroundGCIsActive() &&
|
!_collector->foregroundGCIsActive()) {
|
||||||
_yield) {
|
|
||||||
do_yield_work();
|
do_yield_work();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -49,13 +49,6 @@ bool ConcurrentMarkSweepThread::_should_terminate = false;
|
||||||
int ConcurrentMarkSweepThread::_CMS_flag = CMS_nil;
|
int ConcurrentMarkSweepThread::_CMS_flag = CMS_nil;
|
||||||
|
|
||||||
volatile jint ConcurrentMarkSweepThread::_pending_yields = 0;
|
volatile jint ConcurrentMarkSweepThread::_pending_yields = 0;
|
||||||
volatile jint ConcurrentMarkSweepThread::_pending_decrements = 0;
|
|
||||||
|
|
||||||
volatile jint ConcurrentMarkSweepThread::_icms_disabled = 0;
|
|
||||||
volatile bool ConcurrentMarkSweepThread::_should_run = false;
|
|
||||||
// When icms is enabled, the icms thread is stopped until explicitly
|
|
||||||
// started.
|
|
||||||
volatile bool ConcurrentMarkSweepThread::_should_stop = true;
|
|
||||||
|
|
||||||
SurrogateLockerThread*
|
SurrogateLockerThread*
|
||||||
ConcurrentMarkSweepThread::_slt = NULL;
|
ConcurrentMarkSweepThread::_slt = NULL;
|
||||||
|
@ -99,7 +92,6 @@ ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
_sltMonitor = SLT_lock;
|
_sltMonitor = SLT_lock;
|
||||||
assert(!CMSIncrementalMode || icms_is_enabled(), "Error");
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void ConcurrentMarkSweepThread::run() {
|
void ConcurrentMarkSweepThread::run() {
|
||||||
|
@ -142,7 +134,7 @@ void ConcurrentMarkSweepThread::run() {
|
||||||
if (_should_terminate) break;
|
if (_should_terminate) break;
|
||||||
GCCause::Cause cause = _collector->_full_gc_requested ?
|
GCCause::Cause cause = _collector->_full_gc_requested ?
|
||||||
_collector->_full_gc_cause : GCCause::_cms_concurrent_mark;
|
_collector->_full_gc_cause : GCCause::_cms_concurrent_mark;
|
||||||
_collector->collect_in_background(false, cause);
|
_collector->collect_in_background(cause);
|
||||||
}
|
}
|
||||||
assert(_should_terminate, "just checking");
|
assert(_should_terminate, "just checking");
|
||||||
// Check that the state of any protocol for synchronization
|
// Check that the state of any protocol for synchronization
|
||||||
|
@ -184,11 +176,6 @@ ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::start(CMSCollector* collec
|
||||||
}
|
}
|
||||||
|
|
||||||
void ConcurrentMarkSweepThread::stop() {
|
void ConcurrentMarkSweepThread::stop() {
|
||||||
if (CMSIncrementalMode) {
|
|
||||||
// Disable incremental mode and wake up the thread so it notices the change.
|
|
||||||
disable_icms();
|
|
||||||
start_icms();
|
|
||||||
}
|
|
||||||
// it is ok to take late safepoints here, if needed
|
// it is ok to take late safepoints here, if needed
|
||||||
{
|
{
|
||||||
MutexLockerEx x(Terminator_lock);
|
MutexLockerEx x(Terminator_lock);
|
||||||
|
@@ -387,23 +374,13 @@ void ConcurrentMarkSweepThread::wait_on_cms_lock_for_scavenge(long t_millis) {
 
 void ConcurrentMarkSweepThread::sleepBeforeNextCycle() {
   while (!_should_terminate) {
-    if (CMSIncrementalMode) {
-      icms_wait();
-      if(CMSWaitDuration >= 0) {
-        // Wait until the next synchronous GC, a concurrent full gc
-        // request or a timeout, whichever is earlier.
-        wait_on_cms_lock_for_scavenge(CMSWaitDuration);
-      }
-      return;
+    if(CMSWaitDuration >= 0) {
+      // Wait until the next synchronous GC, a concurrent full gc
+      // request or a timeout, whichever is earlier.
+      wait_on_cms_lock_for_scavenge(CMSWaitDuration);
     } else {
-      if(CMSWaitDuration >= 0) {
-        // Wait until the next synchronous GC, a concurrent full gc
-        // request or a timeout, whichever is earlier.
-        wait_on_cms_lock_for_scavenge(CMSWaitDuration);
-      } else {
-        // Wait until any cms_lock event or check interval not to call shouldConcurrentCollect permanently
-        wait_on_cms_lock(CMSCheckInterval);
-      }
+      // Wait until any cms_lock event or check interval not to call shouldConcurrentCollect permanently
+      wait_on_cms_lock(CMSCheckInterval);
     }
     // Check if we should start a CMS collection cycle
    if (_collector->shouldConcurrentCollect()) {
@@ -414,42 +391,6 @@ void ConcurrentMarkSweepThread::sleepBeforeNextCycle() {
     }
   }
 
-// Incremental CMS
-void ConcurrentMarkSweepThread::start_icms() {
-  assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking");
-  MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag);
-  trace_state("start_icms");
-  _should_run = true;
-  iCMS_lock->notify_all();
-}
-
-void ConcurrentMarkSweepThread::stop_icms() {
-  assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking");
-  MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag);
-  if (!_should_stop) {
-    trace_state("stop_icms");
-    _should_stop = true;
-    _should_run = false;
-    asynchronous_yield_request();
-    iCMS_lock->notify_all();
-  }
-}
-
-void ConcurrentMarkSweepThread::icms_wait() {
-  assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking");
-  if (_should_stop && icms_is_enabled()) {
-    MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag);
-    trace_state("pause_icms");
-    _collector->stats().stop_cms_timer();
-    while(!_should_run && icms_is_enabled()) {
-      iCMS_lock->wait(Mutex::_no_safepoint_check_flag);
-    }
-    _collector->stats().start_cms_timer();
-    _should_stop = false;
-    trace_state("pause_icms end");
-  }
-}
-
 // Note: this method, although exported by the ConcurrentMarkSweepThread,
 // which is a non-JavaThread, can only be called by a JavaThread.
 // Currently this is done at vm creation time (post-vm-init) by the
|
@ -64,20 +64,11 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
|
||||||
static bool clear_CMS_flag(int b) { return (_CMS_flag &= ~b) != 0; }
|
static bool clear_CMS_flag(int b) { return (_CMS_flag &= ~b) != 0; }
|
||||||
void sleepBeforeNextCycle();
|
void sleepBeforeNextCycle();
|
||||||
|
|
||||||
// CMS thread should yield for a young gen collection, direct allocation,
|
// CMS thread should yield for a young gen collection and direct allocations
|
||||||
// and iCMS activity.
|
|
||||||
static char _pad_1[64 - sizeof(jint)]; // prevent cache-line sharing
|
static char _pad_1[64 - sizeof(jint)]; // prevent cache-line sharing
|
||||||
static volatile jint _pending_yields;
|
static volatile jint _pending_yields;
|
||||||
static volatile jint _pending_decrements; // decrements to _pending_yields
|
|
||||||
static char _pad_2[64 - sizeof(jint)]; // prevent cache-line sharing
|
static char _pad_2[64 - sizeof(jint)]; // prevent cache-line sharing
|
||||||
|
|
||||||
// Tracing messages, enabled by CMSTraceThreadState.
|
|
||||||
static inline void trace_state(const char* desc);
|
|
||||||
|
|
||||||
static volatile int _icms_disabled; // a counter to track #iCMS disable & enable
|
|
||||||
static volatile bool _should_run; // iCMS may run
|
|
||||||
static volatile bool _should_stop; // iCMS should stop
|
|
||||||
|
|
||||||
// debugging
|
// debugging
|
||||||
void verify_ok_to_terminate() const PRODUCT_RETURN;
|
void verify_ok_to_terminate() const PRODUCT_RETURN;
|
||||||
|
|
||||||
|
@ -135,44 +126,13 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
|
||||||
void wait_on_cms_lock_for_scavenge(long t_millis);
|
void wait_on_cms_lock_for_scavenge(long t_millis);
|
||||||
|
|
||||||
// The CMS thread will yield during the work portion of its cycle
|
// The CMS thread will yield during the work portion of its cycle
|
||||||
// only when requested to. Both synchronous and asychronous requests
|
// only when requested to.
|
||||||
// are provided:
|
// A synchronous request is used for young gen collections and
|
||||||
// (1) A synchronous request is used for young gen collections and
|
// for direct allocations. The requesting thread increments
|
||||||
// for direct allocations. The requesting thread increments
|
// _pending_yields at the beginning of an operation, and decrements
|
||||||
// _pending_yields at the beginning of an operation, and decrements
|
// _pending_yields when that operation is completed.
|
||||||
// _pending_yields when that operation is completed.
|
// In turn, the CMS thread yields when _pending_yields is positive,
|
||||||
// In turn, the CMS thread yields when _pending_yields is positive,
|
// and continues to yield until the value reverts to 0.
|
||||||
// and continues to yield until the value reverts to 0.
|
|
||||||
// (2) An asynchronous request, on the other hand, is used by iCMS
|
|
||||||
// for the stop_icms() operation. A single yield satisfies all of
|
|
||||||
// the outstanding asynch yield requests, of which there may
|
|
||||||
// occasionally be several in close succession. To accomplish
|
|
||||||
// this, an asynch-requesting thread atomically increments both
|
|
||||||
// _pending_yields and _pending_decrements. An asynchr requesting
|
|
||||||
// thread does not wait and "acknowledge" completion of an operation
|
|
||||||
// and deregister the request, like the synchronous version described
|
|
||||||
// above does. In turn, after yielding, the CMS thread decrements both
|
|
||||||
// _pending_yields and _pending_decrements by the value seen in
|
|
||||||
// _pending_decrements before the decrement.
|
|
||||||
// NOTE: The above scheme is isomorphic to having two request counters,
|
|
||||||
// one for async requests and one for sync requests, and for the CMS thread
|
|
||||||
// to check the sum of the two counters to decide whether it should yield
|
|
||||||
// and to clear only the async counter when it yields. However, it turns out
|
|
||||||
// to be more efficient for CMS code to just check a single counter
|
|
||||||
// _pending_yields that holds the sum (of both sync and async requests), and
|
|
||||||
// a second counter _pending_decrements that only holds the async requests,
|
|
||||||
// for greater efficiency, since in a typical CMS run, there are many more
|
|
||||||
// potential (i.e. static) yield points than there are actual
|
|
||||||
// (i.e. dynamic) yields because of requests, which are few and far between.
|
|
||||||
//
|
|
||||||
// Note that, while "_pending_yields >= _pending_decrements" is an invariant,
|
|
||||||
// we cannot easily test that invariant, since the counters are manipulated via
|
|
||||||
// atomic instructions without explicit locking and we cannot read
|
|
||||||
// the two counters atomically together: one suggestion is to
|
|
||||||
// use (for example) 16-bit counters so as to be able to read the
|
|
||||||
// two counters atomically even on 32-bit platforms. Notice that
|
|
||||||
// the second assert in acknowledge_yield_request() below does indeed
|
|
||||||
// check a form of the above invariant, albeit indirectly.
|
|
||||||
|
|
||||||
static void increment_pending_yields() {
|
static void increment_pending_yields() {
|
||||||
Atomic::inc(&_pending_yields);
|
Atomic::inc(&_pending_yields);
|
||||||
|
@ -182,67 +142,9 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
|
||||||
Atomic::dec(&_pending_yields);
|
Atomic::dec(&_pending_yields);
|
||||||
assert(_pending_yields >= 0, "can't be negative");
|
assert(_pending_yields >= 0, "can't be negative");
|
||||||
}
|
}
|
||||||
static void asynchronous_yield_request() {
|
|
||||||
assert(CMSIncrementalMode, "Currently only used w/iCMS");
|
|
||||||
increment_pending_yields();
|
|
||||||
Atomic::inc(&_pending_decrements);
|
|
||||||
assert(_pending_decrements >= 0, "can't be negative");
|
|
||||||
}
|
|
||||||
static void acknowledge_yield_request() {
|
|
||||||
jint decrement = _pending_decrements;
|
|
||||||
if (decrement > 0) {
|
|
||||||
assert(CMSIncrementalMode, "Currently only used w/iCMS");
|
|
||||||
// Order important to preserve: _pending_yields >= _pending_decrements
|
|
||||||
Atomic::add(-decrement, &_pending_decrements);
|
|
||||||
Atomic::add(-decrement, &_pending_yields);
|
|
||||||
assert(_pending_decrements >= 0, "can't be negative");
|
|
||||||
assert(_pending_yields >= 0, "can't be negative");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
static bool should_yield() { return _pending_yields > 0; }
|
static bool should_yield() { return _pending_yields > 0; }
|
||||||
|
|
||||||
// CMS incremental mode.
|
|
||||||
static void start_icms(); // notify thread to start a quantum of work
|
|
||||||
static void stop_icms(); // request thread to stop working
|
|
||||||
void icms_wait(); // if asked to stop, wait until notified to start
|
|
||||||
|
|
||||||
// Incremental mode is enabled globally by the flag CMSIncrementalMode. It
|
|
||||||
// must also be enabled/disabled dynamically to allow foreground collections.
|
|
||||||
#define ICMS_ENABLING_ASSERT \
|
|
||||||
assert((CMSIncrementalMode && _icms_disabled >= 0) || \
|
|
||||||
(!CMSIncrementalMode && _icms_disabled <= 0), "Error")
|
|
||||||
|
|
||||||
static inline void enable_icms() {
|
|
||||||
ICMS_ENABLING_ASSERT;
|
|
||||||
Atomic::dec(&_icms_disabled);
|
|
||||||
}
|
|
||||||
static inline void disable_icms() {
|
|
||||||
ICMS_ENABLING_ASSERT;
|
|
||||||
Atomic::inc(&_icms_disabled);
|
|
||||||
}
|
|
||||||
static inline bool icms_is_disabled() {
|
|
||||||
ICMS_ENABLING_ASSERT;
|
|
||||||
return _icms_disabled > 0;
|
|
||||||
}
|
|
||||||
static inline bool icms_is_enabled() {
|
|
||||||
return !icms_is_disabled();
|
|
||||||
}
|
|
||||||
};
|
};
|
||||||
|
|
||||||
inline void ConcurrentMarkSweepThread::trace_state(const char* desc) {
|
|
||||||
if (CMSTraceThreadState) {
|
|
||||||
char buf[128];
|
|
||||||
TimeStamp& ts = gclog_or_tty->time_stamp();
|
|
||||||
if (!ts.is_updated()) {
|
|
||||||
ts.update();
|
|
||||||
}
|
|
||||||
jio_snprintf(buf, sizeof(buf), " [%.3f: CMSThread %s] ",
|
|
||||||
ts.seconds(), desc);
|
|
||||||
buf[sizeof(buf) - 1] = '\0';
|
|
||||||
gclog_or_tty->print("%s", buf);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// For scoped increment/decrement of (synchronous) yield requests
|
// For scoped increment/decrement of (synchronous) yield requests
|
||||||
class CMSSynchronousYieldRequest: public StackObj {
|
class CMSSynchronousYieldRequest: public StackObj {
|
||||||
public:
|
public:
|
||||||
|
|
|
@ -42,8 +42,12 @@ PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
|
||||||
void VM_CMS_Operation::acquire_pending_list_lock() {
|
void VM_CMS_Operation::acquire_pending_list_lock() {
|
||||||
// The caller may block while communicating
|
// The caller may block while communicating
|
||||||
// with the SLT thread in order to acquire/release the PLL.
|
// with the SLT thread in order to acquire/release the PLL.
|
||||||
ConcurrentMarkSweepThread::slt()->
|
SurrogateLockerThread* slt = ConcurrentMarkSweepThread::slt();
|
||||||
manipulatePLL(SurrogateLockerThread::acquirePLL);
|
if (slt != NULL) {
|
||||||
|
slt->manipulatePLL(SurrogateLockerThread::acquirePLL);
|
||||||
|
} else {
|
||||||
|
SurrogateLockerThread::report_missing_slt();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void VM_CMS_Operation::release_and_notify_pending_list_lock() {
|
void VM_CMS_Operation::release_and_notify_pending_list_lock() {
|
||||||
|
@ -207,12 +211,6 @@ void VM_GenCollectFullConcurrent::doit() {
|
||||||
MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
|
MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
|
||||||
assert(_full_gc_count_before <= gch->total_full_collections(), "Error");
|
assert(_full_gc_count_before <= gch->total_full_collections(), "Error");
|
||||||
if (gch->total_full_collections() == _full_gc_count_before) {
|
if (gch->total_full_collections() == _full_gc_count_before) {
|
||||||
// Disable iCMS until the full collection is done, and
|
|
||||||
// remember that we did so.
|
|
||||||
CMSCollector::disable_icms();
|
|
||||||
_disabled_icms = true;
|
|
||||||
// In case CMS thread was in icms_wait(), wake it up.
|
|
||||||
CMSCollector::start_icms();
|
|
||||||
// Nudge the CMS thread to start a concurrent collection.
|
// Nudge the CMS thread to start a concurrent collection.
|
||||||
CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause);
|
CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause);
|
||||||
} else {
|
} else {
|
||||||
|
@ -276,8 +274,4 @@ void VM_GenCollectFullConcurrent::doit_epilogue() {
|
||||||
FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
|
FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Enable iCMS back if we disabled it earlier.
|
|
||||||
if (_disabled_icms) {
|
|
||||||
CMSCollector::enable_icms();
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -128,13 +128,11 @@ class VM_CMS_Final_Remark: public VM_CMS_Operation {
|
||||||
// VM operation to invoke a concurrent collection of the heap as a
|
// VM operation to invoke a concurrent collection of the heap as a
|
||||||
// GenCollectedHeap heap.
|
// GenCollectedHeap heap.
|
||||||
class VM_GenCollectFullConcurrent: public VM_GC_Operation {
|
class VM_GenCollectFullConcurrent: public VM_GC_Operation {
|
||||||
bool _disabled_icms;
|
|
||||||
public:
|
public:
|
||||||
VM_GenCollectFullConcurrent(unsigned int gc_count_before,
|
VM_GenCollectFullConcurrent(unsigned int gc_count_before,
|
||||||
unsigned int full_gc_count_before,
|
unsigned int full_gc_count_before,
|
||||||
GCCause::Cause gc_cause)
|
GCCause::Cause gc_cause)
|
||||||
: VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */),
|
: VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */)
|
||||||
_disabled_icms(false)
|
|
||||||
{
|
{
|
||||||
assert(FullGCCount_lock != NULL, "Error");
|
assert(FullGCCount_lock != NULL, "Error");
|
||||||
assert(UseAsyncConcMarkSweepGC, "Else will hang caller");
|
assert(UseAsyncConcMarkSweepGC, "Else will hang caller");
|
||||||
|
|
|
@ -1888,7 +1888,7 @@ jint G1CollectedHeap::initialize() {
|
||||||
initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
|
initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
|
||||||
|
|
||||||
// Create the gen rem set (and barrier set) for the entire reserved region.
|
// Create the gen rem set (and barrier set) for the entire reserved region.
|
||||||
_rem_set = collector_policy()->create_rem_set(reserved_region(), 2);
|
_rem_set = collector_policy()->create_rem_set(reserved_region());
|
||||||
set_barrier_set(rem_set()->bs());
|
set_barrier_set(rem_set()->bs());
|
||||||
if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
|
if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
|
||||||
vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
|
vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
|
||||||
|
@ -4270,10 +4270,11 @@ void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
|
||||||
|
|
||||||
if (state == G1CollectedHeap::InCSet) {
|
if (state == G1CollectedHeap::InCSet) {
|
||||||
oop forwardee;
|
oop forwardee;
|
||||||
if (obj->is_forwarded()) {
|
markOop m = obj->mark();
|
||||||
forwardee = obj->forwardee();
|
if (m->is_marked()) {
|
||||||
|
forwardee = (oop) m->decode_pointer();
|
||||||
} else {
|
} else {
|
||||||
forwardee = _par_scan_state->copy_to_survivor_space(obj);
|
forwardee = _par_scan_state->copy_to_survivor_space(obj, m);
|
||||||
}
|
}
|
||||||
assert(forwardee != NULL, "forwardee should not be NULL");
|
assert(forwardee != NULL, "forwardee should not be NULL");
|
||||||
oopDesc::encode_store_heap_oop(p, forwardee);
|
oopDesc::encode_store_heap_oop(p, forwardee);
|
||||||
|
|
|
@ -1248,7 +1248,7 @@ public:
|
||||||
// The same as above but assume that the caller holds the Heap_lock.
|
// The same as above but assume that the caller holds the Heap_lock.
|
||||||
void collect_locked(GCCause::Cause cause);
|
void collect_locked(GCCause::Cause cause);
|
||||||
|
|
||||||
virtual void copy_allocation_context_stats(const jint* contexts,
|
virtual bool copy_allocation_context_stats(const jint* contexts,
|
||||||
jlong* totals,
|
jlong* totals,
|
||||||
jbyte* accuracy,
|
jbyte* accuracy,
|
||||||
jint len);
|
jint len);
|
||||||
|
|
|
@ -25,8 +25,9 @@
|
||||||
#include "precompiled.hpp"
|
#include "precompiled.hpp"
|
||||||
#include "gc_implementation/g1/g1CollectedHeap.hpp"
|
#include "gc_implementation/g1/g1CollectedHeap.hpp"
|
||||||
|
|
||||||
void G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
|
bool G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
|
||||||
jlong* totals,
|
jlong* totals,
|
||||||
jbyte* accuracy,
|
jbyte* accuracy,
|
||||||
jint len) {
|
jint len) {
|
||||||
|
return false;
|
||||||
}
|
}
|
||||||
|
|
|
@ -1585,34 +1585,22 @@ public:
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
uint G1CollectorPolicy::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) {
|
||||||
|
assert(n_workers > 0, "Active gc workers should be greater than 0");
|
||||||
|
const uint overpartition_factor = 4;
|
||||||
|
const uint min_chunk_size = MAX2(n_regions / n_workers, 1U);
|
||||||
|
return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
|
||||||
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
|
G1CollectorPolicy::record_concurrent_mark_cleanup_end(uint n_workers) {
|
||||||
_collectionSetChooser->clear();
|
_collectionSetChooser->clear();
|
||||||
|
|
||||||
uint region_num = _g1->num_regions();
|
uint n_regions = _g1->num_regions();
|
||||||
const uint OverpartitionFactor = 4;
|
uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
|
||||||
uint WorkUnit;
|
_collectionSetChooser->prepare_for_par_region_addition(n_regions, chunk_size);
|
||||||
// The use of MinChunkSize = 8 in the original code
|
ParKnownGarbageTask par_known_garbage_task(_collectionSetChooser, chunk_size, n_workers);
|
||||||
// causes some assertion failures when the total number of
|
_g1->workers()->run_task(&par_known_garbage_task);
|
||||||
// region is less than 8. The code here tries to fix that.
|
|
||||||
// Should the original code also be fixed?
|
|
||||||
if (no_of_gc_threads > 0) {
|
|
||||||
const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U);
|
|
||||||
WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor),
|
|
||||||
MinWorkUnit);
|
|
||||||
} else {
|
|
||||||
assert(no_of_gc_threads > 0,
|
|
||||||
"The active gc workers should be greater than 0");
|
|
||||||
// In a product build do something reasonable to avoid a crash.
|
|
||||||
const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U);
|
|
||||||
WorkUnit =
|
|
||||||
MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
|
|
||||||
MinWorkUnit);
|
|
||||||
}
|
|
||||||
_collectionSetChooser->prepare_for_par_region_addition(_g1->num_regions(),
|
|
||||||
WorkUnit);
|
|
||||||
ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser, WorkUnit, (uint) no_of_gc_threads);
|
|
||||||
_g1->workers()->run_task(&parKnownGarbageTask);
|
|
||||||
|
|
||||||
_collectionSetChooser->sort_regions();
|
_collectionSetChooser->sort_regions();
|
||||||
|
|
||||||
|
|
|
@ -612,6 +612,10 @@ private:
|
||||||
uint desired_min_length,
|
uint desired_min_length,
|
||||||
uint desired_max_length);
|
uint desired_max_length);
|
||||||
|
|
||||||
|
// Calculate and return chunk size (in number of regions) for parallel
|
||||||
|
// concurrent mark cleanup.
|
||||||
|
uint calculate_parallel_work_chunk_size(uint n_workers, uint n_regions);
|
||||||
|
|
||||||
// Check whether a given young length (young_length) fits into the
|
// Check whether a given young length (young_length) fits into the
|
||||||
// given target pause time and whether the prediction for the amount
|
// given target pause time and whether the prediction for the amount
|
||||||
// of objects to be copied for the given length will fit into the
|
// of objects to be copied for the given length will fit into the
|
||||||
|
@ -687,7 +691,7 @@ public:
|
||||||
|
|
||||||
// Record start, end, and completion of cleanup.
|
// Record start, end, and completion of cleanup.
|
||||||
void record_concurrent_mark_cleanup_start();
|
void record_concurrent_mark_cleanup_start();
|
||||||
void record_concurrent_mark_cleanup_end(int no_of_gc_threads);
|
void record_concurrent_mark_cleanup_end(uint n_workers);
|
||||||
void record_concurrent_mark_cleanup_completed();
|
void record_concurrent_mark_cleanup_completed();
|
||||||
|
|
||||||
// Records the information about the heap size for reporting in
|
// Records the information about the heap size for reporting in
|
||||||
|
|
|
@ -150,7 +150,8 @@ void G1ParScanThreadState::trim_queue() {
|
||||||
} while (!_refs->is_empty());
|
} while (!_refs->is_empty());
|
||||||
}
|
}
|
||||||
|
|
||||||
oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
|
oop G1ParScanThreadState::copy_to_survivor_space(oop const old,
|
||||||
|
markOop const old_mark) {
|
||||||
size_t word_sz = old->size();
|
size_t word_sz = old->size();
|
||||||
HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
|
HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
|
||||||
// +1 to make the -1 indexes valid...
|
// +1 to make the -1 indexes valid...
|
||||||
|
@ -158,9 +159,8 @@ oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
|
||||||
assert( (from_region->is_young() && young_index > 0) ||
|
assert( (from_region->is_young() && young_index > 0) ||
|
||||||
(!from_region->is_young() && young_index == 0), "invariant" );
|
(!from_region->is_young() && young_index == 0), "invariant" );
|
||||||
G1CollectorPolicy* g1p = _g1h->g1_policy();
|
G1CollectorPolicy* g1p = _g1h->g1_policy();
|
||||||
markOop m = old->mark();
|
uint age = old_mark->has_displaced_mark_helper() ? old_mark->displaced_mark_helper()->age()
|
||||||
int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
|
: old_mark->age();
|
||||||
: m->age();
|
|
||||||
GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
|
GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
|
||||||
word_sz);
|
word_sz);
|
||||||
AllocationContext_t context = from_region->allocation_context();
|
AllocationContext_t context = from_region->allocation_context();
|
||||||
|
@ -196,30 +196,22 @@ oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
|
||||||
alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured;
|
alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured;
|
||||||
|
|
||||||
if (g1p->track_object_age(alloc_purpose)) {
|
if (g1p->track_object_age(alloc_purpose)) {
|
||||||
// We could simply do obj->incr_age(). However, this causes a
|
if (age < markOopDesc::max_age) {
|
||||||
// performance issue. obj->incr_age() will first check whether
|
age++;
|
||||||
// the object has a displaced mark by checking its mark word;
|
}
|
||||||
// getting the mark word from the new location of the object
|
if (old_mark->has_displaced_mark_helper()) {
|
||||||
// stalls. So, given that we already have the mark word and we
|
// In this case, we have to install the mark word first,
|
||||||
// are about to install it anyway, it's better to increase the
|
|
||||||
// age on the mark word, when the object does not have a
|
|
||||||
// displaced mark word. We're not expecting many objects to have
|
|
||||||
// a displaced marked word, so that case is not optimized
|
|
||||||
// further (it could be...) and we simply call obj->incr_age().
|
|
||||||
|
|
||||||
if (m->has_displaced_mark_helper()) {
|
|
||||||
// in this case, we have to install the mark word first,
|
|
||||||
// otherwise obj looks to be forwarded (the old mark word,
|
// otherwise obj looks to be forwarded (the old mark word,
|
||||||
// which contains the forward pointer, was copied)
|
// which contains the forward pointer, was copied)
|
||||||
obj->set_mark(m);
|
obj->set_mark(old_mark);
|
||||||
obj->incr_age();
|
markOop new_mark = old_mark->displaced_mark_helper()->set_age(age);
|
||||||
|
old_mark->set_displaced_mark_helper(new_mark);
|
||||||
} else {
|
} else {
|
||||||
m = m->incr_age();
|
obj->set_mark(old_mark->set_age(age));
|
||||||
obj->set_mark(m);
|
|
||||||
}
|
}
|
||||||
age_table()->add(obj, word_sz);
|
age_table()->add(age, word_sz);
|
||||||
} else {
|
} else {
|
||||||
obj->set_mark(m);
|
obj->set_mark(old_mark);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (G1StringDedup::is_enabled()) {
|
if (G1StringDedup::is_enabled()) {
|
||||||
|
|
|
@ -195,7 +195,7 @@ class G1ParScanThreadState : public StackObj {
|
||||||
inline void dispatch_reference(StarTask ref);
|
inline void dispatch_reference(StarTask ref);
|
||||||
public:
|
public:
|
||||||
|
|
||||||
oop copy_to_survivor_space(oop const obj);
|
oop copy_to_survivor_space(oop const obj, markOop const old_mark);
|
||||||
|
|
||||||
void trim_queue();
|
void trim_queue();
|
||||||
|
|
||||||
|
|
|
@ -41,10 +41,11 @@ template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from
|
||||||
G1CollectedHeap::in_cset_state_t in_cset_state = _g1h->in_cset_state(obj);
|
G1CollectedHeap::in_cset_state_t in_cset_state = _g1h->in_cset_state(obj);
|
||||||
if (in_cset_state == G1CollectedHeap::InCSet) {
|
if (in_cset_state == G1CollectedHeap::InCSet) {
|
||||||
oop forwardee;
|
oop forwardee;
|
||||||
if (obj->is_forwarded()) {
|
markOop m = obj->mark();
|
||||||
forwardee = obj->forwardee();
|
if (m->is_marked()) {
|
||||||
|
forwardee = (oop) m->decode_pointer();
|
||||||
} else {
|
} else {
|
||||||
forwardee = copy_to_survivor_space(obj);
|
forwardee = copy_to_survivor_space(obj, m);
|
||||||
}
|
}
|
||||||
oopDesc::encode_store_heap_oop(p, forwardee);
|
oopDesc::encode_store_heap_oop(p, forwardee);
|
||||||
} else if (in_cset_state == G1CollectedHeap::IsHumongous) {
|
} else if (in_cset_state == G1CollectedHeap::IsHumongous) {
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue