commit b49de4da48
Jiangli Zhou 2013-09-27 13:53:43 -04:00
941 changed files with 20753 additions and 8128 deletions

View file

@ -229,3 +229,5 @@ b5ed503c26ad38869c247c5e32debec217fd056b jdk8-b104
589f4fdc584e373a47cde0162e9eceec9165c381 jdk8-b105
514b0b69fb9683ef52062fd962a3e0644431f64d jdk8-b106
892889f445755790ae90e61775bfb59ddc6182b5 jdk8-b107
74049f7a28b48c14910106a75d9f2504169c352e jdk8-b108
af9a674e12a16da1a4bd53e4990ddb1121a21ef1 jdk8-b109

View file

@ -229,3 +229,5 @@ b7e64be81c8a7690703df5711f4fc2375da8a9cb jdk8-b103
5166118c59178b5d31001bc4058e92486ee07d9b jdk8-b105
8e7b4d9fb00fdf1334376aeac050c9bca6d1b383 jdk8-b106
0874bb4707b723d5bb108d379c557cf41529d1a7 jdk8-b107
9286a6e61291246d88af713f1ef79adeea30fe2e jdk8-b108
91f47e8da5c60de58ed195e9b57f3bf192a18f83 jdk8-b109

View file

@ -76,13 +76,13 @@ diff_text() {
TMP=1
if [[ "$THIS_FILE" = *"META-INF/MANIFEST.MF" ]]; then
TMP=$(LANG=C $DIFF $OTHER_FILE $THIS_FILE | \
TMP=$(LC_ALL=C $DIFF $OTHER_FILE $THIS_FILE | \
$GREP '^[<>]' | \
$SED -e '/[<>] Ant-Version: Apache Ant .*/d' \
-e '/[<>] Created-By: .* (Oracle Corporation).*/d')
fi
if test "x$SUFFIX" = "xjava"; then
TMP=$(LANG=C $DIFF $OTHER_FILE $THIS_FILE | \
TMP=$(LC_ALL=C $DIFF $OTHER_FILE $THIS_FILE | \
$GREP '^[<>]' | \
$SED -e '/[<>] \* from.*\.idl/d' \
-e '/[<>] \*.*[0-9]\{4\}_[0-9]\{2\}_[0-9]\{2\}_[0-9]\{2\}_[0-9]\{2\}-b[0-9]\{2\}.*/d' \
@ -121,8 +121,8 @@ diff_text() {
# | $SED -e '/^#/d' -e '/^$/d' \
# -e :a -e '/\\$/N; s/\\\n//; ta' \
# -e 's/^[ \t]*//;s/[ \t]*$//' \
# -e 's/\\=/=/' | LANG=C $SORT > $OTHER_FILE.cleaned
TMP=$(LANG=C $DIFF $OTHER_FILE.cleaned $THIS_FILE)
# -e 's/\\=/=/' | LC_ALL=C $SORT > $OTHER_FILE.cleaned
TMP=$(LC_ALL=C $DIFF $OTHER_FILE.cleaned $THIS_FILE)
fi
if test -n "$TMP"; then
echo Files $OTHER_FILE and $THIS_FILE differ
@ -410,11 +410,11 @@ compare_zip_file() {
CONTENTS_DIFF_FILE=$WORK_DIR/$ZIP_FILE.diff
# On solaris, there is no -q option.
if [ "$OPENJDK_TARGET_OS" = "solaris" ]; then
LANG=C $DIFF -r $OTHER_UNZIPDIR $THIS_UNZIPDIR \
LC_ALL=C $DIFF -r $OTHER_UNZIPDIR $THIS_UNZIPDIR \
| $GREP -v -e "^<" -e "^>" -e "^Common subdirectories:" \
> $CONTENTS_DIFF_FILE
else
LANG=C $DIFF -rq $OTHER_UNZIPDIR $THIS_UNZIPDIR > $CONTENTS_DIFF_FILE
LC_ALL=C $DIFF -rq $OTHER_UNZIPDIR $THIS_UNZIPDIR > $CONTENTS_DIFF_FILE
fi
ONLY_OTHER=$($GREP "^Only in $OTHER_UNZIPDIR" $CONTENTS_DIFF_FILE)
@ -459,11 +459,11 @@ compare_zip_file() {
if [ -n "$SHOW_DIFFS" ]; then
for i in $(cat $WORK_DIR/$ZIP_FILE.difflist) ; do
if [ -f "${OTHER_UNZIPDIR}/$i.javap" ]; then
LANG=C $DIFF ${OTHER_UNZIPDIR}/$i.javap ${THIS_UNZIPDIR}/$i.javap
LC_ALL=C $DIFF ${OTHER_UNZIPDIR}/$i.javap ${THIS_UNZIPDIR}/$i.javap
elif [ -f "${OTHER_UNZIPDIR}/$i.cleaned" ]; then
LANG=C $DIFF ${OTHER_UNZIPDIR}/$i.cleaned ${THIS_UNZIPDIR}/$i
LC_ALL=C $DIFF ${OTHER_UNZIPDIR}/$i.cleaned ${THIS_UNZIPDIR}/$i
else
LANG=C $DIFF ${OTHER_UNZIPDIR}/$i ${THIS_UNZIPDIR}/$i
LC_ALL=C $DIFF ${OTHER_UNZIPDIR}/$i ${THIS_UNZIPDIR}/$i
fi
done
fi
@ -703,7 +703,7 @@ compare_bin_file() {
$NM -a $ORIG_THIS_FILE 2> /dev/null | $GREP -v $NAME | $AWK '{print $2, $3, $4, $5}' | $SYM_SORT_CMD > $WORK_FILE_BASE.symbols.this
fi
LANG=C $DIFF $WORK_FILE_BASE.symbols.other $WORK_FILE_BASE.symbols.this > $WORK_FILE_BASE.symbols.diff
LC_ALL=C $DIFF $WORK_FILE_BASE.symbols.other $WORK_FILE_BASE.symbols.this > $WORK_FILE_BASE.symbols.diff
if [ -s $WORK_FILE_BASE.symbols.diff ]; then
SYM_MSG=" diff "
if [[ "$ACCEPTED_SYM_DIFF" != *"$BIN_FILE"* ]]; then
@ -732,8 +732,8 @@ compare_bin_file() {
(cd $FILE_WORK_DIR && $CP $THIS_FILE . && $LDD_CMD $NAME 2</dev/null | $AWK '{ print $1;}' | $SORT | $TEE $WORK_FILE_BASE.deps.this | $UNIQ > $WORK_FILE_BASE.deps.this.uniq)
(cd $FILE_WORK_DIR && $RM -f $NAME)
LANG=C $DIFF $WORK_FILE_BASE.deps.other $WORK_FILE_BASE.deps.this > $WORK_FILE_BASE.deps.diff
LANG=C $DIFF $WORK_FILE_BASE.deps.other.uniq $WORK_FILE_BASE.deps.this.uniq > $WORK_FILE_BASE.deps.diff.uniq
LC_ALL=C $DIFF $WORK_FILE_BASE.deps.other $WORK_FILE_BASE.deps.this > $WORK_FILE_BASE.deps.diff
LC_ALL=C $DIFF $WORK_FILE_BASE.deps.other.uniq $WORK_FILE_BASE.deps.this.uniq > $WORK_FILE_BASE.deps.diff.uniq
if [ -s $WORK_FILE_BASE.deps.diff ]; then
if [ -s $WORK_FILE_BASE.deps.diff.uniq ]; then
@ -768,7 +768,7 @@ compare_bin_file() {
if [ -n "$FULLDUMP_CMD" ] && [ -z "$SKIP_FULLDUMP_DIFF" ]; then
$FULLDUMP_CMD $OTHER_FILE > $WORK_FILE_BASE.fulldump.other 2>&1
$FULLDUMP_CMD $THIS_FILE > $WORK_FILE_BASE.fulldump.this 2>&1
LANG=C $DIFF $WORK_FILE_BASE.fulldump.other $WORK_FILE_BASE.fulldump.this > $WORK_FILE_BASE.fulldump.diff
LC_ALL=C $DIFF $WORK_FILE_BASE.fulldump.other $WORK_FILE_BASE.fulldump.this > $WORK_FILE_BASE.fulldump.diff
if [ -s $WORK_FILE_BASE.fulldump.diff ]; then
ELF_DIFF_SIZE=$(ls -n $WORK_FILE_BASE.fulldump.diff | awk '{print $5}')
@ -802,7 +802,7 @@ compare_bin_file() {
$DIS_CMD $OTHER_FILE | $GREP -v $NAME | $DIS_DIFF_FILTER > $WORK_FILE_BASE.dis.other 2>&1
$DIS_CMD $THIS_FILE | $GREP -v $NAME | $DIS_DIFF_FILTER > $WORK_FILE_BASE.dis.this 2>&1
LANG=C $DIFF $WORK_FILE_BASE.dis.other $WORK_FILE_BASE.dis.this > $WORK_FILE_BASE.dis.diff
LC_ALL=C $DIFF $WORK_FILE_BASE.dis.other $WORK_FILE_BASE.dis.this > $WORK_FILE_BASE.dis.diff
if [ -s $WORK_FILE_BASE.dis.diff ]; then
DIS_DIFF_SIZE=$(ls -n $WORK_FILE_BASE.dis.diff | awk '{print $5}')
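Note on the LANG=C -> LC_ALL=C substitutions above: LC_ALL overrides every locale category, while LANG is only the fallback, so a stray LC_COLLATE or LC_CTYPE in the user's environment could still change diff/sort/grep behavior under LANG=C. A minimal sketch of the POSIX precedence rules (illustrative values, not from the build):

#include <clocale>
#include <cstdio>
#include <cstdlib>

int main() {
    setenv("LANG", "en_US.UTF-8", 1);        // weakest: default for all categories
    setenv("LC_COLLATE", "sv_SE.UTF-8", 1);  // beats LANG, for collation only
    setenv("LC_ALL", "C", 1);                // strongest: beats everything
    // setlocale(category, "") resolves LC_ALL, then LC_<category>, then LANG,
    // so collation-sensitive tools end up in the portable "C" locale.
    printf("%s\n", setlocale(LC_COLLATE, ""));  // prints "C"
    return 0;
}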

View file

@ -1,5 +1,5 @@
#
# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -200,13 +200,8 @@ $(JPRT_ARCHIVE_BUNDLE): bundles
$(RM) $@
$(CP) $(BUILD_OUTPUT)/bundles/$(JDK_IMAGE_SUBDIR).zip $@
ifeq ($(OPENJDK_TARGET_OS)-$(OPENJDK_TARGET_CPU_BITS),solaris-64)
SRC_JDK_IMAGE_DIR := $(JDK_OVERLAY_IMAGE_DIR)
SRC_JRE_IMAGE_DIR := $(JRE_OVERLAY_IMAGE_DIR)
else
SRC_JDK_IMAGE_DIR := $(JDK_IMAGE_DIR)
SRC_JRE_IMAGE_DIR := $(JRE_IMAGE_DIR)
endif
SRC_JDK_IMAGE_DIR := $(JDK_IMAGE_DIR)
SRC_JRE_IMAGE_DIR := $(JRE_IMAGE_DIR)
SRC_JDK_BUNDLE_DIR := $(JDK_BUNDLE_DIR)
SRC_JRE_BUNDLE_DIR := $(JRE_BUNDLE_DIR)
@ -215,10 +210,10 @@ bundles: all bundles-only
bundles-only: start-make
@$(call TargetEnter)
$(MKDIR) -p $(BUILD_OUTPUT)/bundles
$(CD) $(SRC_JDK_IMAGE_DIR) && $(ZIP) -q -r $(BUILD_OUTPUT)/bundles/$(JDK_IMAGE_SUBDIR).zip .
$(CD) $(SRC_JRE_IMAGE_DIR) && $(ZIP) -q -r $(BUILD_OUTPUT)/bundles/$(JRE_IMAGE_SUBDIR).zip .
$(CD) $(SRC_JDK_IMAGE_DIR) && $(ZIP) -y -q -r $(BUILD_OUTPUT)/bundles/$(JDK_IMAGE_SUBDIR).zip .
$(CD) $(SRC_JRE_IMAGE_DIR) && $(ZIP) -y -q -r $(BUILD_OUTPUT)/bundles/$(JRE_IMAGE_SUBDIR).zip .
if [ -d $(BUILD_OUTPUT)/install/bundles ] ; then \
$(CD) $(BUILD_OUTPUT)/install/bundles && $(ZIP) -q -r $(JPRT_ARCHIVE_INSTALL_BUNDLE) . ; \
$(CD) $(BUILD_OUTPUT)/install/bundles && $(ZIP) -y -q -r $(JPRT_ARCHIVE_INSTALL_BUNDLE) . ; \
fi
@$(call TargetExit)

View file

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -68,10 +68,6 @@ default: jdk
all: images docs
@$(call CheckIfMakeAtEnd)
ifeq ($(OPENJDK_TARGET_OS)-$(OPENJDK_TARGET_CPU_BITS),solaris-64)
all: overlay-images
endif
# Setup a rule for SPEC file that fails if executed. This check makes sure the configuration
# is up to date after changes to configure
$(SPEC): $(wildcard $(SRC_ROOT)/common/autoconf/*)

View file

@ -229,3 +229,5 @@ d411c60a8c2fe8fdc572af907775e90f7eefd513 jdk8-b104
4e38de7c767e34104fa147b5b346d9fe6b731279 jdk8-b105
2e3a056c84a71eba78945c18b05397858ffd7ad0 jdk8-b106
23fc34133152692b725db4bd617b4c8dfd6ccb05 jdk8-b107
a4bb3b4500164748a9c33b2283cfda76d89f25ab jdk8-b108
428428cf5e06163322144cfb5367e1faa86acf20 jdk8-b109

View file

@ -1,5 +1,5 @@
#
# Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -39,8 +39,8 @@ jprt.build.targets= \
solaris_x64_5.10-{product|fastdebug}, \
linux_i586_2.6-{product|fastdebug}, \
linux_x64_2.6-{product|fastdebug}, \
windows_i586_5.1-{product|fastdebug}, \
windows_x64_5.2-{product|fastdebug}
windows_i586_6.1-{product|fastdebug}, \
windows_x64_6.1-{product|fastdebug}
# Directories to be excluded from the source bundles
jprt.bundle.exclude.src.dirs=build dist webrev

View file

@ -377,3 +377,7 @@ aed585cafc0d9655726af6d1e1081d1c94cb3b5c jdk8-b106
50794d8ac11c9579b41dec4de23b808fef9f34a1 hs25-b49
5b7f90aab3ad25a25b75b7b2bb18d5ae23d8231c jdk8-b107
a09fe9d1e016c285307507a5793bc4fa6215e9c9 hs25-b50
85072013aad46050a362d10ab78e963121c8014c jdk8-b108
566db1b0e6efca31f181456e54c8911d0192410d hs25-b51
c81dd5393a5e333df7cb1f6621f5897ada6522b5 jdk8-b109
58043478c26d4e8bf48700acea5f97aba8b417d4 hs25-b52

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -698,29 +698,58 @@ err:
// read segments of a shared object
static bool read_lib_segments(struct ps_prochandle* ph, int lib_fd, ELF_EHDR* lib_ehdr, uintptr_t lib_base) {
int i = 0;
ELF_PHDR* phbuf;
ELF_PHDR* lib_php = NULL;
int i = 0;
ELF_PHDR* phbuf;
ELF_PHDR* lib_php = NULL;
if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL)
return false;
int page_size=sysconf(_SC_PAGE_SIZE);
// we want to process only PT_LOAD segments that are not writable.
// i.e., text segments. The read/write/exec (data) segments would
// have been already added from core file segments.
for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) {
if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) {
if (add_map_info(ph, lib_fd, lib_php->p_offset, lib_php->p_vaddr + lib_base, lib_php->p_filesz) == NULL)
goto err;
if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL) {
return false;
}
// we want to process only PT_LOAD segments that are not writable.
// i.e., text segments. The read/write/exec (data) segments would
// have been already added from core file segments.
for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) {
if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) {
uintptr_t target_vaddr = lib_php->p_vaddr + lib_base;
map_info *existing_map = core_lookup(ph, target_vaddr);
if (existing_map == NULL){
if (add_map_info(ph, lib_fd, lib_php->p_offset,
target_vaddr, lib_php->p_filesz) == NULL) {
goto err;
}
} else {
if ((existing_map->memsz != page_size) &&
(existing_map->fd != lib_fd) &&
(existing_map->memsz != lib_php->p_filesz)){
print_debug("address conflict @ 0x%lx (size = %ld, flags = %d\n)",
target_vaddr, lib_php->p_filesz, lib_php->p_flags);
goto err;
}
/* replace PT_LOAD segment with library segment */
print_debug("overwrote with new address mapping (memsz %ld -> %ld)\n",
existing_map->memsz, lib_php->p_filesz);
existing_map->fd = lib_fd;
existing_map->offset = lib_php->p_offset;
existing_map->memsz = lib_php->p_filesz;
}
lib_php++;
}
}
free(phbuf);
return true;
lib_php++;
}
free(phbuf);
return true;
err:
free(phbuf);
return false;
free(phbuf);
return false;
}
// process segments from interpreter (ld.so or ld-linux.so)

View file

@ -1213,6 +1213,7 @@ public class CommandProcessor {
}
HotSpotTypeDataBase db = (HotSpotTypeDataBase)agent.getTypeDataBase();
if (t.countTokens() == 1) {
String name = t.nextToken();
out.println("intConstant " + name + " " + db.lookupIntConstant(name));
} else if (t.countTokens() == 0) {
Iterator i = db.getIntConstants();
@ -1235,6 +1236,7 @@ public class CommandProcessor {
}
HotSpotTypeDataBase db = (HotSpotTypeDataBase)agent.getTypeDataBase();
if (t.countTokens() == 1) {
String name = t.nextToken();
out.println("longConstant " + name + " " + db.lookupLongConstant(name));
} else if (t.countTokens() == 0) {
Iterator i = db.getLongConstants();

View file

@ -81,7 +81,7 @@ class BsdAddress implements Address {
public Address getCompKlassAddressAt(long offset)
throws UnalignedAddressException, UnmappedAddressException {
return debugger.readCompOopAddress(addr + offset);
return debugger.readCompKlassAddress(addr + offset);
}
//

View file

@ -792,7 +792,7 @@ public class VM {
public boolean isCompressedKlassPointersEnabled() {
if (compressedKlassPointersEnabled == null) {
Flag flag = getCommandLineFlag("UseCompressedKlassPointers");
Flag flag = getCommandLineFlag("UseCompressedClassPointers");
compressedKlassPointersEnabled = (flag == null) ? Boolean.FALSE:
(flag.getBool()? Boolean.TRUE: Boolean.FALSE);
}

View file

@ -66,18 +66,18 @@ public class HeapSummary extends Tool {
printGCAlgorithm(flagMap);
System.out.println();
System.out.println("Heap Configuration:");
printValue("MinHeapFreeRatio = ", getFlagValue("MinHeapFreeRatio", flagMap));
printValue("MaxHeapFreeRatio = ", getFlagValue("MaxHeapFreeRatio", flagMap));
printValMB("MaxHeapSize = ", getFlagValue("MaxHeapSize", flagMap));
printValMB("NewSize = ", getFlagValue("NewSize", flagMap));
printValMB("MaxNewSize = ", getFlagValue("MaxNewSize", flagMap));
printValMB("OldSize = ", getFlagValue("OldSize", flagMap));
printValue("NewRatio = ", getFlagValue("NewRatio", flagMap));
printValue("SurvivorRatio = ", getFlagValue("SurvivorRatio", flagMap));
printValMB("MetaspaceSize = ", getFlagValue("MetaspaceSize", flagMap));
printValMB("ClassMetaspaceSize = ", getFlagValue("ClassMetaspaceSize", flagMap));
printValMB("MaxMetaspaceSize = ", getFlagValue("MaxMetaspaceSize", flagMap));
printValMB("G1HeapRegionSize = ", HeapRegion.grainBytes());
printValue("MinHeapFreeRatio = ", getFlagValue("MinHeapFreeRatio", flagMap));
printValue("MaxHeapFreeRatio = ", getFlagValue("MaxHeapFreeRatio", flagMap));
printValMB("MaxHeapSize = ", getFlagValue("MaxHeapSize", flagMap));
printValMB("NewSize = ", getFlagValue("NewSize", flagMap));
printValMB("MaxNewSize = ", getFlagValue("MaxNewSize", flagMap));
printValMB("OldSize = ", getFlagValue("OldSize", flagMap));
printValue("NewRatio = ", getFlagValue("NewRatio", flagMap));
printValue("SurvivorRatio = ", getFlagValue("SurvivorRatio", flagMap));
printValMB("MetaspaceSize = ", getFlagValue("MetaspaceSize", flagMap));
printValMB("CompressedClassSpaceSize = ", getFlagValue("CompressedClassSpaceSize", flagMap));
printValMB("MaxMetaspaceSize = ", getFlagValue("MaxMetaspaceSize", flagMap));
printValMB("G1HeapRegionSize = ", HeapRegion.grainBytes());
System.out.println();
System.out.println("Heap Usage:");

View file

@ -80,7 +80,7 @@ ifeq ($(SPEC),)
HOSTCC = $(CC)
endif
AS = $(CC) -c -x assembler-with-cpp
AS = $(CC) -c
endif
@ -347,6 +347,13 @@ ifeq ($(OS_VENDOR), Darwin)
LDFLAGS += -mmacosx-version-min=$(MACOSX_VERSION_MIN)
endif
#------------------------------------------------------------------------
# Assembler flags
# Enforce preprocessing of .s files
ASFLAGS += -x assembler-with-cpp
#------------------------------------------------------------------------
# Linker flags

View file

@ -88,7 +88,7 @@ ifeq ($(INCLUDE_ALL_GCS), false)
g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \
g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp \
g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \
heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
g1BiasedArray.cpp heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp \
adjoiningGenerations.cpp adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp \
cardTableExtension.cpp gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp \

View file

@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2013
HS_MAJOR_VER=25
HS_MINOR_VER=0
HS_BUILD_NUMBER=51
HS_BUILD_NUMBER=53
JDK_MAJOR_VER=1
JDK_MINOR_VER=8

View file

@ -120,13 +120,13 @@ jprt.my.macosx.x64.jdk7=macosx_x64_10.7
jprt.my.macosx.x64.jdk7u8=${jprt.my.macosx.x64.jdk7}
jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}
jprt.my.windows.i586.jdk8=windows_i586_5.1
jprt.my.windows.i586.jdk7=windows_i586_5.1
jprt.my.windows.i586.jdk8=windows_i586_6.1
jprt.my.windows.i586.jdk7=windows_i586_6.1
jprt.my.windows.i586.jdk7u8=${jprt.my.windows.i586.jdk7}
jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
jprt.my.windows.x64.jdk8=windows_x64_5.2
jprt.my.windows.x64.jdk7=windows_x64_5.2
jprt.my.windows.x64.jdk8=windows_x64_6.1
jprt.my.windows.x64.jdk7=windows_x64_6.1
jprt.my.windows.x64.jdk7u8=${jprt.my.windows.x64.jdk7}
jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}

View file

@ -105,7 +105,7 @@ bool LIR_Assembler::is_single_instruction(LIR_Op* op) {
if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
}
if (UseCompressedKlassPointers) {
if (UseCompressedClassPointers) {
if (src->is_address() && !src->is_stack() && src->type() == T_ADDRESS &&
src->as_address_ptr()->disp() == oopDesc::klass_offset_in_bytes()) return false;
}
@ -963,7 +963,7 @@ int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType typ
case T_METADATA: __ ld_ptr(base, offset, to_reg->as_register()); break;
case T_ADDRESS:
#ifdef _LP64
if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedKlassPointers) {
if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
__ lduw(base, offset, to_reg->as_register());
__ decode_klass_not_null(to_reg->as_register());
} else
@ -2208,7 +2208,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
// We don't know the array types are compatible
if (basic_type != T_OBJECT) {
// Simple test for basic type arrays
if (UseCompressedKlassPointers) {
if (UseCompressedClassPointers) {
// We don't need decode because we just need to compare
__ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
__ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
@ -2342,7 +2342,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
// but not necessarily exactly of type default_type.
Label known_ok, halt;
metadata2reg(op->expected_type()->constant_encoding(), tmp);
if (UseCompressedKlassPointers) {
if (UseCompressedClassPointers) {
// tmp holds the default type. It currently comes uncompressed after the
// load of a constant, so encode it.
__ encode_klass_not_null(tmp);

View file

@ -186,7 +186,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
set((intx)markOopDesc::prototype(), t1);
}
st_ptr(t1, obj, oopDesc::mark_offset_in_bytes());
if (UseCompressedKlassPointers) {
if (UseCompressedClassPointers) {
// Save klass
mov(klass, t1);
encode_klass_not_null(t1);
@ -196,7 +196,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
}
if (len->is_valid()) {
st(len, obj, arrayOopDesc::length_offset_in_bytes());
} else if (UseCompressedKlassPointers) {
} else if (UseCompressedClassPointers) {
// otherwise length is in the class gap
store_klass_gap(G0, obj);
}

View file

@ -3911,7 +3911,7 @@ void MacroAssembler::load_klass(Register src_oop, Register klass) {
// The number of bytes in this code is used by
// MachCallDynamicJavaNode::ret_addr_offset()
// if this changes, change that.
if (UseCompressedKlassPointers) {
if (UseCompressedClassPointers) {
lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass);
decode_klass_not_null(klass);
} else {
@ -3920,7 +3920,7 @@ void MacroAssembler::load_klass(Register src_oop, Register klass) {
}
void MacroAssembler::store_klass(Register klass, Register dst_oop) {
if (UseCompressedKlassPointers) {
if (UseCompressedClassPointers) {
assert(dst_oop != klass, "not enough registers");
encode_klass_not_null(klass);
st(klass, dst_oop, oopDesc::klass_offset_in_bytes());
@ -3930,7 +3930,7 @@ void MacroAssembler::store_klass(Register klass, Register dst_oop) {
}
void MacroAssembler::store_klass_gap(Register s, Register d) {
if (UseCompressedKlassPointers) {
if (UseCompressedClassPointers) {
assert(s != d, "not enough registers");
st(s, d, oopDesc::klass_gap_offset_in_bytes());
}
@ -4089,7 +4089,7 @@ void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
}
void MacroAssembler::encode_klass_not_null(Register r) {
assert (UseCompressedKlassPointers, "must be compressed");
assert (UseCompressedClassPointers, "must be compressed");
assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
assert(r != G6_heapbase, "bad register choice");
set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
@ -4105,7 +4105,7 @@ void MacroAssembler::encode_klass_not_null(Register src, Register dst) {
if (src == dst) {
encode_klass_not_null(src);
} else {
assert (UseCompressedKlassPointers, "must be compressed");
assert (UseCompressedClassPointers, "must be compressed");
assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
set((intptr_t)Universe::narrow_klass_base(), dst);
sub(src, dst, dst);
@ -4119,7 +4119,7 @@ void MacroAssembler::encode_klass_not_null(Register src, Register dst) {
// generated by decode_klass_not_null() and reinit_heapbase(). Hence, if
// the instructions they generate change, then this method needs to be updated.
int MacroAssembler::instr_size_for_decode_klass_not_null() {
assert (UseCompressedKlassPointers, "only for compressed klass ptrs");
assert (UseCompressedClassPointers, "only for compressed klass ptrs");
// set + add + set
int num_instrs = insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) + 1 +
insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base());
@ -4135,7 +4135,7 @@ int MacroAssembler::instr_size_for_decode_klass_not_null() {
void MacroAssembler::decode_klass_not_null(Register r) {
// Do not add assert code to this unless you change vtableStubs_sparc.cpp
// pd_code_size_limit.
assert (UseCompressedKlassPointers, "must be compressed");
assert (UseCompressedClassPointers, "must be compressed");
assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
assert(r != G6_heapbase, "bad register choice");
set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
@ -4151,7 +4151,7 @@ void MacroAssembler::decode_klass_not_null(Register src, Register dst) {
} else {
// Do not add assert code to this unless you change vtableStubs_sparc.cpp
// pd_code_size_limit.
assert (UseCompressedKlassPointers, "must be compressed");
assert (UseCompressedClassPointers, "must be compressed");
assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
if (Universe::narrow_klass_shift() != 0) {
assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice");
@ -4167,7 +4167,7 @@ void MacroAssembler::decode_klass_not_null(Register src, Register dst) {
}
void MacroAssembler::reinit_heapbase() {
if (UseCompressedOops || UseCompressedKlassPointers) {
if (UseCompressedOops || UseCompressedClassPointers) {
if (Universe::heap() != NULL) {
set((intptr_t)Universe::narrow_ptrs_base(), G6_heapbase);
} else {

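The assert/encode/decode changes above all track the flag rename from UseCompressedKlassPointers to UseCompressedClassPointers; the underlying arithmetic is unchanged: a 64-bit Klass* is stored as a 32-bit offset from narrow_klass_base, shifted by the klass alignment. A self-contained sketch of that round trip (base and shift values are assumptions, not HotSpot's):

#include <cassert>
#include <cstdint>

const uintptr_t kNarrowKlassBase  = 0x800000000ULL;  // assumed narrow_klass_base
const int       kNarrowKlassShift = 3;               // LogKlassAlignmentInBytes

uint32_t encode_klass(uintptr_t klass) {
    // mirrors the "sub base; srlx shift" sequence emitted above
    return (uint32_t)((klass - kNarrowKlassBase) >> kNarrowKlassShift);
}

uintptr_t decode_klass(uint32_t narrow) {
    // mirrors the inverse "sllx shift; add base"
    return ((uintptr_t)narrow << kNarrowKlassShift) + kNarrowKlassBase;
}

int main() {
    uintptr_t k = kNarrowKlassBase + 0x12340ULL;  // 8-byte-aligned offset
    assert(decode_klass(encode_klass(k)) == k);
    return 0;
}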
View file

@ -557,7 +557,7 @@ int MachCallDynamicJavaNode::ret_addr_offset() {
int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
int klass_load_size;
if (UseCompressedKlassPointers) {
if (UseCompressedClassPointers) {
assert(Universe::heap() != NULL, "java heap should be initialized");
klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
} else {
@ -1657,7 +1657,7 @@ uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
st->print_cr("\nUEP:");
#ifdef _LP64
if (UseCompressedKlassPointers) {
if (UseCompressedClassPointers) {
assert(Universe::heap() != NULL, "java heap should be initialized");
st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
st->print_cr("\tSET Universe::narrow_klass_base,R_G6_heap_base");
@ -1897,7 +1897,7 @@ bool Matcher::narrow_oop_use_complex_address() {
bool Matcher::narrow_klass_use_complex_address() {
NOT_LP64(ShouldNotCallThis());
assert(UseCompressedKlassPointers, "only for compressed klass code");
assert(UseCompressedClassPointers, "only for compressed klass code");
return false;
}
@ -2561,7 +2561,7 @@ encode %{
int off = __ offset();
__ load_klass(O0, G3_scratch);
int klass_load_size;
if (UseCompressedKlassPointers) {
if (UseCompressedClassPointers) {
assert(Universe::heap() != NULL, "java heap should be initialized");
klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
} else {

View file

@ -2945,7 +2945,7 @@ class StubGenerator: public StubCodeGenerator {
BLOCK_COMMENT("arraycopy argument klass checks");
// get src->klass()
if (UseCompressedKlassPointers) {
if (UseCompressedClassPointers) {
__ delayed()->nop(); // ??? not good
__ load_klass(src, G3_src_klass);
} else {
@ -2980,7 +2980,7 @@ class StubGenerator: public StubCodeGenerator {
// Load 32-bits signed value. Use br() instruction with it to check icc.
__ lduw(G3_src_klass, lh_offset, G5_lh);
if (UseCompressedKlassPointers) {
if (UseCompressedClassPointers) {
__ load_klass(dst, G4_dst_klass);
}
// Handle objArrays completely differently...
@ -2988,7 +2988,7 @@ class StubGenerator: public StubCodeGenerator {
__ set(objArray_lh, O5_temp);
__ cmp(G5_lh, O5_temp);
__ br(Assembler::equal, false, Assembler::pt, L_objArray);
if (UseCompressedKlassPointers) {
if (UseCompressedClassPointers) {
__ delayed()->nop();
} else {
__ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);

View file

@ -52,6 +52,11 @@ extern "C" void bad_compiled_vtable_index(JavaThread* thread, oopDesc* receiver,
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
const int sparc_code_length = VtableStub::pd_code_size_limit(true);
VtableStub* s = new(sparc_code_length) VtableStub(true, vtable_index);
// Can be NULL if there is no free space in the code cache.
if (s == NULL) {
return NULL;
}
ResourceMark rm;
CodeBuffer cb(s->entry_point(), sparc_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);
@ -125,6 +130,11 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
VtableStub* VtableStubs::create_itable_stub(int itable_index) {
const int sparc_code_length = VtableStub::pd_code_size_limit(false);
VtableStub* s = new(sparc_code_length) VtableStub(false, itable_index);
// Can be NULL if there is no free space in the code cache.
if (s == NULL) {
return NULL;
}
ResourceMark rm;
CodeBuffer cb(s->entry_point(), sparc_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);
@ -218,13 +228,13 @@ int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
// ld;ld;ld,jmp,nop
const int basic = 5*BytesPerInstWord +
// shift;add for load_klass (only shift with zero heap based)
(UseCompressedKlassPointers ?
(UseCompressedClassPointers ?
MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
return basic + slop;
} else {
const int basic = (28 LP64_ONLY(+ 6)) * BytesPerInstWord +
// shift;add for load_klass (only shift with zero heap based)
(UseCompressedKlassPointers ?
(UseCompressedClassPointers ?
MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
return (basic + slop);
}

View file

@ -148,7 +148,7 @@
static int adjust_reg_range(int range) {
// Reduce the number of available regs (to free r12) in case of compressed oops
if (UseCompressedOops || UseCompressedKlassPointers) return range - 1;
if (UseCompressedOops || UseCompressedClassPointers) return range - 1;
return range;
}

View file

@ -341,7 +341,7 @@ int LIR_Assembler::check_icache() {
Register receiver = FrameMap::receiver_opr->as_register();
Register ic_klass = IC_Klass;
const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
const bool do_post_padding = VerifyOops || UseCompressedKlassPointers;
const bool do_post_padding = VerifyOops || UseCompressedClassPointers;
if (!do_post_padding) {
// insert some nops so that the verified entry point is aligned on CodeEntryAlignment
while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
@ -1263,7 +1263,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
break;
case T_ADDRESS:
if (UseCompressedKlassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
__ movl(dest->as_register(), from_addr);
} else {
__ movptr(dest->as_register(), from_addr);
@ -1371,7 +1371,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
__ verify_oop(dest->as_register());
} else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
#ifdef _LP64
if (UseCompressedKlassPointers) {
if (UseCompressedClassPointers) {
__ decode_klass_not_null(dest->as_register());
}
#endif
@ -1716,7 +1716,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
} else if (obj == klass_RInfo) {
klass_RInfo = dst;
}
if (k->is_loaded() && !UseCompressedKlassPointers) {
if (k->is_loaded() && !UseCompressedClassPointers) {
select_different_registers(obj, dst, k_RInfo, klass_RInfo);
} else {
Rtmp1 = op->tmp3()->as_register();
@ -1724,14 +1724,6 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
}
assert_different_registers(obj, k_RInfo, klass_RInfo);
if (!k->is_loaded()) {
klass2reg_with_patching(k_RInfo, op->info_for_patch());
} else {
#ifdef _LP64
__ mov_metadata(k_RInfo, k->constant_encoding());
#endif // _LP64
}
assert(obj != k_RInfo, "must be different");
__ cmpptr(obj, (int32_t)NULL_WORD);
if (op->should_profile()) {
@ -1748,13 +1740,21 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
} else {
__ jcc(Assembler::equal, *obj_is_null);
}
if (!k->is_loaded()) {
klass2reg_with_patching(k_RInfo, op->info_for_patch());
} else {
#ifdef _LP64
__ mov_metadata(k_RInfo, k->constant_encoding());
#endif // _LP64
}
__ verify_oop(obj);
if (op->fast_check()) {
// get object class
// not a safepoint as obj null check happens earlier
#ifdef _LP64
if (UseCompressedKlassPointers) {
if (UseCompressedClassPointers) {
__ load_klass(Rtmp1, obj);
__ cmpptr(k_RInfo, Rtmp1);
} else {
@ -3294,7 +3294,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
// We don't know the array types are compatible
if (basic_type != T_OBJECT) {
// Simple test for basic type arrays
if (UseCompressedKlassPointers) {
if (UseCompressedClassPointers) {
__ movl(tmp, src_klass_addr);
__ cmpl(tmp, dst_klass_addr);
} else {
@ -3456,21 +3456,21 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
Label known_ok, halt;
__ mov_metadata(tmp, default_type->constant_encoding());
#ifdef _LP64
if (UseCompressedKlassPointers) {
if (UseCompressedClassPointers) {
__ encode_klass_not_null(tmp);
}
#endif
if (basic_type != T_OBJECT) {
if (UseCompressedKlassPointers) __ cmpl(tmp, dst_klass_addr);
if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr);
else __ cmpptr(tmp, dst_klass_addr);
__ jcc(Assembler::notEqual, halt);
if (UseCompressedKlassPointers) __ cmpl(tmp, src_klass_addr);
if (UseCompressedClassPointers) __ cmpl(tmp, src_klass_addr);
else __ cmpptr(tmp, src_klass_addr);
__ jcc(Assembler::equal, known_ok);
} else {
if (UseCompressedKlassPointers) __ cmpl(tmp, dst_klass_addr);
if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr);
else __ cmpptr(tmp, dst_klass_addr);
__ jcc(Assembler::equal, known_ok);
__ cmpptr(src, dst);

View file

@ -1239,7 +1239,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
}
LIR_Opr reg = rlock_result(x);
LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
if (!x->klass()->is_loaded() || UseCompressedKlassPointers) {
if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
tmp3 = new_register(objectType);
}
__ checkcast(reg, obj.result(), x->klass(),
@ -1261,7 +1261,7 @@ void LIRGenerator::do_InstanceOf(InstanceOf* x) {
}
obj.load_item();
LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
if (!x->klass()->is_loaded() || UseCompressedKlassPointers) {
if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
tmp3 = new_register(objectType);
}
__ instanceof(reg, obj.result(), x->klass(),

View file

@ -157,7 +157,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype());
}
#ifdef _LP64
if (UseCompressedKlassPointers) { // Take care not to kill klass
if (UseCompressedClassPointers) { // Take care not to kill klass
movptr(t1, klass);
encode_klass_not_null(t1);
movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
@ -171,7 +171,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
}
#ifdef _LP64
else if (UseCompressedKlassPointers) {
else if (UseCompressedClassPointers) {
xorptr(t1, t1);
store_klass_gap(obj, t1);
}
@ -334,7 +334,7 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
int start_offset = offset();
if (UseCompressedKlassPointers) {
if (UseCompressedClassPointers) {
load_klass(rscratch1, receiver);
cmpptr(rscratch1, iCache);
} else {
@ -345,7 +345,7 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
jump_cc(Assembler::notEqual,
RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
assert(UseCompressedKlassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
assert(UseCompressedClassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
}

View file

@ -1635,7 +1635,7 @@ void MacroAssembler::call_VM_base(Register oop_result,
#ifdef ASSERT
// TraceBytecodes does not use r12 but saves it over the call, so don't verify
// r12 is the heapbase.
LP64_ONLY(if ((UseCompressedOops || UseCompressedKlassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
LP64_ONLY(if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
#endif // ASSERT
assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result");
@ -4802,7 +4802,7 @@ void MacroAssembler::restore_cpu_control_state_after_jni() {
void MacroAssembler::load_klass(Register dst, Register src) {
#ifdef _LP64
if (UseCompressedKlassPointers) {
if (UseCompressedClassPointers) {
movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
decode_klass_not_null(dst);
} else
@ -4817,7 +4817,7 @@ void MacroAssembler::load_prototype_header(Register dst, Register src) {
void MacroAssembler::store_klass(Register dst, Register src) {
#ifdef _LP64
if (UseCompressedKlassPointers) {
if (UseCompressedClassPointers) {
encode_klass_not_null(src);
movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
} else
@ -4892,7 +4892,7 @@ void MacroAssembler::store_heap_oop_null(Address dst) {
#ifdef _LP64
void MacroAssembler::store_klass_gap(Register dst, Register src) {
if (UseCompressedKlassPointers) {
if (UseCompressedClassPointers) {
// Store to klass gap in destination
movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
}
@ -5075,7 +5075,7 @@ void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
// when (Universe::heap() != NULL). Hence, if the instructions they
// generate change, then this method needs to be updated.
int MacroAssembler::instr_size_for_decode_klass_not_null() {
assert (UseCompressedKlassPointers, "only for compressed klass ptrs");
assert (UseCompressedClassPointers, "only for compressed klass ptrs");
// mov64 + addq + shlq? + mov64 (for reinit_heapbase()).
return (Universe::narrow_klass_shift() == 0 ? 20 : 24);
}
@ -5085,7 +5085,7 @@ int MacroAssembler::instr_size_for_decode_klass_not_null() {
void MacroAssembler::decode_klass_not_null(Register r) {
// Note: it will change flags
assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
assert (UseCompressedKlassPointers, "should only be used for compressed headers");
assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert(r != r12_heapbase, "Decoding a klass in r12");
// Cannot assert, unverified entry point counts instructions (see .ad file)
// vtableStubs also counts instructions in pd_code_size_limit.
@ -5103,7 +5103,7 @@ void MacroAssembler::decode_klass_not_null(Register r) {
void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
// Note: it will change flags
assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
assert (UseCompressedKlassPointers, "should only be used for compressed headers");
assert (UseCompressedClassPointers, "should only be used for compressed headers");
if (dst == src) {
decode_klass_not_null(dst);
} else {
@ -5141,7 +5141,7 @@ void MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
}
void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
assert (UseCompressedKlassPointers, "should only be used for compressed headers");
assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
int klass_index = oop_recorder()->find_index(k);
RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@ -5149,7 +5149,7 @@ void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
}
void MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
assert (UseCompressedKlassPointers, "should only be used for compressed headers");
assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
int klass_index = oop_recorder()->find_index(k);
RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@ -5175,7 +5175,7 @@ void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
}
void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
assert (UseCompressedKlassPointers, "should only be used for compressed headers");
assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
int klass_index = oop_recorder()->find_index(k);
RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@ -5183,7 +5183,7 @@ void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
}
void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
assert (UseCompressedKlassPointers, "should only be used for compressed headers");
assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
int klass_index = oop_recorder()->find_index(k);
RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@ -5191,7 +5191,7 @@ void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
}
void MacroAssembler::reinit_heapbase() {
if (UseCompressedOops || UseCompressedKlassPointers) {
if (UseCompressedOops || UseCompressedClassPointers) {
if (Universe::heap() != NULL) {
if (Universe::narrow_oop_base() == NULL) {
MacroAssembler::xorptr(r12_heapbase, r12_heapbase);

View file

@ -34,9 +34,9 @@
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
#ifdef AMD64
const static int InterpreterCodeSize = 200 * 1024;
const static int InterpreterCodeSize = 208 * 1024;
#else
const static int InterpreterCodeSize = 168 * 1024;
const static int InterpreterCodeSize = 176 * 1024;
#endif // AMD64
#endif // CPU_X86_VM_TEMPLATEINTERPRETER_X86_HPP

View file

@ -58,6 +58,11 @@ extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
const int i486_code_length = VtableStub::pd_code_size_limit(true);
VtableStub* s = new(i486_code_length) VtableStub(true, vtable_index);
// Can be NULL if there is no free space in the code cache.
if (s == NULL) {
return NULL;
}
ResourceMark rm;
CodeBuffer cb(s->entry_point(), i486_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);
@ -132,6 +137,11 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
// add code here, bump the code stub size returned by pd_code_size_limit!
const int i486_code_length = VtableStub::pd_code_size_limit(false);
VtableStub* s = new(i486_code_length) VtableStub(false, itable_index);
// Can be NULL if there is no free space in the code cache.
if (s == NULL) {
return NULL;
}
ResourceMark rm;
CodeBuffer cb(s->entry_point(), i486_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);

View file

@ -49,6 +49,11 @@ extern "C" void bad_compiled_vtable_index(JavaThread* thread,
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
const int amd64_code_length = VtableStub::pd_code_size_limit(true);
VtableStub* s = new(amd64_code_length) VtableStub(true, vtable_index);
// Can be NULL if there is no free space in the code cache.
if (s == NULL) {
return NULL;
}
ResourceMark rm;
CodeBuffer cb(s->entry_point(), amd64_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);
@ -126,6 +131,11 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
// returned by pd_code_size_limit!
const int amd64_code_length = VtableStub::pd_code_size_limit(false);
VtableStub* s = new(amd64_code_length) VtableStub(false, itable_index);
// Can be NULL if there is no free space in the code cache.
if (s == NULL) {
return NULL;
}
ResourceMark rm;
CodeBuffer cb(s->entry_point(), amd64_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);
@ -211,11 +221,11 @@ int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
if (is_vtable_stub) {
// Vtable stub size
return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0) +
(UseCompressedKlassPointers ? MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
(UseCompressedClassPointers ? MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
} else {
// Itable stub size
return (DebugVtables ? 512 : 74) + (CountCompiledCalls ? 13 : 0) +
(UseCompressedKlassPointers ? MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
(UseCompressedClassPointers ? MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
}
// In order to tune these parameters, run the JVM with VM options
// +PrintMiscellaneous and +WizardMode to see information about

View file

@ -1391,7 +1391,7 @@ uint BoxLockNode::size(PhaseRegAlloc *ra_) const
#ifndef PRODUCT
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
if (UseCompressedKlassPointers) {
if (UseCompressedClassPointers) {
st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
st->print_cr("\tcmpq rax, rscratch1\t # Inline cache check");
@ -1408,7 +1408,7 @@ void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
MacroAssembler masm(&cbuf);
uint insts_size = cbuf.insts_size();
if (UseCompressedKlassPointers) {
if (UseCompressedClassPointers) {
masm.load_klass(rscratch1, j_rarg0);
masm.cmpptr(rax, rscratch1);
} else {
@ -1557,7 +1557,7 @@ bool Matcher::narrow_oop_use_complex_address() {
}
bool Matcher::narrow_klass_use_complex_address() {
assert(UseCompressedKlassPointers, "only for compressed klass code");
assert(UseCompressedClassPointers, "only for compressed klass code");
return (LogKlassAlignmentInBytes <= 3);
}

View file

@ -3589,8 +3589,6 @@ jint os::init_2(void)
#endif
}
os::large_page_init();
// initialize suspend/resume support - must do this before signal_sets_init()
if (SR_initialize() != 0) {
perror("SR_initialize failed");

View file

@ -131,6 +131,7 @@ bool os::Linux::_is_NPTL = false;
bool os::Linux::_supports_fast_thread_cpu_time = false;
const char * os::Linux::_glibc_version = NULL;
const char * os::Linux::_libpthread_version = NULL;
pthread_condattr_t os::Linux::_condattr[1];
static jlong initial_time_count=0;
@ -1399,12 +1400,15 @@ void os::Linux::clock_init() {
clock_gettime_func(CLOCK_MONOTONIC, &tp) == 0) {
// yes, monotonic clock is supported
_clock_gettime = clock_gettime_func;
return;
} else {
// close librt if there is no monotonic clock
dlclose(handle);
}
}
}
warning("No monotonic clock was available - timed services may " \
"be adversely affected if the time-of-day clock changes");
}
#ifndef SYS_clock_getres
@ -2165,23 +2169,49 @@ void os::print_os_info(outputStream* st) {
}
// Try to identify popular distros.
// Most Linux distributions have /etc/XXX-release file, which contains
// the OS version string. Some have more than one /etc/XXX-release file
// (e.g. Mandrake has both /etc/mandrake-release and /etc/redhat-release.),
// so the order is important.
// Most Linux distributions have a /etc/XXX-release file, which contains
// the OS version string. Newer Linux distributions have a /etc/lsb-release
// file that also contains the OS version string. Some have more than one
// /etc/XXX-release file (e.g. Mandrake has both /etc/mandrake-release and
// /etc/redhat-release.), so the order is important.
// Any Linux that is based on Redhat (i.e. Oracle, Mandrake, Sun JDS...) has
// its own specific XXX-release file as well as a redhat-release file.
// Because of this the XXX-release file needs to be searched for before the
// redhat-release file.
// Since Red Hat has an lsb-release file that is not very descriptive, the
// search for redhat-release needs to come before lsb-release.
// Since the lsb-release file is the new standard it needs to be searched
// before the older style release files.
// Searching for system-release (Red Hat) and os-release (other Linuxes) is a
// next-to-last resort. The os-release file is a new standard that contains
// distribution information and the system-release file seems to be an old
// standard that has been replaced by the lsb-release and os-release files.
// Searching for the debian_version file is the last resort. It contains
// an informative string like "6.0.6" or "wheezy/sid". Because of this
// "Debian " is printed before the contents of the debian_version file.
void os::Linux::print_distro_info(outputStream* st) {
if (!_print_ascii_file("/etc/mandrake-release", st) &&
!_print_ascii_file("/etc/sun-release", st) &&
!_print_ascii_file("/etc/redhat-release", st) &&
!_print_ascii_file("/etc/SuSE-release", st) &&
!_print_ascii_file("/etc/turbolinux-release", st) &&
!_print_ascii_file("/etc/gentoo-release", st) &&
!_print_ascii_file("/etc/debian_version", st) &&
!_print_ascii_file("/etc/ltib-release", st) &&
!_print_ascii_file("/etc/angstrom-version", st)) {
st->print("Linux");
}
st->cr();
if (!_print_ascii_file("/etc/oracle-release", st) &&
!_print_ascii_file("/etc/mandriva-release", st) &&
!_print_ascii_file("/etc/mandrake-release", st) &&
!_print_ascii_file("/etc/sun-release", st) &&
!_print_ascii_file("/etc/redhat-release", st) &&
!_print_ascii_file("/etc/lsb-release", st) &&
!_print_ascii_file("/etc/SuSE-release", st) &&
!_print_ascii_file("/etc/turbolinux-release", st) &&
!_print_ascii_file("/etc/gentoo-release", st) &&
!_print_ascii_file("/etc/ltib-release", st) &&
!_print_ascii_file("/etc/angstrom-version", st) &&
!_print_ascii_file("/etc/system-release", st) &&
!_print_ascii_file("/etc/os-release", st)) {
if (file_exists("/etc/debian_version")) {
st->print("Debian ");
_print_ascii_file("/etc/debian_version", st);
} else {
st->print("Linux");
}
}
st->cr();
}
void os::Linux::print_libversion_info(outputStream* st) {
@ -4709,6 +4739,26 @@ void os::init(void) {
Linux::clock_init();
initial_time_count = os::elapsed_counter();
// pthread_condattr initialization for monotonic clock
int status;
pthread_condattr_t* _condattr = os::Linux::condAttr();
if ((status = pthread_condattr_init(_condattr)) != 0) {
fatal(err_msg("pthread_condattr_init: %s", strerror(status)));
}
// Only set the clock if CLOCK_MONOTONIC is available
if (Linux::supports_monotonic_clock()) {
if ((status = pthread_condattr_setclock(_condattr, CLOCK_MONOTONIC)) != 0) {
if (status == EINVAL) {
warning("Unable to use monotonic clock with relative timed-waits" \
" - changes to the time-of-day clock may have adverse affects");
} else {
fatal(err_msg("pthread_condattr_setclock: %s", strerror(status)));
}
}
}
// else it defaults to CLOCK_REALTIME
pthread_mutex_init(&dl_mutex, NULL);
// If the pagesize of the VM is greater than 8K determine the appropriate
@ -4755,8 +4805,6 @@ jint os::init_2(void)
#endif
}
os::large_page_init();
// initialize suspend/resume support - must do this before signal_sets_init()
if (SR_initialize() != 0) {
perror("SR_initialize failed");
@ -5519,21 +5567,36 @@ void os::pause() {
static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
if (millis < 0) millis = 0;
struct timeval now;
int status = gettimeofday(&now, NULL);
assert(status == 0, "gettimeofday");
jlong seconds = millis / 1000;
millis %= 1000;
if (seconds > 50000000) { // see man cond_timedwait(3T)
seconds = 50000000;
}
abstime->tv_sec = now.tv_sec + seconds;
long usec = now.tv_usec + millis * 1000;
if (usec >= 1000000) {
abstime->tv_sec += 1;
usec -= 1000000;
if (os::Linux::supports_monotonic_clock()) {
struct timespec now;
int status = os::Linux::clock_gettime(CLOCK_MONOTONIC, &now);
assert_status(status == 0, status, "clock_gettime");
abstime->tv_sec = now.tv_sec + seconds;
long nanos = now.tv_nsec + millis * NANOSECS_PER_MILLISEC;
if (nanos >= NANOSECS_PER_SEC) {
abstime->tv_sec += 1;
nanos -= NANOSECS_PER_SEC;
}
abstime->tv_nsec = nanos;
} else {
struct timeval now;
int status = gettimeofday(&now, NULL);
assert(status == 0, "gettimeofday");
abstime->tv_sec = now.tv_sec + seconds;
long usec = now.tv_usec + millis * 1000;
if (usec >= 1000000) {
abstime->tv_sec += 1;
usec -= 1000000;
}
abstime->tv_nsec = usec * 1000;
}
abstime->tv_nsec = usec * 1000;
return abstime;
}
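Since the old and new bodies of compute_abstime() are interleaved above, here is the new monotonic branch in isolation; a sketch that assumes the condvar was created with pthread_condattr_setclock(..., CLOCK_MONOTONIC), as done in os::init() above:

#include <pthread.h>
#include <time.h>

// Build an absolute deadline 'millis' ms from now on the monotonic clock,
// suitable for pthread_cond_timedwait() on a CLOCK_MONOTONIC condvar.
static struct timespec* monotonic_abstime(struct timespec* abstime, long millis) {
    clock_gettime(CLOCK_MONOTONIC, abstime);          // immune to settimeofday
    abstime->tv_sec  += millis / 1000;
    abstime->tv_nsec += (millis % 1000) * 1000000L;   // ms -> ns
    if (abstime->tv_nsec >= 1000000000L) {            // carry into seconds
        abstime->tv_sec  += 1;
        abstime->tv_nsec -= 1000000000L;
    }
    return abstime;
}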
@ -5625,7 +5688,7 @@ int os::PlatformEvent::park(jlong millis) {
status = os::Linux::safe_cond_timedwait(_cond, _mutex, &abst);
if (status != 0 && WorkAroundNPTLTimedWaitHang) {
pthread_cond_destroy (_cond);
pthread_cond_init (_cond, NULL) ;
pthread_cond_init (_cond, os::Linux::condAttr()) ;
}
assert_status(status == 0 || status == EINTR ||
status == ETIME || status == ETIMEDOUT,
@ -5726,32 +5789,50 @@ void os::PlatformEvent::unpark() {
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
assert (time > 0, "convertTime");
time_t max_secs = 0;
struct timeval now;
int status = gettimeofday(&now, NULL);
assert(status == 0, "gettimeofday");
if (!os::Linux::supports_monotonic_clock() || isAbsolute) {
struct timeval now;
int status = gettimeofday(&now, NULL);
assert(status == 0, "gettimeofday");
time_t max_secs = now.tv_sec + MAX_SECS;
max_secs = now.tv_sec + MAX_SECS;
if (isAbsolute) {
jlong secs = time / 1000;
if (secs > max_secs) {
absTime->tv_sec = max_secs;
if (isAbsolute) {
jlong secs = time / 1000;
if (secs > max_secs) {
absTime->tv_sec = max_secs;
} else {
absTime->tv_sec = secs;
}
absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
} else {
jlong secs = time / NANOSECS_PER_SEC;
if (secs >= MAX_SECS) {
absTime->tv_sec = max_secs;
absTime->tv_nsec = 0;
} else {
absTime->tv_sec = now.tv_sec + secs;
absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
absTime->tv_nsec -= NANOSECS_PER_SEC;
++absTime->tv_sec; // note: this must be <= max_secs
}
}
}
else {
absTime->tv_sec = secs;
}
absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
}
else {
} else {
// must be relative using monotonic clock
struct timespec now;
int status = os::Linux::clock_gettime(CLOCK_MONOTONIC, &now);
assert_status(status == 0, status, "clock_gettime");
max_secs = now.tv_sec + MAX_SECS;
jlong secs = time / NANOSECS_PER_SEC;
if (secs >= MAX_SECS) {
absTime->tv_sec = max_secs;
absTime->tv_nsec = 0;
}
else {
} else {
absTime->tv_sec = now.tv_sec + secs;
absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_nsec;
if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
absTime->tv_nsec -= NANOSECS_PER_SEC;
++absTime->tv_sec; // note: this must be <= max_secs
@ -5831,15 +5912,19 @@ void Parker::park(bool isAbsolute, jlong time) {
jt->set_suspend_equivalent();
// cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
assert(_cur_index == -1, "invariant");
if (time == 0) {
status = pthread_cond_wait (_cond, _mutex) ;
_cur_index = REL_INDEX; // arbitrary choice when not timed
status = pthread_cond_wait (&_cond[_cur_index], _mutex) ;
} else {
status = os::Linux::safe_cond_timedwait (_cond, _mutex, &absTime) ;
_cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
status = os::Linux::safe_cond_timedwait (&_cond[_cur_index], _mutex, &absTime) ;
if (status != 0 && WorkAroundNPTLTimedWaitHang) {
pthread_cond_destroy (_cond) ;
pthread_cond_init (_cond, NULL);
pthread_cond_destroy (&_cond[_cur_index]) ;
pthread_cond_init (&_cond[_cur_index], isAbsolute ? NULL : os::Linux::condAttr());
}
}
_cur_index = -1;
assert_status(status == 0 || status == EINTR ||
status == ETIME || status == ETIMEDOUT,
status, "cond_timedwait");
@ -5868,17 +5953,24 @@ void Parker::unpark() {
s = _counter;
_counter = 1;
if (s < 1) {
if (WorkAroundNPTLTimedWaitHang) {
status = pthread_cond_signal (_cond) ;
assert (status == 0, "invariant") ;
// thread might be parked
if (_cur_index != -1) {
// thread is definitely parked
if (WorkAroundNPTLTimedWaitHang) {
status = pthread_cond_signal (&_cond[_cur_index]);
assert (status == 0, "invariant");
status = pthread_mutex_unlock(_mutex);
assert (status == 0, "invariant") ;
} else {
assert (status == 0, "invariant");
} else {
status = pthread_mutex_unlock(_mutex);
assert (status == 0, "invariant") ;
status = pthread_cond_signal (_cond) ;
assert (status == 0, "invariant") ;
}
assert (status == 0, "invariant");
status = pthread_cond_signal (&_cond[_cur_index]);
assert (status == 0, "invariant");
}
} else {
pthread_mutex_unlock(_mutex);
assert (status == 0, "invariant") ;
}
} else {
pthread_mutex_unlock(_mutex);
assert (status == 0, "invariant") ;

View file

@ -221,6 +221,13 @@ class Linux {
static jlong fast_thread_cpu_time(clockid_t clockid);
// pthread_cond clock support
private:
static pthread_condattr_t _condattr[1];
public:
static pthread_condattr_t* condAttr() { return _condattr; }
// Stack repair handling
// none present
@ -295,7 +302,7 @@ class PlatformEvent : public CHeapObj<mtInternal> {
public:
PlatformEvent() {
int status;
status = pthread_cond_init (_cond, NULL);
status = pthread_cond_init (_cond, os::Linux::condAttr());
assert_status(status == 0, status, "cond_init");
status = pthread_mutex_init (_mutex, NULL);
assert_status(status == 0, status, "mutex_init");
@ -310,14 +317,19 @@ class PlatformEvent : public CHeapObj<mtInternal> {
void park () ;
void unpark () ;
int TryPark () ;
int park (jlong millis) ;
int park (jlong millis) ; // relative timed-wait only
void SetAssociation (Thread * a) { _Assoc = a ; }
} ;
class PlatformParker : public CHeapObj<mtInternal> {
protected:
enum {
REL_INDEX = 0,
ABS_INDEX = 1
};
int _cur_index; // which cond is in use: -1, 0, 1
pthread_mutex_t _mutex [1] ;
pthread_cond_t _cond [1] ;
pthread_cond_t _cond [2] ; // one for relative times and one for abs.
public: // TODO-FIXME: make dtor private
~PlatformParker() { guarantee (0, "invariant") ; }
@ -325,10 +337,13 @@ class PlatformParker : public CHeapObj<mtInternal> {
public:
PlatformParker() {
int status;
status = pthread_cond_init (_cond, NULL);
assert_status(status == 0, status, "cond_init");
status = pthread_cond_init (&_cond[REL_INDEX], os::Linux::condAttr());
assert_status(status == 0, status, "cond_init rel");
status = pthread_cond_init (&_cond[ABS_INDEX], NULL);
assert_status(status == 0, status, "cond_init abs");
status = pthread_mutex_init (_mutex, NULL);
assert_status(status == 0, status, "mutex_init");
_cur_index = -1; // mark as unused
}
};
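Putting the PlatformParker changes together: the parker now carries two condvars so relative timed-waits can use the monotonic clock while absolute (wall-clock) deadlines stay on CLOCK_REALTIME, and _cur_index records which one a parked thread is blocked on. A reduced, self-contained sketch of that setup (illustrative names, not the real class):

#include <pthread.h>
#include <time.h>

struct TwoClockParker {
    enum { REL_INDEX = 0, ABS_INDEX = 1 };
    int             _cur_index;  // -1 when not parked, else the cond in use
    pthread_mutex_t _mutex;
    pthread_cond_t  _cond[2];    // [REL_INDEX] monotonic, [ABS_INDEX] realtime

    TwoClockParker() : _cur_index(-1) {
        pthread_mutex_init(&_mutex, NULL);
        pthread_condattr_t attr;
        pthread_condattr_init(&attr);
        pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
        pthread_cond_init(&_cond[REL_INDEX], &attr);  // relative timed-waits
        pthread_cond_init(&_cond[ABS_INDEX], NULL);   // absolute deadlines
        pthread_condattr_destroy(&attr);
    }
};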

View file

@ -5178,9 +5178,7 @@ jint os::init_2(void) {
if(Verbose && PrintMiscellaneous)
tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
}
os::large_page_init();
}
// Check minimum allowable stack size for thread creation and to initialize
// the java system classes, including StackOverflowError - depends on page

View file

@ -32,7 +32,11 @@ WindowsDecoder::WindowsDecoder() {
_can_decode_in_vm = false;
_pfnSymGetSymFromAddr64 = NULL;
_pfnUndecorateSymbolName = NULL;
#ifdef AMD64
_pfnStackWalk64 = NULL;
_pfnSymFunctionTableAccess64 = NULL;
_pfnSymGetModuleBase64 = NULL;
#endif
_decoder_status = no_error;
initialize();
}
@ -53,14 +57,24 @@ void WindowsDecoder::initialize() {
_pfnUndecorateSymbolName = (pfn_UndecorateSymbolName)::GetProcAddress(handle, "UnDecorateSymbolName");
if (_pfnSymSetOptions == NULL || _pfnSymInitialize == NULL || _pfnSymGetSymFromAddr64 == NULL) {
_pfnSymGetSymFromAddr64 = NULL;
_pfnUndecorateSymbolName = NULL;
::FreeLibrary(handle);
_dbghelp_handle = NULL;
uninitialize();
_decoder_status = helper_func_error;
return;
}
#ifdef AMD64
_pfnStackWalk64 = (pfn_StackWalk64)::GetProcAddress(handle, "StackWalk64");
_pfnSymFunctionTableAccess64 = (pfn_SymFunctionTableAccess64)::GetProcAddress(handle, "SymFunctionTableAccess64");
_pfnSymGetModuleBase64 = (pfn_SymGetModuleBase64)::GetProcAddress(handle, "SymGetModuleBase64");
if (_pfnStackWalk64 == NULL || _pfnSymFunctionTableAccess64 == NULL || _pfnSymGetModuleBase64 == NULL) {
// We can't call StackWalk64 to walk the stack, but we are still
// able to decode the symbols. Let's limp on.
_pfnStackWalk64 = NULL;
_pfnSymFunctionTableAccess64 = NULL;
_pfnSymGetModuleBase64 = NULL;
}
#endif
HANDLE hProcess = ::GetCurrentProcess();
_pfnSymSetOptions(SYMOPT_UNDNAME | SYMOPT_DEFERRED_LOADS | SYMOPT_EXACT_SYMBOLS);
if (!_pfnSymInitialize(hProcess, NULL, TRUE)) {
@ -156,6 +170,11 @@ void WindowsDecoder::initialize() {
void WindowsDecoder::uninitialize() {
_pfnSymGetSymFromAddr64 = NULL;
_pfnUndecorateSymbolName = NULL;
#ifdef AMD64
_pfnStackWalk64 = NULL;
_pfnSymFunctionTableAccess64 = NULL;
_pfnSymGetModuleBase64 = NULL;
#endif
if (_dbghelp_handle != NULL) {
::FreeLibrary(_dbghelp_handle);
}
@ -195,3 +214,65 @@ bool WindowsDecoder::demangle(const char* symbol, char *buf, int buflen) {
_pfnUndecorateSymbolName(symbol, buf, buflen, UNDNAME_COMPLETE);
}
#ifdef AMD64
BOOL WindowsDbgHelp::StackWalk64(DWORD MachineType,
HANDLE hProcess,
HANDLE hThread,
LPSTACKFRAME64 StackFrame,
PVOID ContextRecord,
PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress) {
DecoderLocker locker;
WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
if (!wd->has_error() && wd->_pfnStackWalk64) {
return wd->_pfnStackWalk64(MachineType,
hProcess,
hThread,
StackFrame,
ContextRecord,
ReadMemoryRoutine,
FunctionTableAccessRoutine,
GetModuleBaseRoutine,
TranslateAddress);
} else {
return false;
}
}
PVOID WindowsDbgHelp::SymFunctionTableAccess64(HANDLE hProcess, DWORD64 AddrBase) {
DecoderLocker locker;
WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
if (!wd->has_error() && wd->_pfnSymFunctionTableAccess64) {
return wd->_pfnSymFunctionTableAccess64(hProcess, AddrBase);
} else {
return NULL;
}
}
pfn_SymFunctionTableAccess64 WindowsDbgHelp::pfnSymFunctionTableAccess64() {
DecoderLocker locker;
WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
if (!wd->has_error()) {
return wd->_pfnSymFunctionTableAccess64;
} else {
return NULL;
}
}
pfn_SymGetModuleBase64 WindowsDbgHelp::pfnSymGetModuleBase64() {
DecoderLocker locker;
WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
if (!wd->has_error()) {
return wd->_pfnSymGetModuleBase64;
} else {
return NULL;
}
}
#endif // AMD64

View file

@ -38,6 +38,20 @@ typedef DWORD (WINAPI *pfn_UndecorateSymbolName)(const char*, char*, DWORD, DWOR
typedef BOOL (WINAPI *pfn_SymSetSearchPath)(HANDLE, PCTSTR);
typedef BOOL (WINAPI *pfn_SymGetSearchPath)(HANDLE, PTSTR, int);
#ifdef AMD64
typedef BOOL (WINAPI *pfn_StackWalk64)(DWORD MachineType,
HANDLE hProcess,
HANDLE hThread,
LPSTACKFRAME64 StackFrame,
PVOID ContextRecord,
PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress);
typedef PVOID (WINAPI *pfn_SymFunctionTableAccess64)(HANDLE hProcess, DWORD64 AddrBase);
typedef DWORD64 (WINAPI *pfn_SymGetModuleBase64)(HANDLE hProcess, DWORD64 dwAddr);
#endif
class WindowsDecoder : public AbstractDecoder {
public:
@ -61,7 +75,34 @@ private:
bool _can_decode_in_vm;
pfn_SymGetSymFromAddr64 _pfnSymGetSymFromAddr64;
pfn_UndecorateSymbolName _pfnUndecorateSymbolName;
#ifdef AMD64
pfn_StackWalk64 _pfnStackWalk64;
pfn_SymFunctionTableAccess64 _pfnSymFunctionTableAccess64;
pfn_SymGetModuleBase64 _pfnSymGetModuleBase64;
friend class WindowsDbgHelp;
#endif
};
#ifdef AMD64
// TODO: refactor and move the handling of dbghelp.dll outside of Decoder
class WindowsDbgHelp : public Decoder {
public:
static BOOL StackWalk64(DWORD MachineType,
HANDLE hProcess,
HANDLE hThread,
LPSTACKFRAME64 StackFrame,
PVOID ContextRecord,
PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress);
static PVOID SymFunctionTableAccess64(HANDLE hProcess, DWORD64 AddrBase);
static pfn_SymFunctionTableAccess64 pfnSymFunctionTableAccess64();
static pfn_SymGetModuleBase64 pfnSymGetModuleBase64();
};
#endif
#endif // OS_WINDOWS_VM_DECODER_WINDOWS_HPP
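A self-contained sketch of the pattern this header depends on: dbghelp.dll entry points are looked up at runtime with GetProcAddress, and a missing entry point disables the dependent feature rather than the whole decoder. The function chosen below is for illustration only (Windows-specific, not the decoder's own set):

#include <windows.h>
#include <cstdio>

typedef DWORD (WINAPI *pfn_SymSetOptions_t)(DWORD);

int main() {
  HMODULE handle = ::LoadLibraryA("dbghelp.dll");
  if (handle == NULL) return 1;  // no symbol support at all
  pfn_SymSetOptions_t sym_set_options =
      (pfn_SymSetOptions_t)::GetProcAddress(handle, "SymSetOptions");
  if (sym_set_options == NULL) {
    // Entry point missing on this Windows version: limp on without it,
    // just as the decoder does when StackWalk64 cannot be resolved.
    ::FreeLibrary(handle);
    return 1;
  }
  std::printf("SymSetOptions resolved at %p\n", (void*)sym_set_options);
  ::FreeLibrary(handle);
  return 0;
}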

View file

@ -3189,9 +3189,12 @@ char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, boo
return p_buf;
} else {
if (TracePageSizes && Verbose) {
tty->print_cr("Reserving large pages in a single large chunk.");
}
// normal policy just allocate it all at once
DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
char * res = (char *)VirtualAlloc(NULL, bytes, flag, prot);
char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
if (res != NULL) {
address pc = CALLER_PC;
MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc);
@ -3917,8 +3920,6 @@ jint os::init_2(void) {
#endif
}
os::large_page_init();
// Setup Windows Exceptions
// for debugging float code generation bugs
@ -5714,7 +5715,66 @@ BOOL os::Advapi32Dll::AdvapiAvailable() {
#endif
#ifndef PRODUCT
// test the code path in reserve_memory_special() that tries to allocate memory in a single
// contiguous memory block at a particular address.
// The test first tries to find a good approximate address to allocate at by using the same
// method to allocate some memory at any address. The test then tries to allocate memory in
// the vicinity (not directly after it, to avoid a by-chance reuse of that location).
// This is of course only a heuristic; there is no guarantee that the vicinity of
// the previously allocated memory is available for allocation. The only failure
// that is reported is when the test asks for a particular location but gets a
// different valid one. A NULL return value at this point is not considered an
// error and may be legitimate.
// If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
void TestReserveMemorySpecial_test() {
// No tests available for this platform
if (!UseLargePages) {
if (VerboseInternalVMTests) {
gclog_or_tty->print("Skipping test because large pages are disabled");
}
return;
}
// save current value of globals
bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
bool old_use_numa_interleaving = UseNUMAInterleaving;
// set globals to make sure we hit the correct code path
UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
// do an allocation at an address selected by the OS to get a good one.
const size_t large_allocation_size = os::large_page_size() * 4;
char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
if (result == NULL) {
if (VerboseInternalVMTests) {
gclog_or_tty->print("Failed to allocate control block with size "SIZE_FORMAT". Skipping remainder of test.",
large_allocation_size);
}
} else {
os::release_memory_special(result, large_allocation_size);
// allocate another page within the recently allocated memory area which seems to be a good location. At least
// we managed to get it once.
const size_t expected_allocation_size = os::large_page_size();
char* expected_location = result + os::large_page_size();
char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
if (actual_location == NULL) {
if (VerboseInternalVMTests) {
gclog_or_tty->print("Failed to allocate any memory at "PTR_FORMAT" size "SIZE_FORMAT". Skipping remainder of test.",
expected_location, expected_allocation_size);
}
} else {
// release memory
os::release_memory_special(actual_location, expected_allocation_size);
// only now check, after releasing any memory to avoid any leaks.
assert(actual_location == expected_location,
err_msg("Failed to allocate memory at requested location "PTR_FORMAT" of size "SIZE_FORMAT", is "PTR_FORMAT" instead",
expected_location, expected_allocation_size, actual_location));
}
}
// restore globals
UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
UseNUMAInterleaving = old_use_numa_interleaving;
}
#endif
#endif // PRODUCT

View file

@ -35,7 +35,9 @@ define_pd_global(intx, CompilerThreadStackSize, 0);
// Used on 64 bit platforms for UseCompressedOops base address
#ifdef _LP64
define_pd_global(uintx, HeapBaseMinAddress, CONST64(4)*G);
// use 6G as default base address because by default the OS maps the application
// to 4G on Solaris-Sparc. This leaves at least 2G for the native heap.
define_pd_global(uintx, HeapBaseMinAddress, CONST64(6)*G);
#else
define_pd_global(uintx, HeapBaseMinAddress, 2*G);
#endif

View file

@ -29,6 +29,7 @@
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "decoder_windows.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_windows.h"
#include "memory/allocation.inline.hpp"
@ -327,6 +328,94 @@ add_ptr_func_t* os::atomic_add_ptr_func = os::atomic_add_ptr_bootstrap
cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
#ifdef AMD64
/*
* Windows/x64 does not use stack frames the way expected by Java:
* [1] in most cases, there is no frame pointer. All locals are addressed via RSP
* [2] in rare cases, when alloca() is used, a frame pointer is used, but this may
* not be RBP.
* See http://msdn.microsoft.com/en-us/library/ew5tede7.aspx
*
* So it's not possible to print the native stack using the
* while (...) {... fr = os::get_sender_for_C_frame(&fr); }
* loop in vmError.cpp. We need to roll our own loop.
*/
bool os::platform_print_native_stack(outputStream* st, void* context,
char *buf, int buf_size)
{
CONTEXT ctx;
if (context != NULL) {
memcpy(&ctx, context, sizeof(ctx));
} else {
RtlCaptureContext(&ctx);
}
st->print_cr("Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native code)");
STACKFRAME stk;
memset(&stk, 0, sizeof(stk));
stk.AddrStack.Offset = ctx.Rsp;
stk.AddrStack.Mode = AddrModeFlat;
stk.AddrFrame.Offset = ctx.Rbp;
stk.AddrFrame.Mode = AddrModeFlat;
stk.AddrPC.Offset = ctx.Rip;
stk.AddrPC.Mode = AddrModeFlat;
int count = 0;
address lastpc = 0;
while (count++ < StackPrintLimit) {
intptr_t* sp = (intptr_t*)stk.AddrStack.Offset;
intptr_t* fp = (intptr_t*)stk.AddrFrame.Offset; // NOT necessarily the same as ctx.Rbp!
address pc = (address)stk.AddrPC.Offset;
if (pc != NULL && sp != NULL && fp != NULL) {
if (count == 2 && lastpc == pc) {
// Skip it -- StackWalk64() may return the same PC
// (but different SP) on the first try.
} else {
// Don't try to create a frame(sp, fp, pc) -- on WinX64, stk.AddrFrame
// may not contain what Java expects, and may cause the frame() constructor
// to crash. Let's just print out the symbolic address.
frame::print_C_frame(st, buf, buf_size, pc);
st->cr();
}
lastpc = pc;
} else {
break;
}
PVOID p = WindowsDbgHelp::SymFunctionTableAccess64(GetCurrentProcess(), stk.AddrPC.Offset);
if (!p) {
// StackWalk64() can't handle this PC. Calling StackWalk64 again may cause crash.
break;
}
BOOL result = WindowsDbgHelp::StackWalk64(
IMAGE_FILE_MACHINE_AMD64, // __in DWORD MachineType,
GetCurrentProcess(), // __in HANDLE hProcess,
GetCurrentThread(), // __in HANDLE hThread,
&stk, // __inout LPSTACKFRAME64 StackFrame,
&ctx, // __inout PVOID ContextRecord,
NULL, // __in_opt PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
WindowsDbgHelp::pfnSymFunctionTableAccess64(),
// __in_opt PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
WindowsDbgHelp::pfnSymGetModuleBase64(),
// __in_opt PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
NULL); // __in_opt PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress
if (!result) {
break;
}
}
if (count > StackPrintLimit) {
st->print_cr("...<more frames>...");
}
st->cr();
return true;
}
#endif // AMD64
ExtendedPC os::fetch_frame_from_context(void* ucVoid,
intptr_t** ret_sp, intptr_t** ret_fp) {
@ -401,6 +490,9 @@ frame os::current_frame() {
StubRoutines::x86::get_previous_fp_entry());
if (func == NULL) return frame();
intptr_t* fp = (*func)();
if (fp == NULL) {
return frame();
}
#else
intptr_t* fp = _get_previous_fp();
#endif // AMD64

View file

@ -62,4 +62,10 @@
static bool register_code_area(char *low, char *high);
#ifdef AMD64
#define PLATFORM_PRINT_NATIVE_STACK 1
static bool platform_print_native_stack(outputStream* st, void* context,
char *buf, int buf_size);
#endif
#endif // OS_CPU_WINDOWS_X86_VM_OS_WINDOWS_X86_HPP

View file

@ -4,9 +4,9 @@ Its main purpose is to recreate output similar to
requires a 1.5 JDK to build and simply typing make should build it.
It produces a jar file, logc.jar, that can be run on the
hotspot.log from LogCompilation output like this:
HotSpot log (by default, hotspot_pid{pid}.log) from LogCompilation output like this:
java -jar logc.jar hotspot.log
java -jar logc.jar hotspot_pid1234.log
This will produce something like the normal PrintCompilation output.
Adding the -i option will also report inlining, like PrintInlining.

View file

@ -4219,7 +4219,9 @@ void GraphBuilder::print_inlining(ciMethod* callee, const char* msg, bool succes
}
}
if (!PrintInlining) return;
if (!PrintInlining && !compilation()->method()->has_option("PrintInlining")) {
return;
}
CompileTask::print_inlining(callee, scope()->level(), bci(), msg);
if (success && CIPrintMethodCodes) {
callee->print_codes();

View file

@ -709,10 +709,10 @@ static Klass* resolve_field_return_klass(methodHandle caller, int bci, TRAPS) {
Bytecodes::Code code = field_access.code();
// We must load the class, initialize the class and resolve the field
FieldAccessInfo result; // initialize class if needed
fieldDescriptor result; // initialize class if needed
constantPoolHandle constants(THREAD, caller->constants());
LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK_NULL);
return result.klass()();
LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK_NULL);
return result.field_holder();
}
@ -826,11 +826,11 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
if (stub_id == Runtime1::access_field_patching_id) {
Bytecode_field field_access(caller_method, bci);
FieldAccessInfo result; // initialize class if needed
fieldDescriptor result; // initialize class if needed
Bytecodes::Code code = field_access.code();
constantPoolHandle constants(THREAD, caller_method->constants());
LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK);
patch_field_offset = result.field_offset();
LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK);
patch_field_offset = result.offset();
// If we're patching a field which is volatile then at compile time it
// must not have been known to be volatile, so the generated code

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -75,7 +75,6 @@ ciField::ciField(ciInstanceKlass* klass, int index): _known_to_link_with_put(NUL
assert(klass->get_instanceKlass()->is_linked(), "must be linked before using its constant pool");
_cp_index = index;
constantPoolHandle cpool(thread, klass->get_instanceKlass()->constants());
// Get the field's name, signature, and type.
@ -116,7 +115,7 @@ ciField::ciField(ciInstanceKlass* klass, int index): _known_to_link_with_put(NUL
// The declared holder of this field may not have been loaded.
// Bail out with partial field information.
if (!holder_is_accessible) {
// _cp_index and _type have already been set.
// _type has already been set.
// The default values for _flags and _constant_value will suffice.
// We need values for _holder, _offset, and _is_constant,
_holder = declared_holder;
@ -146,8 +145,6 @@ ciField::ciField(ciInstanceKlass* klass, int index): _known_to_link_with_put(NUL
ciField::ciField(fieldDescriptor *fd): _known_to_link_with_put(NULL), _known_to_link_with_get(NULL) {
ASSERT_IN_VM;
_cp_index = -1;
// Get the field's name, signature, and type.
ciEnv* env = CURRENT_ENV;
_name = env->get_symbol(fd->name());
@ -351,12 +348,11 @@ bool ciField::will_link(ciInstanceKlass* accessing_klass,
}
}
FieldAccessInfo result;
constantPoolHandle c_pool(THREAD,
accessing_klass->get_instanceKlass()->constants());
LinkResolver::resolve_field(result, c_pool, _cp_index,
Bytecodes::java_code(bc),
true, false, KILL_COMPILE_ON_FATAL_(false));
fieldDescriptor result;
LinkResolver::resolve_field(result, _holder->get_instanceKlass(),
_name->get_symbol(), _signature->get_symbol(),
accessing_klass->get_Klass(), bc, true, false,
KILL_COMPILE_ON_FATAL_(false));
// update the hit-cache, unless there is a problem with memory scoping:
if (accessing_klass->is_shared() || !is_shared()) {

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -53,9 +53,6 @@ private:
ciInstanceKlass* _known_to_link_with_get;
ciConstant _constant_value;
// Used for will_link
int _cp_index;
ciType* compute_type();
ciType* compute_type_impl();

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -522,8 +522,7 @@ ciInstanceKlass::compute_nonstatic_fields_impl(GrowableArray<ciField*>*
for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
if (fs.access_flags().is_static()) continue;
fieldDescriptor fd;
fd.initialize(k, fs.index());
fieldDescriptor& fd = fs.field_descriptor();
ciField* field = new (arena) ciField(&fd);
fields->append(field);
}

View file

@ -286,7 +286,10 @@ int ciMethod::itable_index() {
check_is_loaded();
assert(holder()->is_linked(), "must be linked");
VM_ENTRY_MARK;
return klassItable::compute_itable_index(get_Method());
Method* m = get_Method();
if (!m->has_itable_index())
return Method::nonvirtual_vtable_index;
return m->itable_index();
}
#endif // SHARK
@ -1137,6 +1140,10 @@ bool ciMethod::is_klass_loaded(int refinfo_index, bool must_be_resolved) const {
// ------------------------------------------------------------------
// ciMethod::check_call
bool ciMethod::check_call(int refinfo_index, bool is_static) const {
// This method is used only in C2 from InlineTree::ok_to_inline,
// and is only used under -Xcomp or -XX:CompileTheWorld.
// It appears to fail when applied to an invokeinterface call site.
// FIXME: Remove this method and resolve_method_statically; refactor to use the other LinkResolver entry points.
VM_ENTRY_MARK;
{
EXCEPTION_MARK;

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,6 +44,7 @@ class ciSymbol : public ciBaseObject {
friend class ciInstanceKlass;
friend class ciSignature;
friend class ciMethod;
friend class ciField;
friend class ciObjArrayKlass;
private:

View file

@ -888,6 +888,7 @@ void ClassFileParser::parse_field_attributes(u2 attributes_count,
int runtime_visible_type_annotations_length = 0;
u1* runtime_invisible_type_annotations = NULL;
int runtime_invisible_type_annotations_length = 0;
bool runtime_invisible_type_annotations_exists = false;
while (attributes_count--) {
cfs->guarantee_more(6, CHECK); // attribute_name_index, attribute_length
u2 attribute_name_index = cfs->get_u2_fast();
@ -946,15 +947,27 @@ void ClassFileParser::parse_field_attributes(u2 attributes_count,
assert(runtime_invisible_annotations != NULL, "null invisible annotations");
cfs->skip_u1(runtime_invisible_annotations_length, CHECK);
} else if (attribute_name == vmSymbols::tag_runtime_visible_type_annotations()) {
if (runtime_visible_type_annotations != NULL) {
classfile_parse_error(
"Multiple RuntimeVisibleTypeAnnotations attributes for field in class file %s", CHECK);
}
runtime_visible_type_annotations_length = attribute_length;
runtime_visible_type_annotations = cfs->get_u1_buffer();
assert(runtime_visible_type_annotations != NULL, "null visible type annotations");
cfs->skip_u1(runtime_visible_type_annotations_length, CHECK);
} else if (PreserveAllAnnotations && attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
runtime_invisible_type_annotations_length = attribute_length;
runtime_invisible_type_annotations = cfs->get_u1_buffer();
assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
cfs->skip_u1(runtime_invisible_type_annotations_length, CHECK);
} else if (attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
if (runtime_invisible_type_annotations_exists) {
classfile_parse_error(
"Multiple RuntimeInvisibleTypeAnnotations attributes for field in class file %s", CHECK);
} else {
runtime_invisible_type_annotations_exists = true;
}
if (PreserveAllAnnotations) {
runtime_invisible_type_annotations_length = attribute_length;
runtime_invisible_type_annotations = cfs->get_u1_buffer();
assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
}
cfs->skip_u1(attribute_length, CHECK);
} else {
cfs->skip_u1(attribute_length, CHECK); // Skip unknown attributes
}
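The same exists-flag guard recurs for method and class attributes in the hunks below. A condensed, runnable sketch of the pattern on a toy attribute stream (names are illustrative, not the parser's own):

#include <cstdio>
#include <cstring>

int main() {
  const char* attrs[] = { "RuntimeVisibleTypeAnnotations",
                          "RuntimeInvisibleTypeAnnotations",
                          "RuntimeInvisibleTypeAnnotations" };  // duplicate
  const bool preserve_all_annotations = false;  // -XX:+PreserveAllAnnotations analogue
  bool invisible_exists = false;                // the new *_exists flag
  for (const char* name : attrs) {
    if (std::strcmp(name, "RuntimeInvisibleTypeAnnotations") == 0) {
      if (invisible_exists) {
        std::puts("ClassFormatError: multiple RuntimeInvisibleTypeAnnotations attributes");
        return 1;
      }
      invisible_exists = true;
      if (preserve_all_annotations) {
        // only now would the attribute bytes be recorded
      }
      // the attribute body is always consumed, recorded or not
    }
  }
  return 0;
}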
@ -2066,6 +2079,7 @@ methodHandle ClassFileParser::parse_method(bool is_interface,
int runtime_visible_type_annotations_length = 0;
u1* runtime_invisible_type_annotations = NULL;
int runtime_invisible_type_annotations_length = 0;
bool runtime_invisible_type_annotations_exists = false;
u1* annotation_default = NULL;
int annotation_default_length = 0;
@ -2322,16 +2336,30 @@ methodHandle ClassFileParser::parse_method(bool is_interface,
assert(annotation_default != NULL, "null annotation default");
cfs->skip_u1(annotation_default_length, CHECK_(nullHandle));
} else if (method_attribute_name == vmSymbols::tag_runtime_visible_type_annotations()) {
if (runtime_visible_type_annotations != NULL) {
classfile_parse_error(
"Multiple RuntimeVisibleTypeAnnotations attributes for method in class file %s",
CHECK_(nullHandle));
}
runtime_visible_type_annotations_length = method_attribute_length;
runtime_visible_type_annotations = cfs->get_u1_buffer();
assert(runtime_visible_type_annotations != NULL, "null visible type annotations");
// No need for the VM to parse Type annotations
cfs->skip_u1(runtime_visible_type_annotations_length, CHECK_(nullHandle));
} else if (PreserveAllAnnotations && method_attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
runtime_invisible_type_annotations_length = method_attribute_length;
runtime_invisible_type_annotations = cfs->get_u1_buffer();
assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
cfs->skip_u1(runtime_invisible_type_annotations_length, CHECK_(nullHandle));
} else if (method_attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
if (runtime_invisible_type_annotations_exists) {
classfile_parse_error(
"Multiple RuntimeInvisibleTypeAnnotations attributes for method in class file %s",
CHECK_(nullHandle));
} else {
runtime_invisible_type_annotations_exists = true;
}
if (PreserveAllAnnotations) {
runtime_invisible_type_annotations_length = method_attribute_length;
runtime_invisible_type_annotations = cfs->get_u1_buffer();
assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
}
cfs->skip_u1(method_attribute_length, CHECK_(nullHandle));
} else {
// Skip unknown attributes
cfs->skip_u1(method_attribute_length, CHECK_(nullHandle));
@ -2824,6 +2852,7 @@ void ClassFileParser::parse_classfile_attributes(ClassFileParser::ClassAnnotatio
int runtime_visible_type_annotations_length = 0;
u1* runtime_invisible_type_annotations = NULL;
int runtime_invisible_type_annotations_length = 0;
bool runtime_invisible_type_annotations_exists = false;
u1* inner_classes_attribute_start = NULL;
u4 inner_classes_attribute_length = 0;
u2 enclosing_method_class_index = 0;
@ -2927,16 +2956,28 @@ void ClassFileParser::parse_classfile_attributes(ClassFileParser::ClassAnnotatio
parsed_bootstrap_methods_attribute = true;
parse_classfile_bootstrap_methods_attribute(attribute_length, CHECK);
} else if (tag == vmSymbols::tag_runtime_visible_type_annotations()) {
if (runtime_visible_type_annotations != NULL) {
classfile_parse_error(
"Multiple RuntimeVisibleTypeAnnotations attributes in class file %s", CHECK);
}
runtime_visible_type_annotations_length = attribute_length;
runtime_visible_type_annotations = cfs->get_u1_buffer();
assert(runtime_visible_type_annotations != NULL, "null visible type annotations");
// No need for the VM to parse Type annotations
cfs->skip_u1(runtime_visible_type_annotations_length, CHECK);
} else if (PreserveAllAnnotations && tag == vmSymbols::tag_runtime_invisible_type_annotations()) {
runtime_invisible_type_annotations_length = attribute_length;
runtime_invisible_type_annotations = cfs->get_u1_buffer();
assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
cfs->skip_u1(runtime_invisible_type_annotations_length, CHECK);
} else if (tag == vmSymbols::tag_runtime_invisible_type_annotations()) {
if (runtime_invisible_type_annotations_exists) {
classfile_parse_error(
"Multiple RuntimeInvisibleTypeAnnotations attributes in class file %s", CHECK);
} else {
runtime_invisible_type_annotations_exists = true;
}
if (PreserveAllAnnotations) {
runtime_invisible_type_annotations_length = attribute_length;
runtime_invisible_type_annotations = cfs->get_u1_buffer();
assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
}
cfs->skip_u1(attribute_length, CHECK);
} else {
// Unknown attribute
cfs->skip_u1(attribute_length, CHECK);
@ -3954,9 +3995,8 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
this_klass->set_has_final_method();
}
this_klass->copy_method_ordering(method_ordering, CHECK_NULL);
// The InstanceKlass::_methods_jmethod_ids cache and the
// InstanceKlass::_methods_cached_itable_indices cache are
// both managed on the assumption that the initial cache
// The InstanceKlass::_methods_jmethod_ids cache
// is managed on the assumption that the initial cache
// size is equal to the number of methods in the class. If
// that changes, then InstanceKlass::idnum_can_increment()
// has to be changed accordingly.

View file

@ -1319,6 +1319,25 @@ static void clear_pending_exception_if_not_oom(TRAPS) {
// The CHECK at the caller will propagate the exception out
}
/**
* Returns whether the given method should be compiled when doing compile-the-world.
*
* TODO: This should be a private method in a CompileTheWorld class.
*/
static bool can_be_compiled(methodHandle m, int comp_level) {
assert(CompileTheWorld, "must be");
// It's not valid to compile a native wrapper for MethodHandle methods
// that take a MemberName appendix since the bytecode signature is not
// correct.
vmIntrinsics::ID iid = m->intrinsic_id();
if (MethodHandles::is_signature_polymorphic(iid) && MethodHandles::has_member_arg(iid)) {
return false;
}
return CompilationPolicy::can_be_compiled(m, comp_level);
}
void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
int len = (int)strlen(name);
if (len > 6 && strcmp(".class", name + len - 6) == 0) {
@ -1362,8 +1381,7 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
int comp_level = CompilationPolicy::policy()->initial_compile_level();
for (int n = 0; n < k->methods()->length(); n++) {
methodHandle m (THREAD, k->methods()->at(n));
if (CompilationPolicy::can_be_compiled(m, comp_level)) {
if (can_be_compiled(m, comp_level)) {
if (++_codecache_sweep_counter == CompileTheWorldSafepointInterval) {
// Give sweeper a chance to keep up with CTW
VM_ForceSafepoint op;
@ -1375,7 +1393,7 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
methodHandle(), 0, "CTW", THREAD);
if (HAS_PENDING_EXCEPTION) {
clear_pending_exception_if_not_oom(CHECK);
tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name()->as_C_string());
tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string());
} else {
_compile_the_world_method_counter++;
}
@ -1391,11 +1409,13 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
methodHandle(), 0, "CTW", THREAD);
if (HAS_PENDING_EXCEPTION) {
clear_pending_exception_if_not_oom(CHECK);
tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name()->as_C_string());
tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string());
} else {
_compile_the_world_method_counter++;
}
}
} else {
tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string());
}
nmethod* nm = m->code();

View file

@ -450,6 +450,10 @@ class MethodFamily : public ResourceObj {
streamIndentor si(str, indent * 2);
str->indent().print("Selected method: ");
print_method(str, _selected_target);
Klass* method_holder = _selected_target->method_holder();
if (!method_holder->is_interface()) {
tty->print(" : in superclass");
}
str->print_cr("");
}
@ -1141,19 +1145,23 @@ static void create_overpasses(
#endif // ndef PRODUCT
if (method->has_target()) {
Method* selected = method->get_selected_target();
max_stack = assemble_redirect(
if (selected->method_holder()->is_interface()) {
max_stack = assemble_redirect(
&bpool, &buffer, slot->signature(), selected, CHECK);
}
} else if (method->throws_exception()) {
max_stack = assemble_abstract_method_error(
&bpool, &buffer, method->get_exception_message(), CHECK);
}
AccessFlags flags = accessFlags_from(
if (max_stack != 0) {
AccessFlags flags = accessFlags_from(
JVM_ACC_PUBLIC | JVM_ACC_SYNTHETIC | JVM_ACC_BRIDGE);
Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(),
Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(),
flags, max_stack, slot->size_of_parameters(),
ConstMethod::OVERPASS, CHECK);
if (m != NULL) {
overpasses.push(m);
if (m != NULL) {
overpasses.push(m);
}
}
}
}

View file

@ -438,6 +438,29 @@ bool java_lang_String::equals(oop java_string, jchar* chars, int len) {
return true;
}
bool java_lang_String::equals(oop str1, oop str2) {
assert(str1->klass() == SystemDictionary::String_klass(),
"must be java String");
assert(str2->klass() == SystemDictionary::String_klass(),
"must be java String");
typeArrayOop value1 = java_lang_String::value(str1);
int offset1 = java_lang_String::offset(str1);
int length1 = java_lang_String::length(str1);
typeArrayOop value2 = java_lang_String::value(str2);
int offset2 = java_lang_String::offset(str2);
int length2 = java_lang_String::length(str2);
if (length1 != length2) {
return false;
}
for (int i = 0; i < length1; i++) {
if (value1->char_at(i + offset1) != value2->char_at(i + offset2)) {
return false;
}
}
return true;
}
void java_lang_String::print(Handle java_string, outputStream* st) {
oop obj = java_string();
assert(obj->klass() == SystemDictionary::String_klass(), "must be java_string");

View file

@ -182,6 +182,7 @@ class java_lang_String : AllStatic {
static unsigned int hash_string(oop java_string);
static bool equals(oop java_string, jchar* chars, int len);
static bool equals(oop str1, oop str2);
// Conversion between '.' and '/' formats
static Handle externalize_classname(Handle java_string, TRAPS) { return char_converter(java_string, '/', '.', THREAD); }

View file

@ -341,7 +341,7 @@ Symbol* SymbolTable::new_permanent_symbol(const char* name, TRAPS) {
Symbol* SymbolTable::basic_add(int index_arg, u1 *name, int len,
unsigned int hashValue_arg, bool c_heap, TRAPS) {
assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
assert(!Universe::heap()->is_in_reserved(name),
"proposed name of symbol must be stable");
// Don't allow symbols to be created which cannot fit in a Symbol*.
@ -685,7 +685,7 @@ oop StringTable::intern(Handle string_or_null, jchar* name,
if (found_string != NULL) return found_string;
debug_only(StableMemoryChecker smc(name, len * sizeof(name[0])));
assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
assert(!Universe::heap()->is_in_reserved(name),
"proposed name of symbol must be stable");
Handle string;
@ -807,6 +807,8 @@ void StringTable::possibly_parallel_oops_do(OopClosure* f) {
}
}
// This verification is part of Universe::verify() and needs to be quick.
// See StringTable::verify_and_compare() below for exhaustive verification.
void StringTable::verify() {
for (int i = 0; i < the_table()->table_size(); ++i) {
HashtableEntry<oop, mtSymbol>* p = the_table()->bucket(i);
@ -825,6 +827,162 @@ void StringTable::dump(outputStream* st) {
the_table()->dump_table(st, "StringTable");
}
StringTable::VerifyRetTypes StringTable::compare_entries(
int bkt1, int e_cnt1,
HashtableEntry<oop, mtSymbol>* e_ptr1,
int bkt2, int e_cnt2,
HashtableEntry<oop, mtSymbol>* e_ptr2) {
// These entries are sanity checked by verify_and_compare_entries()
// before this function is called.
oop str1 = e_ptr1->literal();
oop str2 = e_ptr2->literal();
if (str1 == str2) {
tty->print_cr("ERROR: identical oop values (0x" PTR_FORMAT ") "
"in entry @ bucket[%d][%d] and entry @ bucket[%d][%d]",
str1, bkt1, e_cnt1, bkt2, e_cnt2);
return _verify_fail_continue;
}
if (java_lang_String::equals(str1, str2)) {
tty->print_cr("ERROR: identical String values in entry @ "
"bucket[%d][%d] and entry @ bucket[%d][%d]",
bkt1, e_cnt1, bkt2, e_cnt2);
return _verify_fail_continue;
}
return _verify_pass;
}
StringTable::VerifyRetTypes StringTable::verify_entry(int bkt, int e_cnt,
HashtableEntry<oop, mtSymbol>* e_ptr,
StringTable::VerifyMesgModes mesg_mode) {
VerifyRetTypes ret = _verify_pass; // be optimistic
oop str = e_ptr->literal();
if (str == NULL) {
if (mesg_mode == _verify_with_mesgs) {
tty->print_cr("ERROR: NULL oop value in entry @ bucket[%d][%d]", bkt,
e_cnt);
}
// NULL oop means no more verifications are possible
return _verify_fail_done;
}
if (str->klass() != SystemDictionary::String_klass()) {
if (mesg_mode == _verify_with_mesgs) {
tty->print_cr("ERROR: oop is not a String in entry @ bucket[%d][%d]",
bkt, e_cnt);
}
// not a String means no more verifications are possible
return _verify_fail_done;
}
unsigned int h = java_lang_String::hash_string(str);
if (e_ptr->hash() != h) {
if (mesg_mode == _verify_with_mesgs) {
tty->print_cr("ERROR: broken hash value in entry @ bucket[%d][%d], "
"bkt_hash=%d, str_hash=%d", bkt, e_cnt, e_ptr->hash(), h);
}
ret = _verify_fail_continue;
}
if (the_table()->hash_to_index(h) != bkt) {
if (mesg_mode == _verify_with_mesgs) {
tty->print_cr("ERROR: wrong index value for entry @ bucket[%d][%d], "
"str_hash=%d, hash_to_index=%d", bkt, e_cnt, h,
the_table()->hash_to_index(h));
}
ret = _verify_fail_continue;
}
return ret;
}
// See StringTable::verify() above for the quick verification that is
// part of Universe::verify(). This verification is exhaustive and
// reports on every issue that is found. StringTable::verify() only
// reports on the first issue that is found.
//
// StringTable::verify_entry() checks:
// - oop value != NULL (same as verify())
// - oop value is a String
// - hash(String) == hash in entry (same as verify())
// - index for hash == index of entry (same as verify())
//
// StringTable::compare_entries() checks:
// - oops are unique across all entries
// - String values are unique across all entries
//
int StringTable::verify_and_compare_entries() {
assert(StringTable_lock->is_locked(), "sanity check");
int fail_cnt = 0;
// first, verify all the entries individually:
for (int bkt = 0; bkt < the_table()->table_size(); bkt++) {
HashtableEntry<oop, mtSymbol>* e_ptr = the_table()->bucket(bkt);
for (int e_cnt = 0; e_ptr != NULL; e_ptr = e_ptr->next(), e_cnt++) {
VerifyRetTypes ret = verify_entry(bkt, e_cnt, e_ptr, _verify_with_mesgs);
if (ret != _verify_pass) {
fail_cnt++;
}
}
}
// Optimization: if the above check did not find any failures, then
// the comparison loop below does not need to call verify_entry()
// before calling compare_entries(). If there were failures, then we
// have to call verify_entry() to see if the entry can be passed to
// compare_entries() safely. When we call verify_entry() in the loop
// below, we do so quietly to avoid duplicate messages and we don't
// increment fail_cnt because the failures have already been counted.
bool need_entry_verify = (fail_cnt != 0);
// second, verify all entries relative to each other:
for (int bkt1 = 0; bkt1 < the_table()->table_size(); bkt1++) {
HashtableEntry<oop, mtSymbol>* e_ptr1 = the_table()->bucket(bkt1);
for (int e_cnt1 = 0; e_ptr1 != NULL; e_ptr1 = e_ptr1->next(), e_cnt1++) {
if (need_entry_verify) {
VerifyRetTypes ret = verify_entry(bkt1, e_cnt1, e_ptr1,
_verify_quietly);
if (ret == _verify_fail_done) {
// cannot use the current entry to compare against other entries
continue;
}
}
for (int bkt2 = bkt1; bkt2 < the_table()->table_size(); bkt2++) {
HashtableEntry<oop, mtSymbol>* e_ptr2 = the_table()->bucket(bkt2);
int e_cnt2;
for (e_cnt2 = 0; e_ptr2 != NULL; e_ptr2 = e_ptr2->next(), e_cnt2++) {
if (bkt1 == bkt2 && e_cnt2 <= e_cnt1) {
// skip the entries up to and including the one that
// we're comparing against
continue;
}
if (need_entry_verify) {
VerifyRetTypes ret = verify_entry(bkt2, e_cnt2, e_ptr2,
_verify_quietly);
if (ret == _verify_fail_done) {
// cannot compare against this entry
continue;
}
}
// compare two entries, report and count any failures:
if (compare_entries(bkt1, e_cnt1, e_ptr1, bkt2, e_cnt2, e_ptr2)
!= _verify_pass) {
fail_cnt++;
}
}
}
}
}
return fail_cnt;
}
// Create a new table and, using the alternate hash code, populate the new table
// with the existing strings. Set the flag to use the alternate hash code afterwards.

View file

@ -311,6 +311,26 @@ public:
static void verify();
static void dump(outputStream* st);
enum VerifyMesgModes {
_verify_quietly = 0,
_verify_with_mesgs = 1
};
enum VerifyRetTypes {
_verify_pass = 0,
_verify_fail_continue = 1,
_verify_fail_done = 2
};
static VerifyRetTypes compare_entries(int bkt1, int e_cnt1,
HashtableEntry<oop, mtSymbol>* e_ptr1,
int bkt2, int e_cnt2,
HashtableEntry<oop, mtSymbol>* e_ptr2);
static VerifyRetTypes verify_entry(int bkt, int e_cnt,
HashtableEntry<oop, mtSymbol>* e_ptr,
VerifyMesgModes mesg_mode);
static int verify_and_compare_entries();
// Sharing
static void copy_buckets(char** top, char*end) {
the_table()->Hashtable<oop, mtSymbol>::copy_buckets(top, end);

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -160,32 +160,42 @@ address CompiledIC::stub_address() const {
// High-level access to an inline cache. Guaranteed to be MT-safe.
void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
methodHandle method = call_info->selected_method();
bool is_invoke_interface = (bytecode == Bytecodes::_invokeinterface && !call_info->has_vtable_index());
bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");
address entry;
if (is_invoke_interface) {
int index = klassItable::compute_itable_index(call_info->resolved_method()());
entry = VtableStubs::create_stub(false, index, method());
assert(entry != NULL, "entry not computed");
if (call_info->call_kind() == CallInfo::itable_call) {
assert(bytecode == Bytecodes::_invokeinterface, "");
int itable_index = call_info->itable_index();
entry = VtableStubs::find_itable_stub(itable_index);
if (entry == NULL) {
return false;
}
#ifdef ASSERT
int index = call_info->resolved_method()->itable_index();
assert(index == itable_index, "CallInfo pre-computes this");
#endif //ASSERT
InstanceKlass* k = call_info->resolved_method()->method_holder();
assert(k->is_interface(), "sanity check");
assert(k->verify_itable_index(itable_index), "sanity check");
InlineCacheBuffer::create_transition_stub(this, k, entry);
} else {
// Can be different than method->vtable_index(), due to package-private etc.
assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
// Can be different than selected_method->vtable_index(), due to package-private etc.
int vtable_index = call_info->vtable_index();
entry = VtableStubs::create_stub(true, vtable_index, method());
InlineCacheBuffer::create_transition_stub(this, method(), entry);
assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
entry = VtableStubs::find_vtable_stub(vtable_index);
if (entry == NULL) {
return false;
}
InlineCacheBuffer::create_transition_stub(this, NULL, entry);
}
if (TraceICs) {
ResourceMark rm;
tty->print_cr ("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
instruction_address(), method->print_value_string(), entry);
instruction_address(), call_info->selected_method()->print_value_string(), entry);
}
// We can't check this anymore. With lazy deopt we could have already
@ -195,6 +205,7 @@ void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecod
// race because the IC entry was complete when we safepointed so
// cleaning it immediately is harmless.
// assert(is_megamorphic(), "sanity check");
return true;
}

View file

@ -226,7 +226,10 @@ class CompiledIC: public ResourceObj {
//
void set_to_clean(); // Can only be called during a safepoint operation
void set_to_monomorphic(CompiledICInfo& info);
void set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
// Returns true if successful and false otherwise. The call can fail if memory
// allocation in the code cache fails.
bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);

View file

@ -46,12 +46,9 @@ address VtableStub::_chunk = NULL;
address VtableStub::_chunk_end = NULL;
VMReg VtableStub::_receiver_location = VMRegImpl::Bad();
static int num_vtable_chunks = 0;
void* VtableStub::operator new(size_t size, int code_size) throw() {
assert(size == sizeof(VtableStub), "mismatched size");
num_vtable_chunks++;
// compute real VtableStub size (rounded to nearest word)
const int real_size = round_to(code_size + sizeof(VtableStub), wordSize);
// malloc them in chunks to minimize header overhead
@ -60,7 +57,7 @@ void* VtableStub::operator new(size_t size, int code_size) throw() {
const int bytes = chunk_factor * real_size + pd_code_alignment();
BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
if (blob == NULL) {
vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "CodeCache: no room for vtable chunks");
return NULL;
}
_chunk = blob->content_begin();
_chunk_end = _chunk + bytes;
@ -111,7 +108,7 @@ void VtableStubs::initialize() {
}
address VtableStubs::create_stub(bool is_vtable_stub, int vtable_index, Method* method) {
address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) {
assert(vtable_index >= 0, "must be positive");
VtableStub* s = ShareVtableStubs ? lookup(is_vtable_stub, vtable_index) : NULL;
@ -121,6 +118,12 @@ address VtableStubs::create_stub(bool is_vtable_stub, int vtable_index, Method*
} else {
s = create_itable_stub(vtable_index);
}
// Creation of vtable or itable can fail if there is not enough free space in the code cache.
if (s == NULL) {
return NULL;
}
enter(is_vtable_stub, vtable_index, s);
if (PrintAdapterHandlers) {
tty->print_cr("Decoding VtableStub %s[%d]@%d",

View file

@ -121,9 +121,11 @@ class VtableStubs : AllStatic {
static VtableStub* lookup (bool is_vtable_stub, int vtable_index);
static void enter (bool is_vtable_stub, int vtable_index, VtableStub* s);
static inline uint hash (bool is_vtable_stub, int vtable_index);
static address find_stub (bool is_vtable_stub, int vtable_index);
public:
static address create_stub(bool is_vtable_stub, int vtable_index, Method* method); // return the entry point of a stub for this call
static address find_vtable_stub(int vtable_index) { return find_stub(true, vtable_index); }
static address find_itable_stub(int itable_index) { return find_stub(false, itable_index); }
static bool is_entry_point(address pc); // is pc a vtable stub entry point?
static bool contains(address pc); // is pc within any stub?
static VtableStub* stub_containing(address pc); // stub containing pc or NULL

View file

@ -230,7 +230,7 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
// depends on this property.
debug_only(
FreeChunk* junk = NULL;
assert(UseCompressedKlassPointers ||
assert(UseCompressedClassPointers ||
junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
"Offset of FreeChunk::_prev within FreeChunk must match"
" that of OopDesc::_klass within OopDesc");
@ -1407,7 +1407,7 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
OrderAccess::storestore();
if (UseCompressedKlassPointers) {
if (UseCompressedClassPointers) {
// Copy gap missed by (aligned) header size calculation below
obj->set_klass_gap(old->klass_gap());
}

View file

@ -481,9 +481,8 @@ uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
_g1h(g1h),
_markBitMap1(MinObjAlignment - 1),
_markBitMap2(MinObjAlignment - 1),
_markBitMap1(log2_intptr(MinObjAlignment)),
_markBitMap2(log2_intptr(MinObjAlignment)),
_parallel_marking_threads(0),
_max_parallel_marking_threads(0),
_sleep_factor(0.0),

View file

@ -0,0 +1,141 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/g1BiasedArray.hpp"
#ifndef PRODUCT
void G1BiasedMappedArrayBase::verify_index(idx_t index) const {
guarantee(_base != NULL, "Array not initialized");
guarantee(index < length(), err_msg("Index out of bounds index: "SIZE_FORMAT" length: "SIZE_FORMAT, index, length()));
}
void G1BiasedMappedArrayBase::verify_biased_index(idx_t biased_index) const {
guarantee(_biased_base != NULL, "Array not initialized");
guarantee(biased_index >= bias() && biased_index < (bias() + length()),
err_msg("Biased index out of bounds, index: "SIZE_FORMAT" bias: "SIZE_FORMAT" length: "SIZE_FORMAT, biased_index, bias(), length()));
}
void G1BiasedMappedArrayBase::verify_biased_index_inclusive_end(idx_t biased_index) const {
guarantee(_biased_base != NULL, "Array not initialized");
guarantee(biased_index >= bias() && biased_index <= (bias() + length()),
err_msg("Biased index out of inclusive bounds, index: "SIZE_FORMAT" bias: "SIZE_FORMAT" length: "SIZE_FORMAT, biased_index, bias(), length()));
}
class TestMappedArray : public G1BiasedMappedArray<int> {
protected:
virtual int default_value() const { return 0xBAADBABE; }
public:
static void test_biasedarray() {
const size_t REGION_SIZE_IN_WORDS = 512;
const size_t NUM_REGIONS = 20;
HeapWord* fake_heap = (HeapWord*)LP64_ONLY(0xBAAA00000) NOT_LP64(0xBA000000); // Any value that is non-zero
TestMappedArray array;
array.initialize(fake_heap, fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS,
REGION_SIZE_IN_WORDS * HeapWordSize);
// Check address calculation (bounds)
assert(array.bottom_address_mapped() == fake_heap,
err_msg("bottom mapped address should be "PTR_FORMAT", but is "PTR_FORMAT, fake_heap, array.bottom_address_mapped()));
assert(array.end_address_mapped() == (fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS), "must be");
int* bottom = array.address_mapped_to(fake_heap);
assert((void*)bottom == (void*) array.base(), "must be");
int* end = array.address_mapped_to(fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS);
assert((void*)end == (void*)(array.base() + array.length()), "must be");
// The entire array should contain default value elements
for (int* current = bottom; current < end; current++) {
assert(*current == array.default_value(), "must be");
}
// Test setting values in the table
HeapWord* region_start_address = fake_heap + REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2);
HeapWord* region_end_address = fake_heap + (REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2) + REGION_SIZE_IN_WORDS - 1);
// Set/get by address tests: invert some value; first retrieve one
int actual_value = array.get_by_index(NUM_REGIONS / 2);
array.set_by_index(NUM_REGIONS / 2, ~actual_value);
// Get the same value by address, should correspond to the start of the "region"
int value = array.get_by_address(region_start_address);
assert(value == ~actual_value, "must be");
// Get the same value by address, at one HeapWord before the start
value = array.get_by_address(region_start_address - 1);
assert(value == array.default_value(), "must be");
// Get the same value by address, at the end of the "region"
value = array.get_by_address(region_end_address);
assert(value == ~actual_value, "must be");
// Make sure the next value maps to another index
value = array.get_by_address(region_end_address + 1);
assert(value == array.default_value(), "must be");
// Reset the value in the array
array.set_by_address(region_start_address + (region_end_address - region_start_address) / 2, actual_value);
// The entire array should have the default value again
for (int* current = bottom; current < end; current++) {
assert(*current == array.default_value(), "must be");
}
// Set/get by index tests: invert some value
idx_t index = NUM_REGIONS / 2;
actual_value = array.get_by_index(index);
array.set_by_index(index, ~actual_value);
value = array.get_by_index(index);
assert(value == ~actual_value, "must be");
value = array.get_by_index(index - 1);
assert(value == array.default_value(), "must be");
value = array.get_by_index(index + 1);
assert(value == array.default_value(), "must be");
array.set_by_index(0, 0);
value = array.get_by_index(0);
assert(value == 0, "must be");
array.set_by_index(array.length() - 1, 0);
value = array.get_by_index(array.length() - 1);
assert(value == 0, "must be");
array.set_by_index(index, 0);
// The array should have three zeros, and default values otherwise
size_t num_zeros = 0;
for (int* current = bottom; current < end; current++) {
assert(*current == array.default_value() || *current == 0, "must be");
if (*current == 0) {
num_zeros++;
}
}
assert(num_zeros == 3, "must be");
}
};
void TestG1BiasedArray_test() {
TestMappedArray::test_biasedarray();
}
#endif
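A worked, standalone sketch of the bias arithmetic exercised by TestMappedArray above (constants are illustrative, echoing the fake heap in the test; assumes a 64-bit build, like the LP64 branch there):

#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  const uintptr_t bottom = 0xBAAA00000ULL;   // start of the covered range
  const unsigned shift = 12;                 // log2 of a 4096-byte granularity
  const size_t granularity = (size_t)1 << shift;
  const size_t bias = bottom / granularity;  // index of the first covered slot

  // Any covered address maps to a biased index with a single shift...
  uintptr_t addr = bottom + 3 * granularity + 123;
  size_t biased_index = addr >> shift;
  // ...and since biased_base == base - bias (in elements), reading
  // biased_base[biased_index] is the same as base[biased_index - bias].
  assert(biased_index - bias == 3);
  return 0;
}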

View file

@ -0,0 +1,181 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
#include "utilities/debug.hpp"
#include "memory/allocation.inline.hpp"
// Implements the common base functionality for arrays that contain provisions
// for accessing their elements using a biased index.
// The element type is defined by instantiating the template.
class G1BiasedMappedArrayBase VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
public:
typedef size_t idx_t;
protected:
address _base; // the real base address
size_t _length; // the length of the array
address _biased_base; // base address biased by "bias" elements
size_t _bias; // the bias, i.e. the offset biased_base is located to the right in elements
uint _shift_by; // the number of bits to shift right when mapping to an index of the array.
protected:
G1BiasedMappedArrayBase() : _base(NULL), _length(0), _biased_base(NULL),
_bias(0), _shift_by(0) { }
// Allocate a new array, generic version.
static address create_new_base_array(size_t length, size_t elem_size) {
assert(length > 0, "just checking");
assert(elem_size > 0, "just checking");
return NEW_C_HEAP_ARRAY(u_char, length * elem_size, mtGC);
}
// Initialize the members of this class. The biased start address of this array
// is the bias (in elements) multiplied by the element size.
void initialize_base(address base, size_t length, size_t bias, size_t elem_size, uint shift_by) {
assert(base != NULL, "just checking");
assert(length > 0, "just checking");
assert(shift_by < sizeof(uintptr_t) * 8, err_msg("Shifting by %zd, larger than word size?", shift_by));
_base = base;
_length = length;
_biased_base = base - (bias * elem_size);
_bias = bias;
_shift_by = shift_by;
}
// Allocate and initialize this array to cover the heap addresses in the range
// of [bottom, end).
void initialize(HeapWord* bottom, HeapWord* end, size_t target_elem_size_in_bytes, size_t mapping_granularity_in_bytes) {
assert(mapping_granularity_in_bytes > 0, "just checking");
assert(is_power_of_2(mapping_granularity_in_bytes),
err_msg("mapping granularity must be power of 2, is %zd", mapping_granularity_in_bytes));
assert((uintptr_t)bottom % mapping_granularity_in_bytes == 0,
err_msg("bottom mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT,
mapping_granularity_in_bytes, bottom));
assert((uintptr_t)end % mapping_granularity_in_bytes == 0,
err_msg("end mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT,
mapping_granularity_in_bytes, end));
size_t num_target_elems = (end - bottom) / (mapping_granularity_in_bytes / HeapWordSize);
idx_t bias = (uintptr_t)bottom / mapping_granularity_in_bytes;
address base = create_new_base_array(num_target_elems, target_elem_size_in_bytes);
initialize_base(base, num_target_elems, bias, target_elem_size_in_bytes, log2_intptr(mapping_granularity_in_bytes));
}
size_t bias() const { return _bias; }
uint shift_by() const { return _shift_by; }
void verify_index(idx_t index) const PRODUCT_RETURN;
void verify_biased_index(idx_t biased_index) const PRODUCT_RETURN;
void verify_biased_index_inclusive_end(idx_t biased_index) const PRODUCT_RETURN;
public:
// Return the length of the array in elements.
size_t length() const { return _length; }
};
// Array that provides biased access and mapping from (valid) addresses in the
// heap into this array.
template<class T>
class G1BiasedMappedArray : public G1BiasedMappedArrayBase {
public:
typedef G1BiasedMappedArrayBase::idx_t idx_t;
T* base() const { return (T*)G1BiasedMappedArrayBase::_base; }
// Return the element of this array at the given index. Assumes
// the index is valid. This is a convenience method that sanity-checks
// the index in debug builds.
T get_by_index(idx_t index) const {
verify_index(index);
return this->base()[index];
}
// Set the element of this array at the given index to the
// given value. Assumes the index is valid. This is a convenience
// method that sanity-checks the index in debug builds.
void set_by_index(idx_t index, T value) {
verify_index(index);
this->base()[index] = value;
}
// The raw biased base pointer.
T* biased_base() const { return (T*)G1BiasedMappedArrayBase::_biased_base; }
// Return the element of this array that covers the given word in the
// heap. Assumes the address maps to a valid index.
T get_by_address(HeapWord* value) const {
idx_t biased_index = ((uintptr_t)value) >> this->shift_by();
this->verify_biased_index(biased_index);
return biased_base()[biased_index];
}
// Set the value of the array entry that corresponds to the given address.
void set_by_address(HeapWord * address, T value) {
idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
this->verify_biased_index(biased_index);
biased_base()[biased_index] = value;
}
protected:
// Returns the address of the element the given address maps to
T* address_mapped_to(HeapWord* address) {
idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
this->verify_biased_index_inclusive_end(biased_index);
return biased_base() + biased_index;
}
public:
// Return the smallest address (inclusive) in the heap that this array covers.
HeapWord* bottom_address_mapped() const {
return (HeapWord*) ((uintptr_t)this->bias() << this->shift_by());
}
// Return the highest address (exclusive) in the heap that this array covers.
HeapWord* end_address_mapped() const {
return (HeapWord*) ((uintptr_t)(this->bias() + this->length()) << this->shift_by());
}
protected:
virtual T default_value() const = 0;
// Set all elements of the given array to the given value.
void clear() {
T value = default_value();
for (idx_t i = 0; i < length(); i++) {
set_by_index(i, value);
}
}
public:
G1BiasedMappedArray() {}
// Allocate and initialize this array to cover the heap addresses in the range
// of [bottom, end).
void initialize(HeapWord* bottom, HeapWord* end, size_t mapping_granularity) {
G1BiasedMappedArrayBase::initialize(bottom, end, sizeof(T), mapping_granularity);
this->clear();
}
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
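
A minimal standalone sketch of the biased-mapping idea used above (all names
hypothetical; this is not part of the patch): the table is indexed by
(address >> shift), and a pointer biased by `bias` elements lets a lookup use
that biased index directly, with no subtraction of the heap's bottom address.

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  const uintptr_t heap_bottom = 0x100000;  // assumed heap start, granularity-aligned
  const uintptr_t heap_end    = 0x140000;  // assumed heap end (exclusive)
  const uintptr_t granularity = 0x10000;   // bytes covered per element (power of 2)

  unsigned shift = 0;
  while ((granularity >> shift) != 1) shift++;         // log2(granularity)

  const size_t length = (heap_end - heap_bottom) / granularity;
  const size_t bias   = heap_bottom >> shift;          // index of the first covered chunk

  std::vector<int> table(length, -1);                  // the real backing array
  // Same trick as _biased_base: technically out-of-bounds pointer arithmetic,
  // but it is never dereferenced outside [table.data(), table.data() + length).
  int* biased_base = table.data() - bias;

  uintptr_t addr = heap_bottom + 3 * granularity + 42; // some address inside the heap
  size_t biased_index = addr >> shift;
  assert(biased_index >= bias && biased_index < bias + length);
  biased_base[biased_index] = 7;                       // writes the same slot as table[3]
  printf("table[3] = %d\n", table[3]);                 // prints 7

  // bottom/end_address_mapped() reconstruct the covered range from bias and length:
  printf("covers [%#lx, %#lx)\n",
         (unsigned long)(bias << shift),
         (unsigned long)((bias + length) << shift));
  return 0;
}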

View file

@ -33,8 +33,8 @@
void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) {
if (has_count_table()) {
check_card_num(from_card_num,
err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
assert(from_card_num >= 0 && from_card_num < _committed_max_card_num,
err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
assert(from_card_num < to_card_num,
err_msg("Wrong order? from: " SIZE_FORMAT ", to: "SIZE_FORMAT,
from_card_num, to_card_num));

View file

@ -72,25 +72,21 @@ class G1CardCounts: public CHeapObj<mtGC> {
return has_reserved_count_table() && _committed_max_card_num > 0;
}
void check_card_num(size_t card_num, const char* msg) {
assert(card_num >= 0 && card_num < _committed_max_card_num, msg);
}
size_t ptr_2_card_num(const jbyte* card_ptr) {
assert(card_ptr >= _ct_bot,
err_msg("Inavalied card pointer: "
err_msg("Invalid card pointer: "
"card_ptr: " PTR_FORMAT ", "
"_ct_bot: " PTR_FORMAT,
card_ptr, _ct_bot));
size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte));
check_card_num(card_num,
err_msg("card pointer out of range: " PTR_FORMAT, card_ptr));
assert(card_num >= 0 && card_num < _committed_max_card_num,
err_msg("card pointer out of range: " PTR_FORMAT, card_ptr));
return card_num;
}
jbyte* card_num_2_ptr(size_t card_num) {
check_card_num(card_num,
err_msg("card num out of range: "SIZE_FORMAT, card_num));
assert(card_num >= 0 && card_num < _committed_max_card_num,
err_msg("card num out of range: "SIZE_FORMAT, card_num));
return (jbyte*) (_ct_bot + card_num);
}

View file

@ -2069,8 +2069,10 @@ jint G1CollectedHeap::initialize() {
_g1_storage.initialize(g1_rs, 0);
_g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
_hrs.initialize((HeapWord*) _g1_reserved.start(),
(HeapWord*) _g1_reserved.end(),
_expansion_regions);
(HeapWord*) _g1_reserved.end());
assert(_hrs.max_length() == _expansion_regions,
err_msg("max length: %u expansion regions: %u",
_hrs.max_length(), _expansion_regions));
// Do later initialization work for concurrent refinement.
_cg1r->init();
@ -2191,6 +2193,10 @@ jint G1CollectedHeap::initialize() {
return JNI_OK;
}
size_t G1CollectedHeap::conservative_max_heap_alignment() {
return HeapRegion::max_region_size();
}
void G1CollectedHeap::ref_processing_init() {
// Reference processing in G1 currently works as follows:
//

View file

@ -1092,6 +1092,9 @@ public:
// specified by the policy object.
jint initialize();
// Return the (conservative) maximum heap alignment for any G1 heap
static size_t conservative_max_heap_alignment();
// Initialize weak reference processing.
virtual void ref_processing_init();

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -149,6 +149,10 @@ void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
// many regions in the heap (based on the min heap size).
#define TARGET_REGION_NUMBER 2048
size_t HeapRegion::max_region_size() {
return (size_t)MAX_REGION_SIZE;
}
void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
uintx region_size = G1HeapRegionSize;
if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -355,6 +355,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
~((1 << (size_t) LogOfHRGrainBytes) - 1);
}
static size_t max_region_size();
// It sets up the heap region size (GrainBytes / GrainWords), as
// well as other related fields that are based on the heap region
// size (LogOfHRGrainBytes / LogOfHRGrainWords /

View file

@ -71,27 +71,16 @@ uint HeapRegionSeq::find_contiguous_from(uint from, uint num) {
// Public
void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end,
uint max_length) {
void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end) {
assert((uintptr_t) bottom % HeapRegion::GrainBytes == 0,
"bottom should be heap region aligned");
assert((uintptr_t) end % HeapRegion::GrainBytes == 0,
"end should be heap region aligned");
_length = 0;
_heap_bottom = bottom;
_heap_end = end;
_region_shift = HeapRegion::LogOfHRGrainBytes;
_next_search_index = 0;
_allocated_length = 0;
_max_length = max_length;
_regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length, mtGC);
memset(_regions, 0, (size_t) max_length * sizeof(HeapRegion*));
_regions_biased = _regions - ((uintx) bottom >> _region_shift);
assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)],
"bottom should be included in the region with index 0");
_regions.initialize(bottom, end, HeapRegion::GrainBytes);
}
MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
@ -101,15 +90,15 @@ MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
G1CollectedHeap* g1h = G1CollectedHeap::heap();
HeapWord* next_bottom = old_end;
assert(_heap_bottom <= next_bottom, "invariant");
assert(heap_bottom() <= next_bottom, "invariant");
while (next_bottom < new_end) {
assert(next_bottom < _heap_end, "invariant");
assert(next_bottom < heap_end(), "invariant");
uint index = length();
assert(index < _max_length, "otherwise we cannot expand further");
assert(index < max_length(), "otherwise we cannot expand further");
if (index == 0) {
// We have not allocated any regions so far
assert(next_bottom == _heap_bottom, "invariant");
assert(next_bottom == heap_bottom(), "invariant");
} else {
// next_bottom should match the end of the last/previous region
assert(next_bottom == at(index - 1)->end(), "invariant");
@ -122,8 +111,8 @@ MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
// allocation failed, we bail out and return what we have done so far
return MemRegion(old_end, next_bottom);
}
assert(_regions[index] == NULL, "invariant");
_regions[index] = new_hr;
assert(_regions.get_by_index(index) == NULL, "invariant");
_regions.set_by_index(index, new_hr);
increment_allocated_length();
}
// Have to increment the length first, otherwise we will get an
@ -228,26 +217,26 @@ uint HeapRegionSeq::shrink_by(uint num_regions_to_remove) {
#ifndef PRODUCT
void HeapRegionSeq::verify_optional() {
guarantee(_length <= _allocated_length,
guarantee(length() <= _allocated_length,
err_msg("invariant: _length: %u _allocated_length: %u",
_length, _allocated_length));
guarantee(_allocated_length <= _max_length,
length(), _allocated_length));
guarantee(_allocated_length <= max_length(),
err_msg("invariant: _allocated_length: %u _max_length: %u",
_allocated_length, _max_length));
guarantee(_next_search_index <= _length,
_allocated_length, max_length()));
guarantee(_next_search_index <= length(),
err_msg("invariant: _next_search_index: %u _length: %u",
_next_search_index, _length));
_next_search_index, length()));
HeapWord* prev_end = _heap_bottom;
HeapWord* prev_end = heap_bottom();
for (uint i = 0; i < _allocated_length; i += 1) {
HeapRegion* hr = _regions[i];
HeapRegion* hr = _regions.get_by_index(i);
guarantee(hr != NULL, err_msg("invariant: i: %u", i));
guarantee(hr->bottom() == prev_end,
err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
i, HR_FORMAT_PARAMS(hr), prev_end));
guarantee(hr->hrs_index() == i,
err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index()));
if (i < _length) {
if (i < length()) {
// Asserts will fire if i is >= _length
HeapWord* addr = hr->bottom();
guarantee(addr_to_region(addr) == hr, "sanity");
@ -265,8 +254,8 @@ void HeapRegionSeq::verify_optional() {
prev_end = hr->end();
}
}
for (uint i = _allocated_length; i < _max_length; i += 1) {
guarantee(_regions[i] == NULL, err_msg("invariant i: %u", i));
for (uint i = _allocated_length; i < max_length(); i += 1) {
guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i));
}
}
#endif // PRODUCT

View file

@ -25,10 +25,17 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
#include "gc_implementation/g1/g1BiasedArray.hpp"
class HeapRegion;
class HeapRegionClosure;
class FreeRegionList;
class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
protected:
virtual HeapRegion* default_value() const { return NULL; }
};
// This class keeps track of the region metadata (i.e., HeapRegion
// instances). They are kept in the _regions array in address
// order. A region's index in the array corresponds to its index in
@ -44,35 +51,21 @@ class FreeRegionList;
//
// We keep track of three lengths:
//
// * _length (returned by length()) is the number of currently
// * _committed_length (returned by length()) is the number of currently
// committed regions.
// * _allocated_length (not exposed outside this class) is the
// number of regions for which we have HeapRegions.
// * _max_length (returned by max_length()) is the maximum number of
// regions the heap can have.
// * max_length() returns the maximum number of regions the heap can have.
//
// and maintain that: _length <= _allocated_length <= _max_length
// and maintain that: _committed_length <= _allocated_length <= max_length()
class HeapRegionSeq: public CHeapObj<mtGC> {
friend class VMStructs;
// The array that holds the HeapRegions.
HeapRegion** _regions;
// Version of _regions biased to address 0
HeapRegion** _regions_biased;
G1HeapRegionTable _regions;
// The number of regions committed in the heap.
uint _length;
// The address of the first reserved word in the heap.
HeapWord* _heap_bottom;
// The address of the last reserved word in the heap - 1.
HeapWord* _heap_end;
// The log of the region byte size.
uint _region_shift;
uint _committed_length;
// A hint for which index to start searching from for humongous
// allocations.
@ -81,37 +74,33 @@ class HeapRegionSeq: public CHeapObj<mtGC> {
// The number of regions for which we have allocated HeapRegion instances.
uint _allocated_length;
// The maximum number of regions in the heap.
uint _max_length;
// Find a contiguous set of empty regions of length num, starting
// from the given index.
uint find_contiguous_from(uint from, uint num);
// Map a heap address to a biased region index. Assume that the
// address is valid.
inline uintx addr_to_index_biased(HeapWord* addr) const;
void increment_allocated_length() {
assert(_allocated_length < _max_length, "pre-condition");
assert(_allocated_length < max_length(), "pre-condition");
_allocated_length++;
}
void increment_length() {
assert(_length < _max_length, "pre-condition");
_length++;
assert(length() < max_length(), "pre-condition");
_committed_length++;
}
void decrement_length() {
assert(_length > 0, "pre-condition");
_length--;
assert(length() > 0, "pre-condition");
_committed_length--;
}
HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
HeapWord* heap_end() const { return _regions.end_address_mapped(); }
public:
// Empty constructor, we'll initialize it with the initialize() method.
HeapRegionSeq() { }
HeapRegionSeq() : _regions(), _committed_length(0), _next_search_index(0), _allocated_length(0) { }
void initialize(HeapWord* bottom, HeapWord* end, uint max_length);
void initialize(HeapWord* bottom, HeapWord* end);
// Return the HeapRegion at the given index. Assume that the index
// is valid.
@ -126,10 +115,10 @@ class HeapRegionSeq: public CHeapObj<mtGC> {
inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;
// Return the number of regions that have been committed in the heap.
uint length() const { return _length; }
uint length() const { return _committed_length; }
// Return the maximum number of regions in the heap.
uint max_length() const { return _max_length; }
uint max_length() const { return (uint)_regions.length(); }
// Expand the sequence to reflect that the heap has grown from
// old_end to new_end. Either create new HeapRegions, or re-use

View file

@ -28,28 +28,16 @@
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
inline uintx HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
assert(_heap_bottom <= addr && addr < _heap_end,
err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
addr, _heap_bottom, _heap_end));
uintx index = (uintx) addr >> _region_shift;
return index;
}
inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
assert(_heap_bottom <= addr && addr < _heap_end,
err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
addr, _heap_bottom, _heap_end));
uintx index_biased = addr_to_index_biased(addr);
HeapRegion* hr = _regions_biased[index_biased];
HeapRegion* hr = _regions.get_by_address(addr);
assert(hr != NULL, "invariant");
return hr;
}
inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
if (addr != NULL && addr < _heap_end) {
assert(addr >= _heap_bottom,
err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, _heap_bottom));
if (addr != NULL && addr < heap_end()) {
assert(addr >= heap_bottom(),
err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, heap_bottom()));
return addr_to_region_unsafe(addr);
}
return NULL;
@ -57,7 +45,7 @@ inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
inline HeapRegion* HeapRegionSeq::at(uint index) const {
assert(index < length(), "pre-condition");
HeapRegion* hr = _regions[index];
HeapRegion* hr = _regions.get_by_index(index);
assert(hr != NULL, "sanity");
assert(hr->hrs_index() == index, "sanity");
return hr;

View file

@ -38,6 +38,7 @@
class PtrQueueSet;
class PtrQueue VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
protected:
// The ptr queue set to which this queue belongs.

View file

@ -31,10 +31,17 @@
#define VM_STRUCTS_G1(nonstatic_field, static_field) \
\
static_field(HeapRegion, GrainBytes, size_t) \
static_field(HeapRegion, GrainBytes, size_t) \
static_field(HeapRegion, LogOfHRGrainBytes, int) \
\
nonstatic_field(HeapRegionSeq, _regions, HeapRegion**) \
nonstatic_field(HeapRegionSeq, _length, uint) \
nonstatic_field(G1HeapRegionTable, _base, address) \
nonstatic_field(G1HeapRegionTable, _length, size_t) \
nonstatic_field(G1HeapRegionTable, _biased_base, address) \
nonstatic_field(G1HeapRegionTable, _bias, size_t) \
nonstatic_field(G1HeapRegionTable, _shift_by, uint) \
\
nonstatic_field(HeapRegionSeq, _regions, G1HeapRegionTable) \
nonstatic_field(HeapRegionSeq, _committed_length, uint) \
\
nonstatic_field(G1CollectedHeap, _hrs, HeapRegionSeq) \
nonstatic_field(G1CollectedHeap, _g1_committed, MemRegion) \
@ -57,6 +64,8 @@
#define VM_TYPES_G1(declare_type, declare_toplevel_type) \
\
declare_toplevel_type(G1HeapRegionTable) \
\
declare_type(G1CollectedHeap, SharedHeap) \
\
declare_type(HeapRegion, ContiguousSpace) \

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -68,9 +68,6 @@ class GenerationSizer : public TwoGenerationCollectorPolicy {
size_t min_old_gen_size() { return _min_gen1_size; }
size_t old_gen_size() { return _initial_gen1_size; }
size_t max_old_gen_size() { return _max_gen1_size; }
size_t metaspace_size() { return MetaspaceSize; }
size_t max_metaspace_size() { return MaxMetaspaceSize; }
};
#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_GENERATIONSIZER_HPP

View file

@ -86,6 +86,11 @@ class ParallelScavengeHeap : public CollectedHeap {
set_alignment(_old_gen_alignment, intra_heap_alignment());
}
// Return the (conservative) maximum heap alignment
static size_t conservative_max_heap_alignment() {
return intra_heap_alignment();
}
// For use by VM operations
enum CollectionType {
Scavenge,
@ -122,7 +127,7 @@ class ParallelScavengeHeap : public CollectedHeap {
// The alignment used for eden and survivors within the young gen
// and for boundary between young gen and old gen.
size_t intra_heap_alignment() const { return 64 * K * HeapWordSize; }
static size_t intra_heap_alignment() { return 64 * K * HeapWordSize; }
size_t capacity() const;
size_t used() const;

View file

@ -87,15 +87,15 @@ MetaspaceSummary CollectedHeap::create_metaspace_summary() {
const MetaspaceSizes meta_space(
MetaspaceAux::allocated_capacity_bytes(),
MetaspaceAux::allocated_used_bytes(),
MetaspaceAux::reserved_in_bytes());
MetaspaceAux::reserved_bytes());
const MetaspaceSizes data_space(
MetaspaceAux::allocated_capacity_bytes(Metaspace::NonClassType),
MetaspaceAux::allocated_used_bytes(Metaspace::NonClassType),
MetaspaceAux::reserved_in_bytes(Metaspace::NonClassType));
MetaspaceAux::reserved_bytes(Metaspace::NonClassType));
const MetaspaceSizes class_space(
MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType),
MetaspaceAux::allocated_used_bytes(Metaspace::ClassType),
MetaspaceAux::reserved_in_bytes(Metaspace::ClassType));
MetaspaceAux::reserved_bytes(Metaspace::ClassType));
return MetaspaceSummary(meta_space, data_space, class_space);
}

View file

@ -496,15 +496,15 @@ IRT_END
IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecodes::Code bytecode))
// resolve field
FieldAccessInfo info;
fieldDescriptor info;
constantPoolHandle pool(thread, method(thread)->constants());
bool is_put = (bytecode == Bytecodes::_putfield || bytecode == Bytecodes::_putstatic);
bool is_static = (bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic);
{
JvmtiHideSingleStepping jhss(thread);
LinkResolver::resolve_field(info, pool, get_index_u2_cpcache(thread, bytecode),
bytecode, false, CHECK);
LinkResolver::resolve_field_access(info, pool, get_index_u2_cpcache(thread, bytecode),
bytecode, CHECK);
} // end JvmtiHideSingleStepping
// check if link resolution caused cpCache to be updated
@ -524,7 +524,7 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecode
// class is initialized. This is required so that access to the static
// field will call the initialization function every time until the class
// is completely initialized, as described in section 2.17.5 of the JVM Specification.
InstanceKlass *klass = InstanceKlass::cast(info.klass()());
InstanceKlass* klass = InstanceKlass::cast(info.field_holder());
bool uninitialized_static = ((bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic) &&
!klass->is_initialized());
Bytecodes::Code get_code = (Bytecodes::Code)0;
@ -539,9 +539,9 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecode
cache_entry(thread)->set_field(
get_code,
put_code,
info.klass(),
info.field_index(),
info.field_offset(),
info.field_holder(),
info.index(),
info.offset(),
state,
info.access_flags().is_final(),
info.access_flags().is_volatile(),
@ -686,29 +686,55 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes
if (already_resolved(thread)) return;
if (bytecode == Bytecodes::_invokeinterface) {
if (TraceItables && Verbose) {
ResourceMark rm(thread);
tty->print_cr("Resolving: klass: %s to method: %s", info.resolved_klass()->name()->as_C_string(), info.resolved_method()->name()->as_C_string());
}
}
#ifdef ASSERT
if (bytecode == Bytecodes::_invokeinterface) {
if (info.resolved_method()->method_holder() ==
SystemDictionary::Object_klass()) {
// NOTE: THIS IS A FIX FOR A CORNER CASE in the JVM spec
// (see also cpCacheOop.cpp for details)
// (see also CallInfo::set_interface for details)
assert(info.call_kind() == CallInfo::vtable_call ||
info.call_kind() == CallInfo::direct_call, "");
methodHandle rm = info.resolved_method();
assert(rm->is_final() || info.has_vtable_index(),
"should have been set already");
cache_entry(thread)->set_method(bytecode, rm, info.vtable_index());
} else if (!info.resolved_method()->has_itable_index()) {
// Resolved something like CharSequence.toString. Use vtable not itable.
assert(info.call_kind() != CallInfo::itable_call, "");
} else {
// Setup itable entry
int index = klassItable::compute_itable_index(info.resolved_method()());
cache_entry(thread)->set_interface_call(info.resolved_method(), index);
assert(info.call_kind() == CallInfo::itable_call, "");
int index = info.resolved_method()->itable_index();
assert(info.itable_index() == index, "");
}
} else {
cache_entry(thread)->set_method(
assert(info.call_kind() == CallInfo::direct_call ||
info.call_kind() == CallInfo::vtable_call, "");
}
#endif
switch (info.call_kind()) {
case CallInfo::direct_call:
cache_entry(thread)->set_direct_call(
bytecode,
info.resolved_method());
break;
case CallInfo::vtable_call:
cache_entry(thread)->set_vtable_call(
bytecode,
info.resolved_method(),
info.vtable_index());
break;
case CallInfo::itable_call:
cache_entry(thread)->set_itable_call(
bytecode,
info.resolved_method(),
info.itable_index());
break;
default: ShouldNotReachHere();
}
}
IRT_END
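
The switch above records one of three linkage shapes in the constant pool
cache entry. A toy standalone model of how each shape selects the call target
at dispatch time (hypothetical types, far simpler than the real cpCache):

#include <cstdio>
#include <vector>

enum class CallKind { direct_call, vtable_call, itable_call };

struct Method { const char* name; };

struct Klass {
  std::vector<Method*> vtable;   // per-class virtual dispatch table
  std::vector<Method*> itable;   // flattened to a single interface, for brevity
};

// direct: use the resolved method itself; vtable/itable: index into the
// receiver class's tables, so overriding methods are found at call time.
Method* select(CallKind kind, Method* resolved, int index, const Klass& recv) {
  switch (kind) {
    case CallKind::direct_call: return resolved;
    case CallKind::vtable_call: return recv.vtable[index];
    case CallKind::itable_call: return recv.itable[index];
  }
  return nullptr;
}

int main() {
  Method m_final = {"C.finalMethod"}, m_virt = {"C.toString"}, m_itab = {"C.run"};
  Klass c{{&m_virt}, {&m_itab}};
  printf("%s\n", select(CallKind::direct_call, &m_final, -1, c)->name);
  printf("%s\n", select(CallKind::vtable_call,  nullptr,  0, c)->name);
  printf("%s\n", select(CallKind::itable_call,  nullptr,  0, c)->name);
  return 0;
}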

View file

@ -46,19 +46,6 @@
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
//------------------------------------------------------------------------------------------------------------------------
// Implementation of FieldAccessInfo
void FieldAccessInfo::set(KlassHandle klass, Symbol* name, int field_index, int field_offset,
BasicType field_type, AccessFlags access_flags) {
_klass = klass;
_name = name;
_field_index = field_index;
_field_offset = field_offset;
_field_type = field_type;
_access_flags = access_flags;
}
//------------------------------------------------------------------------------------------------------------------------
// Implementation of CallInfo
@ -66,26 +53,25 @@ BasicType field_type, AccessFlags access_flags) {
void CallInfo::set_static(KlassHandle resolved_klass, methodHandle resolved_method, TRAPS) {
int vtable_index = Method::nonvirtual_vtable_index;
set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, vtable_index, CHECK);
set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, CallInfo::direct_call, vtable_index, CHECK);
}
void CallInfo::set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, TRAPS) {
void CallInfo::set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int itable_index, TRAPS) {
// This is only called for interface methods. If the resolved_method
// comes from java/lang/Object, it can be the subject of a virtual call, so
// we should pick the vtable index from the resolved method.
// Other than that case, there is no valid vtable index to specify.
int vtable_index = Method::invalid_vtable_index;
if (resolved_method->method_holder() == SystemDictionary::Object_klass()) {
assert(resolved_method->vtable_index() == selected_method->vtable_index(), "sanity check");
vtable_index = resolved_method->vtable_index();
}
set_common(resolved_klass, selected_klass, resolved_method, selected_method, vtable_index, CHECK);
// In that case, the caller must call set_virtual instead of set_interface.
assert(resolved_method->method_holder()->is_interface(), "");
assert(itable_index == resolved_method()->itable_index(), "");
set_common(resolved_klass, selected_klass, resolved_method, selected_method, CallInfo::itable_call, itable_index, CHECK);
}
void CallInfo::set_virtual(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS) {
assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index, "valid index");
set_common(resolved_klass, selected_klass, resolved_method, selected_method, vtable_index, CHECK);
assert(vtable_index < 0 || !resolved_method->has_vtable_index() || vtable_index == resolved_method->vtable_index(), "");
CallKind kind = (vtable_index >= 0 && !resolved_method->can_be_statically_bound() ? CallInfo::vtable_call : CallInfo::direct_call);
set_common(resolved_klass, selected_klass, resolved_method, selected_method, kind, vtable_index, CHECK);
assert(!resolved_method->is_compiled_lambda_form(), "these must be handled via an invokehandle call");
}
@ -98,20 +84,29 @@ void CallInfo::set_handle(methodHandle resolved_method, Handle resolved_appendix
resolved_method->is_compiled_lambda_form(),
"linkMethod must return one of these");
int vtable_index = Method::nonvirtual_vtable_index;
assert(resolved_method->vtable_index() == vtable_index, "");
set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, vtable_index, CHECK);
assert(!resolved_method->has_vtable_index(), "");
set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, CallInfo::direct_call, vtable_index, CHECK);
_resolved_appendix = resolved_appendix;
_resolved_method_type = resolved_method_type;
}
void CallInfo::set_common(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS) {
void CallInfo::set_common(KlassHandle resolved_klass,
KlassHandle selected_klass,
methodHandle resolved_method,
methodHandle selected_method,
CallKind kind,
int index,
TRAPS) {
assert(resolved_method->signature() == selected_method->signature(), "signatures must correspond");
_resolved_klass = resolved_klass;
_selected_klass = selected_klass;
_resolved_method = resolved_method;
_selected_method = selected_method;
_vtable_index = vtable_index;
_call_kind = kind;
_call_index = index;
_resolved_appendix = Handle();
DEBUG_ONLY(verify()); // verify before making side effects
if (CompilationPolicy::must_be_compiled(selected_method)) {
// This path is unusual, mostly used by the '-Xcomp' stress test mode.
@ -138,6 +133,65 @@ void CallInfo::set_common(KlassHandle resolved_klass, KlassHandle selected_klass
}
}
// utility query for unreflecting a method
CallInfo::CallInfo(Method* resolved_method, Klass* resolved_klass) {
Klass* resolved_method_holder = resolved_method->method_holder();
if (resolved_klass == NULL) { // 2nd argument defaults to holder of 1st
resolved_klass = resolved_method_holder;
}
_resolved_klass = resolved_klass;
_selected_klass = resolved_klass;
_resolved_method = resolved_method;
_selected_method = resolved_method;
// classify:
CallKind kind = CallInfo::unknown_kind;
int index = resolved_method->vtable_index();
if (resolved_method->can_be_statically_bound()) {
kind = CallInfo::direct_call;
} else if (!resolved_method_holder->is_interface()) {
// Could be an Object method inherited into an interface, but still a vtable call.
kind = CallInfo::vtable_call;
} else if (!resolved_klass->is_interface()) {
// A miranda method. Compute the vtable index.
ResourceMark rm;
klassVtable* vt = InstanceKlass::cast(resolved_klass)->vtable();
index = vt->index_of_miranda(resolved_method->name(),
resolved_method->signature());
kind = CallInfo::vtable_call;
} else {
// A regular interface call.
kind = CallInfo::itable_call;
index = resolved_method->itable_index();
}
assert(index == Method::nonvirtual_vtable_index || index >= 0, err_msg("bad index %d", index));
_call_kind = kind;
_call_index = index;
_resolved_appendix = Handle();
DEBUG_ONLY(verify());
}
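
The constructor above classifies an arbitrary resolved method after the fact.
A compact restatement of that decision tree (toy predicates, hypothetical
names; the real code also computes the miranda vtable index):

#include <cstdio>

enum class CallKind { direct_call, vtable_call, itable_call };

struct MethodInfo {
  bool statically_bound;        // final/private/static: no dispatch needed
  bool holder_is_interface;     // is the method declared in an interface?
  bool ref_klass_is_interface;  // was it resolved against an interface type?
};

CallKind classify(const MethodInfo& m) {
  if (m.statically_bound)        return CallKind::direct_call;
  if (!m.holder_is_interface)    return CallKind::vtable_call;  // plain virtual
  if (!m.ref_klass_is_interface) return CallKind::vtable_call;  // miranda method
  return CallKind::itable_call;                                 // regular interface call
}

int main() {
  printf("%d\n", (int)classify({true,  false, false}));  // 0: direct_call
  printf("%d\n", (int)classify({false, false, false}));  // 1: vtable_call
  printf("%d\n", (int)classify({false, true,  true }));  // 2: itable_call
  return 0;
}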
#ifdef ASSERT
void CallInfo::verify() {
switch (call_kind()) { // the meaning and allowed value of index depends on kind
case CallInfo::direct_call:
if (_call_index == Method::nonvirtual_vtable_index) break;
// else fall through to check vtable index:
case CallInfo::vtable_call:
assert(resolved_klass()->verify_vtable_index(_call_index), "");
break;
case CallInfo::itable_call:
assert(resolved_method()->method_holder()->verify_itable_index(_call_index), "");
break;
case CallInfo::unknown_kind:
assert(call_kind() != CallInfo::unknown_kind, "CallInfo must be set");
break;
default:
fatal(err_msg_res("Unexpected call kind %d", call_kind()));
}
}
#endif //ASSERT
//------------------------------------------------------------------------------------------------------------------------
// Klass resolution
@ -163,13 +217,6 @@ void LinkResolver::resolve_klass(KlassHandle& result, constantPoolHandle pool, i
result = KlassHandle(THREAD, result_oop);
}
void LinkResolver::resolve_klass_no_update(KlassHandle& result, constantPoolHandle pool, int index, TRAPS) {
Klass* result_oop =
ConstantPool::klass_ref_at_if_loaded_check(pool, index, CHECK);
result = KlassHandle(THREAD, result_oop);
}
//------------------------------------------------------------------------------------------------------------------------
// Method resolution
//
@ -360,7 +407,12 @@ void LinkResolver::check_method_accessability(KlassHandle ref_klass,
void LinkResolver::resolve_method_statically(methodHandle& resolved_method, KlassHandle& resolved_klass,
Bytecodes::Code code, constantPoolHandle pool, int index, TRAPS) {
// This method is used only
// (1) in C2 from InlineTree::ok_to_inline (via ciMethod::check_call),
// and
// (2) in Bytecode_invoke::static_target
// It appears to fail when applied to an invokeinterface call site.
// FIXME: Remove this method and ciMethod::check_call; refactor to use the other LinkResolver entry points.
// resolve klass
if (code == Bytecodes::_invokedynamic) {
resolved_klass = SystemDictionary::MethodHandle_klass();
@ -580,45 +632,49 @@ void LinkResolver::check_field_accessability(KlassHandle ref_klass,
}
}
void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, TRAPS) {
resolve_field(result, pool, index, byte, check_only, true, CHECK);
void LinkResolver::resolve_field_access(fieldDescriptor& result, constantPoolHandle pool, int index, Bytecodes::Code byte, TRAPS) {
// Load these early in case the resolve of the containing klass fails
Symbol* field = pool->name_ref_at(index);
Symbol* sig = pool->signature_ref_at(index);
// resolve specified klass
KlassHandle resolved_klass;
resolve_klass(resolved_klass, pool, index, CHECK);
KlassHandle current_klass(THREAD, pool->pool_holder());
resolve_field(result, resolved_klass, field, sig, current_klass, byte, true, true, CHECK);
}
void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, bool update_pool, TRAPS) {
void LinkResolver::resolve_field(fieldDescriptor& fd, KlassHandle resolved_klass, Symbol* field, Symbol* sig,
KlassHandle current_klass, Bytecodes::Code byte, bool check_access, bool initialize_class,
TRAPS) {
assert(byte == Bytecodes::_getstatic || byte == Bytecodes::_putstatic ||
byte == Bytecodes::_getfield || byte == Bytecodes::_putfield, "bad bytecode");
byte == Bytecodes::_getfield || byte == Bytecodes::_putfield ||
(byte == Bytecodes::_nop && !check_access), "bad field access bytecode");
bool is_static = (byte == Bytecodes::_getstatic || byte == Bytecodes::_putstatic);
bool is_put = (byte == Bytecodes::_putfield || byte == Bytecodes::_putstatic);
// resolve specified klass
KlassHandle resolved_klass;
if (update_pool) {
resolve_klass(resolved_klass, pool, index, CHECK);
} else {
resolve_klass_no_update(resolved_klass, pool, index, CHECK);
}
// Load these early in case the resolve of the containing klass fails
Symbol* field = pool->name_ref_at(index);
Symbol* sig = pool->signature_ref_at(index);
// Check if there's a resolved klass containing the field
if( resolved_klass.is_null() ) {
if (resolved_klass.is_null()) {
ResourceMark rm(THREAD);
THROW_MSG(vmSymbols::java_lang_NoSuchFieldError(), field->as_C_string());
}
// Resolve instance field
fieldDescriptor fd; // find_field initializes fd if found
KlassHandle sel_klass(THREAD, InstanceKlass::cast(resolved_klass())->find_field(field, sig, &fd));
// check if field exists; i.e., if a klass containing the field def has been selected
if (sel_klass.is_null()){
if (sel_klass.is_null()) {
ResourceMark rm(THREAD);
THROW_MSG(vmSymbols::java_lang_NoSuchFieldError(), field->as_C_string());
}
if (!check_access)
// Access checking may be turned off when calling from within the VM.
return;
// check access
KlassHandle ref_klass(THREAD, pool->pool_holder());
check_field_accessability(ref_klass, resolved_klass, sel_klass, fd, CHECK);
check_field_accessability(current_klass, resolved_klass, sel_klass, fd, CHECK);
// check for errors
if (is_static != fd.is_static()) {
@ -629,7 +685,7 @@ void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle poo
}
// Final fields can only be accessed from its own class.
if (is_put && fd.access_flags().is_final() && sel_klass() != pool->pool_holder()) {
if (is_put && fd.access_flags().is_final() && sel_klass() != current_klass()) {
THROW(vmSymbols::java_lang_IllegalAccessError());
}
@ -639,19 +695,18 @@ void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle poo
//
// note 2: we don't want to force initialization if we are just checking
// if the field access is legal; e.g., during compilation
if (is_static && !check_only) {
if (is_static && initialize_class) {
sel_klass->initialize(CHECK);
}
{
if (sel_klass() != current_klass()) {
HandleMark hm(THREAD);
Handle ref_loader (THREAD, InstanceKlass::cast(ref_klass())->class_loader());
Handle ref_loader (THREAD, InstanceKlass::cast(current_klass())->class_loader());
Handle sel_loader (THREAD, InstanceKlass::cast(sel_klass())->class_loader());
Symbol* signature_ref = pool->signature_ref_at(index);
{
ResourceMark rm(THREAD);
Symbol* failed_type_symbol =
SystemDictionary::check_signature_loaders(signature_ref,
SystemDictionary::check_signature_loaders(sig,
ref_loader, sel_loader,
false,
CHECK);
@ -677,9 +732,6 @@ void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle poo
// return information. note that the klass is set to the actual klass containing the
// field, otherwise access of static fields in superclasses will not work.
KlassHandle holder (THREAD, fd.field_holder());
Symbol* name = fd.name();
result.set(holder, name, fd.index(), fd.offset(), fd.field_type(), fd.access_flags());
}
@ -906,10 +958,6 @@ void LinkResolver::runtime_resolve_virtual_method(CallInfo& result,
THROW(vmSymbols::java_lang_NullPointerException());
}
// Virtual methods cannot be resolved before its klass has been linked, for otherwise the Method*'s
// has not been rewritten, and the vtable initialized.
assert(resolved_method->method_holder()->is_linked(), "must be linked");
// Virtual methods cannot be resolved before its klass has been linked, for otherwise the Method*'s
// has not been rewritten, and the vtable initialized. Make sure to do this after the nullcheck, since
// a missing receiver might result in a bogus lookup.
@ -920,6 +968,7 @@ void LinkResolver::runtime_resolve_virtual_method(CallInfo& result,
vtable_index = vtable_index_of_miranda_method(resolved_klass,
resolved_method->name(),
resolved_method->signature(), CHECK);
assert(vtable_index >= 0 , "we should have valid vtable index at this point");
InstanceKlass* inst = InstanceKlass::cast(recv_klass());
@ -927,6 +976,7 @@ void LinkResolver::runtime_resolve_virtual_method(CallInfo& result,
} else {
// at this point we are sure that resolved_method is virtual and not
// a miranda method; therefore, it must have a valid vtable index.
assert(!resolved_method->has_itable_index(), "");
vtable_index = resolved_method->vtable_index();
// We could get a negative vtable_index for final methods,
// because as an optimization they are never put in the vtable,
@ -1006,6 +1056,12 @@ void LinkResolver::runtime_resolve_interface_method(CallInfo& result, methodHand
lookup_instance_method_in_klasses(sel_method, recv_klass,
resolved_method->name(),
resolved_method->signature(), CHECK);
if (sel_method.is_null() && !check_null_and_abstract) {
// In theory this is a harmless placeholder value, but
// in practice leaving it null affects the nsk default method tests.
// This needs further study.
sel_method = resolved_method;
}
// check if method exists
if (sel_method.is_null()) {
ResourceMark rm(THREAD);
@ -1046,7 +1102,14 @@ void LinkResolver::runtime_resolve_interface_method(CallInfo& result, methodHand
sel_method->signature()));
}
// setup result
result.set_interface(resolved_klass, recv_klass, resolved_method, sel_method, CHECK);
if (!resolved_method->has_itable_index()) {
int vtable_index = resolved_method->vtable_index();
assert(vtable_index == sel_method->vtable_index(), "sanity check");
result.set_virtual(resolved_klass, recv_klass, resolved_method, sel_method, vtable_index, CHECK);
return;
}
int itable_index = resolved_method()->itable_index();
result.set_interface(resolved_klass, recv_klass, resolved_method, sel_method, itable_index, CHECK);
}
@ -1293,7 +1356,8 @@ void LinkResolver::resolve_invokedynamic(CallInfo& result, constantPoolHandle po
}
if (TraceMethodHandles) {
tty->print_cr("resolve_invokedynamic #%d %s %s",
ResourceMark rm(THREAD);
tty->print_cr("resolve_invokedynamic #%d %s %s",
ConstantPool::decode_invokedynamic_index(index),
method_name->as_C_string(), method_signature->as_C_string());
tty->print(" BSM info: "); bootstrap_specifier->print();
@ -1342,9 +1406,16 @@ void LinkResolver::resolve_dynamic_call(CallInfo& result,
//------------------------------------------------------------------------------------------------------------------------
#ifndef PRODUCT
void FieldAccessInfo::print() {
void CallInfo::print() {
ResourceMark rm;
tty->print_cr("Field %s@%d", name()->as_C_string(), field_offset());
const char* kindstr = "unknown";
switch (_call_kind) {
case direct_call: kindstr = "direct"; break;
case vtable_call: kindstr = "vtable"; break;
case itable_call: kindstr = "itable"; break;
}
tty->print_cr("Call %s@%d %s", kindstr, _call_index,
_resolved_method.is_null() ? "(none)" : _resolved_method->name_and_sig_as_C_string());
}
#endif

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,63 +30,54 @@
// All the necessary definitions for run-time link resolution.
// LinkInfo & its subclasses provide all the information gathered
// for a particular link after resolving it. A link is any reference
// CallInfo provides all the information gathered for a particular
// linked call site after resolving it. A link is any reference
// made from within the bytecodes of a method to an object outside of
// that method. If the info is invalid, the link has not been resolved
// successfully.
class LinkInfo VALUE_OBJ_CLASS_SPEC {
};
// Link information for getfield/putfield & getstatic/putstatic bytecodes.
class FieldAccessInfo: public LinkInfo {
protected:
KlassHandle _klass;
Symbol* _name;
AccessFlags _access_flags;
int _field_index; // original index in the klass
int _field_offset;
BasicType _field_type;
class CallInfo VALUE_OBJ_CLASS_SPEC {
public:
void set(KlassHandle klass, Symbol* name, int field_index, int field_offset,
BasicType field_type, AccessFlags access_flags);
KlassHandle klass() const { return _klass; }
Symbol* name() const { return _name; }
int field_index() const { return _field_index; }
int field_offset() const { return _field_offset; }
BasicType field_type() const { return _field_type; }
AccessFlags access_flags() const { return _access_flags; }
// debugging
void print() PRODUCT_RETURN;
};
// Link information for all calls.
class CallInfo: public LinkInfo {
// Ways that a method call might be selected (or not) based on receiver type.
// Note that an invokevirtual instruction might be linked as a direct call (no dispatch),
// and an invokeinterface instruction might be linked with any of the three options
enum CallKind {
direct_call, // jump into resolved_method (must be concrete)
vtable_call, // select recv.klass.method_at_vtable(index)
itable_call, // select recv.klass.method_at_itable(resolved_method.holder, index)
unknown_kind = -1
};
private:
KlassHandle _resolved_klass; // static receiver klass
KlassHandle _resolved_klass; // static receiver klass, resolved from a symbolic reference
KlassHandle _selected_klass; // dynamic receiver class (same as static, or subklass)
methodHandle _resolved_method; // static target method
methodHandle _selected_method; // dynamic (actual) target method
int _vtable_index; // vtable index of selected method
CallKind _call_kind; // kind of call: direct (bytecode static/special,
// plus others inferred), vtable, or itable
int _call_index; // vtable or itable index of selected class method (if any)
Handle _resolved_appendix; // extra argument in constant pool (if CPCE::has_appendix)
Handle _resolved_method_type; // MethodType (for invokedynamic and invokehandle call sites)
void set_static( KlassHandle resolved_klass, methodHandle resolved_method , TRAPS);
void set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method , TRAPS);
void set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int itable_index , TRAPS);
void set_virtual( KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index , TRAPS);
void set_handle( methodHandle resolved_method, Handle resolved_appendix, Handle resolved_method_type, TRAPS);
void set_common( KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index , TRAPS);
void set_common( KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, CallKind kind, int index, TRAPS);
friend class LinkResolver;
public:
CallInfo() {
#ifndef PRODUCT
_call_kind = CallInfo::unknown_kind;
_call_index = Method::garbage_vtable_index;
#endif //PRODUCT
}
// utility to extract an effective CallInfo from a method and an optional receiver limit
// does not queue the method for compilation
CallInfo(Method* resolved_method, Klass* resolved_klass = NULL);
KlassHandle resolved_klass() const { return _resolved_klass; }
KlassHandle selected_klass() const { return _selected_klass; }
methodHandle resolved_method() const { return _resolved_method; }
@ -95,21 +86,43 @@ class CallInfo: public LinkInfo {
Handle resolved_method_type() const { return _resolved_method_type; }
BasicType result_type() const { return selected_method()->result_type(); }
bool has_vtable_index() const { return _vtable_index >= 0; }
bool is_statically_bound() const { return _vtable_index == Method::nonvirtual_vtable_index; }
CallKind call_kind() const { return _call_kind; }
int call_index() const { return _call_index; }
int vtable_index() const {
// Even for interface calls the vtable index could be non-negative.
// See CallInfo::set_interface.
assert(has_vtable_index() || is_statically_bound(), "");
return _vtable_index;
assert(call_kind() == vtable_call || call_kind() == direct_call, "");
// The returned value is < 0 if the call is statically bound.
// But, the returned value may be >= 0 even if the kind is direct_call.
// It is up to the caller to decide which way to go.
return _call_index;
}
int itable_index() const {
assert(call_kind() == itable_call, "");
// The returned value is always >= 0, a valid itable index.
return _call_index;
}
// debugging
#ifdef ASSERT
bool has_vtable_index() const { return _call_index >= 0 && _call_kind != CallInfo::itable_call; }
bool is_statically_bound() const { return _call_index == Method::nonvirtual_vtable_index; }
#endif //ASSERT
void verify() PRODUCT_RETURN;
void print() PRODUCT_RETURN;
};
// Link information for getfield/putfield & getstatic/putstatic bytecodes
// is represented using a fieldDescriptor.
// The LinkResolver is used to resolve constant-pool references at run-time.
// It does all necessary link-time checks & throws exceptions if necessary.
class LinkResolver: AllStatic {
friend class klassVtable;
friend class klassItable;
private:
static void lookup_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
static void lookup_instance_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
@ -120,7 +133,6 @@ class LinkResolver: AllStatic {
static int vtable_index_of_miranda_method(KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
static void resolve_klass (KlassHandle& result, constantPoolHandle pool, int index, TRAPS);
static void resolve_klass_no_update (KlassHandle& result, constantPoolHandle pool, int index, TRAPS); // no update of constantPool entry
static void resolve_pool (KlassHandle& resolved_klass, Symbol*& method_name, Symbol*& method_signature, KlassHandle& current_klass, constantPoolHandle pool, int index, TRAPS);
@ -148,9 +160,16 @@ class LinkResolver: AllStatic {
Bytecodes::Code code, constantPoolHandle pool, int index, TRAPS);
// runtime/static resolving for fields
static void resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, TRAPS);
// takes an extra bool argument "update_pool" to decide whether to update the constantPool during klass resolution.
static void resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, bool update_pool, TRAPS);
static void resolve_field_access(fieldDescriptor& result, constantPoolHandle pool, int index, Bytecodes::Code byte, TRAPS);
static void resolve_field(fieldDescriptor& result, KlassHandle resolved_klass, Symbol* field_name, Symbol* field_signature,
KlassHandle current_klass, Bytecodes::Code access_kind, bool check_access, bool initialize_class, TRAPS);
// source of access_kind codes:
static Bytecodes::Code field_access_kind(bool is_static, bool is_put) {
return (is_static
? (is_put ? Bytecodes::_putstatic : Bytecodes::_getstatic)
: (is_put ? Bytecodes::_putfield : Bytecodes::_getfield ));
}
// runtime resolving:
// resolved_klass = specified class (i.e., static receiver class)

View file

@ -47,6 +47,11 @@
// CollectorPolicy methods.
// Align down. If aligning results in 0, return 'alignment'.
static size_t restricted_align_down(size_t size, size_t alignment) {
return MAX2(alignment, align_size_down_(size, alignment));
}
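
A standalone illustration of the helper (a local align_down is assumed here;
the patch itself uses the align_size_down_ macro): rounding down can never
yield 0, because the result is floored at 'alignment'.

#include <cassert>
#include <cstddef>

static size_t align_down(size_t size, size_t alignment) {
  return size & ~(alignment - 1);   // alignment must be a power of 2
}

static size_t restricted_align_down(size_t size, size_t alignment) {
  size_t aligned = align_down(size, alignment);
  return aligned > alignment ? aligned : alignment;  // MAX2 equivalent
}

int main() {
  assert(restricted_align_down(5000, 4096) == 4096);  // ordinary align-down
  assert(restricted_align_down(1000, 4096) == 4096);  // would be 0, floored
  assert(restricted_align_down(8192, 4096) == 8192);  // already aligned
  return 0;
}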
void CollectorPolicy::initialize_flags() {
assert(max_alignment() >= min_alignment(),
err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
@ -59,18 +64,24 @@ void CollectorPolicy::initialize_flags() {
vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
}
if (MetaspaceSize > MaxMetaspaceSize) {
MaxMetaspaceSize = MetaspaceSize;
}
MetaspaceSize = MAX2(min_alignment(), align_size_down_(MetaspaceSize, min_alignment()));
// Don't increase Metaspace size limit above specified.
MaxMetaspaceSize = align_size_down(MaxMetaspaceSize, max_alignment());
if (MetaspaceSize > MaxMetaspaceSize) {
MetaspaceSize = MaxMetaspaceSize;
if (!is_size_aligned(MaxMetaspaceSize, max_alignment())) {
FLAG_SET_ERGO(uintx, MaxMetaspaceSize,
restricted_align_down(MaxMetaspaceSize, max_alignment()));
}
MinMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MinMetaspaceExpansion, min_alignment()));
MaxMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MaxMetaspaceExpansion, min_alignment()));
if (MetaspaceSize > MaxMetaspaceSize) {
FLAG_SET_ERGO(uintx, MetaspaceSize, MaxMetaspaceSize);
}
if (!is_size_aligned(MetaspaceSize, min_alignment())) {
FLAG_SET_ERGO(uintx, MetaspaceSize,
restricted_align_down(MetaspaceSize, min_alignment()));
}
assert(MetaspaceSize <= MaxMetaspaceSize, "Must be");
MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, min_alignment());
MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, min_alignment());
MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());
@ -145,6 +156,30 @@ void CollectorPolicy::cleared_all_soft_refs() {
_all_soft_refs_clear = true;
}
size_t CollectorPolicy::compute_max_alignment() {
// The card marking array and the offset arrays for old generations are
// committed in os pages as well. Make sure they are entirely full (to
// avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
// byte entry and the os page size is 4096, the maximum heap size should
// be 512*4096 = 2MB aligned.
// There is only the GenRemSet in Hotspot and only the GenRemSet::CardTable
// is supported.
// Requirements of any new remembered set implementations must be added here.
size_t alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);
// Parallel GC does its own alignment of the generations to avoid requiring a
// large page (256M on some platforms) for the permanent generation. The
// other collectors should also be updated to do their own alignment and then
// this use of lcm() should be removed.
if (UseLargePages && !UseParallelGC) {
// in presence of large pages we have to make sure that our
// alignment is large page aware
alignment = lcm(os::large_page_size(), alignment);
}
return alignment;
}
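
The comment's arithmetic, worked through with its assumed numbers (512 heap
bytes per card byte, 4096-byte OS pages): for the card table to fill whole
pages, the heap must be aligned to bytes-per-card times the page size.

#include <cassert>
#include <cstddef>

int main() {
  const size_t bytes_per_card = 512;   // heap bytes covered by one card byte
  const size_t os_page_size   = 4096;  // bytes per committed OS page
  const size_t max_alignment  = bytes_per_card * os_page_size;
  assert(max_alignment == 2u * 1024 * 1024);  // 2MB, as the comment says
  return 0;
}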
// GenCollectorPolicy methods.
@ -175,29 +210,6 @@ void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
GCTimeRatio);
}
size_t GenCollectorPolicy::compute_max_alignment() {
// The card marking array and the offset arrays for old generations are
// committed in os pages as well. Make sure they are entirely full (to
// avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
// byte entry and the os page size is 4096, the maximum heap size should
// be 512*4096 = 2MB aligned.
size_t alignment = GenRemSet::max_alignment_constraint(rem_set_name());
// Parallel GC does its own alignment of the generations to avoid requiring a
// large page (256M on some platforms) for the permanent generation. The
// other collectors should also be updated to do their own alignment and then
// this use of lcm() should be removed.
if (UseLargePages && !UseParallelGC) {
// in presence of large pages we have to make sure that our
// alignment is large page aware
alignment = lcm(os::large_page_size(), alignment);
}
assert(alignment >= min_alignment(), "Must be");
return alignment;
}
void GenCollectorPolicy::initialize_flags() {
// All sizes must be multiples of the generation granularity.
set_min_alignment((uintx) Generation::GenGrain);

View file

@ -98,6 +98,9 @@ class CollectorPolicy : public CHeapObj<mtGC> {
{}
public:
// Return maximum heap alignment that may be imposed by the policy
static size_t compute_max_alignment();
void set_min_alignment(size_t align) { _min_alignment = align; }
size_t min_alignment() { return _min_alignment; }
void set_max_alignment(size_t align) { _max_alignment = align; }
@ -234,9 +237,6 @@ class GenCollectorPolicy : public CollectorPolicy {
// Try to allocate space by expanding the heap.
virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
// compute max heap alignment
size_t compute_max_alignment();
// Scale the base_size by NewRatio according to
// result = base_size / (NewRatio + 1)
// and align by min_alignment()

View file

@ -122,7 +122,7 @@ void GC_locker::jni_unlock(JavaThread* thread) {
// strictly needed. It's added here to make it clear that
// the GC will NOT be performed if any other caller
// of GC_locker::lock() still needs GC locked.
if (!is_active()) {
if (!is_active_internal()) {
_doing_gc = true;
{
// Must give up the lock while at a safepoint

View file

@ -88,7 +88,7 @@ class GC_locker: public AllStatic {
public:
// Accessors
static bool is_active() {
assert(_needs_gc || SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
return is_active_internal();
}
static bool needs_gc() { return _needs_gc; }

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -148,6 +148,11 @@ public:
return gen_policy()->size_policy();
}
// Return the (conservative) maximum heap alignment
static size_t conservative_max_heap_alignment() {
return Generation::GenGrain;
}
size_t capacity() const;
size_t used() const;

View file

@ -50,13 +50,6 @@
// Chunks, change Chunks so that they can be allocated out of a VirtualSpace.
size_t Metablock::_min_block_byte_size = sizeof(Metablock);
#ifdef ASSERT
size_t Metablock::_overhead =
Chunk::aligned_overhead_size(sizeof(Metablock)) / BytesPerWord;
#else
size_t Metablock::_overhead = 0;
#endif
// New blocks returned by the Metaspace are zero initialized.
// We should instead fix the constructors to not assume this.
Metablock* Metablock::initialize(MetaWord* p, size_t word_size) {

View file

@ -48,7 +48,6 @@ class Metablock VALUE_OBJ_CLASS_SPEC {
} _header;
} _block;
static size_t _min_block_byte_size;
static size_t _overhead;
typedef union block_t Block;
typedef struct header_t Header;
@@ -73,7 +72,6 @@ class Metablock VALUE_OBJ_CLASS_SPEC {
void set_prev(Metablock* v) { _block._header._prev = v; }
static size_t min_block_byte_size() { return _min_block_byte_size; }
static size_t overhead() { return _overhead; }
bool is_free() { return header()->_word_size != 0; }
void clear_next() { set_next(NULL); }

File diff suppressed because it is too large

View file

@@ -56,12 +56,15 @@
// +-------------------+
//
class ChunkManager;
class ClassLoaderData;
class Metablock;
class Metachunk;
class MetaWord;
class Mutex;
class outputStream;
class SpaceManager;
class VirtualSpaceList;
// Metaspaces each have a SpaceManager and allocations
// are done by the SpaceManager. Allocations are done
@@ -76,8 +79,6 @@ class SpaceManager;
// allocate() method returns a block for use as a
// quantum of metadata.
class VirtualSpaceList;
class Metaspace : public CHeapObj<mtClass> {
friend class VMStructs;
friend class SpaceManager;
@@ -102,6 +103,10 @@ class Metaspace : public CHeapObj<mtClass> {
private:
void initialize(Mutex* lock, MetaspaceType type);
Metachunk* get_initialization_chunk(MetadataType mdtype,
size_t chunk_word_size,
size_t chunk_bunch);
// Align up the word size to the allocation word size
static size_t align_word_size_up(size_t);
@@ -134,6 +139,10 @@ class Metaspace : public CHeapObj<mtClass> {
static VirtualSpaceList* _space_list;
static VirtualSpaceList* _class_space_list;
static ChunkManager* _chunk_manager_metadata;
static ChunkManager* _chunk_manager_class;
public:
static VirtualSpaceList* space_list() { return _space_list; }
static VirtualSpaceList* class_space_list() { return _class_space_list; }
static VirtualSpaceList* get_space_list(MetadataType mdtype) {
@@ -141,6 +150,14 @@ class Metaspace : public CHeapObj<mtClass> {
return mdtype == ClassType ? class_space_list() : space_list();
}
static ChunkManager* chunk_manager_metadata() { return _chunk_manager_metadata; }
static ChunkManager* chunk_manager_class() { return _chunk_manager_class; }
static ChunkManager* get_chunk_manager(MetadataType mdtype) {
assert(mdtype != MetadataTypeCount, "MetadataTypeCount can't be used as mdtype");
return mdtype == ClassType ? chunk_manager_class() : chunk_manager_metadata();
}
private:
// This is used by DumpSharedSpaces only, where only _vsm is used. So we will
// maintain a single list for now.
void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size);
@@ -182,9 +199,8 @@ class Metaspace : public CHeapObj<mtClass> {
char* bottom() const;
size_t used_words_slow(MetadataType mdtype) const;
size_t free_words(MetadataType mdtype) const;
size_t free_words_slow(MetadataType mdtype) const;
size_t capacity_words_slow(MetadataType mdtype) const;
size_t waste_words(MetadataType mdtype) const;
size_t used_bytes_slow(MetadataType mdtype) const;
size_t capacity_bytes_slow(MetadataType mdtype) const;
@@ -200,6 +216,7 @@ class Metaspace : public CHeapObj<mtClass> {
void dump(outputStream* const out) const;
// Free empty virtualspaces
static void purge(MetadataType mdtype);
static void purge();
void print_on(outputStream* st) const;
@@ -213,27 +230,22 @@ class Metaspace : public CHeapObj<mtClass> {
void iterate(AllocRecordClosure *closure);
// Return TRUE only if UseCompressedKlassPointers is True and DumpSharedSpaces is False.
// Return TRUE only if UseCompressedClassPointers is True and DumpSharedSpaces is False.
static bool using_class_space() {
return NOT_LP64(false) LP64_ONLY(UseCompressedKlassPointers && !DumpSharedSpaces);
return NOT_LP64(false) LP64_ONLY(UseCompressedClassPointers && !DumpSharedSpaces);
}
};
class MetaspaceAux : AllStatic {
static size_t free_chunks_total(Metaspace::MetadataType mdtype);
public:
// Statistics for class space and data space in metaspace.
static size_t free_chunks_total_words(Metaspace::MetadataType mdtype);
// These methods iterate over the classloader data graph
// for the given Metaspace type. These are slow.
static size_t used_bytes_slow(Metaspace::MetadataType mdtype);
static size_t free_in_bytes(Metaspace::MetadataType mdtype);
static size_t free_bytes_slow(Metaspace::MetadataType mdtype);
static size_t capacity_bytes_slow(Metaspace::MetadataType mdtype);
// Iterates over the virtual space list.
static size_t reserved_in_bytes(Metaspace::MetadataType mdtype);
static size_t capacity_bytes_slow();
// Running sum of space in all Metachunks that has been
// allocated to a Metaspace. This is used instead of
@@ -263,17 +275,16 @@ class MetaspaceAux : AllStatic {
}
// Used by MetaspaceCounters
static size_t free_chunks_total();
static size_t free_chunks_total_in_bytes();
static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype);
static size_t free_chunks_total_words();
static size_t free_chunks_total_bytes();
static size_t free_chunks_total_bytes(Metaspace::MetadataType mdtype);
static size_t allocated_capacity_words(Metaspace::MetadataType mdtype) {
return _allocated_capacity_words[mdtype];
}
static size_t allocated_capacity_words() {
return _allocated_capacity_words[Metaspace::NonClassType] +
(Metaspace::using_class_space() ?
_allocated_capacity_words[Metaspace::ClassType] : 0);
return allocated_capacity_words(Metaspace::NonClassType) +
allocated_capacity_words(Metaspace::ClassType);
}
static size_t allocated_capacity_bytes(Metaspace::MetadataType mdtype) {
return allocated_capacity_words(mdtype) * BytesPerWord;
@@ -286,9 +297,8 @@ class MetaspaceAux : AllStatic {
return _allocated_used_words[mdtype];
}
static size_t allocated_used_words() {
return _allocated_used_words[Metaspace::NonClassType] +
(Metaspace::using_class_space() ?
_allocated_used_words[Metaspace::ClassType] : 0);
return allocated_used_words(Metaspace::NonClassType) +
allocated_used_words(Metaspace::ClassType);
}
static size_t allocated_used_bytes(Metaspace::MetadataType mdtype) {
return allocated_used_words(mdtype) * BytesPerWord;
@@ -300,31 +310,22 @@ class MetaspaceAux : AllStatic {
static size_t free_bytes();
static size_t free_bytes(Metaspace::MetadataType mdtype);
// Total capacity in all Metaspaces
static size_t capacity_bytes_slow() {
#ifdef PRODUCT
// Use allocated_capacity_bytes() in PRODUCT instead of this function.
guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
#endif
size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
" class_capacity + non_class_capacity " SIZE_FORMAT
" class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
allocated_capacity_bytes(), class_capacity + non_class_capacity,
class_capacity, non_class_capacity));
return class_capacity + non_class_capacity;
static size_t reserved_bytes(Metaspace::MetadataType mdtype);
static size_t reserved_bytes() {
return reserved_bytes(Metaspace::ClassType) +
reserved_bytes(Metaspace::NonClassType);
}
// Total space reserved in all Metaspaces
static size_t reserved_in_bytes() {
return reserved_in_bytes(Metaspace::ClassType) +
reserved_in_bytes(Metaspace::NonClassType);
static size_t committed_bytes(Metaspace::MetadataType mdtype);
static size_t committed_bytes() {
return committed_bytes(Metaspace::ClassType) +
committed_bytes(Metaspace::NonClassType);
}
static size_t min_chunk_size();
static size_t min_chunk_size_words();
static size_t min_chunk_size_bytes() {
return min_chunk_size_words() * BytesPerWord;
}
// Print change in used metadata.
static void print_metaspace_change(size_t prev_metadata_used);
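The renames in the hunks above (free_chunks_total to free_chunks_total_words, *_in_bytes to *_bytes) put the unit in every name; the byte variants are thin wrappers that scale the word count by BytesPerWord. A standalone sketch of the convention (assuming an 8-byte word, as on LP64; the counter value is made up):

#include <cstddef>
#include <cstdio>

const size_t BytesPerWord = 8;  // LP64 assumption

static size_t free_chunks_total_words() { return 1024; }  // placeholder value
static size_t free_chunks_total_bytes() {
  return free_chunks_total_words() * BytesPerWord;
}

int main() {
  printf("%zu words = %zu bytes\n",
         free_chunks_total_words(), free_chunks_total_bytes());
  return 0;
}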

View file

@@ -65,26 +65,25 @@ class MetaspacePerfCounters: public CHeapObj<mtInternal> {
MetaspacePerfCounters* MetaspaceCounters::_perf_counters = NULL;
size_t MetaspaceCounters::calculate_capacity() {
// The total capacity is the sum of
// 1) capacity of Metachunks in use by all Metaspaces
// 2) unused space at the end of each Metachunk
// 3) space in the freelist
size_t total_capacity = MetaspaceAux::allocated_capacity_bytes()
+ MetaspaceAux::free_bytes() + MetaspaceAux::free_chunks_total_in_bytes();
return total_capacity;
size_t MetaspaceCounters::used() {
return MetaspaceAux::allocated_used_bytes();
}
size_t MetaspaceCounters::capacity() {
return MetaspaceAux::committed_bytes();
}
size_t MetaspaceCounters::max_capacity() {
return MetaspaceAux::reserved_bytes();
}
void MetaspaceCounters::initialize_performance_counters() {
if (UsePerfData) {
assert(_perf_counters == NULL, "Should only be initialized once");
size_t min_capacity = MetaspaceAux::min_chunk_size();
size_t capacity = calculate_capacity();
size_t max_capacity = MetaspaceAux::reserved_in_bytes();
size_t used = MetaspaceAux::allocated_used_bytes();
_perf_counters = new MetaspacePerfCounters("metaspace", min_capacity, capacity, max_capacity, used);
size_t min_capacity = 0;
_perf_counters = new MetaspacePerfCounters("metaspace", min_capacity,
capacity(), max_capacity(), used());
}
}
@@ -92,31 +91,29 @@ void MetaspaceCounters::update_performance_counters() {
if (UsePerfData) {
assert(_perf_counters != NULL, "Should be initialized");
size_t capacity = calculate_capacity();
size_t max_capacity = MetaspaceAux::reserved_in_bytes();
size_t used = MetaspaceAux::allocated_used_bytes();
_perf_counters->update(capacity, max_capacity, used);
_perf_counters->update(capacity(), max_capacity(), used());
}
}
MetaspacePerfCounters* CompressedClassSpaceCounters::_perf_counters = NULL;
size_t CompressedClassSpaceCounters::calculate_capacity() {
return MetaspaceAux::allocated_capacity_bytes(_class_type) +
MetaspaceAux::free_bytes(_class_type) +
MetaspaceAux::free_chunks_total_in_bytes(_class_type);
size_t CompressedClassSpaceCounters::used() {
return MetaspaceAux::allocated_used_bytes(Metaspace::ClassType);
}
size_t CompressedClassSpaceCounters::capacity() {
return MetaspaceAux::committed_bytes(Metaspace::ClassType);
}
size_t CompressedClassSpaceCounters::max_capacity() {
return MetaspaceAux::reserved_bytes(Metaspace::ClassType);
}
void CompressedClassSpaceCounters::update_performance_counters() {
if (UsePerfData && UseCompressedKlassPointers) {
if (UsePerfData && UseCompressedClassPointers) {
assert(_perf_counters != NULL, "Should be initialized");
size_t capacity = calculate_capacity();
size_t max_capacity = MetaspaceAux::reserved_in_bytes(_class_type);
size_t used = MetaspaceAux::allocated_used_bytes(_class_type);
_perf_counters->update(capacity, max_capacity, used);
_perf_counters->update(capacity(), max_capacity(), used());
}
}
@@ -125,13 +122,10 @@ void CompressedClassSpaceCounters::initialize_performance_counters() {
assert(_perf_counters == NULL, "Should only be initialized once");
const char* ns = "compressedclassspace";
if (UseCompressedKlassPointers) {
size_t min_capacity = MetaspaceAux::min_chunk_size();
size_t capacity = calculate_capacity();
size_t max_capacity = MetaspaceAux::reserved_in_bytes(_class_type);
size_t used = MetaspaceAux::allocated_used_bytes(_class_type);
_perf_counters = new MetaspacePerfCounters(ns, min_capacity, capacity, max_capacity, used);
if (UseCompressedClassPointers) {
size_t min_capacity = 0;
_perf_counters = new MetaspacePerfCounters(ns, min_capacity, capacity(),
max_capacity(), used());
} else {
_perf_counters = new MetaspacePerfCounters(ns, 0, 0, 0, 0);
}
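After this rewrite the three counters map directly onto the virtual-memory hierarchy: used() <= capacity() (committed bytes) <= max_capacity() (reserved bytes). A hedged standalone sketch of that invariant (made-up numbers, not values from a real VM):

#include <cassert>
#include <cstddef>
#include <cstdio>

int main() {
  // Illustrative values only: bytes in use, committed, and reserved.
  const size_t used      = 10u * 1024 * 1024;  // used()
  const size_t committed = 16u * 1024 * 1024;  // capacity()
  const size_t reserved  = 64u * 1024 * 1024;  // max_capacity()

  assert(used <= committed && committed <= reserved);
  printf("used=%zu committed=%zu reserved=%zu\n", used, committed, reserved);
  return 0;
}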

View file

@@ -25,13 +25,15 @@
#ifndef SHARE_VM_MEMORY_METASPACECOUNTERS_HPP
#define SHARE_VM_MEMORY_METASPACECOUNTERS_HPP
#include "memory/metaspace.hpp"
#include "memory/allocation.hpp"
class MetaspacePerfCounters;
class MetaspaceCounters: public AllStatic {
static MetaspacePerfCounters* _perf_counters;
static size_t calculate_capacity();
static size_t used();
static size_t capacity();
static size_t max_capacity();
public:
static void initialize_performance_counters();
@@ -40,8 +42,9 @@ class MetaspaceCounters: public AllStatic {
class CompressedClassSpaceCounters: public AllStatic {
static MetaspacePerfCounters* _perf_counters;
static size_t calculate_capacity();
static const Metaspace::MetadataType _class_type = Metaspace::ClassType;
static size_t used();
static size_t capacity();
static size_t max_capacity();
public:
static void initialize_performance_counters();

View file

@@ -103,9 +103,10 @@ static void calculate_fingerprints() {
if (k->oop_is_instance()) {
InstanceKlass* ik = InstanceKlass::cast(k);
for (int i = 0; i < ik->methods()->length(); i++) {
ResourceMark rm;
Method* m = ik->methods()->at(i);
(new Fingerprinter(m))->fingerprint();
Fingerprinter fp(m);
// The side effect of this call sets method's fingerprint field.
fp.fingerprint();
}
}
}
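The old line heap-allocated a Fingerprinter purely for its side effect and never freed it; the replacement uses a stack object. A standalone sketch of the difference (hypothetical class, not HotSpot's Fingerprinter):

struct Fingerprinter {
  int* target;
  explicit Fingerprinter(int* t) : target(t) {}
  // Side effect: stores the computed fingerprint into *target.
  int fingerprint() { *target = 42; return *target; }
};

int main() {
  int fp = 0;
  // Before: (new Fingerprinter(&fp))->fingerprint();  // object leaked
  Fingerprinter f(&fp);  // stack-allocated, destroyed at scope exit
  f.fingerprint();
  return fp == 42 ? 0 : 1;
}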

View file

@@ -602,7 +602,7 @@ oop Universe::gen_out_of_memory_error(oop default_err) {
}
}
static intptr_t non_oop_bits = 0;
intptr_t Universe::_non_oop_bits = 0;
void* Universe::non_oop_word() {
// Neither the high bits nor the low bits of this value is allowed
@@ -616,11 +616,11 @@ void* Universe::non_oop_word() {
// Using the OS-supplied non-memory-address word (usually 0 or -1)
// will take care of the high bits, however many there are.
if (non_oop_bits == 0) {
non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
if (_non_oop_bits == 0) {
_non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
}
return (void*)non_oop_bits;
return (void*)_non_oop_bits;
}
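The "| 1" is what makes the sentinel safe: real oops are at least word-aligned, so their low bit is always zero and no oop can ever equal the sentinel. A standalone sketch of the low-bit-tagging argument (using 0 as a stand-in for os::non_memory_address_word(), and relying on malloc's alignment guarantee):

#include <cassert>
#include <cstdint>
#include <cstdlib>

int main() {
  // Sentinel: a non-address base value with the low bit forced on.
  const intptr_t non_oop_bits = (intptr_t)0 | 1;

  // Any real, aligned allocation has a zero low bit, so it cannot collide.
  void* p = malloc(16);
  assert(((intptr_t)p & 1) == 0);
  assert((intptr_t)p != non_oop_bits);
  free(p);
  return 0;
}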
jint universe_init() {
@@ -872,13 +872,16 @@ jint Universe::initialize_heap() {
// Reserve the Java heap, which is now the same for all GCs.
ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
assert(alignment <= Arguments::conservative_max_heap_alignment(),
err_msg("actual alignment "SIZE_FORMAT" must be within maximum heap alignment "SIZE_FORMAT,
alignment, Arguments::conservative_max_heap_alignment()));
size_t total_reserved = align_size_up(heap_size, alignment);
assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
"heap size is too big for compressed oops");
bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size());
assert(!UseLargePages
|| UseParallelOldGC
|| UseParallelGC
|| use_large_pages, "Wrong alignment to use large pages");
char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);
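align_size_up here rounds the requested heap size up to the next multiple of the alignment; for power-of-two alignments that is the usual mask trick. A standalone sketch with the same arithmetic (hypothetical helper, not the HotSpot one):

#include <cassert>
#include <cstddef>

// Round size up to a multiple of alignment (alignment must be a power of two).
static size_t align_size_up(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  assert(align_size_up(100, 64) == 128);
  assert(align_size_up(128, 64) == 128);    // already aligned: unchanged
  assert(align_size_up(1, 4096) == 4096);
  return 0;
}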
@@ -1028,7 +1031,7 @@ bool universe_post_init() {
msg = java_lang_String::create_from_str("Metadata space", CHECK_false);
java_lang_Throwable::set_message(Universe::_out_of_memory_error_metaspace, msg());
msg = java_lang_String::create_from_str("Class Metadata space", CHECK_false);
msg = java_lang_String::create_from_str("Compressed class space", CHECK_false);
java_lang_Throwable::set_message(Universe::_out_of_memory_error_class_metaspace, msg());
msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false);

View file

@@ -179,9 +179,11 @@ class Universe: AllStatic {
// The particular choice of collected heap.
static CollectedHeap* _collectedHeap;
static intptr_t _non_oop_bits;
// For UseCompressedOops.
static struct NarrowPtrStruct _narrow_oop;
// For UseCompressedKlassPointers.
// For UseCompressedClassPointers.
static struct NarrowPtrStruct _narrow_klass;
static address _narrow_ptrs_base;
@@ -229,7 +231,7 @@ class Universe: AllStatic {
_narrow_oop._base = base;
}
static void set_narrow_klass_base(address base) {
assert(UseCompressedKlassPointers, "no compressed klass ptrs?");
assert(UseCompressedClassPointers, "no compressed klass ptrs?");
_narrow_klass._base = base;
}
static void set_narrow_oop_use_implicit_null_checks(bool use) {
@@ -353,7 +355,7 @@ class Universe: AllStatic {
static int narrow_oop_shift() { return _narrow_oop._shift; }
static bool narrow_oop_use_implicit_null_checks() { return _narrow_oop._use_implicit_null_checks; }
// For UseCompressedKlassPointers
// For UseCompressedClassPointers
static address narrow_klass_base() { return _narrow_klass._base; }
static bool is_narrow_klass_base(void* addr) { return (narrow_klass_base() == (address)addr); }
static int narrow_klass_shift() { return _narrow_klass._shift; }

View file

@@ -65,7 +65,7 @@ class arrayOopDesc : public oopDesc {
// declared nonstatic fields in arrayOopDesc if not compressed, otherwise
// it occupies the second half of the _klass field in oopDesc.
static int length_offset_in_bytes() {
return UseCompressedKlassPointers ? klass_gap_offset_in_bytes() :
return UseCompressedClassPointers ? klass_gap_offset_in_bytes() :
sizeof(arrayOopDesc);
}

View file

@@ -108,16 +108,16 @@ objArrayOop ConstantPool::resolved_references() const {
void ConstantPool::initialize_resolved_references(ClassLoaderData* loader_data,
intStack reference_map,
int constant_pool_map_length,
TRAPS) {
TRAPS) {
// Initialize the resolved object cache.
int map_length = reference_map.length();
if (map_length > 0) {
// Only need mapping back to constant pool entries. The map isn't used for
// invokedynamic resolved_reference entries. The constant pool cache index
// has the mapping back to both the constant pool and to the resolved
// reference index.
// invokedynamic resolved_reference entries. For invokedynamic entries,
// the constant pool cache index has the mapping back to both the constant
// pool and to the resolved reference index.
if (constant_pool_map_length > 0) {
Array<u2>* om = MetadataFactory::new_array<u2>(loader_data, map_length, CHECK);
Array<u2>* om = MetadataFactory::new_array<u2>(loader_data, constant_pool_map_length, CHECK);
for (int i = 0; i < constant_pool_map_length; i++) {
int x = reference_map.at(i);
@@ -182,16 +182,9 @@ oop ConstantPool::lock() {
int ConstantPool::cp_to_object_index(int cp_index) {
// This is slow; don't call it too often.
for (int i = 0; i< reference_map()->length(); i++) {
if (reference_map()->at(i) == cp_index) return i;
// Zero entry is the divider between constant pool indices for strings,
// method handles and method types. After that the index is a constant
// pool cache index for invokedynamic. Stop when zero (which can never
// be a constant pool index)
if (reference_map()->at(i) == 0) break;
}
// We might not find the index.
return _no_index_sentinel;
int i = reference_map()->find(cp_index);
// We might not find the index for jsr292 call.
return (i < 0) ? _no_index_sentinel : i;
}
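The rewrite replaces the hand-rolled scan with the array's own find(), mapping a miss to _no_index_sentinel for the jsr292 case. A standalone sketch of the same find-or-sentinel idiom (std::vector and std::find stand in for Array<u2> and its find()):

#include <algorithm>
#include <cassert>
#include <vector>

const int no_index_sentinel = -1;

// Position of cp_index in the reference map, or the sentinel on a miss.
static int cp_to_object_index(const std::vector<int>& reference_map, int cp_index) {
  auto it = std::find(reference_map.begin(), reference_map.end(), cp_index);
  return it == reference_map.end() ? no_index_sentinel
                                   : (int)(it - reference_map.begin());
}

int main() {
  std::vector<int> map = {7, 3, 9};
  assert(cp_to_object_index(map, 9) == 2);
  assert(cp_to_object_index(map, 4) == no_index_sentinel);
  return 0;
}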
Klass* ConstantPool::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS) {
@@ -396,32 +389,6 @@ Klass* ConstantPool::klass_ref_at_if_loaded(constantPoolHandle this_oop, int whi
}
// This is an interface for the compiler that allows accessing non-resolved entries
// in the constant pool - but still performs the validation tests. Must be used
// in a pre-parse of the compiler - to determine what it can do and not do.
// Note: We cannot update the ConstantPool from the vm_thread.
Klass* ConstantPool::klass_ref_at_if_loaded_check(constantPoolHandle this_oop, int index, TRAPS) {
int which = this_oop->klass_ref_index_at(index);
CPSlot entry = this_oop->slot_at(which);
if (entry.is_resolved()) {
assert(entry.get_klass()->is_klass(), "must be");
return entry.get_klass();
} else {
assert(entry.is_unresolved(), "must be either symbol or klass");
Symbol* name = entry.get_symbol();
oop loader = this_oop->pool_holder()->class_loader();
oop protection_domain = this_oop->pool_holder()->protection_domain();
Handle h_loader(THREAD, loader);
Handle h_prot (THREAD, protection_domain);
KlassHandle k(THREAD, SystemDictionary::find(name, h_loader, h_prot, THREAD));
// Do access check for klasses
if( k.not_null() ) verify_constant_pool_resolve(this_oop, k, CHECK_NULL);
return k();
}
}
Method* ConstantPool::method_at_if_loaded(constantPoolHandle cpool,
int which) {
if (cpool->cache() == NULL) return NULL; // nothing to load yet
@@ -866,8 +833,7 @@ oop ConstantPool::string_at_impl(constantPoolHandle this_oop, int which, int obj
// If the string has already been interned, this entry will be non-null
oop str = this_oop->resolved_references()->obj_at(obj_index);
if (str != NULL) return str;
Symbol* sym = this_oop->unresolved_string_at(which);
Symbol* sym = this_oop->unresolved_string_at(which);
str = StringTable::intern(sym, CHECK_(NULL));
this_oop->string_at_put(which, obj_index, str);
assert(java_lang_String::is_instance(str), "must be string");
@@ -1645,9 +1611,11 @@ jint ConstantPool::cpool_entry_size(jint idx) {
case JVM_CONSTANT_UnresolvedClassInError:
case JVM_CONSTANT_StringIndex:
case JVM_CONSTANT_MethodType:
case JVM_CONSTANT_MethodTypeInError:
return 3;
case JVM_CONSTANT_MethodHandle:
case JVM_CONSTANT_MethodHandleInError:
return 4; //tag, ref_kind, ref_index
case JVM_CONSTANT_Integer:
@@ -1828,8 +1796,8 @@ int ConstantPool::copy_cpool_bytes(int cpool_size,
case JVM_CONSTANT_MethodHandle:
case JVM_CONSTANT_MethodHandleInError: {
*bytes = JVM_CONSTANT_MethodHandle;
int kind = method_handle_ref_kind_at(idx);
idx1 = method_handle_index_at(idx);
int kind = method_handle_ref_kind_at_error_ok(idx);
idx1 = method_handle_index_at_error_ok(idx);
*(bytes+1) = (unsigned char) kind;
Bytes::put_Java_u2((address) (bytes+2), idx1);
DBG(printf("JVM_CONSTANT_MethodHandle: %d %hd", kind, idx1));
@@ -1838,7 +1806,7 @@ int ConstantPool::copy_cpool_bytes(int cpool_size,
case JVM_CONSTANT_MethodType:
case JVM_CONSTANT_MethodTypeInError: {
*bytes = JVM_CONSTANT_MethodType;
idx1 = method_type_index_at(idx);
idx1 = method_type_index_at_error_ok(idx);
Bytes::put_Java_u2((address) (bytes+1), idx1);
DBG(printf("JVM_CONSTANT_MethodType: %hd", idx1));
break;
@@ -2026,12 +1994,12 @@ void ConstantPool::print_entry_on(const int index, outputStream* st) {
break;
case JVM_CONSTANT_MethodHandle :
case JVM_CONSTANT_MethodHandleInError :
st->print("ref_kind=%d", method_handle_ref_kind_at(index));
st->print(" ref_index=%d", method_handle_index_at(index));
st->print("ref_kind=%d", method_handle_ref_kind_at_error_ok(index));
st->print(" ref_index=%d", method_handle_index_at_error_ok(index));
break;
case JVM_CONSTANT_MethodType :
case JVM_CONSTANT_MethodTypeInError :
st->print("signature_index=%d", method_type_index_at(index));
st->print("signature_index=%d", method_type_index_at_error_ok(index));
break;
case JVM_CONSTANT_InvokeDynamic :
{

View file

@@ -231,7 +231,6 @@ class ConstantPool : public Metadata {
static int cache_offset_in_bytes() { return offset_of(ConstantPool, _cache); }
static int pool_holder_offset_in_bytes() { return offset_of(ConstantPool, _pool_holder); }
static int resolved_references_offset_in_bytes() { return offset_of(ConstantPool, _resolved_references); }
static int reference_map_offset_in_bytes() { return offset_of(ConstantPool, _reference_map); }
// Storing constants
@@ -475,18 +474,42 @@ class ConstantPool : public Metadata {
return *int_at_addr(which);
}
int method_handle_ref_kind_at(int which) {
assert(tag_at(which).is_method_handle(), "Corrupted constant pool");
private:
int method_handle_ref_kind_at(int which, bool error_ok) {
assert(tag_at(which).is_method_handle() ||
(error_ok && tag_at(which).is_method_handle_in_error()), "Corrupted constant pool");
return extract_low_short_from_int(*int_at_addr(which)); // mask out unwanted ref_index bits
}
int method_handle_index_at(int which) {
assert(tag_at(which).is_method_handle(), "Corrupted constant pool");
int method_handle_index_at(int which, bool error_ok) {
assert(tag_at(which).is_method_handle() ||
(error_ok && tag_at(which).is_method_handle_in_error()), "Corrupted constant pool");
return extract_high_short_from_int(*int_at_addr(which)); // shift out unwanted ref_kind bits
}
int method_type_index_at(int which) {
assert(tag_at(which).is_method_type(), "Corrupted constant pool");
int method_type_index_at(int which, bool error_ok) {
assert(tag_at(which).is_method_type() ||
(error_ok && tag_at(which).is_method_type_in_error()), "Corrupted constant pool");
return *int_at_addr(which);
}
public:
int method_handle_ref_kind_at(int which) {
return method_handle_ref_kind_at(which, false);
}
int method_handle_ref_kind_at_error_ok(int which) {
return method_handle_ref_kind_at(which, true);
}
int method_handle_index_at(int which) {
return method_handle_index_at(which, false);
}
int method_handle_index_at_error_ok(int which) {
return method_handle_index_at(which, true);
}
int method_type_index_at(int which) {
return method_type_index_at(which, false);
}
int method_type_index_at_error_ok(int which) {
return method_type_index_at(which, true);
}
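Both accessors decode one 32-bit constant pool entry that packs ref_kind into the low 16 bits and ref_index into the high 16 bits, which is what extract_low_short_from_int and extract_high_short_from_int pull apart. A standalone sketch of that packing (hypothetical helpers with the same split):

#include <cassert>
#include <cstdint>

static int32_t pack_entry(uint16_t ref_kind, uint16_t ref_index) {
  return (int32_t)(((uint32_t)ref_index << 16) | ref_kind);
}
static uint16_t extract_low_short_from_int(int32_t v)  { return (uint16_t)(v & 0xFFFF); }
static uint16_t extract_high_short_from_int(int32_t v) { return (uint16_t)((uint32_t)v >> 16); }

int main() {
  int32_t entry = pack_entry(/*ref_kind=*/6, /*ref_index=*/123);
  assert(extract_low_short_from_int(entry)  == 6);    // ref_kind
  assert(extract_high_short_from_int(entry) == 123);  // ref_index
  return 0;
}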
// Derived queries:
Symbol* method_handle_name_ref_at(int which) {
int member = method_handle_index_at(which);
@@ -730,8 +753,6 @@ class ConstantPool : public Metadata {
static oop method_type_at_if_loaded (constantPoolHandle this_oop, int which);
static Klass* klass_at_if_loaded (constantPoolHandle this_oop, int which);
static Klass* klass_ref_at_if_loaded (constantPoolHandle this_oop, int which);
// Same as above - but does LinkResolving.
static Klass* klass_ref_at_if_loaded_check(constantPoolHandle this_oop, int which, TRAPS);
// Routines currently used for annotations (only called by jvm.cpp) but which might be used in the
// future by other Java code. These take constant pool indices rather than

View file

@@ -140,9 +140,10 @@ void ConstantPoolCacheEntry::set_parameter_size(int value) {
err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value));
}
void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
methodHandle method,
int vtable_index) {
void ConstantPoolCacheEntry::set_direct_or_vtable_call(Bytecodes::Code invoke_code,
methodHandle method,
int vtable_index) {
bool is_vtable_call = (vtable_index >= 0); // FIXME: split this method on this boolean
assert(method->interpreter_entry() != NULL, "should have been set at this point");
assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache");
@@ -160,7 +161,8 @@ void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
// ...and fall through as if we were handling invokevirtual:
case Bytecodes::_invokevirtual:
{
if (method->can_be_statically_bound()) {
if (!is_vtable_call) {
assert(method->can_be_statically_bound(), "");
// set_f2_as_vfinal_method checks if is_vfinal flag is true.
set_method_flags(as_TosState(method->result_type()),
( 1 << is_vfinal_shift) |
@@ -169,6 +171,7 @@ void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
method()->size_of_parameters());
set_f2_as_vfinal_method(method());
} else {
assert(!method->can_be_statically_bound(), "");
assert(vtable_index >= 0, "valid index");
assert(!method->is_final_method(), "sanity");
set_method_flags(as_TosState(method->result_type()),
@@ -182,6 +185,7 @@ void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
assert(!is_vtable_call, "");
// Note: Read and preserve the value of the is_vfinal flag on any
// invokevirtual bytecode shared with this constant pool cache entry.
// It is cheap and safe to consult is_vfinal() at all times.
@@ -232,8 +236,22 @@ void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
NOT_PRODUCT(verify(tty));
}
void ConstantPoolCacheEntry::set_direct_call(Bytecodes::Code invoke_code, methodHandle method) {
int index = Method::nonvirtual_vtable_index;
// index < 0; FIXME: inline and customize set_direct_or_vtable_call
set_direct_or_vtable_call(invoke_code, method, index);
}
void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) {
void ConstantPoolCacheEntry::set_vtable_call(Bytecodes::Code invoke_code, methodHandle method, int index) {
// either the method is a miranda or its holder should accept the given index
assert(method->method_holder()->is_interface() || method->method_holder()->verify_vtable_index(index), "");
// index >= 0; FIXME: inline and customize set_direct_or_vtable_call
set_direct_or_vtable_call(invoke_code, method, index);
}
void ConstantPoolCacheEntry::set_itable_call(Bytecodes::Code invoke_code, methodHandle method, int index) {
assert(method->method_holder()->verify_itable_index(index), "");
assert(invoke_code == Bytecodes::_invokeinterface, "");
InstanceKlass* interf = method->method_holder();
assert(interf->is_interface(), "must be an interface");
assert(!method->is_final_method(), "interfaces do not have final methods; cannot link to one here");
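set_direct_or_vtable_call keys everything off the sign of vtable_index: a negative index means a statically bound (direct) call, a non-negative one a vtable dispatch, and the new named wrappers make each call site's intent explicit. A standalone sketch of that dispatch (hypothetical names):

#include <cassert>

enum CallKind { direct_call, vtable_call };

// Negative vtable_index <=> statically bound call, mirroring the hunk above.
static CallKind classify(int vtable_index) {
  return vtable_index < 0 ? direct_call : vtable_call;
}

// Stand-in for Method::nonvirtual_vtable_index (some negative value).
static const int nonvirtual_vtable_index = -2;

int main() {
  assert(classify(nonvirtual_vtable_index) == direct_call);
  assert(classify(5) == vtable_call);
  return 0;
}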

Some files were not shown because too many files have changed in this diff