Tobias Hartmann 2016-02-15 11:52:51 +01:00
commit 8b42758800
1109 changed files with 37111 additions and 24119 deletions

View file

@ -346,3 +346,4 @@ f9bcdce2df26678c3fe468130b535c0342c69b89 jdk-9+99
80f67512daa15cf37b4825c1c62a675d524d7c49 jdk-9+101
2dc4c11fe48831854916d53c3913bdb7d49023ea jdk-9+102
4a652e4ca9523422149958673033e0ac740d5e1e jdk-9+103
086c682bd8c5f195c324f61e2c61fbcd0226d63b jdk-9+104

View file

@ -346,3 +346,4 @@ c1f30ac14db0eaff398429c04cd9fab92e1b4b2a jdk-9+100
c4d72a1620835b5d657b7b6792c2879367d0154f jdk-9+101
6406ecf5d39482623225bb1b3098c2cac6f7d450 jdk-9+102
47d6462e514b2097663305a57d9c844c15d5b609 jdk-9+103
9a38f8b4ba220708db198d08d82fd2144a64777d jdk-9+104

View file

@ -573,6 +573,11 @@ AC_DEFUN_ONCE([BASIC_SETUP_PATHS],
# Locate the directory of this script.
AUTOCONF_DIR=$TOPDIR/common/autoconf
# Setup username (for use in adhoc version strings etc)
# Outer [ ] to quote m4.
[ USERNAME=`$ECHO "$USER" | $TR -d -c '[a-z][A-Z][0-9]'` ]
AC_SUBST(USERNAME)
])
# Evaluates platform specific overrides for devkit variables.
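The `$TR -d -c '[a-z][A-Z][0-9]'` pipeline above deletes every character outside ASCII letters and digits, so the resulting USERNAME is safe to embed in version strings. A minimal C++ sketch of the same sanitization (illustration only; the build itself uses the shell pipeline, and sanitize_username is a hypothetical name):

#include <cctype>
#include <string>

// Keep only ASCII letters and digits, as `tr -d -c '[a-z][A-Z][0-9]'` does.
static std::string sanitize_username(const std::string& user) {
  std::string out;
  for (unsigned char c : user) {
    if (std::isalnum(c)) {
      out += static_cast<char>(c);
    }
  }
  return out;
}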

View file

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -80,8 +80,9 @@ AC_DEFUN([FLAGS_SETUP_SYSROOT_FLAGS],
if test "x$OPENJDK_TARGET_OS" = xsolaris; then
# Solaris Studio does not have a concept of sysroot. Instead we must
# make sure the default include and lib dirs are appended to each
# compile and link command line.
$1SYSROOT_CFLAGS="-I[$]$1SYSROOT/usr/include"
# compile and link command line. Must also add -I-xbuiltin to enable
# inlining of system functions and intrinsics.
$1SYSROOT_CFLAGS="-I-xbuiltin -I[$]$1SYSROOT/usr/include"
$1SYSROOT_LDFLAGS="-L[$]$1SYSROOT/usr/lib$OPENJDK_TARGET_CPU_ISADIR \
-L[$]$1SYSROOT/lib$OPENJDK_TARGET_CPU_ISADIR \
-L[$]$1SYSROOT/usr/ccs/lib$OPENJDK_TARGET_CPU_ISADIR"
@ -425,7 +426,7 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_OPTIMIZATION],
# Add runtime stack smashing and undefined behavior checks.
# Not all versions of gcc support -fstack-protector
STACK_PROTECTOR_CFLAG="-fstack-protector-all"
FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [$STACK_PROTECTOR_CFLAG], IF_FALSE: [STACK_PROTECTOR_CFLAG=""])
FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [$STACK_PROTECTOR_CFLAG -Werror], IF_FALSE: [STACK_PROTECTOR_CFLAG=""])
CFLAGS_DEBUG_OPTIONS="$STACK_PROTECTOR_CFLAG --param ssp-buffer-size=1"
CXXFLAGS_DEBUG_OPTIONS="$STACK_PROTECTOR_CFLAG --param ssp-buffer-size=1"

View file

@ -917,6 +917,7 @@ JVM_VARIANTS
JVM_INTERPRETER
JDK_VARIANT
SET_OPENJDK
USERNAME
CANONICAL_TOPDIR
ORIGINAL_TOPDIR
TOPDIR
@ -3834,7 +3835,7 @@ ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var.
#
# Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -4835,7 +4836,7 @@ VS_SDK_PLATFORM_NAME_2013=
#CUSTOM_AUTOCONF_INCLUDE
# Do not change or remove the following line, it is needed for consistency checks:
DATE_WHEN_GENERATED=1454146111
DATE_WHEN_GENERATED=1454926898
###############################################################################
#
@ -15652,6 +15653,11 @@ $as_echo "$as_me: The path of TOPDIR, which resolves as \"$path\", is invalid."
# Locate the directory of this script.
AUTOCONF_DIR=$TOPDIR/common/autoconf
# Setup username (for use in adhoc version strings etc)
# Outer [ ] to quote m4.
USERNAME=`$ECHO "$USER" | $TR -d -c '[a-z][A-Z][0-9]'`
# Check if it's a pure open build or if custom sources are to be used.
@ -23429,9 +23435,8 @@ $as_echo "$as_me: WARNING: --with-version-opt value has been sanitized from '$wi
# Default is to calculate a string like this <timestamp>.<username>.<base dir name>
timestamp=`$DATE '+%Y-%m-%d-%H%M%S'`
# Outer [ ] to quote m4.
username=`$ECHO "$USER" | $TR -d -c '[a-z][A-Z][0-9]'`
basedirname=`$BASENAME "$TOPDIR" | $TR -d -c '[a-z][A-Z][0-9].-'`
VERSION_OPT="$timestamp.$username.$basedirname"
VERSION_OPT="$timestamp.$USERNAME.$basedirname"
fi
fi
@ -29968,8 +29973,9 @@ fi
if test "x$OPENJDK_TARGET_OS" = xsolaris; then
# Solaris Studio does not have a concept of sysroot. Instead we must
# make sure the default include and lib dirs are appended to each
# compile and link command line.
SYSROOT_CFLAGS="-I$SYSROOT/usr/include"
# compile and link command line. Must also add -I-xbuiltin to enable
# inlining of system functions and intrinsics.
SYSROOT_CFLAGS="-I-xbuiltin -I$SYSROOT/usr/include"
SYSROOT_LDFLAGS="-L$SYSROOT/usr/lib$OPENJDK_TARGET_CPU_ISADIR \
-L$SYSROOT/lib$OPENJDK_TARGET_CPU_ISADIR \
-L$SYSROOT/usr/ccs/lib$OPENJDK_TARGET_CPU_ISADIR"
@ -42361,8 +42367,9 @@ $as_echo "$BUILD_DEVKIT_ROOT" >&6; }
if test "x$OPENJDK_TARGET_OS" = xsolaris; then
# Solaris Studio does not have a concept of sysroot. Instead we must
# make sure the default include and lib dirs are appended to each
# compile and link command line.
BUILD_SYSROOT_CFLAGS="-I$BUILD_SYSROOT/usr/include"
# compile and link command line. Must also add -I-xbuiltin to enable
# inlining of system functions and intrinsics.
BUILD_SYSROOT_CFLAGS="-I-xbuiltin -I$BUILD_SYSROOT/usr/include"
BUILD_SYSROOT_LDFLAGS="-L$BUILD_SYSROOT/usr/lib$OPENJDK_TARGET_CPU_ISADIR \
-L$BUILD_SYSROOT/lib$OPENJDK_TARGET_CPU_ISADIR \
-L$BUILD_SYSROOT/usr/ccs/lib$OPENJDK_TARGET_CPU_ISADIR"
@ -46191,12 +46198,12 @@ $as_echo "$ac_cv_c_bigendian" >&6; }
# Execute function body
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if compiler supports \"$STACK_PROTECTOR_CFLAG\"" >&5
$as_echo_n "checking if compiler supports \"$STACK_PROTECTOR_CFLAG\"... " >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if compiler supports \"$STACK_PROTECTOR_CFLAG -Werror\"" >&5
$as_echo_n "checking if compiler supports \"$STACK_PROTECTOR_CFLAG -Werror\"... " >&6; }
supports=yes
saved_cflags="$CFLAGS"
CFLAGS="$CFLAGS $STACK_PROTECTOR_CFLAG"
CFLAGS="$CFLAGS $STACK_PROTECTOR_CFLAG -Werror"
ac_ext=c
ac_cpp='$CPP $CPPFLAGS'
ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
@ -46222,7 +46229,7 @@ ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
CFLAGS="$saved_cflags"
saved_cxxflags="$CXXFLAGS"
CXXFLAGS="$CXXFLAG $STACK_PROTECTOR_CFLAG"
CXXFLAGS="$CXXFLAG $STACK_PROTECTOR_CFLAG -Werror"
ac_ext=cpp
ac_cpp='$CXXCPP $CPPFLAGS'
ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'

View file

@ -162,9 +162,8 @@ AC_DEFUN_ONCE([JDKVER_SETUP_JDK_VERSION_NUMBERS],
# Default is to calculate a string like this <timestamp>.<username>.<base dir name>
timestamp=`$DATE '+%Y-%m-%d-%H%M%S'`
# Outer [ ] to quote m4.
[ username=`$ECHO "$USER" | $TR -d -c '[a-z][A-Z][0-9]'` ]
[ basedirname=`$BASENAME "$TOPDIR" | $TR -d -c '[a-z][A-Z][0-9].-'` ]
VERSION_OPT="$timestamp.$username.$basedirname"
VERSION_OPT="$timestamp.$USERNAME.$basedirname"
fi
fi
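For illustration, with hypothetical values (a sanitized username of "duke" and a source tree at a directory named "jdk9"), the default VERSION_OPT computed above would come out as something like:

2016-02-15-115251.duke.jdk9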

View file

@ -184,6 +184,7 @@ JDK_RC_PLATFORM_NAME:=@JDK_RC_PLATFORM_NAME@
COMPANY_NAME:=@COMPANY_NAME@
MACOSX_BUNDLE_NAME_BASE=@MACOSX_BUNDLE_NAME_BASE@
MACOSX_BUNDLE_ID_BASE=@MACOSX_BUNDLE_ID_BASE@
USERNAME:=@USERNAME@
# Different naming strings generated from the above information.
RUNTIME_NAME=$(PRODUCT_NAME) $(PRODUCT_SUFFIX)

View file

@ -346,3 +346,4 @@ ea285530245cf4e0edf0479121a41347d3030eba jdk-9+98
30dfb3bd3d06b4bb80a087babc0d1841edba187b jdk-9+101
9c4662334d933d299928d1f599d02ff50777cbf8 jdk-9+102
0680fb7dae4da1ee6cf783c4b74184e3e08d3179 jdk-9+103
e385e95e6101711d5c63e7b1a827e99b6ec7a1cc jdk-9+104

View file

@ -506,3 +506,4 @@ bdb0acafc63c42e84d9d8195bf2e2b25ee9c3306 jdk-9+100
9f45d3d57d6948cf526fbc2e2891a9a74ac6941a jdk-9+101
d5239fc1b69749ae50793c61b899fcdacf3df857 jdk-9+102
c5f55130b1b69510d9a6f4a3105b58e21cd7ffe1 jdk-9+103
534c50395957c6025fb6627e93b35756f8d48a08 jdk-9+104

View file

@ -256,7 +256,7 @@ endif
# Compiler warnings are treated as errors
ifneq ($(COMPILER_WARNINGS_FATAL),false)
WARNINGS_ARE_ERRORS = -Werror
WARNINGS_ARE_ERRORS ?= -Werror
endif
ifeq ($(USE_CLANG), true)

View file

@ -203,7 +203,7 @@ else
endif
# Compiler warnings are treated as errors
WARNINGS_ARE_ERRORS = -Werror
WARNINGS_ARE_ERRORS ?= -Werror
ifeq ($(USE_CLANG), true)
# However we need to clean the code up before we can unrestrictedly enable this option with Clang

View file

@ -72,10 +72,14 @@ ifndef USE_GCC
endif
# CFLAGS_WARN holds compiler options to suppress/enable warnings.
CFLAGS_WARN = +w
# Compiler warnings are treated as errors
ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
CFLAGS_WARN = +w -errwarn
WARNINGS_ARE_ERRORS ?= -xwe
endif
CFLAGS_WARN += $(WARNINGS_ARE_ERRORS)
# When using compiler version 5.13 (Solaris Studio 12.4), calls to explicitly
# instantiated template functions trigger this warning when +w is active.
ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 513), 1)

View file

@ -117,7 +117,7 @@ endif
# Compiler warnings are treated as errors
WARNINGS_ARE_ERRORS = -Werror
WARNINGS_ARE_ERRORS ?= -Werror
# Enable these warnings. See 'info gcc' about details on these options
WARNING_FLAGS = -Wpointer-arith -Wconversion -Wsign-compare -Wundef -Wformat=2

View file

@ -145,7 +145,8 @@ endif
CFLAGS += -DDONT_USE_PRECOMPILED_HEADER
# Compiler warnings are treated as errors
CFLAGS_WARN = -xwe
WARNINGS_ARE_ERRORS ?= -xwe
CFLAGS_WARN = $(WARNINGS_ARE_ERRORS)
################################################
# Begin current (>=5.9) Forte compiler options #

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -901,23 +901,18 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
"caller must use same register for non-constant itable index as for method");
// Compute start of first itableOffsetEntry (which is at the end of the vtable)
int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
int vtable_base = in_bytes(Klass::vtable_start_offset());
int itentry_off = itableMethodEntry::method_offset_in_bytes();
int scan_step = itableOffsetEntry::size() * wordSize;
int vte_size = vtableEntry::size() * wordSize;
int vte_size = vtableEntry::size_in_bytes();
assert(vte_size == wordSize, "else adjust times_vte_scale");
ldrw(scan_temp, Address(recv_klass, InstanceKlass::vtable_length_offset() * wordSize));
ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
// %%% Could store the aligned, prescaled offset in the klassoop.
// lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3)));
add(scan_temp, scan_temp, vtable_base);
if (HeapWordsPerLong > 1) {
// Round up to align_object_offset boundary
// see code for instanceKlass::start_of_itable!
round_to(scan_temp, BytesPerLong);
}
// Adjust recv_klass by scaled itable_index, so we can free itable_index.
assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
@ -966,7 +961,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
void MacroAssembler::lookup_virtual_method(Register recv_klass,
RegisterOrConstant vtable_index,
Register method_result) {
const int base = InstanceKlass::vtable_start_offset() * wordSize;
const int base = in_bytes(Klass::vtable_start_offset());
assert(vtableEntry::size() * wordSize == 8,
"adjust the scaling in the code below");
int vtable_offset_in_bytes = base + vtableEntry::method_offset_in_bytes();
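The assembler sequence above computes the start of the first itableOffsetEntry: the itable begins right after the embedded vtable, and with this change Klass::vtable_start_offset() is already expressed in bytes, which is why the old round_to(BytesPerLong) step could be removed. A C++ sketch of the address arithmetic (a simplification with stand-in parameters, not VM code):

#include <cstdint>

// Address of the first itableOffsetEntry inside a klass: start of the
// embedded vtable plus vtable_length entries of vte_size bytes each.
static uint8_t* first_itable_entry(uint8_t* klass,
                                   int vtable_start_offset_bytes, // in_bytes(Klass::vtable_start_offset())
                                   int vtable_length,             // loaded via Klass::vtable_length_offset()
                                   int vte_size) {                // vtableEntry::size_in_bytes()
  return klass + vtable_start_offset_bytes
               + (intptr_t)vtable_length * vte_size;
}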

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -73,7 +73,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
if (DebugVtables) {
Label L;
// check offset vs vtable length
__ ldrw(rscratch1, Address(r19, InstanceKlass::vtable_length_offset() * wordSize));
__ ldrw(rscratch1, Address(r19, Klass::vtable_length_offset()));
__ cmpw(rscratch1, vtable_index * vtableEntry::size());
__ br(Assembler::GT, L);
__ enter();

View file

@ -1583,13 +1583,13 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
"caller must use same register for non-constant itable index as for method");
// Compute start of first itableOffsetEntry (which is at the end of the vtable).
int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
int vtable_base = in_bytes(Klass::vtable_start_offset());
int itentry_off = itableMethodEntry::method_offset_in_bytes();
int logMEsize = exact_log2(itableMethodEntry::size() * wordSize);
int scan_step = itableOffsetEntry::size() * wordSize;
int log_vte_size= exact_log2(vtableEntry::size() * wordSize);
int log_vte_size= exact_log2(vtableEntry::size_in_bytes());
lwz(scan_temp, InstanceKlass::vtable_length_offset() * wordSize, recv_klass);
lwz(scan_temp, in_bytes(Klass::vtable_length_offset()), recv_klass);
// %%% We should store the aligned, prescaled offset in the klassoop.
// Then the next several instructions would fold away.
@ -1657,7 +1657,7 @@ void MacroAssembler::lookup_virtual_method(Register recv_klass,
assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg());
const int base = InstanceKlass::vtable_start_offset() * wordSize;
const int base = in_bytes(Klass::vtable_start_offset());
assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
if (vtable_index.is_register()) {

View file

@ -3568,8 +3568,8 @@ encode %{
__ load_klass(R11_scratch1, R3);
int entry_offset = InstanceKlass::vtable_start_offset() + _vtable_index * vtableEntry::size();
int v_off = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
int entry_offset = in_bytes(Klass::vtable_start_offset()) + _vtable_index * vtableEntry::size_in_bytes();
int v_off = entry_offset + vtableEntry::method_offset_in_bytes();
__ li(R19_method, v_off);
__ ldx(R19_method/*method oop*/, R19_method/*method offset*/, R11_scratch1/*class*/);
// NOTE: for vtable dispatches, the vtable entry will never be
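Both offsets above are now pure byte arithmetic: entry_offset locates the vtable slot for _vtable_index, and v_off adds the method pointer's offset within that entry. A worked C++ sketch (stand-in parameters and a hypothetical helper name, not VM code):

// Byte offset of the Method* for a given vtable index, mirroring the two
// lines above: vtable start + index * entry size + method field offset.
static int vtable_method_byte_offset(int vtable_start_offset_bytes,
                                     int vtable_index,
                                     int entry_size_bytes,
                                     int method_offset_in_entry) {
  return vtable_start_offset_bytes
       + vtable_index * entry_size_bytes
       + method_offset_in_entry;
}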

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2015 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -3282,9 +3282,9 @@ void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex,
const Register Rtarget_method = Rindex;
// Get target method & entry point.
const int base = InstanceKlass::vtable_start_offset() * wordSize;
const int base = in_bytes(Klass::vtable_start_offset());
// Calc vtable addr scale the vtable index by 8.
__ sldi(Rindex, Rindex, exact_log2(vtableEntry::size() * wordSize));
__ sldi(Rindex, Rindex, exact_log2(vtableEntry::size_in_bytes()));
// Load target.
__ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes());
__ ldx(Rtarget_method, Rindex, Rrecv_klass);

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -80,14 +80,14 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
__ load_klass(rcvr_klass, R3);
// Set method (in case of interpreted method), and destination address.
int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
int entry_offset = in_bytes(Klass::vtable_start_offset()) + vtable_index*vtableEntry::size_in_bytes();
#ifndef PRODUCT
if (DebugVtables) {
Label L;
// Check offset vs vtable length.
const Register vtable_len = R12_scratch2;
__ lwz(vtable_len, InstanceKlass::vtable_length_offset()*wordSize, rcvr_klass);
__ lwz(vtable_len, in_bytes(Klass::vtable_length_offset()), rcvr_klass);
__ cmpwi(CCR0, vtable_len, vtable_index*vtableEntry::size());
__ bge(CCR0, L);
__ li(R12_scratch2, vtable_index);
@ -96,7 +96,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
}
#endif
int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
int v_off = entry_offset + vtableEntry::method_offset_in_bytes();
__ ld(R19_method, v_off, rcvr_klass);
@ -163,13 +163,13 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
__ load_klass(rcvr_klass, R3_ARG1);
BLOCK_COMMENT("Load start of itable entries into itable_entry.");
__ lwz(vtable_len, InstanceKlass::vtable_length_offset() * wordSize, rcvr_klass);
__ slwi(vtable_len, vtable_len, exact_log2(vtableEntry::size() * wordSize));
__ lwz(vtable_len, in_bytes(Klass::vtable_length_offset()), rcvr_klass);
__ slwi(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));
__ add(itable_entry_addr, vtable_len, rcvr_klass);
// Loop over all itable entries until desired interfaceOop(Rinterface) found.
BLOCK_COMMENT("Increment itable_entry_addr in loop.");
const int vtable_base_offset = InstanceKlass::vtable_start_offset() * wordSize;
const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());
__ addi(itable_entry_addr, itable_entry_addr, vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes());
const int itable_offset_search_inc = itableOffsetEntry::size() * wordSize;

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -180,6 +180,9 @@ static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
typedef void (*_zero_Fn)(HeapWord* to, size_t count);
// Only used for heap objects, so align_object_offset.
// On all other platforms, pd_fill_to_aligned_words simply calls pd_fill_to_words;
// it is not clear why this one is different.
static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value) {
assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
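On those other platforms the plain word fill replicates the 32-bit value across each heap word. A 64-bit C++ sketch of that behavior (an illustration assuming 8-byte heap words, not the SPARC implementation):

#include <cstddef>
#include <cstdint>

// Fill `count` 64-bit heap words with `value` replicated into both halves.
static void fill_to_words_sketch(uint64_t* to, size_t count, uint32_t value) {
  uint64_t pattern = ((uint64_t)value << 32) | value;
  for (size_t i = 0; i < count; i++) {
    to[i] = pattern;
  }
}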

View file

@ -2188,30 +2188,18 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
}
// Compute start of first itableOffsetEntry (which is at the end of the vtable)
int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
int vtable_base = in_bytes(Klass::vtable_start_offset());
int scan_step = itableOffsetEntry::size() * wordSize;
int vte_size = vtableEntry::size() * wordSize;
int vte_size = vtableEntry::size_in_bytes();
lduw(recv_klass, InstanceKlass::vtable_length_offset() * wordSize, scan_temp);
lduw(recv_klass, in_bytes(Klass::vtable_length_offset()), scan_temp);
// %%% We should store the aligned, prescaled offset in the klassoop.
// Then the next several instructions would fold away.
int round_to_unit = ((HeapWordsPerLong > 1) ? BytesPerLong : 0);
int itb_offset = vtable_base;
if (round_to_unit != 0) {
// hoist first instruction of round_to(scan_temp, BytesPerLong):
itb_offset += round_to_unit - wordSize;
}
int itb_scale = exact_log2(vtableEntry::size() * wordSize);
int itb_scale = exact_log2(vtableEntry::size_in_bytes());
sll(scan_temp, itb_scale, scan_temp);
add(scan_temp, itb_offset, scan_temp);
if (round_to_unit != 0) {
// Round up to align_object_offset boundary
// see code for InstanceKlass::start_of_itable!
// Was: round_to(scan_temp, BytesPerLong);
// Hoisted: add(scan_temp, BytesPerLong-1, scan_temp);
and3(scan_temp, -round_to_unit, scan_temp);
}
add(recv_klass, scan_temp, scan_temp);
// Adjust recv_klass by scaled itable_index, so we can free itable_index.
@ -2280,16 +2268,16 @@ void MacroAssembler::lookup_virtual_method(Register recv_klass,
Register method_result) {
assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg());
Register sethi_temp = method_result;
const int base = (InstanceKlass::vtable_start_offset() * wordSize +
const int base = in_bytes(Klass::vtable_start_offset()) +
// method pointer offset within the vtable entry:
vtableEntry::method_offset_in_bytes());
vtableEntry::method_offset_in_bytes();
RegisterOrConstant vtable_offset = vtable_index;
// Each of the following three lines potentially generates an instruction.
// But the total number of address formation instructions will always be
// at most two, and will often be zero. In any case, it will be optimal.
// If vtable_index is a register, we will have (sll_ptr N,x; inc_ptr B,x; ld_ptr k,x).
// If vtable_index is a constant, we will have at most (set B+X<<N,t; ld_ptr k,t).
vtable_offset = regcon_sll_ptr(vtable_index, exact_log2(vtableEntry::size() * wordSize), vtable_offset);
vtable_offset = regcon_sll_ptr(vtable_index, exact_log2(vtableEntry::size_in_bytes()), vtable_offset);
vtable_offset = regcon_inc_ptr(vtable_offset, base, vtable_offset, sethi_temp);
Address vtable_entry_addr(recv_klass, ensure_simm13_or_reg(vtable_offset, sethi_temp));
ld_ptr(vtable_entry_addr, method_result);

View file

@ -1,5 +1,5 @@
//
// Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@ -601,8 +601,8 @@ int MachCallDynamicJavaNode::ret_addr_offset() {
NativeCall::instruction_size); // sethi; setlo; call; delay slot
} else {
assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
int entry_offset = in_bytes(Klass::vtable_start_offset()) + vtable_index*vtableEntry::size_in_bytes();
int v_off = entry_offset + vtableEntry::method_offset_in_bytes();
int klass_load_size;
if (UseCompressedClassPointers) {
assert(Universe::heap() != NULL, "java heap should be initialized");
@ -2658,8 +2658,8 @@ encode %{
} else {
klass_load_size = 1*BytesPerInstWord;
}
int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
int entry_offset = in_bytes(Klass::vtable_start_offset()) + vtable_index*vtableEntry::size_in_bytes();
int v_off = entry_offset + vtableEntry::method_offset_in_bytes();
if (Assembler::is_simm13(v_off)) {
__ ld_ptr(G3, v_off, G5_method);
} else {

View file

@ -3153,14 +3153,11 @@ void TemplateTable::invokeinterface(int byte_no) {
//
// compute start of first itableOffsetEntry (which is at end of vtable)
const int base = InstanceKlass::vtable_start_offset() * wordSize;
const int base = in_bytes(Klass::vtable_start_offset());
Label search;
Register Rtemp = O1_flags;
__ ld(O2_Klass, InstanceKlass::vtable_length_offset() * wordSize, Rtemp);
if (align_object_offset(1) > 1) {
__ round_to(Rtemp, align_object_offset(1));
}
__ ld(O2_Klass, in_bytes(Klass::vtable_length_offset()), Rtemp);
__ sll(Rtemp, LogBytesPerWord, Rtemp); // Rscratch *= 4;
if (Assembler::is_simm13(base)) {
__ add(Rtemp, base, Rtemp);

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -78,7 +78,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
if (DebugVtables) {
Label L;
// check offset vs vtable length
__ ld(G3_scratch, InstanceKlass::vtable_length_offset()*wordSize, G5);
__ ld(G3_scratch, in_bytes(Klass::vtable_length_offset()), G5);
__ cmp_and_br_short(G5, vtable_index*vtableEntry::size(), Assembler::greaterUnsigned, Assembler::pt, L);
__ set(vtable_index, O2);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), O0, O2);

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -5867,22 +5867,17 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
"caller must use same register for non-constant itable index as for method");
// Compute start of first itableOffsetEntry (which is at the end of the vtable)
int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
int vtable_base = in_bytes(Klass::vtable_start_offset());
int itentry_off = itableMethodEntry::method_offset_in_bytes();
int scan_step = itableOffsetEntry::size() * wordSize;
int vte_size = vtableEntry::size() * wordSize;
int vte_size = vtableEntry::size_in_bytes();
Address::ScaleFactor times_vte_scale = Address::times_ptr;
assert(vte_size == wordSize, "else adjust times_vte_scale");
movl(scan_temp, Address(recv_klass, InstanceKlass::vtable_length_offset() * wordSize));
movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
// %%% Could store the aligned, prescaled offset in the klassoop.
lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
if (HeapWordsPerLong > 1) {
// Round up to align_object_offset boundary
// see code for InstanceKlass::start_of_itable!
round_to(scan_temp, BytesPerLong);
}
// Adjust recv_klass by scaled itable_index, so we can free itable_index.
assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
@ -5930,7 +5925,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
void MacroAssembler::lookup_virtual_method(Register recv_klass,
RegisterOrConstant vtable_index,
Register method_result) {
const int base = InstanceKlass::vtable_start_offset() * wordSize;
const int base = in_bytes(Klass::vtable_start_offset());
assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
Address vtable_entry_addr(recv_klass,
vtable_index, Address::times_ptr,

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -85,7 +85,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
if (DebugVtables) {
Label L;
// check offset vs vtable length
__ cmpl(Address(rax, InstanceKlass::vtable_length_offset()*wordSize), vtable_index*vtableEntry::size());
__ cmpl(Address(rax, Klass::vtable_length_offset()), vtable_index*vtableEntry::size());
__ jcc(Assembler::greater, L);
__ movl(rbx, vtable_index);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), rcx, rbx);

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -77,7 +77,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
if (DebugVtables) {
Label L;
// check offset vs vtable length
__ cmpl(Address(rax, InstanceKlass::vtable_length_offset() * wordSize),
__ cmpl(Address(rax, Klass::vtable_length_offset()),
vtable_index * vtableEntry::size());
__ jcc(Assembler::greater, L);
__ movl(rbx, vtable_index);

View file

@ -141,16 +141,20 @@ public class HSDB implements ObjectHistogramPanel.Listener, SAListener {
return;
}
agent = new HotSpotAgent();
workerThread = new WorkerThread();
attachMenuItems = new java.util.ArrayList();
detachMenuItems = new java.util.ArrayList();
// Create frame first, to catch any GUI creation issues
// before we initialize agent
frame = new JFrame("HSDB - HotSpot Debugger");
frame.setSize(800, 600);
frame.setDefaultCloseOperation(WindowConstants.DO_NOTHING_ON_CLOSE);
frame.addWindowListener(new CloseUI());
agent = new HotSpotAgent();
workerThread = new WorkerThread();
attachMenuItems = new java.util.ArrayList();
detachMenuItems = new java.util.ArrayList();
JMenuBar menuBar = new JMenuBar();
//

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -48,7 +48,6 @@ public class ArrayKlass extends Klass {
dimension = new CIntField(type.getCIntegerField("_dimension"), 0);
higherDimension = new MetadataField(type.getAddressField("_higher_dimension"), 0);
lowerDimension = new MetadataField(type.getAddressField("_lower_dimension"), 0);
vtableLen = new CIntField(type.getCIntegerField("_vtable_len"), 0);
javaLangCloneableName = null;
javaLangObjectName = null;
javaIoSerializableName = null;
@ -61,7 +60,6 @@ public class ArrayKlass extends Klass {
private static CIntField dimension;
private static MetadataField higherDimension;
private static MetadataField lowerDimension;
private static CIntField vtableLen;
public Klass getJavaSuper() {
SystemDictionary sysDict = VM.getVM().getSystemDictionary();
@ -71,7 +69,6 @@ public class ArrayKlass extends Klass {
public long getDimension() { return dimension.getValue(this); }
public Klass getHigherDimension() { return (Klass) higherDimension.getValue(this); }
public Klass getLowerDimension() { return (Klass) lowerDimension.getValue(this); }
public long getVtableLen() { return vtableLen.getValue(this); }
// constant class names - javaLangCloneable, javaIoSerializable, javaLangObject
// Initialized lazily to avoid initialization ordering dependencies between ArrayKlass and SymbolTable
@ -140,6 +137,5 @@ public class ArrayKlass extends Klass {
visitor.doCInt(dimension, true);
visitor.doMetadata(higherDimension, true);
visitor.doMetadata(lowerDimension, true);
visitor.doCInt(vtableLen, true);
}
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -696,7 +696,7 @@ public class ConstantPool extends Metadata implements ClassConstants {
}
public long getSize() {
return Oop.alignObjectSize(headerSize + getLength());
return alignSize(headerSize + getLength());
}
//----------------------------------------------------------------------

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -70,7 +70,7 @@ public class ConstantPoolCache extends Metadata {
public ConstantPool getConstants() { return (ConstantPool) constants.getValue(this); }
public long getSize() {
return Oop.alignObjectSize(baseOffset + getLength() * elementSize);
return alignSize(baseOffset + getLength() * elementSize);
}
public ConstantPoolCacheEntry getEntryAt(int i) {
@ -79,8 +79,7 @@ public class ConstantPoolCache extends Metadata {
}
public int getIntAt(int entry, int fld) {
//alignObjectSize ?
long offset = baseOffset + /*alignObjectSize*/entry * elementSize + fld * intSize;
long offset = baseOffset + entry * elementSize + fld * intSize;
return (int) getAddress().getCIntegerAt(offset, intSize, true );
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -84,13 +84,12 @@ public class InstanceKlass extends Klass {
nonstaticOopMapSize = new CIntField(type.getCIntegerField("_nonstatic_oop_map_size"), 0);
isMarkedDependent = new CIntField(type.getCIntegerField("_is_marked_dependent"), 0);
initState = new CIntField(type.getCIntegerField("_init_state"), 0);
vtableLen = new CIntField(type.getCIntegerField("_vtable_len"), 0);
itableLen = new CIntField(type.getCIntegerField("_itable_len"), 0);
breakpoints = type.getAddressField("_breakpoints");
genericSignatureIndex = new CIntField(type.getCIntegerField("_generic_signature_index"), 0);
majorVersion = new CIntField(type.getCIntegerField("_major_version"), 0);
minorVersion = new CIntField(type.getCIntegerField("_minor_version"), 0);
headerSize = Oop.alignObjectOffset(type.getSize());
headerSize = type.getSize();
// read field offset constants
ACCESS_FLAGS_OFFSET = db.lookupIntConstant("FieldInfo::access_flags_offset").intValue();
@ -143,7 +142,6 @@ public class InstanceKlass extends Klass {
private static CIntField nonstaticOopMapSize;
private static CIntField isMarkedDependent;
private static CIntField initState;
private static CIntField vtableLen;
private static CIntField itableLen;
private static AddressField breakpoints;
private static CIntField genericSignatureIndex;
@ -242,8 +240,7 @@ public class InstanceKlass extends Klass {
}
public long getSize() {
return Oop.alignObjectSize(getHeaderSize() + Oop.alignObjectOffset(getVtableLen()) +
Oop.alignObjectOffset(getItableLen()) + Oop.alignObjectOffset(getNonstaticOopMapSize()));
return alignSize(getHeaderSize() + getVtableLen() + getItableLen() + getNonstaticOopMapSize());
}
public static long getHeaderSize() { return headerSize; }
@ -352,7 +349,6 @@ public class InstanceKlass extends Klass {
public long getStaticOopFieldCount() { return staticOopFieldCount.getValue(this); }
public long getNonstaticOopMapSize() { return nonstaticOopMapSize.getValue(this); }
public boolean getIsMarkedDependent() { return isMarkedDependent.getValue(this) != 0; }
public long getVtableLen() { return vtableLen.getValue(this); }
public long getItableLen() { return itableLen.getValue(this); }
public long majorVersion() { return majorVersion.getValue(this); }
public long minorVersion() { return minorVersion.getValue(this); }
@ -548,7 +544,6 @@ public class InstanceKlass extends Klass {
visitor.doCInt(nonstaticOopMapSize, true);
visitor.doCInt(isMarkedDependent, true);
visitor.doCInt(initState, true);
visitor.doCInt(vtableLen, true);
visitor.doCInt(itableLen, true);
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -61,6 +61,7 @@ public class Klass extends Metadata implements ClassConstants {
}
subklass = new MetadataField(type.getAddressField("_subklass"), 0);
nextSibling = new MetadataField(type.getAddressField("_next_sibling"), 0);
vtableLen = new CIntField(type.getCIntegerField("_vtable_len"), 0);
LH_INSTANCE_SLOW_PATH_BIT = db.lookupIntConstant("Klass::_lh_instance_slow_path_bit").intValue();
LH_LOG2_ELEMENT_SIZE_SHIFT = db.lookupIntConstant("Klass::_lh_log2_element_size_shift").intValue();
@ -71,6 +72,7 @@ public class Klass extends Metadata implements ClassConstants {
LH_ARRAY_TAG_OBJ_VALUE = db.lookupIntConstant("Klass::_lh_array_tag_obj_value").intValue();
}
public Klass(Address addr) {
super(addr);
}
@ -91,6 +93,7 @@ public class Klass extends Metadata implements ClassConstants {
private static MetadataField subklass;
private static MetadataField nextSibling;
private static sun.jvm.hotspot.types.Field traceIDField;
private static CIntField vtableLen;
private Address getValue(AddressField field) {
return addr.getAddressAt(field.getOffset());
@ -111,6 +114,7 @@ public class Klass extends Metadata implements ClassConstants {
public AccessFlags getAccessFlagsObj(){ return new AccessFlags(getAccessFlags()); }
public Klass getSubklassKlass() { return (Klass) subklass.getValue(this); }
public Klass getNextSiblingKlass() { return (Klass) nextSibling.getValue(this); }
public long getVtableLen() { return vtableLen.getValue(this); }
public long traceID() {
if (traceIDField == null) return 0;
@ -179,6 +183,7 @@ public class Klass extends Metadata implements ClassConstants {
visitor.doCInt(accessFlags, true);
visitor.doMetadata(subklass, true);
visitor.doMetadata(nextSibling, true);
visitor.doCInt(vtableLen, true);
}
public long getObjectSize() {

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,6 +44,11 @@ abstract public class Metadata extends VMObject {
super(addr);
}
public static long alignSize(long size) {
// Align up to the natural word size.
return VM.getVM().alignUp(size, VM.getVM().getBytesPerWord());
}
private static VirtualBaseConstructor<Metadata> metadataConstructor;
private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
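alignSize rounds a metadata size up to the next multiple of the machine word size. The underlying align-up arithmetic (valid for any power-of-two alignment, which the word size always is) is, as a C++ sketch:

// Round size up to the next multiple of alignment (alignment must be a
// power of two): e.g. align_up(13, 8) == 16, align_up(16, 8) == 16.
static long align_up(long size, long alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}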

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -252,7 +252,7 @@ public class MethodData extends Metadata implements MethodDataInterface<Klass,Me
}
int size() {
return (int)Oop.alignObjectSize(VM.getVM().alignUp(sizeInBytes(), VM.getVM().getBytesPerWord())/VM.getVM().getBytesPerWord());
return (int)alignSize(VM.getVM().alignUp(sizeInBytes(), VM.getVM().getBytesPerWord())/VM.getVM().getBytesPerWord());
}
ParametersTypeData<Klass,Method> parametersTypeData() {

View file

@ -35,7 +35,11 @@ public class WorkerThread {
public WorkerThread() {
mqb = new MessageQueueBackend();
mq = mqb.getFirstQueue();
new Thread(new MainLoop()).start();
// Make this a daemon thread so it does not prevent the VM from exiting.
Thread mqthread = new Thread(new MainLoop());
mqthread.setDaemon(true);
mqthread.start();
}
/** Runs the given Runnable in the thread represented by this

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -597,7 +597,7 @@ final class HotSpotResolvedJavaMethodImpl extends HotSpotMethod implements HotSp
}
HotSpotVMConfig config = config();
final int vtableIndex = getVtableIndex((HotSpotResolvedObjectTypeImpl) resolved);
return config.instanceKlassVtableStartOffset() + vtableIndex * config.vtableEntrySize + config.vtableEntryMethodOffset;
return config.klassVtableStartOffset + vtableIndex * config.vtableEntrySize + config.vtableEntryMethodOffset;
}
@Override

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -470,8 +470,8 @@ final class HotSpotResolvedObjectTypeImpl extends HotSpotResolvedJavaType implem
/* Everything has the core vtable of java.lang.Object */
return config.baseVtableLength();
}
int result = UNSAFE.getInt(getMetaspaceKlass() + config.instanceKlassVtableLengthOffset) / (config.vtableEntrySize / config.heapWordSize);
assert result >= config.baseVtableLength() : UNSAFE.getInt(getMetaspaceKlass() + config.instanceKlassVtableLengthOffset) + " " + config.vtableEntrySize;
int result = UNSAFE.getInt(getMetaspaceKlass() + config.klassVtableLengthOffset) / (config.vtableEntrySize / config.heapWordSize);
assert result >= config.baseVtableLength() : UNSAFE.getInt(getMetaspaceKlass() + config.klassVtableLengthOffset) + " " + config.vtableEntrySize;
return result;
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1031,19 +1031,12 @@ public class HotSpotVMConfig {
@HotSpotVMField(name = "InstanceKlass::_init_state", type = "u1", get = HotSpotVMField.Type.OFFSET) @Stable public int instanceKlassInitStateOffset;
@HotSpotVMField(name = "InstanceKlass::_constants", type = "ConstantPool*", get = HotSpotVMField.Type.OFFSET) @Stable public int instanceKlassConstantsOffset;
@HotSpotVMField(name = "InstanceKlass::_fields", type = "Array<u2>*", get = HotSpotVMField.Type.OFFSET) @Stable public int instanceKlassFieldsOffset;
@HotSpotVMField(name = "CompilerToVM::Data::InstanceKlass_vtable_start_offset", type = "int", get = HotSpotVMField.Type.VALUE) @Stable public int instanceKlassVtableStartOffset;
@HotSpotVMField(name = "CompilerToVM::Data::InstanceKlass_vtable_length_offset", type = "int", get = HotSpotVMField.Type.VALUE) @Stable public int instanceKlassVtableLengthOffset;
@HotSpotVMField(name = "CompilerToVM::Data::Klass_vtable_start_offset", type = "int", get = HotSpotVMField.Type.VALUE) @Stable public int klassVtableStartOffset;
@HotSpotVMField(name = "CompilerToVM::Data::Klass_vtable_length_offset", type = "int", get = HotSpotVMField.Type.VALUE) @Stable public int klassVtableLengthOffset;
@HotSpotVMConstant(name = "InstanceKlass::linked") @Stable public int instanceKlassStateLinked;
@HotSpotVMConstant(name = "InstanceKlass::fully_initialized") @Stable public int instanceKlassStateFullyInitialized;
/**
* See {@code InstanceKlass::vtable_start_offset()}.
*/
public final int instanceKlassVtableStartOffset() {
return instanceKlassVtableStartOffset * heapWordSize;
}
@HotSpotVMType(name = "arrayOopDesc", get = HotSpotVMType.Type.SIZE) @Stable public int arrayOopDescSize;
/**

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -48,7 +48,10 @@
"Load DLLs with executable-stack attribute in the VM Thread") \
\
product(bool, UseSHM, false, \
"Use SYSV shared memory for large pages")
"Use SYSV shared memory for large pages") \
\
diagnostic(bool, UseCpuAllocPath, false, \
"Use CPU_ALLOC code path in os::active_processor_count ")
//
// Defines Linux-specific default values. The flags are available on all

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,6 +32,7 @@
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_linux.h"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "mutex_linux.inline.hpp"
@ -106,6 +107,14 @@
# include <inttypes.h>
# include <sys/ioctl.h>
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#include <sched.h>
#undef _GNU_SOURCE
#else
#include <sched.h>
#endif
// if RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
// getrusage() is prepared to handle the associated failure.
#ifndef RUSAGE_THREAD
@ -4762,13 +4771,73 @@ void os::make_polling_page_readable(void) {
}
}
// Get the current number of available processors for this process.
// This value can change at any time during a process's lifetime.
// sched_getaffinity gives an accurate answer as it accounts for cpusets.
// If it appears there may be more than 1024 processors then we do a
// dynamic check - see 6515172 for details.
// If anything goes wrong we fall back to returning the number of online
// processors - which can be greater than the number available to the process.
int os::active_processor_count() {
// Linux doesn't yet have a (official) notion of processor sets,
// so just return the number of online processors.
cpu_set_t cpus; // can represent at most 1024 (CPU_SETSIZE) processors
cpu_set_t* cpus_p = &cpus;
int cpus_size = sizeof(cpu_set_t);
int configured_cpus = processor_count(); // upper bound on available cpus
int cpu_count = 0;
// To enable easy testing of the dynamic path on different platforms we
// introduce a diagnostic flag: UseCpuAllocPath
if (configured_cpus >= CPU_SETSIZE || UseCpuAllocPath) {
// kernel may use a mask bigger than cpu_set_t
log_trace(os)("active_processor_count: using dynamic path %s"
"- configured processors: %d",
UseCpuAllocPath ? "(forced) " : "",
configured_cpus);
cpus_p = CPU_ALLOC(configured_cpus);
if (cpus_p != NULL) {
cpus_size = CPU_ALLOC_SIZE(configured_cpus);
// zero it just to be safe
CPU_ZERO_S(cpus_size, cpus_p);
}
else {
// failed to allocate, so fall back to online cpus
int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
log_trace(os)("active_processor_count: "
"CPU_ALLOC failed (%s) - using "
"online processor count: %d",
strerror(errno), online_cpus);
return online_cpus;
}
}
else {
log_trace(os)("active_processor_count: using static path - configured processors: %d",
configured_cpus);
}
// pid 0 means the current thread - which we have to assume represents the process
if (sched_getaffinity(0, cpus_size, cpus_p) == 0) {
if (cpus_p != &cpus) {
cpu_count = CPU_COUNT_S(cpus_size, cpus_p);
}
else {
cpu_count = CPU_COUNT(cpus_p);
}
log_trace(os)("active_processor_count: sched_getaffinity processor count: %d", cpu_count);
}
else {
cpu_count = ::sysconf(_SC_NPROCESSORS_ONLN);
warning("sched_getaffinity failed (%s)- using online processor count (%d) "
"which may exceed available processors", strerror(errno), cpu_count);
}
if (cpus_p != &cpus) {
CPU_FREE(cpus_p);
}
assert(cpu_count > 0 && cpu_count <= processor_count(), "sanity check");
return cpu_count;
}
void os::set_native_thread_name(const char *name) {
if (Linux::_pthread_setname_np) {
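The new os::active_processor_count above prefers sched_getaffinity because it respects cpusets, switching to a dynamically sized mask via CPU_ALLOC when more than CPU_SETSIZE (1024) processors may be configured. A self-contained C++ sketch of that dynamic path using the same glibc APIs (error handling trimmed relative to the code above):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main() {
  int configured = (int)sysconf(_SC_NPROCESSORS_CONF);
  cpu_set_t* set = CPU_ALLOC(configured);     // mask sized for all configured cpus
  size_t size = CPU_ALLOC_SIZE(configured);
  CPU_ZERO_S(size, set);
  if (sched_getaffinity(0, size, set) == 0) { // pid 0 == current thread/process
    printf("available: %d of %d configured\n", CPU_COUNT_S(size, set), configured);
  }
  CPU_FREE(set);
  return 0;
}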

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -5267,8 +5267,29 @@ bool os::check_heap(bool force) {
bool os::find(address addr, outputStream* st) {
// Nothing yet
return false;
int offset = -1;
bool result = false;
char buf[256];
if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
st->print(PTR_FORMAT " ", addr);
if (strlen(buf) < sizeof(buf) - 1) {
char* p = strrchr(buf, '\\');
if (p) {
st->print("%s", p + 1);
} else {
st->print("%s", buf);
}
} else {
// The library name is probably truncated. Let's omit the library name.
// See also JDK-8147512.
}
if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
st->print("::%s + 0x%x", buf, offset);
}
st->cr();
result = true;
}
return result;
}
LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {

View file

@ -599,6 +599,7 @@ void os::print_register_info(outputStream *st, const void *context) {
// this is only for the "general purpose" registers
#ifdef AMD64
st->print("RIP="); print_location(st, uc->Rip);
st->print("RAX="); print_location(st, uc->Rax);
st->print("RBX="); print_location(st, uc->Rbx);
st->print("RCX="); print_location(st, uc->Rcx);
@ -616,6 +617,7 @@ void os::print_register_info(outputStream *st, const void *context) {
st->print("R14="); print_location(st, uc->R14);
st->print("R15="); print_location(st, uc->R15);
#else
st->print("EIP="); print_location(st, uc->Eip);
st->print("EAX="); print_location(st, uc->Eax);
st->print("EBX="); print_location(st, uc->Ebx);
st->print("ECX="); print_location(st, uc->Ecx);

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -2972,8 +2972,8 @@ void LIRGenerator::do_Invoke(Invoke* x) {
SharedRuntime::get_resolve_virtual_call_stub(),
arg_list, info);
} else {
int entry_offset = InstanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
int entry_offset = in_bytes(Klass::vtable_start_offset()) + x->vtable_index() * vtableEntry::size_in_bytes();
int vtable_offset = entry_offset + vtableEntry::method_offset_in_bytes();
__ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
}
break;

View file

@ -34,6 +34,7 @@
#include "classfile/verifier.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/gcLocker.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
@ -79,7 +80,7 @@
#define JAVA_CLASSFILE_MAGIC 0xCAFEBABE
#define JAVA_MIN_SUPPORTED_VERSION 45
#define JAVA_MAX_SUPPORTED_VERSION 52
#define JAVA_MAX_SUPPORTED_VERSION 53
#define JAVA_MAX_SUPPORTED_MINOR_VERSION 0
// Used for two backward compatibility reasons:
@ -100,6 +101,8 @@
// Extension method support.
#define JAVA_8_VERSION 52
#define JAVA_9_VERSION 53
enum { LegalClass, LegalField, LegalMethod }; // used to verify unqualified names
void ClassFileParser::parse_constant_pool_entries(const ClassFileStream* const stream,
@ -2705,7 +2708,7 @@ Method* ClassFileParser::parse_method(const ClassFileStream* const cfs,
ConstMethod::NORMAL,
CHECK_NULL);
ClassLoadingService::add_class_method_size(m->size()*HeapWordSize);
ClassLoadingService::add_class_method_size(m->size()*wordSize);
// Fill in information from fixed part (access_flags already set)
m->set_constants(_cp);
@ -4602,8 +4605,8 @@ void ClassFileParser::verify_legal_method_modifiers(jint flags,
}
} else if (major_gte_15) {
// Class file version in the interval [JAVA_1_5_VERSION, JAVA_8_VERSION)
if (!is_public || is_static || is_final || is_synchronized ||
is_native || !is_abstract || is_strict) {
if (!is_public || is_private || is_protected || is_static || is_final ||
is_synchronized || is_native || !is_abstract || is_strict) {
is_illegal = true;
}
} else {
@ -5347,30 +5350,12 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik, TRAPS) {
ClassLoadingService::notify_class_loaded(ik, false /* not shared class */);
if (!is_internal()) {
if (TraceClassLoading) {
ResourceMark rm;
// print in a single call to reduce interleaving of output
if (_stream->source() != NULL) {
tty->print("[Loaded %s from %s]\n",
ik->external_name(),
_stream->source());
} else if (_loader_data->class_loader() == NULL) {
const Klass* const caller =
THREAD->is_Java_thread()
? ((JavaThread*)THREAD)->security_get_caller_class(1)
: NULL;
// caller can be NULL, for example, during a JVMTI VM_Init hook
if (caller != NULL) {
tty->print("[Loaded %s by instance of %s]\n",
ik->external_name(),
caller->external_name());
} else {
tty->print("[Loaded %s]\n", ik->external_name());
}
} else {
tty->print("[Loaded %s from %s]\n", ik->external_name(),
_loader_data->class_loader()->klass()->external_name());
if (log_is_enabled(Info, classload)) {
ik->print_loading_log(LogLevel::Info, _loader_data, _stream);
}
// No 'else' here as logging levels are not mutually exclusive
if (log_is_enabled(Debug, classload)) {
ik->print_loading_log(LogLevel::Debug, _loader_data, _stream);
}
if (log_is_enabled(Info, classresolve)) {

View file

@ -578,15 +578,14 @@ ClassPathEntry* ClassLoader::create_class_path_entry(const char *path, const str
}
}
}
if (TraceClassLoading || TraceClassPaths) {
if (TraceClassPaths) {
tty->print_cr("[Opened %s]", path);
}
log_info(classload)("opened: %s", path);
} else {
// Directory
new_entry = new ClassPathDirEntry(path);
if (TraceClassLoading) {
tty->print_cr("[Path %s]", path);
}
log_info(classload)("path: %s", path);
}
return new_entry;
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -54,12 +54,14 @@
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/gcLocker.hpp"
#include "logging/log.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/mutex.hpp"
#include "runtime/safepoint.hpp"
@ -286,9 +288,9 @@ void ClassLoaderData::add_class(Klass* k, bool publicize /* true */) {
_klasses = k;
}
if (publicize && TraceClassLoaderData && Verbose && k->class_loader_data() != NULL) {
if (publicize && k->class_loader_data() != NULL) {
ResourceMark rm;
tty->print_cr("[TraceClassLoaderData] Adding k: " PTR_FORMAT " %s to CLD: "
log_trace(classloaderdata)("Adding k: " PTR_FORMAT " %s to CLD: "
PTR_FORMAT " loader: " PTR_FORMAT " %s",
p2i(k),
k->external_name(),
@ -326,15 +328,16 @@ void ClassLoaderData::unload() {
// Tell serviceability tools these classes are unloading
classes_do(InstanceKlass::notify_unload_class);
if (TraceClassLoaderData) {
if (log_is_enabled(Debug, classloaderdata)) {
ResourceMark rm;
tty->print("[ClassLoaderData: unload loader data " INTPTR_FORMAT, p2i(this));
tty->print(" for instance " INTPTR_FORMAT " of %s", p2i((void *)class_loader()),
outputStream* log = LogHandle(classloaderdata)::debug_stream();
log->print(": unload loader data " INTPTR_FORMAT, p2i(this));
log->print(" for instance " INTPTR_FORMAT " of %s", p2i((void *)class_loader()),
loader_name());
if (is_anonymous()) {
tty->print(" for anonymous class " INTPTR_FORMAT " ", p2i(_klasses));
log->print(" for anonymous class " INTPTR_FORMAT " ", p2i(_klasses));
}
tty->print_cr("]");
log->cr();
}
}
@ -408,13 +411,13 @@ Metaspace* ClassLoaderData::metaspace_non_null() {
assert (class_loader() == NULL, "Must be");
set_metaspace(new Metaspace(_metaspace_lock, Metaspace::BootMetaspaceType));
} else if (is_anonymous()) {
if (TraceClassLoaderData && Verbose && class_loader() != NULL) {
tty->print_cr("is_anonymous: %s", class_loader()->klass()->internal_name());
if (class_loader() != NULL) {
log_trace(classloaderdata)("is_anonymous: %s", class_loader()->klass()->internal_name());
}
set_metaspace(new Metaspace(_metaspace_lock, Metaspace::AnonymousMetaspaceType));
} else if (class_loader()->is_a(SystemDictionary::reflect_DelegatingClassLoader_klass())) {
if (TraceClassLoaderData && Verbose && class_loader() != NULL) {
tty->print_cr("is_reflection: %s", class_loader()->klass()->internal_name());
if (class_loader() != NULL) {
log_trace(classloaderdata)("is_reflection: %s", class_loader()->klass()->internal_name());
}
set_metaspace(new Metaspace(_metaspace_lock, Metaspace::ReflectionMetaspaceType));
} else {
@ -601,21 +604,47 @@ ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous, TRA
cld->set_next(next);
ClassLoaderData* exchanged = (ClassLoaderData*)Atomic::cmpxchg_ptr(cld, list_head, next);
if (exchanged == next) {
if (TraceClassLoaderData) {
ResourceMark rm;
tty->print("[ClassLoaderData: ");
tty->print("create class loader data " INTPTR_FORMAT, p2i(cld));
tty->print(" for instance " INTPTR_FORMAT " of %s", p2i((void *)cld->class_loader()),
cld->loader_name());
tty->print_cr("]");
if (log_is_enabled(Debug, classloaderdata)) {
PauseNoSafepointVerifier pnsv(&no_safepoints); // Need safe points for JavaCalls::call_virtual
log_creation(loader, cld, CHECK_NULL);
}
return cld;
}
next = exchanged;
} while (true);
}
void ClassLoaderDataGraph::log_creation(Handle loader, ClassLoaderData* cld, TRAPS) {
Handle string;
if (loader.not_null()) {
// Include the result of loader.toString() in the output. This allows
// the user of the log to identify the class loader instance.
JavaValue result(T_OBJECT);
KlassHandle spec_klass(THREAD, SystemDictionary::ClassLoader_klass());
JavaCalls::call_virtual(&result,
loader,
spec_klass,
vmSymbols::toString_name(),
vmSymbols::void_string_signature(),
CHECK);
assert(result.get_type() == T_OBJECT, "just checking");
string = (oop)result.get_jobject();
}
ResourceMark rm;
outputStream* log = LogHandle(classloaderdata)::debug_stream();
log->print("create class loader data " INTPTR_FORMAT, p2i(cld));
log->print(" for instance " INTPTR_FORMAT " of %s", p2i((void *)cld->class_loader()),
cld->loader_name());
if (string.not_null()) {
log->print(": ");
java_lang_String::print(string(), log);
}
log->cr();
}
void ClassLoaderDataGraph::oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim) {
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
cld->oops_do(f, klass_closure, must_claim);
@ -709,10 +738,11 @@ GrowableArray<ClassLoaderData*>* ClassLoaderDataGraph::new_clds() {
if (!curr->claimed()) {
array->push(curr);
if (TraceClassLoaderData) {
tty->print("[ClassLoaderData] found new CLD: ");
curr->print_value_on(tty);
tty->cr();
if (log_is_enabled(Debug, classloaderdata)) {
outputStream* log = LogHandle(classloaderdata)::debug_stream();
log->print("found new CLD: ");
curr->print_value_on(log);
log->cr();
}
}
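
The hunks above migrate this file from the TraceClassLoaderData/Verbose flags and raw tty output to Unified Logging. As a reading aid, a minimal sketch of the two idioms the new code relies on; the function and message text here are illustrative, not part of the commit:

#include "logging/log.hpp"

static void log_cld_example(ClassLoaderData* cld) {
  // One-shot message: formatted and emitted only if the classloaderdata
  // tag is enabled at trace level.
  log_trace(classloaderdata)("example CLD: " PTR_FORMAT, p2i(cld));

  // Multi-part message: test the level once, then stream the pieces to the
  // level's output and finish the line with cr().
  if (log_is_enabled(Debug, classloaderdata)) {
    outputStream* log = LogHandle(classloaderdata)::debug_stream();
    log->print("example CLD " PTR_FORMAT, p2i(cld));
    log->cr();
  }
}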

View file

@ -116,6 +116,7 @@ class ClassLoaderDataGraph : public AllStatic {
static void dump_on(outputStream * const out) PRODUCT_RETURN;
static void dump() { dump_on(tty); }
static void verify();
static void log_creation(Handle loader, ClassLoaderData* cld, TRAPS);
static bool unload_list_contains(const void* x);
#ifndef PRODUCT

View file

@ -365,14 +365,14 @@ bool HashtableTextDump::skip_newline() {
}
int HashtableTextDump::skip(char must_be_char) {
corrupted_if(remain() < 1);
corrupted_if(*_p++ != must_be_char);
corrupted_if(remain() < 1, "Truncated");
corrupted_if(*_p++ != must_be_char, "Unexpected character");
return 0;
}
void HashtableTextDump::skip_past(char c) {
for (;;) {
corrupted_if(remain() < 1);
corrupted_if(remain() < 1, "Truncated");
if (*_p++ == c) {
return;
}
@ -381,7 +381,7 @@ void HashtableTextDump::skip_past(char c) {
void HashtableTextDump::check_version(const char* ver) {
int len = (int)strlen(ver);
corrupted_if(remain() < len);
corrupted_if(remain() < len, "Truncated");
if (strncmp(_p, ver, len) != 0) {
quit("wrong version of hashtable dump file", _filename);
}
@ -451,7 +451,7 @@ int HashtableTextDump::scan_symbol_prefix() {
jchar HashtableTextDump::unescape(const char* from, const char* end, int count) {
jchar value = 0;
corrupted_if(from + count > end);
corrupted_if(from + count > end, "Truncated");
for (int i=0; i<count; i++) {
char c = *from++;
@ -486,7 +486,7 @@ void HashtableTextDump::get_utf8(char* utf8_buffer, int utf8_length) {
if (*from != '\\') {
*to++ = *from++;
} else {
corrupted_if(from + 2 > end);
corrupted_if(from + 2 > end, "Truncated");
char c = from[1];
from += 2;
switch (c) {
@ -507,7 +507,7 @@ void HashtableTextDump::get_utf8(char* utf8_buffer, int utf8_length) {
}
}
}
corrupted_if(n > 0); // expected more chars but file has ended
corrupted_if(n > 0, "Truncated"); // expected more chars but file has ended
_p = from;
skip_newline();
}

View file

@ -276,9 +276,9 @@ public:
void corrupted(const char *p, const char *msg);
inline void corrupted_if(bool cond) {
inline void corrupted_if(bool cond, const char *msg) {
if (cond) {
corrupted(_p, NULL);
corrupted(_p, msg);
}
}
@ -287,27 +287,30 @@ public:
void skip_past(char c);
void check_version(const char* ver);
inline bool get_num(char delim, int *utf8_length) {
inline void get_num(char delim, int *num) {
const char* p = _p;
const char* end = _end;
int num = 0;
u8 n = 0;
while (p < end) {
char c = *p++;
if ('0' <= c && c <= '9') {
num = num * 10 + (c - '0');
n = n * 10 + (c - '0');
if (n > (u8)INT_MAX) {
corrupted(_p, "Num overflow");
}
} else if (c == delim) {
_p = p;
*utf8_length = num;
return true;
*num = (int)n;
return;
} else {
// Not [0-9], not 'delim'
return false;
corrupted(_p, "Unrecognized format");;
}
}
corrupted(_end, "Incorrect format");
ShouldNotReachHere();
return false;
}
void scan_prefix_type();
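
get_num() now accumulates digits in an unsigned 64-bit value so that anything past INT_MAX is caught as corruption before the narrowing cast, and every failure path reports through corrupted() with a message instead of returning false. A standalone sketch of the same parsing pattern, with illustrative names and plain return codes standing in for corrupted():

#include <climits>
#include <stdint.h>

bool parse_delimited_int(const char* p, const char* end, char delim, int* out) {
  uint64_t n = 0;                      // wide accumulator, like u8 in the VM code
  while (p < end) {
    char c = *p++;
    if ('0' <= c && c <= '9') {
      n = n * 10 + (c - '0');
      if (n > (uint64_t)INT_MAX) {
        return false;                  // "Num overflow"
      }
    } else if (c == delim) {
      *out = (int)n;                   // safe: bounded by INT_MAX above
      return true;
    } else {
      return false;                    // "Unrecognized format"
    }
  }
  return false;                        // ran off the end: "Incorrect format"
}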

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -129,7 +129,7 @@ compute_offset(int &dest_offset,
tty->print_cr(" name: %s, sig: %s, flags: %08x", fs.name()->as_C_string(), fs.signature()->as_C_string(), fs.access_flags().as_int());
}
#endif //PRODUCT
vm_exit_during_initialization("Invalid layout of preloaded class: use -XX:+TraceClassLoading to see the origin of the problem class");
vm_exit_during_initialization("Invalid layout of preloaded class: use -Xlog:classload=info to see the origin of the problem class");
}
dest_offset = fd.offset();
}
@ -3972,7 +3972,7 @@ int InjectedField::compute_offset() {
tty->print_cr(" name: %s, sig: %s, flags: %08x", fs.name()->as_C_string(), fs.signature()->as_C_string(), fs.access_flags().as_int());
}
#endif //PRODUCT
vm_exit_during_initialization("Invalid layout of preloaded class: use -XX:+TraceClassLoading to see the origin of the problem class");
vm_exit_during_initialization("Invalid layout of preloaded class: use -Xlog:classload=info to see the origin of the problem class");
return -1;
}

View file

@ -737,7 +737,7 @@ bool StringTable::copy_compact_table(char** top, char *end, GrowableArray<MemReg
return false;
}
ch_table.dump(top, end);
*top = (char*)align_pointer_up(*top, sizeof(void*));
*top = (char*)align_ptr_up(*top, sizeof(void*));
#endif
return true;
@ -760,7 +760,7 @@ const char* StringTable::init_shared_table(FileMapInfo *mapinfo, char *buffer) {
juint *p = (juint*)buffer;
const char* end = _shared_table.init(
CompactHashtable<oop, char>::_string_table, (char*)p);
const char* aligned_end = (const char*)align_pointer_up(end, sizeof(void*));
const char* aligned_end = (const char*)align_ptr_up(end, sizeof(void*));
if (_ignore_shared_strings) {
_shared_table.reset();

View file

@ -544,7 +544,7 @@ bool SymbolTable::copy_compact_table(char** top, char*end) {
ch_table.dump(top, end);
*top = (char*)align_pointer_up(*top, sizeof(void*));
*top = (char*)align_ptr_up(*top, sizeof(void*));
#endif
return true;
}
@ -552,7 +552,7 @@ bool SymbolTable::copy_compact_table(char** top, char*end) {
const char* SymbolTable::init_shared_table(const char* buffer) {
const char* end = _shared_table.init(
CompactHashtable<Symbol*, char>::_symbol_table, buffer);
return (const char*)align_pointer_up(end, sizeof(void*));
return (const char*)align_ptr_up(end, sizeof(void*));
}
//---------------------------------------------------------------------------
@ -600,7 +600,7 @@ void SymbolTable::print_histogram() {
tty->print_cr("Symbol Table Histogram:");
tty->print_cr(" Total number of symbols %7d", total_count);
tty->print_cr(" Total size in memory %7dK",
(total_size*HeapWordSize)/1024);
(total_size*wordSize)/1024);
tty->print_cr(" Total counted %7d", _symbols_counted);
tty->print_cr(" Total removed %7d", _symbols_removed);
if (_symbols_counted > 0) {
@ -617,11 +617,11 @@ void SymbolTable::print_histogram() {
tty->print_cr(" %6s %10s %10s", "Length", "#Symbols", "Size");
for (i = 0; i < results_length; i++) {
if (counts[i] > 0) {
tty->print_cr(" %6d %10d %10dK", i, counts[i], (sizes[i]*HeapWordSize)/1024);
tty->print_cr(" %6d %10d %10dK", i, counts[i], (sizes[i]*wordSize)/1024);
}
}
tty->print_cr(" >=%6d %10d %10dK\n", results_length,
out_of_range_count, (out_of_range_size*HeapWordSize)/1024);
out_of_range_count, (out_of_range_size*wordSize)/1024);
}
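
The histogram switches from HeapWordSize to wordSize; both are the byte size of a machine word (8 on 64-bit, 4 on 32-bit), the latter being the plainer constant for counts that are not heap words. The kilobyte conversion works out as, for example:

// (total_size * wordSize) / 1024
//   = (131072 words * 8 bytes/word) / 1024   // 64-bit VM, illustrative count
//   = 1024K
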
void SymbolTable::print() {

View file

@ -1302,14 +1302,13 @@ instanceKlassHandle SystemDictionary::load_shared_class(instanceKlassHandle ik,
ik->restore_unshareable_info(loader_data, protection_domain, CHECK_(nh));
}
if (TraceClassLoading) {
ResourceMark rm;
tty->print("[Loaded %s", ik->external_name());
tty->print(" from shared objects file");
if (class_loader.not_null()) {
tty->print(" by %s", loader_data->loader_name());
if (log_is_enabled(Info, classload)) {
ik()->print_loading_log(LogLevel::Info, loader_data, NULL);
}
tty->print_cr("]");
// No 'else' here as logging levels are not mutually exclusive
if (log_is_enabled(Debug, classload)) {
ik()->print_loading_log(LogLevel::Debug, loader_data, NULL);
}
if (DumpLoadedClassList != NULL && classlist_file->is_open()) {
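
The "not mutually exclusive" comment matters here: under unified logging, enabling a finer level also enables the coarser ones, so with -Xlog:classload=debug both branches above are taken and the class is logged at both levels. A sketch of the implied behavior (branch bodies elided):

// -Xlog:classload=info   -> only the first branch runs
// -Xlog:classload=debug  -> both branches run (debug implies info)
if (log_is_enabled(Info, classload))  { /* emit the info-level line */ }
if (log_is_enabled(Debug, classload)) { /* emit the extra debug-level line */ }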

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -651,6 +651,7 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
int ex_max = -1;
// Look through each item on the exception table. Each of the fields must refer
// to a legal instruction.
if (was_recursively_verified()) return;
verify_exception_handler_table(
code_length, code_data, ex_min, ex_max, CHECK_VERIFY(this));
@ -737,11 +738,14 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
// should be used for this check. So, do the check here before a possible
// local is added to the type state.
if (Bytecodes::is_store_into_local(opcode) && bci >= ex_min && bci < ex_max) {
if (was_recursively_verified()) return;
verify_exception_handler_targets(
bci, this_uninit, &current_frame, &stackmap_table, CHECK_VERIFY(this));
verified_exc_handlers = true;
}
if (was_recursively_verified()) return;
switch (opcode) {
case Bytecodes::_nop :
no_control_flow = false; break;
@ -1730,6 +1734,7 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
assert(!(verified_exc_handlers && this_uninit),
"Exception handler targets got verified before this_uninit got set");
if (!verified_exc_handlers && bci >= ex_min && bci < ex_max) {
if (was_recursively_verified()) return;
verify_exception_handler_targets(
bci, this_uninit, &current_frame, &stackmap_table, CHECK_VERIFY(this));
}
@ -1767,6 +1772,9 @@ char* ClassVerifier::generate_code_data(const methodHandle& m, u4 code_length, T
return code_data;
}
// Since this method references the constant pool, call was_recursively_verified()
// before calling this method to make sure a prior class load did not cause the
// current class to get verified.
void ClassVerifier::verify_exception_handler_table(u4 code_length, char* code_data, int& min, int& max, TRAPS) {
ExceptionTable exhandlers(_method());
int exlength = exhandlers.length();
@ -1874,7 +1882,11 @@ u2 ClassVerifier::verify_stackmap_table(u2 stackmap_index, u2 bci,
return stackmap_index;
}
void ClassVerifier::verify_exception_handler_targets(u2 bci, bool this_uninit, StackMapFrame* current_frame,
// Since this method references the constant pool, call was_recursively_verified()
// before calling this method to make sure a prior class load did not cause the
// current class to get verified.
void ClassVerifier::verify_exception_handler_targets(u2 bci, bool this_uninit,
StackMapFrame* current_frame,
StackMapTable* stackmap_table, TRAPS) {
constantPoolHandle cp (THREAD, _method->constants());
ExceptionTable exhandlers(_method());
@ -1889,6 +1901,7 @@ void ClassVerifier::verify_exception_handler_targets(u2 bci, bool this_uninit, S
if (this_uninit) { flags |= FLAG_THIS_UNINIT; }
StackMapFrame* new_frame = current_frame->frame_in_exception_handler(flags);
if (catch_type_index != 0) {
if (was_recursively_verified()) return;
// We know that this index refers to a subclass of Throwable
VerificationType catch_type = cp_index_to_type(
catch_type_index, cp, CHECK_VERIFY(this));
@ -2269,6 +2282,7 @@ void ClassVerifier::verify_field_instructions(RawBytecodeStream* bcs,
check_protected: {
if (_this_type == stack_object_type)
break; // stack_object_type must be assignable to _current_class_type
if (was_recursively_verified()) return;
Symbol* ref_class_name =
cp->klass_name_at(cp->klass_ref_index_at(index));
if (!name_in_supers(ref_class_name, current_class()))
@ -2531,6 +2545,7 @@ void ClassVerifier::verify_invoke_init(
// Check the exception handler target stackmaps with the locals from the
// incoming stackmap (before initialize_object() changes them to outgoing
// state).
if (was_recursively_verified()) return;
verify_exception_handler_targets(bci, true, current_frame,
stackmap_table, CHECK_VERIFY(this));
} // in_try_block
@ -2548,6 +2563,7 @@ void ClassVerifier::verify_invoke_init(
return;
}
u2 new_class_index = Bytes::get_Java_u2(new_bcp + 1);
if (was_recursively_verified()) return;
verify_cp_class_type(bci, new_class_index, cp, CHECK_VERIFY(this));
// The method must be an <init> method of the indicated class
@ -2567,6 +2583,7 @@ void ClassVerifier::verify_invoke_init(
VerificationType objectref_type = new_class_type;
if (name_in_supers(ref_class_type.name(), current_class())) {
Klass* ref_klass = load_class(ref_class_type.name(), CHECK);
if (was_recursively_verified()) return;
Method* m = InstanceKlass::cast(ref_klass)->uncached_lookup_method(
vmSymbols::object_initializer_name(),
cp->signature_ref_at(bcs->get_index_u2()),
@ -2591,6 +2608,7 @@ void ClassVerifier::verify_invoke_init(
// incoming stackmap (before initialize_object() changes them to outgoing
// state).
if (in_try_block) {
if (was_recursively_verified()) return;
verify_exception_handler_targets(bci, *this_uninit, current_frame,
stackmap_table, CHECK_VERIFY(this));
}
@ -2791,6 +2809,7 @@ void ClassVerifier::verify_invoke_instructions(
verify_invoke_init(bcs, index, ref_class_type, current_frame,
code_length, in_try_block, this_uninit, cp, stackmap_table,
CHECK_VERIFY(this));
if (was_recursively_verified()) return;
} else { // other methods
// Ensures that target class is assignable to method class.
if (opcode == Bytecodes::_invokespecial) {
@ -2816,6 +2835,7 @@ void ClassVerifier::verify_invoke_instructions(
VerificationType stack_object_type =
current_frame->pop_stack(ref_class_type, CHECK_VERIFY(this));
if (current_type() != stack_object_type) {
if (was_recursively_verified()) return;
assert(cp->cache() == NULL, "not rewritten yet");
Symbol* ref_class_name =
cp->klass_name_at(cp->klass_ref_index_at(index));
@ -2894,6 +2914,7 @@ void ClassVerifier::verify_anewarray(
current_frame->pop_stack(
VerificationType::integer_type(), CHECK_VERIFY(this));
if (was_recursively_verified()) return;
VerificationType component_type =
cp_index_to_type(index, cp, CHECK_VERIFY(this));
int length;
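
All of the guards added in this file follow one pattern: verification can trigger class loading (load_class, constant pool resolution), which may re-enter the verifier and finish verifying the current class first; each guard then cuts the now-redundant work short, as the two new method comments explain. A minimal sketch of the pattern, with an illustrative method name:

void ClassVerifier::verify_example_step(TRAPS) {  // illustrative, not in the commit
  // A class load triggered earlier in this pass may have recursively
  // verified the current class already; if so, stop before touching the
  // constant pool again.
  if (was_recursively_verified()) return;
  // ... constant-pool-dependent checks continue here ...
}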

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -48,6 +48,7 @@
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/xmlstream.hpp"
#include "logging/log.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
#endif
@ -1313,13 +1314,14 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
flush_dependencies(is_alive);
// Break cycle between nmethod & method
if (TraceClassUnloading && WizardMode) {
tty->print_cr("[Class unloading: Making nmethod " INTPTR_FORMAT
" unloadable], Method*(" INTPTR_FORMAT
if (log_is_enabled(Trace, classunload)) {
outputStream* log = LogHandle(classunload)::trace_stream();
log->print_cr("making nmethod " INTPTR_FORMAT
" unloadable, Method*(" INTPTR_FORMAT
"), cause(" INTPTR_FORMAT ")",
p2i(this), p2i(_method), p2i(cause));
if (!Universe::heap()->is_gc_active())
cause->klass()->print();
cause->klass()->print_on(log);
}
// Unlink the osr method, so we do not look this up again
if (is_osr_method()) {

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,6 +35,7 @@
#include "gc/shared/space.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,13 +42,7 @@ class ParMarkFromRootsClosure;
// Decode the oop and call do_oop on it.
#define DO_OOP_WORK_DEFN \
void do_oop(oop obj); \
template <class T> inline void do_oop_work(T* p) { \
T heap_oop = oopDesc::load_heap_oop(p); \
if (!oopDesc::is_null(heap_oop)) { \
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); \
do_oop(obj); \
} \
}
template <class T> inline void do_oop_work(T* p);
// TODO: This duplication of the MetadataAwareOopClosure class is only needed
// because some CMS OopClosures derive from OopsInGenClosure. It would be
@ -131,8 +125,8 @@ class PushAndMarkClosure: public MetadataAwareOopClosure {
bool concurrent_precleaning);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { PushAndMarkClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
};
// In the parallel case, the bit map and the
@ -157,8 +151,8 @@ class ParPushAndMarkClosure: public MetadataAwareOopClosure {
OopTaskQueue* work_queue);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { ParPushAndMarkClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { ParPushAndMarkClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
};
// The non-parallel version (the parallel version appears further below).
@ -186,8 +180,8 @@ class MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
bool concurrent_precleaning);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
void set_freelistLock(Mutex* m) {
_freelistLock = m;
@ -220,8 +214,8 @@ class ParMarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
OopTaskQueue* work_queue);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
void trim_queue(uint size);
};
@ -249,8 +243,8 @@ class PushOrMarkClosure: public MetadataAwareOopClosure {
MarkFromRootsClosure* parent);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { PushOrMarkClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
// Deal with a stack overflow condition
void handle_stack_overflow(HeapWord* lost);
@ -287,8 +281,8 @@ class ParPushOrMarkClosure: public MetadataAwareOopClosure {
ParMarkFromRootsClosure* parent);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { ParPushOrMarkClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { ParPushOrMarkClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
// Deal with a stack overflow condition
void handle_stack_overflow(HeapWord* lost);
@ -318,8 +312,8 @@ class CMSKeepAliveClosure: public MetadataAwareOopClosure {
bool concurrent_precleaning() const { return _concurrent_precleaning; }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
};
class CMSInnerParMarkAndPushClosure: public MetadataAwareOopClosure {
@ -336,8 +330,8 @@ class CMSInnerParMarkAndPushClosure: public MetadataAwareOopClosure {
OopTaskQueue* work_queue);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
};
// A parallel (MT) version of the above, used when

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,21 +30,6 @@
#include "gc/shared/taskqueue.inline.hpp"
#include "oops/oop.inline.hpp"
// Trim our work_queue so its length is below max at return
inline void ParMarkRefsIntoAndScanClosure::trim_queue(uint max) {
while (_work_queue->size() > max) {
oop newOop;
if (_work_queue->pop_local(newOop)) {
assert(newOop->is_oop(), "Expected an oop");
assert(_bit_map->isMarked((HeapWord*)newOop),
"only grey objects on this stack");
// iterate over the oops in this oop, marking and pushing
// the ones in CMS heap (i.e. in _span).
newOop->oop_iterate(&_parPushAndMarkClosure);
}
}
}
// MetadataAwareOopClosure and MetadataAwareOopsInGenClosure are duplicated,
// until we get rid of OopsInGenClosure.
@ -61,4 +46,48 @@ inline void MetadataAwareOopsInGenClosure::do_cld_nv(ClassLoaderData* cld) {
cld->oops_do(_klass_closure._oop_closure, &_klass_closure, claim);
}
// Decode the oop and call do_oop on it.
#define DO_OOP_WORK_IMPL(cls) \
template <class T> void cls::do_oop_work(T* p) { \
T heap_oop = oopDesc::load_heap_oop(p); \
if (!oopDesc::is_null(heap_oop)) { \
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); \
do_oop(obj); \
} \
}
#define DO_OOP_WORK_NV_IMPL(cls) \
DO_OOP_WORK_IMPL(cls) \
void cls::do_oop_nv(oop* p) { cls::do_oop_work(p); } \
void cls::do_oop_nv(narrowOop* p) { cls::do_oop_work(p); }
DO_OOP_WORK_IMPL(MarkRefsIntoClosure)
DO_OOP_WORK_IMPL(ParMarkRefsIntoClosure)
DO_OOP_WORK_IMPL(MarkRefsIntoVerifyClosure)
DO_OOP_WORK_NV_IMPL(PushAndMarkClosure)
DO_OOP_WORK_NV_IMPL(ParPushAndMarkClosure)
DO_OOP_WORK_NV_IMPL(MarkRefsIntoAndScanClosure)
DO_OOP_WORK_NV_IMPL(ParMarkRefsIntoAndScanClosure)
// Trim our work_queue so its length is below max at return
inline void ParMarkRefsIntoAndScanClosure::trim_queue(uint max) {
while (_work_queue->size() > max) {
oop newOop;
if (_work_queue->pop_local(newOop)) {
assert(newOop->is_oop(), "Expected an oop");
assert(_bit_map->isMarked((HeapWord*)newOop),
"only grey objects on this stack");
// iterate over the oops in this oop, marking and pushing
// the ones in CMS heap (i.e. in _span).
newOop->oop_iterate(&_parPushAndMarkClosure);
}
}
}
DO_OOP_WORK_NV_IMPL(PushOrMarkClosure)
DO_OOP_WORK_NV_IMPL(ParPushOrMarkClosure)
DO_OOP_WORK_NV_IMPL(CMSKeepAliveClosure)
DO_OOP_WORK_NV_IMPL(CMSInnerParMarkAndPushClosure)
DO_OOP_WORK_IMPL(CMSParKeepAliveClosure)
#endif // SHARE_VM_GC_CMS_CMSOOPCLOSURES_INLINE_HPP
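
For reference, DO_OOP_WORK_NV_IMPL(PushAndMarkClosure) expands, per the two macros above, to:

template <class T> void PushAndMarkClosure::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    do_oop(obj);
  }
}
void PushAndMarkClosure::do_oop_nv(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
void PushAndMarkClosure::do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }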

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -219,6 +219,10 @@ void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
}
}
size_t CompactibleFreeListSpace::obj_size(const HeapWord* addr) const {
return adjustObjectSize(oop(addr)->size());
}
void CompactibleFreeListSpace::resetIndexedFreeListArray() {
for (size_t i = 1; i < IndexSetSize; i++) {
assert(_indexedFreeList[i].size() == (size_t) i,

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -313,9 +313,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
return adjustObjectSize(size);
}
inline size_t obj_size(const HeapWord* addr) const {
return adjustObjectSize(oop(addr)->size());
}
inline size_t obj_size(const HeapWord* addr) const;
protected:
// Reset the indexed free list to its initial empty condition.

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1517,6 +1517,8 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
gch->pre_full_gc_dump(gc_timer);
GCTraceTime(Trace, gc) t("CMS:MSC");
// Temporarily widen the span of the weak reference processing to
@ -1593,6 +1595,8 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
_inter_sweep_timer.reset();
_inter_sweep_timer.start();
gch->post_full_gc_dump(gc_timer);
gc_timer->register_gc_end();
gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
@ -3324,6 +3328,8 @@ class ParConcMarkingClosure: public MetadataAwareOopClosure {
}
};
DO_OOP_WORK_IMPL(ParConcMarkingClosure)
// Grey object scanning during work stealing phase --
// the salient assumption here is that any references
// that are in these stolen objects being scanned must

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,7 +29,7 @@
#include "gc/cms/parOopClosures.inline.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/ageTable.hpp"
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/copyFailedInfo.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTimer.hpp"
@ -414,7 +414,7 @@ void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st) {
}
void ParScanThreadStateSet::print_taskqueue_stats() {
if (!develop_log_is_enabled(Trace, gc, task, stats)) {
if (!log_develop_is_enabled(Trace, gc, task, stats)) {
return;
}
LogHandle(gc, task, stats) log;

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2010, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,6 +34,31 @@
/////////////////////////////////////////////////////////////////////////
PromotedObject* PromotedObject::next() const {
assert(!((FreeChunk*)this)->is_free(), "Error");
PromotedObject* res;
if (UseCompressedOops) {
// The next pointer is a compressed oop stored in the top 32 bits
res = (PromotedObject*)oopDesc::decode_heap_oop(_data._narrow_next);
} else {
res = (PromotedObject*)(_next & next_mask);
}
assert(oop(res)->is_oop_or_null(true /* ignore mark word */), "Expected an oop or NULL at " PTR_FORMAT, p2i(oop(res)));
return res;
}
inline void PromotedObject::setNext(PromotedObject* x) {
assert(((intptr_t)x & ~next_mask) == 0, "Conflict in bit usage, "
"or insufficient alignment of objects");
if (UseCompressedOops) {
assert(_data._narrow_next == 0, "Overwrite?");
_data._narrow_next = oopDesc::encode_heap_oop(oop(x));
} else {
_next |= (intptr_t)x;
}
assert(!((FreeChunk*)this)->is_free(), "Error");
}
//////////////////////////////////////////////////////////////////////////////
// We go over the list of promoted objects, removing each from the list,
// and applying the closure (this may, in turn, add more elements to
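
The moved next()/setNext() pair encodes the link field two ways: without compressed oops the pointer and a few low flag bits share _next, while with compressed oops the link lives as a narrowOop in the other union member. A sketch of the layout the asserts are defending (mask names from the surrounding code, bit widths illustrative):

// !UseCompressedOops:  _next = [ next pointer bits | flag bits (~next_mask) ]
//   read:  (PromotedObject*)(_next & next_mask)  // strip the flag bits
//   write: _next |= (intptr_t)x;                 // x's low bits must be zero,
//                                                // hence the alignment assert
// UseCompressedOops:   _data._narrow_next holds the link as a narrowOop
//   read:  oopDesc::decode_heap_oop(_data._narrow_next)
//   write: oopDesc::encode_heap_oop(oop(x))      // after asserting it was 0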

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2010, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -64,29 +64,8 @@ class PromotedObject VALUE_OBJ_CLASS_SPEC {
Data _data;
};
public:
inline PromotedObject* next() const {
assert(!((FreeChunk*)this)->is_free(), "Error");
PromotedObject* res;
if (UseCompressedOops) {
// The next pointer is a compressed oop stored in the top 32 bits
res = (PromotedObject*)oopDesc::decode_heap_oop(_data._narrow_next);
} else {
res = (PromotedObject*)(_next & next_mask);
}
assert(oop(res)->is_oop_or_null(true /* ignore mark word */), "Expected an oop or NULL at " PTR_FORMAT, p2i(oop(res)));
return res;
}
inline void setNext(PromotedObject* x) {
assert(((intptr_t)x & ~next_mask) == 0, "Conflict in bit usage, "
"or insufficient alignment of objects");
if (UseCompressedOops) {
assert(_data._narrow_next == 0, "Overwrite?");
_data._narrow_next = oopDesc::encode_heap_oop(oop(x));
} else {
_next |= (intptr_t)x;
}
assert(!((FreeChunk*)this)->is_free(), "Error");
}
PromotedObject* next() const;
void setNext(PromotedObject* x);
inline void setPromotedMark() {
_next |= promoted_mask;
assert(!((FreeChunk*)this)->is_free(), "Error");

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -59,10 +59,10 @@ ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *nex
_monitor = DirtyCardQ_CBL_mon;
}
initialize();
create_and_start();
// set name
set_name("G1 Refine#%d", worker_id);
create_and_start();
}
void ConcurrentG1RefineThread::initialize() {
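
The reorder above is a small initialization-order fix: create_and_start() can schedule the new thread immediately, so any state the thread reads, here its name, has to be published first. Sketched:

set_name("G1 Refine#%d", worker_id);  // publish everything the thread reads...
create_and_start();                   // ...before it can possibly start running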

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,7 +43,7 @@
SurrogateLockerThread*
ConcurrentMarkThread::_slt = NULL;
ConcurrentMarkThread::ConcurrentMarkThread(ConcurrentMark* cm) :
ConcurrentMarkThread::ConcurrentMarkThread(G1ConcurrentMark* cm) :
ConcurrentGCThread(),
_cm(cm),
_state(Idle),
@ -56,10 +56,10 @@ ConcurrentMarkThread::ConcurrentMarkThread(ConcurrentMark* cm) :
class CMCheckpointRootsFinalClosure: public VoidClosure {
ConcurrentMark* _cm;
G1ConcurrentMark* _cm;
public:
CMCheckpointRootsFinalClosure(ConcurrentMark* cm) :
CMCheckpointRootsFinalClosure(G1ConcurrentMark* cm) :
_cm(cm) {}
void do_void(){
@ -68,10 +68,10 @@ public:
};
class CMCleanUp: public VoidClosure {
ConcurrentMark* _cm;
G1ConcurrentMark* _cm;
public:
CMCleanUp(ConcurrentMark* cm) :
CMCleanUp(G1ConcurrentMark* cm) :
_cm(cm) {}
void do_void(){
@ -92,10 +92,10 @@ void ConcurrentMarkThread::delay_to_keep_mmu(G1CollectorPolicy* g1_policy, bool
}
class GCConcPhaseTimer : StackObj {
ConcurrentMark* _cm;
G1ConcurrentMark* _cm;
public:
GCConcPhaseTimer(ConcurrentMark* cm, const char* title) : _cm(cm) {
GCConcPhaseTimer(G1ConcurrentMark* cm, const char* title) : _cm(cm) {
_cm->register_concurrent_phase_start(title);
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,10 +27,10 @@
#include "gc/shared/concurrentGCThread.hpp"
// The Concurrent Mark GC Thread triggers the parallel CMConcurrentMarkingTasks
// The Concurrent Mark GC Thread triggers the parallel G1CMConcurrentMarkingTasks
// as well as handling various marking cleanup.
class ConcurrentMark;
class G1ConcurrentMark;
class G1CollectorPolicy;
class ConcurrentMarkThread: public ConcurrentGCThread {
@ -45,7 +45,7 @@ class ConcurrentMarkThread: public ConcurrentGCThread {
virtual void run();
private:
ConcurrentMark* _cm;
G1ConcurrentMark* _cm;
enum State {
Idle,
@ -65,7 +65,7 @@ class ConcurrentMarkThread: public ConcurrentGCThread {
public:
// Constructor
ConcurrentMarkThread(ConcurrentMark* cm);
ConcurrentMarkThread(G1ConcurrentMark* cm);
static void makeSurrogateLockerThread(TRAPS);
static SurrogateLockerThread* slt() { return _slt; }
@ -75,7 +75,7 @@ class ConcurrentMarkThread: public ConcurrentGCThread {
// Marking virtual time so far this thread and concurrent marking tasks.
double vtime_mark_accum();
ConcurrentMark* cm() { return _cm; }
G1ConcurrentMark* cm() { return _cm; }
void set_idle() { assert(_state != Started, "must not be starting a new cycle"); _state = Idle; }
bool idle() { return _state == Idle; }

View file

@ -25,8 +25,8 @@
#ifndef SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_INLINE_HPP
#define SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_INLINE_HPP
#include "gc/g1/concurrentMark.hpp"
#include "gc/g1/concurrentMarkThread.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
// Total virtual time so far.
inline double ConcurrentMarkThread::vtime_accum() {

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -438,7 +438,7 @@ void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
// If an end alignment was requested, insert filler objects.
if (end_alignment_in_bytes != 0) {
HeapWord* currtop = _allocation_region->top();
HeapWord* newtop = (HeapWord*)align_pointer_up(currtop, end_alignment_in_bytes);
HeapWord* newtop = (HeapWord*)align_ptr_up(currtop, end_alignment_in_bytes);
size_t fill_size = pointer_delta(newtop, currtop);
if (fill_size != 0) {
if (fill_size < CollectedHeap::min_fill_size()) {
@ -447,7 +447,7 @@ void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
// region boundary because the max supported alignment is smaller than the min
// region size, and because the allocation code never leaves space smaller than
// the min_fill_size at the top of the current allocation region.
newtop = (HeapWord*)align_pointer_up(currtop + CollectedHeap::min_fill_size(),
newtop = (HeapWord*)align_ptr_up(currtop + CollectedHeap::min_fill_size(),
end_alignment_in_bytes);
fill_size = pointer_delta(newtop, currtop);
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -74,10 +74,16 @@ class CodeRootSetTable : public Hashtable<nmethod*, mtGC> {
static size_t static_mem_size() {
return sizeof(_purge_list);
}
size_t mem_size();
};
CodeRootSetTable* volatile CodeRootSetTable::_purge_list = NULL;
size_t CodeRootSetTable::mem_size() {
return sizeof(CodeRootSetTable) + (entry_size() * number_of_entries()) + (sizeof(HashtableBucket<mtGC>) * table_size());
}
CodeRootSetTable::Entry* CodeRootSetTable::new_entry(nmethod* nm) {
unsigned int hash = compute_hash(nm);
Entry* entry = (Entry*) new_entry_free_list();
@ -232,7 +238,6 @@ void G1CodeRootSet::move_to_large() {
OrderAccess::release_store_ptr(&_table, temp);
}
void G1CodeRootSet::purge() {
CodeRootSetTable::purge();
}
@ -247,12 +252,13 @@ void G1CodeRootSet::add(nmethod* method) {
allocate_small_table();
}
added = _table->add(method);
if (added) {
if (_length == Threshold) {
move_to_large();
}
if (added) {
++_length;
}
assert(_length == (size_t)_table->number_of_entries(), "sizes should match");
}
bool G1CodeRootSet::remove(nmethod* method) {
@ -266,11 +272,13 @@ bool G1CodeRootSet::remove(nmethod* method) {
clear();
}
}
assert((_length == 0 && _table == NULL) ||
(_length == (size_t)_table->number_of_entries()), "sizes should match");
return removed;
}
bool G1CodeRootSet::contains(nmethod* method) {
CodeRootSetTable* table = load_acquire_table();
CodeRootSetTable* table = load_acquire_table(); // contains() may be called outside of lock, so ensure mem sync.
if (table != NULL) {
return table->contains(method);
}
@ -284,8 +292,7 @@ void G1CodeRootSet::clear() {
}
size_t G1CodeRootSet::mem_size() {
return sizeof(*this) +
(_table != NULL ? sizeof(CodeRootSetTable) + _table->entry_size() * _length : 0);
return sizeof(*this) + (_table != NULL ? _table->mem_size() : 0);
}
void G1CodeRootSet::nmethods_do(CodeBlobClosure* blk) const {
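
The new CodeRootSetTable::mem_size() also corrects G1CodeRootSet::mem_size(), which previously ignored the bucket array and used the cached _length rather than the table's own entry count. The estimate now breaks down as:

// old: sizeof(CodeRootSetTable) + entry_size() * _length
// new: sizeof(CodeRootSetTable)
//      + entry_size() * number_of_entries()           // the hash entries
//      + sizeof(HashtableBucket<mtGC>) * table_size() // the bucket array itself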

File diff suppressed because it is too large

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,11 +25,11 @@
#ifndef SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP
#define SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP
#include "gc/g1/concurrentMark.hpp"
#include "gc/g1/evacuationInfo.hpp"
#include "gc/g1/g1AllocationContext.hpp"
#include "gc/g1/g1BiasedArray.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1HRPrinter.hpp"
#include "gc/g1/g1InCSetState.hpp"
#include "gc/g1/g1MonitoringSupport.hpp"
@ -68,7 +68,7 @@ class Space;
class G1CollectorPolicy;
class G1RemSet;
class HeapRegionRemSetIterator;
class ConcurrentMark;
class G1ConcurrentMark;
class ConcurrentMarkThread;
class ConcurrentG1Refine;
class ConcurrentGCTimer;
@ -82,6 +82,7 @@ class Ticks;
class WorkGang;
class G1Allocator;
class G1ArchiveAllocator;
class G1HeapVerifier;
typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
@ -118,6 +119,7 @@ class G1CollectedHeap : public CollectedHeap {
friend class VMStructs;
friend class MutatorAllocRegion;
friend class G1GCAllocRegion;
friend class G1HeapVerifier;
// Closures used in implementation.
friend class G1ParScanThreadState;
@ -181,6 +183,9 @@ private:
// Manages all allocations with regions except humongous object allocations.
G1Allocator* _allocator;
// Manages all heap verification.
G1HeapVerifier* _verifier;
// Outside of GC pauses, the number of bytes used in all regions other
// than the current allocation region(s).
size_t _summary_bytes_used;
@ -286,12 +291,6 @@ private:
size_t size,
size_t translation_factor);
double verify(bool guard, const char* msg);
void verify_before_gc();
void verify_after_gc();
void log_gc_footer(jlong pause_time_counter);
void trace_heap(GCWhen::Type when, const GCTracer* tracer);
void process_weak_jni_handles();
@ -527,6 +526,10 @@ public:
return _allocator;
}
G1HeapVerifier* verifier() {
return _verifier;
}
G1MonitoringSupport* g1mm() {
assert(_g1mm != NULL, "should have been initialized");
return _g1mm;
@ -768,7 +771,7 @@ protected:
void abandon_collection_set(HeapRegion* cs_head);
// The concurrent marker (and the thread it runs in.)
ConcurrentMark* _cm;
G1ConcurrentMark* _cm;
ConcurrentMarkThread* _cmThread;
// The concurrent refiner.
@ -1056,54 +1059,6 @@ public:
// The number of regions that are not completely free.
uint num_used_regions() const { return num_regions() - num_free_regions(); }
void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
void verify_dirty_young_regions() PRODUCT_RETURN;
#ifndef PRODUCT
// Make sure that the given bitmap has no marked objects in the
// range [from,limit). If it does, print an error message and return
// false. Otherwise, just return true. bitmap_name should be "prev"
// or "next".
bool verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
HeapWord* from, HeapWord* limit);
// Verify that the prev / next bitmap range [tams,end) for the given
// region has no marks. Return true if all is well, false if errors
// are detected.
bool verify_bitmaps(const char* caller, HeapRegion* hr);
#endif // PRODUCT
// If G1VerifyBitmaps is set, verify that the marking bitmaps for
// the given region do not have any spurious marks. If errors are
// detected, print appropriate error messages and crash.
void check_bitmaps(const char* caller, HeapRegion* hr) PRODUCT_RETURN;
// If G1VerifyBitmaps is set, verify that the marking bitmaps do not
// have any spurious marks. If errors are detected, print
// appropriate error messages and crash.
void check_bitmaps(const char* caller) PRODUCT_RETURN;
// Do sanity check on the contents of the in-cset fast test table.
bool check_cset_fast_test() PRODUCT_RETURN_( return true; );
// verify_region_sets() performs verification over the region
// lists. It will be compiled in the product code to be used when
// necessary (i.e., during heap verification).
void verify_region_sets();
// verify_region_sets_optional() is planted in the code for
// list verification in non-product builds (and it can be enabled in
// product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
#if HEAP_REGION_SET_FORCE_VERIFY
void verify_region_sets_optional() {
verify_region_sets();
}
#else // HEAP_REGION_SET_FORCE_VERIFY
void verify_region_sets_optional() { }
#endif // HEAP_REGION_SET_FORCE_VERIFY
#ifdef ASSERT
bool is_on_master_free_list(HeapRegion* hr) {
return _hrm.is_free(hr);
@ -1425,12 +1380,7 @@ public:
inline bool is_obj_ill(const oop obj) const;
bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
bool is_marked(oop obj, VerifyOption vo);
const char* top_at_mark_start_str(VerifyOption vo);
ConcurrentMark* concurrent_mark() const { return _cm; }
G1ConcurrentMark* concurrent_mark() const { return _cm; }
// Refinement
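
Heap verification moves wholesale from G1CollectedHeap onto the new G1HeapVerifier, reachable through the verifier() accessor added above; call sites presumably change shape like this (method name taken from the removed declarations):

// before the move:
//   g1h->verify_region_sets_optional();
// after the move (assumed call shape):
//   g1h->verifier()->verify_region_sets_optional();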

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,10 +25,10 @@
#ifndef SHARE_VM_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
#include "gc/g1/concurrentMark.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,10 +24,10 @@
#include "precompiled.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/concurrentMark.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1IHOPControl.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/heapRegion.inline.hpp"
@ -117,15 +117,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
_rs_lengths_prediction(0),
_max_survivor_regions(0),
_eden_used_bytes_before_gc(0),
_survivor_used_bytes_before_gc(0),
_old_used_bytes_before_gc(0),
_humongous_used_bytes_before_gc(0),
_heap_used_bytes_before_gc(0),
_metaspace_used_bytes_before_gc(0),
_eden_capacity_bytes_before_gc(0),
_heap_capacity_bytes_before_gc(0),
_eden_cset_region_length(0),
_survivor_cset_region_length(0),
_old_cset_region_length(0),
@ -745,22 +736,6 @@ void G1CollectorPolicy::update_rs_lengths_prediction(size_t prediction) {
}
}
HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
bool is_tlab,
bool* gc_overhead_limit_was_exceeded) {
guarantee(false, "Not using this policy feature yet.");
return NULL;
}
// This method controls how a collector handles one or more
// of its generations being fully allocated.
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
bool is_tlab) {
guarantee(false, "Not using this policy feature yet.");
return NULL;
}
#ifndef PRODUCT
bool G1CollectorPolicy::verify_young_ages() {
HeapRegion* head = _g1->young_list()->first_region();
@ -809,7 +784,6 @@ G1CollectorPolicy::verify_young_ages(HeapRegion* head,
void G1CollectorPolicy::record_full_collection_start() {
_full_collection_start_sec = os::elapsedTime();
record_heap_size_info_at_start(true /* full */);
// Release the future to-space so that it is available for compaction into.
collector_state()->set_full_collection(true);
}
@ -871,8 +845,6 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
_trace_young_gen_time_data.record_start_collection(s_w_t_ms);
_stop_world_start = 0.0;
record_heap_size_info_at_start(false /* full */);
phase_times()->record_cur_collection_start_sec(start_time_sec);
_pending_cards = _g1->pending_card_num();
@ -987,7 +959,7 @@ bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc
// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001
void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned) {
void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) {
double end_time_sec = os::elapsedTime();
size_t cur_used_bytes = _g1->used();
@ -1138,7 +1110,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t
}
_rs_length_diff_seq->add((double) rs_length_diff);
size_t freed_bytes = _heap_used_bytes_before_gc - cur_used_bytes;
size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes;
size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes;
double cost_per_byte_ms = 0.0;
@ -1260,51 +1232,8 @@ void G1CollectorPolicy::report_ihop_statistics() {
_ihop_control->print();
}
#define EXT_SIZE_FORMAT "%.1f%s"
#define EXT_SIZE_PARAMS(bytes) \
byte_size_in_proper_unit((double)(bytes)), \
proper_unit_for_byte_size((bytes))
void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
YoungList* young_list = _g1->young_list();
_eden_used_bytes_before_gc = young_list->eden_used_bytes();
_survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
_heap_capacity_bytes_before_gc = _g1->capacity();
_old_used_bytes_before_gc = _g1->old_regions_count() * HeapRegion::GrainBytes;
_humongous_used_bytes_before_gc = _g1->humongous_regions_count() * HeapRegion::GrainBytes;
_heap_used_bytes_before_gc = _g1->used();
_eden_capacity_bytes_before_gc = (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
_metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
}
void G1CollectorPolicy::print_detailed_heap_transition() const {
YoungList* young_list = _g1->young_list();
size_t eden_used_bytes_after_gc = young_list->eden_used_bytes();
size_t survivor_used_bytes_after_gc = young_list->survivor_used_bytes();
size_t heap_used_bytes_after_gc = _g1->used();
size_t old_used_bytes_after_gc = _g1->old_regions_count() * HeapRegion::GrainBytes;
size_t humongous_used_bytes_after_gc = _g1->humongous_regions_count() * HeapRegion::GrainBytes;
size_t heap_capacity_bytes_after_gc = _g1->capacity();
size_t eden_capacity_bytes_after_gc =
(_young_list_target_length * HeapRegion::GrainBytes) - survivor_used_bytes_after_gc;
size_t survivor_capacity_bytes_after_gc = _max_survivor_regions * HeapRegion::GrainBytes;
log_info(gc, heap)("Eden: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
_eden_used_bytes_before_gc / K, eden_used_bytes_after_gc /K, eden_capacity_bytes_after_gc /K);
log_info(gc, heap)("Survivor: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
_survivor_used_bytes_before_gc / K, survivor_used_bytes_after_gc /K, survivor_capacity_bytes_after_gc /K);
log_info(gc, heap)("Old: " SIZE_FORMAT "K->" SIZE_FORMAT "K",
_old_used_bytes_before_gc / K, old_used_bytes_after_gc /K);
log_info(gc, heap)("Humongous: " SIZE_FORMAT "K->" SIZE_FORMAT "K",
_humongous_used_bytes_before_gc / K, humongous_used_bytes_after_gc /K);
MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc);
}
void G1CollectorPolicy::print_phases(double pause_time_ms) {
phase_times()->print(pause_time_ms);
void G1CollectorPolicy::print_phases() {
phase_times()->print();
}
void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
@ -2310,7 +2239,7 @@ void G1CollectorPolicy::finalize_old_cset_part(double time_remaining_ms) {
// whether we added any apparently expensive regions or not, to
// avoid generating output per region.
log_debug(gc, ergo, cset)("Added expensive regions to CSet (old CSet region num not reached min)."
"old %u regions, expensive: %u regions, min %u regions, remaining time: %1.2fms",
"old: %u regions, expensive: %u regions, min: %u regions, remaining time: %1.2fms",
old_cset_region_length(), expensive_region_num, min_old_cset_length, time_remaining_ms);
}
@ -2319,7 +2248,7 @@ void G1CollectorPolicy::finalize_old_cset_part(double time_remaining_ms) {
stop_incremental_cset_building();
log_debug(gc, ergo, cset)("Finish choosing CSet. old %u regions, predicted old region time: %1.2fms, time remaining: %1.2f",
log_debug(gc, ergo, cset)("Finish choosing CSet. old: %u regions, predicted old region time: %1.2fms, time remaining: %1.2f",
old_cset_region_length(), predicted_old_time_ms, time_remaining_ms);
double non_young_end_time_sec = os::elapsedTime();
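
With the *_before_gc fields gone from the policy, record_collection_pause_end() takes the pre-GC heap occupancy as a parameter and derives the copy cost from it. Plugging illustrative numbers into the new accounting:

// freed_bytes  = heap_used_bytes_before_gc - cur_used_bytes
// copied_bytes = _collection_set_bytes_used_before - freed_bytes
//
// e.g. 512M used before the pause, 400M after -> 112M freed; a 128M
// collection set then implies 16M survived the pause and was copied.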

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -621,22 +621,13 @@ public:
// Create jstat counters for the policy.
virtual void initialize_gc_policy_counters();
virtual HeapWord* mem_allocate_work(size_t size,
bool is_tlab,
bool* gc_overhead_limit_was_exceeded);
// This method controls how a collector handles one or more
// of its generations being fully allocated.
virtual HeapWord* satisfy_failed_allocation(size_t size,
bool is_tlab);
bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
bool about_to_start_mixed_phase() const;
// Record the start and end of an evacuation pause.
void record_collection_pause_start(double start_time_sec);
void record_collection_pause_end(double pause_time_ms, size_t cards_scanned);
void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);
// Record the start and end of a full collection.
void record_full_collection_start();
@ -654,15 +645,7 @@ public:
void record_concurrent_mark_cleanup_end();
void record_concurrent_mark_cleanup_completed();
// Records the information about the heap size for reporting in
// print_detailed_heap_transition
void record_heap_size_info_at_start(bool full);
// Print heap sizing transition (with less and more detail).
void print_detailed_heap_transition() const;
virtual void print_phases(double pause_time_ms);
virtual void print_phases();
void record_stop_world_start();
void record_concurrent_pause();
@ -825,16 +808,6 @@ private:
// The value of _heap_bytes_before_gc is also used to calculate
// the cost of copying.
size_t _eden_used_bytes_before_gc; // Eden occupancy before GC
size_t _survivor_used_bytes_before_gc; // Survivor occupancy before GC
size_t _old_used_bytes_before_gc; // Old occupancy before GC
size_t _humongous_used_bytes_before_gc; // Humongous occupancy before GC
size_t _heap_used_bytes_before_gc; // Heap occupancy before GC
size_t _metaspace_used_bytes_before_gc; // Metaspace occupancy before GC
size_t _eden_capacity_bytes_before_gc; // Eden capacity before GC
size_t _heap_capacity_bytes_before_gc; // Heap capacity before GC
// The amount of survivor regions after a collection.
uint _recorded_survivor_regions;
// List of survivor regions.
@ -846,6 +819,10 @@ private:
public:
uint tenuring_threshold() const { return _tenuring_threshold; }
uint max_survivor_regions() {
return _max_survivor_regions;
}
static const uint REGIONS_UNLIMITED = (uint) -1;
uint max_regions(InCSetState dest) const {

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,8 +22,8 @@
*
*/
#ifndef SHARE_VM_GC_G1_CONCURRENTMARK_HPP
#define SHARE_VM_GC_G1_CONCURRENTMARK_HPP
#ifndef SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP
#define SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP
#include "classfile/javaClasses.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp"
@ -31,11 +31,11 @@
#include "gc/shared/taskqueue.hpp"
class G1CollectedHeap;
class CMBitMap;
class CMTask;
class ConcurrentMark;
typedef GenericTaskQueue<oop, mtGC> CMTaskQueue;
typedef GenericTaskQueueSet<CMTaskQueue, mtGC> CMTaskQueueSet;
class G1CMBitMap;
class G1CMTask;
class G1ConcurrentMark;
typedef GenericTaskQueue<oop, mtGC> G1CMTaskQueue;
typedef GenericTaskQueueSet<G1CMTaskQueue, mtGC> G1CMTaskQueueSet;
// Closure used by CM during concurrent reference discovery
// and reference processing (during remarking) to determine
@ -54,7 +54,7 @@ class G1CMIsAliveClosure: public BoolObjectClosure {
// A generic CM bit map. This is essentially a wrapper around the BitMap
// class, with one bit per (1<<_shifter) HeapWords.
class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
class G1CMBitMapRO VALUE_OBJ_CLASS_SPEC {
protected:
HeapWord* _bmStartWord; // base address of range covered by map
size_t _bmWordSize; // map size (in #HeapWords covered)
@ -63,7 +63,7 @@ class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
public:
// constructor
CMBitMapRO(int shifter);
G1CMBitMapRO(int shifter);
// inquiries
HeapWord* startWord() const { return _bmStartWord; }
@ -96,12 +96,7 @@ class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
}
// The argument addr should be the start address of a valid object
HeapWord* nextObject(HeapWord* addr) {
oop obj = (oop) addr;
HeapWord* res = addr + obj->size();
assert(offsetToHeapWord(heapWordToOffset(res)) == res, "sanity");
return res;
}
inline HeapWord* nextObject(HeapWord* addr);
void print_on_error(outputStream* st, const char* prefix) const;
@ -109,20 +104,20 @@ class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
NOT_PRODUCT(bool covers(MemRegion rs) const;)
};
class CMBitMapMappingChangedListener : public G1MappingChangedListener {
class G1CMBitMapMappingChangedListener : public G1MappingChangedListener {
private:
CMBitMap* _bm;
G1CMBitMap* _bm;
public:
CMBitMapMappingChangedListener() : _bm(NULL) {}
G1CMBitMapMappingChangedListener() : _bm(NULL) {}
void set_bitmap(CMBitMap* bm) { _bm = bm; }
void set_bitmap(G1CMBitMap* bm) { _bm = bm; }
virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
};
class CMBitMap : public CMBitMapRO {
class G1CMBitMap : public G1CMBitMapRO {
private:
CMBitMapMappingChangedListener _listener;
G1CMBitMapMappingChangedListener _listener;
public:
static size_t compute_size(size_t heap_size);
@@ -134,7 +129,7 @@ class CMBitMap : public CMBitMapRO {
return mark_distance();
}
CMBitMap() : CMBitMapRO(LogMinObjAlignment), _listener() { _listener.set_bitmap(this); }
G1CMBitMap() : G1CMBitMapRO(LogMinObjAlignment), _listener() { _listener.set_bitmap(this); }
// Initializes the underlying BitMap to cover the given area.
void initialize(MemRegion heap, G1RegionToSpaceMapper* storage);
@@ -151,9 +146,9 @@
};
// Represents a marking stack used by ConcurrentMarking in the G1 collector.
class CMMarkStack VALUE_OBJ_CLASS_SPEC {
class G1CMMarkStack VALUE_OBJ_CLASS_SPEC {
VirtualSpace _virtual_space; // Underlying backing store for actual stack
ConcurrentMark* _cm;
G1ConcurrentMark* _cm;
oop* _base; // bottom of stack
jint _index; // one more than last occupied index
jint _capacity; // max #elements
@@ -163,8 +158,8 @@ class CMMarkStack VALUE_OBJ_CLASS_SPEC {
bool _should_expand;
public:
CMMarkStack(ConcurrentMark* cm);
~CMMarkStack();
G1CMMarkStack(G1ConcurrentMark* cm);
~G1CMMarkStack();
bool allocate(size_t capacity);
@@ -225,19 +220,19 @@ class YoungList;
// Currently, we only support root region scanning once (at the start
// of the marking cycle) and the root regions are all the survivor
// regions populated during the initial-mark pause.
class CMRootRegions VALUE_OBJ_CLASS_SPEC {
class G1CMRootRegions VALUE_OBJ_CLASS_SPEC {
private:
YoungList* _young_list;
ConcurrentMark* _cm;
G1ConcurrentMark* _cm;
volatile bool _scan_in_progress;
volatile bool _should_abort;
HeapRegion* volatile _next_survivor;
public:
CMRootRegions();
G1CMRootRegions();
// We actually do most of the initialization in this method.
void init(G1CollectedHeap* g1h, ConcurrentMark* cm);
void init(G1CollectedHeap* g1h, G1ConcurrentMark* cm);
// Reset the claiming / scanning of the root regions.
void prepare_for_scan();
@@ -265,19 +260,19 @@ public:
class ConcurrentMarkThread;
class ConcurrentMark: public CHeapObj<mtGC> {
friend class CMMarkStack;
class G1ConcurrentMark: public CHeapObj<mtGC> {
friend class ConcurrentMarkThread;
friend class CMTask;
friend class CMBitMapClosure;
friend class CMRemarkTask;
friend class CMConcurrentMarkingTask;
friend class G1ParNoteEndTask;
friend class CalcLiveObjectsClosure;
friend class G1CMRefProcTaskProxy;
friend class G1CMRefProcTaskExecutor;
friend class G1CMKeepAliveAndDrainClosure;
friend class G1CMDrainMarkingStackClosure;
friend class G1CMBitMapClosure;
friend class G1CMConcurrentMarkingTask;
friend class G1CMMarkStack;
friend class G1CMRemarkTask;
friend class G1CMTask;
protected:
ConcurrentMarkThread* _cmThread; // The thread doing the work
@@ -295,10 +290,10 @@ protected:
FreeRegionList _cleanup_list;
// Concurrent marking support structures
CMBitMap _markBitMap1;
CMBitMap _markBitMap2;
CMBitMapRO* _prevMarkBitMap; // Completed mark bitmap
CMBitMap* _nextMarkBitMap; // Under-construction mark bitmap
G1CMBitMap _markBitMap1;
G1CMBitMap _markBitMap2;
G1CMBitMapRO* _prevMarkBitMap; // Completed mark bitmap
G1CMBitMap* _nextMarkBitMap; // Under-construction mark bitmap
BitMap _region_bm;
BitMap _card_bm;
@@ -308,10 +303,10 @@ protected:
HeapWord* _heap_end;
// Root region tracking and claiming
CMRootRegions _root_regions;
G1CMRootRegions _root_regions;
// For gray objects
CMMarkStack _markStack; // Grey objects behind global finger
G1CMMarkStack _markStack; // Grey objects behind global finger
HeapWord* volatile _finger; // The global finger, region aligned,
// always points to the end of the
// last claimed region
@@ -319,8 +314,8 @@ protected:
// Marking tasks
uint _max_worker_id;// Maximum worker id
uint _active_tasks; // Task num currently active
CMTask** _tasks; // Task queue array (max_worker_id len)
CMTaskQueueSet* _task_queues; // Task queue set
G1CMTask** _tasks; // Task queue array (max_worker_id len)
G1CMTaskQueueSet* _task_queues; // Task queue set
ParallelTaskTerminator _terminator; // For termination
// Two sync barriers that are used to synchronize tasks when an
@@ -435,21 +430,21 @@ protected:
bool out_of_regions() { return _finger >= _heap_end; }
// Returns the task with the given id
CMTask* task(int id) {
G1CMTask* task(int id) {
assert(0 <= id && id < (int) _active_tasks,
"task id not within active bounds");
return _tasks[id];
}
// Returns the task queue with the given id
CMTaskQueue* task_queue(int id) {
G1CMTaskQueue* task_queue(int id) {
assert(0 <= id && id < (int) _active_tasks,
"task queue id not within active bounds");
return (CMTaskQueue*) _task_queues->queue(id);
return (G1CMTaskQueue*) _task_queues->queue(id);
}
// Returns the task queue set
CMTaskQueueSet* task_queues() { return _task_queues; }
G1CMTaskQueueSet* task_queues() { return _task_queues; }
// Access / manipulation of the overflow flag which is set to
// indicate that the global stack has overflown
@@ -507,7 +502,7 @@ public:
bool mark_stack_overflow() { return _markStack.overflow(); }
bool mark_stack_empty() { return _markStack.isEmpty(); }
CMRootRegions* root_regions() { return &_root_regions; }
G1CMRootRegions* root_regions() { return &_root_regions; }
bool concurrent_marking_in_progress() {
return _concurrent_marking_in_progress;
@@ -536,15 +531,15 @@ public:
// Attempts to steal an object from the task queues of other tasks
bool try_stealing(uint worker_id, int* hash_seed, oop& obj);
ConcurrentMark(G1CollectedHeap* g1h,
G1ConcurrentMark(G1CollectedHeap* g1h,
G1RegionToSpaceMapper* prev_bitmap_storage,
G1RegionToSpaceMapper* next_bitmap_storage);
~ConcurrentMark();
~G1ConcurrentMark();
ConcurrentMarkThread* cmThread() { return _cmThread; }
CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
CMBitMap* nextMarkBitMap() const { return _nextMarkBitMap; }
G1CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
G1CMBitMap* nextMarkBitMap() const { return _nextMarkBitMap; }
// Returns the number of GC threads to be used in a concurrent
// phase based on the number of GC threads being used in a STW
@@ -627,14 +622,7 @@ public:
// If marking is not in progress, it's a no-op.
void verify_no_cset_oops() PRODUCT_RETURN;
bool isPrevMarked(oop p) const {
assert(p != NULL && p->is_oop(), "expected an oop");
HeapWord* addr = (HeapWord*)p;
assert(addr >= _prevMarkBitMap->startWord() ||
addr < _prevMarkBitMap->endWord(), "in a region");
return _prevMarkBitMap->isMarked(addr);
}
inline bool isPrevMarked(oop p) const;
inline bool do_yield_check(uint worker_i = 0);
@@ -740,7 +728,7 @@ protected:
};
// A class representing a marking task.
class CMTask : public TerminatorTerminator {
class G1CMTask : public TerminatorTerminator {
private:
enum PrivateConstants {
// the regular clock call is called once the scanned words reaches
@@ -758,13 +746,13 @@ private:
uint _worker_id;
G1CollectedHeap* _g1h;
ConcurrentMark* _cm;
CMBitMap* _nextMarkBitMap;
G1ConcurrentMark* _cm;
G1CMBitMap* _nextMarkBitMap;
// the task queue of this task
CMTaskQueue* _task_queue;
G1CMTaskQueue* _task_queue;
private:
// the task queue set---needed for stealing
CMTaskQueueSet* _task_queues;
G1CMTaskQueueSet* _task_queues;
// indicates whether the task has been claimed---this is only for
// debugging purposes
bool _claimed;
@@ -881,7 +869,7 @@ private:
public:
// It resets the task; it should be called right at the beginning of
// a marking phase.
void reset(CMBitMap* _nextMarkBitMap);
void reset(G1CMBitMap* _nextMarkBitMap);
// it clears all the fields that correspond to a claimed region.
void clear_region_fields();
@@ -968,12 +956,12 @@ public:
_finger = new_finger;
}
CMTask(uint worker_id,
ConcurrentMark *cm,
G1CMTask(uint worker_id,
G1ConcurrentMark *cm,
size_t* marked_bytes,
BitMap* card_bm,
CMTaskQueue* task_queue,
CMTaskQueueSet* task_queues);
G1CMTaskQueue* task_queue,
G1CMTaskQueueSet* task_queues);
// it prints statistics associated with this task
void print_stats();
@@ -1033,4 +1021,4 @@ public:
~G1PrintRegionLivenessInfoClosure();
};
#endif // SHARE_VM_GC_G1_CONCURRENTMARK_HPP
#endif // SHARE_VM_GC_G1_G1CONCURRENTMARK_HPP

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,16 +22,16 @@
*
*/
#ifndef SHARE_VM_GC_G1_CONCURRENTMARK_INLINE_HPP
#define SHARE_VM_GC_G1_CONCURRENTMARK_INLINE_HPP
#ifndef SHARE_VM_GC_G1_G1CONCURRENTMARK_INLINE_HPP
#define SHARE_VM_GC_G1_G1CONCURRENTMARK_INLINE_HPP
#include "gc/g1/concurrentMark.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/shared/taskqueue.inline.hpp"
// Utility routine to set an exclusive range of cards on the given
// card liveness bitmap
inline void ConcurrentMark::set_card_bitmap_range(BitMap* card_bm,
inline void G1ConcurrentMark::set_card_bitmap_range(BitMap* card_bm,
BitMap::idx_t start_idx,
BitMap::idx_t end_idx,
bool is_par) {
@@ -67,7 +67,7 @@ inline void ConcurrentMark::set_card_bitmap_range(BitMap* card_bm,
// Returns the index in the liveness accounting card bitmap
// for the given address
inline BitMap::idx_t ConcurrentMark::card_bitmap_index_for(HeapWord* addr) {
inline BitMap::idx_t G1ConcurrentMark::card_bitmap_index_for(HeapWord* addr) {
// Below, the term "card num" means the result of shifting an address
// by the card shift -- address 0 corresponds to card number 0. One
// must subtract the card num of the bottom of the heap to obtain a
@@ -78,7 +78,7 @@ inline BitMap::idx_t ConcurrentMark::card_bitmap_index_for(HeapWord* addr) {
// Counts the given memory region in the given task/worker
// counting data structures.
inline void ConcurrentMark::count_region(MemRegion mr, HeapRegion* hr,
inline void G1ConcurrentMark::count_region(MemRegion mr, HeapRegion* hr,
size_t* marked_bytes_array,
BitMap* task_card_bm) {
G1CollectedHeap* g1h = _g1h;
@@ -115,7 +115,7 @@ inline void ConcurrentMark::count_region(MemRegion mr, HeapRegion* hr,
}
// Counts the given object in the given task/worker counting data structures.
inline void ConcurrentMark::count_object(oop obj,
inline void G1ConcurrentMark::count_object(oop obj,
HeapRegion* hr,
size_t* marked_bytes_array,
BitMap* task_card_bm,
@@ -135,7 +135,7 @@ inline void ConcurrentMark::count_object(oop obj,
// Attempts to mark the given object and, if successful, counts
// the object in the given task/worker counting structures.
inline bool ConcurrentMark::par_mark_and_count(oop obj,
inline bool G1ConcurrentMark::par_mark_and_count(oop obj,
HeapRegion* hr,
size_t* marked_bytes_array,
BitMap* task_card_bm) {
@@ -150,7 +150,7 @@ inline bool ConcurrentMark::par_mark_and_count(oop obj,
// Attempts to mark the given object and, if successful, counts
// the object in the task/worker counting structures for the
// given worker id.
inline bool ConcurrentMark::par_mark_and_count(oop obj,
inline bool G1ConcurrentMark::par_mark_and_count(oop obj,
size_t word_size,
HeapRegion* hr,
uint worker_id) {
@@ -163,7 +163,7 @@ inline bool ConcurrentMark::par_mark_and_count(oop obj,
return false;
}
inline bool CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) {
inline bool G1CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) {
HeapWord* start_addr = MAX2(startWord(), mr.start());
HeapWord* end_addr = MIN2(endWord(), mr.end());
@@ -185,6 +185,14 @@ inline bool CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) {
return true;
}
// The argument addr should be the start address of a valid object
HeapWord* G1CMBitMapRO::nextObject(HeapWord* addr) {
oop obj = (oop) addr;
HeapWord* res = addr + obj->size();
assert(offsetToHeapWord(heapWordToOffset(res)) == res, "sanity");
return res;
}
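(Aside: nextObject() steps to the following object simply by adding the current object's size to its address, which works because a parsable heap range is a dense sequence of objects. A self-contained sketch of that size-based walk over a fake heap, with illustrative types only:

#include <cstdio>
#include <cstddef>

struct FakeObj { size_t size_in_slots; };          // stand-in for oop::size()

// Visit each object header in [0, limit), advancing by the object's size.
static void walk(FakeObj* heap, size_t limit, void (*visit)(FakeObj*)) {
  for (size_t i = 0; i < limit; ) {
    FakeObj* obj = &heap[i];
    visit(obj);
    i += obj->size_in_slots;                       // res = addr + obj->size()
  }
}

static void print_obj(FakeObj* o) { printf("object of %zu slots\n", o->size_in_slots); }

int main() {
  FakeObj heap[6] = {};
  heap[0].size_in_slots = 2;                       // object A occupies slots 0..1
  heap[2].size_in_slots = 3;                       // object B occupies slots 2..4
  heap[5].size_in_slots = 1;                       // object C occupies slot 5
  walk(heap, 6, print_obj);
  return 0;
}
)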
#define check_mark(addr) \
assert(_bmStartWord <= (addr) && (addr) < (_bmStartWord + _bmWordSize), \
"outside underlying space?"); \
@@ -193,17 +201,17 @@ inline bool CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) {
" corresponding to " PTR_FORMAT " (%u)", \
p2i(this), p2i(addr), G1CollectedHeap::heap()->addr_to_region(addr));
inline void CMBitMap::mark(HeapWord* addr) {
inline void G1CMBitMap::mark(HeapWord* addr) {
check_mark(addr);
_bm.set_bit(heapWordToOffset(addr));
}
inline void CMBitMap::clear(HeapWord* addr) {
inline void G1CMBitMap::clear(HeapWord* addr) {
check_mark(addr);
_bm.clear_bit(heapWordToOffset(addr));
}
inline bool CMBitMap::parMark(HeapWord* addr) {
inline bool G1CMBitMap::parMark(HeapWord* addr) {
check_mark(addr);
return _bm.par_set_bit(heapWordToOffset(addr));
}
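(Aside: parMark() differs from mark() in that it sets the bit atomically and reports whether this caller flipped it, so exactly one of several racing marker threads "wins" an object. A sketch of that contract using std::atomic; this assumes par_set_bit has CAS-like semantics and is not the HotSpot BitMap code:

#include <atomic>
#include <cassert>
#include <cstdint>

static std::atomic<uint64_t> bitmap_word{0};

// Returns true only if this call changed the bit from 0 to 1.
static bool par_set_bit(unsigned idx) {
  const uint64_t mask = uint64_t(1) << idx;
  const uint64_t old = bitmap_word.fetch_or(mask, std::memory_order_relaxed);
  return (old & mask) == 0;
}

int main() {
  assert(par_set_bit(5));    // first marker sets the bit and may push the object
  assert(!par_set_bit(5));   // later markers see it already set and skip it
  return 0;
}
)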
@ -211,7 +219,7 @@ inline bool CMBitMap::parMark(HeapWord* addr) {
#undef check_mark
template<typename Fn>
inline void CMMarkStack::iterate(Fn fn) {
inline void G1CMMarkStack::iterate(Fn fn) {
assert(_saved_index == _index, "saved index: %d index: %d", _saved_index, _index);
for (int i = 0; i < _index; ++i) {
fn(_base[i]);
@@ -219,9 +227,9 @@ inline void CMMarkStack::iterate(Fn fn) {
}
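(Aside: because iterate() is a template over Fn, each call site gets a specialized, inlinable loop body instead of a virtual closure dispatch. A standalone sketch of the same pattern with a lambda and a simplified element type:

#include <cassert>

template <typename Fn>
static void iterate_stack(const int* base, int index, Fn fn) {
  for (int i = 0; i < index; ++i) {
    fn(base[i]);                  // fn(_base[i]) in the original
  }
}

int main() {
  const int stack[3] = {4, 5, 6};
  int sum = 0;
  iterate_stack(stack, 3, [&](int v) { sum += v; });
  assert(sum == 15);
  return 0;
}
)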
// It scans an object and visits its children.
inline void CMTask::scan_object(oop obj) { process_grey_object<true>(obj); }
inline void G1CMTask::scan_object(oop obj) { process_grey_object<true>(obj); }
inline void CMTask::push(oop obj) {
inline void G1CMTask::push(oop obj) {
HeapWord* objAddr = (HeapWord*) obj;
assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
assert(!_g1h->is_on_master_free_list(
@@ -242,7 +250,7 @@ inline void CMTask::push(oop obj) {
}
}
inline bool CMTask::is_below_finger(oop obj, HeapWord* global_finger) const {
inline bool G1CMTask::is_below_finger(oop obj, HeapWord* global_finger) const {
// If obj is above the global finger, then the mark bitmap scan
// will find it later, and no push is needed. Similarly, if we have
// a current region and obj is between the local finger and the
@@ -273,7 +281,7 @@ inline bool CMTask::is_below_finger(oop obj, HeapWord* global_finger) const {
}
template<bool scan>
inline void CMTask::process_grey_object(oop obj) {
inline void G1CMTask::process_grey_object(oop obj) {
assert(scan || obj->is_typeArray(), "Skipping scan of grey non-typeArray");
assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
@@ -288,10 +296,10 @@
inline void CMTask::make_reference_grey(oop obj, HeapRegion* hr) {
inline void G1CMTask::make_reference_grey(oop obj, HeapRegion* hr) {
if (_cm->par_mark_and_count(obj, hr, _marked_bytes_array, _card_bm)) {
// No OrderAccess:store_load() is needed. It is implicit in the
// CAS done in CMBitMap::parMark() call in the routine above.
// CAS done in G1CMBitMap::parMark() call in the routine above.
HeapWord* global_finger = _cm->finger();
// We only need to push a newly grey object on the mark
@@ -327,7 +335,7 @@ inline void CMTask::make_reference_grey(oop obj, HeapRegion* hr) {
}
}
inline void CMTask::deal_with_reference(oop obj) {
inline void G1CMTask::deal_with_reference(oop obj) {
increment_refs_reached();
HeapWord* objAddr = (HeapWord*) obj;
@@ -346,14 +354,23 @@
}
}
inline void ConcurrentMark::markPrev(oop p) {
inline void G1ConcurrentMark::markPrev(oop p) {
assert(!_prevMarkBitMap->isMarked((HeapWord*) p), "sanity");
// Note we are overriding the read-only view of the prev map here, via
// the cast.
((CMBitMap*)_prevMarkBitMap)->mark((HeapWord*) p);
((G1CMBitMap*)_prevMarkBitMap)->mark((HeapWord*) p);
}
inline void ConcurrentMark::grayRoot(oop obj, size_t word_size,
bool G1ConcurrentMark::isPrevMarked(oop p) const {
assert(p != NULL && p->is_oop(), "expected an oop");
HeapWord* addr = (HeapWord*)p;
assert(addr >= _prevMarkBitMap->startWord() &&
addr < _prevMarkBitMap->endWord(), "in a region");
return _prevMarkBitMap->isMarked(addr);
}
inline void G1ConcurrentMark::grayRoot(oop obj, size_t word_size,
uint worker_id, HeapRegion* hr) {
assert(obj != NULL, "pre-condition");
HeapWord* addr = (HeapWord*) obj;
@@ -374,4 +391,4 @@ inline void ConcurrentMark::grayRoot(oop obj, size_t word_size,
}
}
#endif // SHARE_VM_GC_G1_CONCURRENTMARK_INLINE_HPP
#endif // SHARE_VM_GC_G1_G1CONCURRENTMARK_INLINE_HPP

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,11 +23,12 @@
*/
#include "precompiled.hpp"
#include "gc/g1/concurrentMark.inline.hpp"
#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1EvacFailure.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1_globals.hpp"
#include "gc/g1/heapRegion.hpp"
@@ -61,7 +62,7 @@ public:
class RemoveSelfForwardPtrObjClosure: public ObjectClosure {
private:
G1CollectedHeap* _g1;
ConcurrentMark* _cm;
G1ConcurrentMark* _cm;
HeapRegion* _hr;
size_t _marked_bytes;
OopsInHeapRegionClosure *_update_rset_cl;
@@ -223,7 +224,7 @@ public:
if (hr->evacuation_failed()) {
hr->note_self_forwarding_removal_start(during_initial_mark,
during_conc_mark);
_g1h->check_bitmaps("Self-Forwarding Ptr Removal", hr);
_g1h->verifier()->check_bitmaps("Self-Forwarding Ptr Removal", hr);
// In the common case (i.e. when there is no evacuation
// failure) we make sure that the following is done when

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View file

@@ -30,6 +30,7 @@
#include "gc/g1/workerDataArray.inline.hpp"
#include "memory/allocation.hpp"
#include "logging/log.hpp"
#include "runtime/timer.hpp"
#include "runtime/os.hpp"
// Helper class for avoiding interleaved logging
@@ -125,7 +126,7 @@ G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
_gc_par_phases[StringDedupQueueFixup] = new WorkerDataArray<double>(max_gc_threads, "Queue Fixup:", true, 2);
_gc_par_phases[StringDedupTableFixup] = new WorkerDataArray<double>(max_gc_threads, "Table Fixup:", true, 2);
_gc_par_phases[RedirtyCards] = new WorkerDataArray<double>(max_gc_threads, "Parallel Redirty", true, 3);
_gc_par_phases[RedirtyCards] = new WorkerDataArray<double>(max_gc_threads, "Parallel Redirty:", true, 3);
_redirtied_cards = new WorkerDataArray<size_t>(max_gc_threads, "Redirtied Cards:", true, 3);
_gc_par_phases[RedirtyCards]->link_thread_work_items(_redirtied_cards);
}
@@ -133,6 +134,7 @@ G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) {
assert(active_gc_threads > 0, "The number of threads must be > 0");
assert(active_gc_threads <= _max_gc_threads, "The number of active threads must be <= the max number of threads");
_gc_start_counter = os::elapsed_counter();
_active_gc_threads = active_gc_threads;
_cur_expand_heap_time_ms = 0.0;
_external_accounted_time_ms = 0.0;
@@ -146,6 +148,7 @@ void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) {
}
void G1GCPhaseTimes::note_gc_end() {
_gc_pause_time_ms = TimeHelper::counter_to_millis(os::elapsed_counter() - _gc_start_counter);
for (uint i = 0; i < _active_gc_threads; i++) {
double worker_time = _gc_par_phases[GCWorkerEnd]->get(i) - _gc_par_phases[GCWorkerStart]->get(i);
record_time_secs(GCWorkerTotal, i , worker_time);
@@ -349,7 +352,7 @@ class G1GCParPhasePrinter : public StackObj {
}
};
void G1GCPhaseTimes::print(double pause_time_ms) {
void G1GCPhaseTimes::print() {
note_gc_end();
G1GCParPhasePrinter par_phase_printer(this);
@@ -373,7 +376,7 @@ void G1GCPhaseTimes::print(double pause_time_ms) {
}
print_stats(Indents[1], "Clear CT", _cur_clear_ct_time_ms);
print_stats(Indents[1], "Expand Heap After Collection", _cur_expand_heap_time_ms);
double misc_time_ms = pause_time_ms - accounted_time_ms();
double misc_time_ms = _gc_pause_time_ms - accounted_time_ms();
print_stats(Indents[1], "Other", misc_time_ms);
if (_cur_verify_before_time_ms > 0.0) {
print_stats(Indents[2], "Verify Before", _cur_verify_before_time_ms);
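(Aside: the change above moves pause-time bookkeeping into G1GCPhaseTimes itself: note_gc_start() records an elapsed counter, note_gc_end() converts the delta to milliseconds, and print() no longer needs the pause time passed in. A rough sketch of that pattern, with std::chrono standing in for os::elapsed_counter(); this is not the HotSpot implementation:

#include <chrono>
#include <cstdio>

class PhaseTimes {
  std::chrono::steady_clock::time_point _gc_start;
  double _gc_pause_time_ms = 0.0;
public:
  void note_gc_start() { _gc_start = std::chrono::steady_clock::now(); }
  void note_gc_end() {
    _gc_pause_time_ms = std::chrono::duration<double, std::milli>(
        std::chrono::steady_clock::now() - _gc_start).count();
  }
  void print(double accounted_ms) {
    // "Other" is whatever part of the pause the phases did not account for.
    printf("Other: %.3f ms\n", _gc_pause_time_ms - accounted_ms);
  }
};

int main() {
  PhaseTimes pt;
  pt.note_gc_start();
  pt.note_gc_end();
  pt.print(0.0);
  return 0;
}
)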

View file

@@ -36,6 +36,8 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
uint _active_gc_threads;
uint _max_gc_threads;
jlong _gc_start_counter;
double _gc_pause_time_ms;
public:
enum GCParPhases {
@@ -126,7 +128,7 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
public:
G1GCPhaseTimes(uint max_gc_threads);
void note_gc_start(uint active_gc_threads);
void print(double pause_time_ms);
void print();
// record the time a phase took in seconds
void record_time_secs(GCParPhases phase, uint worker_i, double secs);

View file

@@ -0,0 +1,122 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1HeapTransition.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
G1HeapTransition::Data::Data(G1CollectedHeap* g1_heap) {
YoungList* young_list = g1_heap->young_list();
_eden_length = young_list->eden_length();
_survivor_length = young_list->survivor_length();
_old_length = g1_heap->old_regions_count();
_humongous_length = g1_heap->humongous_regions_count();
_metaspace_used_bytes = MetaspaceAux::used_bytes();
}
G1HeapTransition::G1HeapTransition(G1CollectedHeap* g1_heap) : _g1_heap(g1_heap), _before(g1_heap) { }
struct DetailedUsage : public StackObj {
size_t _eden_used;
size_t _survivor_used;
size_t _old_used;
size_t _humongous_used;
size_t _eden_region_count;
size_t _survivor_region_count;
size_t _old_region_count;
size_t _humongous_region_count;
DetailedUsage() :
_eden_used(0), _survivor_used(0), _old_used(0), _humongous_used(0),
_eden_region_count(0), _survivor_region_count(0), _old_region_count(0), _humongous_region_count(0) {}
};
class DetailedUsageClosure: public HeapRegionClosure {
public:
DetailedUsage _usage;
bool doHeapRegion(HeapRegion* r) {
if (r->is_old()) {
_usage._old_used += r->used();
_usage._old_region_count++;
} else if (r->is_survivor()) {
_usage._survivor_used += r->used();
_usage._survivor_region_count++;
} else if (r->is_eden()) {
_usage._eden_used += r->used();
_usage._eden_region_count++;
} else if (r->is_humongous()) {
_usage._humongous_used += r->used();
_usage._humongous_region_count++;
} else {
assert(r->used() == 0, "Expected used to be 0 but it was " SIZE_FORMAT, r->used());
}
return false;
}
};
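(Aside: DetailedUsageClosure follows the usual HeapRegionClosure shape: heap_region_iterate() applies doHeapRegion() to every region, per-type counters accumulate inside the closure object, and returning false means "keep going". A minimal standalone rendering of that pattern, with a simplified region type rather than G1's iterator:

#include <cassert>
#include <cstddef>
#include <vector>

struct Region { bool is_old; size_t used; };

struct UsageClosure {
  size_t old_used = 0;
  size_t young_used = 0;
  // Returning true would abort the walk early, as some closures do on failure.
  bool do_region(const Region& r) {
    (r.is_old ? old_used : young_used) += r.used;
    return false;
  }
};

static void heap_region_iterate(const std::vector<Region>& heap, UsageClosure& cl) {
  for (const Region& r : heap) {
    if (cl.do_region(r)) {
      break;
    }
  }
}

int main() {
  std::vector<Region> heap = {{true, 100}, {false, 40}, {true, 60}};
  UsageClosure cl;
  heap_region_iterate(heap, cl);
  assert(cl.old_used == 160 && cl.young_used == 40);
  return 0;
}
)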
void G1HeapTransition::print() {
Data after(_g1_heap);
size_t eden_capacity_bytes_after_gc = _g1_heap->g1_policy()->young_list_target_length() - after._survivor_length;
size_t survivor_capacity_bytes_after_gc = _g1_heap->g1_policy()->max_survivor_regions();
DetailedUsage usage;
if (log_is_enabled(Trace, gc, heap)) {
DetailedUsageClosure blk;
_g1_heap->heap_region_iterate(&blk);
usage = blk._usage;
assert(usage._eden_region_count == 0, "Expected no eden regions, but got " SIZE_FORMAT, usage._eden_region_count);
assert(usage._survivor_region_count == after._survivor_length, "Expected survivors to be " SIZE_FORMAT " but was " SIZE_FORMAT,
after._survivor_length, usage._survivor_region_count);
assert(usage._old_region_count == after._old_length, "Expected old to be " SIZE_FORMAT " but was " SIZE_FORMAT,
after._old_length, usage._old_region_count);
assert(usage._humongous_region_count == after._humongous_length, "Expected humongous to be " SIZE_FORMAT " but was " SIZE_FORMAT,
after._humongous_length, usage._humongous_region_count);
}
log_info(gc, heap)("Eden regions: " SIZE_FORMAT "->" SIZE_FORMAT "(" SIZE_FORMAT ")",
_before._eden_length, after._eden_length, eden_capacity_bytes_after_gc);
log_trace(gc, heap)(" Used: 0K, Waste: 0K");
log_info(gc, heap)("Survivor regions: " SIZE_FORMAT "->" SIZE_FORMAT "(" SIZE_FORMAT ")",
_before._survivor_length, after._survivor_length, survivor_capacity_bytes_after_gc);
log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",
usage._survivor_used / K, ((after._survivor_length * HeapRegion::GrainBytes) - usage._survivor_used) / K);
log_info(gc, heap)("Old regions: " SIZE_FORMAT "->" SIZE_FORMAT,
_before._old_length, after._old_length);
log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",
usage._old_used / K, ((after._old_length * HeapRegion::GrainBytes) - usage._old_used) / K);
log_info(gc, heap)("Humongous regions: " SIZE_FORMAT "->" SIZE_FORMAT,
_before._humongous_length, after._humongous_length);
log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",
usage._humongous_used / K, ((after._humongous_length * HeapRegion::GrainBytes) - usage._humongous_used) / K);
MetaspaceAux::print_metaspace_change(_before._metaspace_used_bytes);
}
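(Aside: G1HeapTransition is a snapshot pair: the constructor captures a Data record before the pause and print() captures a second one afterwards, logging the "before->after" deltas. A compact standalone sketch of that idiom; the Heap counters here are invented placeholders, not real G1 queries:

#include <cstdio>
#include <cstddef>

struct Heap { size_t eden_regions; size_t survivor_regions; };

class HeapTransition {
  struct Data {
    size_t eden, survivor;
    explicit Data(const Heap& h) : eden(h.eden_regions), survivor(h.survivor_regions) {}
  };
  const Heap* _heap;
  Data _before;                          // captured at construction, before the GC
public:
  explicit HeapTransition(const Heap* h) : _heap(h), _before(*h) {}
  void print() const {
    Data after(*_heap);                  // captured now, after the GC
    printf("Eden regions: %zu->%zu\n", _before.eden, after.eden);
    printf("Survivor regions: %zu->%zu\n", _before.survivor, after.survivor);
  }
};

int main() {
  Heap heap = {24, 3};
  HeapTransition tr(&heap);
  heap.eden_regions = 0;                 // a young GC empties eden
  heap.survivor_regions = 4;
  tr.print();                            // Eden regions: 24->0, Survivor regions: 3->4
  return 0;
}
)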

View file

@@ -0,0 +1,52 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_G1_G1HEAPTRANSITION_HPP
#define SHARE_VM_GC_G1_G1HEAPTRANSITION_HPP
#include "gc/shared/plab.hpp"
class G1CollectedHeap;
class G1HeapTransition {
struct Data {
size_t _eden_length;
size_t _survivor_length;
size_t _old_length;
size_t _humongous_length;
size_t _metaspace_used_bytes;
Data(G1CollectedHeap* g1_heap);
};
G1CollectedHeap* _g1_heap;
Data _before;
public:
G1HeapTransition(G1CollectedHeap* g1_heap);
void print();
};
#endif // SHARE_VM_GC_G1_G1HEAPTRANSITION_HPP

View file

@@ -0,0 +1,731 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "logging/log.hpp"
#include "gc/g1/concurrentMarkThread.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1MarkSweep.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/youngList.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
class VerifyRootsClosure: public OopClosure {
private:
G1CollectedHeap* _g1h;
VerifyOption _vo;
bool _failures;
public:
// _vo == UsePrevMarking -> use "prev" marking information,
// _vo == UseNextMarking -> use "next" marking information,
// _vo == UseMarkWord -> use mark word from object header.
VerifyRootsClosure(VerifyOption vo) :
_g1h(G1CollectedHeap::heap()),
_vo(vo),
_failures(false) { }
bool failures() { return _failures; }
template <class T> void do_oop_nv(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if (_g1h->is_obj_dead_cond(obj, _vo)) {
LogHandle(gc, verify) log;
log.info("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
if (_vo == VerifyOption_G1UseMarkWord) {
log.info(" Mark word: " PTR_FORMAT, p2i(obj->mark()));
}
ResourceMark rm;
obj->print_on(log.info_stream());
_failures = true;
}
}
}
void do_oop(oop* p) { do_oop_nv(p); }
void do_oop(narrowOop* p) { do_oop_nv(p); }
};
class G1VerifyCodeRootOopClosure: public OopClosure {
G1CollectedHeap* _g1h;
OopClosure* _root_cl;
nmethod* _nm;
VerifyOption _vo;
bool _failures;
template <class T> void do_oop_work(T* p) {
// First verify that this root is live
_root_cl->do_oop(p);
if (!G1VerifyHeapRegionCodeRoots) {
// We're not verifying the code roots attached to heap region.
return;
}
// Don't check the code roots during marking verification in a full GC
if (_vo == VerifyOption_G1UseMarkWord) {
return;
}
// Now verify that the current nmethod (which contains p) is
// in the code root list of the heap region containing the
// object referenced by p.
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
// Now fetch the region containing the object
HeapRegion* hr = _g1h->heap_region_containing(obj);
HeapRegionRemSet* hrrs = hr->rem_set();
// Verify that the strong code root list for this region
// contains the nmethod
if (!hrrs->strong_code_roots_list_contains(_nm)) {
log_info(gc, verify)("Code root location " PTR_FORMAT " "
"from nmethod " PTR_FORMAT " not in strong "
"code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
_failures = true;
}
}
}
public:
G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
_g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}
void do_oop(oop* p) { do_oop_work(p); }
void do_oop(narrowOop* p) { do_oop_work(p); }
void set_nmethod(nmethod* nm) { _nm = nm; }
bool failures() { return _failures; }
};
class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {
G1VerifyCodeRootOopClosure* _oop_cl;
public:
G1VerifyCodeRootBlobClosure(G1VerifyCodeRootOopClosure* oop_cl):
_oop_cl(oop_cl) {}
void do_code_blob(CodeBlob* cb) {
nmethod* nm = cb->as_nmethod_or_null();
if (nm != NULL) {
_oop_cl->set_nmethod(nm);
nm->oops_do(_oop_cl);
}
}
};
class YoungRefCounterClosure : public OopClosure {
G1CollectedHeap* _g1h;
int _count;
public:
YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
void do_oop(oop* p) { if (_g1h->is_in_young(*p)) { _count++; } }
void do_oop(narrowOop* p) { ShouldNotReachHere(); }
int count() { return _count; }
void reset_count() { _count = 0; };
};
class VerifyKlassClosure: public KlassClosure {
YoungRefCounterClosure _young_ref_counter_closure;
OopClosure *_oop_closure;
public:
VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
void do_klass(Klass* k) {
k->oops_do(_oop_closure);
_young_ref_counter_closure.reset_count();
k->oops_do(&_young_ref_counter_closure);
if (_young_ref_counter_closure.count() > 0) {
guarantee(k->has_modified_oops(), "Klass " PTR_FORMAT " has young refs but is not dirty.", p2i(k));
}
}
};
class VerifyLivenessOopClosure: public OopClosure {
G1CollectedHeap* _g1h;
VerifyOption _vo;
public:
VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
_g1h(g1h), _vo(vo)
{ }
void do_oop(narrowOop *p) { do_oop_work(p); }
void do_oop( oop *p) { do_oop_work(p); }
template <class T> void do_oop_work(T *p) {
oop obj = oopDesc::load_decode_heap_oop(p);
guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
"Dead object referenced by a not dead object");
}
};
class VerifyObjsInRegionClosure: public ObjectClosure {
private:
G1CollectedHeap* _g1h;
size_t _live_bytes;
HeapRegion *_hr;
VerifyOption _vo;
public:
// _vo == UsePrevMarking -> use "prev" marking information,
// _vo == UseNextMarking -> use "next" marking information,
// _vo == UseMarkWord -> use mark word from object header.
VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo)
: _live_bytes(0), _hr(hr), _vo(vo) {
_g1h = G1CollectedHeap::heap();
}
void do_object(oop o) {
VerifyLivenessOopClosure isLive(_g1h, _vo);
assert(o != NULL, "Huh?");
if (!_g1h->is_obj_dead_cond(o, _vo)) {
// If the object is alive according to the mark word,
// then verify that the marking information agrees.
// Note we can't verify the contra-positive of the
// above: if the object is dead (according to the mark
// word), it may not be marked, or may have been marked
// but has since become dead, or may have been allocated
// since the last marking.
if (_vo == VerifyOption_G1UseMarkWord) {
guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");
}
o->oop_iterate_no_header(&isLive);
if (!_hr->obj_allocated_since_prev_marking(o)) {
size_t obj_size = o->size(); // Make sure we don't overflow
_live_bytes += (obj_size * HeapWordSize);
}
}
}
size_t live_bytes() { return _live_bytes; }
};
class VerifyArchiveOopClosure: public OopClosure {
public:
VerifyArchiveOopClosure(HeapRegion *hr) { }
void do_oop(narrowOop *p) { do_oop_work(p); }
void do_oop( oop *p) { do_oop_work(p); }
template <class T> void do_oop_work(T *p) {
oop obj = oopDesc::load_decode_heap_oop(p);
guarantee(obj == NULL || G1MarkSweep::in_archive_range(obj),
"Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT,
p2i(p), p2i(obj));
}
};
class VerifyArchiveRegionClosure: public ObjectClosure {
public:
VerifyArchiveRegionClosure(HeapRegion *hr) { }
// Verify that all object pointers are to archive regions.
void do_object(oop o) {
VerifyArchiveOopClosure checkOop(NULL);
assert(o != NULL, "Should not be here for NULL oops");
o->oop_iterate_no_header(&checkOop);
}
};
class VerifyRegionClosure: public HeapRegionClosure {
private:
bool _par;
VerifyOption _vo;
bool _failures;
public:
// _vo == UsePrevMarking -> use "prev" marking information,
// _vo == UseNextMarking -> use "next" marking information,
// _vo == UseMarkWord -> use mark word from object header.
VerifyRegionClosure(bool par, VerifyOption vo)
: _par(par),
_vo(vo),
_failures(false) {}
bool failures() {
return _failures;
}
bool doHeapRegion(HeapRegion* r) {
// For archive regions, verify there are no heap pointers to
// non-pinned regions. For all others, verify liveness info.
if (r->is_archive()) {
VerifyArchiveRegionClosure verify_oop_pointers(r);
r->object_iterate(&verify_oop_pointers);
return true;
}
if (!r->is_continues_humongous()) {
bool failures = false;
r->verify(_vo, &failures);
if (failures) {
_failures = true;
} else if (!r->is_starts_humongous()) {
VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
r->object_iterate(&not_dead_yet_cl);
if (_vo != VerifyOption_G1UseNextMarking) {
if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
log_info(gc, verify)("[" PTR_FORMAT "," PTR_FORMAT "] max_live_bytes " SIZE_FORMAT " < calculated " SIZE_FORMAT,
p2i(r->bottom()), p2i(r->end()), r->max_live_bytes(), not_dead_yet_cl.live_bytes());
_failures = true;
}
} else {
// When vo == UseNextMarking we cannot currently do a sanity
// check on the live bytes as the calculation has not been
// finalized yet.
}
}
}
return false; // keep iterating; any failure has been recorded in _failures
}
};
// This is the task used for parallel verification of the heap regions
class G1ParVerifyTask: public AbstractGangTask {
private:
G1CollectedHeap* _g1h;
VerifyOption _vo;
bool _failures;
HeapRegionClaimer _hrclaimer;
public:
// _vo == UsePrevMarking -> use "prev" marking information,
// _vo == UseNextMarking -> use "next" marking information,
// _vo == UseMarkWord -> use mark word from object header.
G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
AbstractGangTask("Parallel verify task"),
_g1h(g1h),
_vo(vo),
_failures(false),
_hrclaimer(g1h->workers()->active_workers()) {}
bool failures() {
return _failures;
}
void work(uint worker_id) {
HandleMark hm;
VerifyRegionClosure blk(true, _vo);
_g1h->heap_region_par_iterate(&blk, worker_id, &_hrclaimer);
if (blk.failures()) {
_failures = true;
}
}
};
void G1HeapVerifier::verify(VerifyOption vo) {
if (!SafepointSynchronize::is_at_safepoint()) {
log_info(gc, verify)("Skipping verification. Not at safepoint.");
}
assert(Thread::current()->is_VM_thread(),
"Expected to be executed serially by the VM thread at this point");
log_debug(gc, verify)("Roots");
VerifyRootsClosure rootsCl(vo);
VerifyKlassClosure klassCl(_g1h, &rootsCl);
CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
// We apply the relevant closures to all the oops in the
// system dictionary, class loader data graph, the string table
// and the nmethods in the code cache.
G1VerifyCodeRootOopClosure codeRootsCl(_g1h, &rootsCl, vo);
G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
{
G1RootProcessor root_processor(_g1h, 1);
root_processor.process_all_roots(&rootsCl,
&cldCl,
&blobsCl);
}
bool failures = rootsCl.failures() || codeRootsCl.failures();
if (vo != VerifyOption_G1UseMarkWord) {
// If we're verifying during a full GC then the region sets
// will have been torn down at the start of the GC. Therefore
// verifying the region sets will fail. So we only verify
// the region sets when not in a full GC.
log_debug(gc, verify)("HeapRegionSets");
verify_region_sets();
}
log_debug(gc, verify)("HeapRegions");
if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
G1ParVerifyTask task(_g1h, vo);
_g1h->workers()->run_task(&task);
if (task.failures()) {
failures = true;
}
} else {
VerifyRegionClosure blk(false, vo);
_g1h->heap_region_iterate(&blk);
if (blk.failures()) {
failures = true;
}
}
if (G1StringDedup::is_enabled()) {
log_debug(gc, verify)("StrDedup");
G1StringDedup::verify();
}
if (failures) {
log_info(gc, verify)("Heap after failed verification:");
// It helps to have the per-region information in the output to
// help us track down what went wrong. This is why we call
// print_extended_on() instead of print_on().
LogHandle(gc, verify) log;
ResourceMark rm;
_g1h->print_extended_on(log.info_stream());
}
guarantee(!failures, "there should not have been any failures");
}
// Heap region set verification
class VerifyRegionListsClosure : public HeapRegionClosure {
private:
HeapRegionSet* _old_set;
HeapRegionSet* _humongous_set;
HeapRegionManager* _hrm;
public:
uint _old_count;
uint _humongous_count;
uint _free_count;
VerifyRegionListsClosure(HeapRegionSet* old_set,
HeapRegionSet* humongous_set,
HeapRegionManager* hrm) :
_old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
_old_count(), _humongous_count(), _free_count(){ }
bool doHeapRegion(HeapRegion* hr) {
if (hr->is_young()) {
// TODO
} else if (hr->is_humongous()) {
assert(hr->containing_set() == _humongous_set, "Heap region %u is humongous but not in humongous set.", hr->hrm_index());
_humongous_count++;
} else if (hr->is_empty()) {
assert(_hrm->is_free(hr), "Heap region %u is empty but not on the free list.", hr->hrm_index());
_free_count++;
} else if (hr->is_old()) {
assert(hr->containing_set() == _old_set, "Heap region %u is old but not in the old set.", hr->hrm_index());
_old_count++;
} else {
// There are no other valid region types. Check for one invalid
// one we can identify: pinned without old or humongous set.
assert(!hr->is_pinned(), "Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index());
ShouldNotReachHere();
}
return false;
}
void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
guarantee(old_set->length() == _old_count, "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count);
guarantee(humongous_set->length() == _humongous_count, "Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count);
guarantee(free_list->num_free_regions() == _free_count, "Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count);
}
};
void G1HeapVerifier::verify_region_sets() {
assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
// First, check the explicit lists.
_g1h->_hrm.verify();
{
// Given that a concurrent operation might be adding regions to
// the secondary free list we have to take the lock before
// verifying it.
MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
_g1h->_secondary_free_list.verify_list();
}
// If a concurrent region freeing operation is in progress it will
// be difficult to correctly attribute any free regions we come
// across to the correct free list given that they might belong to
// one of several (free_list, secondary_free_list, any local lists,
// etc.). So, if that's the case we will skip the rest of the
// verification operation. Alternatively, waiting for the concurrent
// operation to complete will have a non-trivial effect on the GC's
// operation (no concurrent operation will last longer than the
// interval between two calls to verification) and it might hide
// any issues that we would like to catch during testing.
if (_g1h->free_regions_coming()) {
return;
}
// Make sure we append the secondary_free_list on the free_list so
// that all free regions we will come across can be safely
// attributed to the free_list.
_g1h->append_secondary_free_list_if_not_empty_with_lock();
// Finally, make sure that the region accounting in the lists is
// consistent with what we see in the heap.
VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_humongous_set, &_g1h->_hrm);
_g1h->heap_region_iterate(&cl);
cl.verify_counts(&_g1h->_old_set, &_g1h->_humongous_set, &_g1h->_hrm);
}
void G1HeapVerifier::prepare_for_verify() {
if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
_g1h->ensure_parsability(false);
}
_g1h->g1_rem_set()->prepare_for_verify();
}
double G1HeapVerifier::verify(bool guard, const char* msg) {
double verify_time_ms = 0.0;
if (guard && _g1h->total_collections() >= VerifyGCStartAt) {
double verify_start = os::elapsedTime();
HandleMark hm; // Discard invalid handles created during verification
prepare_for_verify();
Universe::verify(VerifyOption_G1UsePrevMarking, msg);
verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
}
return verify_time_ms;
}
void G1HeapVerifier::verify_before_gc() {
double verify_time_ms = verify(VerifyBeforeGC, "Before GC");
_g1h->g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
}
void G1HeapVerifier::verify_after_gc() {
double verify_time_ms = verify(VerifyAfterGC, "After GC");
_g1h->g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
}
#ifndef PRODUCT
class G1VerifyCardTableCleanup: public HeapRegionClosure {
G1HeapVerifier* _verifier;
G1SATBCardTableModRefBS* _ct_bs;
public:
G1VerifyCardTableCleanup(G1HeapVerifier* verifier, G1SATBCardTableModRefBS* ct_bs)
: _verifier(verifier), _ct_bs(ct_bs) { }
virtual bool doHeapRegion(HeapRegion* r) {
if (r->is_survivor()) {
_verifier->verify_dirty_region(r);
} else {
_verifier->verify_not_dirty_region(r);
}
return false;
}
};
void G1HeapVerifier::verify_card_table_cleanup() {
if (G1VerifyCTCleanup || VerifyAfterGC) {
G1VerifyCardTableCleanup cleanup_verifier(this, _g1h->g1_barrier_set());
_g1h->heap_region_iterate(&cleanup_verifier);
}
}
void G1HeapVerifier::verify_not_dirty_region(HeapRegion* hr) {
// All of the region should be clean.
G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
MemRegion mr(hr->bottom(), hr->end());
ct_bs->verify_not_dirty_region(mr);
}
void G1HeapVerifier::verify_dirty_region(HeapRegion* hr) {
// We cannot guarantee that [bottom(),end()] is dirty. Threads
// dirty allocated blocks as they allocate them. The thread that
// retires each region and replaces it with a new one will do a
// maximal allocation to fill in [pre_dummy_top(),end()] but will
// not dirty that area (one less thing to have to do while holding
// a lock). So we can only verify that [bottom(),pre_dummy_top()]
// is dirty.
G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
MemRegion mr(hr->bottom(), hr->pre_dummy_top());
if (hr->is_young()) {
ct_bs->verify_g1_young_region(mr);
} else {
ct_bs->verify_dirty_region(mr);
}
}
void G1HeapVerifier::verify_dirty_young_list(HeapRegion* head) {
G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
verify_dirty_region(hr);
}
}
void G1HeapVerifier::verify_dirty_young_regions() {
verify_dirty_young_list(_g1h->young_list()->first_region());
}
bool G1HeapVerifier::verify_no_bits_over_tams(const char* bitmap_name, G1CMBitMapRO* bitmap,
HeapWord* tams, HeapWord* end) {
guarantee(tams <= end,
"tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end));
HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
if (result < end) {
log_info(gc, verify)("## wrong marked address on %s bitmap: " PTR_FORMAT, bitmap_name, p2i(result));
log_info(gc, verify)("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT, bitmap_name, p2i(tams), p2i(end));
return false;
}
return true;
}
bool G1HeapVerifier::verify_bitmaps(const char* caller, HeapRegion* hr) {
G1CMBitMapRO* prev_bitmap = _g1h->concurrent_mark()->prevMarkBitMap();
G1CMBitMapRO* next_bitmap = (G1CMBitMapRO*) _g1h->concurrent_mark()->nextMarkBitMap();
HeapWord* bottom = hr->bottom();
HeapWord* ptams = hr->prev_top_at_mark_start();
HeapWord* ntams = hr->next_top_at_mark_start();
HeapWord* end = hr->end();
bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);
bool res_n = true;
// We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
// we do the clearing of the next bitmap concurrently. Thus, we can not verify the bitmap
// if we happen to be in that state.
if (_g1h->collector_state()->mark_in_progress() || !_g1h->_cmThread->in_progress()) {
res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
}
if (!res_p || !res_n) {
log_info(gc, verify)("#### Bitmap verification failed for " HR_FORMAT, HR_FORMAT_PARAMS(hr));
log_info(gc, verify)("#### Caller: %s", caller);
return false;
}
return true;
}
void G1HeapVerifier::check_bitmaps(const char* caller, HeapRegion* hr) {
if (!G1VerifyBitmaps) return;
guarantee(verify_bitmaps(caller, hr), "bitmap verification");
}
class G1VerifyBitmapClosure : public HeapRegionClosure {
private:
const char* _caller;
G1HeapVerifier* _verifier;
bool _failures;
public:
G1VerifyBitmapClosure(const char* caller, G1HeapVerifier* verifier) :
_caller(caller), _verifier(verifier), _failures(false) { }
bool failures() { return _failures; }
virtual bool doHeapRegion(HeapRegion* hr) {
bool result = _verifier->verify_bitmaps(_caller, hr);
if (!result) {
_failures = true;
}
return false;
}
};
void G1HeapVerifier::check_bitmaps(const char* caller) {
if (!G1VerifyBitmaps) return;
G1VerifyBitmapClosure cl(caller, this);
_g1h->heap_region_iterate(&cl);
guarantee(!cl.failures(), "bitmap verification");
}
class G1CheckCSetFastTableClosure : public HeapRegionClosure {
private:
bool _failures;
public:
G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }
virtual bool doHeapRegion(HeapRegion* hr) {
uint i = hr->hrm_index();
InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
if (hr->is_humongous()) {
if (hr->in_collection_set()) {
log_info(gc, verify)("## humongous region %u in CSet", i);
_failures = true;
return true;
}
if (cset_state.is_in_cset()) {
log_info(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for humongous region %u", cset_state.value(), i);
_failures = true;
return true;
}
if (hr->is_continues_humongous() && cset_state.is_humongous()) {
log_info(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for continues humongous region %u", cset_state.value(), i);
_failures = true;
return true;
}
} else {
if (cset_state.is_humongous()) {
log_info(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for non-humongous region %u", cset_state.value(), i);
_failures = true;
return true;
}
if (hr->in_collection_set() != cset_state.is_in_cset()) {
log_info(gc, verify)("## in CSet %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
hr->in_collection_set(), cset_state.value(), i);
_failures = true;
return true;
}
if (cset_state.is_in_cset()) {
if (hr->is_young() != (cset_state.is_young())) {
log_info(gc, verify)("## is_young %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
hr->is_young(), cset_state.value(), i);
_failures = true;
return true;
}
if (hr->is_old() != (cset_state.is_old())) {
log_info(gc, verify)("## is_old %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
hr->is_old(), cset_state.value(), i);
_failures = true;
return true;
}
}
}
return false;
}
bool failures() const { return _failures; }
};
bool G1HeapVerifier::check_cset_fast_test() {
G1CheckCSetFastTableClosure cl;
_g1h->_hrm.iterate(&cl);
return !cl.failures();
}
#endif // PRODUCT

View file

@@ -0,0 +1,115 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_G1_G1HEAPVERIFIER_HPP
#define SHARE_VM_GC_G1_G1HEAPVERIFIER_HPP
#include "gc/g1/heapRegionSet.hpp"
#include "memory/allocation.hpp"
#include "memory/universe.hpp"
class G1CollectedHeap;
class G1HeapVerifier : public CHeapObj<mtGC> {
private:
G1CollectedHeap* _g1h;
// verify_region_sets() performs verification over the region
// lists. It will be compiled in the product code to be used when
// necessary (i.e., during heap verification).
void verify_region_sets();
public:
G1HeapVerifier(G1CollectedHeap* heap) : _g1h(heap) { }
// Perform verification.
// vo == UsePrevMarking -> use "prev" marking information,
// vo == UseNextMarking -> use "next" marking information
// vo == UseMarkWord -> use the mark word in the object header
//
// NOTE: Only the "prev" marking information is guaranteed to be
// consistent most of the time, so most calls to this should use
// vo == UsePrevMarking.
// Currently, there is only one case where this is called with
// vo == UseNextMarking, which is to verify the "next" marking
// information at the end of remark.
// Currently there is only one place where this is called with
// vo == UseMarkWord, which is to verify the marking during a
// full GC.
void verify(VerifyOption vo);
// verify_region_sets_optional() is planted in the code for
// list verification in non-product builds (and it can be enabled in
// product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
#if HEAP_REGION_SET_FORCE_VERIFY
void verify_region_sets_optional() {
verify_region_sets();
}
#else // HEAP_REGION_SET_FORCE_VERIFY
void verify_region_sets_optional() { }
#endif // HEAP_REGION_SET_FORCE_VERIFY
void prepare_for_verify();
double verify(bool guard, const char* msg);
void verify_before_gc();
void verify_after_gc();
#ifndef PRODUCT
// Make sure that the given bitmap has no marked objects in the
// range [from,limit). If it does, print an error message and return
// false. Otherwise, just return true. bitmap_name should be "prev"
// or "next".
bool verify_no_bits_over_tams(const char* bitmap_name, G1CMBitMapRO* bitmap,
HeapWord* from, HeapWord* limit);
// Verify that the prev / next bitmap range [tams,end) for the given
// region has no marks. Return true if all is well, false if errors
// are detected.
bool verify_bitmaps(const char* caller, HeapRegion* hr);
#endif // PRODUCT
// If G1VerifyBitmaps is set, verify that the marking bitmaps for
// the given region do not have any spurious marks. If errors are
// detected, print appropriate error messages and crash.
void check_bitmaps(const char* caller, HeapRegion* hr) PRODUCT_RETURN;
// If G1VerifyBitmaps is set, verify that the marking bitmaps do not
// have any spurious marks. If errors are detected, print
// appropriate error messages and crash.
void check_bitmaps(const char* caller) PRODUCT_RETURN;
// Do sanity check on the contents of the in-cset fast test table.
bool check_cset_fast_test() PRODUCT_RETURN_( return true; );
void verify_card_table_cleanup() PRODUCT_RETURN;
void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
void verify_dirty_young_regions() PRODUCT_RETURN;
};
#endif // SHARE_VM_GC_G1_G1HEAPVERIFIER_HPP

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,12 +31,12 @@
class HeapRegion;
class G1CollectedHeap;
class G1RemSet;
class ConcurrentMark;
class G1ConcurrentMark;
class DirtyCardToOopClosure;
class CMBitMap;
class CMMarkStack;
class G1CMBitMap;
class G1CMMarkStack;
class G1ParScanThreadState;
class CMTask;
class G1CMTask;
class ReferenceProcessor;
// A class that scans oops in a given heap region (much as OopsInGenClosure
@@ -92,7 +92,7 @@ protected:
G1ParScanThreadState* _par_scan_state;
uint _worker_id; // Cache value from par_scan_state.
Klass* _scanned_klass;
ConcurrentMark* _cm;
G1ConcurrentMark* _cm;
// Mark the object if it's not already marked. This is used to mark
// objects pointed to by roots that are guaranteed not to move
@@ -170,12 +170,12 @@ public:
// Closure for iterating over object fields during concurrent marking
class G1CMOopClosure : public MetadataAwareOopClosure {
protected:
ConcurrentMark* _cm;
G1ConcurrentMark* _cm;
private:
G1CollectedHeap* _g1h;
CMTask* _task;
G1CMTask* _task;
public:
G1CMOopClosure(G1CollectedHeap* g1h, ConcurrentMark* cm, CMTask* task);
G1CMOopClosure(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1CMTask* task);
template <class T> void do_oop_nv(T* p);
virtual void do_oop( oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
@@ -185,10 +185,10 @@ public:
class G1RootRegionScanClosure : public MetadataAwareOopClosure {
private:
G1CollectedHeap* _g1h;
ConcurrentMark* _cm;
G1ConcurrentMark* _cm;
uint _worker_id;
public:
G1RootRegionScanClosure(G1CollectedHeap* g1h, ConcurrentMark* cm,
G1RootRegionScanClosure(G1CollectedHeap* g1h, G1ConcurrentMark* cm,
uint worker_id) :
_g1h(g1h), _cm(cm), _worker_id(worker_id) { }
template <class T> void do_oop_nv(T* p);
@@ -206,9 +206,9 @@ class G1Mux2Closure : public OopClosure {
OopClosure* _c2;
public:
G1Mux2Closure(OopClosure *c1, OopClosure *c2);
template <class T> void do_oop_work(T* p);
virtual void do_oop(oop* p) { do_oop_work(p); }
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
template <class T> inline void do_oop_work(T* p);
virtual inline void do_oop(oop* p);
virtual inline void do_oop(narrowOop* p);
};
// A closure that returns true if it is actually applied
@@ -219,9 +219,9 @@ class G1TriggerClosure : public OopClosure {
public:
G1TriggerClosure();
bool triggered() const { return _triggered; }
template <class T> void do_oop_work(T* p);
virtual void do_oop(oop* p) { do_oop_work(p); }
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
template <class T> inline void do_oop_work(T* p);
virtual inline void do_oop(oop* p);
virtual inline void do_oop(narrowOop* p);
};
// A closure which uses a triggering closure to determine
@@ -232,9 +232,9 @@ class G1InvokeIfNotTriggeredClosure: public OopClosure {
OopClosure* _oop_cl;
public:
G1InvokeIfNotTriggeredClosure(G1TriggerClosure* t, OopClosure* oc);
template <class T> void do_oop_work(T* p);
virtual void do_oop(oop* p) { do_oop_work(p); }
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
template <class T> inline void do_oop_work(T* p);
virtual inline void do_oop(oop* p);
virtual inline void do_oop(narrowOop* p);
};
class G1UpdateRSOrPushRefOopClosure: public OopClosure {
@@ -263,9 +263,9 @@ public:
return result;
}
template <class T> void do_oop_work(T* p);
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop(oop* p) { do_oop_work(p); }
template <class T> inline void do_oop_work(T* p);
virtual inline void do_oop(narrowOop* p);
virtual inline void do_oop(oop* p);
};
#endif // SHARE_VM_GC_G1_G1OOPCLOSURES_HPP
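
The hunks above move the do_oop bodies out of the class definitions and into g1OopClosures.inline.hpp, keeping the header cheap to include. A rough sketch of the pattern with a placeholder name (FooClosure is hypothetical, not a G1 class; OopClosure, oop and narrowOop are assumed to be the usual HotSpot types):

// foo.hpp -- cheap to include; no .inline.hpp dependencies leak out:
class FooClosure : public OopClosure {
  template <class T> inline void do_oop_work(T* p);
  virtual inline void do_oop(oop* p);
  virtual inline void do_oop(narrowOop* p);
};

// foo.inline.hpp -- included only by translation units that need the bodies:
template <class T>
inline void FooClosure::do_oop_work(T* p) {
  // ... the actual work, which may pull in heavy headers ...
}
inline void FooClosure::do_oop(oop* p)       { do_oop_work(p); }
inline void FooClosure::do_oop(narrowOop* p) { do_oop_work(p); }

Callers that include only foo.hpp still get correct virtual dispatch; translation units that include foo.inline.hpp give the compiler a chance to inline the bodies.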

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,8 +25,8 @@
#ifndef SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP
#define SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP
#include "gc/g1/concurrentMark.inline.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
@@ -141,12 +141,16 @@ inline void G1Mux2Closure::do_oop_work(T* p) {
_c1->do_oop(p);
_c2->do_oop(p);
}
void G1Mux2Closure::do_oop(oop* p) { do_oop_work(p); }
void G1Mux2Closure::do_oop(narrowOop* p) { do_oop_work(p); }
template <class T>
inline void G1TriggerClosure::do_oop_work(T* p) {
// Record that this closure was actually applied (triggered).
_triggered = true;
}
void G1TriggerClosure::do_oop(oop* p) { do_oop_work(p); }
void G1TriggerClosure::do_oop(narrowOop* p) { do_oop_work(p); }
template <class T>
inline void G1InvokeIfNotTriggeredClosure::do_oop_work(T* p) {
@@ -154,6 +158,8 @@ inline void G1InvokeIfNotTriggeredClosure::do_oop_work(T* p) {
_oop_cl->do_oop(p);
}
}
void G1InvokeIfNotTriggeredClosure::do_oop(oop* p) { do_oop_work(p); }
void G1InvokeIfNotTriggeredClosure::do_oop(narrowOop* p) { do_oop_work(p); }
template <class T>
inline void G1UpdateRSOrPushRefOopClosure::do_oop_work(T* p) {
@@ -224,6 +230,8 @@ inline void G1UpdateRSOrPushRefOopClosure::do_oop_work(T* p) {
to->rem_set()->add_reference(p, _worker_i);
}
}
void G1UpdateRSOrPushRefOopClosure::do_oop(oop* p) { do_oop_work(p); }
void G1UpdateRSOrPushRefOopClosure::do_oop(narrowOop* p) { do_oop_work(p); }
template <class T>
void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {

View file

@@ -34,6 +34,7 @@
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1RemSet.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"

View file

@@ -221,7 +221,7 @@ public:
class HRRSStatsIter: public HeapRegionClosure {
private:
RegionTypeCounter _young;
RegionTypeCounter _humonguous;
RegionTypeCounter _humongous;
RegionTypeCounter _free;
RegionTypeCounter _old;
RegionTypeCounter _all;
@@ -245,7 +245,7 @@ private:
HeapRegion* max_code_root_mem_sz_region() const { return _max_code_root_mem_sz_region; }
public:
HRRSStatsIter() : _all("All"), _young("Young"), _humonguous("Humonguous"),
HRRSStatsIter() : _all("All"), _young("Young"), _humongous("Humongous"),
_free("Free"), _old("Old"), _max_code_root_mem_sz_region(NULL), _max_rs_mem_sz_region(NULL),
_max_rs_mem_sz(0), _max_code_root_mem_sz(0)
{}
@@ -274,7 +274,7 @@ public:
} else if (r->is_young()) {
current = &_young;
} else if (r->is_humongous()) {
current = &_humonguous;
current = &_humongous;
} else if (r->is_old()) {
current = &_old;
} else {
@@ -287,7 +287,7 @@ public:
}
void print_summary_on(outputStream* out) {
RegionTypeCounter* counters[] = { &_young, &_humonguous, &_free, &_old, NULL };
RegionTypeCounter* counters[] = { &_young, &_humongous, &_free, &_old, NULL };
out->print_cr(" Current rem set statistics");
out->print_cr(" Total per region rem sets sizes = " SIZE_FORMAT "K."

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.inline.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/satbMarkQueue.hpp"
#include "gc/shared/memset_with_concurrent_readers.hpp"

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -58,20 +58,11 @@ public:
// We export this to make it available in cases where the static
// type of the barrier set is known. Note that it is non-virtual.
template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {
T heap_oop = oopDesc::load_heap_oop(field);
if (!oopDesc::is_null(heap_oop)) {
enqueue(oopDesc::decode_heap_oop(heap_oop));
}
}
template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal);
// These are the more general virtual versions.
virtual void write_ref_field_pre_work(oop* field, oop new_val) {
inline_write_ref_field_pre(field, new_val);
}
virtual void write_ref_field_pre_work(narrowOop* field, oop new_val) {
inline_write_ref_field_pre(field, new_val);
}
inline virtual void write_ref_field_pre_work(oop* field, oop new_val);
inline virtual void write_ref_field_pre_work(narrowOop* field, oop new_val);
virtual void write_ref_field_pre_work(void* field, oop new_val) {
guarantee(false, "Not needed");
}
@@ -98,15 +89,7 @@ public:
return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
}
void set_card_claimed(size_t card_index) {
jbyte val = _byte_map[card_index];
if (val == clean_card_val()) {
val = (jbyte)claimed_card_val();
} else {
val |= (jbyte)claimed_card_val();
}
_byte_map[card_index] = val;
}
inline void set_card_claimed(size_t card_index);
void verify_g1_young_region(MemRegion mr) PRODUCT_RETURN;
void g1_mark_as_young(const MemRegion& mr);

View file

@@ -0,0 +1,58 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_INLINE_HPP
#define SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_INLINE_HPP
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "oops/oop.inline.hpp"
// We export this to make it available in cases where the static
// type of the barrier set is known. Note that it is non-virtual.
template <class T> void G1SATBCardTableModRefBS::inline_write_ref_field_pre(T* field, oop newVal) {
T heap_oop = oopDesc::load_heap_oop(field);
if (!oopDesc::is_null(heap_oop)) {
enqueue(oopDesc::decode_heap_oop(heap_oop));
}
}
// These are the more general virtual versions.
void G1SATBCardTableModRefBS::write_ref_field_pre_work(oop* field, oop new_val) {
inline_write_ref_field_pre(field, new_val);
}
void G1SATBCardTableModRefBS::write_ref_field_pre_work(narrowOop* field, oop new_val) {
inline_write_ref_field_pre(field, new_val);
}
void G1SATBCardTableModRefBS::set_card_claimed(size_t card_index) {
jbyte val = _byte_map[card_index];
if (val == clean_card_val()) {
val = (jbyte)claimed_card_val();
} else {
val |= (jbyte)claimed_card_val();
}
_byte_map[card_index] = val;
}
#endif // SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_INLINE_HPP
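
The relocated pre-barrier above implements G1's snapshot-at-the-beginning (SATB) invariant: before a reference field is overwritten, the old value is enqueued so concurrent marking cannot lose an object that was reachable when marking began. A hedged usage sketch (store_with_satb_barrier is illustrative, not a HotSpot function):

// Illustrative only: how a reference store is sequenced around the barrier.
template <class T>
void store_with_satb_barrier(G1SATBCardTableModRefBS* bs, T* field, oop new_val) {
  bs->inline_write_ref_field_pre(field, new_val); // enqueue the old value, if any
  oopDesc::encode_store_heap_oop(field, new_val); // then perform the actual store
}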

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -61,9 +61,8 @@ G1YoungRemSetSamplingThread::G1YoungRemSetSamplingThread() : ConcurrentGCThread(
true,
Monitor::_safepoint_check_never);
create_and_start();
set_name("G1 Young RemSet Sampling");
create_and_start();
}
void G1YoungRemSetSamplingThread::sleep_before_next_cycle() {

View file

@@ -143,6 +143,7 @@ void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_hea
// The cast to int is safe, given that we've bounded region_size by
// MIN_REGION_SIZE and MAX_REGION_SIZE.
GrainBytes = region_size;
log_info(gc, heap)("Heap region size: " SIZE_FORMAT "M", GrainBytes / M);
guarantee(GrainWords == 0, "we should only set it once");
GrainWords = GrainBytes >> LogHeapWordSize;
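
The new log line reports the region size in megabytes, and GrainWords follows directly from GrainBytes. A worked example, assuming a 64-bit build where a HeapWord is 8 bytes (LogHeapWordSize == 3):

// GrainBytes = 1*M          = 1048576 bytes
// GrainWords = 1048576 >> 3 = 131072 HeapWords
// log output: "Heap region size: 1M"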

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -110,7 +110,9 @@ protected:
public:
HeapRegion* hr() const { return _hr; }
HeapRegion* hr() const {
return (HeapRegion*) OrderAccess::load_ptr_acquire(&_hr);
}
jint occupied() const {
// Overkill, but if we ever need it...
@@ -123,10 +125,12 @@ public:
set_next(NULL);
set_prev(NULL);
}
_hr = hr;
_collision_list_next = NULL;
_occupied = 0;
_bm.clear();
// Make sure that the bitmap clearing above has been finished before publishing
// this PRT to concurrent threads.
OrderAccess::release_store_ptr(&_hr, hr);
}
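
The acquire/release pair above is the standard safe-publication idiom: finish all initialization (including the bitmap clear), then release-store the pointer; readers acquire-load it and are guaranteed to observe the initialized state. The same shape in portable C++11, as a sketch (HotSpot itself uses its OrderAccess API, not std::atomic):

#include <atomic>

struct Payload { int data; };

std::atomic<Payload*> published{nullptr};

void init_and_publish(Payload* p) {
  p->data = 42;                                  // all initialization first...
  published.store(p, std::memory_order_release); // ...then publish the pointer
}

Payload* try_consume() {
  // Returns either nullptr or a fully-initialized Payload; this acquire
  // load pairs with the release store in init_and_publish().
  return published.load(std::memory_order_acquire);
}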
void add_reference(OopOrNarrowOopStar from) {
@@ -357,7 +361,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);
if (G1FromCardCache::contains_or_replace(tid, cur_hrm_ind, from_card)) {
assert(contains_reference(from), "We just added it!");
assert(contains_reference(from), "We just found " PTR_FORMAT " in the FromCardCache", p2i(from));
return;
}
@@ -367,7 +371,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
// If the region is already coarsened, return.
if (_coarse_map.at(from_hrm_ind)) {
assert(contains_reference(from), "We just added it!");
assert(contains_reference(from), "We just found " PTR_FORMAT " in the Coarse table", p2i(from));
return;
}
@@ -388,7 +392,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
"Must be in range.");
if (G1HRRSUseSparseTable &&
_sparse_table.add_card(from_hrm_ind, card_index)) {
assert(contains_reference_locked(from), "We just added it!");
assert(contains_reference_locked(from), "We just added " PTR_FORMAT " to the Sparse table", p2i(from));
return;
}
@@ -438,7 +442,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
assert(prt != NULL, "Inv");
prt->add_reference(from);
assert(contains_reference(from), "We just added it!");
assert(contains_reference(from), "We just added " PTR_FORMAT " to the PRT", p2i(from));
}
PerRegionTable*
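
The upgraded assertion messages above use HotSpot's printf-style assert together with PTR_FORMAT and p2i(). Roughly (simplified; the real definitions live in utilities/globalDefinitions.hpp and vary by platform):

// On 64-bit builds, approximately:
//   #define PTR_FORMAT "0x%016" PRIxPTR
//   inline intptr_t p2i(const void* p) { return (intptr_t)p; }
// so a failing check prints a stable, fixed-width pointer, e.g.:
//   assert failed: We just found 0x00007f3c2a001230 in the FromCardCache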
@@ -785,6 +789,9 @@ void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
assert(nm != NULL, "sanity");
assert((!CodeCache_lock->owned_by_self() || SafepointSynchronize::is_at_safepoint()),
"should call add_strong_code_root_locked instead. CodeCache_lock->owned_by_self(): %s, is_at_safepoint(): %s",
BOOL_TO_STR(CodeCache_lock->owned_by_self()), BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()));
// Optimistic unlocked contains-check
if (!_code_roots.contains(nm)) {
MutexLockerEx ml(&_m, Mutex::_no_safepoint_check_flag);
@@ -794,6 +801,12 @@ void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
void HeapRegionRemSet::add_strong_code_root_locked(nmethod* nm) {
assert(nm != NULL, "sanity");
assert((CodeCache_lock->owned_by_self() ||
(SafepointSynchronize::is_at_safepoint() &&
(_m.owned_by_self() || Thread::current()->is_VM_thread()))),
"not safely locked. CodeCache_lock->owned_by_self(): %s, is_at_safepoint(): %s, _m.owned_by_self(): %s, Thread::current()->is_VM_thread(): %s",
BOOL_TO_STR(CodeCache_lock->owned_by_self()), BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),
BOOL_TO_STR(_m.owned_by_self()), BOOL_TO_STR(Thread::current()->is_VM_thread()));
_code_roots.add(nm);
}
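
add_strong_code_root above performs an optimistic unlocked contains-check before taking the lock, which only works because G1's code-root set tolerates racy readers. The general double-checked shape in portable C++, as a sketch (names are illustrative, not HotSpot's):

#include <atomic>
#include <mutex>

class OnceRegistrar {
  std::atomic<bool> _registered{false};
  std::mutex _m;
public:
  void register_once() {
    if (_registered.load(std::memory_order_acquire)) {
      return;                                   // fast path: no lock taken
    }
    std::lock_guard<std::mutex> guard(_m);
    if (!_registered.load(std::memory_order_relaxed)) {
      // ... perform the one-time insertion under the lock ...
      _registered.store(true, std::memory_order_release);
    }
  }
};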

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
#include "gc/parallel/asPSYoungGen.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/gcUtil.hpp"
#include "gc/shared/spaceDecorator.hpp"

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psScavenge.hpp"

Some files were not shown because too many files have changed in this diff.