Bharadwaj Yadavalli 2013-04-04 17:01:34 -07:00
commit 4b9150eaf7
96 changed files with 3726 additions and 1144 deletions

View file

@ -1,5 +1,5 @@
# #
# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. # Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
# #
# This code is free software; you can redistribute it and/or modify it # This code is free software; you can redistribute it and/or modify it
@ -190,6 +190,17 @@ $(SIMPLE_DIRS):
# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile. # literal "$(GAMMADIR)/" suitable for inclusion in a Makefile.
gamma-path=$(subst $(GAMMADIR),\$$(GAMMADIR),$(call $(1),$(HS_COMMON_SRC)/$(2))) gamma-path=$(subst $(GAMMADIR),\$$(GAMMADIR),$(call $(1),$(HS_COMMON_SRC)/$(2)))
# This bit is needed to enable local rebuilds.
# Unless the makefile itself sets LP64, any environmental
# setting of LP64 will interfere with the build.
LP64_SETTING/32 = LP64 = \#empty
LP64_SETTING/64 = LP64 = 1
DATA_MODE/i486 = 32
DATA_MODE/amd64 = 64
DATA_MODE = $(DATA_MODE/$(BUILDARCH))
flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
@echo Creating $@ ... @echo Creating $@ ...
$(QUIETLY) ( \ $(QUIETLY) ( \
@ -212,6 +223,7 @@ flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
echo "HOTSPOT_BUILD_USER = $(HOTSPOT_BUILD_USER)"; \ echo "HOTSPOT_BUILD_USER = $(HOTSPOT_BUILD_USER)"; \
echo "HOTSPOT_VM_DISTRO = $(HOTSPOT_VM_DISTRO)"; \ echo "HOTSPOT_VM_DISTRO = $(HOTSPOT_VM_DISTRO)"; \
echo "OPENJDK = $(OPENJDK)"; \ echo "OPENJDK = $(OPENJDK)"; \
echo "$(LP64_SETTING/$(DATA_MODE))"; \
echo; \ echo; \
echo "# Used for platform dispatching"; \ echo "# Used for platform dispatching"; \
echo "TARGET_DEFINES = -DTARGET_OS_FAMILY_\$$(Platform_os_family)"; \ echo "TARGET_DEFINES = -DTARGET_OS_FAMILY_\$$(Platform_os_family)"; \

View file

@ -1,6 +1,6 @@
#! /bin/sh #! /bin/sh
# #
# Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved. # Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
# #
# This code is free software; you can redistribute it and/or modify it # This code is free software; you can redistribute it and/or modify it
@ -28,44 +28,38 @@
set -u set -u
if [ $# != 2 ]; then if [ $# -lt 1 ]; then
echo "Usage : $0 Build_Options Location" echo "Usage : $0 BuildTarget [LP64=1] [BuildOptions]"
echo "Build Options : debug or optimized or basicdebug or basic or clean" echo " Server VM | Client VM"
echo "Location : specify any workspace which has gamma sources" echo "BuildTarget : debug | debug1"
echo " fastdebug | fastdebug1"
echo " jvmg | jvmg1"
echo " optimized | optimized1"
echo " profiled | profiled1"
echo " product | product1"
exit 1
fi
if [ "${JAVA_HOME-}" = "" -o ! -d "${JAVA_HOME-}" -o ! -d ${JAVA_HOME-}/jre/lib/ ]; then
echo "JAVA_HOME needs to be set to a valid JDK path"
echo "JAVA_HOME: ${JAVA_HOME-}"
exit 1 exit 1
fi fi
# Just in case: # Just in case:
case ${JAVA_HOME} in JAVA_HOME=`( cd $JAVA_HOME; pwd )`
/*) true;;
?*) JAVA_HOME=`( cd $JAVA_HOME; pwd )`;;
esac
case `uname -m` in if [ "${ALT_BOOTDIR-}" = "" -o ! -d "${ALT_BOOTDIR-}" -o ! -d ${ALT_BOOTDIR-}/jre/lib/ ]; then
i386|i486|i586|i686) ALT_BOOTDIR=${JAVA_HOME}
mach=i386
;;
*)
echo "Unsupported machine: " `uname -m`
exit 1
;;
esac
if [ "${JAVA_HOME}" = "" -o ! -d "${JAVA_HOME}" -o ! -d ${JAVA_HOME}/jre/lib/${mach} ]; then
echo "JAVA_HOME needs to be set to a valid JDK path"
echo "ksh : export JAVA_HOME=/net/tetrasparc/export/gobi/JDK1.2_fcs_V/bsd"
echo "csh : setenv JAVA_HOME /net/tetrasparc/export/gobi/JDK1.2_fcs_V/bsd"
exit 1
fi fi
# build in current directory by default
if [ "${ALT_OUTPUTDIR-}" = "" -o ! -d "${ALT_OUTPUTDIR-}" ]; then
ALT_OUTPUTDIR=`(pwd)`
fi
LD_LIBRARY_PATH=${JAVA_HOME}/jre/lib/`uname -p`:\ HOTSPOT_SRC=`(dirname $0)`/..
${JAVA_HOME}/jre/lib/`uname -p`/native_threads:${LD_LIBRARY_PATH-.} HOTSPOT_SRC=`(cd ${HOTSPOT_SRC}; pwd)`
# This is necessary as long as we are using the old launcher
# with the new distribution format:
CLASSPATH=${JAVA_HOME}/jre/lib/rt.jar:${CLASSPATH-.}
for gm in gmake gnumake for gm in gmake gnumake
do do
@ -74,22 +68,25 @@ do
done done
: ${GNUMAKE:?'Cannot locate the gnumake program. Stop.'} : ${GNUMAKE:?'Cannot locate the gnumake program. Stop.'}
# quiet build by default
Quiet="MAKE_VERBOSE="
# no debug info by default
NoDebugInfo="ENABLE_FULL_DEBUG_SYMBOLS="
LANG=C
echo "### ENVIRONMENT SETTINGS:" echo "### ENVIRONMENT SETTINGS:"
export HOTSPOT_SRC ; echo "HOTSPOT_SRC=$HOTSPOT_SRC"
export JAVA_HOME ; echo "JAVA_HOME=$JAVA_HOME" export JAVA_HOME ; echo "JAVA_HOME=$JAVA_HOME"
export LD_LIBRARY_PATH ; echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH" export ALT_BOOTDIR ; echo "ALT_BOOTDIR=$ALT_BOOTDIR"
export CLASSPATH ; echo "CLASSPATH=$CLASSPATH" export ALT_OUTPUTDIR ; echo "ALT_OUTPUTDIR=$ALT_OUTPUTDIR"
export GNUMAKE ; echo "GNUMAKE=$GNUMAKE" export GNUMAKE ; echo "GNUMAKE=$GNUMAKE"
export LANG ; echo "LANG=$LANG"
echo "###" echo "###"
Build_Options=$1 BuildOptions="$Quiet $NoDebugInfo $*"
Location=$2
case ${Location} in
/*) true;;
?*) Location=`(cd ${Location}; pwd)`;;
esac
echo \ echo \
${GNUMAKE} -f ${Location}/make/bsd/Makefile $Build_Options GAMMADIR=${Location} ${GNUMAKE} -f ${HOTSPOT_SRC}/make/Makefile $BuildOptions GAMMADIR=${HOTSPOT_SRC}
${GNUMAKE} -f ${Location}/make/bsd/Makefile $Build_Options GAMMADIR=${Location} ${GNUMAKE} -f ${HOTSPOT_SRC}/make/Makefile $BuildOptions GAMMADIR=${HOTSPOT_SRC}
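The rewritten script drops the mandatory workspace argument: it now derives HOTSPOT_SRC from its own location (`dirname $0`/..), requires a valid JAVA_HOME, and defaults ALT_BOOTDIR to JAVA_HOME and ALT_OUTPUTDIR to the current directory, so a typical invocation is just `JAVA_HOME=/path/to/jdk ./build.sh product` (add `LP64=1` for a 64-bit VM, per the new usage text). It also drives the top-level make/Makefile rather than the platform Makefile directly.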

View file

@ -1,98 +0,0 @@
#! /bin/sh
#
# Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# Make sure the variable JAVA_HOME is set before running this script.
set -u
if [ $# != 2 ]; then
echo "Usage : $0 Build_Options Location"
echo "Build Options : debug or optimized or basicdebug or basic or clean"
echo "Location : specify any workspace which has gamma sources"
exit 1
fi
# Just in case:
case ${JAVA_HOME} in
/*) true;;
?*) JAVA_HOME=`( cd $JAVA_HOME; pwd )`;;
esac
case `uname -m` in
i386|i486|i586|i686)
mach=i386
;;
x86_64)
mach=amd64
;;
*)
echo "Unsupported machine: " `uname -m`
exit 1
;;
esac
if [ "${JAVA_HOME}" = "" -o ! -d "${JAVA_HOME}" -o ! -d ${JAVA_HOME}/jre/lib/${mach} ]; then
echo "JAVA_HOME needs to be set to a valid JDK path"
echo "ksh : export JAVA_HOME=/net/tetrasparc/export/gobi/JDK1.2_fcs_V/linux"
echo "csh : setenv JAVA_HOME /net/tetrasparc/export/gobi/JDK1.2_fcs_V/linux"
exit 1
fi
LD_LIBRARY_PATH=${JAVA_HOME}/jre/lib/`uname -p`:\
${JAVA_HOME}/jre/lib/`uname -p`/native_threads:${LD_LIBRARY_PATH-.}
# This is necessary as long as we are using the old launcher
# with the new distribution format:
CLASSPATH=${JAVA_HOME}/jre/lib/rt.jar:${CLASSPATH-.}
for gm in gmake gnumake
do
if [ "${GNUMAKE-}" != "" ]; then break; fi
($gm --version >/dev/null) 2>/dev/null && GNUMAKE=$gm
done
: ${GNUMAKE:?'Cannot locate the gnumake program. Stop.'}
echo "### ENVIRONMENT SETTINGS:"
export JAVA_HOME ; echo "JAVA_HOME=$JAVA_HOME"
export LD_LIBRARY_PATH ; echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
export CLASSPATH ; echo "CLASSPATH=$CLASSPATH"
export GNUMAKE ; echo "GNUMAKE=$GNUMAKE"
echo "###"
Build_Options=$1
Location=$2
case ${Location} in
/*) true;;
?*) Location=`(cd ${Location}; pwd)`;;
esac
echo \
${GNUMAKE} -f ${Location}/make/linux/Makefile $Build_Options GAMMADIR=${Location}
${GNUMAKE} -f ${Location}/make/linux/Makefile $Build_Options GAMMADIR=${Location}

View file

@ -1,5 +1,5 @@
# #
# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. # Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
# #
# This code is free software; you can redistribute it and/or modify it # This code is free software; you can redistribute it and/or modify it
@ -183,6 +183,19 @@ $(SIMPLE_DIRS):
# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile. # literal "$(GAMMADIR)/" suitable for inclusion in a Makefile.
gamma-path=$(subst $(GAMMADIR),\$$(GAMMADIR),$(call $(1),$(HS_COMMON_SRC)/$(2))) gamma-path=$(subst $(GAMMADIR),\$$(GAMMADIR),$(call $(1),$(HS_COMMON_SRC)/$(2)))
# This bit is needed to enable local rebuilds.
# Unless the makefile itself sets LP64, any environmental
# setting of LP64 will interfere with the build.
LP64_SETTING/32 = LP64 = \#empty
LP64_SETTING/64 = LP64 = 1
DATA_MODE/i486 = 32
DATA_MODE/sparc = 32
DATA_MODE/sparcv9 = 64
DATA_MODE/amd64 = 64
DATA_MODE = $(DATA_MODE/$(BUILDARCH))
flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
@echo Creating $@ ... @echo Creating $@ ...
$(QUIETLY) ( \ $(QUIETLY) ( \
@ -205,6 +218,7 @@ flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
echo "HOTSPOT_BUILD_USER = $(HOTSPOT_BUILD_USER)"; \ echo "HOTSPOT_BUILD_USER = $(HOTSPOT_BUILD_USER)"; \
echo "HOTSPOT_VM_DISTRO = $(HOTSPOT_VM_DISTRO)"; \ echo "HOTSPOT_VM_DISTRO = $(HOTSPOT_VM_DISTRO)"; \
echo "OPENJDK = $(OPENJDK)"; \ echo "OPENJDK = $(OPENJDK)"; \
echo "$(LP64_SETTING/$(DATA_MODE))"; \
echo; \ echo; \
echo "# Used for platform dispatching"; \ echo "# Used for platform dispatching"; \
echo "TARGET_DEFINES = -DTARGET_OS_FAMILY_\$$(Platform_os_family)"; \ echo "TARGET_DEFINES = -DTARGET_OS_FAMILY_\$$(Platform_os_family)"; \

View file

@ -1,127 +0,0 @@
#! /bin/sh
#
# Copyright (c) 1998, 2008, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# Make sure the variable JAVA_HOME is set before running this script.
set -u
usage() {
(
echo "Usage : $0 [-sb | -sbfast] config ws_path"
echo ""
echo "Where:"
echo " -sb ::= enable source browser info generation for"
echo " all configs during compilation"
echo ""
echo " -sbfast ::= enable source browser info generation for"
echo " all configs without compilation"
echo ""
echo " config ::= debug | debug1 | debugcore"
echo " fastdebug | fastdebug1 | fastdebugcore"
echo " jvmg | jvmg1 | jvmgcore"
echo " optimized | optimized1 | optimizedcore"
echo " profiled | profiled1 | profiledcore"
echo " product | product1 | productcore"
echo ""
echo " ws_path ::= path to HotSpot workspace"
) >&2
exit 1
}
# extract possible options
options=""
if [ $# -gt 2 ]; then
case "$1" in
-sb)
options="CFLAGS_BROWSE=-xsb"
shift
;;
-sbfast)
options="CFLAGS_BROWSE=-xsbfast"
shift
;;
*)
echo "Unknown option: '$1'" >&2
usage
;;
esac
fi
# should be just two args left at this point
if [ $# != 2 ]; then
usage
fi
# Just in case:
case ${JAVA_HOME} in
/*) true;;
?*) JAVA_HOME=`( cd $JAVA_HOME; pwd )`;;
esac
if [ "${JAVA_HOME}" = "" -o ! -d "${JAVA_HOME}" -o ! -d ${JAVA_HOME}/jre/lib/`uname -p` ]; then
echo "JAVA_HOME needs to be set to a valid JDK path"
echo "ksh : export JAVA_HOME=/net/tetrasparc/export/gobi/JDK1.2_fcs_V/solaris"
echo "csh : setenv JAVA_HOME /net/tetrasparc/export/gobi/JDK1.2_fcs_V/solaris"
exit 1
fi
LD_LIBRARY_PATH=${JAVA_HOME}/jre/lib/`uname -p`:\
${JAVA_HOME}/jre/lib/`uname -p`/native_threads:${LD_LIBRARY_PATH-.}
# This is necessary as long as we are using the old launcher
# with the new distribution format:
CLASSPATH=${JAVA_HOME}/jre/lib/rt.jar:${CLASSPATH-.}
for gm in gmake gnumake
do
if [ "${GNUMAKE-}" != "" ]; then break; fi
($gm --version >/dev/null) 2>/dev/null && GNUMAKE=$gm
done
: ${GNUMAKE:?'Cannot locate the gnumake program. Stop.'}
echo "### ENVIRONMENT SETTINGS:"
export JAVA_HOME ; echo "JAVA_HOME=$JAVA_HOME"
export LD_LIBRARY_PATH ; echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
export CLASSPATH ; echo "CLASSPATH=$CLASSPATH"
export GNUMAKE ; echo "GNUMAKE=$GNUMAKE"
echo "###"
config=$1
ws_path=$2
case ${ws_path} in
/*) true;;
?*) ws_path=`(cd ${ws_path}; pwd)`;;
esac
echo \
${GNUMAKE} -f ${ws_path}/make/solaris/Makefile \
$config GAMMADIR=${ws_path} $options
${GNUMAKE} -f ${ws_path}/make/solaris/Makefile \
$config GAMMADIR=${ws_path} $options

View file

@ -1,5 +1,5 @@
# #
# Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. # Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
# #
# This code is free software; you can redistribute it and/or modify it # This code is free software; you can redistribute it and/or modify it
@ -176,6 +176,19 @@ $(SIMPLE_DIRS):
# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile. # literal "$(GAMMADIR)/" suitable for inclusion in a Makefile.
gamma-path=$(subst $(GAMMADIR),\$$(GAMMADIR),$(call $(1),$(HS_COMMON_SRC)/$(2))) gamma-path=$(subst $(GAMMADIR),\$$(GAMMADIR),$(call $(1),$(HS_COMMON_SRC)/$(2)))
# This bit is needed to enable local rebuilds.
# Unless the makefile itself sets LP64, any environmental
# setting of LP64 will interfere with the build.
LP64_SETTING/32 = LP64 = \#empty
LP64_SETTING/64 = LP64 = 1
DATA_MODE/i486 = 32
DATA_MODE/sparc = 32
DATA_MODE/sparcv9 = 64
DATA_MODE/amd64 = 64
DATA_MODE = $(DATA_MODE/$(BUILDARCH))
flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
@echo Creating $@ ... @echo Creating $@ ...
$(QUIETLY) ( \ $(QUIETLY) ( \

View file

@ -51,6 +51,16 @@ RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
void RangeCheckStub::emit_code(LIR_Assembler* ce) { void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry); __ bind(_entry);
if (_info->deoptimize_on_exception()) {
address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
__ call(a, relocInfo::runtime_call_type);
__ delayed()->nop();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
return;
}
if (_index->is_register()) { if (_index->is_register()) {
__ mov(_index->as_register(), G4); __ mov(_index->as_register(), G4);
} else { } else {
@ -64,11 +74,22 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ delayed()->nop(); __ delayed()->nop();
ce->add_call_info_here(_info); ce->add_call_info_here(_info);
ce->verify_oop_map(_info); ce->verify_oop_map(_info);
#ifdef ASSERT debug_only(__ should_not_reach_here());
__ should_not_reach_here();
#endif
} }
PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
_info = new CodeEmitInfo(info);
}
void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
__ call(a, relocInfo::runtime_call_type);
__ delayed()->nop();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
}
void CounterOverflowStub::emit_code(LIR_Assembler* ce) { void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry); __ bind(_entry);
@ -99,10 +120,17 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) { void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
address a;
if (_info->deoptimize_on_exception()) {
// Deoptimize, do not throw the exception, because it is probably wrong to do it here.
a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
} else {
a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
}
ce->compilation()->implicit_exception_table()->append(_offset, __ offset()); ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
__ bind(_entry); __ bind(_entry);
__ call(Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id), __ call(a, relocInfo::runtime_call_type);
relocInfo::runtime_call_type);
__ delayed()->nop(); __ delayed()->nop();
ce->add_call_info_here(_info); ce->add_call_info_here(_info);
ce->verify_oop_map(_info); ce->verify_oop_map(_info);
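All three stubs in this file now share one slow path: when deoptimize_on_exception() is set (unconditionally for the new PredicateFailedStub), the generated code calls into Runtime1::predicate_failed_trap_id and deoptimizes rather than throwing, since a hoisted range-check or null-check predicate can fail even when the individual access it guards would have been legal. The platform-independent stub declaration lives in c1_CodeStubs.hpp, which this excerpt does not show; presumably it is along these lines:

// Assumed sketch of the shared stub class (not part of this excerpt):
class PredicateFailedStub: public CodeStub {
 private:
  CodeEmitInfo* _info;
 public:
  PredicateFailedStub(CodeEmitInfo* info);
  virtual void emit_code(LIR_Assembler* e);          // per-platform, as above
  virtual CodeEmitInfo* info() const { return _info; }
  virtual void visit(LIR_OpVisitState* visitor) {
    visitor->do_slow_case(_info);                    // keep oops alive for the call
  }
#ifndef PRODUCT
  virtual void print_name(outputStream* out) const { out->print("PredicateFailedStub"); }
#endif
};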

View file

@ -3361,6 +3361,45 @@ void LIR_Assembler::get_thread(LIR_Opr result_reg) {
__ mov(G2_thread, result_reg->as_register()); __ mov(G2_thread, result_reg->as_register());
} }
#ifdef ASSERT
// emit run-time assertion
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
assert(op->code() == lir_assert, "must be");
if (op->in_opr1()->is_valid()) {
assert(op->in_opr2()->is_valid(), "both operands must be valid");
comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
} else {
assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
assert(op->condition() == lir_cond_always, "no other conditions allowed");
}
Label ok;
if (op->condition() != lir_cond_always) {
Assembler::Condition acond;
switch (op->condition()) {
case lir_cond_equal: acond = Assembler::equal; break;
case lir_cond_notEqual: acond = Assembler::notEqual; break;
case lir_cond_less: acond = Assembler::less; break;
case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
case lir_cond_greater: acond = Assembler::greater; break;
case lir_cond_aboveEqual: acond = Assembler::greaterEqualUnsigned; break;
case lir_cond_belowEqual: acond = Assembler::lessEqualUnsigned; break;
default: ShouldNotReachHere();
};
__ br(acond, false, Assembler::pt, ok);
__ delayed()->nop();
}
if (op->halt()) {
const char* str = __ code_string(op->msg());
__ stop(str);
} else {
breakpoint();
}
__ bind(ok);
}
#endif
void LIR_Assembler::peephole(LIR_List* lir) { void LIR_Assembler::peephole(LIR_List* lir) {
LIR_OpList* inst = lir->instructions_list(); LIR_OpList* inst = lir->instructions_list();
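emit_assert above is the SPARC back end of a new debug-only LIR op (lir_assert; its x86 twin appears later in this commit): compare the operands, branch past the failure path when the condition holds, otherwise stop() with the message or hit a breakpoint. On the producing side, C1 presumably appends the op through a small ASSERT-only LIR_List helper, roughly:

// Hypothetical producer-side appender (name and signature assumed,
// mirroring the other LIR_List helpers; compiled only #ifdef ASSERT):
void LIR_List::lir_assert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
                          const char* msg, bool halt) {
  append(new LIR_OpAssert(condition, opr1, opr2, msg, halt));
}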

View file

@ -324,7 +324,7 @@ void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp)
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) { void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
assert(x->is_pinned(),""); assert(x->is_pinned(),"");
bool needs_range_check = true; bool needs_range_check = x->compute_needs_range_check();
bool use_length = x->length() != NULL; bool use_length = x->length() != NULL;
bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT; bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL || bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
@ -339,13 +339,10 @@ void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
array.load_item(); array.load_item();
index.load_nonconstant(); index.load_nonconstant();
if (use_length) { if (use_length && needs_range_check) {
needs_range_check = x->compute_needs_range_check();
if (needs_range_check) {
length.set_instruction(x->length()); length.set_instruction(x->length());
length.load_item(); length.load_item();
} }
}
if (needs_store_check) { if (needs_store_check) {
value.load_item(); value.load_item();
} else { } else {
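The reshuffle above is not just cosmetic: compute_needs_range_check() is now consulted up front rather than only when a length operand is available, so a store whose index is provably in bounds (presumably the constant-index-into-NewArray case) skips the range check even on the path that previously hard-coded needs_range_check to true; the length is materialized only when a length value exists and the check actually survived. The x86 copy of this change appears later in the commit.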

View file

@ -987,6 +987,25 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
break; break;
#endif // INCLUDE_ALL_GCS #endif // INCLUDE_ALL_GCS
case predicate_failed_trap_id:
{
__ set_info("predicate_failed_trap", dont_gc_arguments);
OopMap* oop_map = save_live_registers(sasm);
int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
oop_maps = new OopMapSet();
oop_maps->add_gc_map(call_offset, oop_map);
DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
assert(deopt_blob != NULL, "deoptimization blob must have been created");
restore_live_registers(sasm);
__ restore();
__ br(Assembler::always, false, Assembler::pt, deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type);
__ delayed()->nop();
}
break;
default: default:
{ __ set_info("unimplemented entry", dont_gc_arguments); { __ set_info("unimplemented entry", dont_gc_arguments);
__ save_frame(0); __ save_frame(0);
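The new stub saves live registers, calls into the runtime, then branches to the deopt blob's unpack_with_reexecution entry so the failing bytecode is re-executed in the interpreter. The runtime half, Runtime1::predicate_failed_trap, is shared code not shown in this excerpt; a minimal sketch of what it must do, assuming the usual Runtime1 conventions:

// Minimal sketch (assumed, not shown in this diff): the hoisted predicate
// was too optimistic, so retire the compiled code and deoptimize the caller.
JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* thread))
  ResourceMark rm;
  RegisterMap reg_map(thread, false);
  frame runtime_frame = thread->last_frame();
  frame caller_frame  = runtime_frame.sender(&reg_map);
  nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
  assert(nm != NULL, "no nmethod for predicate_failed_trap caller?");
  nm->make_not_entrant();                         // force recompilation later
  Deoptimization::deoptimize_frame(thread, caller_frame.id());
JRT_END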

View file

@ -101,6 +101,15 @@ RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
void RangeCheckStub::emit_code(LIR_Assembler* ce) { void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry); __ bind(_entry);
if (_info->deoptimize_on_exception()) {
address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
__ call(RuntimeAddress(a));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
return;
}
// pass the array index on stack because all registers must be preserved // pass the array index on stack because all registers must be preserved
if (_index->is_cpu_register()) { if (_index->is_cpu_register()) {
ce->store_parameter(_index->as_register(), 0); ce->store_parameter(_index->as_register(), 0);
@ -115,9 +124,22 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
} }
__ call(RuntimeAddress(Runtime1::entry_for(stub_id))); __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
ce->add_call_info_here(_info); ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here()); debug_only(__ should_not_reach_here());
} }
PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
_info = new CodeEmitInfo(info);
}
void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
__ call(RuntimeAddress(a));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
}
void DivByZeroStub::emit_code(LIR_Assembler* ce) { void DivByZeroStub::emit_code(LIR_Assembler* ce) {
if (_offset != -1) { if (_offset != -1) {
@ -414,10 +436,19 @@ void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) { void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
address a;
if (_info->deoptimize_on_exception()) {
// Deoptimize, do not throw the exception, because it is probably wrong to do it here.
a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
} else {
a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
}
ce->compilation()->implicit_exception_table()->append(_offset, __ offset()); ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
__ bind(_entry); __ bind(_entry);
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id))); __ call(RuntimeAddress(a));
ce->add_call_info_here(_info); ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here()); debug_only(__ should_not_reach_here());
} }

View file

@ -3755,6 +3755,44 @@ void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type,
} }
} }
#ifdef ASSERT
// emit run-time assertion
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
assert(op->code() == lir_assert, "must be");
if (op->in_opr1()->is_valid()) {
assert(op->in_opr2()->is_valid(), "both operands must be valid");
comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
} else {
assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
assert(op->condition() == lir_cond_always, "no other conditions allowed");
}
Label ok;
if (op->condition() != lir_cond_always) {
Assembler::Condition acond = Assembler::zero;
switch (op->condition()) {
case lir_cond_equal: acond = Assembler::equal; break;
case lir_cond_notEqual: acond = Assembler::notEqual; break;
case lir_cond_less: acond = Assembler::less; break;
case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;
case lir_cond_greater: acond = Assembler::greater; break;
case lir_cond_belowEqual: acond = Assembler::belowEqual; break;
case lir_cond_aboveEqual: acond = Assembler::aboveEqual; break;
default: ShouldNotReachHere();
}
__ jcc(acond, ok);
}
if (op->halt()) {
const char* str = __ code_string(op->msg());
__ stop(str);
} else {
breakpoint();
}
__ bind(ok);
}
#endif
void LIR_Assembler::membar() { void LIR_Assembler::membar() {
// QQQ sparc TSO uses this, // QQQ sparc TSO uses this,

View file

@ -263,7 +263,7 @@ void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp)
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) { void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
assert(x->is_pinned(),""); assert(x->is_pinned(),"");
bool needs_range_check = true; bool needs_range_check = x->compute_needs_range_check();
bool use_length = x->length() != NULL; bool use_length = x->length() != NULL;
bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT; bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL || bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
@ -278,12 +278,10 @@ void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
array.load_item(); array.load_item();
index.load_nonconstant(); index.load_nonconstant();
if (use_length) { if (use_length && needs_range_check) {
needs_range_check = x->compute_needs_range_check();
if (needs_range_check) {
length.set_instruction(x->length()); length.set_instruction(x->length());
length.load_item(); length.load_item();
}
} }
if (needs_store_check) { if (needs_store_check) {
value.load_item(); value.load_item();

View file

@ -675,7 +675,8 @@ void FpuStackAllocator::handle_op2(LIR_Op2* op2) {
switch (op2->code()) { switch (op2->code()) {
case lir_cmp: case lir_cmp:
case lir_cmp_fd2i: case lir_cmp_fd2i:
case lir_ucmp_fd2i: { case lir_ucmp_fd2i:
case lir_assert: {
assert(left->is_fpu_register(), "invalid LIR"); assert(left->is_fpu_register(), "invalid LIR");
assert(right->is_fpu_register(), "invalid LIR"); assert(right->is_fpu_register(), "invalid LIR");
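Folding lir_assert into the lir_cmp family here means a debug-build assertion whose operands live in x87 registers gets the same FPU-stack bookkeeping as an ordinary compare; without this case the allocator's stack simulation would presumably desynchronize in ASSERT builds whenever an assert compared two floats.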

View file

@ -1807,6 +1807,24 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
break; break;
#endif // INCLUDE_ALL_GCS #endif // INCLUDE_ALL_GCS
case predicate_failed_trap_id:
{
StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);
OopMap* map = save_live_registers(sasm, 1);
int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
oop_maps = new OopMapSet();
oop_maps->add_gc_map(call_offset, map);
restore_live_registers(sasm);
__ leave();
DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
assert(deopt_blob != NULL, "deoptimization blob must have been created");
__ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
}
break;
default: default:
{ StubFrame f(sasm, "unimplemented entry", dont_gc_arguments); { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
__ movptr(rax, (int)id); __ movptr(rax, (int)id);

View file

@ -1299,25 +1299,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ push(rdx); __ push(rdx);
#endif // _LP64 #endif // _LP64
// Either restore the MXCSR register after returning from the JNI Call // Verify or restore cpu control state after JNI call
// or verify that it wasn't changed. __ restore_cpu_control_state_after_jni();
if (VM_Version::supports_sse()) {
if (RestoreMXCSROnJNICalls) {
__ ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
}
else if (CheckJNICalls ) {
__ call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
}
}
#ifndef _LP64
// Either restore the x87 floating pointer control word after returning
// from the JNI call or verify that it wasn't changed.
if (CheckJNICalls) {
__ call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
}
#endif // _LP64
// change thread state // change thread state
__ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans); __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);

View file

@ -4765,6 +4765,31 @@ void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
pop_CPU_state(); pop_CPU_state();
} }
void MacroAssembler::restore_cpu_control_state_after_jni() {
// Either restore the MXCSR register after returning from the JNI Call
// or verify that it wasn't changed (with -Xcheck:jni flag).
if (VM_Version::supports_sse()) {
if (RestoreMXCSROnJNICalls) {
ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
} else if (CheckJNICalls) {
call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
}
}
if (VM_Version::supports_avx()) {
// Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
vzeroupper();
}
#ifndef _LP64
// Either restore the x87 floating pointer control word after returning
// from the JNI call or verify that it wasn't changed.
if (CheckJNICalls) {
call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
}
#endif // _LP64
}
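This gathers the previously copy-pasted MXCSR/x87 checks (see the interpreter and native-wrapper hunks elsewhere in this commit) into one MacroAssembler helper and adds the AVX case: vzeroupper zeroes the upper 128 bits of the YMM registers so that legacy-SSE code executed after a JNI call does not pay the hardware's AVX-to-SSE transition penalty, which on AVX-capable parts involves saving and restoring the upper register halves. Every call site now reduces to the pattern used throughout this commit:

__ call(RuntimeAddress(native_func));
// Verify or restore cpu control state after JNI call
__ restore_cpu_control_state_after_jni();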
void MacroAssembler::load_klass(Register dst, Register src) { void MacroAssembler::load_klass(Register dst, Register src) {
#ifdef _LP64 #ifdef _LP64
if (UseCompressedKlassPointers) { if (UseCompressedKlassPointers) {
@ -5759,6 +5784,8 @@ void MacroAssembler::string_compare(Register str1, Register str2,
addptr(result, stride2); addptr(result, stride2);
subl(cnt2, stride2); subl(cnt2, stride2);
jccb(Assembler::notZero, COMPARE_WIDE_VECTORS_LOOP); jccb(Assembler::notZero, COMPARE_WIDE_VECTORS_LOOP);
// clean upper bits of YMM registers
vzeroupper();
// compare wide vectors tail // compare wide vectors tail
bind(COMPARE_WIDE_TAIL); bind(COMPARE_WIDE_TAIL);
@ -5772,6 +5799,8 @@ void MacroAssembler::string_compare(Register str1, Register str2,
// Identifies the mismatching (higher or lower)16-bytes in the 32-byte vectors. // Identifies the mismatching (higher or lower)16-bytes in the 32-byte vectors.
bind(VECTOR_NOT_EQUAL); bind(VECTOR_NOT_EQUAL);
// clean upper bits of YMM registers
vzeroupper();
lea(str1, Address(str1, result, scale)); lea(str1, Address(str1, result, scale));
lea(str2, Address(str2, result, scale)); lea(str2, Address(str2, result, scale));
jmp(COMPARE_16_CHARS); jmp(COMPARE_16_CHARS);
@ -6028,6 +6057,10 @@ void MacroAssembler::char_arrays_equals(bool is_array_equ, Register ary1, Regist
// That's it // That's it
bind(DONE); bind(DONE);
if (UseAVX >= 2) {
// clean upper bits of YMM registers
vzeroupper();
}
} }
void MacroAssembler::generate_fill(BasicType t, bool aligned, void MacroAssembler::generate_fill(BasicType t, bool aligned,
@ -6157,6 +6190,10 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
vmovdqu(Address(to, 0), xtmp); vmovdqu(Address(to, 0), xtmp);
addptr(to, 32); addptr(to, 32);
subl(count, 8 << shift); subl(count, 8 << shift);
BIND(L_check_fill_8_bytes);
// clean upper bits of YMM registers
vzeroupper();
} else { } else {
// Fill 32-byte chunks // Fill 32-byte chunks
pshufd(xtmp, xtmp, 0); pshufd(xtmp, xtmp, 0);
@ -6180,8 +6217,9 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
addptr(to, 32); addptr(to, 32);
subl(count, 8 << shift); subl(count, 8 << shift);
jcc(Assembler::greaterEqual, L_fill_32_bytes_loop); jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
}
BIND(L_check_fill_8_bytes); BIND(L_check_fill_8_bytes);
}
addl(count, 8 << shift); addl(count, 8 << shift);
jccb(Assembler::zero, L_exit); jccb(Assembler::zero, L_exit);
jmpb(L_fill_8_bytes); jmpb(L_fill_8_bytes);
@ -6316,6 +6354,10 @@ void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
jccb(Assembler::lessEqual, L_copy_16_chars); jccb(Assembler::lessEqual, L_copy_16_chars);
bind(L_copy_16_chars_exit); bind(L_copy_16_chars_exit);
if (UseAVX >= 2) {
// clean upper bits of YMM registers
vzeroupper();
}
subptr(len, 8); subptr(len, 8);
jccb(Assembler::greater, L_copy_8_chars_exit); jccb(Assembler::greater, L_copy_8_chars_exit);
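The remaining hunks in this file apply the same rule to compiled intrinsics: string_compare, char_arrays_equals, generate_fill, and encode_iso_array all use 256-bit YMM instructions when UseAVX >= 2, so each exit that can fall through into 128-bit or legacy-SSE code gets a vzeroupper first; the copy stubs in the stub generator receive the identical treatment below.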

View file

@ -582,6 +582,9 @@ class MacroAssembler: public Assembler {
// only if +VerifyFPU // only if +VerifyFPU
void verify_FPU(int stack_depth, const char* s = "illegal FPU state"); void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
// Verify or restore cpu control state after JNI call
void restore_cpu_control_state_after_jni();
// prints msg, dumps registers and stops execution // prints msg, dumps registers and stops execution
void stop(const char* msg); void stop(const char* msg);

View file

@ -2065,6 +2065,9 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ call(RuntimeAddress(native_func)); __ call(RuntimeAddress(native_func));
// Verify or restore cpu control state after JNI call
__ restore_cpu_control_state_after_jni();
// WARNING - on Windows Java Natives use pascal calling convention and pop the // WARNING - on Windows Java Natives use pascal calling convention and pop the
// arguments off of the stack. We could just re-adjust the stack pointer here // arguments off of the stack. We could just re-adjust the stack pointer here
// and continue to do SP relative addressing but we instead switch to FP // and continue to do SP relative addressing but we instead switch to FP

View file

@ -2315,16 +2315,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ call(RuntimeAddress(native_func)); __ call(RuntimeAddress(native_func));
// Either restore the MXCSR register after returning from the JNI Call // Verify or restore cpu control state after JNI call
// or verify that it wasn't changed. __ restore_cpu_control_state_after_jni();
if (RestoreMXCSROnJNICalls) {
__ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));
}
else if (CheckJNICalls ) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::verify_mxcsr_entry())));
}
// Unpack native results. // Unpack native results.
switch (ret_type) { switch (ret_type) {

View file

@ -835,6 +835,11 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_copy_64_bytes); __ BIND(L_copy_64_bytes);
__ subl(qword_count, 8); __ subl(qword_count, 8);
__ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop); __ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop);
if (UseUnalignedLoadStores && (UseAVX >= 2)) {
// clean upper bits of YMM registers
__ vzeroupper();
}
__ addl(qword_count, 8); __ addl(qword_count, 8);
__ jccb(Assembler::zero, L_exit); __ jccb(Assembler::zero, L_exit);
// //

View file

@ -1331,6 +1331,10 @@ class StubGenerator: public StubCodeGenerator {
} }
__ addptr(qword_count, 4); __ addptr(qword_count, 4);
__ BIND(L_end); __ BIND(L_end);
if (UseAVX >= 2) {
// clean upper bits of YMM registers
__ vzeroupper();
}
} else { } else {
// Copy 32-bytes per iteration // Copy 32-bytes per iteration
__ BIND(L_loop); __ BIND(L_loop);
@ -1404,6 +1408,10 @@ class StubGenerator: public StubCodeGenerator {
} }
__ subptr(qword_count, 4); __ subptr(qword_count, 4);
__ BIND(L_end); __ BIND(L_end);
if (UseAVX >= 2) {
// clean upper bits of YMM registers
__ vzeroupper();
}
} else { } else {
// Copy 32-bytes per iteration // Copy 32-bytes per iteration
__ BIND(L_loop); __ BIND(L_loop);

View file

@ -1080,22 +1080,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// result potentially in rdx:rax or ST0 // result potentially in rdx:rax or ST0
// Either restore the MXCSR register after returning from the JNI Call // Verify or restore cpu control state after JNI call
// or verify that it wasn't changed. __ restore_cpu_control_state_after_jni();
if (VM_Version::supports_sse()) {
if (RestoreMXCSROnJNICalls) {
__ ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
}
else if (CheckJNICalls ) {
__ call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
}
}
// Either restore the x87 floating pointer control word after returning
// from the JNI call or verify that it wasn't changed.
if (CheckJNICalls) {
__ call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
}
// save potential result in ST(0) & rdx:rax // save potential result in ST(0) & rdx:rax
// (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 - // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -

View file

@ -1079,15 +1079,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ call(rax); __ call(rax);
// result potentially in rax or xmm0 // result potentially in rax or xmm0
// Depending on runtime options, either restore the MXCSR // Verify or restore cpu control state after JNI call
// register after returning from the JNI Call or verify that __ restore_cpu_control_state_after_jni();
// it wasn't changed during -Xcheck:jni.
if (RestoreMXCSROnJNICalls) {
__ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));
}
else if (CheckJNICalls) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::verify_mxcsr_entry())));
}
// NOTE: The order of these pushes is known to frame::interpreter_frame_result // NOTE: The order of these pushes is known to frame::interpreter_frame_result
// in order to extract the result of a method call. If the order of these // in order to extract the result of a method call. If the order of these

View file

@ -228,10 +228,16 @@ static jlong *float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], CON
static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000)); static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
// Offset hacking within calls. // Offset hacking within calls.
static int pre_call_FPU_size() { static int pre_call_resets_size() {
if (Compile::current()->in_24_bit_fp_mode()) int size = 0;
return 6; // fldcw Compile* C = Compile::current();
return 0; if (C->in_24_bit_fp_mode()) {
size += 6; // fldcw
}
if (C->max_vector_size() > 16) {
size += 3; // vzeroupper
}
return size;
} }
static int preserve_SP_size() { static int preserve_SP_size() {
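The renamed pre_call_resets_size() has to predict the exact byte count of what the pre_call_resets enc_class later emits: fldcw with a 32-bit absolute operand encodes in 6 bytes and vzeroupper in 3 (C5 F8 77). The ret_addr_offset() and compute_padding() methods below lean on this to locate the return address and to keep the patchable call instruction from spanning a cache line, and the debug_only asserts in the encoder verify the prediction.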
@ -242,21 +248,21 @@ static int preserve_SP_size() {
// from the start of the call to the point where the return address // from the start of the call to the point where the return address
// will point. // will point.
int MachCallStaticJavaNode::ret_addr_offset() { int MachCallStaticJavaNode::ret_addr_offset() {
int offset = 5 + pre_call_FPU_size(); // 5 bytes from start of call to where return address points int offset = 5 + pre_call_resets_size(); // 5 bytes from start of call to where return address points
if (_method_handle_invoke) if (_method_handle_invoke)
offset += preserve_SP_size(); offset += preserve_SP_size();
return offset; return offset;
} }
int MachCallDynamicJavaNode::ret_addr_offset() { int MachCallDynamicJavaNode::ret_addr_offset() {
return 10 + pre_call_FPU_size(); // 10 bytes from start of call to where return address points return 10 + pre_call_resets_size(); // 10 bytes from start of call to where return address points
} }
static int sizeof_FFree_Float_Stack_All = -1; static int sizeof_FFree_Float_Stack_All = -1;
int MachCallRuntimeNode::ret_addr_offset() { int MachCallRuntimeNode::ret_addr_offset() {
assert(sizeof_FFree_Float_Stack_All != -1, "must have been emitted already"); assert(sizeof_FFree_Float_Stack_All != -1, "must have been emitted already");
return sizeof_FFree_Float_Stack_All + 5 + pre_call_FPU_size(); return sizeof_FFree_Float_Stack_All + 5 + pre_call_resets_size();
} }
// Indicate if the safepoint node needs the polling page as an input. // Indicate if the safepoint node needs the polling page as an input.
@ -272,7 +278,7 @@ bool SafePointNode::needs_polling_address_input() {
// The address of the call instruction needs to be 4-byte aligned to // The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched. // ensure that it does not span a cache line so that it can be patched.
int CallStaticJavaDirectNode::compute_padding(int current_offset) const { int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
current_offset += pre_call_FPU_size(); // skip fldcw, if any current_offset += pre_call_resets_size(); // skip fldcw, if any
current_offset += 1; // skip call opcode byte current_offset += 1; // skip call opcode byte
return round_to(current_offset, alignment_required()) - current_offset; return round_to(current_offset, alignment_required()) - current_offset;
} }
@ -280,7 +286,7 @@ int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
// The address of the call instruction needs to be 4-byte aligned to // The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched. // ensure that it does not span a cache line so that it can be patched.
int CallStaticJavaHandleNode::compute_padding(int current_offset) const { int CallStaticJavaHandleNode::compute_padding(int current_offset) const {
current_offset += pre_call_FPU_size(); // skip fldcw, if any current_offset += pre_call_resets_size(); // skip fldcw, if any
current_offset += preserve_SP_size(); // skip mov rbp, rsp current_offset += preserve_SP_size(); // skip mov rbp, rsp
current_offset += 1; // skip call opcode byte current_offset += 1; // skip call opcode byte
return round_to(current_offset, alignment_required()) - current_offset; return round_to(current_offset, alignment_required()) - current_offset;
@ -289,7 +295,7 @@ int CallStaticJavaHandleNode::compute_padding(int current_offset) const {
// The address of the call instruction needs to be 4-byte aligned to // The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched. // ensure that it does not span a cache line so that it can be patched.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const { int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
current_offset += pre_call_FPU_size(); // skip fldcw, if any current_offset += pre_call_resets_size(); // skip fldcw, if any
current_offset += 5; // skip MOV instruction current_offset += 5; // skip MOV instruction
current_offset += 1; // skip call opcode byte current_offset += 1; // skip call opcode byte
return round_to(current_offset, alignment_required()) - current_offset; return round_to(current_offset, alignment_required()) - current_offset;
@ -583,6 +589,10 @@ void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
// Remove two words for return addr and rbp, // Remove two words for return addr and rbp,
framesize -= 2*wordSize; framesize -= 2*wordSize;
if (C->max_vector_size() > 16) {
st->print("VZEROUPPER");
st->cr(); st->print("\t");
}
if (C->in_24_bit_fp_mode()) { if (C->in_24_bit_fp_mode()) {
st->print("FLDCW standard control word"); st->print("FLDCW standard control word");
st->cr(); st->print("\t"); st->cr(); st->print("\t");
@ -602,6 +612,12 @@ void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
Compile *C = ra_->C; Compile *C = ra_->C;
if (C->max_vector_size() > 16) {
// Clear upper bits of YMM registers when current compiled code uses
// wide vectors to avoid AVX <-> SSE transition penalty during call.
MacroAssembler masm(&cbuf);
masm.vzeroupper();
}
// If method set FPU control word, restore to standard control word // If method set FPU control word, restore to standard control word
if (C->in_24_bit_fp_mode()) { if (C->in_24_bit_fp_mode()) {
MacroAssembler masm(&cbuf); MacroAssembler masm(&cbuf);
@ -619,8 +635,7 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
emit_opcode(cbuf, 0x81); // add SP, #framesize emit_opcode(cbuf, 0x81); // add SP, #framesize
emit_rm(cbuf, 0x3, 0x00, ESP_enc); emit_rm(cbuf, 0x3, 0x00, ESP_enc);
emit_d32(cbuf, framesize); emit_d32(cbuf, framesize);
} } else if (framesize) {
else if( framesize ) {
emit_opcode(cbuf, 0x83); // add SP, #framesize emit_opcode(cbuf, 0x83); // add SP, #framesize
emit_rm(cbuf, 0x3, 0x00, ESP_enc); emit_rm(cbuf, 0x3, 0x00, ESP_enc);
emit_d8(cbuf, framesize); emit_d8(cbuf, framesize);
@ -640,6 +655,7 @@ uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
Compile *C = ra_->C; Compile *C = ra_->C;
// If method set FPU control word, restore to standard control word // If method set FPU control word, restore to standard control word
int size = C->in_24_bit_fp_mode() ? 6 : 0; int size = C->in_24_bit_fp_mode() ? 6 : 0;
if (C->max_vector_size() > 16) size += 3; // vzeroupper
if (do_polling() && C->is_method_compilation()) size += 6; if (do_polling() && C->is_method_compilation()) size += 6;
int framesize = C->frame_slots() << LogBytesPerInt; int framesize = C->frame_slots() << LogBytesPerInt;
@ -1853,15 +1869,21 @@ encode %{
%} %}
enc_class pre_call_FPU %{ enc_class pre_call_resets %{
// If method sets FPU control word restore it here // If method sets FPU control word restore it here
debug_only(int off0 = cbuf.insts_size()); debug_only(int off0 = cbuf.insts_size());
if( Compile::current()->in_24_bit_fp_mode() ) { if (ra_->C->in_24_bit_fp_mode()) {
MacroAssembler masm(&cbuf); MacroAssembler _masm(&cbuf);
masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std())); __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
}
if (ra_->C->max_vector_size() > 16) {
// Clear upper bits of YMM registers when current compiled code uses
// wide vectors to avoid AVX <-> SSE transition penalty during call.
MacroAssembler _masm(&cbuf);
__ vzeroupper();
} }
debug_only(int off1 = cbuf.insts_size()); debug_only(int off1 = cbuf.insts_size());
assert(off1 - off0 == pre_call_FPU_size(), "correct size prediction"); assert(off1 - off0 == pre_call_resets_size(), "correct size prediction");
%} %}
enc_class post_call_FPU %{ enc_class post_call_FPU %{
@ -12828,7 +12850,7 @@ instruct CallStaticJavaDirect(method meth) %{
ins_cost(300); ins_cost(300);
format %{ "CALL,static " %} format %{ "CALL,static " %}
opcode(0xE8); /* E8 cd */ opcode(0xE8); /* E8 cd */
ins_encode( pre_call_FPU, ins_encode( pre_call_resets,
Java_Static_Call( meth ), Java_Static_Call( meth ),
call_epilog, call_epilog,
post_call_FPU ); post_call_FPU );
@ -12849,7 +12871,7 @@ instruct CallStaticJavaHandle(method meth, eBPRegP ebp_mh_SP_save) %{
ins_cost(300); ins_cost(300);
format %{ "CALL,static/MethodHandle " %} format %{ "CALL,static/MethodHandle " %}
opcode(0xE8); /* E8 cd */ opcode(0xE8); /* E8 cd */
ins_encode( pre_call_FPU, ins_encode( pre_call_resets,
preserve_SP, preserve_SP,
Java_Static_Call( meth ), Java_Static_Call( meth ),
restore_SP, restore_SP,
@ -12870,7 +12892,7 @@ instruct CallDynamicJavaDirect(method meth) %{
format %{ "MOV EAX,(oop)-1\n\t" format %{ "MOV EAX,(oop)-1\n\t"
"CALL,dynamic" %} "CALL,dynamic" %}
opcode(0xE8); /* E8 cd */ opcode(0xE8); /* E8 cd */
ins_encode( pre_call_FPU, ins_encode( pre_call_resets,
Java_Dynamic_Call( meth ), Java_Dynamic_Call( meth ),
call_epilog, call_epilog,
post_call_FPU ); post_call_FPU );
@ -12887,7 +12909,7 @@ instruct CallRuntimeDirect(method meth) %{
format %{ "CALL,runtime " %} format %{ "CALL,runtime " %}
opcode(0xE8); /* E8 cd */ opcode(0xE8); /* E8 cd */
// Use FFREEs to clear entries in float stack // Use FFREEs to clear entries in float stack
ins_encode( pre_call_FPU, ins_encode( pre_call_resets,
FFree_Float_Stack_All, FFree_Float_Stack_All,
Java_To_Runtime( meth ), Java_To_Runtime( meth ),
post_call_FPU ); post_call_FPU );
@ -12902,7 +12924,7 @@ instruct CallLeafDirect(method meth) %{
ins_cost(300); ins_cost(300);
format %{ "CALL_LEAF,runtime " %} format %{ "CALL_LEAF,runtime " %}
opcode(0xE8); /* E8 cd */ opcode(0xE8); /* E8 cd */
ins_encode( pre_call_FPU, ins_encode( pre_call_resets,
FFree_Float_Stack_All, FFree_Float_Stack_All,
Java_To_Runtime( meth ), Java_To_Runtime( meth ),
Verify_FPU_For_Leaf, post_call_FPU ); Verify_FPU_For_Leaf, post_call_FPU );

View file

@ -399,6 +399,9 @@ source %{
static int preserve_SP_size() { static int preserve_SP_size() {
return 3; // rex.w, op, rm(reg/reg) return 3; // rex.w, op, rm(reg/reg)
} }
static int clear_avx_size() {
return (Compile::current()->max_vector_size() > 16) ? 3 : 0; // vzeroupper
}
// !!!!! Special hack to get all types of calls to specify the byte offset // !!!!! Special hack to get all types of calls to specify the byte offset
// from the start of the call to the point where the return address // from the start of the call to the point where the return address
@ -406,6 +409,7 @@ static int preserve_SP_size() {
int MachCallStaticJavaNode::ret_addr_offset() int MachCallStaticJavaNode::ret_addr_offset()
{ {
int offset = 5; // 5 bytes from start of call to where return address points int offset = 5; // 5 bytes from start of call to where return address points
offset += clear_avx_size();
if (_method_handle_invoke) if (_method_handle_invoke)
offset += preserve_SP_size(); offset += preserve_SP_size();
return offset; return offset;
@ -413,11 +417,16 @@ int MachCallStaticJavaNode::ret_addr_offset()
int MachCallDynamicJavaNode::ret_addr_offset() int MachCallDynamicJavaNode::ret_addr_offset()
{ {
return 15; // 15 bytes from start of call to where return address points int offset = 15; // 15 bytes from start of call to where return address points
offset += clear_avx_size();
return offset;
} }
// In os_cpu .ad file int MachCallRuntimeNode::ret_addr_offset() {
// int MachCallRuntimeNode::ret_addr_offset() int offset = 13; // movq r10,#addr; callq (r10)
offset += clear_avx_size();
return offset;
}
// Indicate if the safepoint node needs the polling page as an input, // Indicate if the safepoint node needs the polling page as an input,
// it does if the polling page is more than disp32 away. // it does if the polling page is more than disp32 away.
@ -434,6 +443,7 @@ bool SafePointNode::needs_polling_address_input()
// ensure that it does not span a cache line so that it can be patched. // ensure that it does not span a cache line so that it can be patched.
int CallStaticJavaDirectNode::compute_padding(int current_offset) const int CallStaticJavaDirectNode::compute_padding(int current_offset) const
{ {
current_offset += clear_avx_size(); // skip vzeroupper
current_offset += 1; // skip call opcode byte current_offset += 1; // skip call opcode byte
return round_to(current_offset, alignment_required()) - current_offset; return round_to(current_offset, alignment_required()) - current_offset;
} }
@ -443,6 +453,7 @@ int CallStaticJavaDirectNode::compute_padding(int current_offset) const
int CallStaticJavaHandleNode::compute_padding(int current_offset) const int CallStaticJavaHandleNode::compute_padding(int current_offset) const
{ {
current_offset += preserve_SP_size(); // skip mov rbp, rsp current_offset += preserve_SP_size(); // skip mov rbp, rsp
current_offset += clear_avx_size(); // skip vzeroupper
current_offset += 1; // skip call opcode byte current_offset += 1; // skip call opcode byte
return round_to(current_offset, alignment_required()) - current_offset; return round_to(current_offset, alignment_required()) - current_offset;
} }
@ -451,6 +462,7 @@ int CallStaticJavaHandleNode::compute_padding(int current_offset) const
// ensure that it does not span a cache line so that it can be patched. // ensure that it does not span a cache line so that it can be patched.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
{ {
current_offset += clear_avx_size(); // skip vzeroupper
current_offset += 11; // skip movq instruction + call opcode byte current_offset += 11; // skip movq instruction + call opcode byte
return round_to(current_offset, alignment_required()) - current_offset; return round_to(current_offset, alignment_required()) - current_offset;
} }
@ -764,6 +776,11 @@ int MachPrologNode::reloc() const
void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{ {
Compile* C = ra_->C; Compile* C = ra_->C;
if (C->max_vector_size() > 16) {
st->print("vzeroupper");
st->cr(); st->print("\t");
}
int framesize = C->frame_slots() << LogBytesPerInt; int framesize = C->frame_slots() << LogBytesPerInt;
assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned"); assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
// Remove word for return adr already pushed // Remove word for return adr already pushed
@ -793,6 +810,13 @@ void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const
void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{ {
Compile* C = ra_->C; Compile* C = ra_->C;
if (C->max_vector_size() > 16) {
// Clear upper bits of YMM registers when current compiled code uses
// wide vectors to avoid AVX <-> SSE transition penalty during call.
MacroAssembler _masm(&cbuf);
__ vzeroupper();
}
int framesize = C->frame_slots() << LogBytesPerInt; int framesize = C->frame_slots() << LogBytesPerInt;
assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned"); assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
// Remove word for return adr already pushed // Remove word for return adr already pushed
@ -2008,6 +2032,25 @@ encode %{
__ bind(miss); __ bind(miss);
%} %}
enc_class clear_avx %{
debug_only(int off0 = cbuf.insts_size());
if (ra_->C->max_vector_size() > 16) {
// Clear upper bits of YMM registers when current compiled code uses
// wide vectors to avoid AVX <-> SSE transition penalty during call.
MacroAssembler _masm(&cbuf);
__ vzeroupper();
}
debug_only(int off1 = cbuf.insts_size());
assert(off1 - off0 == clear_avx_size(), "correct size prediction");
%}
enc_class Java_To_Runtime(method meth) %{
// No relocation needed
MacroAssembler _masm(&cbuf);
__ mov64(r10, (int64_t) $meth$$method);
__ call(r10);
%}
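The 64-bit port gains an explicit Java_To_Runtime encoding (the opcode(0xE8) lines vanish from CallRuntimeDirect and friends below): mov64 of an 8-byte immediate into r10 is 10 bytes and the indirect call through r10 is 3 more, which matches the 13 bytes MachCallRuntimeNode::ret_addr_offset() now reports before adding clear_avx_size().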
enc_class Java_To_Interpreter(method meth) enc_class Java_To_Interpreter(method meth)
%{ %{
// CALL Java_To_Interpreter // CALL Java_To_Interpreter
@ -11366,7 +11409,7 @@ instruct CallStaticJavaDirect(method meth) %{
ins_cost(300); ins_cost(300);
format %{ "call,static " %} format %{ "call,static " %}
opcode(0xE8); /* E8 cd */ opcode(0xE8); /* E8 cd */
ins_encode(Java_Static_Call(meth), call_epilog); ins_encode(clear_avx, Java_Static_Call(meth), call_epilog);
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
ins_alignment(4); ins_alignment(4);
%} %}
@ -11384,7 +11427,7 @@ instruct CallStaticJavaHandle(method meth, rbp_RegP rbp_mh_SP_save) %{
ins_cost(300); ins_cost(300);
format %{ "call,static/MethodHandle " %} format %{ "call,static/MethodHandle " %}
opcode(0xE8); /* E8 cd */ opcode(0xE8); /* E8 cd */
ins_encode(preserve_SP, ins_encode(clear_avx, preserve_SP,
Java_Static_Call(meth), Java_Static_Call(meth),
restore_SP, restore_SP,
call_epilog); call_epilog);
@ -11403,7 +11446,7 @@ instruct CallDynamicJavaDirect(method meth)
ins_cost(300); ins_cost(300);
format %{ "movq rax, #Universe::non_oop_word()\n\t" format %{ "movq rax, #Universe::non_oop_word()\n\t"
"call,dynamic " %} "call,dynamic " %}
ins_encode(Java_Dynamic_Call(meth), call_epilog); ins_encode(clear_avx, Java_Dynamic_Call(meth), call_epilog);
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
ins_alignment(4); ins_alignment(4);
%} %}
@ -11416,8 +11459,7 @@ instruct CallRuntimeDirect(method meth)
ins_cost(300); ins_cost(300);
format %{ "call,runtime " %} format %{ "call,runtime " %}
opcode(0xE8); /* E8 cd */ ins_encode(clear_avx, Java_To_Runtime(meth));
ins_encode(Java_To_Runtime(meth));
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
@ -11429,8 +11471,7 @@ instruct CallLeafDirect(method meth)
ins_cost(300); ins_cost(300);
format %{ "call_leaf,runtime " %} format %{ "call_leaf,runtime " %}
opcode(0xE8); /* E8 cd */ ins_encode(clear_avx, Java_To_Runtime(meth));
ins_encode(Java_To_Runtime(meth));
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
@ -11442,7 +11483,6 @@ instruct CallLeafNoFPDirect(method meth)
ins_cost(300); ins_cost(300);
format %{ "call_leaf_nofp,runtime " %} format %{ "call_leaf_nofp,runtime " %}
opcode(0xE8); /* E8 cd */
ins_encode(Java_To_Runtime(meth)); ins_encode(Java_To_Runtime(meth));
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
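A minimal sketch (not HotSpot source, using only the MacroAssembler calls already seen in these hunks) of the pattern that clear_avx and the new epilog code share: emit vzeroupper before leaving compiled code whenever the method used wide (YMM) vectors, so legacy SSE code on the other side of the call does not pay the AVX <-> SSE transition penalty.

// Hypothetical helper illustrating the guard used above; max_vector_size
// > 16 means the compiled code touched 256-bit YMM registers.
static void clear_avx_before_call(MacroAssembler* masm, int max_vector_size) {
  if (max_vector_size > 16) {
    // Zero the upper 128 bits of all YMM registers: far cheaper than the
    // state transition the CPU performs on the next legacy SSE instruction.
    masm->vzeroupper();
  }
}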

View file

@ -199,7 +199,7 @@ case "$MODE" in
rm -f $GDBSCR rm -f $GDBSCR
;; ;;
dbx) dbx)
$DBX -s $MYDIR/.dbxrc $LAUNCHER $JPARAMS $DBX -s $HOME/.dbxrc $LAUNCHER $JPARMS
;; ;;
valgrind) valgrind)
echo Warning: Defaulting to 16Mb heap to make Valgrind run faster, use -Xmx for larger heap echo Warning: Defaulting to 16Mb heap to make Valgrind run faster, use -Xmx for larger heap

View file

@ -3773,6 +3773,8 @@ extern "C" {
} }
} }
static jint initSock();
// this is called _after_ the global arguments have been parsed // this is called _after_ the global arguments have been parsed
jint os::init_2(void) { jint os::init_2(void) {
// Allocate a single page and mark it as readable for safepoint polling // Allocate a single page and mark it as readable for safepoint polling
@ -3903,6 +3905,10 @@ jint os::init_2(void) {
if (!success) UseNUMAInterleaving = false; if (!success) UseNUMAInterleaving = false;
} }
if (initSock() != JNI_OK) {
return JNI_ERR;
}
return JNI_OK; return JNI_OK;
} }
@ -4899,42 +4905,24 @@ LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
// We don't build a headless jre for Windows // We don't build a headless jre for Windows
bool os::is_headless_jre() { return false; } bool os::is_headless_jre() { return false; }
static jint initSock() {
typedef CRITICAL_SECTION mutex_t;
#define mutexInit(m) InitializeCriticalSection(m)
#define mutexDestroy(m) DeleteCriticalSection(m)
#define mutexLock(m) EnterCriticalSection(m)
#define mutexUnlock(m) LeaveCriticalSection(m)
static bool sock_initialized = FALSE;
static mutex_t sockFnTableMutex;
static void initSock() {
WSADATA wsadata; WSADATA wsadata;
if (!os::WinSock2Dll::WinSock2Available()) { if (!os::WinSock2Dll::WinSock2Available()) {
jio_fprintf(stderr, "Could not load Winsock 2 (error: %d)\n", jio_fprintf(stderr, "Could not load Winsock (error: %d)\n",
::GetLastError()); ::GetLastError());
return; return JNI_ERR;
} }
if (sock_initialized == TRUE) return;
::mutexInit(&sockFnTableMutex); if (os::WinSock2Dll::WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
::mutexLock(&sockFnTableMutex); jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
if (os::WinSock2Dll::WSAStartup(MAKEWORD(1,1), &wsadata) != 0) { ::GetLastError());
jio_fprintf(stderr, "Could not initialize Winsock\n"); return JNI_ERR;
} }
sock_initialized = TRUE; return JNI_OK;
::mutexUnlock(&sockFnTableMutex);
} }
struct hostent* os::get_host_by_name(char* name) { struct hostent* os::get_host_by_name(char* name) {
if (!sock_initialized) {
initSock();
}
if (!os::WinSock2Dll::WinSock2Available()) {
return NULL;
}
return (struct hostent*)os::WinSock2Dll::gethostbyname(name); return (struct hostent*)os::WinSock2Dll::gethostbyname(name);
} }

View file

@ -55,20 +55,6 @@ encode %{
// adding a syntax that specifies the sizes of fields in an order, // adding a syntax that specifies the sizes of fields in an order,
// so that the adlc can build the emit functions automagically // so that the adlc can build the emit functions automagically
enc_class Java_To_Runtime(method meth) %{
// No relocation needed
// movq r10, <meth>
emit_opcode(cbuf, Assembler::REX_WB);
emit_opcode(cbuf, 0xB8 | (R10_enc - 8));
emit_d64(cbuf, (int64_t) $meth$$method);
// call (r10)
emit_opcode(cbuf, Assembler::REX_B);
emit_opcode(cbuf, 0xFF);
emit_opcode(cbuf, 0xD0 | (R10_enc - 8));
%}
%} %}
@ -76,8 +62,4 @@ encode %{
source %{ source %{
int MachCallRuntimeNode::ret_addr_offset() {
return 13; // movq r10,#addr; callq (r10)
}
%} %}

View file

@ -55,20 +55,6 @@ encode %{
// adding a syntax that specifies the sizes of fields in an order, // adding a syntax that specifies the sizes of fields in an order,
// so that the adlc can build the emit functions automagically // so that the adlc can build the emit functions automagically
enc_class Java_To_Runtime(method meth) %{
// No relocation needed
// movq r10, <meth>
emit_opcode(cbuf, Assembler::REX_WB);
emit_opcode(cbuf, 0xB8 | (R10_enc - 8));
emit_d64(cbuf, (int64_t) $meth$$method);
// call (r10)
emit_opcode(cbuf, Assembler::REX_B);
emit_opcode(cbuf, 0xFF);
emit_opcode(cbuf, 0xD0 | (R10_enc - 8));
%}
%} %}
@ -76,8 +62,4 @@ encode %{
source %{ source %{
int MachCallRuntimeNode::ret_addr_offset() {
return 13; // movq r10,#addr; callq (r10)
}
%} %}

View file

@ -54,39 +54,10 @@ encode %{
// main source block for now. In future, we can generalize this by // main source block for now. In future, we can generalize this by
// adding a syntax that specifies the sizes of fields in an order, // adding a syntax that specifies the sizes of fields in an order,
// so that the adlc can build the emit functions automagically // so that the adlc can build the emit functions automagically
enc_class Java_To_Runtime(method meth) %{
// No relocation needed
// movq r10, <meth>
emit_opcode(cbuf, Assembler::REX_WB);
emit_opcode(cbuf, 0xB8 | (R10_enc - 8));
emit_d64(cbuf, (int64_t) $meth$$method);
// call (r10)
emit_opcode(cbuf, Assembler::REX_B);
emit_opcode(cbuf, 0xFF);
emit_opcode(cbuf, 0xD0 | (R10_enc - 8));
%}
enc_class post_call_verify_mxcsr %{
MacroAssembler _masm(&cbuf);
if (RestoreMXCSROnJNICalls) {
__ ldmxcsr(ExternalAddress(StubRoutines::amd64::mxcsr_std()));
}
else if (CheckJNICalls) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::amd64::verify_mxcsr_entry())));
}
%}
%} %}
// Platform dependent source // Platform dependent source
source %{ source %{
int MachCallRuntimeNode::ret_addr_offset() {
return 13; // movq r10,#addr; callq (r10)
}
%} %}

View file

@ -53,30 +53,11 @@ encode %{
// adding a syntax that specifies the sizes of fields in an order, // adding a syntax that specifies the sizes of fields in an order,
// so that the adlc can build the emit functions automagically // so that the adlc can build the emit functions automagically
enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime
// No relocation needed
// movq r10, <meth>
emit_opcode(cbuf, Assembler::REX_WB);
emit_opcode(cbuf, 0xB8 | (R10_enc - 8));
emit_d64(cbuf, (int64_t) $meth$$method);
// call (r10)
emit_opcode(cbuf, Assembler::REX_B);
emit_opcode(cbuf, 0xFF);
emit_opcode(cbuf, 0xD0 | (R10_enc - 8));
%} %}
%}
//
// Platform dependent source // Platform dependent source
//
source %{ source %{
int MachCallRuntimeNode::ret_addr_offset()
{
return 13; // movq r10,#addr; callq (r10)
}
%} %}

View file

@ -832,6 +832,7 @@ static const char *getRegMask(const char *reg_class_name) {
int length = (int)strlen(rc_name) + (int)strlen(mask) + 5; int length = (int)strlen(rc_name) + (int)strlen(mask) + 5;
char *regMask = new char[length]; char *regMask = new char[length];
sprintf(regMask,"%s%s()", rc_name, mask); sprintf(regMask,"%s%s()", rc_name, mask);
delete[] rc_name;
return regMask; return regMask;
} }
} }

View file

@ -191,12 +191,19 @@ static void cost_check(FILE *fp, const char *spaces,
// Macro equivalent to: _kids[0]->valid(FOO) && _kids[1]->valid(BAR) // Macro equivalent to: _kids[0]->valid(FOO) && _kids[1]->valid(BAR)
// //
static void child_test(FILE *fp, MatchList &mList) { static void child_test(FILE *fp, MatchList &mList) {
if( mList._lchild ) // If left child, check it if (mList._lchild) { // If left child, check it
fprintf(fp, "STATE__VALID_CHILD(_kids[0], %s)", ArchDesc::getMachOperEnum(mList._lchild)); const char* lchild_to_upper = ArchDesc::getMachOperEnum(mList._lchild);
if( mList._lchild && mList._rchild ) // If both, add the "&&" fprintf(fp, "STATE__VALID_CHILD(_kids[0], %s)", lchild_to_upper);
delete[] lchild_to_upper;
}
if (mList._lchild && mList._rchild) { // If both, add the "&&"
fprintf(fp, " && "); fprintf(fp, " && ");
if( mList._rchild ) // If right child, check it }
fprintf(fp, "STATE__VALID_CHILD(_kids[1], %s)", ArchDesc::getMachOperEnum(mList._rchild)); if (mList._rchild) { // If right child, check it
const char* rchild_to_upper = ArchDesc::getMachOperEnum(mList._rchild);
fprintf(fp, "STATE__VALID_CHILD(_kids[1], %s)", rchild_to_upper);
delete[] rchild_to_upper;
}
} }
//---------------------------calc_cost----------------------------------------- //---------------------------calc_cost-----------------------------------------
@ -207,12 +214,16 @@ Expr *ArchDesc::calc_cost(FILE *fp, const char *spaces, MatchList &mList, Produc
fprintf(fp, "%sunsigned int c = ", spaces); fprintf(fp, "%sunsigned int c = ", spaces);
Expr *c = new Expr("0"); Expr *c = new Expr("0");
if (mList._lchild) { // If left child, add it in if (mList._lchild) { // If left child, add it in
sprintf(Expr::buffer(), "_kids[0]->_cost[%s]", ArchDesc::getMachOperEnum(mList._lchild)); const char* lchild_to_upper = ArchDesc::getMachOperEnum(mList._lchild);
sprintf(Expr::buffer(), "_kids[0]->_cost[%s]", lchild_to_upper);
c->add(Expr::buffer()); c->add(Expr::buffer());
delete[] lchild_to_upper;
} }
if (mList._rchild) { // If right child, add it in if (mList._rchild) { // If right child, add it in
sprintf(Expr::buffer(), "_kids[1]->_cost[%s]", ArchDesc::getMachOperEnum(mList._rchild)); const char* rchild_to_upper = ArchDesc::getMachOperEnum(mList._rchild);
sprintf(Expr::buffer(), "_kids[1]->_cost[%s]", rchild_to_upper);
c->add(Expr::buffer()); c->add(Expr::buffer());
delete[] rchild_to_upper;
} }
// Add in cost of this rule // Add in cost of this rule
const char *mList_cost = mList.get_cost(); const char *mList_cost = mList.get_cost();
@ -240,7 +251,9 @@ void ArchDesc::gen_match(FILE *fp, MatchList &mList, ProductionState &status, Di
child_test(fp, mList); child_test(fp, mList);
// Only generate predicate test if one exists for this match // Only generate predicate test if one exists for this match
if (predicate_test) { if (predicate_test) {
if( has_child_constraints ) { fprintf(fp," &&\n"); } if (has_child_constraints) {
fprintf(fp," &&\n");
}
fprintf(fp, "%s %s", spaces6, predicate_test); fprintf(fp, "%s %s", spaces6, predicate_test);
} }
// End of outer tests // End of outer tests
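The recurring fix in these adlc hunks is an ownership rule: getMachOperEnum() hands back a freshly new[]-allocated, upper-cased copy of the operand name, so the caller must delete[] it once formatting is done. A sketch of the convention (variable names hypothetical):

// Obtain the enum name, print it, release it -- the pattern applied in
// child_test() and calc_cost() above to plug the leaks.
const char* enum_name = ArchDesc::getMachOperEnum(oper_name); // new[]'d copy
fprintf(fp, "STATE__VALID_CHILD(_kids[0], %s)", enum_name);
delete[] enum_name;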

View file

@ -937,4 +937,6 @@ void Canonicalizer::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {}
void Canonicalizer::do_ProfileCall(ProfileCall* x) {} void Canonicalizer::do_ProfileCall(ProfileCall* x) {}
void Canonicalizer::do_ProfileInvoke(ProfileInvoke* x) {} void Canonicalizer::do_ProfileInvoke(ProfileInvoke* x) {}
void Canonicalizer::do_RuntimeCall(RuntimeCall* x) {} void Canonicalizer::do_RuntimeCall(RuntimeCall* x) {}
void Canonicalizer::do_RangeCheckPredicate(RangeCheckPredicate* x) {}
void Canonicalizer::do_Assert(Assert* x) {}
void Canonicalizer::do_MemBar(MemBar* x) {} void Canonicalizer::do_MemBar(MemBar* x) {}

View file

@ -107,6 +107,8 @@ class Canonicalizer: InstructionVisitor {
virtual void do_ProfileInvoke (ProfileInvoke* x); virtual void do_ProfileInvoke (ProfileInvoke* x);
virtual void do_RuntimeCall (RuntimeCall* x); virtual void do_RuntimeCall (RuntimeCall* x);
virtual void do_MemBar (MemBar* x); virtual void do_MemBar (MemBar* x);
virtual void do_RangeCheckPredicate(RangeCheckPredicate* x);
virtual void do_Assert (Assert* x);
}; };
#endif // SHARE_VM_C1_C1_CANONICALIZER_HPP #endif // SHARE_VM_C1_C1_CANONICALIZER_HPP

View file

@ -166,6 +166,22 @@ class RangeCheckStub: public CodeStub {
#endif // PRODUCT #endif // PRODUCT
}; };
// stub used when predicate fails and deoptimization is needed
class PredicateFailedStub: public CodeStub {
private:
CodeEmitInfo* _info;
public:
PredicateFailedStub(CodeEmitInfo* info);
virtual void emit_code(LIR_Assembler* e);
virtual CodeEmitInfo* info() const { return _info; }
virtual void visit(LIR_OpVisitState* visitor) {
visitor->do_slow_case(_info);
}
#ifndef PRODUCT
virtual void print_name(outputStream* out) const { out->print("PredicateFailedStub"); }
#endif // PRODUCT
};
class DivByZeroStub: public CodeStub { class DivByZeroStub: public CodeStub {
private: private:

View file

@ -33,13 +33,16 @@
#include "c1/c1_ValueStack.hpp" #include "c1/c1_ValueStack.hpp"
#include "code/debugInfoRec.hpp" #include "code/debugInfoRec.hpp"
#include "compiler/compileLog.hpp" #include "compiler/compileLog.hpp"
#include "c1/c1_RangeCheckElimination.hpp"
typedef enum { typedef enum {
_t_compile, _t_compile,
_t_setup, _t_setup,
_t_optimizeIR,
_t_buildIR, _t_buildIR,
_t_optimize_blocks,
_t_optimize_null_checks,
_t_rangeCheckElimination,
_t_emit_lir, _t_emit_lir,
_t_linearScan, _t_linearScan,
_t_lirGeneration, _t_lirGeneration,
@ -52,8 +55,10 @@ typedef enum {
static const char * timer_name[] = { static const char * timer_name[] = {
"compile", "compile",
"setup", "setup",
"optimizeIR",
"buildIR", "buildIR",
"optimize_blocks",
"optimize_null_checks",
"rangeCheckElimination",
"emit_lir", "emit_lir",
"linearScan", "linearScan",
"lirGeneration", "lirGeneration",
@ -159,9 +164,9 @@ void Compilation::build_hir() {
if (UseC1Optimizations) { if (UseC1Optimizations) {
NEEDS_CLEANUP NEEDS_CLEANUP
// optimization // optimization
PhaseTraceTime timeit(_t_optimizeIR); PhaseTraceTime timeit(_t_optimize_blocks);
_hir->optimize(); _hir->optimize_blocks();
} }
_hir->verify(); _hir->verify();
@ -180,13 +185,47 @@ void Compilation::build_hir() {
_hir->compute_code(); _hir->compute_code();
if (UseGlobalValueNumbering) { if (UseGlobalValueNumbering) {
ResourceMark rm; // No resource mark here! LoopInvariantCodeMotion can allocate ValueStack objects.
int instructions = Instruction::number_of_instructions(); int instructions = Instruction::number_of_instructions();
GlobalValueNumbering gvn(_hir); GlobalValueNumbering gvn(_hir);
assert(instructions == Instruction::number_of_instructions(), assert(instructions == Instruction::number_of_instructions(),
"shouldn't have created an instructions"); "shouldn't have created an instructions");
} }
_hir->verify();
#ifndef PRODUCT
if (PrintCFGToFile) {
CFGPrinter::print_cfg(_hir, "Before RangeCheckElimination", true, false);
}
#endif
if (RangeCheckElimination) {
if (_hir->osr_entry() == NULL) {
PhaseTraceTime timeit(_t_rangeCheckElimination);
RangeCheckElimination::eliminate(_hir);
}
}
#ifndef PRODUCT
if (PrintCFGToFile) {
CFGPrinter::print_cfg(_hir, "After RangeCheckElimination", true, false);
}
#endif
if (UseC1Optimizations) {
// loop invariant code motion reorders instructions and range
// check elimination adds new instructions so do null check
// elimination after.
NEEDS_CLEANUP
// optimization
PhaseTraceTime timeit(_t_optimize_null_checks);
_hir->eliminate_null_checks();
}
_hir->verify();
// compute use counts after global value numbering // compute use counts after global value numbering
_hir->compute_use_counts(); _hir->compute_use_counts();
@ -502,6 +541,7 @@ Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* metho
, _next_id(0) , _next_id(0)
, _next_block_id(0) , _next_block_id(0)
, _code(buffer_blob) , _code(buffer_blob)
, _has_access_indexed(false)
, _current_instruction(NULL) , _current_instruction(NULL)
#ifndef PRODUCT #ifndef PRODUCT
, _last_instruction_printed(NULL) , _last_instruction_printed(NULL)
@ -567,7 +607,9 @@ void Compilation::print_timers() {
tty->print_cr(" Detailed C1 Timings"); tty->print_cr(" Detailed C1 Timings");
tty->print_cr(" Setup time: %6.3f s (%4.1f%%)", timers[_t_setup].seconds(), (timers[_t_setup].seconds() / total) * 100.0); tty->print_cr(" Setup time: %6.3f s (%4.1f%%)", timers[_t_setup].seconds(), (timers[_t_setup].seconds() / total) * 100.0);
tty->print_cr(" Build IR: %6.3f s (%4.1f%%)", timers[_t_buildIR].seconds(), (timers[_t_buildIR].seconds() / total) * 100.0); tty->print_cr(" Build IR: %6.3f s (%4.1f%%)", timers[_t_buildIR].seconds(), (timers[_t_buildIR].seconds() / total) * 100.0);
tty->print_cr(" Optimize: %6.3f s (%4.1f%%)", timers[_t_optimizeIR].seconds(), (timers[_t_optimizeIR].seconds() / total) * 100.0); float t_optimizeIR = timers[_t_optimize_blocks].seconds() + timers[_t_optimize_null_checks].seconds();
tty->print_cr(" Optimize: %6.3f s (%4.1f%%)", t_optimizeIR, (t_optimizeIR / total) * 100.0);
tty->print_cr(" RCE: %6.3f s (%4.1f%%)", timers[_t_rangeCheckElimination].seconds(), (timers[_t_rangeCheckElimination].seconds() / total) * 100.0);
tty->print_cr(" Emit LIR: %6.3f s (%4.1f%%)", timers[_t_emit_lir].seconds(), (timers[_t_emit_lir].seconds() / total) * 100.0); tty->print_cr(" Emit LIR: %6.3f s (%4.1f%%)", timers[_t_emit_lir].seconds(), (timers[_t_emit_lir].seconds() / total) * 100.0);
tty->print_cr(" LIR Gen: %6.3f s (%4.1f%%)", timers[_t_lirGeneration].seconds(), (timers[_t_lirGeneration].seconds() / total) * 100.0); tty->print_cr(" LIR Gen: %6.3f s (%4.1f%%)", timers[_t_lirGeneration].seconds(), (timers[_t_lirGeneration].seconds() / total) * 100.0);
tty->print_cr(" Linear Scan: %6.3f s (%4.1f%%)", timers[_t_linearScan].seconds(), (timers[_t_linearScan].seconds() / total) * 100.0); tty->print_cr(" Linear Scan: %6.3f s (%4.1f%%)", timers[_t_linearScan].seconds(), (timers[_t_linearScan].seconds() / total) * 100.0);

View file

@ -26,8 +26,10 @@
#define SHARE_VM_C1_C1_COMPILATION_HPP #define SHARE_VM_C1_C1_COMPILATION_HPP
#include "ci/ciEnv.hpp" #include "ci/ciEnv.hpp"
#include "ci/ciMethodData.hpp"
#include "code/exceptionHandlerTable.hpp" #include "code/exceptionHandlerTable.hpp"
#include "memory/resourceArea.hpp" #include "memory/resourceArea.hpp"
#include "runtime/deoptimization.hpp"
class CompilationResourceObj; class CompilationResourceObj;
class XHandlers; class XHandlers;
@ -85,6 +87,7 @@ class Compilation: public StackObj {
LinearScan* _allocator; LinearScan* _allocator;
CodeOffsets _offsets; CodeOffsets _offsets;
CodeBuffer _code; CodeBuffer _code;
bool _has_access_indexed;
// compilation helpers // compilation helpers
void initialize(); void initialize();
@ -140,6 +143,7 @@ class Compilation: public StackObj {
C1_MacroAssembler* masm() const { return _masm; } C1_MacroAssembler* masm() const { return _masm; }
CodeOffsets* offsets() { return &_offsets; } CodeOffsets* offsets() { return &_offsets; }
Arena* arena() { return _arena; } Arena* arena() { return _arena; }
bool has_access_indexed() { return _has_access_indexed; }
// Instruction ids // Instruction ids
int get_next_id() { return _next_id++; } int get_next_id() { return _next_id++; }
@ -154,6 +158,7 @@ class Compilation: public StackObj {
void set_has_fpu_code(bool f) { _has_fpu_code = f; } void set_has_fpu_code(bool f) { _has_fpu_code = f; }
void set_has_unsafe_access(bool f) { _has_unsafe_access = f; } void set_has_unsafe_access(bool f) { _has_unsafe_access = f; }
void set_would_profile(bool f) { _would_profile = f; } void set_would_profile(bool f) { _would_profile = f; }
void set_has_access_indexed(bool f) { _has_access_indexed = f; }
// Add a set of exception handlers covering the given PC offset // Add a set of exception handlers covering the given PC offset
void add_exception_handlers_for_pco(int pco, XHandlers* exception_handlers); void add_exception_handlers_for_pco(int pco, XHandlers* exception_handlers);
// Statistics gathering // Statistics gathering
@ -233,6 +238,14 @@ class Compilation: public StackObj {
return env()->comp_level() == CompLevel_full_profile && return env()->comp_level() == CompLevel_full_profile &&
C1UpdateMethodData && C1ProfileCheckcasts; C1UpdateMethodData && C1ProfileCheckcasts;
} }
// will compilation make optimistic assumptions that might lead to
// deoptimization and that the runtime will account for?
bool is_optimistic() const {
return !TieredCompilation &&
(RangeCheckElimination || UseLoopInvariantCodeMotion) &&
method()->method_data()->trap_count(Deoptimization::Reason_none) == 0;
}
}; };
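How is_optimistic() is consumed shows up in the GraphBuilder hunks later in this commit; in sketch form, using the same names:

// Keep the pre-branch state alive even for forward branches when
// compiling optimistically, so a predicate inserted before a loop body
// has a valid state to deoptimize with.
ValueStack* state_before =
    (is_bb || compilation()->is_optimistic()) ? copy_state_before() : NULL;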

View file

@ -947,7 +947,9 @@ void GraphBuilder::store_local(ValueStack* state, Value x, int index) {
void GraphBuilder::load_indexed(BasicType type) { void GraphBuilder::load_indexed(BasicType type) {
ValueStack* state_before = copy_state_for_exception(); // In case of in-block code motion in range check elimination
ValueStack* state_before = copy_state_indexed_access();
compilation()->set_has_access_indexed(true);
Value index = ipop(); Value index = ipop();
Value array = apop(); Value array = apop();
Value length = NULL; Value length = NULL;
@ -961,7 +963,9 @@ void GraphBuilder::load_indexed(BasicType type) {
void GraphBuilder::store_indexed(BasicType type) { void GraphBuilder::store_indexed(BasicType type) {
ValueStack* state_before = copy_state_for_exception(); // In case of in-block code motion in range check elimination
ValueStack* state_before = copy_state_indexed_access();
compilation()->set_has_access_indexed(true);
Value value = pop(as_ValueType(type)); Value value = pop(as_ValueType(type));
Value index = ipop(); Value index = ipop();
Value array = apop(); Value array = apop();
@ -1179,7 +1183,9 @@ void GraphBuilder::if_node(Value x, If::Condition cond, Value y, ValueStack* sta
BlockBegin* tsux = block_at(stream()->get_dest()); BlockBegin* tsux = block_at(stream()->get_dest());
BlockBegin* fsux = block_at(stream()->next_bci()); BlockBegin* fsux = block_at(stream()->next_bci());
bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci(); bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci();
Instruction *i = append(new If(x, cond, false, y, tsux, fsux, is_bb ? state_before : NULL, is_bb)); // In case of loop invariant code motion or predicate insertion
// before the body of a loop the state is needed
Instruction *i = append(new If(x, cond, false, y, tsux, fsux, (is_bb || compilation()->is_optimistic()) ? state_before : NULL, is_bb));
assert(i->as_Goto() == NULL || assert(i->as_Goto() == NULL ||
(i->as_Goto()->sux_at(0) == tsux && i->as_Goto()->is_safepoint() == tsux->bci() < stream()->cur_bci()) || (i->as_Goto()->sux_at(0) == tsux && i->as_Goto()->is_safepoint() == tsux->bci() < stream()->cur_bci()) ||
@ -1294,7 +1300,9 @@ void GraphBuilder::table_switch() {
BlockBegin* tsux = block_at(bci() + sw.dest_offset_at(0)); BlockBegin* tsux = block_at(bci() + sw.dest_offset_at(0));
BlockBegin* fsux = block_at(bci() + sw.default_offset()); BlockBegin* fsux = block_at(bci() + sw.default_offset());
bool is_bb = tsux->bci() < bci() || fsux->bci() < bci(); bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
ValueStack* state_before = is_bb ? copy_state_before() : NULL; // In case of loop invariant code motion or predicate insertion
// before the body of a loop the state is needed
ValueStack* state_before = copy_state_if_bb(is_bb);
append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb)); append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
} else { } else {
// collect successors // collect successors
@ -1308,7 +1316,9 @@ void GraphBuilder::table_switch() {
// add default successor // add default successor
if (sw.default_offset() < 0) has_bb = true; if (sw.default_offset() < 0) has_bb = true;
sux->at_put(i, block_at(bci() + sw.default_offset())); sux->at_put(i, block_at(bci() + sw.default_offset()));
ValueStack* state_before = has_bb ? copy_state_before() : NULL; // In case of loop invariant code motion or predicate insertion
// before the body of a loop the state is needed
ValueStack* state_before = copy_state_if_bb(has_bb);
Instruction* res = append(new TableSwitch(ipop(), sux, sw.low_key(), state_before, has_bb)); Instruction* res = append(new TableSwitch(ipop(), sux, sw.low_key(), state_before, has_bb));
#ifdef ASSERT #ifdef ASSERT
if (res->as_Goto()) { if (res->as_Goto()) {
@ -1336,7 +1346,9 @@ void GraphBuilder::lookup_switch() {
BlockBegin* tsux = block_at(bci() + pair.offset()); BlockBegin* tsux = block_at(bci() + pair.offset());
BlockBegin* fsux = block_at(bci() + sw.default_offset()); BlockBegin* fsux = block_at(bci() + sw.default_offset());
bool is_bb = tsux->bci() < bci() || fsux->bci() < bci(); bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
ValueStack* state_before = is_bb ? copy_state_before() : NULL; // In case of loop invariant code motion or predicate insertion
// before the body of a loop the state is needed
ValueStack* state_before = copy_state_if_bb(is_bb);
append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb)); append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
} else { } else {
// collect successors & keys // collect successors & keys
@ -1353,7 +1365,9 @@ void GraphBuilder::lookup_switch() {
// add default successor // add default successor
if (sw.default_offset() < 0) has_bb = true; if (sw.default_offset() < 0) has_bb = true;
sux->at_put(i, block_at(bci() + sw.default_offset())); sux->at_put(i, block_at(bci() + sw.default_offset()));
ValueStack* state_before = has_bb ? copy_state_before() : NULL; // In case of loop invariant code motion or predicate insertion
// before the body of a loop the state is needed
ValueStack* state_before = copy_state_if_bb(has_bb);
Instruction* res = append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb)); Instruction* res = append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb));
#ifdef ASSERT #ifdef ASSERT
if (res->as_Goto()) { if (res->as_Goto()) {

View file

@ -301,6 +301,8 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
ValueStack* copy_state_exhandling(); ValueStack* copy_state_exhandling();
ValueStack* copy_state_for_exception_with_bci(int bci); ValueStack* copy_state_for_exception_with_bci(int bci);
ValueStack* copy_state_for_exception(); ValueStack* copy_state_for_exception();
ValueStack* copy_state_if_bb(bool is_bb) { return (is_bb || compilation()->is_optimistic()) ? copy_state_before() : NULL; }
ValueStack* copy_state_indexed_access() { return compilation()->is_optimistic() ? copy_state_before() : copy_state_for_exception(); }
// //
// Inlining support // Inlining support

View file

@ -182,13 +182,14 @@ bool IRScopeDebugInfo::should_reexecute() {
// Implementation of CodeEmitInfo // Implementation of CodeEmitInfo
// Stack must be NON-null // Stack must be NON-null
CodeEmitInfo::CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers) CodeEmitInfo::CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers, bool deoptimize_on_exception)
: _scope(stack->scope()) : _scope(stack->scope())
, _scope_debug_info(NULL) , _scope_debug_info(NULL)
, _oop_map(NULL) , _oop_map(NULL)
, _stack(stack) , _stack(stack)
, _exception_handlers(exception_handlers) , _exception_handlers(exception_handlers)
, _is_method_handle_invoke(false) { , _is_method_handle_invoke(false)
, _deoptimize_on_exception(deoptimize_on_exception) {
assert(_stack != NULL, "must be non null"); assert(_stack != NULL, "must be non null");
} }
@ -199,7 +200,8 @@ CodeEmitInfo::CodeEmitInfo(CodeEmitInfo* info, ValueStack* stack)
, _scope_debug_info(NULL) , _scope_debug_info(NULL)
, _oop_map(NULL) , _oop_map(NULL)
, _stack(stack == NULL ? info->_stack : stack) , _stack(stack == NULL ? info->_stack : stack)
, _is_method_handle_invoke(info->_is_method_handle_invoke) { , _is_method_handle_invoke(info->_is_method_handle_invoke)
, _deoptimize_on_exception(info->_deoptimize_on_exception) {
// deep copy of exception handlers // deep copy of exception handlers
if (info->_exception_handlers != NULL) { if (info->_exception_handlers != NULL) {
@ -239,7 +241,7 @@ IR::IR(Compilation* compilation, ciMethod* method, int osr_bci) :
} }
void IR::optimize() { void IR::optimize_blocks() {
Optimizer opt(this); Optimizer opt(this);
if (!compilation()->profile_branches()) { if (!compilation()->profile_branches()) {
if (DoCEE) { if (DoCEE) {
@ -257,6 +259,10 @@ void IR::optimize() {
#endif #endif
} }
} }
}
void IR::eliminate_null_checks() {
Optimizer opt(this);
if (EliminateNullChecks) { if (EliminateNullChecks) {
opt.eliminate_null_checks(); opt.eliminate_null_checks();
#ifndef PRODUCT #ifndef PRODUCT
@ -429,6 +435,7 @@ class ComputeLinearScanOrder : public StackObj {
BlockList _loop_end_blocks; // list of all loop end blocks collected during count_edges BlockList _loop_end_blocks; // list of all loop end blocks collected during count_edges
BitMap2D _loop_map; // two-dimensional bit set: a bit is set if a block is contained in a loop BitMap2D _loop_map; // two-dimensional bit set: a bit is set if a block is contained in a loop
BlockList _work_list; // temporary list (used in mark_loops and compute_order) BlockList _work_list; // temporary list (used in mark_loops and compute_order)
BlockList _loop_headers;
Compilation* _compilation; Compilation* _compilation;
@ -594,6 +601,7 @@ void ComputeLinearScanOrder::count_edges(BlockBegin* cur, BlockBegin* parent) {
TRACE_LINEAR_SCAN(3, tty->print_cr("Block B%d is loop header of loop %d", cur->block_id(), _num_loops)); TRACE_LINEAR_SCAN(3, tty->print_cr("Block B%d is loop header of loop %d", cur->block_id(), _num_loops));
cur->set_loop_index(_num_loops); cur->set_loop_index(_num_loops);
_loop_headers.append(cur);
_num_loops++; _num_loops++;
} }
@ -656,6 +664,16 @@ void ComputeLinearScanOrder::clear_non_natural_loops(BlockBegin* start_block) {
// -> this is not a natural loop, so ignore it // -> this is not a natural loop, so ignore it
TRACE_LINEAR_SCAN(2, tty->print_cr("Loop %d is non-natural, so it is ignored", i)); TRACE_LINEAR_SCAN(2, tty->print_cr("Loop %d is non-natural, so it is ignored", i));
BlockBegin *loop_header = _loop_headers.at(i);
assert(loop_header->is_set(BlockBegin::linear_scan_loop_header_flag), "Must be loop header");
for (int j = 0; j < loop_header->number_of_preds(); j++) {
BlockBegin *pred = loop_header->pred_at(j);
pred->clear(BlockBegin::linear_scan_loop_end_flag);
}
loop_header->clear(BlockBegin::linear_scan_loop_header_flag);
for (int block_id = _max_block_id - 1; block_id >= 0; block_id--) { for (int block_id = _max_block_id - 1; block_id >= 0; block_id--) {
clear_block_in_loop(i, block_id); clear_block_in_loop(i, block_id);
} }
@ -729,9 +747,20 @@ void ComputeLinearScanOrder::compute_dominator(BlockBegin* cur, BlockBegin* pare
} else if (!(cur->is_set(BlockBegin::linear_scan_loop_header_flag) && parent->is_set(BlockBegin::linear_scan_loop_end_flag))) { } else if (!(cur->is_set(BlockBegin::linear_scan_loop_header_flag) && parent->is_set(BlockBegin::linear_scan_loop_end_flag))) {
TRACE_LINEAR_SCAN(4, tty->print_cr("DOM: computing dominator of B%d: common dominator of B%d and B%d is B%d", cur->block_id(), parent->block_id(), cur->dominator()->block_id(), common_dominator(cur->dominator(), parent)->block_id())); TRACE_LINEAR_SCAN(4, tty->print_cr("DOM: computing dominator of B%d: common dominator of B%d and B%d is B%d", cur->block_id(), parent->block_id(), cur->dominator()->block_id(), common_dominator(cur->dominator(), parent)->block_id()));
assert(cur->number_of_preds() > 1, ""); // Does not hold for exception blocks
assert(cur->number_of_preds() > 1 || cur->is_set(BlockBegin::exception_entry_flag), "");
cur->set_dominator(common_dominator(cur->dominator(), parent)); cur->set_dominator(common_dominator(cur->dominator(), parent));
} }
// Additional edge to xhandler of all our successors
// range check elimination requires that the state at the end of a
// block be valid in every block it dominates so cur must dominate
// the exception handlers of its successors.
int num_cur_xhandler = cur->number_of_exception_handlers();
for (int j = 0; j < num_cur_xhandler; j++) {
BlockBegin* xhandler = cur->exception_handler_at(j);
compute_dominator(xhandler, parent);
}
} }
@ -898,7 +927,6 @@ void ComputeLinearScanOrder::compute_order(BlockBegin* start_block) {
num_sux = cur->number_of_exception_handlers(); num_sux = cur->number_of_exception_handlers();
for (i = 0; i < num_sux; i++) { for (i = 0; i < num_sux; i++) {
BlockBegin* sux = cur->exception_handler_at(i); BlockBegin* sux = cur->exception_handler_at(i);
compute_dominator(sux, cur);
if (ready_for_processing(sux)) { if (ready_for_processing(sux)) {
sort_into_work_list(sux); sort_into_work_list(sux);
} }
@ -918,8 +946,23 @@ bool ComputeLinearScanOrder::compute_dominators_iter() {
BlockBegin* dominator = block->pred_at(0); BlockBegin* dominator = block->pred_at(0);
int num_preds = block->number_of_preds(); int num_preds = block->number_of_preds();
for (int i = 1; i < num_preds; i++) {
dominator = common_dominator(dominator, block->pred_at(i)); TRACE_LINEAR_SCAN(4, tty->print_cr("DOM: Processing B%d", block->block_id()));
for (int j = 0; j < num_preds; j++) {
BlockBegin *pred = block->pred_at(j);
TRACE_LINEAR_SCAN(4, tty->print_cr(" DOM: Subrocessing B%d", pred->block_id()));
if (block->is_set(BlockBegin::exception_entry_flag)) {
dominator = common_dominator(dominator, pred);
int num_pred_preds = pred->number_of_preds();
for (int k = 0; k < num_pred_preds; k++) {
dominator = common_dominator(dominator, pred->pred_at(k));
}
} else {
dominator = common_dominator(dominator, pred);
}
} }
if (dominator != block->dominator()) { if (dominator != block->dominator()) {
@ -946,6 +989,21 @@ void ComputeLinearScanOrder::compute_dominators() {
// check that dominators are correct // check that dominators are correct
assert(!compute_dominators_iter(), "fix point not reached"); assert(!compute_dominators_iter(), "fix point not reached");
// Add Blocks to dominates-Array
int num_blocks = _linear_scan_order->length();
for (int i = 0; i < num_blocks; i++) {
BlockBegin* block = _linear_scan_order->at(i);
BlockBegin *dom = block->dominator();
if (dom) {
assert(dom->dominator_depth() != -1, "Dominator must have been visited before");
dom->dominates()->append(block);
block->set_dominator_depth(dom->dominator_depth() + 1);
} else {
block->set_dominator_depth(0);
}
}
} }
@ -1032,7 +1090,7 @@ void ComputeLinearScanOrder::verify() {
BlockBegin* sux = cur->sux_at(j); BlockBegin* sux = cur->sux_at(j);
assert(sux->linear_scan_number() >= 0 && sux->linear_scan_number() == _linear_scan_order->index_of(sux), "incorrect linear_scan_number"); assert(sux->linear_scan_number() >= 0 && sux->linear_scan_number() == _linear_scan_order->index_of(sux), "incorrect linear_scan_number");
if (!cur->is_set(BlockBegin::linear_scan_loop_end_flag)) { if (!sux->is_set(BlockBegin::backward_branch_target_flag)) {
assert(cur->linear_scan_number() < sux->linear_scan_number(), "invalid order"); assert(cur->linear_scan_number() < sux->linear_scan_number(), "invalid order");
} }
if (cur->loop_depth() == sux->loop_depth()) { if (cur->loop_depth() == sux->loop_depth()) {
@ -1044,7 +1102,7 @@ void ComputeLinearScanOrder::verify() {
BlockBegin* pred = cur->pred_at(j); BlockBegin* pred = cur->pred_at(j);
assert(pred->linear_scan_number() >= 0 && pred->linear_scan_number() == _linear_scan_order->index_of(pred), "incorrect linear_scan_number"); assert(pred->linear_scan_number() >= 0 && pred->linear_scan_number() == _linear_scan_order->index_of(pred), "incorrect linear_scan_number");
if (!cur->is_set(BlockBegin::linear_scan_loop_header_flag)) { if (!cur->is_set(BlockBegin::backward_branch_target_flag)) {
assert(cur->linear_scan_number() > pred->linear_scan_number(), "invalid order"); assert(cur->linear_scan_number() > pred->linear_scan_number(), "invalid order");
} }
if (cur->loop_depth() == pred->loop_depth()) { if (cur->loop_depth() == pred->loop_depth()) {
@ -1060,7 +1118,8 @@ void ComputeLinearScanOrder::verify() {
} else { } else {
assert(cur->dominator() != NULL, "all but first block must have dominator"); assert(cur->dominator() != NULL, "all but first block must have dominator");
} }
assert(cur->number_of_preds() != 1 || cur->dominator() == cur->pred_at(0), "Single predecessor must also be dominator"); // Assertion does not hold for exception handlers
assert(cur->number_of_preds() != 1 || cur->dominator() == cur->pred_at(0) || cur->is_set(BlockBegin::exception_entry_flag), "Single predecessor must also be dominator");
} }
// check that all loops are continuous // check that all loops are continuous
@ -1249,9 +1308,22 @@ class PredecessorValidator : public BlockClosure {
} }
}; };
class VerifyBlockBeginField : public BlockClosure {
public:
virtual void block_do(BlockBegin *block) {
for ( Instruction *cur = block; cur != NULL; cur = cur->next()) {
assert(cur->block() == block, "Block begin is not correct");
}
}
};
void IR::verify() { void IR::verify() {
#ifdef ASSERT #ifdef ASSERT
PredecessorValidator pv(this); PredecessorValidator pv(this);
VerifyBlockBeginField verifier;
this->iterate_postorder(&verifier);
#endif #endif
} }
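The dominates()/dominator_depth() bookkeeping added above follows the usual recurrence: the start block has depth 0 and every other block sits one level below its immediate dominator. In sketch form:

// depth(start) = 0; depth(b) = depth(idom(b)) + 1, filled in linear-scan
// order so a block's dominator is always processed first.
int depth = (dom != NULL) ? dom->dominator_depth() + 1 : 0;
block->set_dominator_depth(depth);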

View file

@ -254,6 +254,7 @@ class CodeEmitInfo: public CompilationResourceObj {
OopMap* _oop_map; OopMap* _oop_map;
ValueStack* _stack; // used by deoptimization (contains also monitors ValueStack* _stack; // used by deoptimization (contains also monitors
bool _is_method_handle_invoke; // true if the associated call site is a MethodHandle call site. bool _is_method_handle_invoke; // true if the associated call site is a MethodHandle call site.
bool _deoptimize_on_exception;
FrameMap* frame_map() const { return scope()->compilation()->frame_map(); } FrameMap* frame_map() const { return scope()->compilation()->frame_map(); }
Compilation* compilation() const { return scope()->compilation(); } Compilation* compilation() const { return scope()->compilation(); }
@ -261,7 +262,7 @@ class CodeEmitInfo: public CompilationResourceObj {
public: public:
// use scope from ValueStack // use scope from ValueStack
CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers); CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers, bool deoptimize_on_exception = false);
// make a copy // make a copy
CodeEmitInfo(CodeEmitInfo* info, ValueStack* stack = NULL); CodeEmitInfo(CodeEmitInfo* info, ValueStack* stack = NULL);
@ -272,6 +273,7 @@ class CodeEmitInfo: public CompilationResourceObj {
IRScope* scope() const { return _scope; } IRScope* scope() const { return _scope; }
XHandlers* exception_handlers() const { return _exception_handlers; } XHandlers* exception_handlers() const { return _exception_handlers; }
ValueStack* stack() const { return _stack; } ValueStack* stack() const { return _stack; }
bool deoptimize_on_exception() const { return _deoptimize_on_exception; }
void add_register_oop(LIR_Opr opr); void add_register_oop(LIR_Opr opr);
void record_debug_info(DebugInformationRecorder* recorder, int pc_offset); void record_debug_info(DebugInformationRecorder* recorder, int pc_offset);
@ -309,7 +311,8 @@ class IR: public CompilationResourceObj {
int max_stack() const { return top_scope()->max_stack(); } // expensive int max_stack() const { return top_scope()->max_stack(); } // expensive
// ir manipulation // ir manipulation
void optimize(); void optimize_blocks();
void eliminate_null_checks();
void compute_predecessors(); void compute_predecessors();
void split_critical_edges(); void split_critical_edges();
void compute_code(); void compute_code();

View file

@ -34,6 +34,15 @@
// Implementation of Instruction // Implementation of Instruction
int Instruction::dominator_depth() {
int result = -1;
if (block()) {
result = block()->dominator_depth();
}
assert(result != -1 || this->as_Local(), "Only locals have dominator depth -1");
return result;
}
Instruction::Condition Instruction::mirror(Condition cond) { Instruction::Condition Instruction::mirror(Condition cond) {
switch (cond) { switch (cond) {
case eql: return eql; case eql: return eql;
@ -42,6 +51,8 @@ Instruction::Condition Instruction::mirror(Condition cond) {
case leq: return geq; case leq: return geq;
case gtr: return lss; case gtr: return lss;
case geq: return leq; case geq: return leq;
case aeq: return beq;
case beq: return aeq;
} }
ShouldNotReachHere(); ShouldNotReachHere();
return eql; return eql;
@ -56,6 +67,8 @@ Instruction::Condition Instruction::negate(Condition cond) {
case leq: return gtr; case leq: return gtr;
case gtr: return leq; case gtr: return leq;
case geq: return lss; case geq: return lss;
case aeq: assert(false, "Above equal cannot be negated");
case beq: assert(false, "Below equal cannot be negated");
} }
ShouldNotReachHere(); ShouldNotReachHere();
return eql; return eql;
@ -70,10 +83,10 @@ void Instruction::update_exception_state(ValueStack* state) {
} }
} }
// Prev without need to have BlockBegin
Instruction* Instruction::prev(BlockBegin* block) { Instruction* Instruction::prev() {
Instruction* p = NULL; Instruction* p = NULL;
Instruction* q = block; Instruction* q = block();
while (q != this) { while (q != this) {
assert(q != NULL, "this is not in the block's instruction list"); assert(q != NULL, "this is not in the block's instruction list");
p = q; q = q->next(); p = q; q = q->next();
@ -122,6 +135,9 @@ void Instruction::print(InstructionPrinter& ip) {
// perform constant and interval tests on index value // perform constant and interval tests on index value
bool AccessIndexed::compute_needs_range_check() { bool AccessIndexed::compute_needs_range_check() {
if (length()) {
Constant* clength = length()->as_Constant(); Constant* clength = length()->as_Constant();
Constant* cindex = index()->as_Constant(); Constant* cindex = index()->as_Constant();
if (clength && cindex) { if (clength && cindex) {
@ -131,6 +147,12 @@ bool AccessIndexed::compute_needs_range_check() {
return false; return false;
} }
} }
}
if (!this->check_flag(NeedsRangeCheckFlag)) {
return false;
}
return true; return true;
} }
@ -631,19 +653,25 @@ void BlockBegin::substitute_sux(BlockBegin* old_sux, BlockBegin* new_sux) {
// of the inserted block, without recomputing the values of the other blocks // of the inserted block, without recomputing the values of the other blocks
// in the CFG. Therefore the value of "depth_first_number" in BlockBegin becomes meaningless. // in the CFG. Therefore the value of "depth_first_number" in BlockBegin becomes meaningless.
BlockBegin* BlockBegin::insert_block_between(BlockBegin* sux) { BlockBegin* BlockBegin::insert_block_between(BlockBegin* sux) {
BlockBegin* new_sux = new BlockBegin(end()->state()->bci()); int bci = sux->bci();
// critical edge splitting may introduce a goto after an if and array
// bound check elimination may insert a predicate between the if and
// goto. The bci of the goto can't be the one of the if otherwise
// the state and bci are inconsistent and a deoptimization triggered
// by the predicate would lead to incorrect execution/a crash.
BlockBegin* new_sux = new BlockBegin(bci);
// mark this block (special treatment when block order is computed) // mark this block (special treatment when block order is computed)
new_sux->set(critical_edge_split_flag); new_sux->set(critical_edge_split_flag);
// This goto is not a safepoint. // This goto is not a safepoint.
Goto* e = new Goto(sux, false); Goto* e = new Goto(sux, false);
new_sux->set_next(e, end()->state()->bci()); new_sux->set_next(e, bci);
new_sux->set_end(e); new_sux->set_end(e);
// setup states // setup states
ValueStack* s = end()->state(); ValueStack* s = end()->state();
new_sux->set_state(s->copy()); new_sux->set_state(s->copy(s->kind(), bci));
e->set_state(s->copy()); e->set_state(s->copy(s->kind(), bci));
assert(new_sux->state()->locals_size() == s->locals_size(), "local size mismatch!"); assert(new_sux->state()->locals_size() == s->locals_size(), "local size mismatch!");
assert(new_sux->state()->stack_size() == s->stack_size(), "stack size mismatch!"); assert(new_sux->state()->stack_size() == s->stack_size(), "stack size mismatch!");
assert(new_sux->state()->locks_size() == s->locks_size(), "locks size mismatch!"); assert(new_sux->state()->locks_size() == s->locks_size(), "locks size mismatch!");
@ -960,15 +988,14 @@ void BlockEnd::set_begin(BlockBegin* begin) {
BlockList* sux = NULL; BlockList* sux = NULL;
if (begin != NULL) { if (begin != NULL) {
sux = begin->successors(); sux = begin->successors();
} else if (_begin != NULL) { } else if (this->begin() != NULL) {
// copy our sux list // copy our sux list
BlockList* sux = new BlockList(_begin->number_of_sux()); BlockList* sux = new BlockList(this->begin()->number_of_sux());
for (int i = 0; i < _begin->number_of_sux(); i++) { for (int i = 0; i < this->begin()->number_of_sux(); i++) {
sux->append(_begin->sux_at(i)); sux->append(this->begin()->sux_at(i));
} }
} }
_sux = sux; _sux = sux;
_begin = begin;
} }
@ -1008,7 +1035,38 @@ int Phi::operand_count() const {
} }
} }
#ifdef ASSERT
// Constructor of Assert
Assert::Assert(Value x, Condition cond, bool unordered_is_true, Value y) : Instruction(illegalType)
, _x(x)
, _cond(cond)
, _y(y)
{
set_flag(UnorderedIsTrueFlag, unordered_is_true);
assert(x->type()->tag() == y->type()->tag(), "types must match");
pin();
stringStream strStream;
Compilation::current()->method()->print_name(&strStream);
stringStream strStream1;
InstructionPrinter ip1(1, &strStream1);
ip1.print_instr(x);
stringStream strStream2;
InstructionPrinter ip2(1, &strStream2);
ip2.print_instr(y);
stringStream ss;
ss.print("Assertion %s %s %s in method %s", strStream1.as_string(), ip2.cond_name(cond), strStream2.as_string(), strStream.as_string());
_message = ss.as_string();
}
#endif
void RangeCheckPredicate::check_state() {
assert(state()->kind() != ValueStack::EmptyExceptionState && state()->kind() != ValueStack::ExceptionState, "will deopt with empty state");
}
void ProfileInvoke::state_values_do(ValueVisitor* f) { void ProfileInvoke::state_values_do(ValueVisitor* f) {
if (state() != NULL) state()->values_do(f); if (state() != NULL) state()->values_do(f);

View file

@ -110,6 +110,8 @@ class ProfileCall;
class ProfileInvoke; class ProfileInvoke;
class RuntimeCall; class RuntimeCall;
class MemBar; class MemBar;
class RangeCheckPredicate;
class Assert;
// A Value is a reference to the instruction creating the value // A Value is a reference to the instruction creating the value
typedef Instruction* Value; typedef Instruction* Value;
@ -210,6 +212,10 @@ class InstructionVisitor: public StackObj {
virtual void do_ProfileInvoke (ProfileInvoke* x) = 0; virtual void do_ProfileInvoke (ProfileInvoke* x) = 0;
virtual void do_RuntimeCall (RuntimeCall* x) = 0; virtual void do_RuntimeCall (RuntimeCall* x) = 0;
virtual void do_MemBar (MemBar* x) = 0; virtual void do_MemBar (MemBar* x) = 0;
virtual void do_RangeCheckPredicate(RangeCheckPredicate* x) = 0;
#ifdef ASSERT
virtual void do_Assert (Assert* x) = 0;
#endif
}; };
@ -306,8 +312,9 @@ class Instruction: public CompilationResourceObj {
void update_exception_state(ValueStack* state); void update_exception_state(ValueStack* state);
//protected: protected:
public: BlockBegin* _block; // Block that contains this instruction
void set_type(ValueType* type) { void set_type(ValueType* type) {
assert(type != NULL, "type must exist"); assert(type != NULL, "type must exist");
_type = type; _type = type;
@ -342,6 +349,9 @@ class Instruction: public CompilationResourceObj {
ThrowIncompatibleClassChangeErrorFlag, ThrowIncompatibleClassChangeErrorFlag,
ProfileMDOFlag, ProfileMDOFlag,
IsLinkedInBlockFlag, IsLinkedInBlockFlag,
NeedsRangeCheckFlag,
InWorkListFlag,
DeoptimizeOnException,
InstructionLastFlag InstructionLastFlag
}; };
@ -351,7 +361,7 @@ class Instruction: public CompilationResourceObj {
// 'globally' used condition values // 'globally' used condition values
enum Condition { enum Condition {
eql, neq, lss, leq, gtr, geq eql, neq, lss, leq, gtr, geq, aeq, beq
}; };
// Instructions may be pinned for many reasons and under certain conditions // Instructions may be pinned for many reasons and under certain conditions
@ -381,6 +391,7 @@ class Instruction: public CompilationResourceObj {
, _pin_state(0) , _pin_state(0)
, _type(type) , _type(type)
, _next(NULL) , _next(NULL)
, _block(NULL)
, _subst(NULL) , _subst(NULL)
, _flags(0) , _flags(0)
, _operand(LIR_OprFact::illegalOpr) , _operand(LIR_OprFact::illegalOpr)
@ -399,11 +410,13 @@ class Instruction: public CompilationResourceObj {
int printable_bci() const { assert(has_printable_bci(), "_printable_bci should have been set"); return _printable_bci; } int printable_bci() const { assert(has_printable_bci(), "_printable_bci should have been set"); return _printable_bci; }
void set_printable_bci(int bci) { _printable_bci = bci; } void set_printable_bci(int bci) { _printable_bci = bci; }
#endif #endif
int dominator_depth();
int use_count() const { return _use_count; } int use_count() const { return _use_count; }
int pin_state() const { return _pin_state; } int pin_state() const { return _pin_state; }
bool is_pinned() const { return _pin_state != 0 || PinAllInstructions; } bool is_pinned() const { return _pin_state != 0 || PinAllInstructions; }
ValueType* type() const { return _type; } ValueType* type() const { return _type; }
Instruction* prev(BlockBegin* block); // use carefully, expensive operation BlockBegin *block() const { return _block; }
Instruction* prev(); // use carefully, expensive operation
Instruction* next() const { return _next; } Instruction* next() const { return _next; }
bool has_subst() const { return _subst != NULL; } bool has_subst() const { return _subst != NULL; }
Instruction* subst() { return _subst == NULL ? this : _subst->subst(); } Instruction* subst() { return _subst == NULL ? this : _subst->subst(); }
@ -432,6 +445,9 @@ class Instruction: public CompilationResourceObj {
assert(as_BlockEnd() == NULL, "BlockEnd instructions must have no next"); assert(as_BlockEnd() == NULL, "BlockEnd instructions must have no next");
assert(next->can_be_linked(), "shouldn't link these instructions into list"); assert(next->can_be_linked(), "shouldn't link these instructions into list");
BlockBegin *block = this->block();
next->_block = block;
next->set_flag(Instruction::IsLinkedInBlockFlag, true); next->set_flag(Instruction::IsLinkedInBlockFlag, true);
_next = next; _next = next;
return next; return next;
@ -444,6 +460,29 @@ class Instruction: public CompilationResourceObj {
return set_next(next); return set_next(next);
} }
// when blocks are merged
void fixup_block_pointers() {
Instruction *cur = next()->next(); // next()'s block is set in set_next
while (cur && cur->_block != block()) {
cur->_block = block();
cur = cur->next();
}
}
Instruction *insert_after(Instruction *i) {
Instruction* n = _next;
set_next(i);
i->set_next(n);
return _next;
}
Instruction *insert_after_same_bci(Instruction *i) {
#ifndef PRODUCT
i->set_printable_bci(printable_bci());
#endif
return insert_after(i);
}
void set_subst(Instruction* subst) { void set_subst(Instruction* subst) {
assert(subst == NULL || assert(subst == NULL ||
type()->base() == subst->type()->base() || type()->base() == subst->type()->base() ||
@ -452,6 +491,7 @@ class Instruction: public CompilationResourceObj {
} }
void set_exception_handlers(XHandlers *xhandlers) { _exception_handlers = xhandlers; } void set_exception_handlers(XHandlers *xhandlers) { _exception_handlers = xhandlers; }
void set_exception_state(ValueStack* s) { check_state(s); _exception_state = s; } void set_exception_state(ValueStack* s) { check_state(s); _exception_state = s; }
void set_state_before(ValueStack* s) { check_state(s); _state_before = s; }
// machine-specifics // machine-specifics
void set_operand(LIR_Opr operand) { assert(operand != LIR_OprFact::illegalOpr, "operand must exist"); _operand = operand; } void set_operand(LIR_Opr operand) { assert(operand != LIR_OprFact::illegalOpr, "operand must exist"); _operand = operand; }
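The insert_after helpers above are the splicing primitive an optimizer needs to place new instructions inside a block's linked list; hypothetical usage in the style of range check elimination:

// Splice new_instruction in behind cur, reusing cur's printable bci so
// debug output stays attached to the originating bytecode.
Instruction* tail = cur->insert_after_same_bci(new_instruction);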
@ -509,6 +549,11 @@ class Instruction: public CompilationResourceObj {
virtual ExceptionObject* as_ExceptionObject() { return NULL; } virtual ExceptionObject* as_ExceptionObject() { return NULL; }
virtual UnsafeOp* as_UnsafeOp() { return NULL; } virtual UnsafeOp* as_UnsafeOp() { return NULL; }
virtual ProfileInvoke* as_ProfileInvoke() { return NULL; } virtual ProfileInvoke* as_ProfileInvoke() { return NULL; }
virtual RangeCheckPredicate* as_RangeCheckPredicate() { return NULL; }
#ifdef ASSERT
virtual Assert* as_Assert() { return NULL; }
#endif
virtual void visit(InstructionVisitor* v) = 0; virtual void visit(InstructionVisitor* v) = 0;
@ -570,7 +615,6 @@ class AssertValues: public ValueVisitor {
LEAF(Phi, Instruction) LEAF(Phi, Instruction)
private: private:
BlockBegin* _block; // the block to which the phi function belongs
int _pf_flags; // the flags of the phi function int _pf_flags; // the flags of the phi function
int _index; // to value on operand stack (index < 0) or to local int _index; // to value on operand stack (index < 0) or to local
public: public:
@ -578,9 +622,9 @@ LEAF(Phi, Instruction)
Phi(ValueType* type, BlockBegin* b, int index) Phi(ValueType* type, BlockBegin* b, int index)
: Instruction(type->base()) : Instruction(type->base())
, _pf_flags(0) , _pf_flags(0)
, _block(b)
, _index(index) , _index(index)
{ {
_block = b;
NOT_PRODUCT(set_printable_bci(Value(b)->printable_bci())); NOT_PRODUCT(set_printable_bci(Value(b)->printable_bci()));
if (type->is_illegal()) { if (type->is_illegal()) {
make_illegal(); make_illegal();
@ -603,8 +647,6 @@ LEAF(Phi, Instruction)
Value operand_at(int i) const; Value operand_at(int i) const;
int operand_count() const; int operand_count() const;
BlockBegin* block() const { return _block; }
void set(Flag f) { _pf_flags |= f; } void set(Flag f) { _pf_flags |= f; }
void clear(Flag f) { _pf_flags &= ~f; } void clear(Flag f) { _pf_flags &= ~f; }
bool is_set(Flag f) const { return (_pf_flags & f) != 0; } bool is_set(Flag f) const { return (_pf_flags & f) != 0; }
@ -670,6 +712,7 @@ LEAF(Constant, Instruction)
pin(); pin();
} }
// generic
virtual bool can_trap() const { return state_before() != NULL; } virtual bool can_trap() const { return state_before() != NULL; }
virtual void input_values_do(ValueVisitor* f) { /* no values */ } virtual void input_values_do(ValueVisitor* f) { /* no values */ }
@ -852,6 +895,7 @@ BASE(AccessIndexed, AccessArray)
, _length(length) , _length(length)
, _elt_type(elt_type) , _elt_type(elt_type)
{ {
set_flag(Instruction::NeedsRangeCheckFlag, true);
ASSERT_VALUES ASSERT_VALUES
} }
@ -860,6 +904,7 @@ BASE(AccessIndexed, AccessArray)
Value length() const { return _length; } Value length() const { return _length; }
BasicType elt_type() const { return _elt_type; } BasicType elt_type() const { return _elt_type; }
void clear_length() { _length = NULL; }
// perform elimination of range checks involving constants // perform elimination of range checks involving constants
bool compute_needs_range_check(); bool compute_needs_range_check();
@ -1524,6 +1569,7 @@ LEAF(BlockBegin, StateSplit)
int _bci; // start-bci of block int _bci; // start-bci of block
int _depth_first_number; // number of this block in a depth-first ordering int _depth_first_number; // number of this block in a depth-first ordering
int _linear_scan_number; // number of this block in linear-scan ordering int _linear_scan_number; // number of this block in linear-scan ordering
int _dominator_depth;
int _loop_depth; // the loop nesting level of this block int _loop_depth; // the loop nesting level of this block
int _loop_index; // number of the innermost loop of this block int _loop_index; // number of the innermost loop of this block
int _flags; // the flags associated with this block int _flags; // the flags associated with this block
@ -1535,6 +1581,7 @@ LEAF(BlockBegin, StateSplit)
// SSA specific fields: (factor out later) // SSA specific fields: (factor out later)
BlockList _successors; // the successors of this block BlockList _successors; // the successors of this block
BlockList _predecessors; // the predecessors of this block BlockList _predecessors; // the predecessors of this block
BlockList _dominates; // list of blocks that are dominated by this block
BlockBegin* _dominator; // the dominator of this block BlockBegin* _dominator; // the dominator of this block
// SSA specific ends // SSA specific ends
BlockEnd* _end; // the last instruction of this block BlockEnd* _end; // the last instruction of this block
@ -1583,10 +1630,12 @@ LEAF(BlockBegin, StateSplit)
, _linear_scan_number(-1) , _linear_scan_number(-1)
, _loop_depth(0) , _loop_depth(0)
, _flags(0) , _flags(0)
, _dominator_depth(-1)
, _dominator(NULL) , _dominator(NULL)
, _end(NULL) , _end(NULL)
, _predecessors(2) , _predecessors(2)
, _successors(2) , _successors(2)
, _dominates(2)
, _exception_handlers(1) , _exception_handlers(1)
, _exception_states(NULL) , _exception_states(NULL)
, _exception_handler_pco(-1) , _exception_handler_pco(-1)
@ -1603,6 +1652,7 @@ LEAF(BlockBegin, StateSplit)
, _total_preds(0) , _total_preds(0)
, _stores_to_locals() , _stores_to_locals()
{ {
_block = this;
#ifndef PRODUCT #ifndef PRODUCT
set_printable_bci(bci); set_printable_bci(bci);
#endif #endif
@ -1612,8 +1662,10 @@ LEAF(BlockBegin, StateSplit)
int block_id() const { return _block_id; } int block_id() const { return _block_id; }
int bci() const { return _bci; } int bci() const { return _bci; }
BlockList* successors() { return &_successors; } BlockList* successors() { return &_successors; }
BlockList* dominates() { return &_dominates; }
BlockBegin* dominator() const { return _dominator; } BlockBegin* dominator() const { return _dominator; }
int loop_depth() const { return _loop_depth; } int loop_depth() const { return _loop_depth; }
int dominator_depth() const { return _dominator_depth; }
int depth_first_number() const { return _depth_first_number; } int depth_first_number() const { return _depth_first_number; }
int linear_scan_number() const { return _linear_scan_number; } int linear_scan_number() const { return _linear_scan_number; }
BlockEnd* end() const { return _end; } BlockEnd* end() const { return _end; }
@ -1634,6 +1686,7 @@ LEAF(BlockBegin, StateSplit)
// manipulation // manipulation
void set_dominator(BlockBegin* dom) { _dominator = dom; } void set_dominator(BlockBegin* dom) { _dominator = dom; }
void set_loop_depth(int d) { _loop_depth = d; } void set_loop_depth(int d) { _loop_depth = d; }
void set_dominator_depth(int d) { _dominator_depth = d; }
void set_depth_first_number(int dfn) { _depth_first_number = dfn; } void set_depth_first_number(int dfn) { _depth_first_number = dfn; }
void set_linear_scan_number(int lsn) { _linear_scan_number = lsn; } void set_linear_scan_number(int lsn) { _linear_scan_number = lsn; }
void set_end(BlockEnd* end); void set_end(BlockEnd* end);
@ -1695,7 +1748,8 @@ LEAF(BlockBegin, StateSplit)
parser_loop_header_flag = 1 << 7, // set by parser to identify blocks where phi functions can not be created on demand parser_loop_header_flag = 1 << 7, // set by parser to identify blocks where phi functions can not be created on demand
critical_edge_split_flag = 1 << 8, // set for all blocks that are introduced when critical edges are split critical_edge_split_flag = 1 << 8, // set for all blocks that are introduced when critical edges are split
linear_scan_loop_header_flag = 1 << 9, // set during loop-detection for LinearScan linear_scan_loop_header_flag = 1 << 9, // set during loop-detection for LinearScan
linear_scan_loop_end_flag = 1 << 10 // set during loop-detection for LinearScan linear_scan_loop_end_flag = 1 << 10, // set during loop-detection for LinearScan
donot_eliminate_range_checks = 1 << 11 // do not try to eliminate range checks in this block
}; };
void set(Flag f) { _flags |= f; } void set(Flag f) { _flags |= f; }
@ -1728,7 +1782,6 @@ LEAF(BlockBegin, StateSplit)
BASE(BlockEnd, StateSplit) BASE(BlockEnd, StateSplit)
private: private:
BlockBegin* _begin;
BlockList* _sux; BlockList* _sux;
protected: protected:
@ -1746,7 +1799,6 @@ BASE(BlockEnd, StateSplit)
// creation // creation
BlockEnd(ValueType* type, ValueStack* state_before, bool is_safepoint) BlockEnd(ValueType* type, ValueStack* state_before, bool is_safepoint)
: StateSplit(type, state_before) : StateSplit(type, state_before)
, _begin(NULL)
, _sux(NULL) , _sux(NULL)
{ {
set_flag(IsSafepointFlag, is_safepoint); set_flag(IsSafepointFlag, is_safepoint);
@ -1754,7 +1806,8 @@ BASE(BlockEnd, StateSplit)
// accessors // accessors
bool is_safepoint() const { return check_flag(IsSafepointFlag); } bool is_safepoint() const { return check_flag(IsSafepointFlag); }
BlockBegin* begin() const { return _begin; } // For compatibility with old code; new code should use block()
BlockBegin* begin() const { return _block; }
// manipulation // manipulation
void set_begin(BlockBegin* begin); void set_begin(BlockBegin* begin);
@ -1811,6 +1864,74 @@ LEAF(Goto, BlockEnd)
void set_direction(Direction d) { _direction = d; } void set_direction(Direction d) { _direction = d; }
}; };
#ifdef ASSERT
LEAF(Assert, Instruction)
private:
Value _x;
Condition _cond;
Value _y;
char *_message;
public:
// creation
// unordered_is_true is valid for float/double compares only
Assert(Value x, Condition cond, bool unordered_is_true, Value y);
// accessors
Value x() const { return _x; }
Condition cond() const { return _cond; }
bool unordered_is_true() const { return check_flag(UnorderedIsTrueFlag); }
Value y() const { return _y; }
const char *message() const { return _message; }
// generic
virtual void input_values_do(ValueVisitor* f) { f->visit(&_x); f->visit(&_y); }
};
#endif
LEAF(RangeCheckPredicate, StateSplit)
private:
Value _x;
Condition _cond;
Value _y;
void check_state();
public:
// creation
// unordered_is_true is valid for float/double compares only
RangeCheckPredicate(Value x, Condition cond, bool unordered_is_true, Value y, ValueStack* state) : StateSplit(illegalType)
, _x(x)
, _cond(cond)
, _y(y)
{
ASSERT_VALUES
set_flag(UnorderedIsTrueFlag, unordered_is_true);
assert(x->type()->tag() == y->type()->tag(), "types must match");
this->set_state(state);
check_state();
}
// Always deoptimize
RangeCheckPredicate(ValueStack* state) : StateSplit(illegalType)
{
this->set_state(state);
_x = _y = NULL;
check_state();
}
// accessors
Value x() const { return _x; }
Condition cond() const { return _cond; }
bool unordered_is_true() const { return check_flag(UnorderedIsTrueFlag); }
Value y() const { return _y; }
void always_fail() { _x = _y = NULL; }
// generic
virtual void input_values_do(ValueVisitor* f) { StateSplit::input_values_do(f); f->visit(&_x); f->visit(&_y); }
HASHING3(RangeCheckPredicate, true, x()->subst(), y()->subst(), cond())
};
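For orientation, a minimal standalone sketch (illustrative C++, not HotSpot source) of what a RangeCheckPredicate buys: one guard placed before the loop covers all iterations, so the per-iteration check disappears from the body; if the guard would fail, execution falls back to a fully checked path, the source-level analogue of deoptimizing.

#include <cstdio>
#include <stdexcept>

static long sum_checked(const int* a, int len, int n) {
  long s = 0;
  for (int i = 0; i < n; i++) {
    if ((unsigned)i >= (unsigned)len) throw std::out_of_range("i"); // per-iteration range check
    s += a[i];
  }
  return s;
}

static long sum_predicated(const int* a, int len, int n) {
  if (n > len) return sum_checked(a, len, n); // predicate fails: take the checked slow path
  long s = 0;
  for (int i = 0; i < n; i++) s += a[i];      // body runs with no range check
  return s;
}

int main() {
  int a[4] = {1, 2, 3, 4};
  printf("%ld\n", sum_predicated(a, 4, 4));   // prints 10
  return 0;
}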
LEAF(If, BlockEnd) LEAF(If, BlockEnd)
private: private:

View file

@ -57,6 +57,8 @@ const char* InstructionPrinter::cond_name(If::Condition cond) {
case If::leq: return "<="; case If::leq: return "<=";
case If::gtr: return ">"; case If::gtr: return ">";
case If::geq: return ">="; case If::geq: return ">=";
case If::aeq: return "|>=|";
case If::beq: return "|<=|";
} }
ShouldNotReachHere(); ShouldNotReachHere();
return NULL; return NULL;
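The two conditions added above, aeq and beq, print as unsigned ("above/below") comparisons; folded range checks rely on them. A self-contained sketch (illustrative, not from this patch) of the identity that makes one unsigned compare equivalent to a two-sided signed bounds check when the length is non-negative:

#include <cassert>
#include <cstdint>

static bool in_range_signed(int32_t i, int32_t n) {
  return 0 <= i && i < n;            // two signed compares
}

static bool in_range_unsigned(int32_t i, int32_t n) {
  return (uint32_t)i < (uint32_t)n;  // one unsigned compare: a negative i wraps high
}

int main() {
  for (int32_t i = -3; i < 6; i++) {
    assert(in_range_signed(i, 4) == in_range_unsigned(i, 4));
  }
  return 0;
}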
@ -181,6 +183,11 @@ void InstructionPrinter::print_indexed(AccessIndexed* indexed) {
output()->put('['); output()->put('[');
print_value(indexed->index()); print_value(indexed->index());
output()->put(']'); output()->put(']');
if (indexed->length() != NULL) {
output()->put('(');
print_value(indexed->length());
output()->put(')');
}
} }
@ -373,6 +380,7 @@ void InstructionPrinter::do_Constant(Constant* x) {
void InstructionPrinter::do_LoadField(LoadField* x) { void InstructionPrinter::do_LoadField(LoadField* x) {
print_field(x); print_field(x);
output()->print(" (%c)", type2char(x->field()->type()->basic_type())); output()->print(" (%c)", type2char(x->field()->type()->basic_type()));
output()->print(" %s", x->field()->name()->as_utf8());
} }
@ -381,6 +389,7 @@ void InstructionPrinter::do_StoreField(StoreField* x) {
output()->print(" := "); output()->print(" := ");
print_value(x->value()); print_value(x->value());
output()->print(" (%c)", type2char(x->field()->type()->basic_type())); output()->print(" (%c)", type2char(x->field()->type()->basic_type()));
output()->print(" %s", x->field()->name()->as_utf8());
} }
@ -393,6 +402,9 @@ void InstructionPrinter::do_ArrayLength(ArrayLength* x) {
void InstructionPrinter::do_LoadIndexed(LoadIndexed* x) { void InstructionPrinter::do_LoadIndexed(LoadIndexed* x) {
print_indexed(x); print_indexed(x);
output()->print(" (%c)", type2char(x->elt_type())); output()->print(" (%c)", type2char(x->elt_type()));
if (x->check_flag(Instruction::NeedsRangeCheckFlag)) {
output()->print(" [rc]");
}
} }
@ -401,6 +413,9 @@ void InstructionPrinter::do_StoreIndexed(StoreIndexed* x) {
output()->print(" := "); output()->print(" := ");
print_value(x->value()); print_value(x->value());
output()->print(" (%c)", type2char(x->elt_type())); output()->print(" (%c)", type2char(x->elt_type()));
if (x->check_flag(Instruction::NeedsRangeCheckFlag)) {
output()->print(" [rc]");
}
} }
void InstructionPrinter::do_NegateOp(NegateOp* x) { void InstructionPrinter::do_NegateOp(NegateOp* x) {
@ -843,6 +858,25 @@ void InstructionPrinter::do_UnsafePrefetchRead(UnsafePrefetchRead* x) {
output()->put(')'); output()->put(')');
} }
void InstructionPrinter::do_RangeCheckPredicate(RangeCheckPredicate* x) {
if (x->x() != NULL && x->y() != NULL) {
output()->print("if ");
print_value(x->x());
output()->print(" %s ", cond_name(x->cond()));
print_value(x->y());
output()->print(" then deoptimize!");
} else {
output()->print("always deoptimize!");
}
}
void InstructionPrinter::do_Assert(Assert* x) {
output()->print("assert ");
print_value(x->x());
output()->print(" %s ", cond_name(x->cond()));
print_value(x->y());
}
void InstructionPrinter::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { void InstructionPrinter::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
print_unsafe_object_op(x, "UnsafePrefetchWrite"); print_unsafe_object_op(x, "UnsafePrefetchWrite");

View file

@ -135,6 +135,8 @@ class InstructionPrinter: public InstructionVisitor {
virtual void do_ProfileInvoke (ProfileInvoke* x); virtual void do_ProfileInvoke (ProfileInvoke* x);
virtual void do_RuntimeCall (RuntimeCall* x); virtual void do_RuntimeCall (RuntimeCall* x);
virtual void do_MemBar (MemBar* x); virtual void do_MemBar (MemBar* x);
virtual void do_RangeCheckPredicate(RangeCheckPredicate* x);
virtual void do_Assert (Assert* x);
}; };
#endif // PRODUCT #endif // PRODUCT

View file

@ -633,6 +633,7 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
case lir_ushr: case lir_ushr:
case lir_xadd: case lir_xadd:
case lir_xchg: case lir_xchg:
case lir_assert:
{ {
assert(op->as_Op2() != NULL, "must be"); assert(op->as_Op2() != NULL, "must be");
LIR_Op2* op2 = (LIR_Op2*)op; LIR_Op2* op2 = (LIR_Op2*)op;
@ -1112,6 +1113,11 @@ void LIR_OpLock::emit_code(LIR_Assembler* masm) {
} }
} }
#ifdef ASSERT
void LIR_OpAssert::emit_code(LIR_Assembler* masm) {
masm->emit_assert(this);
}
#endif
void LIR_OpDelay::emit_code(LIR_Assembler* masm) { void LIR_OpDelay::emit_code(LIR_Assembler* masm) {
masm->emit_delay(this); masm->emit_delay(this);
@ -1771,6 +1777,8 @@ const char * LIR_Op::name() const {
case lir_cas_int: s = "cas_int"; break; case lir_cas_int: s = "cas_int"; break;
// LIR_OpProfileCall // LIR_OpProfileCall
case lir_profile_call: s = "profile_call"; break; case lir_profile_call: s = "profile_call"; break;
// LIR_OpAssert
case lir_assert: s = "assert"; break;
case lir_none: ShouldNotReachHere();break; case lir_none: ShouldNotReachHere();break;
default: s = "illegal_op"; break; default: s = "illegal_op"; break;
} }
@ -2017,6 +2025,13 @@ void LIR_OpLock::print_instr(outputStream* out) const {
out->print("[lbl:0x%x]", stub()->entry()); out->print("[lbl:0x%x]", stub()->entry());
} }
void LIR_OpAssert::print_instr(outputStream* out) const {
print_condition(out, condition()); out->print(" ");
in_opr1()->print(out); out->print(" ");
in_opr2()->print(out); out->print(", \"");
out->print(msg()); out->print("\"");
}
void LIR_OpDelay::print_instr(outputStream* out) const { void LIR_OpDelay::print_instr(outputStream* out) const {
_op->print_on(out); _op->print_on(out);

View file

@ -881,6 +881,7 @@ class LIR_OpLock;
class LIR_OpTypeCheck; class LIR_OpTypeCheck;
class LIR_OpCompareAndSwap; class LIR_OpCompareAndSwap;
class LIR_OpProfileCall; class LIR_OpProfileCall;
class LIR_OpAssert;
// LIR operation codes // LIR operation codes
@ -1000,6 +1001,9 @@ enum LIR_Code {
, begin_opMDOProfile , begin_opMDOProfile
, lir_profile_call , lir_profile_call
, end_opMDOProfile , end_opMDOProfile
, begin_opAssert
, lir_assert
, end_opAssert
}; };
@ -1135,6 +1139,7 @@ class LIR_Op: public CompilationResourceObj {
virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; } virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; } virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; } virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
virtual LIR_OpAssert* as_OpAssert() { return NULL; }
virtual void verify() const {} virtual void verify() const {}
}; };
@ -1623,7 +1628,7 @@ class LIR_Op2: public LIR_Op {
, _tmp3(LIR_OprFact::illegalOpr) , _tmp3(LIR_OprFact::illegalOpr)
, _tmp4(LIR_OprFact::illegalOpr) , _tmp4(LIR_OprFact::illegalOpr)
, _tmp5(LIR_OprFact::illegalOpr) { , _tmp5(LIR_OprFact::illegalOpr) {
assert(code == lir_cmp, "code check"); assert(code == lir_cmp || code == lir_assert, "code check");
} }
LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type)
@ -1683,7 +1688,7 @@ class LIR_Op2: public LIR_Op {
LIR_Opr tmp4_opr() const { return _tmp4; } LIR_Opr tmp4_opr() const { return _tmp4; }
LIR_Opr tmp5_opr() const { return _tmp5; } LIR_Opr tmp5_opr() const { return _tmp5; }
LIR_Condition condition() const { LIR_Condition condition() const {
assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); return _condition; assert(code() == lir_cmp || code() == lir_cmove || code() == lir_assert, "only valid for cmp and cmove and assert"); return _condition;
} }
void set_condition(LIR_Condition condition) { void set_condition(LIR_Condition condition) {
assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); _condition = condition; assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); _condition = condition;
@ -1823,6 +1828,30 @@ class LIR_OpDelay: public LIR_Op {
CodeEmitInfo* call_info() const { return info(); } CodeEmitInfo* call_info() const { return info(); }
}; };
#ifdef ASSERT
// LIR_OpAssert
class LIR_OpAssert : public LIR_Op2 {
friend class LIR_OpVisitState;
private:
const char* _msg;
bool _halt;
public:
LIR_OpAssert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt)
: LIR_Op2(lir_assert, condition, opr1, opr2)
, _halt(halt)
, _msg(msg) {
}
const char* msg() const { return _msg; }
bool halt() const { return _halt; }
virtual void emit_code(LIR_Assembler* masm);
virtual LIR_OpAssert* as_OpAssert() { return this; }
virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};
#endif
// LIR_OpCompareAndSwap // LIR_OpCompareAndSwap
class LIR_OpCompareAndSwap : public LIR_Op { class LIR_OpCompareAndSwap : public LIR_Op {
@ -2196,6 +2225,9 @@ class LIR_List: public CompilationResourceObj {
void xadd(LIR_Opr src, LIR_Opr add, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xadd, src, add, res, tmp)); } void xadd(LIR_Opr src, LIR_Opr add, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xadd, src, add, res, tmp)); }
void xchg(LIR_Opr src, LIR_Opr set, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xchg, src, set, res, tmp)); } void xchg(LIR_Opr src, LIR_Opr set, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xchg, src, set, res, tmp)); }
#ifdef ASSERT
void lir_assert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt) { append(new LIR_OpAssert(condition, opr1, opr2, msg, halt)); }
#endif
}; };
void print_LIR(BlockList* blocks); void print_LIR(BlockList* blocks);
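lir_assert is only compiled under ASSERT, so it adds no code to product builds. A trivial analogue of that debug-only pattern (illustrative macro, not the HotSpot one):

#include <cstdio>
#include <cstdlib>

#ifdef ASSERT
#define LIR_ASSERT(cond, msg) \
  do { if (!(cond)) { fprintf(stderr, "assert failed: %s\n", msg); abort(); } } while (0)
#else
#define LIR_ASSERT(cond, msg) ((void)0)  // no code emitted in product builds
#endif

int main() {
  int index = 3, length = 4;
  LIR_ASSERT(index < length, "index must be below length");
  puts("ok");
  return 0;
}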

View file

@ -210,6 +210,9 @@ class LIR_Assembler: public CompilationResourceObj {
void arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack); void arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack);
void arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info); void arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info);
void intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op); void intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op);
#ifdef ASSERT
void emit_assert(LIR_OpAssert* op);
#endif
void logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest); void logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest);

View file

@ -403,6 +403,10 @@ void LIRGenerator::walk(Value instr) {
CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) { CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
assert(state != NULL, "state must be defined"); assert(state != NULL, "state must be defined");
#ifndef PRODUCT
state->verify();
#endif
ValueStack* s = state; ValueStack* s = state;
for_each_state(s) { for_each_state(s) {
if (s->kind() == ValueStack::EmptyExceptionState) { if (s->kind() == ValueStack::EmptyExceptionState) {
@ -453,7 +457,7 @@ CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ig
} }
} }
return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers()); return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException));
} }
@ -1792,11 +1796,18 @@ void LIRGenerator::do_LoadField(LoadField* x) {
} }
#endif #endif
bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
if (x->needs_null_check() && if (x->needs_null_check() &&
(needs_patching || (needs_patching ||
MacroAssembler::needs_explicit_null_check(x->offset()))) { MacroAssembler::needs_explicit_null_check(x->offset()) ||
stress_deopt)) {
LIR_Opr obj = object.result();
if (stress_deopt) {
obj = new_register(T_OBJECT);
__ move(LIR_OprFact::oopConst(NULL), obj);
}
// emit an explicit null check because the offset is too large // emit an explicit null check because the offset is too large
__ null_check(object.result(), new CodeEmitInfo(info)); __ null_check(obj, new CodeEmitInfo(info));
} }
LIR_Opr reg = rlock_result(x, field_type); LIR_Opr reg = rlock_result(x, field_type);
@ -1873,6 +1884,11 @@ void LIRGenerator::do_ArrayLength(ArrayLength* x) {
} else { } else {
info = state_for(nc); info = state_for(nc);
} }
if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) {
LIR_Opr obj = new_register(T_OBJECT);
__ move(LIR_OprFact::oopConst(NULL), obj);
__ null_check(obj, new CodeEmitInfo(info));
}
} }
__ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none); __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
} }
@ -1883,15 +1899,12 @@ void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
LIRItem array(x->array(), this); LIRItem array(x->array(), this);
LIRItem index(x->index(), this); LIRItem index(x->index(), this);
LIRItem length(this); LIRItem length(this);
bool needs_range_check = true; bool needs_range_check = x->compute_needs_range_check();
if (use_length) { if (use_length && needs_range_check) {
needs_range_check = x->compute_needs_range_check();
if (needs_range_check) {
length.set_instruction(x->length()); length.set_instruction(x->length());
length.load_item(); length.load_item();
} }
}
array.load_item(); array.load_item();
if (index.is_constant() && can_inline_as_constant(x->index())) { if (index.is_constant() && can_inline_as_constant(x->index())) {
@ -1910,13 +1923,20 @@ void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
} else { } else {
null_check_info = range_check_info; null_check_info = range_check_info;
} }
if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
LIR_Opr obj = new_register(T_OBJECT);
__ move(LIR_OprFact::oopConst(NULL), obj);
__ null_check(obj, new CodeEmitInfo(null_check_info));
}
} }
// emit array address setup early so it schedules better // emit array address setup early so it schedules better
LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false); LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
if (GenerateRangeChecks && needs_range_check) { if (GenerateRangeChecks && needs_range_check) {
if (use_length) { if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
__ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
} else if (use_length) {
// TODO: use a (modified) version of array_range_check that does not require a // TODO: use a (modified) version of array_range_check that does not require a
// constant length to be loaded to a register // constant length to be loaded to a register
__ cmp(lir_cond_belowEqual, length.result(), index.result()); __ cmp(lir_cond_belowEqual, length.result(), index.result());
@ -2634,7 +2654,7 @@ void LIRGenerator::do_Base(Base* x) {
LIR_Opr lock = new_register(T_INT); LIR_Opr lock = new_register(T_INT);
__ load_stack_address_monitor(0, lock); __ load_stack_address_monitor(0, lock);
CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL); CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
CodeStub* slow_path = new MonitorEnterStub(obj, lock, info); CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
// receiver is guaranteed non-NULL so don't need CodeEmitInfo // receiver is guaranteed non-NULL so don't need CodeEmitInfo
@ -2644,7 +2664,7 @@ void LIRGenerator::do_Base(Base* x) {
// increment invocation counters if needed // increment invocation counters if needed
if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting. if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL); CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
increment_invocation_counter(info); increment_invocation_counter(info);
} }
@ -3102,6 +3122,95 @@ void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
} }
} }
void LIRGenerator::do_Assert(Assert *x) {
#ifdef ASSERT
ValueTag tag = x->x()->type()->tag();
If::Condition cond = x->cond();
LIRItem xitem(x->x(), this);
LIRItem yitem(x->y(), this);
LIRItem* xin = &xitem;
LIRItem* yin = &yitem;
assert(tag == intTag, "Only integer assertions are valid!");
xin->load_item();
yin->dont_load_item();
set_no_result(x);
LIR_Opr left = xin->result();
LIR_Opr right = yin->result();
__ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
#endif
}
void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
Instruction *a = x->x();
Instruction *b = x->y();
if (!a || StressRangeCheckElimination) {
assert(!b || StressRangeCheckElimination, "B must also be null");
CodeEmitInfo *info = state_for(x, x->state());
CodeStub* stub = new PredicateFailedStub(info);
__ jump(stub);
} else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
int a_int = a->type()->as_IntConstant()->value();
int b_int = b->type()->as_IntConstant()->value();
bool ok = false;
switch(x->cond()) {
case Instruction::eql: ok = (a_int == b_int); break;
case Instruction::neq: ok = (a_int != b_int); break;
case Instruction::lss: ok = (a_int < b_int); break;
case Instruction::leq: ok = (a_int <= b_int); break;
case Instruction::gtr: ok = (a_int > b_int); break;
case Instruction::geq: ok = (a_int >= b_int); break;
case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
default: ShouldNotReachHere();
}
if (ok) {
CodeEmitInfo *info = state_for(x, x->state());
CodeStub* stub = new PredicateFailedStub(info);
__ jump(stub);
}
} else {
ValueTag tag = x->x()->type()->tag();
If::Condition cond = x->cond();
LIRItem xitem(x->x(), this);
LIRItem yitem(x->y(), this);
LIRItem* xin = &xitem;
LIRItem* yin = &yitem;
assert(tag == intTag, "Only integer deoptimizations are valid!");
xin->load_item();
yin->dont_load_item();
set_no_result(x);
LIR_Opr left = xin->result();
LIR_Opr right = yin->result();
CodeEmitInfo *info = state_for(x, x->state());
CodeStub* stub = new PredicateFailedStub(info);
__ cmp(lir_cond(cond), left, right);
__ branch(lir_cond(cond), right->type(), stub);
}
}
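When both predicate operands are integer constants, the switch above evaluates the condition at compile time: if the failing condition holds, the generated code is just an unconditional jump to the deoptimization stub. A compact sketch of that evaluation (illustrative names):

#include <cassert>

enum Cond { eql, neq, lss, leq, gtr, geq, aeq, beq };

static bool eval(Cond c, int a, int b) {
  switch (c) {
    case eql: return a == b;
    case neq: return a != b;
    case lss: return a <  b;
    case leq: return a <= b;
    case gtr: return a >  b;
    case geq: return a >= b;
    case aeq: return (unsigned)a >= (unsigned)b;
    case beq: return (unsigned)a <= (unsigned)b;
  }
  return false;
}

int main() {
  assert(!eval(geq, 2, 8));  // predicate "if (i >= n) deoptimize" with i=2, n=8 never fires
  assert(eval(aeq, -1, 8));  // the unsigned form catches the negative index
  return 0;
}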
LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) { LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
LIRItemList args(1); LIRItemList args(1);
LIRItem value(arg1, this); LIRItem value(arg1, this);

View file

@ -412,6 +412,8 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
case If::leq: l = lir_cond_lessEqual; break; case If::leq: l = lir_cond_lessEqual; break;
case If::geq: l = lir_cond_greaterEqual; break; case If::geq: l = lir_cond_greaterEqual; break;
case If::gtr: l = lir_cond_greater; break; case If::gtr: l = lir_cond_greater; break;
case If::aeq: l = lir_cond_aboveEqual; break;
case If::beq: l = lir_cond_belowEqual; break;
}; };
return l; return l;
} }
@ -534,6 +536,8 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
virtual void do_ProfileInvoke (ProfileInvoke* x); virtual void do_ProfileInvoke (ProfileInvoke* x);
virtual void do_RuntimeCall (RuntimeCall* x); virtual void do_RuntimeCall (RuntimeCall* x);
virtual void do_MemBar (MemBar* x); virtual void do_MemBar (MemBar* x);
virtual void do_RangeCheckPredicate(RangeCheckPredicate* x);
virtual void do_Assert (Assert* x);
}; };

View file

@ -6231,6 +6231,8 @@ void ControlFlowOptimizer::delete_unnecessary_jumps(BlockList* code) {
assert(prev_op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch"); assert(prev_op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
LIR_OpBranch* prev_branch = (LIR_OpBranch*)prev_op; LIR_OpBranch* prev_branch = (LIR_OpBranch*)prev_op;
if (prev_branch->stub() == NULL) {
LIR_Op2* prev_cmp = NULL; LIR_Op2* prev_cmp = NULL;
for(int j = instructions->length() - 3; j >= 0 && prev_cmp == NULL; j--) { for(int j = instructions->length() - 3; j >= 0 && prev_cmp == NULL; j--) {
@ -6257,6 +6259,7 @@ void ControlFlowOptimizer::delete_unnecessary_jumps(BlockList* code) {
} }
} }
} }
}
DEBUG_ONLY(verify(code)); DEBUG_ONLY(verify(code));
} }

View file

@ -178,7 +178,7 @@ void CE_Eliminator::block_do(BlockBegin* block) {
// 2) substitute conditional expression // 2) substitute conditional expression
// with an IfOp followed by a Goto // with an IfOp followed by a Goto
// cut if_ away and get node before // cut if_ away and get node before
Instruction* cur_end = if_->prev(block); Instruction* cur_end = if_->prev();
// append constants of true- and false-block if necessary // append constants of true- and false-block if necessary
// clone constants because original block must not be destroyed // clone constants because original block must not be destroyed
@ -202,7 +202,7 @@ void CE_Eliminator::block_do(BlockBegin* block) {
} }
// append Goto to successor // append Goto to successor
ValueStack* state_before = if_->is_safepoint() ? if_->state_before() : NULL; ValueStack* state_before = if_->state_before();
Goto* goto_ = new Goto(sux, state_before, if_->is_safepoint() || t_goto->is_safepoint() || f_goto->is_safepoint()); Goto* goto_ = new Goto(sux, state_before, if_->is_safepoint() || t_goto->is_safepoint() || f_goto->is_safepoint());
// prepare state for Goto // prepare state for Goto
@ -367,10 +367,11 @@ class BlockMerger: public BlockClosure {
#endif #endif
// find instruction before end & append first instruction of sux block // find instruction before end & append first instruction of sux block
Instruction* prev = end->prev(block); Instruction* prev = end->prev();
Instruction* next = sux->next(); Instruction* next = sux->next();
assert(prev->as_BlockEnd() == NULL, "must not be a BlockEnd"); assert(prev->as_BlockEnd() == NULL, "must not be a BlockEnd");
prev->set_next(next); prev->set_next(next);
prev->fixup_block_pointers();
sux->disconnect_from_graph(); sux->disconnect_from_graph();
block->set_end(sux->end()); block->set_end(sux->end());
// add exception handlers of deleted block, if any // add exception handlers of deleted block, if any
@ -533,6 +534,8 @@ public:
void do_ProfileInvoke (ProfileInvoke* x); void do_ProfileInvoke (ProfileInvoke* x);
void do_RuntimeCall (RuntimeCall* x); void do_RuntimeCall (RuntimeCall* x);
void do_MemBar (MemBar* x); void do_MemBar (MemBar* x);
void do_RangeCheckPredicate(RangeCheckPredicate* x);
void do_Assert (Assert* x);
}; };
@ -714,6 +717,8 @@ void NullCheckVisitor::do_ProfileCall (ProfileCall* x) { nce()->clear_las
void NullCheckVisitor::do_ProfileInvoke (ProfileInvoke* x) {} void NullCheckVisitor::do_ProfileInvoke (ProfileInvoke* x) {}
void NullCheckVisitor::do_RuntimeCall (RuntimeCall* x) {} void NullCheckVisitor::do_RuntimeCall (RuntimeCall* x) {}
void NullCheckVisitor::do_MemBar (MemBar* x) {} void NullCheckVisitor::do_MemBar (MemBar* x) {}
void NullCheckVisitor::do_RangeCheckPredicate(RangeCheckPredicate* x) {}
void NullCheckVisitor::do_Assert (Assert* x) {}
void NullCheckEliminator::visit(Value* p) { void NullCheckEliminator::visit(Value* p) {

File diff suppressed because it is too large

View file

@ -0,0 +1,241 @@
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_C1_C1_RANGECHECKELIMINATION_HPP
#define SHARE_VM_C1_C1_RANGECHECKELIMINATION_HPP
#include "c1/c1_Instruction.hpp"
// Base class for range check elimination
class RangeCheckElimination : AllStatic {
public:
static void eliminate(IR *ir);
};
// Implementation
class RangeCheckEliminator VALUE_OBJ_CLASS_SPEC {
private:
int _number_of_instructions;
bool _optimistic; // Insert predicates and deoptimize when they fail
IR *_ir;
define_array(BlockBeginArray, BlockBegin*)
define_stack(BlockBeginList, BlockBeginArray)
define_stack(IntegerStack, intArray)
define_array(IntegerMap, IntegerStack*)
class Verification : public _ValueObj /*VALUE_OBJ_CLASS_SPEC*/, public BlockClosure {
private:
IR *_ir;
boolArray _used;
BlockBeginList _current;
BlockBeginList _successors;
public:
Verification(IR *ir);
virtual void block_do(BlockBegin *block);
bool can_reach(BlockBegin *start, BlockBegin *end, BlockBegin *dont_use = NULL);
bool dominates(BlockBegin *dominator, BlockBegin *block);
};
public:
// Bounds for an instruction in the form x + c, where c is an integer
// constant and x another instruction
class Bound : public CompilationResourceObj {
private:
int _upper;
Value _upper_instr;
int _lower;
Value _lower_instr;
public:
Bound();
Bound(Value v);
Bound(Instruction::Condition cond, Value v, int constant = 0);
Bound(int lower, Value lower_instr, int upper, Value upper_instr);
~Bound();
#ifdef ASSERT
void add_assertion(Instruction *instruction, Instruction *position, int i, Value instr, Instruction::Condition cond);
#endif
int upper();
Value upper_instr();
int lower();
Value lower_instr();
void print();
bool check_no_overflow(int const_value);
void or_op(Bound *b);
void and_op(Bound *b);
bool has_upper();
bool has_lower();
void set_upper(int upper, Value upper_instr);
void set_lower(int lower, Value lower_instr);
bool is_smaller(Bound *b);
void remove_upper();
void remove_lower();
void add_constant(int value);
Bound *copy();
private:
void init();
};
class Visitor : public InstructionVisitor {
private:
Bound *_bound;
RangeCheckEliminator *_rce;
public:
void set_range_check_eliminator(RangeCheckEliminator *rce) { _rce = rce; }
Bound *bound() const { return _bound; }
void clear_bound() { _bound = NULL; }
protected:
// visitor functions
void do_Constant (Constant* x);
void do_IfOp (IfOp* x);
void do_LogicOp (LogicOp* x);
void do_ArithmeticOp (ArithmeticOp* x);
void do_Phi (Phi* x);
void do_StoreField (StoreField* x) { /* nothing to do */ };
void do_StoreIndexed (StoreIndexed* x) { /* nothing to do */ };
void do_MonitorEnter (MonitorEnter* x) { /* nothing to do */ };
void do_MonitorExit (MonitorExit* x) { /* nothing to do */ };
void do_Invoke (Invoke* x) { /* nothing to do */ };
void do_UnsafePutRaw (UnsafePutRaw* x) { /* nothing to do */ };
void do_UnsafePutObject(UnsafePutObject* x) { /* nothing to do */ };
void do_Intrinsic (Intrinsic* x) { /* nothing to do */ };
void do_Local (Local* x) { /* nothing to do */ };
void do_LoadField (LoadField* x) { /* nothing to do */ };
void do_ArrayLength (ArrayLength* x) { /* nothing to do */ };
void do_LoadIndexed (LoadIndexed* x) { /* nothing to do */ };
void do_NegateOp (NegateOp* x) { /* nothing to do */ };
void do_ShiftOp (ShiftOp* x) { /* nothing to do */ };
void do_CompareOp (CompareOp* x) { /* nothing to do */ };
void do_Convert (Convert* x) { /* nothing to do */ };
void do_NullCheck (NullCheck* x) { /* nothing to do */ };
void do_TypeCast (TypeCast* x) { /* nothing to do */ };
void do_NewInstance (NewInstance* x) { /* nothing to do */ };
void do_NewTypeArray (NewTypeArray* x) { /* nothing to do */ };
void do_NewObjectArray (NewObjectArray* x) { /* nothing to do */ };
void do_NewMultiArray (NewMultiArray* x) { /* nothing to do */ };
void do_CheckCast (CheckCast* x) { /* nothing to do */ };
void do_InstanceOf (InstanceOf* x) { /* nothing to do */ };
void do_BlockBegin (BlockBegin* x) { /* nothing to do */ };
void do_Goto (Goto* x) { /* nothing to do */ };
void do_If (If* x) { /* nothing to do */ };
void do_IfInstanceOf (IfInstanceOf* x) { /* nothing to do */ };
void do_TableSwitch (TableSwitch* x) { /* nothing to do */ };
void do_LookupSwitch (LookupSwitch* x) { /* nothing to do */ };
void do_Return (Return* x) { /* nothing to do */ };
void do_Throw (Throw* x) { /* nothing to do */ };
void do_Base (Base* x) { /* nothing to do */ };
void do_OsrEntry (OsrEntry* x) { /* nothing to do */ };
void do_ExceptionObject(ExceptionObject* x) { /* nothing to do */ };
void do_RoundFP (RoundFP* x) { /* nothing to do */ };
void do_UnsafeGetRaw (UnsafeGetRaw* x) { /* nothing to do */ };
void do_UnsafeGetObject(UnsafeGetObject* x) { /* nothing to do */ };
void do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) { /* nothing to do */ };
void do_UnsafePrefetchRead (UnsafePrefetchRead* x) { /* nothing to do */ };
void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ };
void do_ProfileCall (ProfileCall* x) { /* nothing to do */ };
void do_ProfileInvoke (ProfileInvoke* x) { /* nothing to do */ };
void do_RuntimeCall (RuntimeCall* x) { /* nothing to do */ };
void do_MemBar (MemBar* x) { /* nothing to do */ };
void do_RangeCheckPredicate(RangeCheckPredicate* x) { /* nothing to do */ };
void do_Assert (Assert* x) { /* nothing to do */ };
};
#ifdef ASSERT
void add_assertions(Bound *bound, Instruction *instruction, Instruction *position);
#endif
define_array(BoundArray, Bound *)
define_stack(BoundStack, BoundArray)
define_array(BoundMap, BoundStack *)
define_array(AccessIndexedArray, AccessIndexed *)
define_stack(AccessIndexedList, AccessIndexedArray)
define_array(InstructionArray, Instruction *)
define_stack(InstructionList, InstructionArray)
class AccessIndexedInfo : public CompilationResourceObj {
public:
AccessIndexedList *_list;
int _min;
int _max;
};
define_array(AccessIndexedInfoArray, AccessIndexedInfo *)
BoundMap _bounds; // Mapping from Instruction's id to current bound
AccessIndexedInfoArray _access_indexed_info; // Mapping from Instruction's id to AccessIndexedInfo for in block motion
Visitor _visitor;
public:
RangeCheckEliminator(IR *ir);
IR *ir() const { return _ir; }
// Pass over the dominator tree to identify blocks where there's an opportunity for optimization
bool set_process_block_flags(BlockBegin *block);
// The core of the optimization work: pass over the dominator tree
// to propagate bound information, insert predicates outside of loops,
// eliminate bound checks when possible and perform in-block motion
void calc_bounds(BlockBegin *block, BlockBegin *loop_header);
// reorder bound checks within a block in order to eliminate some of them
void in_block_motion(BlockBegin *block, AccessIndexedList &accessIndexed, InstructionList &arrays);
// update/access current bound
void update_bound(IntegerStack &pushed, Value v, Instruction::Condition cond, Value value, int constant);
void update_bound(IntegerStack &pushed, Value v, Bound *bound);
Bound *get_bound(Value v);
bool loop_invariant(BlockBegin *loop_header, Instruction *instruction); // check for loop invariance
void add_access_indexed_info(InstructionList &indices, int i, Value instruction, AccessIndexed *ai); // record indexed access for in block motion
void remove_range_check(AccessIndexed *ai); // Mark this instruction as not needing a range check
void add_if_condition(IntegerStack &pushed, Value x, Value y, Instruction::Condition condition); // Update bound for an If
bool in_array_bound(Bound *bound, Value array); // Check whether bound is known to fall within array
// helper functions to work with predicates
Instruction* insert_after(Instruction* insert_position, Instruction* instr, int bci);
Instruction* predicate(Instruction* left, Instruction::Condition cond, Instruction* right, ValueStack* state, Instruction *insert_position, int bci=-1);
Instruction* predicate_cmp_with_const(Instruction* instr, Instruction::Condition cond, int constant, ValueStack* state, Instruction *insert_position, int bci=1);
Instruction* predicate_add(Instruction* left, int left_const, Instruction::Condition cond, Instruction* right, ValueStack* state, Instruction *insert_position, int bci=-1);
Instruction* predicate_add_cmp_with_const(Instruction* left, int left_const, Instruction::Condition cond, int constant, ValueStack* state, Instruction *insert_position, int bci=-1);
void insert_deoptimization(ValueStack *state, Instruction *insert_position, Instruction *array_instr, // Add predicate
Instruction *length_instruction, Instruction *lower_instr, int lower,
Instruction *upper_instr, int upper, AccessIndexed *ai);
bool is_ok_for_deoptimization(Instruction *insert_position, Instruction *array_instr, // Can we safely add a predicate?
Instruction *length_instr, Instruction *lower_instr,
int lower, Instruction *upper_instr, int upper);
void process_if(IntegerStack &pushed, BlockBegin *block, If *cond); // process If Instruction
void process_access_indexed(BlockBegin *loop_header, BlockBegin *block, AccessIndexed *ai); // process indexed access
void dump_condition_stack(BlockBegin *cur_block);
static void print_statistics();
};
#endif // SHARE_VM_C1_C1_RANGECHECKELIMINATION_HPP
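The Bound class above tracks intervals of the shape [lower_instr + lower, upper_instr + upper]. With the instruction parts absent this reduces to plain interval arithmetic; a minimal constant-only sketch (illustrative, assuming no symbolic parts):

#include <algorithm>
#include <cstdio>

struct IntBound {                      // constant-only analogue of RangeCheckEliminator::Bound
  int lower, upper;
  void and_op(const IntBound& b) {     // both conditions hold: intersect the intervals
    lower = std::max(lower, b.lower);
    upper = std::min(upper, b.upper);
  }
  void add_constant(int c) { lower += c; upper += c; }  // bound for x + c
};

int main() {
  IntBound i = {0, 100};               // from a dominating "i >= 0 && i <= 100"
  i.and_op(IntBound{0, 9});            // a dominating "i < 10" refines it
  i.add_constant(1);                   // now describes the expression i + 1
  printf("[%d, %d]\n", i.lower, i.upper);  // prints [1, 10]
  return 0;
}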

View file

@ -1330,6 +1330,50 @@ JRT_LEAF(int, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
return (k != NULL && obj != NULL && obj->is_a(k)) ? 1 : 0; return (k != NULL && obj != NULL && obj->is_a(k)) ? 1 : 0;
JRT_END JRT_END
JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* thread))
ResourceMark rm;
assert(!TieredCompilation, "incompatible with tiered compilation");
RegisterMap reg_map(thread, false);
frame runtime_frame = thread->last_frame();
frame caller_frame = runtime_frame.sender(&reg_map);
nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
assert (nm != NULL, "no more nmethod?");
nm->make_not_entrant();
methodHandle m(nm->method());
MethodData* mdo = m->method_data();
if (mdo == NULL && !HAS_PENDING_EXCEPTION) {
// Build an MDO. Ignore errors like OutOfMemory;
// that simply means we won't have an MDO to update.
Method::build_interpreter_method_data(m, THREAD);
if (HAS_PENDING_EXCEPTION) {
assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
CLEAR_PENDING_EXCEPTION;
}
mdo = m->method_data();
}
if (mdo != NULL) {
mdo->inc_trap_count(Deoptimization::Reason_none);
}
if (TracePredicateFailedTraps) {
stringStream ss1, ss2;
vframeStream vfst(thread);
methodHandle inlinee = methodHandle(vfst.method());
inlinee->print_short_name(&ss1);
m->print_short_name(&ss2);
tty->print_cr("Predicate failed trap in method %s at bci %d inlined in %s at pc %x", ss1.as_string(), vfst.bci(), ss2.as_string(), caller_frame.pc());
}
Deoptimization::deoptimize_frame(thread, caller_frame.id());
JRT_END
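predicate_failed_trap makes the optimistic nmethod not entrant and bumps a trap counter in the MDO; repeated failures are what eventually steer recompilation away from predication. A toy model of that throttling idea (illustrative only; the real limit logic lives in HotSpot's deoptimization policy):

#include <cstdio>

struct MethodProfile {
  int trap_count = 0;
  bool optimistic = true;
  void on_predicate_failed(int limit) {
    trap_count++;                 // analogue of mdo->inc_trap_count(...)
    if (trap_count >= limit)
      optimistic = false;         // recompile without optimistic predicates
  }
};

int main() {
  MethodProfile p;
  for (int i = 0; i < 3; i++) p.on_predicate_failed(3);
  printf("optimistic=%d\n", p.optimistic);  // prints optimistic=0
  return 0;
}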
#ifndef PRODUCT #ifndef PRODUCT
void Runtime1::print_statistics() { void Runtime1::print_statistics() {

View file

@ -71,6 +71,7 @@ class StubAssembler;
stub(g1_post_barrier_slow) \ stub(g1_post_barrier_slow) \
stub(fpu2long_stub) \ stub(fpu2long_stub) \
stub(counter_overflow) \ stub(counter_overflow) \
stub(predicate_failed_trap) \
last_entry(number_of_ids) last_entry(number_of_ids)
#define DECLARE_STUB_ID(x) x ## _id , #define DECLARE_STUB_ID(x) x ## _id ,
@ -190,6 +191,8 @@ class Runtime1: public AllStatic {
static void oop_arraycopy(HeapWord* src, HeapWord* dst, int length); static void oop_arraycopy(HeapWord* src, HeapWord* dst, int length);
static int is_instance_of(oopDesc* mirror, oopDesc* obj); static int is_instance_of(oopDesc* mirror, oopDesc* obj);
static void predicate_failed_trap(JavaThread* thread);
static void print_statistics() PRODUCT_RETURN; static void print_statistics() PRODUCT_RETURN;
}; };

View file

@ -26,9 +26,9 @@
#include "c1/c1_Canonicalizer.hpp" #include "c1/c1_Canonicalizer.hpp"
#include "c1/c1_IR.hpp" #include "c1/c1_IR.hpp"
#include "c1/c1_ValueMap.hpp" #include "c1/c1_ValueMap.hpp"
#include "c1/c1_ValueStack.hpp"
#include "utilities/bitMap.inline.hpp" #include "utilities/bitMap.inline.hpp"
#ifndef PRODUCT #ifndef PRODUCT
int ValueMap::_number_of_finds = 0; int ValueMap::_number_of_finds = 0;
@ -192,10 +192,6 @@ Value ValueMap::find_insert(Value x) {
&& lf->field()->holder() == field->holder() \ && lf->field()->holder() == field->holder() \
&& (all_offsets || lf->field()->offset() == field->offset()); && (all_offsets || lf->field()->offset() == field->offset());
#define MUST_KILL_EXCEPTION(must_kill, entry, value) \
assert(entry->nesting() < nesting(), "must not find bigger nesting than current"); \
bool must_kill = (entry->nesting() == nesting() - 1);
void ValueMap::kill_memory() { void ValueMap::kill_memory() {
GENERIC_KILL_VALUE(MUST_KILL_MEMORY); GENERIC_KILL_VALUE(MUST_KILL_MEMORY);
@ -209,11 +205,6 @@ void ValueMap::kill_field(ciField* field, bool all_offsets) {
GENERIC_KILL_VALUE(MUST_KILL_FIELD); GENERIC_KILL_VALUE(MUST_KILL_FIELD);
} }
void ValueMap::kill_exception() {
GENERIC_KILL_VALUE(MUST_KILL_EXCEPTION);
}
void ValueMap::kill_map(ValueMap* map) { void ValueMap::kill_map(ValueMap* map) {
assert(is_global_value_numbering(), "only for global value numbering"); assert(is_global_value_numbering(), "only for global value numbering");
_killed_values.set_union(&map->_killed_values); _killed_values.set_union(&map->_killed_values);
@ -274,6 +265,8 @@ class ShortLoopOptimizer : public ValueNumberingVisitor {
GlobalValueNumbering* _gvn; GlobalValueNumbering* _gvn;
BlockList _loop_blocks; BlockList _loop_blocks;
bool _too_complicated_loop; bool _too_complicated_loop;
bool _has_field_store[T_ARRAY + 1];
bool _has_indexed_store[T_ARRAY + 1];
// simplified access to methods of GlobalValueNumbering // simplified access to methods of GlobalValueNumbering
ValueMap* current_map() { return _gvn->current_map(); } ValueMap* current_map() { return _gvn->current_map(); }
@ -281,8 +274,16 @@ class ShortLoopOptimizer : public ValueNumberingVisitor {
// implementation for abstract methods of ValueNumberingVisitor // implementation for abstract methods of ValueNumberingVisitor
void kill_memory() { _too_complicated_loop = true; } void kill_memory() { _too_complicated_loop = true; }
void kill_field(ciField* field, bool all_offsets) { current_map()->kill_field(field, all_offsets); }; void kill_field(ciField* field, bool all_offsets) {
void kill_array(ValueType* type) { current_map()->kill_array(type); }; current_map()->kill_field(field, all_offsets);
assert(field->type()->basic_type() >= 0 && field->type()->basic_type() <= T_ARRAY, "Invalid type");
_has_field_store[field->type()->basic_type()] = true;
}
void kill_array(ValueType* type) {
current_map()->kill_array(type);
BasicType basic_type = as_BasicType(type); assert(basic_type >= 0 && basic_type <= T_ARRAY, "Invalid type");
_has_indexed_store[basic_type] = true;
}
public: public:
ShortLoopOptimizer(GlobalValueNumbering* gvn) ShortLoopOptimizer(GlobalValueNumbering* gvn)
@ -290,11 +291,141 @@ class ShortLoopOptimizer : public ValueNumberingVisitor {
, _loop_blocks(ValueMapMaxLoopSize) , _loop_blocks(ValueMapMaxLoopSize)
, _too_complicated_loop(false) , _too_complicated_loop(false)
{ {
for (int i=0; i<= T_ARRAY; i++){
_has_field_store[i] = false;
_has_indexed_store[i] = false;
}
}
bool has_field_store(BasicType type) {
assert(type >= 0 && type <= T_ARRAY, "Invalid type");
return _has_field_store[type];
}
bool has_indexed_store(BasicType type) {
assert(type >= 0 && type <= T_ARRAY, "Invalid type");
return _has_indexed_store[type];
} }
bool process(BlockBegin* loop_header); bool process(BlockBegin* loop_header);
}; };
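The per-type tables above let the short-loop optimizer keep treating loads as hoistable when no store of the same basic type occurs anywhere in the loop. A minimal sketch of that bookkeeping (illustrative type set):

#include <cstdio>

enum BasicType { T_INT, T_LONG, T_OBJECT, T_TYPE_COUNT };

struct StoreTracker {
  bool has_field_store[T_TYPE_COUNT] = {};
  void record_store(BasicType t) { has_field_store[t] = true; }
  bool load_may_be_invariant(BasicType t) const { return !has_field_store[t]; }
};

int main() {
  StoreTracker t;
  t.record_store(T_INT);                                  // the loop writes an int field
  printf("%d\n", t.load_may_be_invariant(T_INT));         // 0: int loads stay in the loop
  printf("%d\n", t.load_may_be_invariant(T_OBJECT));      // 1: object loads may be hoisted
  return 0;
}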
class LoopInvariantCodeMotion : public StackObj {
private:
GlobalValueNumbering* _gvn;
ShortLoopOptimizer* _short_loop_optimizer;
Instruction* _insertion_point;
ValueStack * _state;
void set_invariant(Value v) const { _gvn->set_processed(v); }
bool is_invariant(Value v) const { return _gvn->is_processed(v); }
void process_block(BlockBegin* block);
public:
LoopInvariantCodeMotion(ShortLoopOptimizer *slo, GlobalValueNumbering* gvn, BlockBegin* loop_header, BlockList* loop_blocks);
};
LoopInvariantCodeMotion::LoopInvariantCodeMotion(ShortLoopOptimizer *slo, GlobalValueNumbering* gvn, BlockBegin* loop_header, BlockList* loop_blocks)
: _gvn(gvn), _short_loop_optimizer(slo) {
TRACE_VALUE_NUMBERING(tty->print_cr("using loop invariant code motion loop_header = %d", loop_header->block_id()));
TRACE_VALUE_NUMBERING(tty->print_cr("** loop invariant code motion for short loop B%d", loop_header->block_id()));
BlockBegin* insertion_block = loop_header->dominator();
if (insertion_block->number_of_preds() == 0) {
return; // only the entry block does not have a predecessor
}
assert(insertion_block->end()->as_Base() == NULL, "cannot insert into entry block");
_insertion_point = insertion_block->end()->prev();
BlockEnd *block_end = insertion_block->end();
_state = block_end->state_before();
if (!_state) {
// If, TableSwitch and LookupSwitch always have state_before when
// loop invariant code motion happens.
assert(block_end->as_Goto(), "Block has to be goto");
_state = block_end->state();
}
// the loop_blocks are filled by going backward from the loop header, so this processing order is best
assert(loop_blocks->at(0) == loop_header, "loop header must be first loop block");
process_block(loop_header);
for (int i = loop_blocks->length() - 1; i >= 1; i--) {
process_block(loop_blocks->at(i));
}
}
void LoopInvariantCodeMotion::process_block(BlockBegin* block) {
TRACE_VALUE_NUMBERING(tty->print_cr("processing block B%d", block->block_id()));
Instruction* prev = block;
Instruction* cur = block->next();
while (cur != NULL) {
// determine if cur instruction is loop invariant
// only selected instruction types are processed here
bool cur_invariant = false;
if (cur->as_Constant() != NULL) {
cur_invariant = !cur->can_trap();
} else if (cur->as_ArithmeticOp() != NULL || cur->as_LogicOp() != NULL || cur->as_ShiftOp() != NULL) {
assert(cur->as_Op2() != NULL, "must be Op2");
Op2* op2 = (Op2*)cur;
cur_invariant = !op2->can_trap() && is_invariant(op2->x()) && is_invariant(op2->y());
} else if (cur->as_LoadField() != NULL) {
LoadField* lf = (LoadField*)cur;
// deoptimizes on NullPointerException
cur_invariant = !lf->needs_patching() && !lf->field()->is_volatile() && !_short_loop_optimizer->has_field_store(lf->field()->type()->basic_type()) && is_invariant(lf->obj());
} else if (cur->as_ArrayLength() != NULL) {
ArrayLength *length = cur->as_ArrayLength();
cur_invariant = is_invariant(length->array());
} else if (cur->as_LoadIndexed() != NULL) {
LoadIndexed *li = (LoadIndexed *)cur->as_LoadIndexed();
cur_invariant = !_short_loop_optimizer->has_indexed_store(as_BasicType(cur->type())) && is_invariant(li->array()) && is_invariant(li->index());
}
if (cur_invariant) {
// perform value numbering and mark instruction as loop-invariant
_gvn->substitute(cur);
if (cur->as_Constant() == NULL) {
// ensure that code for non-constant instructions is always generated
cur->pin();
}
// remove cur instruction from loop block and append it to block before loop
Instruction* next = cur->next();
Instruction* in = _insertion_point->next();
_insertion_point = _insertion_point->set_next(cur);
cur->set_next(in);
// Deoptimize on exception
cur->set_flag(Instruction::DeoptimizeOnException, true);
// Clear exception handlers
cur->set_exception_handlers(NULL);
TRACE_VALUE_NUMBERING(tty->print_cr("Instruction %c%d is loop invariant", cur->type()->tchar(), cur->id()));
if (cur->state_before() != NULL) {
cur->set_state_before(_state->copy());
}
if (cur->exception_state() != NULL) {
cur->set_exception_state(_state->copy());
}
cur = prev->set_next(next);
} else {
prev = cur;
cur = cur->next();
}
}
}
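Seen at source level, the effect of process_block on a short loop is the classic hoist below (sketched as C++; the real pass moves IR instructions and rewires their states, not source text):

#include <cstdio>

struct Box { int limit; };

static int count_naive(const Box* b) {
  int n = 0;
  for (int i = 0; i < 1000; i++) {
    if (i >= b->limit) break;     // the load of b->limit repeats every iteration
    n++;
  }
  return n;
}

static int count_hoisted(const Box* b) {
  int limit = b->limit;           // loop-invariant load moved to the block before the loop
  int n = 0;
  for (int i = 0; i < 1000; i++) {
    if (i >= limit) break;
    n++;
  }
  return n;
}

int main() {
  Box b = {10};
  printf("%d %d\n", count_naive(&b), count_hoisted(&b));  // prints 10 10
  return 0;
}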
bool ShortLoopOptimizer::process(BlockBegin* loop_header) { bool ShortLoopOptimizer::process(BlockBegin* loop_header) {
TRACE_VALUE_NUMBERING(tty->print_cr("** loop header block")); TRACE_VALUE_NUMBERING(tty->print_cr("** loop header block"));
@ -316,6 +447,10 @@ bool ShortLoopOptimizer::process(BlockBegin* loop_header) {
for (int j = block->number_of_preds() - 1; j >= 0; j--) { for (int j = block->number_of_preds() - 1; j >= 0; j--) {
BlockBegin* pred = block->pred_at(j); BlockBegin* pred = block->pred_at(j);
if (pred->is_set(BlockBegin::osr_entry_flag)) {
return false;
}
ValueMap* pred_map = value_map_of(pred); ValueMap* pred_map = value_map_of(pred);
if (pred_map != NULL) { if (pred_map != NULL) {
current_map()->kill_map(pred_map); current_map()->kill_map(pred_map);
@ -336,6 +471,12 @@ bool ShortLoopOptimizer::process(BlockBegin* loop_header) {
} }
} }
bool optimistic = this->_gvn->compilation()->is_optimistic();
if (UseLoopInvariantCodeMotion && optimistic) {
LoopInvariantCodeMotion code_motion(this, _gvn, loop_header, &_loop_blocks);
}
TRACE_VALUE_NUMBERING(tty->print_cr("** loop successfully optimized")); TRACE_VALUE_NUMBERING(tty->print_cr("** loop successfully optimized"));
return true; return true;
} }
@ -344,11 +485,11 @@ bool ShortLoopOptimizer::process(BlockBegin* loop_header) {
GlobalValueNumbering::GlobalValueNumbering(IR* ir) GlobalValueNumbering::GlobalValueNumbering(IR* ir)
: _current_map(NULL) : _current_map(NULL)
, _value_maps(ir->linear_scan_order()->length(), NULL) , _value_maps(ir->linear_scan_order()->length(), NULL)
, _compilation(ir->compilation())
{ {
TRACE_VALUE_NUMBERING(tty->print_cr("****** start of global value numbering")); TRACE_VALUE_NUMBERING(tty->print_cr("****** start of global value numbering"));
ShortLoopOptimizer short_loop_optimizer(this); ShortLoopOptimizer short_loop_optimizer(this);
int subst_count = 0;
BlockList* blocks = ir->linear_scan_order(); BlockList* blocks = ir->linear_scan_order();
int num_blocks = blocks->length(); int num_blocks = blocks->length();
@ -357,6 +498,12 @@ GlobalValueNumbering::GlobalValueNumbering(IR* ir)
assert(start_block == ir->start() && start_block->number_of_preds() == 0 && start_block->dominator() == NULL, "must be start block"); assert(start_block == ir->start() && start_block->number_of_preds() == 0 && start_block->dominator() == NULL, "must be start block");
assert(start_block->next()->as_Base() != NULL && start_block->next()->next() == NULL, "start block must not have instructions"); assert(start_block->next()->as_Base() != NULL && start_block->next()->next() == NULL, "start block must not have instructions");
// method parameters are not linked in the instructions list, so process them separately
for_each_state_value(start_block->state(), value,
assert(value->as_Local() != NULL, "only method parameters allowed");
set_processed(value);
);
// initial, empty value map with nesting 0 // initial, empty value map with nesting 0
set_value_map_of(start_block, new ValueMap()); set_value_map_of(start_block, new ValueMap());
@ -374,7 +521,7 @@ GlobalValueNumbering::GlobalValueNumbering(IR* ir)
// create new value map with increased nesting // create new value map with increased nesting
_current_map = new ValueMap(value_map_of(dominator)); _current_map = new ValueMap(value_map_of(dominator));
if (num_preds == 1) { if (num_preds == 1 && !block->is_set(BlockBegin::exception_entry_flag)) {
assert(dominator == block->pred_at(0), "dominator must be equal to predecessor"); assert(dominator == block->pred_at(0), "dominator must be equal to predecessor");
// nothing to do here // nothing to do here
@ -403,36 +550,41 @@ GlobalValueNumbering::GlobalValueNumbering(IR* ir)
} }
} }
if (block->is_set(BlockBegin::exception_entry_flag)) { // phi functions are not linked in the instructions list, so process them separately
current_map()->kill_exception(); for_each_phi_fun(block, phi,
} set_processed(phi);
);
TRACE_VALUE_NUMBERING(tty->print("value map before processing block: "); current_map()->print()); TRACE_VALUE_NUMBERING(tty->print("value map before processing block: "); current_map()->print());
// visit all instructions of this block // visit all instructions of this block
for (Value instr = block->next(); instr != NULL; instr = instr->next()) { for (Value instr = block->next(); instr != NULL; instr = instr->next()) {
assert(!instr->has_subst(), "substitution already set");
// check if instruction kills any values // check if instruction kills any values
instr->visit(this); instr->visit(this);
// perform actual value numbering
if (instr->hash() != 0) { substitute(instr);
Value f = current_map()->find_insert(instr);
if (f != instr) {
assert(!f->has_subst(), "can't have a substitution");
instr->set_subst(f);
subst_count++;
}
}
} }
// remember value map for successors // remember value map for successors
set_value_map_of(block, current_map()); set_value_map_of(block, current_map());
} }
if (subst_count != 0) { if (_has_substitutions) {
SubstitutionResolver resolver(ir); SubstitutionResolver resolver(ir);
} }
TRACE_VALUE_NUMBERING(tty->print("****** end of global value numbering. "); ValueMap::print_statistics()); TRACE_VALUE_NUMBERING(tty->print("****** end of global value numbering. "); ValueMap::print_statistics());
} }
void GlobalValueNumbering::substitute(Instruction* instr) {
assert(!instr->has_subst(), "substitution already set");
Value subst = current_map()->find_insert(instr);
if (subst != instr) {
assert(!subst->has_subst(), "can't have a substitution");
TRACE_VALUE_NUMBERING(tty->print_cr("substitution for %d set to %d", instr->id(), subst->id()));
instr->set_subst(subst);
_has_substitutions = true;
}
set_processed(instr);
}
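substitute() is the standard value-numbering step: hash the instruction, probe the current map with find_insert, and if an equivalent earlier value exists, record it as the substitution. A toy version with an ordinary map standing in for ValueMap (illustrative):

#include <cstdio>
#include <map>
#include <tuple>

using Expr = std::tuple<char, int, int>;   // (operator, left value id, right value id)

int main() {
  std::map<Expr, int> value_map;           // expression -> id of its first occurrence
  Expr exprs[4] = { std::make_tuple('+',1,2), std::make_tuple('*',1,2),
                    std::make_tuple('+',1,2), std::make_tuple('+',2,2) };
  for (int i = 0; i < 4; i++) {
    auto it = value_map.find(exprs[i]);
    if (it == value_map.end()) {
      value_map[exprs[i]] = i;             // find_insert: first occurrence defines the value
      printf("instr %d defines a new value\n", i);
    } else {
      printf("instr %d substituted by value %d\n", i, it->second);  // set_subst analogue
    }
  }
  return 0;
}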

View file

@@ -206,6 +206,8 @@ class ValueNumberingVisitor: public InstructionVisitor {
   void do_ProfileInvoke  (ProfileInvoke*  x) { /* nothing to do */ };
   void do_RuntimeCall    (RuntimeCall*    x) { /* nothing to do */ };
   void do_MemBar         (MemBar*         x) { /* nothing to do */ };
+  void do_RangeCheckPredicate(RangeCheckPredicate* x) { /* nothing to do */ };
+  void do_Assert         (Assert*         x) { /* nothing to do */ };
 };
@@ -225,15 +227,22 @@ class ValueNumberingEffects: public ValueNumberingVisitor {
 class GlobalValueNumbering: public ValueNumberingVisitor {
  private:
+  Compilation*  _compilation;      // compilation data
   ValueMap*     _current_map;      // value map of current block
   ValueMapArray _value_maps;       // list of value maps for all blocks
+  ValueSet      _processed_values; // marker for instructions that were already processed
+  bool          _has_substitutions; // set to true when substitutions must be resolved

  public:
   // accessors
+  Compilation*  compilation() const              { return _compilation; }
   ValueMap*     current_map()                    { return _current_map; }
   ValueMap*     value_map_of(BlockBegin* block)  { return _value_maps.at(block->linear_scan_number()); }
   void          set_value_map_of(BlockBegin* block, ValueMap* map) { assert(value_map_of(block) == NULL, ""); _value_maps.at_put(block->linear_scan_number(), map); }

+  bool          is_processed(Value v)            { return _processed_values.contains(v); }
+  void          set_processed(Value v)           { _processed_values.put(v); }
+
   // implementation for abstract methods of ValueNumberingVisitor
   void          kill_memory()                                 { current_map()->kill_memory(); }
   void          kill_field(ciField* field, bool all_offsets)  { current_map()->kill_field(field, all_offsets); }
@@ -241,6 +250,7 @@ class GlobalValueNumbering: public ValueNumberingVisitor {
   // main entry point that performs global value numbering
   GlobalValueNumbering(IR* ir);
+  void          substitute(Instruction* instr);  // substitute instruction if it is contained in current value map
 };

 #endif // SHARE_VM_C1_C1_VALUEMAP_HPP

View file

@@ -119,6 +119,24 @@
   develop(bool, UseGlobalValueNumbering, true,                               \
           "Use Global Value Numbering (separate phase)")                     \
                                                                              \
+  product(bool, UseLoopInvariantCodeMotion, true,                            \
+          "Simple loop invariant code motion for short loops during GVN")    \
+                                                                             \
+  develop(bool, TracePredicateFailedTraps, false,                            \
+          "trace runtime traps caused by predicate failure")                 \
+                                                                             \
+  develop(bool, StressLoopInvariantCodeMotion, false,                        \
+          "stress loop invariant code motion")                               \
+                                                                             \
+  develop(bool, TraceRangeCheckElimination, false,                           \
+          "Trace Range Check Elimination")                                   \
+                                                                             \
+  develop(bool, AssertRangeCheckElimination, false,                          \
+          "Assert Range Check Elimination")                                  \
+                                                                             \
+  develop(bool, StressRangeCheckElimination, false,                          \
+          "stress Range Check Elimination")                                  \
+                                                                             \
   develop(bool, PrintValueNumbering, false,                                  \
           "Print Value Numbering")                                           \
                                                                              \
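For readers unfamiliar with HotSpot's flag files: each develop(...)/product(...) entry above is one row of an X-macro table, which the VM expands several times — once to define the flag variables, once to build the table that -XX option parsing and printing use. A rough sketch of the mechanism, with invented macro names (the real expansion in globals.hpp is considerably richer):

    #include <cstdio>

    // Simplified X-macro flag table in the style of c1_globals.hpp.
    #define MY_FLAGS(develop, product)                                        \
      product(bool, UseLoopInvariantCodeMotion, true,                         \
              "Simple loop invariant code motion for short loops during GVN") \
      develop(bool, TraceRangeCheckElimination, false,                        \
              "Trace Range Check Elimination")

    // One expansion: define the flag variables.
    #define DEFINE_FLAG(type, name, value, doc) type name = value;
    MY_FLAGS(DEFINE_FLAG, DEFINE_FLAG)
    #undef DEFINE_FLAG

    // A second expansion over the same table: print name, value and doc.
    #define PRINT_FLAG(type, name, value, doc) \
      std::printf("%-32s %d  %s\n", #name, (int)name, doc);

    int main() {
      MY_FLAGS(PRINT_FLAG, PRINT_FLAG)
    }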

View file

@@ -790,6 +790,17 @@ int ciMethod::scale_count(int count, float prof_factor) {
   return count;
 }

+// ------------------------------------------------------------------
+// ciMethod::is_ignored_by_security_stack_walk
+//
+bool ciMethod::is_ignored_by_security_stack_walk() const {
+  check_is_loaded();
+  VM_ENTRY_MARK;
+  return get_Method()->is_ignored_by_security_stack_walk();
+}
+
 // ------------------------------------------------------------------
 // invokedynamic support

View file

@@ -166,6 +166,7 @@ class ciMethod : public ciMetadata {
   // Code size for inlining decisions.
   int code_size_for_inlining();

+  bool caller_sensitive() { return get_Method()->caller_sensitive(); }
   bool force_inline()     { return get_Method()->force_inline();     }
   bool dont_inline()      { return get_Method()->dont_inline();      }
@@ -264,6 +265,9 @@ class ciMethod : public ciMetadata {
   int instructions_size();
   int scale_count(int count, float prof_factor = 1.);  // make MDO count commensurate with IIC

+  // Stack walking support
+  bool is_ignored_by_security_stack_walk() const;
+
   // JSR 292 support
   bool is_method_handle_intrinsic() const;
   bool is_compiled_lambda_form() const;

View file

@@ -1735,9 +1735,14 @@ ClassFileParser::AnnotationCollector::annotation_index(ClassLoaderData* loader_data,
                                                        Symbol* name) {
   vmSymbols::SID sid = vmSymbols::find_sid(name);
   // Privileged code can use all annotations.  Other code silently drops some.
-  bool privileged = loader_data->is_the_null_class_loader_data() ||
-                    loader_data->is_anonymous();
+  const bool privileged = loader_data->is_the_null_class_loader_data() ||
+                          loader_data->is_ext_class_loader_data() ||
+                          loader_data->is_anonymous();
   switch (sid) {
+  case vmSymbols::VM_SYMBOL_ENUM_NAME(sun_reflect_CallerSensitive_signature):
+    if (_location != _in_method)  break;  // only allow for methods
+    if (!privileged)              break;  // only allow in privileged code
+    return _method_CallerSensitive;
   case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_ForceInline_signature):
     if (_location != _in_method)  break;  // only allow for methods
     if (!privileged)              break;  // only allow in privileged code
@@ -1775,6 +1780,8 @@ ClassFileParser::FieldAnnotationCollector::~FieldAnnotationCollector() {
 }

 void ClassFileParser::MethodAnnotationCollector::apply_to(methodHandle m) {
+  if (has_annotation(_method_CallerSensitive))
+    m->set_caller_sensitive(true);
   if (has_annotation(_method_ForceInline))
     m->set_force_inline(true);
   if (has_annotation(_method_DontInline))

View file

@@ -119,6 +119,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
     enum Location { _in_field, _in_method, _in_class };
     enum ID {
       _unknown = 0,
+      _method_CallerSensitive,
       _method_ForceInline,
      _method_DontInline,
      _method_LambdaForm_Compiled,

View file

@@ -321,6 +321,13 @@ ClassLoaderData::~ClassLoaderData() {
   }
 }

+/**
+ * Returns true if this class loader data is for the extension class loader.
+ */
+bool ClassLoaderData::is_ext_class_loader_data() const {
+  return SystemDictionary::is_ext_class_loader(class_loader());
+}
+
 Metaspace* ClassLoaderData::metaspace_non_null() {
   assert(!DumpSharedSpaces, "wrong metaspace!");
   // If the metaspace has not been allocated, create a new one.  Might want
// If the metaspace has not been allocated, create a new one. Might want // If the metaspace has not been allocated, create a new one. Might want

View file

@@ -191,6 +191,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
   bool is_the_null_class_loader_data() const {
     return this == _the_null_class_loader_data;
   }
+  bool is_ext_class_loader_data() const;

   // The Metaspace is created lazily so may be NULL.  This
   // method will allocate a Metaspace if needed.

View file

@@ -1054,6 +1054,7 @@ class java_lang_invoke_MemberName: AllStatic {
     MN_IS_CONSTRUCTOR       = 0x00020000, // constructor
     MN_IS_FIELD             = 0x00040000, // field
     MN_IS_TYPE              = 0x00080000, // nested type
+    MN_CALLER_SENSITIVE     = 0x00100000, // @CallerSensitive annotation detected
     MN_REFERENCE_KIND_SHIFT = 24,         // refKind
     MN_REFERENCE_KIND_MASK  = 0x0F000000 >> MN_REFERENCE_KIND_SHIFT,
     // The SEARCH_* bits are not for MN.flags but for the matchFlags argument of MHN.getMembers:
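The new MN_CALLER_SENSITIVE bit lives in the same packed flags word as the kind bits and the reference-kind field. A small sketch of how such a word is read — constants copied from the hunk above, accessor names illustrative:

    #include <cstdint>

    enum {
      MN_IS_CONSTRUCTOR       = 0x00020000,
      MN_IS_FIELD             = 0x00040000,
      MN_IS_TYPE              = 0x00080000,
      MN_CALLER_SENSITIVE     = 0x00100000,
      MN_REFERENCE_KIND_SHIFT = 24,
      MN_REFERENCE_KIND_MASK  = 0x0F000000 >> MN_REFERENCE_KIND_SHIFT
    };

    // Illustrative accessors over a MemberName-style flags word.
    inline bool is_caller_sensitive(int32_t flags) {
      return (flags & MN_CALLER_SENSITIVE) != 0;
    }

    inline int reference_kind(int32_t flags) {
      return (flags >> MN_REFERENCE_KIND_SHIFT) & MN_REFERENCE_KIND_MASK;
    }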

View file

@@ -146,6 +146,17 @@ bool SystemDictionary::is_parallelDefine(Handle class_loader) {
   }
   return false;
 }

+/**
+ * Returns true if the passed class loader is the extension class loader.
+ */
+bool SystemDictionary::is_ext_class_loader(Handle class_loader) {
+  if (class_loader.is_null()) {
+    return false;
+  }
+  return (class_loader->klass()->name() == vmSymbols::sun_misc_Launcher_ExtClassLoader());
+}
+
 // ----------------------------------------------------------------------------
 // Resolving of classes

View file

@@ -106,6 +106,7 @@ class SymbolPropertyTable;
   do_klass(ThreadDeath_klass,            java_lang_ThreadDeath,              Pre ) \
   do_klass(Exception_klass,              java_lang_Exception,                Pre ) \
   do_klass(RuntimeException_klass,       java_lang_RuntimeException,         Pre ) \
+  do_klass(SecurityManager_klass,        java_lang_SecurityManager,          Pre ) \
   do_klass(ProtectionDomain_klass,       java_security_ProtectionDomain,     Pre ) \
   do_klass(AccessControlContext_klass,   java_security_AccessControlContext, Pre ) \
   do_klass(ClassNotFoundException_klass, java_lang_ClassNotFoundException,   Pre ) \
@@ -145,6 +146,7 @@ class SymbolPropertyTable;
   do_klass(reflect_DelegatingClassLoader_klass,         sun_reflect_DelegatingClassLoader,         Opt            ) \
   do_klass(reflect_ConstantPool_klass,                  sun_reflect_ConstantPool,                  Opt_Only_JDK15 ) \
   do_klass(reflect_UnsafeStaticFieldAccessorImpl_klass, sun_reflect_UnsafeStaticFieldAccessorImpl, Opt_Only_JDK15 ) \
+  do_klass(reflect_CallerSensitive_klass,               sun_reflect_CallerSensitive,               Opt            ) \
                                                                                                                    \
   /* support for dynamic typing; it's OK if these are NULL in earlier JDKs */                                      \
   do_klass(MethodHandle_klass,                          java_lang_invoke_MethodHandle,             Pre_JSR292     ) \
@@ -628,12 +630,15 @@ private:
   static bool is_parallelCapable(Handle class_loader);
   static bool is_parallelDefine(Handle class_loader);

+public:
+  static bool is_ext_class_loader(Handle class_loader);
+private:
   static Klass* find_shared_class(Symbol* class_name);

   // Setup link to hierarchy
   static void add_to_hierarchy(instanceKlassHandle k, TRAPS);

 private:
   // We pass in the hashtable index so we can calculate it outside of
   // the SystemDictionary_lock.

View file

@@ -91,6 +91,7 @@
   template(java_lang_StringBuffer,                    "java/lang/StringBuffer")                   \
   template(java_lang_StringBuilder,                   "java/lang/StringBuilder")                  \
   template(java_lang_CharSequence,                    "java/lang/CharSequence")                   \
+  template(java_lang_SecurityManager,                 "java/lang/SecurityManager")                \
   template(java_security_AccessControlContext,        "java/security/AccessControlContext")       \
   template(java_security_ProtectionDomain,            "java/security/ProtectionDomain")           \
   template(java_io_OutputStream,                      "java/io/OutputStream")                     \
@@ -211,6 +212,8 @@
   template(sun_reflect_SerializationConstructorAccessorImpl, "sun/reflect/SerializationConstructorAccessorImpl") \
   template(sun_reflect_DelegatingClassLoader,         "sun/reflect/DelegatingClassLoader")        \
   template(sun_reflect_Reflection,                    "sun/reflect/Reflection")                   \
+  template(sun_reflect_CallerSensitive,               "sun/reflect/CallerSensitive")              \
+  template(sun_reflect_CallerSensitive_signature,     "Lsun/reflect/CallerSensitive;")            \
   template(checkedExceptions_name,                    "checkedExceptions")                        \
   template(clazz_name,                                "clazz")                                    \
   template(exceptionTypes_name,                       "exceptionTypes")                           \
@@ -343,6 +346,7 @@
   template(contextClassLoader_name,                   "contextClassLoader")                       \
   template(inheritedAccessControlContext_name,        "inheritedAccessControlContext")            \
   template(isPrivileged_name,                         "isPrivileged")                             \
+  template(getClassContext_name,                      "getClassContext")                          \
   template(wait_name,                                 "wait")                                     \
   template(checkPackageAccess_name,                   "checkPackageAccess")                       \
   template(stackSize_name,                            "stackSize")                                \
@@ -463,6 +467,7 @@
   template(void_classloader_signature,                "()Ljava/lang/ClassLoader;")                \
   template(void_object_signature,                     "()Ljava/lang/Object;")                     \
   template(void_class_signature,                      "()Ljava/lang/Class;")                      \
+  template(void_class_array_signature,                "()[Ljava/lang/Class;")                     \
   template(void_string_signature,                     "()Ljava/lang/String;")                     \
   template(object_array_object_signature,             "([Ljava/lang/Object;)Ljava/lang/Object;")  \
   template(object_object_array_object_signature,      "(Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;")\
@@ -705,9 +710,8 @@
   do_intrinsic(_getLength,                java_lang_reflect_Array, getLength_name, object_int_signature,      F_SN) \
   do_name(     getLength_name,            "getLength")                                                              \
                                                                                                                     \
-  do_intrinsic(_getCallerClass,           sun_reflect_Reflection,  getCallerClass_name, getCallerClass_signature, F_SN) \
+  do_intrinsic(_getCallerClass,           sun_reflect_Reflection,  getCallerClass_name, void_class_signature,  F_SN) \
   do_name(     getCallerClass_name,       "getCallerClass")                                                         \
-  do_signature(getCallerClass_signature,  "(I)Ljava/lang/Class;")                                                   \
                                                                                                                     \
   do_intrinsic(_newArray,                 java_lang_reflect_Array, newArray_name, newArray_signature,         F_SN) \
   do_name(     newArray_name,             "newArray")                                                               \

View file

@@ -156,6 +156,11 @@ class CodeCache : AllStatic {
   static address low_bound()  { return (address) _heap->low_boundary();  }
   static address high_bound() { return (address) _heap->high_boundary(); }

+  static bool has_space(int size) {
+    // Always leave some room in the CodeCache for I2C/C2I adapters
+    return largest_free_block() > (CodeCacheMinimumFreeSpace + size);
+  }
+
   // Profiling
   static address first_address();  // first address used for CodeBlobs
   static address last_address();   // last address used for CodeBlobs
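The point of the new has_space() check — applied at the allocation sites in the nmethod.cpp hunks that follow — is to keep a headroom reserve free for I2C/C2I adapters while letting the allocator itself treat exhaustion as a hard error. A toy sketch of that reserve-headroom pattern (invented Arena class, not the real CodeCache):

    #include <cassert>
    #include <cstddef>

    // Toy bump allocator illustrating the reserve-headroom policy.
    class Arena {
      char*  _base;
      size_t _capacity;
      size_t _used = 0;
      size_t _reserve;   // headroom kept free for critical allocations

     public:
      Arena(char* base, size_t capacity, size_t reserve)
          : _base(base), _capacity(capacity), _reserve(reserve) {}

      // Callers check this first, mirroring CodeCache::has_space().
      bool has_space(size_t size) const {
        return _capacity - _used > _reserve + size;
      }

      // Mirrors the reworked nmethod::operator new below: by the time we
      // are called the caller has checked, so failure is a bug, not a
      // recoverable condition.
      void* allocate(size_t size) {
        assert(_used + size <= _capacity && "caller must check has_space()");
        void* p = _base + _used;
        _used += size;
        return p;
      }
    };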

View file

@@ -486,7 +486,6 @@ void nmethod::init_defaults() {
 #endif // def HAVE_DTRACE_H
 }

 nmethod* nmethod::new_native_nmethod(methodHandle method,
   int compile_id,
   CodeBuffer *code_buffer,
@@ -502,18 +501,20 @@ nmethod* nmethod::new_native_nmethod(methodHandle method,
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
+    if (CodeCache::has_space(native_nmethod_size)) {
       CodeOffsets offsets;
       offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
       offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
-    nm = new (native_nmethod_size)
-      nmethod(method(), native_nmethod_size, compile_id, &offsets,
-              code_buffer, frame_size,
-              basic_lock_owner_sp_offset, basic_lock_sp_offset,
-              oop_maps);
+      nm = new (native_nmethod_size) nmethod(method(), native_nmethod_size,
+                                             compile_id, &offsets,
+                                             code_buffer, frame_size,
+                                             basic_lock_owner_sp_offset,
+                                             basic_lock_sp_offset, oop_maps);
       NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_native_nmethod(nm));
       if (PrintAssembly && nm != NULL)
         Disassembler::decode(nm);
     }
+  }
   // verify nmethod
   debug_only(if (nm) nm->verify();) // might block
@@ -537,17 +538,20 @@ nmethod* nmethod::new_dtrace_nmethod(methodHandle method,
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     int nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
+    if (CodeCache::has_space(nmethod_size)) {
       CodeOffsets offsets;
       offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
       offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
       offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
-    nm = new (nmethod_size) nmethod(method(), nmethod_size, &offsets, code_buffer, frame_size);
+      nm = new (nmethod_size) nmethod(method(), nmethod_size,
+                                      &offsets, code_buffer, frame_size);
       NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_nmethod(nm));
       if (PrintAssembly && nm != NULL)
         Disassembler::decode(nm);
     }
+  }
   // verify nmethod
   debug_only(if (nm) nm->verify();) // might block
@@ -587,6 +591,7 @@ nmethod* nmethod::new_nmethod(methodHandle method,
       + round_to(handler_table->size_in_bytes(), oopSize)
       + round_to(nul_chk_table->size_in_bytes(), oopSize)
       + round_to(debug_info->data_size(),        oopSize);
+    if (CodeCache::has_space(nmethod_size)) {
       nm = new (nmethod_size)
         nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
                 orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
@@ -595,6 +600,7 @@ nmethod* nmethod::new_nmethod(methodHandle method,
                 nul_chk_table,
                 compiler,
                 comp_level);
+    }
   if (nm != NULL) {
     // To make dependency checking during class loading fast, record
     // the nmethod dependencies in the classes it is dependent on.
@@ -793,9 +799,9 @@ nmethod::nmethod(
 #endif // def HAVE_DTRACE_H

 void* nmethod::operator new(size_t size, int nmethod_size) {
-  // Always leave some room in the CodeCache for I2C/C2I adapters
-  if (CodeCache::largest_free_block() < CodeCacheMinimumFreeSpace) return NULL;
-  return CodeCache::allocate(nmethod_size);
+  void* alloc = CodeCache::allocate(nmethod_size);
+  guarantee(alloc != NULL, "CodeCache should have enough space");
+  return alloc;
 }

View file

@@ -2166,6 +2166,9 @@ void CompileBroker::print_times() {
     comp->print_timers();
   }
   tty->cr();
+  tty->print_cr("  Total compiled methods   : %6d methods", CompileBroker::_total_compile_count);
+  tty->print_cr("    Standard compilation   : %6d methods", CompileBroker::_total_standard_compile_count);
+  tty->print_cr("    On stack replacement   : %6d methods", CompileBroker::_total_osr_compile_count);
   int tcb = CompileBroker::_sum_osr_bytes_compiled + CompileBroker::_sum_standard_bytes_compiled;
   tty->print_cr("  Total compiled bytecodes : %6d bytes", tcb);
   tty->print_cr("    Standard compilation   : %6d bytes", CompileBroker::_sum_standard_bytes_compiled);

View file

@@ -2228,8 +2228,6 @@ void InstanceKlass::clean_implementors_list(BoolObjectClosure* is_alive) {
 }

 void InstanceKlass::clean_method_data(BoolObjectClosure* is_alive) {
-#ifdef COMPILER2
-  // Currently only used by C2.
   for (int m = 0; m < methods()->length(); m++) {
     MethodData* mdo = methods()->at(m)->method_data();
     if (mdo != NULL) {
@@ -2240,15 +2238,6 @@ void InstanceKlass::clean_method_data(BoolObjectClosure* is_alive) {
       }
     }
   }
-#else
-#ifdef ASSERT
-  // Verify that we haven't started to use MDOs for C1.
-  for (int m = 0; m < methods()->length(); m++) {
-    MethodData* mdo = methods()->at(m)->method_data();
-    assert(mdo == NULL, "Didn't expect C1 to use MDOs");
-  }
-#endif // ASSERT
-#endif // !COMPILER2
 }

View file

@@ -967,6 +967,32 @@ bool Method::should_not_be_cached() const {
   return false;
 }

+/**
+ * Returns true if this is one of the specially treated methods for
+ * security related stack walks (like Reflection.getCallerClass).
+ */
+bool Method::is_ignored_by_security_stack_walk() const {
+  const bool use_new_reflection = JDK_Version::is_gte_jdk14x_version() && UseNewReflection;
+
+  assert(intrinsic_id() != vmIntrinsics::_invoke || Universe::reflect_invoke_cache()->is_same_method((Method*)this), "sanity");
+  if (intrinsic_id() == vmIntrinsics::_invoke) {
+    // This is Method.invoke() -- ignore it
+    return true;
+  }
+  if (use_new_reflection &&
+      method_holder()->is_subclass_of(SystemDictionary::reflect_MethodAccessorImpl_klass())) {
+    // This is an auxiliary frame -- ignore it
+    return true;
+  }
+  if (is_method_handle_intrinsic() || is_compiled_lambda_form()) {
+    // This is an internal adapter frame for method handles -- ignore it
+    return true;
+  }
+  return false;
+}
+
 // Constant pool structure for invoke methods:
 enum {
   _imcp_invoke_name = 1,        // utf8: 'invokeExact', etc.
@@ -1180,13 +1206,13 @@ vmSymbols::SID Method::klass_id_for_intrinsics(Klass* holder) {
   // because we are not loading from core libraries
   // exception: the AES intrinsics come from lib/ext/sunjce_provider.jar
   // which does not use the class default class loader so we check for its loader here
-  if ((InstanceKlass::cast(holder)->class_loader() != NULL) &&
-      InstanceKlass::cast(holder)->class_loader()->klass()->name() != vmSymbols::sun_misc_Launcher_ExtClassLoader()) {
+  InstanceKlass* ik = InstanceKlass::cast(holder);
+  if ((ik->class_loader() != NULL) && !SystemDictionary::is_ext_class_loader(ik->class_loader())) {
     return vmSymbols::NO_SID;   // regardless of name, no intrinsics here
   }

   // see if the klass name is well-known:
-  Symbol* klass_name = InstanceKlass::cast(holder)->name();
+  Symbol* klass_name = ik->name();
   return vmSymbols::find_sid(klass_name);
 }

View file

@@ -119,10 +119,11 @@ class Method : public Metadata {
   u2                _method_size;          // size of this object
   u1                _intrinsic_id;         // vmSymbols::intrinsic_id (0 == _none)
   u1                _jfr_towrite      : 1, // Flags
+                    _caller_sensitive : 1,
                     _force_inline     : 1,
                     _hidden           : 1,
                     _dont_inline      : 1,
-                                      : 4;
+                                      : 3;
   u2                _interpreter_throwout_count; // Count of times method was exited via exception while interpreting
   u2                _number_of_breakpoints;      // fullspeed debugging support
   InvocationCounter _invocation_counter;         // Incremented before each activation of the method - used to trigger frequency-based optimizations
@@ -618,6 +619,9 @@ class Method : public Metadata {
   // Reflection support
   bool is_overridden_in(Klass* k) const;

+  // Stack walking support
+  bool is_ignored_by_security_stack_walk() const;
+
   // JSR 292 support
   bool is_method_handle_intrinsic() const;          // MethodHandles::is_signature_polymorphic_intrinsic(intrinsic_id)
   bool is_compiled_lambda_form() const;             // intrinsic_id() == vmIntrinsics::_compiledLambdaForm
@@ -706,8 +710,9 @@ class Method : public Metadata {
   static vmSymbols::SID klass_id_for_intrinsics(Klass* holder);

   bool jfr_towrite()                 { return _jfr_towrite;      }
-  void set_jfr_towrite(bool towrite) { _jfr_towrite = towrite;   }
+  void set_jfr_towrite(bool x)       { _jfr_towrite = x;         }
+  bool caller_sensitive()            { return _caller_sensitive; }
+  void set_caller_sensitive(bool x)  { _caller_sensitive = x;    }
   bool force_inline()                { return _force_inline;     }
   void set_force_inline(bool x)      { _force_inline = x;        }
   bool dont_inline()                 { return _dont_inline;      }
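A note on the `: 4` → `: 3` change above: the Method flags byte now spends five of its eight bits, and the unnamed trailing member is padding that keeps the byte fully accounted for. A minimal sketch of the same packing (illustrative struct, not the real Method layout):

    #include <cstdint>

    // Illustrative packing of five one-bit flags into a single byte,
    // mirroring the Method flags word after this change.
    struct Flags {
      uint8_t jfr_towrite      : 1,
              caller_sensitive : 1,
              force_inline     : 1,
              hidden           : 1,
              dont_inline      : 1,
                               : 3;  // unnamed padding keeps the byte full
    };

    static_assert(sizeof(Flags) == 1, "all five flags share one byte");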

View file

@@ -392,6 +392,9 @@ MethodData* MethodData::allocate(ClassLoaderData* loader_data, methodHandle method, TRAPS) {
 }

 int MethodData::bytecode_cell_count(Bytecodes::Code code) {
+#if defined(COMPILER1) && !defined(COMPILER2)
+  return no_profile_data;
+#else
   switch (code) {
   case Bytecodes::_checkcast:
   case Bytecodes::_instanceof:
@@ -438,6 +441,7 @@ int MethodData::bytecode_cell_count(Bytecodes::Code code) {
     return variable_cell_count;
   }
   return no_profile_data;
+#endif
 }

 // Compute the size of the profiling information corresponding to
@@ -509,6 +513,9 @@ int MethodData::compute_allocation_size_in_words(methodHandle method) {
 // the segment in bytes.
 int MethodData::initialize_data(BytecodeStream* stream,
                                 int data_index) {
+#if defined(COMPILER1) && !defined(COMPILER2)
+  return 0;
+#else
   int cell_count = -1;
   int tag = DataLayout::no_tag;
   DataLayout* data_layout = data_layout_at(data_index);
@@ -587,6 +594,7 @@ int MethodData::initialize_data(BytecodeStream* stream,
     assert(!bytecode_has_profile(c), "agree w/ !BHP");
     return 0;
   }
+#endif
 }

View file

@@ -3445,7 +3445,6 @@ void GraphKit::sync_kit(IdealKit& ideal) {

 void GraphKit::final_sync(IdealKit& ideal) {
   // Final sync IdealKit and graphKit.
-  __ drain_delay_transform();
   sync_kit(ideal);
 }

View file

@@ -48,9 +48,9 @@ IdealKit::IdealKit(GraphKit* gkit, bool delay_all_transforms, bool has_declarations) :
   _cvstate = NULL;
   // We can go memory state free or else we need the entire memory state
   assert(_initial_memory == NULL || _initial_memory->Opcode() == Op_MergeMem, "memory must be pre-split");
+  assert(!_gvn.is_IterGVN(), "IdealKit can't be used during Optimize phase");
   int init_size = 5;
   _pending_cvstates = new (C->node_arena()) GrowableArray<Node*>(C->node_arena(), init_size, 0, 0);
-  _delay_transform  = new (C->node_arena()) GrowableArray<Node*>(C->node_arena(), init_size, 0, 0);
   DEBUG_ONLY(_state = new (C->node_arena()) GrowableArray<int>(C->node_arena(), init_size, 0, 0));
   if (!has_declarations) {
     declarations_done();
@@ -296,19 +296,16 @@ Node* IdealKit::transform(Node* n) {
     return delay_transform(n);
   } else {
     n = gvn().transform(n);
-    if (!gvn().is_IterGVN()) {
       C->record_for_igvn(n);
-    }
     return n;
   }
 }

 //-----------------------------delay_transform-----------------------------------
 Node* IdealKit::delay_transform(Node* n) {
-  if (!gvn().is_IterGVN() || !gvn().is_IterGVN()->delay_transform()) {
+  // Delay transform until IterativeGVN
     gvn().set_type(n, n->bottom_type());
-  }
-  _delay_transform->push(n);
+  C->record_for_igvn(n);
   return n;
 }

@@ -332,17 +329,6 @@ void IdealKit::clear(Node* m) {
   for (uint i = 0; i < m->req(); i++) m->set_req(i, NULL);
 }

-//-----------------------------drain_delay_transform----------------------------
-void IdealKit::drain_delay_transform() {
-  while (_delay_transform->length() > 0) {
-    Node* n = _delay_transform->pop();
-    gvn().transform(n);
-    if (!gvn().is_IterGVN()) {
-      C->record_for_igvn(n);
-    }
-  }
-}
-
 //-----------------------------IdealVariable----------------------------
 IdealVariable::IdealVariable(IdealKit &k) {
   k.declare(this);
@@ -351,9 +337,7 @@ IdealVariable::IdealVariable(IdealKit &k) {
 Node* IdealKit::memory(uint alias_idx) {
   MergeMemNode* mem = merged_memory();
   Node* p = mem->memory_at(alias_idx);
-  if (!gvn().is_IterGVN() || !gvn().is_IterGVN()->delay_transform()) {
     _gvn.set_type(p, Type::MEMORY);  // must be mapped
-  }
   return p;
 }

View file

@@ -102,7 +102,6 @@ class IdealKit: public StackObj {
   Compile * const C;
   PhaseGVN &_gvn;
   GrowableArray<Node*>* _pending_cvstates; // stack of cvstates
-  GrowableArray<Node*>* _delay_transform;  // delay invoking gvn.transform until drain
   Node* _cvstate;                          // current cvstate (control, memory and variables)
   uint _var_ct;                            // number of variables
   bool _delay_all_transforms;              // flag forcing all transforms to be delayed
@@ -121,7 +120,7 @@ class IdealKit: public StackObj {
   void clear(Node* m);                     // clear a cvstate
   void stop() { clear(_cvstate); }         // clear current cvstate
   Node* delay_transform(Node* n);
-  Node* transform(Node* n);                // gvn.transform or push node on delay list
+  Node* transform(Node* n);                // gvn.transform or skip it
   Node* promote_to_phi(Node* n, Node* reg);// Promote "n" to a phi on region "reg"
   bool was_promoted_to_phi(Node* n, Node* reg) {
     return (n->is_Phi() && n->in(0) == reg);
@@ -146,7 +145,6 @@ class IdealKit: public StackObj {
   IdealKit(GraphKit* gkit, bool delay_all_transforms = false, bool has_declarations = false);
   ~IdealKit() {
     stop();
-    drain_delay_transform();
   }
   void sync_kit(GraphKit* gkit);
@@ -173,7 +171,6 @@ class IdealKit: public StackObj {
   void bind(Node* lab);
   void goto_(Node* lab, bool bind = false);
   void declarations_done();
-  void drain_delay_transform();

   Node* IfTrue(IfNode* iff)  { return transform(new (C) IfTrueNode(iff));  }
   Node* IfFalse(IfNode* iff) { return transform(new (C) IfFalseNode(iff)); }
@@ -198,7 +195,11 @@ class IdealKit: public StackObj {
   Node* thread() { return gvn().transform(new (C) ThreadLocalNode()); }

   // Pointers
-  Node* AddP(Node *base, Node *ptr, Node *off) { return transform(new (C) AddPNode(base, ptr, off)); }
+
+  // Raw address should be transformed regardless of the 'delay_transform' flag
+  // to produce canonical form CastX2P(offset).
+  Node* AddP(Node *base, Node *ptr, Node *off) { return _gvn.transform(new (C) AddPNode(base, ptr, off)); }
+
   Node* CmpP(Node* l, Node* r) { return transform(new (C) CmpPNode(l, r)); }
 #ifdef _LP64
   Node* XorX(Node* l, Node* r) { return transform(new (C) XorLNode(l, r)); }
@@ -208,8 +209,6 @@ class IdealKit: public StackObj {
   Node* URShiftX(Node* l, Node* r) { return transform(new (C) URShiftXNode(l, r)); }
   Node* ConX(jint k) { return (Node*)gvn().MakeConX(k); }
   Node* CastPX(Node* ctl, Node* p) { return transform(new (C) CastP2XNode(ctl, p)); }
-  // Add a fixed offset to a pointer
-  Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset);

   // Memory operations

View file

@@ -37,8 +37,6 @@
 #include "opto/memnode.hpp"
 #include "opto/opcodes.hpp"

-#define EXACT_PRESSURE 1
-
 //=============================================================================
 //------------------------------IFG--------------------------------------------
 PhaseIFG::PhaseIFG( Arena *arena ) : Phase(Interference_Graph), _arena(arena) {
@@ -445,23 +443,15 @@ static void lower_pressure( LRG *lrg, uint where, Block *b, uint *pressure, uint *hrp_index ) {
     pressure[1] -= lrg->reg_pressure();
     if( pressure[1] == (uint)FLOATPRESSURE ) {
       hrp_index[1] = where;
-#ifdef EXACT_PRESSURE
       if( pressure[1] > b->_freg_pressure )
         b->_freg_pressure = pressure[1]+1;
-#else
-      b->_freg_pressure = (uint)FLOATPRESSURE+1;
-#endif
     }
   } else if( lrg->mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) {
     pressure[0] -= lrg->reg_pressure();
     if( pressure[0] == (uint)INTPRESSURE ) {
       hrp_index[0] = where;
-#ifdef EXACT_PRESSURE
       if( pressure[0] > b->_reg_pressure )
         b->_reg_pressure = pressure[0]+1;
-#else
-      b->_reg_pressure = (uint)INTPRESSURE+1;
-#endif
     }
   }
 }
@@ -526,17 +516,13 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
     if (lrg.mask().is_UP() && lrg.mask_size()) {
       if (lrg._is_float || lrg._is_vector) {   // Count float pressure
         pressure[1] += lrg.reg_pressure();
-#ifdef EXACT_PRESSURE
         if( pressure[1] > b->_freg_pressure )
           b->_freg_pressure = pressure[1];
-#endif
       // Count int pressure, but do not count the SP, flags
       } else if( lrgs(lidx).mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) {
         pressure[0] += lrg.reg_pressure();
-#ifdef EXACT_PRESSURE
         if( pressure[0] > b->_reg_pressure )
           b->_reg_pressure = pressure[0];
-#endif
       }
     }
   }
@@ -589,30 +575,20 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
       RegMask itmp = lrgs(r).mask();
       itmp.AND(*Matcher::idealreg2regmask[Op_RegI]);
       int iregs = itmp.Size();
-#ifdef EXACT_PRESSURE
       if( pressure[0]+iregs > b->_reg_pressure )
         b->_reg_pressure = pressure[0]+iregs;
-#endif
       if( pressure[0]       <= (uint)INTPRESSURE &&
           pressure[0]+iregs >  (uint)INTPRESSURE ) {
-#ifndef EXACT_PRESSURE
-        b->_reg_pressure = (uint)INTPRESSURE+1;
-#endif
         hrp_index[0] = j-1;
       }
       // Count the float-only registers
       RegMask ftmp = lrgs(r).mask();
       ftmp.AND(*Matcher::idealreg2regmask[Op_RegD]);
       int fregs = ftmp.Size();
-#ifdef EXACT_PRESSURE
       if( pressure[1]+fregs > b->_freg_pressure )
         b->_freg_pressure = pressure[1]+fregs;
-#endif
       if( pressure[1]       <= (uint)FLOATPRESSURE &&
           pressure[1]+fregs >  (uint)FLOATPRESSURE ) {
-#ifndef EXACT_PRESSURE
-        b->_freg_pressure = (uint)FLOATPRESSURE+1;
-#endif
         hrp_index[1] = j-1;
       }
     }
@@ -769,16 +745,12 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
         if (lrg.mask().is_UP() && lrg.mask_size()) {
           if (lrg._is_float || lrg._is_vector) {
             pressure[1] += lrg.reg_pressure();
-#ifdef EXACT_PRESSURE
             if( pressure[1] > b->_freg_pressure )
               b->_freg_pressure = pressure[1];
-#endif
           } else if( lrg.mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) {
             pressure[0] += lrg.reg_pressure();
-#ifdef EXACT_PRESSURE
             if( pressure[0] > b->_reg_pressure )
               b->_reg_pressure = pressure[0];
-#endif
           }
         }
         assert( pressure[0] == count_int_pressure  (&liveout), "" );
@@ -794,21 +766,13 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
     // the whole block is high pressure.
     if( pressure[0] > (uint)INTPRESSURE ) {
       hrp_index[0] = 0;
-#ifdef EXACT_PRESSURE
       if( pressure[0] > b->_reg_pressure )
         b->_reg_pressure = pressure[0];
-#else
-      b->_reg_pressure = (uint)INTPRESSURE+1;
-#endif
     }
     if( pressure[1] > (uint)FLOATPRESSURE ) {
       hrp_index[1] = 0;
-#ifdef EXACT_PRESSURE
       if( pressure[1] > b->_freg_pressure )
         b->_freg_pressure = pressure[1];
-#else
-      b->_freg_pressure = (uint)FLOATPRESSURE+1;
-#endif
     }

     // Compute high pressure indice; avoid landing in the middle of projnodes
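With EXACT_PRESSURE gone, the block's recorded register pressure is always the exact running maximum rather than the saturating "limit+1" approximation that the old #else branches produced. The retained bookkeeping pattern in isolation, with illustrative names:

    #include <algorithm>
    #include <cstdint>

    // Illustrative exact-pressure bookkeeping: on every increase, fold the
    // current pressure into the block's running maximum.
    struct BlockPressure {
      uint32_t current = 0;
      uint32_t max     = 0;

      void raise(uint32_t regs) {
        current += regs;
        max = std::max(max, current);  // exact max, no saturation at the limit
      }

      void lower(uint32_t regs) { current -= regs; }
    };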

View file

@@ -231,7 +231,6 @@ class LibraryCallKit : public GraphKit {
   void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark);
   bool inline_native_clone(bool is_virtual);
   bool inline_native_Reflection_getCallerClass();
-  bool is_method_invoke_or_aux_frame(JVMState* jvms);
   // Helper function for inlining native object hash method
   bool inline_native_hashcode(bool is_virtual, bool is_static);
   bool inline_native_getClass();
@@ -393,7 +392,7 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
   case vmIntrinsics::_getCallerClass:
     if (!UseNewReflection)  return NULL;
     if (!InlineReflectionGetCallerClass)  return NULL;
-    if (!JDK_Version::is_gte_jdk14x_version())  return NULL;
+    if (SystemDictionary::reflect_CallerSensitive_klass() == NULL)  return NULL;
     break;

   case vmIntrinsics::_bitCount_i:
@@ -3872,13 +3871,13 @@ bool LibraryCallKit::inline_native_getClass() {
 }

 //-----------------inline_native_Reflection_getCallerClass---------------------
-// public static native Class<?> sun.reflect.Reflection.getCallerClass(int realFramesToSkip);
+// public static native Class<?> sun.reflect.Reflection.getCallerClass();
 //
 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
 //
-// NOTE that this code must perform the same logic as
-// vframeStream::security_get_caller_frame in that it must skip
-// Method.invoke() and auxiliary frames.
+// NOTE: This code must perform the same logic as JVM_GetCallerClass
+// in that it must skip particular security frames and checks for
+// caller sensitive methods.
 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
 #ifndef PRODUCT
   if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
@@ -3886,35 +3885,6 @@ bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
   }
 #endif

-  Node* caller_depth_node = argument(0);
-
-  // The depth value must be a constant in order for the runtime call
-  // to be eliminated.
-  const TypeInt* caller_depth_type = _gvn.type(caller_depth_node)->isa_int();
-  if (caller_depth_type == NULL || !caller_depth_type->is_con()) {
-#ifndef PRODUCT
-    if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
-      tty->print_cr("  Bailing out because caller depth was not a constant");
-    }
-#endif
-    return false;
-  }
-  // Note that the JVM state at this point does not include the
-  // getCallerClass() frame which we are trying to inline.  The
-  // semantics of getCallerClass(), however, are that the "first"
-  // frame is the getCallerClass() frame, so we subtract one from the
-  // requested depth before continuing.  We don't inline requests of
-  // getCallerClass(0).
-  int caller_depth = caller_depth_type->get_con() - 1;
-  if (caller_depth < 0) {
-#ifndef PRODUCT
-    if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
-      tty->print_cr("  Bailing out because caller depth was %d", caller_depth);
-    }
-#endif
-    return false;
-  }
-
   if (!jvms()->has_method()) {
 #ifndef PRODUCT
     if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
@@ -3923,95 +3893,67 @@ bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
 #endif
     return false;
   }

-  int _depth = jvms()->depth();  // cache call chain depth
-
   // Walk back up the JVM state to find the caller at the required
-  // depth.  NOTE that this code must perform the same logic as
-  // vframeStream::security_get_caller_frame in that it must skip
-  // Method.invoke() and auxiliary frames.  Note also that depth is
-  // 1-based (1 is the bottom of the inlining).
-  int inlining_depth = _depth;
-  JVMState* caller_jvms = NULL;
-
-  if (inlining_depth > 0) {
-    caller_jvms = jvms();
-    assert(caller_jvms = jvms()->of_depth(inlining_depth), "inlining_depth == our depth");
-    do {
-      // The following if-tests should be performed in this order
-      if (is_method_invoke_or_aux_frame(caller_jvms)) {
-        // Skip a Method.invoke() or auxiliary frame
-      } else if (caller_depth > 0) {
-        // Skip real frame
-        --caller_depth;
-      } else {
-        // We're done: reached desired caller after skipping.
-        break;
-      }
-      caller_jvms = caller_jvms->caller();
-      --inlining_depth;
-    } while (inlining_depth > 0);
-  }
-
-  if (inlining_depth == 0) {
-#ifndef PRODUCT
-    if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
-      tty->print_cr("  Bailing out because caller depth (%d) exceeded inlining depth (%d)", caller_depth_type->get_con(), _depth);
-      tty->print_cr("  JVM state at this point:");
-      for (int i = _depth; i >= 1; i--) {
-        ciMethod* m = jvms()->of_depth(i)->method();
-        tty->print_cr("   %d) %s.%s", i, m->holder()->name()->as_utf8(), m->name()->as_utf8());
-      }
-    }
-#endif
-    return false;  // Reached end of inlining
-  }
-
-  // Acquire method holder as java.lang.Class
-  ciInstanceKlass* caller_klass  = caller_jvms->method()->holder();
-  ciInstance*      caller_mirror = caller_klass->java_mirror();
-
-  // Push this as a constant
-  set_result(makecon(TypeInstPtr::make(caller_mirror)));
-
-#ifndef PRODUCT
-  if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
-    tty->print_cr("  Succeeded: caller = %s.%s, caller depth = %d, depth = %d", caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), caller_depth_type->get_con(), _depth);
-    tty->print_cr("  JVM state at this point:");
-    for (int i = _depth; i >= 1; i--) {
-      ciMethod* m = jvms()->of_depth(i)->method();
-      tty->print_cr("   %d) %s.%s", i, m->holder()->name()->as_utf8(), m->name()->as_utf8());
-    }
-  }
-#endif
-  return true;
-}
-
-// Helper routine for above
-bool LibraryCallKit::is_method_invoke_or_aux_frame(JVMState* jvms) {
-  ciMethod* method = jvms->method();
-
-  // Is this the Method.invoke method itself?
-  if (method->intrinsic_id() == vmIntrinsics::_invoke)
-    return true;
-
-  // Is this a helper, defined somewhere underneath MethodAccessorImpl.
-  ciKlass* k = method->holder();
-  if (k->is_instance_klass()) {
-    ciInstanceKlass* ik = k->as_instance_klass();
-    for (; ik != NULL; ik = ik->super()) {
-      if (ik->name() == ciSymbol::sun_reflect_MethodAccessorImpl() &&
-          ik == env()->find_system_klass(ik->name())) {
-        return true;
-      }
-    }
-  }
-  else if (method->is_method_handle_intrinsic() ||
-           method->is_compiled_lambda_form()) {
-    // This is an internal adapter frame from the MethodHandleCompiler -- skip it
-    return true;
-  }
-
-  return false;
+  // depth.
+  JVMState* caller_jvms = jvms();
+
+  // Cf. JVM_GetCallerClass
+  // NOTE: Start the loop at depth 1 because the current JVM state does
+  // not include the Reflection.getCallerClass() frame.
+  for (int n = 1; caller_jvms != NULL; caller_jvms = caller_jvms->caller(), n++) {
+    ciMethod* m = caller_jvms->method();
+    switch (n) {
+    case 0:
+      fatal("current JVM state does not include the Reflection.getCallerClass frame");
+      break;
+    case 1:
+      // Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass).
+      if (!m->caller_sensitive()) {
+#ifndef PRODUCT
+        if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+          tty->print_cr("  Bailing out: CallerSensitive annotation expected at frame %d", n);
+        }
+#endif
+        return false;  // bail-out; let JVM_GetCallerClass do the work
+      }
+      break;
+    default:
+      if (!m->is_ignored_by_security_stack_walk()) {
+        // We have reached the desired frame; return the holder class.
+        // Acquire method holder as java.lang.Class and push as constant.
+        ciInstanceKlass* caller_klass = caller_jvms->method()->holder();
+        ciInstance*      caller_mirror = caller_klass->java_mirror();
+        set_result(makecon(TypeInstPtr::make(caller_mirror)));
+#ifndef PRODUCT
+        if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+          tty->print_cr("  Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth());
+          tty->print_cr("  JVM state at this point:");
+          for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
+            ciMethod* m = jvms()->of_depth(i)->method();
+            tty->print_cr("   %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
+          }
+        }
+#endif
+        return true;
+      }
+      break;
+    }
+  }
+
+#ifndef PRODUCT
+  if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+    tty->print_cr("  Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
+    tty->print_cr("  JVM state at this point:");
+    for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
+      ciMethod* m = jvms()->of_depth(i)->method();
+      tty->print_cr("   %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
+    }
+  }
+#endif
+  return false;  // bail-out; let JVM_GetCallerClass do the work
 }

 bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
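The rewritten intrinsic above walks inlined JVM states the way JVM_GetCallerClass walks physical frames: frame 1 must carry @CallerSensitive, security-machinery frames are skipped, and the first remaining frame is the answer. A standalone sketch of that state machine, with a hypothetical Frame type standing in for JVMState/ciMethod:

    #include <vector>

    // Hypothetical stand-in for one inlined JVM state / stack frame.
    struct Frame {
      bool caller_sensitive;                // @CallerSensitive on the method
      bool ignored_by_security_stack_walk;  // Method.invoke, MH adapters, ...
    };

    // Mirrors the control flow of the rewritten intrinsic: returns the index
    // of the caller frame, or -1 to bail out to JVM_GetCallerClass.
    int find_caller(const std::vector<Frame>& frames) {
      // frames[0] would be getCallerClass() itself; the walk starts at 1.
      for (size_t n = 1; n < frames.size(); n++) {
        if (n == 1) {
          if (!frames[n].caller_sensitive) return -1;  // annotation required
          continue;
        }
        if (!frames[n].ignored_by_security_stack_walk) {
          return (int)n;  // first real frame above the sensitive method
        }
      }
      return -1;  // ran out of inlined frames; let the runtime handle it
    }

The design point worth noting: because the no-argument getCallerClass() always means "my immediate caller", the compiler no longer has to prove a depth argument constant before it can fold the call to a class constant.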

View file

@@ -2251,6 +2251,11 @@ void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool skip_loop_opts) {
     return;
   }

+  // clear out the dead code after build_loop_late
+  while (_deadlist.size()) {
+    _igvn.remove_globally_dead_node(_deadlist.pop());
+  }
+
   if (stop_early) {
     assert(do_expensive_nodes, "why are we here?");
     if (process_expensive_nodes()) {
@@ -2260,9 +2265,7 @@ void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool skip_loop_opts) {
       // nodes again.
       C->set_major_progress();
     }
     _igvn.optimize();
     return;
   }
@@ -2273,11 +2276,6 @@ void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool skip_loop_opts) {
     eliminate_useless_predicates();
   }

-  // clear out the dead code
-  while(_deadlist.size()) {
-    _igvn.remove_globally_dead_node(_deadlist.pop());
-  }
-
 #ifndef PRODUCT
   C->verify_graph_edges();
   if (_verify_me) { // Nested verify pass?

View file

@@ -449,6 +449,17 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
       int max_loop_pad = nb->code_alignment()-relocInfo::addr_unit();
       if (max_loop_pad > 0) {
         assert(is_power_of_2(max_loop_pad+relocInfo::addr_unit()), "");
+        // Adjust last_call_adr and/or last_avoid_back_to_back_adr.
+        // If either is the last instruction in this block, bump by
+        // max_loop_pad in lock-step with blk_size, so sizing
+        // calculations in subsequent blocks still can conservatively
+        // detect that it may be the last instruction in this block.
+        if (last_call_adr == blk_starts[i]+blk_size) {
+          last_call_adr += max_loop_pad;
+        }
+        if (last_avoid_back_to_back_adr == blk_starts[i]+blk_size) {
+          last_avoid_back_to_back_adr += max_loop_pad;
+        }
         blk_size += max_loop_pad;
       }
     }
@@ -1193,8 +1204,6 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
   int last_call_offset = -1;
   int last_avoid_back_to_back_offset = -1;
 #ifdef ASSERT
-  int block_alignment_padding = 0;
-
   uint* jmp_target = NEW_RESOURCE_ARRAY(uint,nblocks);
   uint* jmp_offset = NEW_RESOURCE_ARRAY(uint,nblocks);
   uint* jmp_size   = NEW_RESOURCE_ARRAY(uint,nblocks);
@@ -1228,8 +1237,6 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
   Node *delay_slot = NULL;
 
   for (uint i=0; i < nblocks; i++) {
-    guarantee(blk_starts[i] >= (uint)cb->insts_size(),"should not increase size");
-
     Block *b = _cfg->_blocks[i];
     Node *head = b->head();
@@ -1250,14 +1257,6 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
     jmp_offset[i] = 0;
     jmp_size[i]   = 0;
     jmp_rule[i]   = 0;
-
-    // Maximum alignment padding for loop block was used
-    // during first round of branches shortening, as result
-    // padding for nodes (sfpt after call) was not added.
-    // Take this into account for block's size change check
-    // and allow increase block's size by the difference
-    // of maximum and actual alignment paddings.
-    int orig_blk_size = blk_starts[i+1] - blk_starts[i] + block_alignment_padding;
 #endif
     int blk_offset = current_offset;
@@ -1557,8 +1556,6 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
       }
     } // End for all instructions in block
 
-    assert((uint)blk_offset <= blk_starts[i], "shouldn't increase distance");
-    blk_starts[i] = blk_offset;
-
     // If the next block is the top of a loop, pad this block out to align
     // the loop top a little. Helps prevent pipe stalls at loop back branches.
@@ -1572,16 +1569,13 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
         nop->emit(*cb, _regalloc);
         current_offset = cb->insts_size();
       }
-#ifdef ASSERT
-      int max_loop_pad = nb->code_alignment()-relocInfo::addr_unit();
-      block_alignment_padding = (max_loop_pad - padding);
-      assert(block_alignment_padding >= 0, "sanity");
-#endif
     }
 
     // Verify that the distance for generated before forward
     // short branches is still valid.
-    assert(orig_blk_size >= (current_offset - blk_offset), "shouldn't increase block size");
+    guarantee((int)(blk_starts[i+1] - blk_starts[i]) >= (current_offset - blk_offset), "shouldn't increase block size");
+
+    // Save new block start offset
+    blk_starts[i] = blk_offset;
   } // End of for all blocks
   blk_starts[nblocks] = current_offset;
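
The first output.cpp hunk keeps two end-of-block markers consistent with the block size: when loop-alignment padding grows blk_size, a marker that pointed exactly at the old end of the block must be bumped by the same amount, otherwise the conservative sizing checks in later blocks would compare against a stale address. A simplified model of that invariant (plain ints, not the real Compile state):

    #include <cassert>

    // A block ends at start + size; last_call_adr marks the address just
    // past the last call. If padding is appended to the block, a marker
    // sitting exactly at the old end must move with it, as in
    // shorten_branches().
    void pad_block(int blk_start, int& blk_size, int pad, int& last_call_adr) {
      if (last_call_adr == blk_start + blk_size) {
        last_call_adr += pad;  // bump in lock-step with blk_size
      }
      blk_size += pad;
      assert(last_call_adr <= blk_start + blk_size, "marker stays inside block");
    }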

View file

@@ -104,7 +104,8 @@ Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
     if (C->log() != NULL)   C->log()->elem("observe that='!need_range_check'");
   }
 
-  if (!arytype->klass()->is_loaded()) {
+  ciKlass * arytype_klass = arytype->klass();
+  if ((arytype_klass != NULL) && (!arytype_klass->is_loaded())) {
     // Only fails for some -Xcomp runs
     // The class is unloaded. We have to run this bytecode in the interpreter.
     uncommon_trap(Deoptimization::Reason_unloaded,
@@ -1385,6 +1386,7 @@ void Parse::do_one_bytecode() {
   if (TraceOptoParse) {
     tty->print(" @");
     dump_bci(bci());
+    tty->cr();
   }
 #endif

View file

@@ -1166,8 +1166,6 @@ void PhaseIterGVN::remove_globally_dead_node( Node *dead ) {
     if (progress_state == PROCESS_INPUTS) {
       // After following inputs, continue to outputs
       _stack.set_index(PROCESS_OUTPUTS);
-      // Remove from iterative worklist
-      _worklist.remove(dead);
       if (!dead->is_Con()) { // Don't kill cons but uses
         bool recurse = false;
         // Remove from hash table
@@ -1175,18 +1173,19 @@ void PhaseIterGVN::remove_globally_dead_node( Node *dead ) {
         // Smash all inputs to 'dead', isolating him completely
         for (uint i = 0; i < dead->req(); i++) {
           Node *in = dead->in(i);
-          if( in ) {                 // Points to something?
-            dead->set_req(i,NULL);   // Kill the edge
-            if (in->outcnt() == 0 && in != C->top()) {// Made input go dead?
+          if (in != NULL && in != C->top()) {  // Points to something?
+            int nrep = dead->replace_edge(in, NULL);  // Kill edges
+            assert((nrep > 0), "sanity");
+            if (in->outcnt() == 0) {           // Made input go dead?
               _stack.push(in, PROCESS_INPUTS); // Recursively remove
               recurse = true;
             } else if (in->outcnt() == 1 &&
                        in->has_special_unique_user()) {
               _worklist.push(in->unique_out());
             } else if (in->outcnt() <= 2 && dead->is_Phi()) {
-              if( in->Opcode() == Op_Region )
+              if (in->Opcode() == Op_Region) {
                 _worklist.push(in);
-              else if( in->is_Store() ) {
+              } else if (in->is_Store()) {
                 DUIterator_Fast imax, i = in->fast_outs(imax);
                 _worklist.push(in->fast_out(i));
                 i++;
@@ -1209,38 +1208,42 @@ void PhaseIterGVN::remove_globally_dead_node( Node *dead ) {
               }
             }
           }
-          }
-        }
+          } // if (in != NULL && in != C->top())
+        } // for (uint i = 0; i < dead->req(); i++)
-        C->record_dead_node(dead->_idx);
-        if (dead->is_macro()) {
-          C->remove_macro_node(dead);
-        }
-        if (dead->is_expensive()) {
-          C->remove_expensive_node(dead);
-        }
         if (recurse) {
           continue;
         }
-      }
-      // Constant node that has no out-edges and has only one in-edge from
-      // root is usually dead. However, sometimes reshaping walk makes
-      // it reachable by adding use edges. So, we will NOT count Con nodes
-      // as dead to be conservative about the dead node count at any
-      // given time.
-    }
+      } // if (!dead->is_Con())
+    } // if (progress_state == PROCESS_INPUTS)
 
     // Aggressively kill globally dead uses
     // (Rather than pushing all the outs at once, we push one at a time,
     // plus the parent to resume later, because of the indefinite number
     // of edge deletions per loop trip.)
     if (dead->outcnt() > 0) {
-      // Recursively remove
+      // Recursively remove output edges
       _stack.push(dead->raw_out(0), PROCESS_INPUTS);
     } else {
+      // Finished disconnecting all input and output edges.
       _stack.pop();
+      // Remove dead node from iterative worklist
+      _worklist.remove(dead);
+      // Constant node that has no out-edges and has only one in-edge from
+      // root is usually dead. However, sometimes reshaping walk makes
+      // it reachable by adding use edges. So, we will NOT count Con nodes
+      // as dead to be conservative about the dead node count at any
+      // given time.
+      if (!dead->is_Con()) {
+        C->record_dead_node(dead->_idx);
+      }
+      if (dead->is_macro()) {
+        C->remove_macro_node(dead);
+      }
+      if (dead->is_expensive()) {
+        C->remove_expensive_node(dead);
+      }
     }
-  }
+  } // while (_stack.is_nonempty())
 }
 
 //------------------------------subsume_node-----------------------------------
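
The rewritten remove_globally_dead_node() walks the dead subgraph with an explicit stack in two phases (PROCESS_INPUTS, then PROCESS_OUTPUTS) instead of recursing, and now defers worklist removal and dead-node accounting until a node is fully disconnected. A hedged sketch of the input-side half of that pattern on a toy graph (the real code also walks output edges and special-cases Phi, Con, macro, and expensive nodes):

    #include <stack>
    #include <vector>

    struct Node {
      std::vector<Node*> in;  // use->def edges
      int outcnt = 0;         // number of users
    };

    // Iteratively disconnect `root` and anything that becomes useless,
    // mirroring the explicit-stack walk in PhaseIterGVN (sketch only).
    void remove_dead(Node* root) {
      std::stack<Node*> work;
      work.push(root);
      while (!work.empty()) {
        Node* dead = work.top();
        work.pop();
        for (Node*& def : dead->in) {
          if (def == nullptr) continue;
          def->outcnt--;        // kill the use->def edge
          if (def->outcnt == 0) {
            work.push(def);     // input went dead: "recurse" via the stack
          }
          def = nullptr;
        }
      }
    }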

View file

@@ -30,6 +30,7 @@
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
+#include "interpreter/bytecode.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/universe.inline.hpp"
 #include "oops/fieldStreams.hpp"
@@ -665,8 +666,51 @@ JVM_END
 JVM_ENTRY(jclass, JVM_GetCallerClass(JNIEnv* env, int depth))
   JVMWrapper("JVM_GetCallerClass");
+
+  // Pre-JDK 8 and early builds of JDK 8 don't have a CallerSensitive annotation.
+  if (SystemDictionary::reflect_CallerSensitive_klass() == NULL) {
     Klass* k = thread->security_get_caller_class(depth);
     return (k == NULL) ? NULL : (jclass) JNIHandles::make_local(env, k->java_mirror());
+  } else {
+    // Basic handshaking with Java_sun_reflect_Reflection_getCallerClass
+    assert(depth == -1, "wrong handshake depth");
+  }
+
+  // Getting the class of the caller frame.
+  //
+  // The call stack at this point looks something like this:
+  //
+  // [0] [ @CallerSensitive public sun.reflect.Reflection.getCallerClass ]
+  // [1] [ @CallerSensitive API.method                                   ]
+  // [.] [ (skipped intermediate frames)                                 ]
+  // [n] [ caller                                                        ]
+  vframeStream vfst(thread);
+  // Cf. LibraryCallKit::inline_native_Reflection_getCallerClass
+  for (int n = 0; !vfst.at_end(); vfst.security_next(), n++) {
+    Method* m = vfst.method();
+    assert(m != NULL, "sanity");
+    switch (n) {
+    case 0:
+      // This must only be called from Reflection.getCallerClass
+      if (m->intrinsic_id() != vmIntrinsics::_getCallerClass) {
+        THROW_MSG_NULL(vmSymbols::java_lang_InternalError(), "JVM_GetCallerClass must only be called from Reflection.getCallerClass");
+      }
+      // fall-through
+    case 1:
+      // Frame 0 and 1 must be caller sensitive.
+      if (!m->caller_sensitive()) {
+        THROW_MSG_NULL(vmSymbols::java_lang_InternalError(), err_msg("CallerSensitive annotation expected at frame %d", n));
+      }
+      break;
+    default:
+      if (!m->is_ignored_by_security_stack_walk()) {
+        // We have reached the desired frame; return the holder class.
+        return (jclass) JNIHandles::make_local(env, m->method_holder()->java_mirror());
+      }
+      break;
+    }
+  }
+  return NULL;
 JVM_END
@@ -3208,11 +3252,24 @@ JVM_ENTRY(jobjectArray, JVM_GetClassContext(JNIEnv *env))
   KlassLink* first = NULL;
   KlassLink* last  = NULL;
   int depth = 0;
+  vframeStream vfst(thread);
 
-  for(vframeStream vfst(thread); !vfst.at_end(); vfst.security_get_caller_frame(1)) {
+  if (SystemDictionary::reflect_CallerSensitive_klass() != NULL) {
+    // This must only be called from SecurityManager.getClassContext
+    Method* m = vfst.method();
+    if (!(m->method_holder() == SystemDictionary::SecurityManager_klass() &&
+          m->name()          == vmSymbols::getClassContext_name() &&
+          m->signature()     == vmSymbols::void_class_array_signature())) {
+      THROW_MSG_NULL(vmSymbols::java_lang_InternalError(), "JVM_GetClassContext must only be called from SecurityManager.getClassContext");
+    }
+  }
+
+  // Collect method holders
+  for (; !vfst.at_end(); vfst.security_next()) {
+    Method* m = vfst.method();
     // Native frames are not returned
-    if (!vfst.method()->is_native()) {
-      Klass* holder = vfst.method()->method_holder();
+    if (!m->is_ignored_by_security_stack_walk() && !m->is_native()) {
+      Klass* holder = m->method_holder();
       assert(holder->is_klass(), "just checking");
       depth++;
       KlassLink* l = new KlassLink(KlassHandle(thread, holder));

View file

@@ -39,7 +39,12 @@
 #include "runtime/vm_operations.hpp"
 
 #ifdef JVMTI_TRACE
-#define EC_TRACE(out) if (JvmtiTrace::trace_event_controller()) { SafeResourceMark rm; tty->print_cr out; } while (0)
+#define EC_TRACE(out) do { \
+  if (JvmtiTrace::trace_event_controller()) { \
+    SafeResourceMark rm; \
+    tty->print_cr out; \
+  } \
+} while (0)
 #else
 #define EC_TRACE(out)
 #endif /*JVMTI_TRACE */
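
The point of the new EC_TRACE shape is the do { ... } while (0) idiom: it turns a multi-statement macro body into one single statement, so the macro composes with if/else and takes a natural trailing semicolon. In the old one-line definition, the trailing while (0) was a separate, dangling statement. A minimal generic illustration (not HotSpot code):

    #include <cstdio>

    bool tracing = true;

    #define TRACE_BAD(msg)  if (tracing) { printf("%s\n", msg); } while (0)
    #define TRACE_GOOD(msg) do { if (tracing) { printf("%s\n", msg); } } while (0)

    void demo(bool cond) {
      // With TRACE_BAD, the `else` would try to pair with the macro's
      // hidden `if`, and the semicolon before `else` breaks the parse:
      //   if (cond) TRACE_BAD("yes"); else printf("no\n");   // does not compile
      if (cond)
        TRACE_GOOD("yes");  // expands to a single statement; `;` is natural
      else
        printf("no\n");
    }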

View file

@@ -72,36 +72,6 @@
 //    0x20000000 |  536870912 - unused
 //    0x40000000 | 1073741824 - unused
 //    0x80000000 | 2147483648 - unused
-//
-// Note: The ResourceMark is to cleanup resource allocated args.
-//   The "while (0)" is so we can use semi-colon at end of RC_TRACE().
-#define RC_TRACE(level, args) \
-  if ((TraceRedefineClasses & level) != 0) { \
-    ResourceMark rm; \
-    tty->print("RedefineClasses-0x%x: ", level); \
-    tty->print_cr args; \
-  } while (0)
-
-#define RC_TRACE_NO_CR(level, args) \
-  if ((TraceRedefineClasses & level) != 0) { \
-    ResourceMark rm; \
-    tty->print("RedefineClasses-0x%x: ", level); \
-    tty->print args; \
-  } while (0)
-
-#define RC_TRACE_WITH_THREAD(level, thread, args) \
-  if ((TraceRedefineClasses & level) != 0) { \
-    ResourceMark rm(thread); \
-    tty->print("RedefineClasses-0x%x: ", level); \
-    tty->print_cr args; \
-  } while (0)
-
-#define RC_TRACE_MESG(args) \
-  { \
-    ResourceMark rm; \
-    tty->print("RedefineClasses: "); \
-    tty->print_cr args; \
-  } while (0)
 
 // Macro for checking if TraceRedefineClasses has a specific bit
 // enabled. Returns true if the bit specified by level is set.
@@ -120,16 +90,49 @@
 #define RC_TRACE_IN_RANGE(low, high) \
 (((TraceRedefineClasses & ((high << 1) - 1)) & ~(low - 1)) != 0)
 
-// Timer support macros. Only do timer operations if timer tracing
-// is enabled. The "while (0)" is so we can use semi-colon at end of
-// the macro.
-#define RC_TIMER_START(t) \
+// Note: The ResourceMark is to cleanup resource allocated args.
+//   The "do {...} while (0)" is so we can use semi-colon at end of RC_TRACE().
+#define RC_TRACE(level, args) do { \
+  if (RC_TRACE_ENABLED(level)) { \
+    ResourceMark rm; \
+    tty->print("RedefineClasses-0x%x: ", level); \
+    tty->print_cr args; \
+  } \
+} while (0)
+
+#define RC_TRACE_NO_CR(level, args) do { \
+  if (RC_TRACE_ENABLED(level)) { \
+    ResourceMark rm; \
+    tty->print("RedefineClasses-0x%x: ", level); \
+    tty->print args; \
+  } \
+} while (0)
+
+#define RC_TRACE_WITH_THREAD(level, thread, args) do { \
+  if (RC_TRACE_ENABLED(level)) { \
+    ResourceMark rm(thread); \
+    tty->print("RedefineClasses-0x%x: ", level); \
+    tty->print_cr args; \
+  } \
+} while (0)
+
+#define RC_TRACE_MESG(args) do { \
+  ResourceMark rm; \
+  tty->print("RedefineClasses: "); \
+  tty->print_cr args; \
+} while (0)
+
+// Timer support macros. Only do timer operations if timer tracing is enabled.
+// The "do {...} while (0)" is so we can use semi-colon at end of the macro.
+#define RC_TIMER_START(t) do { \
   if (RC_TRACE_ENABLED(0x00000004)) { \
     t.start(); \
+  } \
 } while (0)
-#define RC_TIMER_STOP(t) \
+#define RC_TIMER_STOP(t) do { \
   if (RC_TRACE_ENABLED(0x00000004)) { \
     t.stop(); \
+  } \
 } while (0)
 
 #endif // SHARE_VM_PRIMS_JVMTIREDEFINECLASSESTRACE_HPP
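
For the surviving RC_TRACE_IN_RANGE macro: ((high << 1) - 1) builds a mask of every bit up to and including high, and ~(low - 1) clears every bit below low, so the expression tests whether any trace bit in the inclusive range [low, high] is set. A small self-checking example of the same arithmetic on a local value (it assumes low and high are single power-of-two bits, as in the bit table above):

    #include <cassert>

    // Same bit test as RC_TRACE_IN_RANGE, applied to a local value.
    static bool in_range(unsigned flags, unsigned low, unsigned high) {
      return ((flags & ((high << 1) - 1)) & ~(low - 1)) != 0;
    }

    int main() {
      // Bits at or above `low` and at or below `high` are detected:
      assert( in_range(0x00000100, 0x00000100, 0x00000400));  // low edge
      assert( in_range(0x00000400, 0x00000100, 0x00000400));  // high edge
      assert(!in_range(0x00000080, 0x00000100, 0x00000400));  // below range
      assert(!in_range(0x00000800, 0x00000100, 0x00000400));  // above range
      return 0;
    }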

View file

@@ -109,6 +109,7 @@ enum {
   IS_CONSTRUCTOR       = java_lang_invoke_MemberName::MN_IS_CONSTRUCTOR,
   IS_FIELD             = java_lang_invoke_MemberName::MN_IS_FIELD,
   IS_TYPE              = java_lang_invoke_MemberName::MN_IS_TYPE,
+  CALLER_SENSITIVE     = java_lang_invoke_MemberName::MN_CALLER_SENSITIVE,
   REFERENCE_KIND_SHIFT = java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT,
   REFERENCE_KIND_MASK  = java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK,
   SEARCH_SUPERCLASSES  = java_lang_invoke_MemberName::MN_SEARCH_SUPERCLASSES,
@@ -207,6 +208,11 @@ oop MethodHandles::init_method_MemberName(oop mname_oop, Method* m, bool do_disp
     vmindex = m->vtable_index();
   }
 
+  // @CallerSensitive annotation detected
+  if (m->caller_sensitive()) {
+    flags |= CALLER_SENSITIVE;
+  }
+
   java_lang_invoke_MemberName::set_flags(   mname_oop, flags);
   java_lang_invoke_MemberName::set_vmtarget(mname_oop, m);
   java_lang_invoke_MemberName::set_vmindex( mname_oop, vmindex); // vtable/itable index
@@ -940,6 +946,7 @@ JVM_END
     template(java_lang_invoke_MemberName,MN_IS_CONSTRUCTOR) \
     template(java_lang_invoke_MemberName,MN_IS_FIELD) \
     template(java_lang_invoke_MemberName,MN_IS_TYPE) \
+    template(java_lang_invoke_MemberName,MN_CALLER_SENSITIVE) \
    template(java_lang_invoke_MemberName,MN_SEARCH_SUPERCLASSES) \
     template(java_lang_invoke_MemberName,MN_SEARCH_INTERFACES) \
     template(java_lang_invoke_MemberName,MN_REFERENCE_KIND_SHIFT) \

View file

@@ -868,7 +868,7 @@ static inline void throw_new(JNIEnv *env, const char *ename) {
   env->ThrowNew(cls, msg);
 }
 
-static jclass Unsafe_DefineClass(JNIEnv *env, jstring name, jbyteArray data, int offset, int length, jobject loader, jobject pd) {
+static jclass Unsafe_DefineClass_impl(JNIEnv *env, jstring name, jbyteArray data, int offset, int length, jobject loader, jobject pd) {
   {
     // Code lifted from JDK 1.3 ClassLoader.c
@@ -939,6 +939,15 @@ static jclass Unsafe_DefineClass(JNIEnv *env, jstring name, jbyteArray data, int
 }
 
+UNSAFE_ENTRY(jclass, Unsafe_DefineClass(JNIEnv *env, jobject unsafe, jstring name, jbyteArray data, int offset, int length, jobject loader, jobject pd))
+  UnsafeWrapper("Unsafe_DefineClass");
+  {
+    ThreadToNativeFromVM ttnfv(thread);
+    return Unsafe_DefineClass_impl(env, name, data, offset, length, loader, pd);
+  }
+UNSAFE_END
+
 UNSAFE_ENTRY(jclass, Unsafe_DefineClass0(JNIEnv *env, jobject unsafe, jstring name, jbyteArray data, int offset, int length))
   UnsafeWrapper("Unsafe_DefineClass");
   {
@@ -949,20 +958,11 @@ UNSAFE_ENTRY(jclass, Unsafe_DefineClass0(JNIEnv *env, jobject unsafe, jstring na
     jobject loader = (caller == NULL) ? NULL : JVM_GetClassLoader(env, caller);
     jobject pd     = (caller == NULL) ? NULL : JVM_GetProtectionDomain(env, caller);
 
-    return Unsafe_DefineClass(env, name, data, offset, length, loader, pd);
+    return Unsafe_DefineClass_impl(env, name, data, offset, length, loader, pd);
   }
 UNSAFE_END
 
-UNSAFE_ENTRY(jclass, Unsafe_DefineClass1(JNIEnv *env, jobject unsafe, jstring name, jbyteArray data, int offset, int length, jobject loader, jobject pd))
-  UnsafeWrapper("Unsafe_DefineClass");
-  {
-    ThreadToNativeFromVM ttnfv(thread);
-    return Unsafe_DefineClass(env, name, data, offset, length, loader, pd);
-  }
-UNSAFE_END
-
 #define DAC_Args CLS"[B["OBJ
 
 // define a class but do not make it known to the class loader or system dictionary
 // - host_class:  supplies context for linkage, access control, protection domain, and class loader
@@ -1323,7 +1323,7 @@ UNSAFE_END
 #define THR LANG"Throwable;"
 
 #define DC0_Args LANG"String;[BII"
-#define DC1_Args DC0_Args LANG"ClassLoader;" "Ljava/security/ProtectionDomain;"
+#define DC_Args  DC0_Args LANG"ClassLoader;" "Ljava/security/ProtectionDomain;"
 
 #define CC (char*)  /*cast a literal from (const char*)*/
 #define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f)
@@ -1352,10 +1352,8 @@ UNSAFE_END
-// %%% These are temporarily supported until the SDK sources
-// contain the necessarily updated Unsafe.java.
+// These are the methods for 1.4.0
 static JNINativeMethod methods_140[] = {
     {CC"getObject",        CC"("OBJ"I)"OBJ"",          FN_PTR(Unsafe_GetObject140)},
     {CC"putObject",        CC"("OBJ"I"OBJ")V",         FN_PTR(Unsafe_SetObject140)},
@@ -1381,12 +1379,10 @@ static JNINativeMethod methods_140[] = {
     {CC"allocateMemory",   CC"(J)"ADR,                 FN_PTR(Unsafe_AllocateMemory)},
     {CC"reallocateMemory", CC"("ADR"J)"ADR,            FN_PTR(Unsafe_ReallocateMemory)},
-//  {CC"setMemory",        CC"("ADR"JB)V",             FN_PTR(Unsafe_SetMemory)},
-//  {CC"copyMemory",       CC"("ADR ADR"J)V",          FN_PTR(Unsafe_CopyMemory)},
     {CC"freeMemory",       CC"("ADR")V",               FN_PTR(Unsafe_FreeMemory)},
 
-    {CC"fieldOffset",      CC"("FLD")I",               FN_PTR(Unsafe_FieldOffset)}, //deprecated
-    {CC"staticFieldBase",  CC"("CLS")"OBJ,             FN_PTR(Unsafe_StaticFieldBaseFromClass)}, //deprecated
+    {CC"fieldOffset",      CC"("FLD")I",               FN_PTR(Unsafe_FieldOffset)},
+    {CC"staticFieldBase",  CC"("CLS")"OBJ,             FN_PTR(Unsafe_StaticFieldBaseFromClass)},
     {CC"ensureClassInitialized",CC"("CLS")V",          FN_PTR(Unsafe_EnsureClassInitialized)},
     {CC"arrayBaseOffset",  CC"("CLS")I",               FN_PTR(Unsafe_ArrayBaseOffset)},
     {CC"arrayIndexScale",  CC"("CLS")I",               FN_PTR(Unsafe_ArrayIndexScale)},
@@ -1394,16 +1390,15 @@ static JNINativeMethod methods_140[] = {
     {CC"pageSize",         CC"()I",                    FN_PTR(Unsafe_PageSize)},
 
     {CC"defineClass",      CC"("DC0_Args")"CLS,        FN_PTR(Unsafe_DefineClass0)},
-    {CC"defineClass",      CC"("DC1_Args")"CLS,        FN_PTR(Unsafe_DefineClass1)},
+    {CC"defineClass",      CC"("DC_Args")"CLS,         FN_PTR(Unsafe_DefineClass)},
     {CC"allocateInstance", CC"("CLS")"OBJ,             FN_PTR(Unsafe_AllocateInstance)},
     {CC"monitorEnter",     CC"("OBJ")V",               FN_PTR(Unsafe_MonitorEnter)},
     {CC"monitorExit",      CC"("OBJ")V",               FN_PTR(Unsafe_MonitorExit)},
     {CC"throwException",   CC"("THR")V",               FN_PTR(Unsafe_ThrowException)}
 };
 
-// These are the old methods prior to the JSR 166 changes in 1.5.0
+// These are the methods prior to the JSR 166 changes in 1.5.0
 static JNINativeMethod methods_141[] = {
     {CC"getObject",        CC"("OBJ"J)"OBJ"",          FN_PTR(Unsafe_GetObject)},
     {CC"putObject",        CC"("OBJ"J"OBJ")V",         FN_PTR(Unsafe_SetObject)},
@@ -1429,8 +1424,6 @@ static JNINativeMethod methods_141[] = {
     {CC"allocateMemory",   CC"(J)"ADR,                 FN_PTR(Unsafe_AllocateMemory)},
     {CC"reallocateMemory", CC"("ADR"J)"ADR,            FN_PTR(Unsafe_ReallocateMemory)},
-//  {CC"setMemory",        CC"("ADR"JB)V",             FN_PTR(Unsafe_SetMemory)},
-//  {CC"copyMemory",       CC"("ADR ADR"J)V",          FN_PTR(Unsafe_CopyMemory)},
     {CC"freeMemory",       CC"("ADR")V",               FN_PTR(Unsafe_FreeMemory)},
 
     {CC"objectFieldOffset",  CC"("FLD")J",             FN_PTR(Unsafe_ObjectFieldOffset)},
@@ -1443,7 +1436,7 @@ static JNINativeMethod methods_141[] = {
     {CC"pageSize",         CC"()I",                    FN_PTR(Unsafe_PageSize)},
 
     {CC"defineClass",      CC"("DC0_Args")"CLS,        FN_PTR(Unsafe_DefineClass0)},
-    {CC"defineClass",      CC"("DC1_Args")"CLS,        FN_PTR(Unsafe_DefineClass1)},
+    {CC"defineClass",      CC"("DC_Args")"CLS,         FN_PTR(Unsafe_DefineClass)},
     {CC"allocateInstance", CC"("CLS")"OBJ,             FN_PTR(Unsafe_AllocateInstance)},
     {CC"monitorEnter",     CC"("OBJ")V",               FN_PTR(Unsafe_MonitorEnter)},
     {CC"monitorExit",      CC"("OBJ")V",               FN_PTR(Unsafe_MonitorExit)},
@@ -1451,9 +1444,8 @@ static JNINativeMethod methods_141[] = {
 };
 
-// These are the old methods prior to the JSR 166 changes in 1.6.0
+// These are the methods prior to the JSR 166 changes in 1.6.0
 static JNINativeMethod methods_15[] = {
     {CC"getObject",        CC"("OBJ"J)"OBJ"",          FN_PTR(Unsafe_GetObject)},
     {CC"putObject",        CC"("OBJ"J"OBJ")V",         FN_PTR(Unsafe_SetObject)},
     {CC"getObjectVolatile",CC"("OBJ"J)"OBJ"",          FN_PTR(Unsafe_GetObjectVolatile)},
@@ -1482,8 +1474,6 @@ static JNINativeMethod methods_15[] = {
     {CC"allocateMemory",   CC"(J)"ADR,                 FN_PTR(Unsafe_AllocateMemory)},
     {CC"reallocateMemory", CC"("ADR"J)"ADR,            FN_PTR(Unsafe_ReallocateMemory)},
-//  {CC"setMemory",        CC"("ADR"JB)V",             FN_PTR(Unsafe_SetMemory)},
-//  {CC"copyMemory",       CC"("ADR ADR"J)V",          FN_PTR(Unsafe_CopyMemory)},
     {CC"freeMemory",       CC"("ADR")V",               FN_PTR(Unsafe_FreeMemory)},
 
     {CC"objectFieldOffset",  CC"("FLD")J",             FN_PTR(Unsafe_ObjectFieldOffset)},
@@ -1496,7 +1486,7 @@ static JNINativeMethod methods_15[] = {
     {CC"pageSize",         CC"()I",                    FN_PTR(Unsafe_PageSize)},
 
     {CC"defineClass",      CC"("DC0_Args")"CLS,        FN_PTR(Unsafe_DefineClass0)},
-    {CC"defineClass",      CC"("DC1_Args")"CLS,        FN_PTR(Unsafe_DefineClass1)},
+    {CC"defineClass",      CC"("DC_Args")"CLS,         FN_PTR(Unsafe_DefineClass)},
     {CC"allocateInstance", CC"("CLS")"OBJ,             FN_PTR(Unsafe_AllocateInstance)},
     {CC"monitorEnter",     CC"("OBJ")V",               FN_PTR(Unsafe_MonitorEnter)},
     {CC"monitorExit",      CC"("OBJ")V",               FN_PTR(Unsafe_MonitorExit)},
@@ -1509,15 +1499,13 @@ static JNINativeMethod methods_15[] = {
 };
 
-// These are the correct methods, moving forward:
-static JNINativeMethod methods[] = {
+// These are the methods for 1.6.0 and 1.7.0
+static JNINativeMethod methods_16[] = {
     {CC"getObject",        CC"("OBJ"J)"OBJ"",          FN_PTR(Unsafe_GetObject)},
     {CC"putObject",        CC"("OBJ"J"OBJ")V",         FN_PTR(Unsafe_SetObject)},
     {CC"getObjectVolatile",CC"("OBJ"J)"OBJ"",          FN_PTR(Unsafe_GetObjectVolatile)},
     {CC"putObjectVolatile",CC"("OBJ"J"OBJ")V",         FN_PTR(Unsafe_SetObjectVolatile)},
 
     DECLARE_GETSETOOP(Boolean, Z),
     DECLARE_GETSETOOP(Byte, B),
     DECLARE_GETSETOOP(Short, S),
@@ -1540,8 +1528,6 @@ static JNINativeMethod methods[] = {
     {CC"allocateMemory",   CC"(J)"ADR,                 FN_PTR(Unsafe_AllocateMemory)},
     {CC"reallocateMemory", CC"("ADR"J)"ADR,            FN_PTR(Unsafe_ReallocateMemory)},
-//  {CC"setMemory",        CC"("ADR"JB)V",             FN_PTR(Unsafe_SetMemory)},
-//  {CC"copyMemory",       CC"("ADR ADR"J)V",          FN_PTR(Unsafe_CopyMemory)},
     {CC"freeMemory",       CC"("ADR")V",               FN_PTR(Unsafe_FreeMemory)},
 
     {CC"objectFieldOffset",  CC"("FLD")J",             FN_PTR(Unsafe_ObjectFieldOffset)},
@@ -1554,7 +1540,7 @@ static JNINativeMethod methods[] = {
     {CC"pageSize",         CC"()I",                    FN_PTR(Unsafe_PageSize)},
 
     {CC"defineClass",      CC"("DC0_Args")"CLS,        FN_PTR(Unsafe_DefineClass0)},
-    {CC"defineClass",      CC"("DC1_Args")"CLS,        FN_PTR(Unsafe_DefineClass1)},
+    {CC"defineClass",      CC"("DC_Args")"CLS,         FN_PTR(Unsafe_DefineClass)},
     {CC"allocateInstance", CC"("CLS")"OBJ,             FN_PTR(Unsafe_AllocateInstance)},
     {CC"monitorEnter",     CC"("OBJ")V",               FN_PTR(Unsafe_MonitorEnter)},
     {CC"monitorExit",      CC"("OBJ")V",               FN_PTR(Unsafe_MonitorExit)},
@@ -1566,19 +1552,64 @@ static JNINativeMethod methods[] = {
     {CC"putOrderedObject",   CC"("OBJ"J"OBJ")V",         FN_PTR(Unsafe_SetOrderedObject)},
     {CC"putOrderedInt",      CC"("OBJ"JI)V",             FN_PTR(Unsafe_SetOrderedInt)},
     {CC"putOrderedLong",     CC"("OBJ"JJ)V",             FN_PTR(Unsafe_SetOrderedLong)},
-    {CC"loadFence",          CC"()V",                    FN_PTR(Unsafe_LoadFence)},
-    {CC"storeFence",         CC"()V",                    FN_PTR(Unsafe_StoreFence)},
-    {CC"fullFence",          CC"()V",                    FN_PTR(Unsafe_FullFence)},
     {CC"park",               CC"(ZJ)V",                  FN_PTR(Unsafe_Park)},
     {CC"unpark",             CC"("OBJ")V",               FN_PTR(Unsafe_Unpark)}
-
-//    {CC"getLoadAverage",     CC"([DI)I",                 FN_PTR(Unsafe_Loadavg)},
-
-//    {CC"prefetchRead",       CC"("OBJ"J)V",              FN_PTR(Unsafe_PrefetchRead)},
-//    {CC"prefetchWrite",      CC"("OBJ"J)V",              FN_PTR(Unsafe_PrefetchWrite)}
-//    {CC"prefetchReadStatic", CC"("OBJ"J)V",              FN_PTR(Unsafe_PrefetchRead)},
-//    {CC"prefetchWriteStatic",CC"("OBJ"J)V",              FN_PTR(Unsafe_PrefetchWrite)}
+};
+
+// These are the methods for 1.8.0
+static JNINativeMethod methods_18[] = {
+    {CC"getObject",        CC"("OBJ"J)"OBJ"",          FN_PTR(Unsafe_GetObject)},
+    {CC"putObject",        CC"("OBJ"J"OBJ")V",         FN_PTR(Unsafe_SetObject)},
+    {CC"getObjectVolatile",CC"("OBJ"J)"OBJ"",          FN_PTR(Unsafe_GetObjectVolatile)},
+    {CC"putObjectVolatile",CC"("OBJ"J"OBJ")V",         FN_PTR(Unsafe_SetObjectVolatile)},
+
+    DECLARE_GETSETOOP(Boolean, Z),
+    DECLARE_GETSETOOP(Byte, B),
+    DECLARE_GETSETOOP(Short, S),
+    DECLARE_GETSETOOP(Char, C),
+    DECLARE_GETSETOOP(Int, I),
+    DECLARE_GETSETOOP(Long, J),
+    DECLARE_GETSETOOP(Float, F),
+    DECLARE_GETSETOOP(Double, D),
+
+    DECLARE_GETSETNATIVE(Byte, B),
+    DECLARE_GETSETNATIVE(Short, S),
+    DECLARE_GETSETNATIVE(Char, C),
+    DECLARE_GETSETNATIVE(Int, I),
+    DECLARE_GETSETNATIVE(Long, J),
+    DECLARE_GETSETNATIVE(Float, F),
+    DECLARE_GETSETNATIVE(Double, D),
+
+    {CC"getAddress",         CC"("ADR")"ADR,             FN_PTR(Unsafe_GetNativeAddress)},
+    {CC"putAddress",         CC"("ADR""ADR")V",          FN_PTR(Unsafe_SetNativeAddress)},
+
+    {CC"allocateMemory",     CC"(J)"ADR,                 FN_PTR(Unsafe_AllocateMemory)},
+    {CC"reallocateMemory",   CC"("ADR"J)"ADR,            FN_PTR(Unsafe_ReallocateMemory)},
+    {CC"freeMemory",         CC"("ADR")V",               FN_PTR(Unsafe_FreeMemory)},
+
+    {CC"objectFieldOffset",  CC"("FLD")J",               FN_PTR(Unsafe_ObjectFieldOffset)},
+    {CC"staticFieldOffset",  CC"("FLD")J",               FN_PTR(Unsafe_StaticFieldOffset)},
+    {CC"staticFieldBase",    CC"("FLD")"OBJ,             FN_PTR(Unsafe_StaticFieldBaseFromField)},
+    {CC"ensureClassInitialized",CC"("CLS")V",            FN_PTR(Unsafe_EnsureClassInitialized)},
+    {CC"arrayBaseOffset",    CC"("CLS")I",               FN_PTR(Unsafe_ArrayBaseOffset)},
+    {CC"arrayIndexScale",    CC"("CLS")I",               FN_PTR(Unsafe_ArrayIndexScale)},
+    {CC"addressSize",        CC"()I",                    FN_PTR(Unsafe_AddressSize)},
+    {CC"pageSize",           CC"()I",                    FN_PTR(Unsafe_PageSize)},
+
+    {CC"defineClass",        CC"("DC_Args")"CLS,         FN_PTR(Unsafe_DefineClass)},
+    {CC"allocateInstance",   CC"("CLS")"OBJ,             FN_PTR(Unsafe_AllocateInstance)},
+    {CC"monitorEnter",       CC"("OBJ")V",               FN_PTR(Unsafe_MonitorEnter)},
+    {CC"monitorExit",        CC"("OBJ")V",               FN_PTR(Unsafe_MonitorExit)},
+    {CC"tryMonitorEnter",    CC"("OBJ")Z",               FN_PTR(Unsafe_TryMonitorEnter)},
+    {CC"throwException",     CC"("THR")V",               FN_PTR(Unsafe_ThrowException)},
+    {CC"compareAndSwapObject", CC"("OBJ"J"OBJ""OBJ")Z",  FN_PTR(Unsafe_CompareAndSwapObject)},
+    {CC"compareAndSwapInt",  CC"("OBJ"J""I""I"")Z",      FN_PTR(Unsafe_CompareAndSwapInt)},
+    {CC"compareAndSwapLong", CC"("OBJ"J""J""J"")Z",      FN_PTR(Unsafe_CompareAndSwapLong)},
+    {CC"putOrderedObject",   CC"("OBJ"J"OBJ")V",         FN_PTR(Unsafe_SetOrderedObject)},
+    {CC"putOrderedInt",      CC"("OBJ"JI)V",             FN_PTR(Unsafe_SetOrderedInt)},
+    {CC"putOrderedLong",     CC"("OBJ"JJ)V",             FN_PTR(Unsafe_SetOrderedLong)},
+    {CC"park",               CC"(ZJ)V",                  FN_PTR(Unsafe_Park)},
+    {CC"unpark",             CC"("OBJ")V",               FN_PTR(Unsafe_Unpark)}
 };
 
 JNINativeMethod loadavg_method[] = {
@@ -1592,7 +1623,7 @@ JNINativeMethod prefetch_methods[] = {
     {CC"prefetchWriteStatic",CC"("OBJ"J)V",              FN_PTR(Unsafe_PrefetchWrite)}
 };
 
-JNINativeMethod memcopy_methods[] = {
+JNINativeMethod memcopy_methods_17[] = {
     {CC"copyMemory",         CC"("OBJ"J"OBJ"JJ)V",       FN_PTR(Unsafe_CopyMemory2)},
     {CC"setMemory",          CC"("OBJ"JJB)V",            FN_PTR(Unsafe_SetMemory2)}
 };
@@ -1610,6 +1641,12 @@ JNINativeMethod lform_methods[] = {
     {CC"shouldBeInitialized",CC"("CLS")Z",               FN_PTR(Unsafe_ShouldBeInitialized)},
 };
 
+JNINativeMethod fence_methods[] = {
+    {CC"loadFence",          CC"()V",                    FN_PTR(Unsafe_LoadFence)},
+    {CC"storeFence",         CC"()V",                    FN_PTR(Unsafe_StoreFence)},
+    {CC"fullFence",          CC"()V",                    FN_PTR(Unsafe_FullFence)},
+};
+
 #undef CC
 #undef FN_PTR
@@ -1622,12 +1659,32 @@
 #undef MTH
 #undef THR
 #undef DC0_Args
-#undef DC1_Args
+#undef DC_Args
 
 #undef DECLARE_GETSETOOP
 #undef DECLARE_GETSETNATIVE
 
+/**
+ * Helper method to register native methods.
+ */
+static bool register_natives(const char* message, JNIEnv* env, jclass clazz, const JNINativeMethod* methods, jint nMethods) {
+  int status = env->RegisterNatives(clazz, methods, nMethods);
+  if (status < 0 || env->ExceptionOccurred()) {
+    if (PrintMiscellaneous && (Verbose || WizardMode)) {
+      tty->print_cr("Unsafe:  failed registering %s", message);
+    }
+    env->ExceptionClear();
+    return false;
+  } else {
+    if (PrintMiscellaneous && (Verbose || WizardMode)) {
+      tty->print_cr("Unsafe:  successfully registered %s", message);
+    }
+    return true;
+  }
+}
+
 // This one function is exported, used by NativeLookup.
 // The Unsafe_xxx functions above are called only from the interpreter.
 // The optimizer looks at names and signatures to recognize
@@ -1637,83 +1694,57 @@ JVM_ENTRY(void, JVM_RegisterUnsafeMethods(JNIEnv *env, jclass unsafecls))
   UnsafeWrapper("JVM_RegisterUnsafeMethods");
   {
     ThreadToNativeFromVM ttnfv(thread);
-    {
-      env->RegisterNatives(unsafecls, loadavg_method, sizeof(loadavg_method)/sizeof(JNINativeMethod));
-      if (env->ExceptionOccurred()) {
-        if (PrintMiscellaneous && (Verbose || WizardMode)) {
-          tty->print_cr("Warning:  SDK 1.6 Unsafe.loadavg not found.");
-        }
-        env->ExceptionClear();
-      }
-    }
-    {
-      env->RegisterNatives(unsafecls, prefetch_methods, sizeof(prefetch_methods)/sizeof(JNINativeMethod));
-      if (env->ExceptionOccurred()) {
-        if (PrintMiscellaneous && (Verbose || WizardMode)) {
-          tty->print_cr("Warning:  SDK 1.6 Unsafe.prefetchRead/Write not found.");
-        }
-        env->ExceptionClear();
-      }
-    }
-    {
-      env->RegisterNatives(unsafecls, memcopy_methods, sizeof(memcopy_methods)/sizeof(JNINativeMethod));
-      if (env->ExceptionOccurred()) {
-        if (PrintMiscellaneous && (Verbose || WizardMode)) {
-          tty->print_cr("Warning:  SDK 1.7 Unsafe.copyMemory not found.");
-        }
-        env->ExceptionClear();
-        env->RegisterNatives(unsafecls, memcopy_methods_15, sizeof(memcopy_methods_15)/sizeof(JNINativeMethod));
-        if (env->ExceptionOccurred()) {
-          if (PrintMiscellaneous && (Verbose || WizardMode)) {
-            tty->print_cr("Warning:  SDK 1.5 Unsafe.copyMemory not found.");
-          }
-          env->ExceptionClear();
-        }
-      }
-    }
-    if (EnableInvokeDynamic) {
-      env->RegisterNatives(unsafecls, anonk_methods, sizeof(anonk_methods)/sizeof(JNINativeMethod));
-      if (env->ExceptionOccurred()) {
-        if (PrintMiscellaneous && (Verbose || WizardMode)) {
-          tty->print_cr("Warning:  SDK 1.7 Unsafe.defineClass (anonymous version) not found.");
-        }
-        env->ExceptionClear();
-      }
-    }
-    if (EnableInvokeDynamic) {
-      env->RegisterNatives(unsafecls, lform_methods, sizeof(lform_methods)/sizeof(JNINativeMethod));
-      if (env->ExceptionOccurred()) {
-        if (PrintMiscellaneous && (Verbose || WizardMode)) {
-          tty->print_cr("Warning:  SDK 1.7 LambdaForm support in Unsafe not found.");
-        }
-        env->ExceptionClear();
-      }
-    }
-    int status = env->RegisterNatives(unsafecls, methods, sizeof(methods)/sizeof(JNINativeMethod));
-    if (env->ExceptionOccurred()) {
-      if (PrintMiscellaneous && (Verbose || WizardMode)) {
-        tty->print_cr("Warning:  SDK 1.6 version of Unsafe not found.");
-      }
-      env->ExceptionClear();
-      // %%% For now, be backward compatible with an older class:
-      status = env->RegisterNatives(unsafecls, methods_15, sizeof(methods_15)/sizeof(JNINativeMethod));
-    }
-    if (env->ExceptionOccurred()) {
-      if (PrintMiscellaneous && (Verbose || WizardMode)) {
-        tty->print_cr("Warning:  SDK 1.5 version of Unsafe not found.");
-      }
-      env->ExceptionClear();
-      // %%% For now, be backward compatible with an older class:
-      status = env->RegisterNatives(unsafecls, methods_141, sizeof(methods_141)/sizeof(JNINativeMethod));
-    }
-    if (env->ExceptionOccurred()) {
-      if (PrintMiscellaneous && (Verbose || WizardMode)) {
-        tty->print_cr("Warning:  SDK 1.4.1 version of Unsafe not found.");
-      }
-      env->ExceptionClear();
-      // %%% For now, be backward compatible with an older class:
-      status = env->RegisterNatives(unsafecls, methods_140, sizeof(methods_140)/sizeof(JNINativeMethod));
-    }
-    guarantee(status == 0, "register unsafe natives");
+
+    // Unsafe methods
+    {
+      bool success = false;
+      // We need to register the 1.6 methods first because the 1.8 methods would register fine on 1.7 and 1.6
+      if (!success) {
+        success = register_natives("1.6 methods",   env, unsafecls, methods_16,  sizeof(methods_16)/sizeof(JNINativeMethod));
+      }
+      if (!success) {
+        success = register_natives("1.8 methods",   env, unsafecls, methods_18,  sizeof(methods_18)/sizeof(JNINativeMethod));
+      }
+      if (!success) {
+        success = register_natives("1.5 methods",   env, unsafecls, methods_15,  sizeof(methods_15)/sizeof(JNINativeMethod));
+      }
+      if (!success) {
+        success = register_natives("1.4.1 methods", env, unsafecls, methods_141, sizeof(methods_141)/sizeof(JNINativeMethod));
+      }
+      if (!success) {
+        success = register_natives("1.4.0 methods", env, unsafecls, methods_140, sizeof(methods_140)/sizeof(JNINativeMethod));
      }
+      guarantee(success, "register unsafe natives");
+    }
+
+    // Unsafe.getLoadAverage
+    register_natives("1.6 loadavg method", env, unsafecls, loadavg_method, sizeof(loadavg_method)/sizeof(JNINativeMethod));
+
+    // Prefetch methods
+    register_natives("1.6 prefetch methods", env, unsafecls, prefetch_methods, sizeof(prefetch_methods)/sizeof(JNINativeMethod));
+
+    // Memory copy methods
+    {
+      bool success = false;
+      if (!success) {
+        success = register_natives("1.7 memory copy methods", env, unsafecls, memcopy_methods_17, sizeof(memcopy_methods_17)/sizeof(JNINativeMethod));
+      }
+      if (!success) {
+        success = register_natives("1.5 memory copy methods", env, unsafecls, memcopy_methods_15, sizeof(memcopy_methods_15)/sizeof(JNINativeMethod));
+      }
+    }
+
+    // Unsafe.defineAnonymousClass
+    if (EnableInvokeDynamic) {
+      register_natives("1.7 define anonymous class method", env, unsafecls, anonk_methods, sizeof(anonk_methods)/sizeof(JNINativeMethod));
+    }
+
+    // Unsafe.shouldBeInitialized
+    if (EnableInvokeDynamic) {
+      register_natives("1.7 LambdaForm support", env, unsafecls, lform_methods, sizeof(lform_methods)/sizeof(JNINativeMethod));
+    }
+
+    // Fence methods
+    register_natives("1.8 fence methods", env, unsafecls, fence_methods, sizeof(fence_methods)/sizeof(JNINativeMethod));
   }
 JVM_END
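
The rewritten registration funnels every table through register_natives() and takes the first one that binds cleanly, in a deliberate order: as the comment notes, the 1.8 table would also bind against a 1.6/1.7 Unsafe.java, so the larger 1.6 table must be tried first. A standalone sketch of that first-match fallback pattern (the hypothetical try_register stands in for env->RegisterNatives):

    #include <cstddef>
    #include <cstdio>

    struct MethodTable { const char* label; /* entries elided */ };

    // Stand-in for env->RegisterNatives(); returns true on success.
    static bool try_register(const MethodTable& t) {
      printf("registering %s\n", t.label);
      return true;  // assume the first compatible table binds
    }

    // Try candidate tables in priority order; the first success wins,
    // so a superset table must come before any subset of it.
    static bool register_first_match(const MethodTable* tables, size_t n) {
      for (size_t i = 0; i < n; i++) {
        if (try_register(tables[i])) {
          return true;
        }
      }
      return false;  // caller aborts, as the guarantee() above does
    }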

View file

@@ -2528,7 +2528,7 @@ class CommandLineFlags {
           "disable locking assertions (for speed)")                        \
                                                                            \
   product(bool, RangeCheckElimination, true,                               \
-          "Split loop iterations to eliminate range checks")               \
+          "Eliminate range checks")                                        \
                                                                            \
   develop_pd(bool, UncommonNullCast,                                       \
           "track occurrences of null in casts; adjust compiler tactics")   \

View file

@@ -391,42 +391,29 @@ vframeStream::vframeStream(JavaThread* thread, frame top_frame,
 // Step back n frames, skip any pseudo frames in between.
 // This function is used in Class.forName, Class.newInstance, Method.Invoke,
 // AccessController.doPrivileged.
-//
-// NOTE that in JDK 1.4 this has been exposed to Java as
-// sun.reflect.Reflection.getCallerClass(), which can be inlined.
-// Inlined versions must match this routine's logic.
-// Native method prefixing logic does not need to match since
-// the method names don't match and inlining will not occur.
-// See, for example,
-// Parse::inline_native_Reflection_getCallerClass in
-// opto/library_call.cpp.
 void vframeStreamCommon::security_get_caller_frame(int depth) {
-  bool use_new_reflection = JDK_Version::is_gte_jdk14x_version() && UseNewReflection;
-
-  while (!at_end()) {
-    if (Universe::reflect_invoke_cache()->is_same_method(method())) {
-      // This is Method.invoke() -- skip it
-    } else if (use_new_reflection &&
-               method()->method_holder()
-                  ->is_subclass_of(SystemDictionary::reflect_MethodAccessorImpl_klass())) {
-      // This is an auxilary frame -- skip it
-    } else if (method()->is_method_handle_intrinsic() ||
-               method()->is_compiled_lambda_form()) {
-      // This is an internal adapter frame for method handles -- skip it
-    } else {
-      // This is non-excluded frame, we need to count it against the depth
-      if (depth-- <= 0) {
-        // we have reached the desired depth, we are done
-        break;
-      }
-    }
+  assert(depth >= 0, err_msg("invalid depth: %d", depth));
+  for (int n = 0; !at_end(); security_next()) {
+    if (!method()->is_ignored_by_security_stack_walk()) {
+      if (n == depth) {
+        // We have reached the desired depth; return.
+        return;
+      }
+      n++;  // this is a non-skipped frame; count it against the depth
+    }
+  }
+  // NOTE: At this point there were not enough frames on the stack
+  // to walk to depth.  Callers of this method have to check for at_end.
+}
+
+
+void vframeStreamCommon::security_next() {
   if (method()->is_prefixed_native()) {
-    skip_prefixed_method_and_wrappers();
+    skip_prefixed_method_and_wrappers();  // calls next()
   } else {
     next();
   }
 }
-}
 
 void vframeStreamCommon::skip_prefixed_method_and_wrappers() {
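
security_get_caller_frame() now counts only frames that survive is_ignored_by_security_stack_walk(), and the skip-native-prefix step is factored into security_next(). A toy model of the depth counting over a hypothetical frame list (not the real vframeStream):

    #include <string>
    #include <vector>

    struct Frame { std::string holder; bool ignored; };

    // Return the index of the caller at `depth`, counting only frames a
    // security stack walk would report (mirrors the new loop's semantics).
    int caller_at_depth(const std::vector<Frame>& stack, int depth) {
      int n = 0;
      for (size_t i = 0; i < stack.size(); i++) {
        if (!stack[i].ignored) {
          if (n == depth) return (int)i;
          n++;  // non-skipped frame: count it against the depth
        }
      }
      return -1;  // not enough frames; callers must check, like at_end()
    }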

View file

@@ -336,6 +336,7 @@ class vframeStreamCommon : StackObj {
       _frame = _frame.sender(&_reg_map);
     } while (!fill_from_frame());
   }
+  void security_next();
 
   bool at_end() const { return _mode == at_end_mode; }

View file

@@ -25,7 +25,7 @@
  * @test
  * @bug 8009761
  * @summary Deoptimization on sparc doesn't set Llast_SP correctly in the interpreter frames it creates
- * @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation Test8009761
+ * @run main/othervm -Xmixed -XX:-UseOnStackReplacement -XX:-BackgroundCompilation Test8009761
  *
  */