Mirror of https://github.com/openjdk/jdk.git, synced 2025-09-24 04:54:40 +02:00
6675699: need comprehensive fix for unconstrained ConvI2L with narrowed type
Emit CastII to make narrow ConvI2L dependent on the corresponding range check.
Reviewed-by: kvn, roland
parent 01a4b31e78
commit ca56200977
18 changed files with 344 additions and 44 deletions
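Background, not part of the changeset: a minimal Java sketch of the pattern the fix targets. On 64-bit VMs the int index of a range-checked array access is widened to long (ConvI2L) with a type narrowed by that range check, so the widened value must stay dependent on the check; previously, loop optimizations could remove the narrowed ConvI2L independently of the check, leaving a (dead) control path whose data input had become TOP. Class and method names here are invented for illustration.

public class ConvI2LExample {
    static int sum(int[] a, int start) {
        int s = 0;
        // Every a[i] access below is guarded by a range check. On 64-bit VMs
        // the int index i is widened to long for address arithmetic, and the
        // widened value gets a narrow type derived from the range check, so
        // it must not float above that check (the CastII added by this fix
        // keeps the dependency until loop optimizations are finished).
        for (int i = start; i < a.length; i++) {
            s += a[i];
        }
        return s;
    }

    public static void main(String[] args) {
        System.out.println(sum(new int[100], 0));
    }
}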
@@ -89,7 +89,6 @@ private:
  static const TypePtr* get_address_type(PhaseGVN *phase, Node* n);

  Node* try_clone_instance(PhaseGVN *phase, bool can_reshape, int count);
  Node* conv_I2X_offset(PhaseGVN *phase, Node* offset, const TypeAryPtr* ary_t);
  bool prepare_array_copy(PhaseGVN *phase, bool can_reshape,
                          Node*& adr_src, Node*& base_src, Node*& adr_dest, Node*& base_dest,
                          BasicType& copy_type, const Type*& value_type, bool& disjoint_bases);
@@ -277,6 +277,23 @@ Node *CastIINode::Ideal(PhaseGVN *phase, bool can_reshape) {
  return NULL;
}

uint CastIINode::cmp(const Node &n) const {
  return ConstraintCastNode::cmp(n) && ((CastIINode&)n)._range_check_dependency == _range_check_dependency;
}

uint CastIINode::size_of() const {
  return sizeof(*this);
}

#ifndef PRODUCT
void CastIINode::dump_spec(outputStream* st) const {
  ConstraintCastNode::dump_spec(st);
  if (_range_check_dependency) {
    st->print(" range check dependency");
  }
}
#endif

//=============================================================================
//------------------------------Identity---------------------------------------
// If input is already higher or equal to cast type, then this is an identity.
@@ -62,13 +62,33 @@ class ConstraintCastNode: public TypeNode {
//------------------------------CastIINode-------------------------------------
// cast integer to integer (different range)
class CastIINode: public ConstraintCastNode {
  protected:
    // Is this node dependent on a range check?
    const bool _range_check_dependency;
    virtual uint cmp(const Node &n) const;
    virtual uint size_of() const;

  public:
    CastIINode(Node *n, const Type *t, bool carry_dependency = false)
      : ConstraintCastNode(n, t, carry_dependency) {}
    CastIINode(Node* n, const Type* t, bool carry_dependency = false, bool range_check_dependency = false)
      : ConstraintCastNode(n, t, carry_dependency), _range_check_dependency(range_check_dependency) {
      init_class_id(Class_CastII);
    }
    virtual int Opcode() const;
    virtual uint ideal_reg() const { return Op_RegI; }
    virtual const Type* Value(PhaseGVN* phase) const;
    virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
    const bool has_range_check() {
#ifdef _LP64
      return _range_check_dependency;
#else
      assert(!_range_check_dependency, "Should not have range check dependency");
      return false;
#endif
    }

#ifndef PRODUCT
    virtual void dump_spec(outputStream* st) const;
#endif
};

//------------------------------CastPPNode-------------------------------------
@@ -277,13 +277,6 @@ class IfNode : public MultiBranchNode {
  virtual uint size_of() const { return sizeof(*this); }

private:
  ProjNode* range_check_trap_proj() {
    int flip_test = 0;
    Node* l = NULL;
    Node* r = NULL;
    return range_check_trap_proj(flip_test, l, r);
  }

  // Helper methods for fold_compares
  bool cmpi_folds(PhaseIterGVN* igvn);
  bool is_ctrl_folds(Node* ctrl, PhaseIterGVN* igvn);
@@ -38,6 +38,7 @@
#include "opto/c2compiler.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/compile.hpp"
@@ -402,6 +403,13 @@ void Compile::remove_useless_nodes(Unique_Node_List &useful) {
      remove_macro_node(n);
    }
  }
  // Remove useless CastII nodes with range check dependency
  for (int i = range_check_cast_count() - 1; i >= 0; i--) {
    Node* cast = range_check_cast_node(i);
    if (!useful.member(cast)) {
      remove_range_check_cast(cast);
    }
  }
  // Remove useless expensive node
  for (int i = C->expensive_count()-1; i >= 0; i--) {
    Node* n = C->expensive_node(i);
@@ -1178,6 +1186,7 @@ void Compile::Init(int aliaslevel) {
  _macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
  _predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
  _expensive_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
  _range_check_casts = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
  register_library_intrinsics();
}

@@ -1924,6 +1933,22 @@ void Compile::cleanup_loop_predicates(PhaseIterGVN &igvn) {
  assert(predicate_count()==0, "should be clean!");
}

void Compile::add_range_check_cast(Node* n) {
  assert(n->isa_CastII()->has_range_check(), "CastII should have range check dependency");
  assert(!_range_check_casts->contains(n), "duplicate entry in range check casts");
  _range_check_casts->append(n);
}

// Remove all range check dependent CastIINodes.
void Compile::remove_range_check_casts(PhaseIterGVN &igvn) {
  for (int i = range_check_cast_count(); i > 0; i--) {
    Node* cast = range_check_cast_node(i-1);
    assert(cast->isa_CastII()->has_range_check(), "CastII should have range check dependency");
    igvn.replace_node(cast, cast->in(1));
  }
  assert(range_check_cast_count() == 0, "should be empty");
}

// StringOpts and late inlining of string methods
void Compile::inline_string_calls(bool parse_time) {
  {
@@ -2284,6 +2309,12 @@ void Compile::Optimize() {
    PhaseIdealLoop::verify(igvn);
  }

  if (range_check_cast_count() > 0) {
    // No more loop optimizations. Remove all range check dependent CastIINodes.
    C->remove_range_check_casts(igvn);
    igvn.optimize();
  }

  {
    TracePhase tp("macroExpand", &timers[_t_macroExpand]);
    PhaseMacroExpand mex(igvn);
@@ -3087,6 +3118,16 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {

#endif

#ifdef ASSERT
  case Op_CastII:
    // Verify that all range check dependent CastII nodes were removed.
    if (n->isa_CastII()->has_range_check()) {
      n->dump(3);
      assert(false, "Range check dependent CastII node was not removed");
    }
    break;
#endif

  case Op_ModI:
    if (UseDivMod) {
      // Check if a%b and a/b both exist
@@ -3962,7 +4003,7 @@ int Compile::static_subtype_check(ciKlass* superk, ciKlass* subk) {
  return SSC_full_test;
}

Node* Compile::conv_I2X_index(PhaseGVN *phase, Node* idx, const TypeInt* sizetype) {
Node* Compile::conv_I2X_index(PhaseGVN* phase, Node* idx, const TypeInt* sizetype, Node* ctrl) {
#ifdef _LP64
  // The scaled index operand to AddP must be a clean 64-bit value.
  // Java allows a 32-bit int to be incremented to a negative
@@ -3976,12 +4017,30 @@ Node* Compile::conv_I2X_index(PhaseGVN *phase, Node* idx, const TypeInt* sizetyp
  // This assertion is used by ConvI2LNode::Ideal.
  int index_max = max_jint - 1; // array size is max_jint, index is one less
  if (sizetype != NULL) index_max = sizetype->_hi - 1;
  const TypeLong* lidxtype = TypeLong::make(CONST64(0), index_max, Type::WidenMax);
  idx = phase->transform(new ConvI2LNode(idx, lidxtype));
  const TypeInt* iidxtype = TypeInt::make(0, index_max, Type::WidenMax);
  idx = constrained_convI2L(phase, idx, iidxtype, ctrl);
#endif
  return idx;
}

// Convert integer value to a narrowed long type dependent on ctrl (for example, a range check)
Node* Compile::constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl) {
  if (ctrl != NULL) {
    // Express control dependency by a CastII node with a narrow type.
    value = new CastIINode(value, itype, false, true /* range check dependency */);
    // Make the CastII node dependent on the control input to prevent the narrowed ConvI2L
    // node from floating above the range check during loop optimizations. Otherwise, the
    // ConvI2L node may be eliminated independently of the range check, causing the data path
    // to become TOP while the control path is still there (although it's unreachable).
    value->set_req(0, ctrl);
    // Save CastII node to remove it after loop optimizations.
    phase->C->add_range_check_cast(value);
    value = phase->transform(value);
  }
  const TypeLong* ltype = TypeLong::make(itype->_lo, itype->_hi, itype->_widen);
  return phase->transform(new ConvI2LNode(value, ltype));
}

// The message about the current inlining is accumulated in
// _print_inlining_stream and transfered into the _print_inlining_list
// once we know whether inlining succeeds or not. For regular
@@ -400,6 +400,7 @@ class Compile : public Phase {
  GrowableArray<Node*>* _macro_nodes;       // List of nodes which need to be expanded before matching.
  GrowableArray<Node*>* _predicate_opaqs;   // List of Opaque1 nodes for the loop predicates.
  GrowableArray<Node*>* _expensive_nodes;   // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
  GrowableArray<Node*>* _range_check_casts; // List of CastII nodes with a range check dependency
  ConnectionGraph* _congraph;
#ifndef PRODUCT
  IdealGraphPrinter* _printer;
@@ -753,7 +754,7 @@ class Compile : public Phase {
  void set_congraph(ConnectionGraph* congraph) { _congraph = congraph;}
  void add_macro_node(Node * n) {
    //assert(n->is_macro(), "must be a macro node");
    assert(!_macro_nodes->contains(n), " duplicate entry in expand list");
    assert(!_macro_nodes->contains(n), "duplicate entry in expand list");
    _macro_nodes->append(n);
  }
  void remove_macro_node(Node * n) {
@@ -773,10 +774,23 @@ class Compile : public Phase {
    }
  }
  void add_predicate_opaq(Node * n) {
    assert(!_predicate_opaqs->contains(n), " duplicate entry in predicate opaque1");
    assert(!_predicate_opaqs->contains(n), "duplicate entry in predicate opaque1");
    assert(_macro_nodes->contains(n), "should have already been in macro list");
    _predicate_opaqs->append(n);
  }

  // Range check dependent CastII nodes that can be removed after loop optimizations
  void add_range_check_cast(Node* n);
  void remove_range_check_cast(Node* n) {
    if (_range_check_casts->contains(n)) {
      _range_check_casts->remove(n);
    }
  }
  Node* range_check_cast_node(int idx) const { return _range_check_casts->at(idx); }
  int range_check_cast_count() const { return _range_check_casts->length(); }
  // Remove all range check dependent CastIINodes.
  void remove_range_check_casts(PhaseIterGVN &igvn);

  // remove the opaque nodes that protect the predicates so that the unused checks and
  // uncommon traps will be eliminated from the graph.
  void cleanup_loop_predicates(PhaseIterGVN &igvn);
@@ -1292,7 +1306,12 @@ class Compile : public Phase {
  enum { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
  int static_subtype_check(ciKlass* superk, ciKlass* subk);

  static Node* conv_I2X_index(PhaseGVN *phase, Node* offset, const TypeInt* sizetype);
  static Node* conv_I2X_index(PhaseGVN* phase, Node* offset, const TypeInt* sizetype,
                              // Optional control dependency (for example, on range check)
                              Node* ctrl = NULL);

  // Convert integer value to a narrowed long type dependent on ctrl (for example, a range check)
  static Node* constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl);

  // Auxiliary method for randomized fuzzing/stressing
  static bool randomized_select(int count);
@@ -24,6 +24,7 @@

#include "precompiled.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/matcher.hpp"
#include "opto/phaseX.hpp"
@@ -293,7 +294,8 @@ Node *ConvI2LNode::Ideal(PhaseGVN *phase, bool can_reshape) {
}

#ifdef _LP64
// Convert ConvI2L(AddI(x, y)) to AddL(ConvI2L(x), ConvI2L(y)) ,
// Convert ConvI2L(AddI(x, y)) to AddL(ConvI2L(x), ConvI2L(y)) or
// ConvI2L(CastII(AddI(x, y))) to AddL(ConvI2L(CastII(x)), ConvI2L(CastII(y))),
// but only if x and y have subranges that cannot cause 32-bit overflow,
// under the assumption that x+y is in my own subrange this->type().

@@ -317,6 +319,13 @@ Node *ConvI2LNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  Node* z = in(1);
  int op = z->Opcode();
  Node* ctrl = NULL;
  if (op == Op_CastII && z->as_CastII()->has_range_check()) {
    // Skip CastII node but save control dependency
    ctrl = z->in(0);
    z = z->in(1);
    op = z->Opcode();
  }
  if (op == Op_AddI || op == Op_SubI) {
    Node* x = z->in(1);
    Node* y = z->in(2);
@@ -374,9 +383,10 @@ Node *ConvI2LNode::Ideal(PhaseGVN *phase, bool can_reshape) {
      rylo = -ryhi;
      ryhi = -rylo0;
    }

    Node* cx = phase->transform( new ConvI2LNode(x, TypeLong::make(rxlo, rxhi, widen)) );
    Node* cy = phase->transform( new ConvI2LNode(y, TypeLong::make(rylo, ryhi, widen)) );
    assert(rxlo == (int)rxlo && rxhi == (int)rxhi, "x should not overflow");
    assert(rylo == (int)rylo && ryhi == (int)ryhi, "y should not overflow");
    Node* cx = phase->C->constrained_convI2L(phase, x, TypeInt::make(rxlo, rxhi, widen), ctrl);
    Node* cy = phase->C->constrained_convI2L(phase, y, TypeInt::make(rylo, ryhi, widen), ctrl);
    switch (op) {
    case Op_AddI: return new AddLNode(cx, cy);
    case Op_SubI: return new SubLNode(cx, cy);
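An illustrative Java case, not from the changeset, for the AddI/SubI rewrite above: an index expression such as i + offset feeds the int-to-long conversion, and ConvI2L(AddI(x, y)) may be split into AddL(ConvI2L(x), ConvI2L(y)) only when the operand ranges prove the 32-bit add cannot overflow; with this change the narrowed conversions are created through constrained_convI2L so they keep the range-check dependency. Names below are invented for illustration.

public class AddIndexExample {
    static int read(int[] a, int i, int offset) {
        // The index expression i + offset feeds the address computation of
        // the range-checked access below; the narrowed int-to-long
        // conversions produced for it must stay dependent on that check.
        return a[i + offset];
    }

    public static void main(String[] args) {
        int[] a = new int[32];
        a[7] = 42;
        System.out.println(read(a, 3, 4));
    }
}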
@@ -1658,7 +1658,7 @@ Node* GraphKit::store_oop_to_unknown(Node* ctl,

//-------------------------array_element_address-------------------------
Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
                                      const TypeInt* sizetype) {
                                      const TypeInt* sizetype, Node* ctrl) {
  uint shift = exact_log2(type2aelembytes(elembt));
  uint header = arrayOopDesc::base_offset_in_bytes(elembt);

@@ -1671,7 +1671,7 @@ Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,

  // must be correct type for alignment purposes
  Node* base = basic_plus_adr(ary, header);
  idx = Compile::conv_I2X_index(&_gvn, idx, sizetype);
  idx = Compile::conv_I2X_index(&_gvn, idx, sizetype, ctrl);
  Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
  return basic_plus_adr(ary, base, scale);
}
@@ -3506,10 +3506,6 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)

  Node* initial_slow_cmp = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) );
  Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) );
  if (initial_slow_test->is_Bool()) {
    // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
    initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
  }

  // --- Size Computation ---
  // array_size = round_to_heap(array_header + (length << elem_shift));
@@ -3555,13 +3551,35 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
  Node* lengthx = ConvI2X(length);
  Node* headerx = ConvI2X(header_size);
#ifdef _LP64
  { const TypeLong* tllen = _gvn.find_long_type(lengthx);
    if (tllen != NULL && tllen->_lo < 0) {
  { const TypeInt* tilen = _gvn.find_int_type(length);
    if (tilen != NULL && tilen->_lo < 0) {
      // Add a manual constraint to a positive range. Cf. array_element_address.
      jlong size_max = arrayOopDesc::max_array_length(T_BYTE);
      if (size_max > tllen->_hi) size_max = tllen->_hi;
      const TypeLong* tlcon = TypeLong::make(CONST64(0), size_max, Type::WidenMin);
      lengthx = _gvn.transform( new ConvI2LNode(length, tlcon));
      jint size_max = fast_size_limit;
      if (size_max > tilen->_hi) size_max = tilen->_hi;
      const TypeInt* tlcon = TypeInt::make(0, size_max, Type::WidenMin);

      // Only do a narrow I2L conversion if the range check passed.
      IfNode* iff = new IfNode(control(), initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
      _gvn.transform(iff);
      RegionNode* region = new RegionNode(3);
      _gvn.set_type(region, Type::CONTROL);
      lengthx = new PhiNode(region, TypeLong::LONG);
      _gvn.set_type(lengthx, TypeLong::LONG);

      // Range check passed. Use ConvI2L node with narrow type.
      Node* passed = IfFalse(iff);
      region->init_req(1, passed);
      // Make I2L conversion control dependent to prevent it from
      // floating above the range check during loop optimizations.
      lengthx->init_req(1, C->constrained_convI2L(&_gvn, length, tlcon, passed));

      // Range check failed. Use ConvI2L with wide type because length may be invalid.
      region->init_req(2, IfTrue(iff));
      lengthx->init_req(2, ConvI2X(length));

      set_control(region);
      record_for_igvn(region);
      record_for_igvn(lengthx);
    }
  }
#endif
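An illustrative Java case, not from the changeset, for the allocation path above: when the requested length may be negative, only the branch on which the initial size check passes may use the narrow int-to-long conversion of the length, while the failing branch keeps the wide conversion because the length may be invalid. Names below are invented for illustration.

public class NewArrayExample {
    static byte[] allocate(int len) {
        // If len is negative, this throws NegativeArraySizeException. The
        // compiled code checks the size first; only on the passing branch is
        // the length known to be in a valid range, so only there may the
        // int-to-long conversion of the length use a narrow type.
        return new byte[len];
    }

    public static void main(String[] args) {
        System.out.println(allocate(16).length);
        try {
            allocate(-1);
        } catch (NegativeArraySizeException e) {
            System.out.println("negative length rejected");
        }
    }
}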
@@ -3592,6 +3610,11 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
  Node *mem = reset_memory();
  set_all_memory(mem); // Create new memory state

  if (initial_slow_test->is_Bool()) {
    // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
    initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
  }

  // Create the AllocateArrayNode and its result projections
  AllocateArrayNode* alloc
    = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
@@ -634,7 +634,9 @@ class GraphKit : public Phase {
  // Return addressing for an array element.
  Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
                              // Optional constraint on the array size:
                              const TypeInt* sizetype = NULL);
                              const TypeInt* sizetype = NULL,
                              // Optional control dependency (for example, on range check)
                              Node* ctrl = NULL);

  // Return a load of array element at idx.
  Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);
@@ -1104,7 +1104,8 @@ void IfNode::improve_address_types(Node* l, Node* r, ProjNode* fail, PhaseIterGV
      if (ctrl == fail) {
        Node* init_n = stack.node_at(1);
        assert(init_n->Opcode() == Op_ConvI2L, "unexpected first node");
        Node* new_n = igvn->C->conv_I2X_index(igvn, l, array_size);
        // Create a new narrow ConvI2L node that is dependent on the range check
        Node* new_n = igvn->C->conv_I2X_index(igvn, l, array_size, fail);

        // The type of the ConvI2L may be widen and so the new
        // ConvI2L may not be better than an existing ConvI2L
@@ -2660,7 +2660,7 @@ bool IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new )

//=============================================================================
// Process all the loops in the loop tree and replace any fill
// patterns with an intrisc version.
// patterns with an intrinsic version.
bool PhaseIdealLoop::do_intrinsify_fill() {
  bool changed = false;
  for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
@@ -2758,8 +2758,9 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st
  }

  // Make sure the address expression can be handled. It should be
  // head->phi * elsize + con. head->phi might have a ConvI2L.
  // head->phi * elsize + con. head->phi might have a ConvI2L(CastII()).
  Node* elements[4];
  Node* cast = NULL;
  Node* conv = NULL;
  bool found_index = false;
  int count = store->in(MemNode::Address)->as_AddP()->unpack_offsets(elements, ARRAY_SIZE(elements));
@@ -2774,6 +2775,12 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st
        conv = value;
        value = value->in(1);
      }
      if (value->Opcode() == Op_CastII &&
          value->as_CastII()->has_range_check()) {
        // Skip range check dependent CastII nodes
        cast = value;
        value = value->in(1);
      }
#endif
      if (value != head->phi()) {
        msg = "unhandled shift in address";
@@ -2786,9 +2793,16 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st
        }
      }
    } else if (n->Opcode() == Op_ConvI2L && conv == NULL) {
      if (n->in(1) == head->phi()) {
        found_index = true;
      conv = n;
      n = n->in(1);
      if (n->Opcode() == Op_CastII &&
          n->as_CastII()->has_range_check()) {
        // Skip range check dependent CastII nodes
        cast = n;
        n = n->in(1);
      }
      if (n == head->phi()) {
        found_index = true;
      } else {
        msg = "unhandled input to ConvI2L";
      }
@@ -2847,6 +2861,7 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st
  // Address elements are ok
  if (con) ok.set(con->_idx);
  if (shift) ok.set(shift->_idx);
  if (cast) ok.set(cast->_idx);
  if (conv) ok.set(conv->_idx);

  for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/connode.hpp"
#include "opto/castnode.hpp"
#include "opto/divnode.hpp"
@@ -997,6 +998,9 @@ static bool merge_point_safe(Node* region) {
#ifdef _LP64
      if (m->Opcode() == Op_ConvI2L)
        return false;
      if (m->is_CastII() && m->isa_CastII()->has_range_check()) {
        return false;
      }
#endif
    }
  }
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/loopnode.hpp"
@@ -516,6 +517,11 @@ Node *Node::clone() const {
    C->add_macro_node(n);
  if (is_expensive())
    C->add_expensive_node(n);
  // If the cloned node is a range check dependent CastII, add it to the list.
  CastIINode* cast = n->isa_CastII();
  if (cast != NULL && cast->has_range_check()) {
    C->add_range_check_cast(cast);
  }

  n->set_idx(C->next_unique()); // Get new unique index as well
  debug_only( n->verify_construction() );
@@ -644,6 +650,11 @@ void Node::destruct() {
  if (is_expensive()) {
    compile->remove_expensive_node(this);
  }
  CastIINode* cast = isa_CastII();
  if (cast != NULL && cast->has_range_check()) {
    compile->remove_range_check_cast(cast);
  }

  if (is_SafePoint()) {
    as_SafePoint()->delete_replaced_nodes();
  }
@@ -1379,6 +1390,10 @@ static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) {
      if (dead->is_expensive()) {
        igvn->C->remove_expensive_node(dead);
      }
      CastIINode* cast = dead->isa_CastII();
      if (cast != NULL && cast->has_range_check()) {
        igvn->C->remove_range_check_cast(cast);
      }
      igvn->C->record_dead_node(dead->_idx);
      // Kill all inputs to the dead guy
      for (uint i=0; i < dead->req(); i++) {
@@ -51,6 +51,7 @@ class CallLeafNode;
class CallNode;
class CallRuntimeNode;
class CallStaticJavaNode;
class CastIINode;
class CatchNode;
class CatchProjNode;
class CheckCastPPNode;
@@ -653,6 +654,7 @@ public:
    DEFINE_CLASS_ID(Type, Node, 2)
      DEFINE_CLASS_ID(Phi, Type, 0)
      DEFINE_CLASS_ID(ConstraintCast, Type, 1)
        DEFINE_CLASS_ID(CastII, ConstraintCast, 0)
      DEFINE_CLASS_ID(CheckCastPP, Type, 2)
      DEFINE_CLASS_ID(CMove, Type, 3)
      DEFINE_CLASS_ID(SafePointScalarObject, Type, 4)
@@ -784,6 +786,7 @@ public:
  DEFINE_CLASS_QUERY(Catch)
  DEFINE_CLASS_QUERY(CatchProj)
  DEFINE_CLASS_QUERY(CheckCastPP)
  DEFINE_CLASS_QUERY(CastII)
  DEFINE_CLASS_QUERY(ConstraintCast)
  DEFINE_CLASS_QUERY(ClearArray)
  DEFINE_CLASS_QUERY(CMove)
@@ -166,7 +166,9 @@ Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
  // Check for always knowing you are throwing a range-check exception
  if (stopped()) return top();

  Node* ptr = array_element_address(ary, idx, type, sizetype);
  // Make array address computation control dependent to prevent it
  // from floating above the range check during loop optimizations.
  Node* ptr = array_element_address(ary, idx, type, sizetype, control());

  if (result2 != NULL) *result2 = elemtype;

@@ -466,12 +468,14 @@ bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi)
  // of all possible ranges for a switch statement
  // The key_val input must be converted to a pointer offset and scaled.
  // Compare Parse::array_addressing above.
#ifdef _LP64

  // Clean the 32-bit int into a real 64-bit offset.
  // Otherwise, the jint value 0 might turn into an offset of 0x0800000000.
  const TypeLong* lkeytype = TypeLong::make(CONST64(0), num_cases-1, Type::WidenMin);
  key_val = _gvn.transform( new ConvI2LNode(key_val, lkeytype) );
#endif
  const TypeInt* ikeytype = TypeInt::make(0, num_cases, Type::WidenMin);
  // Make I2L conversion control dependent to prevent it from
  // floating above the range check during loop optimizations.
  key_val = C->conv_I2X_index(&_gvn, key_val, ikeytype, control());

  // Shift the value by wordsize so we have an index into the table, rather
  // than a switch value
  Node *shiftWord = _gvn.MakeConX(wordSize);
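An illustrative Java case, not from the changeset, for the jump-table path above: a sufficiently dense switch may be compiled into a table lookup (subject to C2 heuristics), where the key is checked against the table bounds and then converted to a 64-bit offset; as with array indexing, that conversion now goes through conv_I2X_index with a control dependency on the check. Names below are invented for illustration.

public class JumpTableExample {
    static int classify(int key) {
        // A dense switch like this may be compiled into a jump table; the
        // key is bounds-checked and then scaled to a 64-bit table offset,
        // and the int-to-long conversion of the key must not float above
        // that bounds check.
        switch (key) {
            case 0: return 10;
            case 1: return 11;
            case 2: return 12;
            case 3: return 13;
            case 4: return 14;
            case 5: return 15;
            default: return -1;
        }
    }

    public static void main(String[] args) {
        for (int i = -1; i <= 6; i++) {
            System.out.println(i + " -> " + classify(i));
        }
    }
}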
@@ -26,6 +26,7 @@
#include "memory/allocation.inline.hpp"
#include "opto/block.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/loopnode.hpp"
@@ -1412,6 +1413,10 @@ void PhaseIterGVN::remove_globally_dead_node( Node *dead ) {
        if (dead->is_expensive()) {
          C->remove_expensive_node(dead);
        }
        CastIINode* cast = dead->isa_CastII();
        if (cast != NULL && cast->has_range_check()) {
          C->remove_range_check_cast(cast);
        }
      }
    } // while (_stack.is_nonempty())
}
@@ -3343,6 +3343,11 @@ bool SWPointer::scaled_iv(Node* n) {
      return true;
    }
  } else if (opc == Op_ConvI2L) {
    if (n->in(1)->Opcode() == Op_CastII &&
        n->in(1)->as_CastII()->has_range_check()) {
      // Skip range check dependent CastII nodes
      n = n->in(1);
    }
    if (scaled_iv_plus_offset(n->in(1))) {
      NOT_PRODUCT(_tracer.scaled_iv_7(n);)
      return true;
@@ -3437,6 +3442,12 @@ bool SWPointer::offset_plus_k(Node* n, bool negate) {
  if (invariant(n)) {
    if (opc == Op_ConvI2L) {
      n = n->in(1);
      if (n->Opcode() == Op_CastII &&
          n->as_CastII()->has_range_check()) {
        // Skip range check dependent CastII nodes
        assert(invariant(n), "sanity");
        n = n->in(1);
      }
    }
    if (n->bottom_type()->isa_int()) {
      _negate_invar = negate;
hotspot/test/compiler/loopopts/TestLoopPeeling.java (new file, 100 lines)
@@ -0,0 +1,100 @@
/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

/*
 * @test
 * @bug 8078262
 * @summary Tests correct dominator information after loop peeling.
 * @run main/othervm -Xcomp -XX:CompileCommand=compileonly,TestLoopPeeling::test* TestLoopPeeling
 */
public class TestLoopPeeling {

    public int[] array = new int[100];

    public static void main(String args[]) {
        TestLoopPeeling test = new TestLoopPeeling();
        try {
            test.testArrayAccess(0, 1);
            test.testArrayAllocation(0, 1);
        } catch (Exception e) {
            // Ignore exceptions
        }
    }

    public void testArrayAccess(int index, int inc) {
        int storeIndex = -1;

        for (; index < 10; index += inc) {
            // This loop invariant check triggers loop peeling because it can
            // be moved out of the loop (see 'IdealLoopTree::policy_peeling').
            if (inc == 42) return;

            // This loop variant usage of LShiftL( ConvI2L( Phi(storeIndex) ) )
            // prevents the split if optimization that would otherwise clone the
            // LShiftL and ConvI2L nodes and assign them to their corresponding array
            // address computation (see 'PhaseIdealLoop::split_if_with_blocks_post').
            if (storeIndex > 0 && array[storeIndex] == 42) return;

            if (index == 42) {
                // This store and the corresponding range check are moved out of the
                // loop and both used after old loop and the peeled iteration exit.
                // For the peeled iteration, storeIndex is always -1 and the ConvI2L
                // is replaced by TOP. However, the range check is not folded because
                // we don't do the split if optimization in PhaseIdealLoop2.
                // As a result, we have a (dead) control path from the peeled iteration
                // to the StoreI but the data path is removed.
                array[storeIndex] = 1;
                return;
            }

            storeIndex++;
        }
    }

    public byte[] testArrayAllocation(int index, int inc) {
        int allocationCount = -1;
        byte[] result;

        for (; index < 10; index += inc) {
            // This loop invariant check triggers loop peeling because it can
            // be moved out of the loop (see 'IdealLoopTree::policy_peeling').
            if (inc == 42) return null;

            if (index == 42) {
                // This allocation and the corresponding size check are moved out of the
                // loop and both used after old loop and the peeled iteration exit.
                // For the peeled iteration, allocationCount is always -1 and the ConvI2L
                // is replaced by TOP. However, the size check is not folded because
                // we don't do the split if optimization in PhaseIdealLoop2.
                // As a result, we have a (dead) control path from the peeled iteration
                // to the allocation but the data path is removed.
                result = new byte[allocationCount];
                return result;
            }

            allocationCount++;
        }
        return null;
    }
}