Mirror of https://github.com/openjdk/jdk.git (synced 2025-09-20 11:04:34 +02:00)

Commit 39b0e57fdd: Merge
5098 changed files with 176905 additions and 81175 deletions
@@ -47,6 +47,7 @@
 #include "opto/machnode.hpp"
 #include "opto/macro.hpp"
 #include "opto/matcher.hpp"
+#include "opto/mathexactnode.hpp"
 #include "opto/memnode.hpp"
 #include "opto/mulnode.hpp"
 #include "opto/node.hpp"
@@ -657,7 +658,8 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
                   _inlining_progress(false),
                   _inlining_incrementally(false),
                   _print_inlining_list(NULL),
-                  _print_inlining(0) {
+                  _print_inlining_idx(0),
+                  _preserve_jvm_state(0) {
   C = this;

   CompileWrapper cw(this);
@@ -682,6 +684,8 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
   set_print_assembly(print_opto_assembly);
   set_parsed_irreducible_loop(false);
 #endif
+  set_print_inlining(PrintInlining || method()->has_option("PrintInlining") NOT_PRODUCT( || PrintOptoInlining));
+  set_print_intrinsics(PrintIntrinsics || method()->has_option("PrintIntrinsics"));

   if (ProfileTraps) {
     // Make sure the method being compiled gets its own MDO,
@@ -713,7 +717,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
   PhaseGVN gvn(node_arena(), estimated_size);
   set_initial_gvn(&gvn);

-  if (PrintInlining || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
+  if (print_inlining() || print_intrinsics()) {
     _print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
   }
   { // Scope for timing the parser
@@ -763,7 +767,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
       return;
     }
     JVMState* jvms = build_start_state(start(), tf());
-    if ((jvms = cg->generate(jvms)) == NULL) {
+    if ((jvms = cg->generate(jvms, NULL)) == NULL) {
       record_method_not_compilable("method parse failed");
       return;
     }
@@ -940,7 +944,8 @@ Compile::Compile( ciEnv* ci_env,
                   _inlining_progress(false),
                   _inlining_incrementally(false),
                   _print_inlining_list(NULL),
-                  _print_inlining(0) {
+                  _print_inlining_idx(0),
+                  _preserve_jvm_state(0) {
   C = this;

 #ifndef PRODUCT
@@ -1300,6 +1305,10 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {

   // Array pointers need some flattening
   const TypeAryPtr *ta = tj->isa_aryptr();
+  if (ta && ta->is_stable()) {
+    // Erase stability property for alias analysis.
+    tj = ta = ta->cast_to_stable(false);
+  }
   if( ta && is_known_inst ) {
     if ( offset != Type::OffsetBot &&
          offset > arrayOopDesc::length_offset_in_bytes() ) {
@@ -1354,7 +1363,7 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
     // During the 2nd round of IterGVN, NotNull castings are removed.
     // Make sure the Bottom and NotNull variants alias the same.
     // Also, make sure exact and non-exact variants alias the same.
-    if( ptr == TypePtr::NotNull || ta->klass_is_exact() ) {
+    if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) {
       tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset);
     }
   }
@@ -1379,6 +1388,9 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
       // Also, make sure exact and non-exact variants alias the same.
       tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
     }
+    if (to->speculative() != NULL) {
+      tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),to->offset(), to->instance_id());
+    }
     // Canonicalize the holder of this field
     if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
       // First handle header references such as a LoadKlassNode, even if the
@@ -1500,6 +1512,7 @@ void Compile::AliasType::Init(int i, const TypePtr* at) {
   _index = i;
   _adr_type = at;
   _field = NULL;
+  _element = NULL;
   _is_rewritable = true; // default
   const TypeOopPtr *atoop = (at != NULL) ? at->isa_oopptr() : NULL;
   if (atoop != NULL && atoop->is_known_instance()) {
@@ -1618,6 +1631,16 @@ Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_cr
         && flat->is_instptr()->klass() == env()->Class_klass())
       alias_type(idx)->set_rewritable(false);
   }
+  if (flat->isa_aryptr()) {
+#ifdef ASSERT
+    const int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
+    // (T_BYTE has the weakest alignment and size restrictions...)
+    assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
+#endif
+    if (flat->offset() == TypePtr::OffsetBot) {
+      alias_type(idx)->set_element(flat->is_aryptr()->elem());
+    }
+  }
   if (flat->isa_klassptr()) {
     if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
       alias_type(idx)->set_rewritable(false);
@@ -1680,7 +1703,7 @@ Compile::AliasType* Compile::alias_type(ciField* field) {
   else
     t = TypeOopPtr::make_from_klass_raw(field->holder());
   AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
-  assert(field->is_final() == !atp->is_rewritable(), "must get the rewritable bits correct");
+  assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
   return atp;
 }

@@ -1996,6 +2019,12 @@ void Compile::Optimize() {
     if (failing()) return;
   }

+  // Remove the speculative part of types and clean up the graph from
+  // the extra CastPP nodes whose only purpose is to carry them. Do
+  // that early so that optimizations are not disrupted by the extra
+  // CastPP nodes.
+  remove_speculative_types(igvn);
+
   // No more new expensive nodes will be added to the list from here
   // so keep only the actual candidates for optimizations.
   cleanup_expensive_nodes(igvn);
@@ -2261,7 +2290,7 @@ void Compile::dump_asm(int *pcs, uint pc_limit) {
     if (block->is_connector() && !Verbose) {
       continue;
     }
-    n = block->_nodes[0];
+    n = block->head();
     if (pcs && n->_idx < pc_limit) {
       tty->print("%3.3x ", pcs[n->_idx]);
     } else {
@@ -2276,12 +2305,12 @@ void Compile::dump_asm(int *pcs, uint pc_limit) {

     // For all instructions
     Node *delay = NULL;
-    for (uint j = 0; j < block->_nodes.size(); j++) {
+    for (uint j = 0; j < block->number_of_nodes(); j++) {
       if (VMThread::should_terminate()) {
         cut_short = true;
         break;
       }
-      n = block->_nodes[j];
+      n = block->get_node(j);
       if (valid_bundle_info(n)) {
         Bundle* bundle = node_bundling(n);
         if (bundle->used_in_unconditional_delay()) {
@@ -2634,7 +2663,7 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
             addp->in(AddPNode::Base) == n->in(AddPNode::Base),
             "Base pointers must match" );
 #ifdef _LP64
-    if ((UseCompressedOops || UseCompressedKlassPointers) &&
+    if ((UseCompressedOops || UseCompressedClassPointers) &&
         addp->Opcode() == Op_ConP &&
         addp == n->in(AddPNode::Base) &&
         n->in(AddPNode::Offset)->is_Con()) {
@@ -2972,6 +3001,37 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
       n->set_req(MemBarNode::Precedent, top());
     }
     break;
+    // Must set a control edge on all nodes that produce a FlagsProj
+    // so they can't escape the block that consumes the flags.
+    // Must also set the non throwing branch as the control
+    // for all nodes that depends on the result. Unless the node
+    // already have a control that isn't the control of the
+    // flag producer
+  case Op_FlagsProj:
+    {
+      MathExactNode* math = (MathExactNode*) n->in(0);
+      Node* ctrl = math->control_node();
+      Node* non_throwing = math->non_throwing_branch();
+      math->set_req(0, ctrl);
+
+      Node* result = math->result_node();
+      if (result != NULL) {
+        for (DUIterator_Fast jmax, j = result->fast_outs(jmax); j < jmax; j++) {
+          Node* out = result->fast_out(j);
+          // Phi nodes shouldn't be moved. They would only match below if they
+          // had the same control as the MathExactNode. The only time that
+          // would happen is if the Phi is also an input to the MathExact
+          if (!out->is_Phi()) {
+            if (out->in(0) == NULL) {
+              out->set_req(0, non_throwing);
+            } else if (out->in(0) == ctrl) {
+              out->set_req(0, non_throwing);
+            }
+          }
+        }
+      }
+    }
+    break;
   default:
     assert( !n->is_Call(), "" );
     assert( !n->is_Mem(), "" );
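For orientation only: the Op_FlagsProj case above pins users of the overflow-checking math result to the non-throwing branch. The sketch below is an editorial illustration of that re-pinning rule using self-contained toy types; ToyNode, pin_result_users and every other name here are hypothetical and are not HotSpot code. Non-Phi users with no control, or whose control is still the flag producer's control, move to the non-throwing branch; Phi users are left alone.

// Toy illustration of the control re-pinning rule from the Op_FlagsProj case.
#include <cstdio>
#include <vector>

struct ToyNode {
  const char*           name;
  bool                  is_phi;
  ToyNode*              ctrl;   // control input (slot 0 in the real IR)
  std::vector<ToyNode*> outs;   // users of this node's value
};

// Move every non-Phi user of 'result' that has no control, or whose control is
// still the flag producer's control, onto the non-throwing branch.
static void pin_result_users(ToyNode* result, ToyNode* old_ctrl, ToyNode* non_throwing) {
  for (ToyNode* out : result->outs) {
    if (!out->is_phi && (out->ctrl == nullptr || out->ctrl == old_ctrl)) {
      out->ctrl = non_throwing;
    }
  }
}

int main() {
  ToyNode old_ctrl     = {"old_ctrl",     false, nullptr, {}};
  ToyNode non_throwing = {"non_throwing", false, nullptr, {}};
  ToyNode use_a        = {"use_a", false, nullptr,   {}};  // no control yet -> re-pinned
  ToyNode use_b        = {"use_b", false, &old_ctrl, {}};  // controlled by flag producer -> re-pinned
  ToyNode phi          = {"phi",   true,  &old_ctrl, {}};  // Phi -> left alone
  ToyNode result       = {"result", false, nullptr, {&use_a, &use_b, &phi}};
  pin_result_users(&result, &old_ctrl, &non_throwing);
  std::printf("use_a->%s use_b->%s phi->%s\n",
              use_a.ctrl->name, use_b.ctrl->name, phi.ctrl->name);
  return 0;
}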
@@ -3021,7 +3081,7 @@ void Compile::final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_

   // Skip next transformation if compressed oops are not used.
   if ((UseCompressedOops && !Matcher::gen_narrow_oop_implicit_null_checks()) ||
-      (!UseCompressedOops && !UseCompressedKlassPointers))
+      (!UseCompressedOops && !UseCompressedClassPointers))
     return;

   // Go over safepoints nodes to skip DecodeN/DecodeNKlass nodes for debug edges.
@@ -3599,7 +3659,7 @@ void Compile::ConstantTable::fill_jump_table(CodeBuffer& cb, MachConstantNode* n
 }

 void Compile::dump_inlining() {
-  if (PrintInlining || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
+  if (print_inlining() || print_intrinsics()) {
     // Print inlining message for candidates that we couldn't inline
     // for lack of space or non constant receiver
     for (int i = 0; i < _late_inlines.length(); i++) {
@@ -3623,7 +3683,7 @@ void Compile::dump_inlining() {
       }
     }
     for (int i = 0; i < _print_inlining_list->length(); i++) {
-      tty->print(_print_inlining_list->at(i).ss()->as_string());
+      tty->print(_print_inlining_list->adr_at(i)->ss()->as_string());
     }
   }
 }
@@ -3751,6 +3811,45 @@ void Compile::add_expensive_node(Node * n) {
   }
 }

+/**
+ * Remove the speculative part of types and clean up the graph
+ */
+void Compile::remove_speculative_types(PhaseIterGVN &igvn) {
+  if (UseTypeSpeculation) {
+    Unique_Node_List worklist;
+    worklist.push(root());
+    int modified = 0;
+    // Go over all type nodes that carry a speculative type, drop the
+    // speculative part of the type and enqueue the node for an igvn
+    // which may optimize it out.
+    for (uint next = 0; next < worklist.size(); ++next) {
+      Node *n = worklist.at(next);
+      if (n->is_Type() && n->as_Type()->type()->isa_oopptr() != NULL &&
+          n->as_Type()->type()->is_oopptr()->speculative() != NULL) {
+        TypeNode* tn = n->as_Type();
+        const TypeOopPtr* t = tn->type()->is_oopptr();
+        bool in_hash = igvn.hash_delete(n);
+        assert(in_hash, "node should be in igvn hash table");
+        tn->set_type(t->remove_speculative());
+        igvn.hash_insert(n);
+        igvn._worklist.push(n); // give it a chance to go away
+        modified++;
+      }
+      uint max = n->len();
+      for( uint i = 0; i < max; ++i ) {
+        Node *m = n->in(i);
+        if (not_a_node(m)) continue;
+        worklist.push(m);
+      }
+    }
+    // Drop the speculative part of all types in the igvn's type table
+    igvn.remove_speculative_types();
+    if (modified > 0) {
+      igvn.optimize();
+    }
+  }
+}
+
 // Auxiliary method to support randomized stressing/fuzzing.
 //
 // This method can be called the arbitrary number of times, with current count
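As an aside, the traversal in remove_speculative_types follows a standard visit-once worklist walk over node inputs. The sketch below is an editorial illustration of that pattern with toy types; ToyNode, strip_speculative and the other names are hypothetical, not HotSpot code, and the real pass additionally re-hashes modified nodes, re-enqueues them for IGVN, and clears the speculative parts from the IGVN type table.

// Toy illustration of the visit-once worklist traversal used above.
#include <cstdio>
#include <unordered_set>
#include <vector>

struct ToyNode {
  bool                  speculative;  // stands in for an attached speculative type
  std::vector<ToyNode*> inputs;
};

// Walk the graph once starting at 'root' and drop the speculative tag wherever
// it is found; return how many nodes were modified.
static int strip_speculative(ToyNode* root) {
  std::vector<ToyNode*>        worklist{root};
  std::unordered_set<ToyNode*> visited{root};   // the "unique" part of the worklist
  int modified = 0;
  for (size_t next = 0; next < worklist.size(); ++next) {
    ToyNode* n = worklist[next];
    if (n->speculative) {
      n->speculative = false;
      ++modified;
    }
    for (ToyNode* in : n->inputs) {
      if (in != nullptr && visited.insert(in).second) {
        worklist.push_back(in);
      }
    }
  }
  return modified;
}

int main() {
  ToyNode a{true, {}};
  ToyNode b{false, {&a}};
  ToyNode root{true, {&b, &a}};
  std::printf("modified %d nodes\n", strip_speculative(&root));  // prints: modified 2 nodes
  return 0;
}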