8257211: C2: Enable call devirtualization during post-parse phase
Reviewed-by: kvn, neliasso, thartmann

Parent: 149a02f99a
Commit: 62c7788b29
15 changed files with 628 additions and 265 deletions
@@ -494,8 +494,10 @@ void CompilerConfig::ergo_initialize() {
   if (!EliminateLocks) {
     EliminateNestedLocks = false;
   }
-  if (!Inline) {
+  if (!Inline || !IncrementalInline) {
     IncrementalInline = false;
+    IncrementalInlineMH = false;
+    IncrementalInlineVirtual = false;
   }
 #ifndef PRODUCT
   if (!IncrementalInline) {
@@ -713,6 +713,12 @@
   product(bool, IncrementalInline, true,                      \
           "do post parse inlining")                           \
                                                               \
+  product(bool, IncrementalInlineMH, true, DIAGNOSTIC,        \
+          "do post parse inlining of method handle calls")    \
+                                                              \
+  product(bool, IncrementalInlineVirtual, true, DIAGNOSTIC,   \
+          "do post parse inlining of virtual calls")          \
+                                                              \
   develop(bool, AlwaysIncrementalInline, false,               \
           "do all inlining incrementally")                    \
                                                               \
@@ -121,6 +121,9 @@ class DirectCallGenerator : public CallGenerator {
   // paths to facilitate late inlinig.
   bool _separate_io_proj;

+protected:
+  void set_call_node(CallStaticJavaNode* call) { _call_node = call; }
+
 public:
   DirectCallGenerator(ciMethod* method, bool separate_io_proj)
     : CallGenerator(method),
@@ -129,7 +132,12 @@ class DirectCallGenerator : public CallGenerator {
   }
   virtual JVMState* generate(JVMState* jvms);

-  CallStaticJavaNode* call_node() const { return _call_node; }
+  virtual CallNode* call_node() const { return _call_node; }
+  virtual CallGenerator* with_call_node(CallNode* call) {
+    DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
+    dcg->set_call_node(call->as_CallStaticJava());
+    return dcg;
+  }
 };

 JVMState* DirectCallGenerator::generate(JVMState* jvms) {
@@ -179,15 +187,30 @@ JVMState* DirectCallGenerator::generate(JVMState* jvms) {
 class VirtualCallGenerator : public CallGenerator {
 private:
   int _vtable_index;
+  bool _separate_io_proj;
+  CallDynamicJavaNode* _call_node;
+
+protected:
+  void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }
+
 public:
-  VirtualCallGenerator(ciMethod* method, int vtable_index)
-    : CallGenerator(method), _vtable_index(vtable_index)
+  VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
+    : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(NULL)
   {
     assert(vtable_index == Method::invalid_vtable_index ||
            vtable_index >= 0, "either invalid or usable");
   }
   virtual bool is_virtual() const { return true; }
   virtual JVMState* generate(JVMState* jvms);
+
+  virtual CallNode* call_node() const { return _call_node; }
+  int vtable_index() const { return _vtable_index; }
+
+  virtual CallGenerator* with_call_node(CallNode* call) {
+    VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
+    cg->set_call_node(call->as_CallDynamicJava());
+    return cg;
+  }
 };

 JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
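Illustration (not part of the patch): the with_call_node() factories above exist because C2 can clone a call node, and a cloned call must not share its CallGenerator with the original. A minimal standalone C++ sketch of that re-binding pattern, with hypothetical names:

    #include <cassert>

    struct CallSite;                       // stands in for a C2 CallNode

    // Stands in for a CallGenerator: it remembers which call site it is
    // currently attached to and can produce a copy bound to another site.
    struct Generator {
      CallSite* site;
      Generator* with_call_site(CallSite* s) const {
        Generator* g = new Generator(*this);   // same kind of generator...
        g->site = s;                           // ...bound to the clone
        return g;
      }
    };

    struct CallSite {
      Generator* gen = nullptr;
      CallSite* clone() const {
        CallSite* c = new CallSite(*this);
        if (gen != nullptr) {
          c->gen = gen->with_call_site(c);     // never share the generator
        }
        return c;
      }
    };

    int main() {
      CallSite original;
      original.gen = new Generator{&original};
      CallSite* copy = original.clone();
      assert(original.gen->site == &original);
      assert(copy->gen->site == copy);
      return 0;
    }

Node::clone() further down in this change does the equivalent for CallNode: it asks the attached generator for a copy via with_call_node() and installs that copy on the cloned node.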
@ -250,9 +273,11 @@ JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
|
|||
// make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
|
||||
call->set_override_symbolic_info(true);
|
||||
}
|
||||
_call_node = call; // Save the call node in case we need it later
|
||||
|
||||
kit.set_arguments_for_java_call(call);
|
||||
kit.set_edges_for_java_call(call);
|
||||
Node* ret = kit.set_results_for_java_call(call);
|
||||
kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
|
||||
Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
|
||||
kit.push_node(method()->return_type()->basic_type(), ret);
|
||||
|
||||
// Represent the effect of an implicit receiver null_check
|
||||
|
@ -285,7 +310,7 @@ CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj
|
|||
CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
|
||||
assert(!m->is_static(), "for_virtual_call mismatch");
|
||||
assert(!m->is_method_handle_intrinsic(), "should be a direct call");
|
||||
return new VirtualCallGenerator(m, vtable_index);
|
||||
return new VirtualCallGenerator(m, vtable_index, false /*separate_io_projs*/);
|
||||
}
|
||||
|
||||
// Allow inlining decisions to be delayed
|
||||
|
@ -296,7 +321,9 @@ class LateInlineCallGenerator : public DirectCallGenerator {
|
|||
|
||||
protected:
|
||||
CallGenerator* _inline_cg;
|
||||
virtual bool do_late_inline_check(JVMState* jvms) { return true; }
|
||||
virtual bool do_late_inline_check(Compile* C, JVMState* jvms) { return true; }
|
||||
virtual CallGenerator* inline_cg() const { return _inline_cg; }
|
||||
virtual bool is_pure_call() const { return _is_pure_call; }
|
||||
|
||||
public:
|
||||
LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg, bool is_pure_call = false) :
|
||||
|
@ -341,11 +368,197 @@ class LateInlineCallGenerator : public DirectCallGenerator {
|
|||
virtual jlong unique_id() const {
|
||||
return _unique_id;
|
||||
}
|
||||
|
||||
virtual CallGenerator* with_call_node(CallNode* call) {
|
||||
LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
|
||||
cg->set_call_node(call->as_CallStaticJava());
|
||||
return cg;
|
||||
}
|
||||
};
|
||||
|
||||
CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
|
||||
return new LateInlineCallGenerator(method, inline_cg);
|
||||
}
|
||||
|
||||
class LateInlineMHCallGenerator : public LateInlineCallGenerator {
|
||||
ciMethod* _caller;
|
||||
bool _input_not_const;
|
||||
|
||||
virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
|
||||
|
||||
public:
|
||||
LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
|
||||
LateInlineCallGenerator(callee, NULL), _caller(caller), _input_not_const(input_not_const) {}
|
||||
|
||||
virtual bool is_mh_late_inline() const { return true; }
|
||||
|
||||
// Convert the CallStaticJava into an inline
|
||||
virtual void do_late_inline();
|
||||
|
||||
virtual JVMState* generate(JVMState* jvms) {
|
||||
JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);
|
||||
|
||||
Compile* C = Compile::current();
|
||||
if (_input_not_const) {
|
||||
// inlining won't be possible so no need to enqueue right now.
|
||||
call_node()->set_generator(this);
|
||||
} else {
|
||||
C->add_late_inline(this);
|
||||
}
|
||||
return new_jvms;
|
||||
}
|
||||
|
||||
virtual CallGenerator* with_call_node(CallNode* call) {
|
||||
LateInlineMHCallGenerator* cg = new LateInlineMHCallGenerator(_caller, method(), _input_not_const);
|
||||
cg->set_call_node(call->as_CallStaticJava());
|
||||
return cg;
|
||||
}
|
||||
};
|
||||
|
||||
bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
|
||||
// Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
|
||||
bool allow_inline = C->inlining_incrementally();
|
||||
CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, _input_not_const);
|
||||
assert(!_input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place
|
||||
|
||||
if (cg != NULL) {
|
||||
assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late inlining");
|
||||
_inline_cg = cg;
|
||||
C->dec_number_of_mh_late_inlines();
|
||||
return true;
|
||||
} else {
|
||||
// Method handle call which has a constant appendix argument should be either inlined or replaced with a direct call
|
||||
// unless there's a signature mismatch between caller and callee. If the failure occurs, there's not much to be improved later,
|
||||
// so don't reinstall the generator to avoid pushing the generator between IGVN and incremental inlining indefinitely.
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
|
||||
assert(IncrementalInlineMH, "required");
|
||||
Compile::current()->inc_number_of_mh_late_inlines();
|
||||
CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
|
||||
return cg;
|
||||
}
|
||||
|
||||
// Allow inlining decisions to be delayed
|
||||
class LateInlineVirtualCallGenerator : public VirtualCallGenerator {
|
||||
private:
|
||||
jlong _unique_id; // unique id for log compilation
|
||||
CallGenerator* _inline_cg;
|
||||
ciMethod* _callee;
|
||||
bool _is_pure_call;
|
||||
float _prof_factor;
|
||||
|
||||
protected:
|
||||
virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
|
||||
virtual CallGenerator* inline_cg() const { return _inline_cg; }
|
||||
virtual bool is_pure_call() const { return _is_pure_call; }
|
||||
|
||||
public:
|
||||
LateInlineVirtualCallGenerator(ciMethod* method, int vtable_index, float prof_factor)
|
||||
: VirtualCallGenerator(method, vtable_index, true /*separate_io_projs*/),
|
||||
_unique_id(0), _inline_cg(NULL), _callee(NULL), _is_pure_call(false), _prof_factor(prof_factor) {}
|
||||
|
||||
virtual bool is_late_inline() const { return true; }
|
||||
|
||||
virtual bool is_virtual_late_inline() const { return true; }
|
||||
|
||||
// Convert the CallDynamicJava into an inline
|
||||
virtual void do_late_inline();
|
||||
|
||||
virtual void set_callee_method(ciMethod* m) {
|
||||
assert(_callee == NULL, "repeated inlining attempt");
|
||||
_callee = m;
|
||||
}
|
||||
|
||||
virtual JVMState* generate(JVMState* jvms) {
|
||||
// Emit the CallDynamicJava and request separate projections so
|
||||
// that the late inlining logic can distinguish between fall
|
||||
// through and exceptional uses of the memory and io projections
|
||||
// as is done for allocations and macro expansion.
|
||||
JVMState* new_jvms = VirtualCallGenerator::generate(jvms);
|
||||
if (call_node() != NULL) {
|
||||
call_node()->set_generator(this);
|
||||
}
|
||||
return new_jvms;
|
||||
}
|
||||
|
||||
virtual void print_inlining_late(const char* msg) {
|
||||
CallNode* call = call_node();
|
||||
Compile* C = Compile::current();
|
||||
C->print_inlining_assert_ready();
|
||||
C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
|
||||
C->print_inlining_move_to(this);
|
||||
C->print_inlining_update_delayed(this);
|
||||
}
|
||||
|
||||
virtual void set_unique_id(jlong id) {
|
||||
_unique_id = id;
|
||||
}
|
||||
|
||||
virtual jlong unique_id() const {
|
||||
return _unique_id;
|
||||
}
|
||||
|
||||
virtual CallGenerator* with_call_node(CallNode* call) {
|
||||
LateInlineVirtualCallGenerator* cg = new LateInlineVirtualCallGenerator(method(), vtable_index(), _prof_factor);
|
||||
cg->set_call_node(call->as_CallDynamicJava());
|
||||
return cg;
|
||||
}
|
||||
};
|
||||
|
||||
bool LateInlineVirtualCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
|
||||
// Method handle linker case is handled in CallDynamicJavaNode::Ideal().
|
||||
// Unless inlining is performed, _override_symbolic_info bit will be set in DirectCallGenerator::generate().
|
||||
|
||||
// Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
|
||||
bool allow_inline = C->inlining_incrementally();
|
||||
CallGenerator* cg = C->call_generator(_callee,
|
||||
vtable_index(),
|
||||
false /*call_does_dispatch*/,
|
||||
jvms,
|
||||
allow_inline,
|
||||
_prof_factor,
|
||||
NULL /*speculative_receiver_type*/,
|
||||
true /*allow_intrinsics*/);
|
||||
|
||||
if (cg != NULL) {
|
||||
assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late inlining");
|
||||
_inline_cg = cg;
|
||||
return true;
|
||||
} else {
|
||||
// Virtual call which provably doesn't dispatch should be either inlined or replaced with a direct call.
|
||||
assert(false, "no progress");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
CallGenerator* CallGenerator::for_late_inline_virtual(ciMethod* m, int vtable_index, float prof_factor) {
|
||||
assert(IncrementalInlineVirtual, "required");
|
||||
assert(!m->is_static(), "for_virtual_call mismatch");
|
||||
assert(!m->is_method_handle_intrinsic(), "should be a direct call");
|
||||
return new LateInlineVirtualCallGenerator(m, vtable_index, prof_factor);
|
||||
}
|
||||
|
||||
void LateInlineCallGenerator::do_late_inline() {
|
||||
CallGenerator::do_late_inline_helper();
|
||||
}
|
||||
|
||||
void LateInlineMHCallGenerator::do_late_inline() {
|
||||
CallGenerator::do_late_inline_helper();
|
||||
}
|
||||
|
||||
void LateInlineVirtualCallGenerator::do_late_inline() {
|
||||
assert(_callee != NULL, "required"); // set up in CallDynamicJavaNode::Ideal
|
||||
CallGenerator::do_late_inline_helper();
|
||||
}
|
||||
|
||||
void CallGenerator::do_late_inline_helper() {
|
||||
assert(is_late_inline(), "only late inline allowed");
|
||||
|
||||
// Can't inline it
|
||||
CallStaticJavaNode* call = call_node();
|
||||
CallNode* call = call_node();
|
||||
if (call == NULL || call->outcnt() == 0 ||
|
||||
call->in(0) == NULL || call->in(0)->is_top()) {
|
||||
return;
|
||||
|
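Illustration (not part of the patch) of the queueing discipline used above: LateInlineMHCallGenerator::generate() either puts itself on the compile-wide late-inline list right away, or, when the MethodHandle argument is not yet a constant, stays attached to the call node via set_generator(); CallStaticJavaNode::Ideal() later pushes it to the front of that list once IGVN has turned the argument into a constant. A simplified standalone C++ sketch of this park-then-promote pattern, with made-up names:

    #include <deque>
    #include <cassert>

    struct LateTask;

    struct CallSite {
      bool arg_is_constant = false;
      LateTask* parked = nullptr;        // plays the role of CallNode::generator()
    };

    struct LateTask {
      CallSite* site;
    };

    std::deque<LateTask*> late_queue;    // plays the role of Compile::_late_inlines

    // "generate": queue now if the argument is already constant, otherwise park.
    void emit_call(LateTask* t) {
      if (t->site->arg_is_constant) {
        late_queue.push_back(t);
      } else {
        t->site->parked = t;
      }
    }

    // "Ideal": the argument became constant; promote the parked task to the
    // front so it is handled in the current inlining round.
    void on_arg_became_constant(CallSite* s) {
      if (s->parked != nullptr) {
        late_queue.push_front(s->parked);
        s->parked = nullptr;
      }
    }

    int main() {
      CallSite c;
      LateTask t{&c};
      emit_call(&t);                     // parked: argument not constant yet
      assert(late_queue.empty());
      c.arg_is_constant = true;
      on_arg_became_constant(&c);        // now first in line
      assert(late_queue.front() == &t);
      return 0;
    }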
@ -373,12 +586,12 @@ void LateInlineCallGenerator::do_late_inline() {
|
|||
// check for unreachable loop
|
||||
CallProjections callprojs;
|
||||
call->extract_projections(&callprojs, true);
|
||||
if (callprojs.fallthrough_catchproj == call->in(0) ||
|
||||
callprojs.catchall_catchproj == call->in(0) ||
|
||||
callprojs.fallthrough_memproj == call->in(TypeFunc::Memory) ||
|
||||
callprojs.catchall_memproj == call->in(TypeFunc::Memory) ||
|
||||
callprojs.fallthrough_ioproj == call->in(TypeFunc::I_O) ||
|
||||
callprojs.catchall_ioproj == call->in(TypeFunc::I_O) ||
|
||||
if ((callprojs.fallthrough_catchproj == call->in(0)) ||
|
||||
(callprojs.catchall_catchproj == call->in(0)) ||
|
||||
(callprojs.fallthrough_memproj == call->in(TypeFunc::Memory)) ||
|
||||
(callprojs.catchall_memproj == call->in(TypeFunc::Memory)) ||
|
||||
(callprojs.fallthrough_ioproj == call->in(TypeFunc::I_O)) ||
|
||||
(callprojs.catchall_ioproj == call->in(TypeFunc::I_O)) ||
|
||||
(callprojs.resproj != NULL && call->find_edge(callprojs.resproj) != -1) ||
|
||||
(callprojs.exobj != NULL && call->find_edge(callprojs.exobj) != -1)) {
|
||||
return;
|
||||
|
@ -391,7 +604,7 @@ void LateInlineCallGenerator::do_late_inline() {
|
|||
}
|
||||
|
||||
bool result_not_used = (callprojs.resproj == NULL || callprojs.resproj->outcnt() == 0);
|
||||
if (_is_pure_call && result_not_used) {
|
||||
if (is_pure_call() && result_not_used) {
|
||||
// The call is marked as pure (no important side effects), but result isn't used.
|
||||
// It's safe to remove the call.
|
||||
GraphKit kit(call->jvms());
|
||||
|
@ -434,10 +647,10 @@ void LateInlineCallGenerator::do_late_inline() {
|
|||
|
||||
C->log_late_inline(this);
|
||||
|
||||
// This check is done here because for_method_handle_inline() method
|
||||
// needs jvms for inlined state.
|
||||
if (!do_late_inline_check(jvms)) {
|
||||
// JVMState is ready, so time to perform some checks and prepare for inlining attempt.
|
||||
if (!do_late_inline_check(C, jvms)) {
|
||||
map->disconnect_inputs(C);
|
||||
C->print_inlining_update_delayed(this);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -449,8 +662,15 @@ void LateInlineCallGenerator::do_late_inline() {
|
|||
C->set_default_node_notes(entry_nn);
|
||||
}
|
||||
|
||||
// Virtual call involves a receiver null check which can be made implicit.
|
||||
if (is_virtual_late_inline()) {
|
||||
GraphKit kit(jvms);
|
||||
kit.null_check_receiver();
|
||||
jvms = kit.transfer_exceptions_into_jvms();
|
||||
}
|
||||
|
||||
// Now perform the inlining using the synthesized JVMState
|
||||
JVMState* new_jvms = _inline_cg->generate(jvms);
|
||||
JVMState* new_jvms = inline_cg()->generate(jvms);
|
||||
if (new_jvms == NULL) return; // no change
|
||||
if (C->failing()) return;
|
||||
|
||||
|
@ -464,73 +684,16 @@ void LateInlineCallGenerator::do_late_inline() {
|
|||
result = (result_size == 1) ? kit.pop() : kit.pop_pair();
|
||||
}
|
||||
|
||||
C->env()->notice_inlined_method(_inline_cg->method());
|
||||
if (inline_cg()->is_inline()) {
|
||||
C->set_has_loops(C->has_loops() || inline_cg()->method()->has_loops());
|
||||
C->env()->notice_inlined_method(inline_cg()->method());
|
||||
}
|
||||
C->set_inlining_progress(true);
|
||||
C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
|
||||
kit.replace_call(call, result, true);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
|
||||
return new LateInlineCallGenerator(method, inline_cg);
|
||||
}
|
||||
|
||||
class LateInlineMHCallGenerator : public LateInlineCallGenerator {
|
||||
ciMethod* _caller;
|
||||
int _attempt;
|
||||
bool _input_not_const;
|
||||
|
||||
virtual bool do_late_inline_check(JVMState* jvms);
|
||||
virtual bool already_attempted() const { return _attempt > 0; }
|
||||
|
||||
public:
|
||||
LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
|
||||
LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}
|
||||
|
||||
virtual bool is_mh_late_inline() const { return true; }
|
||||
|
||||
virtual JVMState* generate(JVMState* jvms) {
|
||||
JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);
|
||||
|
||||
Compile* C = Compile::current();
|
||||
if (_input_not_const) {
|
||||
// inlining won't be possible so no need to enqueue right now.
|
||||
call_node()->set_generator(this);
|
||||
} else {
|
||||
C->add_late_inline(this);
|
||||
}
|
||||
return new_jvms;
|
||||
}
|
||||
};
|
||||
|
||||
bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {
|
||||
|
||||
CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);
|
||||
|
||||
Compile::current()->print_inlining_update_delayed(this);
|
||||
|
||||
if (!_input_not_const) {
|
||||
_attempt++;
|
||||
}
|
||||
|
||||
if (cg != NULL && cg->is_inline()) {
|
||||
assert(!cg->is_late_inline(), "we're doing late inlining");
|
||||
_inline_cg = cg;
|
||||
Compile::current()->dec_number_of_mh_late_inlines();
|
||||
return true;
|
||||
}
|
||||
|
||||
call_node()->set_generator(this);
|
||||
return false;
|
||||
}
|
||||
|
||||
CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
|
||||
Compile::current()->inc_number_of_mh_late_inlines();
|
||||
CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
|
||||
return cg;
|
||||
}
|
||||
|
||||
class LateInlineStringCallGenerator : public LateInlineCallGenerator {
|
||||
|
||||
public:
|
||||
|
@ -549,6 +712,12 @@ class LateInlineStringCallGenerator : public LateInlineCallGenerator {
|
|||
}
|
||||
|
||||
virtual bool is_string_late_inline() const { return true; }
|
||||
|
||||
virtual CallGenerator* with_call_node(CallNode* call) {
|
||||
LateInlineStringCallGenerator* cg = new LateInlineStringCallGenerator(method(), _inline_cg);
|
||||
cg->set_call_node(call->as_CallStaticJava());
|
||||
return cg;
|
||||
}
|
||||
};
|
||||
|
||||
CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
|
||||
|
@ -571,6 +740,12 @@ class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {
|
|||
JVMState* new_jvms = DirectCallGenerator::generate(jvms);
|
||||
return new_jvms;
|
||||
}
|
||||
|
||||
virtual CallGenerator* with_call_node(CallNode* call) {
|
||||
LateInlineBoxingCallGenerator* cg = new LateInlineBoxingCallGenerator(method(), _inline_cg);
|
||||
cg->set_call_node(call->as_CallStaticJava());
|
||||
return cg;
|
||||
}
|
||||
};
|
||||
|
||||
CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
|
||||
|
@ -593,6 +768,12 @@ class LateInlineVectorReboxingCallGenerator : public LateInlineCallGenerator {
|
|||
JVMState* new_jvms = DirectCallGenerator::generate(jvms);
|
||||
return new_jvms;
|
||||
}
|
||||
|
||||
virtual CallGenerator* with_call_node(CallNode* call) {
|
||||
LateInlineVectorReboxingCallGenerator* cg = new LateInlineVectorReboxingCallGenerator(method(), _inline_cg);
|
||||
cg->set_call_node(call->as_CallStaticJava());
|
||||
return cg;
|
||||
}
|
||||
};
|
||||
|
||||
// static CallGenerator* for_vector_reboxing_late_inline(ciMethod* m, CallGenerator* inline_cg);
|
||||
|
@ -850,10 +1031,10 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
|
|||
}
|
||||
|
||||
|
||||
CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee) {
|
||||
CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
|
||||
assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
|
||||
bool input_not_const;
|
||||
CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const);
|
||||
CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
|
||||
Compile* C = Compile::current();
|
||||
if (cg != NULL) {
|
||||
if (AlwaysIncrementalInline) {
|
||||
|
@ -866,7 +1047,7 @@ CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* c
|
|||
ciCallProfile profile = caller->call_profile_at_bci(bci);
|
||||
int call_site_count = caller->scale_count(profile.count());
|
||||
|
||||
if (IncrementalInline && call_site_count > 0 &&
|
||||
if (IncrementalInlineMH && call_site_count > 0 &&
|
||||
(input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
|
||||
return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
|
||||
} else {
|
||||
|
@ -900,12 +1081,15 @@ JVMState* NativeCallGenerator::generate(JVMState* jvms) {
|
|||
return kit.transfer_exceptions_into_jvms();
|
||||
}
|
||||
|
||||
CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
|
||||
CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const) {
|
||||
GraphKit kit(jvms);
|
||||
PhaseGVN& gvn = kit.gvn();
|
||||
Compile* C = kit.C;
|
||||
vmIntrinsics::ID iid = callee->intrinsic_id();
|
||||
input_not_const = true;
|
||||
if (StressMethodHandleLinkerInlining) {
|
||||
allow_inline = false;
|
||||
}
|
||||
switch (iid) {
|
||||
case vmIntrinsics::_invokeBasic:
|
||||
{
|
||||
|
@ -926,7 +1110,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
|
|||
CallGenerator* cg = C->call_generator(target, vtable_index,
|
||||
false /* call_does_dispatch */,
|
||||
jvms,
|
||||
true /* allow_inline */,
|
||||
allow_inline,
|
||||
PROB_ALWAYS);
|
||||
return cg;
|
||||
} else {
|
||||
|
@ -1002,7 +1186,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
|
|||
// optimize_virtual_call() takes 2 different holder
|
||||
// arguments for a corner case that doesn't apply here (see
|
||||
// Parse::do_call())
|
||||
target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
|
||||
target = C->optimize_virtual_call(caller, klass, klass,
|
||||
target, receiver_type, is_virtual,
|
||||
call_does_dispatch, vtable_index, // out-parameters
|
||||
false /* check_access */);
|
||||
|
@ -1011,7 +1195,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
|
|||
speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
|
||||
}
|
||||
CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
|
||||
!StressMethodHandleLinkerInlining /* allow_inline */,
|
||||
allow_inline,
|
||||
PROB_ALWAYS,
|
||||
speculative_receiver_type);
|
||||
return cg;
|
||||
|
@ -1026,6 +1210,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
|
|||
{
|
||||
Node* nep = kit.argument(callee->arg_size() - 1);
|
||||
if (nep->Opcode() == Op_ConP) {
|
||||
input_not_const = false;
|
||||
const TypeOopPtr* oop_ptr = nep->bottom_type()->is_oopptr();
|
||||
ciNativeEntryPoint* nep = oop_ptr->const_oop()->as_native_entry_point();
|
||||
return new NativeCallGenerator(callee, nep);
|
||||
|
@ -1060,7 +1245,7 @@ public:
|
|||
}
|
||||
|
||||
virtual bool is_virtual() const { return true; }
|
||||
virtual bool is_inlined() const { return true; }
|
||||
virtual bool is_inline() const { return true; }
|
||||
virtual bool is_intrinsic() const { return true; }
|
||||
|
||||
virtual JVMState* generate(JVMState* jvms);
|
||||
|
|
|
@ -42,6 +42,12 @@ class CallGenerator : public ResourceObj {
|
|||
protected:
|
||||
CallGenerator(ciMethod* method) : _method(method) {}
|
||||
|
||||
void do_late_inline_helper();
|
||||
|
||||
virtual bool do_late_inline_check(Compile* C, JVMState* jvms) { ShouldNotReachHere(); return false; }
|
||||
virtual CallGenerator* inline_cg() const { ShouldNotReachHere(); return NULL; }
|
||||
virtual bool is_pure_call() const { ShouldNotReachHere(); return false; }
|
||||
|
||||
public:
|
||||
// Accessors
|
||||
ciMethod* method() const { return _method; }
|
||||
|
@ -69,21 +75,28 @@ class CallGenerator : public ResourceObj {
|
|||
// same but for method handle calls
|
||||
virtual bool is_mh_late_inline() const { return false; }
|
||||
virtual bool is_string_late_inline() const { return false; }
|
||||
|
||||
// for method handle calls: have we tried inlinining the call already?
|
||||
virtual bool already_attempted() const { ShouldNotReachHere(); return false; }
|
||||
virtual bool is_virtual_late_inline() const { return false; }
|
||||
|
||||
// Replace the call with an inline version of the code
|
||||
virtual void do_late_inline() { ShouldNotReachHere(); }
|
||||
|
||||
virtual CallStaticJavaNode* call_node() const { ShouldNotReachHere(); return NULL; }
|
||||
virtual CallNode* call_node() const { return NULL; }
|
||||
virtual CallGenerator* with_call_node(CallNode* call) { return this; }
|
||||
|
||||
virtual void set_unique_id(jlong id) { fatal("unique id only for late inlines"); };
|
||||
virtual jlong unique_id() const { fatal("unique id only for late inlines"); return 0; };
|
||||
|
||||
virtual void set_callee_method(ciMethod* callee) { ShouldNotReachHere(); }
|
||||
|
||||
// Note: It is possible for a CG to be both inline and virtual.
|
||||
// (The hashCode intrinsic does a vtable check and an inlined fast path.)
|
||||
|
||||
// Allocate CallGenerators only in Compile arena since some of them are referenced from CallNodes.
|
||||
void* operator new(size_t size) throw() {
|
||||
Compile* C = Compile::current();
|
||||
return ResourceObj::operator new(size, C->comp_arena());
|
||||
}
|
||||
|
||||
// Utilities:
|
||||
const TypeFunc* tf() const;
|
||||
|
||||
|
@ -119,8 +132,8 @@ class CallGenerator : public ResourceObj {
|
|||
static CallGenerator* for_direct_call(ciMethod* m, bool separate_io_projs = false); // static, special
|
||||
static CallGenerator* for_virtual_call(ciMethod* m, int vtable_index); // virtual, interface
|
||||
|
||||
static CallGenerator* for_method_handle_call( JVMState* jvms, ciMethod* caller, ciMethod* callee);
|
||||
static CallGenerator* for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const);
|
||||
static CallGenerator* for_method_handle_call( JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline);
|
||||
static CallGenerator* for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const);
|
||||
|
||||
// How to generate a replace a direct call with an inline version
|
||||
static CallGenerator* for_late_inline(ciMethod* m, CallGenerator* inline_cg);
|
||||
|
@ -134,6 +147,8 @@ class CallGenerator : public ResourceObj {
|
|||
CallGenerator* if_cold,
|
||||
CallGenerator* if_hot);
|
||||
|
||||
static CallGenerator* for_late_inline_virtual(ciMethod* m, int vtable_index, float expected_uses);
|
||||
|
||||
// How to make a call that optimistically assumes a receiver type:
|
||||
static CallGenerator* for_predicted_call(ciKlass* predicted_receiver,
|
||||
CallGenerator* if_missed,
|
||||
|
|
|
@@ -627,7 +627,7 @@ JVMState* JVMState::clone_deep(Compile* C) const {
  * Reset map for all callers
  */
 void JVMState::set_map_deep(SafePointNode* map) {
-  for (JVMState* p = this; p->_caller != NULL; p = p->_caller) {
+  for (JVMState* p = this; p != NULL; p = p->_caller) {
     p->set_map(map);
   }
 }
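Illustration (not part of the patch): the loop-bound change above is a real fix; the old condition p->_caller != NULL stopped before the outermost JVMState, so that frame never had its map reset. A standalone sketch of the same off-by-one on a plain caller chain:

    #include <cassert>

    struct Frame {
      Frame* caller = nullptr;
      int map = 0;
    };

    // Visit every frame, including the outermost one whose caller is null.
    // The old loop bound (p->caller != nullptr) skipped that last frame.
    void set_map_deep(Frame* self, int map) {
      for (Frame* p = self; p != nullptr; p = p->caller) {
        p->map = map;
      }
    }

    int main() {
      Frame outer;                 // outermost frame: caller == nullptr
      Frame inner;
      inner.caller = &outer;
      set_map_deep(&inner, 42);
      assert(outer.map == 42);     // would fail with the old loop bound
      return 0;
    }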
@@ -943,24 +943,14 @@ void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj
 }

 Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) {
+#ifdef ASSERT
+  // Validate attached generator
   CallGenerator* cg = generator();
-  if (can_reshape && cg != NULL && cg->is_mh_late_inline() && !cg->already_attempted()) {
-    // Check whether this MH handle call becomes a candidate for inlining
-    ciMethod* callee = cg->method();
-    vmIntrinsics::ID iid = callee->intrinsic_id();
-    if (iid == vmIntrinsics::_invokeBasic) {
-      if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
-        phase->C->prepend_late_inline(cg);
-        set_generator(NULL);
-      }
-    } else {
-      assert(callee->has_member_arg(), "wrong type of call?");
-      if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
-        phase->C->prepend_late_inline(cg);
-        set_generator(NULL);
-      }
-    }
-  }
+  if (cg != NULL) {
+    assert(is_CallStaticJava()  && cg->is_mh_late_inline() ||
+           is_CallDynamicJava() && cg->is_virtual_late_inline(), "mismatch");
+  }
+#endif // ASSERT
   return SafePointNode::Ideal(phase, can_reshape);
 }
@@ -1055,6 +1045,32 @@ bool CallStaticJavaNode::cmp( const Node &n ) const {
   return CallJavaNode::cmp(call);
 }

+Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
+  CallGenerator* cg = generator();
+  if (can_reshape && cg != NULL) {
+    assert(IncrementalInlineMH, "required");
+    assert(cg->call_node() == this, "mismatch");
+    assert(cg->is_mh_late_inline(), "not virtual");
+
+    // Check whether this MH handle call becomes a candidate for inlining.
+    ciMethod* callee = cg->method();
+    vmIntrinsics::ID iid = callee->intrinsic_id();
+    if (iid == vmIntrinsics::_invokeBasic) {
+      if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
+        phase->C->prepend_late_inline(cg);
+        set_generator(NULL);
+      }
+    } else {
+      assert(callee->has_member_arg(), "wrong type of call?");
+      if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
+        phase->C->prepend_late_inline(cg);
+        set_generator(NULL);
+      }
+    }
+  }
+  return CallNode::Ideal(phase, can_reshape);
+}
+
 //----------------------------uncommon_trap_request----------------------------
 // If this is an uncommon trap, return the request code, else zero.
 int CallStaticJavaNode::uncommon_trap_request() const {
@@ -1111,6 +1127,48 @@ bool CallDynamicJavaNode::cmp( const Node &n ) const {
   CallDynamicJavaNode &call = (CallDynamicJavaNode&)n;
   return CallJavaNode::cmp(call);
 }

+Node* CallDynamicJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) {
+  CallGenerator* cg = generator();
+  if (can_reshape && cg != NULL) {
+    assert(IncrementalInlineVirtual, "required");
+    assert(cg->call_node() == this, "mismatch");
+    assert(cg->is_virtual_late_inline(), "not virtual");
+
+    // Recover symbolic info for method resolution.
+    ciMethod* caller = jvms()->method();
+    ciBytecodeStream iter(caller);
+    iter.force_bci(jvms()->bci());
+
+    bool not_used1;
+    ciSignature* not_used2;
+    ciMethod* orig_callee = iter.get_method(not_used1, &not_used2); // callee in the bytecode
+    ciKlass* holder = iter.get_declared_method_holder();
+    if (orig_callee->is_method_handle_intrinsic()) {
+      assert(_override_symbolic_info, "required");
+      orig_callee = method();
+      holder = method()->holder();
+    }
+
+    ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);
+
+    Node* receiver_node = in(TypeFunc::Parms);
+    const TypeOopPtr* receiver_type = phase->type(receiver_node)->isa_oopptr();
+
+    int not_used3;
+    bool call_does_dispatch;
+    ciMethod* callee = phase->C->optimize_virtual_call(caller, klass, holder, orig_callee, receiver_type, true /*is_virtual*/,
+                                                       call_does_dispatch, not_used3); // out-parameters
+    if (!call_does_dispatch) {
+      // Register for late inlining.
+      cg->set_callee_method(callee);
+      phase->C->prepend_late_inline(cg); // MH late inlining prepends to the list, so do the same
+      set_generator(NULL);
+    }
+  }
+  return CallNode::Ideal(phase, can_reshape);
+}
+
 #ifndef PRODUCT
 void CallDynamicJavaNode::dump_spec(outputStream *st) const {
   st->print("# Dynamic ");
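Illustration (not part of the patch): the Ideal() method above is where devirtualization happens. Once IGVN has sharpened the receiver type, the callee is re-resolved with optimize_virtual_call(), and if the call provably no longer dispatches, the parked generator is queued for late inlining. A toy standalone sketch of that decision, with hypothetical types rather than the C2 type system:

    #include <cassert>

    // A toy receiver type: either an exact class or only an upper bound.
    struct ReceiverType {
      bool exact;         // the precise dynamic type has been proven
      bool no_overrides;  // e.g. the target method or its holder is final
    };

    // Does a virtual call through this receiver still need dynamic dispatch?
    // If not, it can be strength-reduced to a direct call and maybe inlined.
    bool call_does_dispatch(const ReceiverType& r) {
      if (r.exact)        return false;  // unique concrete target is known
      if (r.no_overrides) return false;  // nothing can override the target
      return true;                       // keep the virtual dispatch
    }

    int main() {
      ReceiverType sharpened{true, false};
      ReceiverType unknown{false, false};
      assert(!call_does_dispatch(sharpened));  // candidate for late inlining
      assert(call_does_dispatch(unknown));     // generator stays parked
      return 0;
    }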
@ -736,7 +736,7 @@ public:
|
|||
bool is_boxing_method() const {
|
||||
return is_macro() && (method() != NULL) && method()->is_boxing_method();
|
||||
}
|
||||
// Later inlining modifies the JVMState, so we need to clone it
|
||||
// Late inlining modifies the JVMState, so we need to clone it
|
||||
// when the call node is cloned (because it is macro node).
|
||||
virtual void clone_jvms(Compile* C) {
|
||||
if ((jvms() != NULL) && is_boxing_method()) {
|
||||
|
@ -746,6 +746,8 @@ public:
|
|||
}
|
||||
|
||||
virtual int Opcode() const;
|
||||
virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
|
||||
|
||||
#ifndef PRODUCT
|
||||
virtual void dump_spec(outputStream *st) const;
|
||||
virtual void dump_compact_spec(outputStream *st) const;
|
||||
|
@ -762,8 +764,18 @@ public:
|
|||
init_class_id(Class_CallDynamicJava);
|
||||
}
|
||||
|
||||
// Late inlining modifies the JVMState, so we need to clone it
|
||||
// when the call node is cloned.
|
||||
virtual void clone_jvms(Compile* C) {
|
||||
if ((jvms() != NULL) && IncrementalInlineVirtual) {
|
||||
set_jvms(jvms()->clone_deep(C));
|
||||
jvms()->set_map_deep(this);
|
||||
}
|
||||
}
|
||||
|
||||
int _vtable_index;
|
||||
virtual int Opcode() const;
|
||||
virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
|
||||
#ifndef PRODUCT
|
||||
virtual void dump_spec(outputStream *st) const;
|
||||
#endif
|
||||
|
|
|
@ -339,26 +339,71 @@ void Compile::remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines
|
|||
int shift = 0;
|
||||
for (int i = 0; i < inlines->length(); i++) {
|
||||
CallGenerator* cg = inlines->at(i);
|
||||
CallNode* call = cg->call_node();
|
||||
if (useful.member(cg->call_node())) {
|
||||
if (shift > 0) {
|
||||
inlines->at_put(i - shift, cg);
|
||||
}
|
||||
if (!useful.member(call)) {
|
||||
shift++;
|
||||
} else {
|
||||
shift++; // skip over the dead element
|
||||
}
|
||||
}
|
||||
inlines->trunc_to(inlines->length()-shift);
|
||||
if (shift > 0) {
|
||||
inlines->trunc_to(inlines->length() - shift); // remove last elements from compacted array
|
||||
}
|
||||
}
|
||||
|
||||
void Compile::remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Node* dead) {
|
||||
assert(dead != NULL && dead->is_Call(), "sanity");
|
||||
int found = 0;
|
||||
for (int i = 0; i < inlines->length(); i++) {
|
||||
if (inlines->at(i)->call_node() == dead) {
|
||||
inlines->remove_at(i);
|
||||
found++;
|
||||
NOT_DEBUG( break; ) // elements are unique, so exit early
|
||||
}
|
||||
}
|
||||
assert(found <= 1, "not unique");
|
||||
}
|
||||
|
||||
void Compile::remove_useless_nodes(GrowableArray<Node*>& node_list, Unique_Node_List& useful) {
|
||||
for (int i = node_list.length() - 1; i >= 0; i--) {
|
||||
Node* n = node_list.at(i);
|
||||
if (!useful.member(n)) {
|
||||
node_list.remove_if_existing(n);
|
||||
node_list.delete_at(i); // replaces i-th with last element which is known to be useful (already processed)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void Compile::remove_useless_node(Node* dead) {
|
||||
remove_modified_node(dead);
|
||||
|
||||
// Constant node that has no out-edges and has only one in-edge from
|
||||
// root is usually dead. However, sometimes reshaping walk makes
|
||||
// it reachable by adding use edges. So, we will NOT count Con nodes
|
||||
// as dead to be conservative about the dead node count at any
|
||||
// given time.
|
||||
if (!dead->is_Con()) {
|
||||
record_dead_node(dead->_idx);
|
||||
}
|
||||
if (dead->is_macro()) {
|
||||
remove_macro_node(dead);
|
||||
}
|
||||
if (dead->is_expensive()) {
|
||||
remove_expensive_node(dead);
|
||||
}
|
||||
if (dead->for_post_loop_opts_igvn()) {
|
||||
remove_from_post_loop_opts_igvn(dead);
|
||||
}
|
||||
if (dead->is_Call()) {
|
||||
remove_useless_late_inlines( &_late_inlines, dead);
|
||||
remove_useless_late_inlines( &_string_late_inlines, dead);
|
||||
remove_useless_late_inlines( &_boxing_late_inlines, dead);
|
||||
remove_useless_late_inlines(&_vector_reboxing_late_inlines, dead);
|
||||
}
|
||||
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
|
||||
bs->unregister_potential_barrier_node(dead);
|
||||
}
|
||||
|
||||
// Disconnect all useless nodes by disconnecting those at the boundary.
|
||||
void Compile::remove_useless_nodes(Unique_Node_List &useful) {
|
||||
uint next = 0;
|
||||
|
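Illustration (not part of the patch): the new remove_useless_late_inlines(inlines, dead) overload earlier in this hunk is called from remove_useless_node() whenever a call node dies, so the late-inline lists never keep a generator whose call has been removed. A standalone sketch of that cleanup, using std::vector instead of GrowableArray and made-up names:

    #include <vector>
    #include <cassert>

    struct Call {};                     // stands in for a CallNode
    struct Gen { Call* call; };         // stands in for a CallGenerator

    // Drop the generator whose call node just died.  Entries are unique, so a
    // release build could stop at the first hit (the NOT_DEBUG(break) in the
    // patch); here we keep scanning and assert that at most one entry matched.
    void remove_dead(std::vector<Gen*>& inlines, Call* dead) {
      int found = 0;
      for (size_t i = 0; i < inlines.size(); ) {
        if (inlines[i]->call == dead) {
          inlines.erase(inlines.begin() + i);
          found++;
        } else {
          i++;
        }
      }
      assert(found <= 1 && "late inline entries are expected to be unique");
    }

    int main() {
      Call dead_call;
      Gen a{nullptr}, b{&dead_call};
      std::vector<Gen*> inlines{&a, &b};
      remove_dead(inlines, &dead_call);
      assert(inlines.size() == 1 && inlines[0] == &a);
      return 0;
    }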
@ -394,9 +439,9 @@ void Compile::remove_useless_nodes(Unique_Node_List &useful) {
|
|||
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
|
||||
bs->eliminate_useless_gc_barriers(useful, this);
|
||||
// clean up the late inline lists
|
||||
remove_useless_late_inlines( &_late_inlines, useful);
|
||||
remove_useless_late_inlines( &_string_late_inlines, useful);
|
||||
remove_useless_late_inlines( &_boxing_late_inlines, useful);
|
||||
remove_useless_late_inlines(&_late_inlines, useful);
|
||||
remove_useless_late_inlines(&_vector_reboxing_late_inlines, useful);
|
||||
debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
|
||||
}
|
||||
|
@ -1860,21 +1905,34 @@ bool Compile::inline_incrementally_one() {
|
|||
assert(IncrementalInline, "incremental inlining should be on");
|
||||
|
||||
TracePhase tp("incrementalInline_inline", &timers[_t_incrInline_inline]);
|
||||
|
||||
set_inlining_progress(false);
|
||||
set_do_cleanup(false);
|
||||
int i = 0;
|
||||
for (; i <_late_inlines.length() && !inlining_progress(); i++) {
|
||||
CallGenerator* cg = _late_inlines.at(i);
|
||||
|
||||
for (int i = 0; i < _late_inlines.length(); i++) {
|
||||
_late_inlines_pos = i+1;
|
||||
CallGenerator* cg = _late_inlines.at(i);
|
||||
bool does_dispatch = cg->is_virtual_late_inline() || cg->is_mh_late_inline();
|
||||
if (inlining_incrementally() || does_dispatch) { // a call can be either inlined or strength-reduced to a direct call
|
||||
cg->do_late_inline();
|
||||
if (failing()) return false;
|
||||
assert(_late_inlines.at(i) == cg, "no insertions before current position allowed");
|
||||
if (failing()) {
|
||||
return false;
|
||||
} else if (inlining_progress()) {
|
||||
_late_inlines_pos = i+1; // restore the position in case new elements were inserted
|
||||
print_method(PHASE_INCREMENTAL_INLINE_STEP, cg->call_node(), 3);
|
||||
break; // process one call site at a time
|
||||
}
|
||||
int j = 0;
|
||||
for (; i < _late_inlines.length(); i++, j++) {
|
||||
_late_inlines.at_put(j, _late_inlines.at(i));
|
||||
} else {
|
||||
// Ignore late inline direct calls when inlining is not allowed.
|
||||
// They are left in the late inline list when node budget is exhausted until the list is fully drained.
|
||||
}
|
||||
_late_inlines.trunc_to(j);
|
||||
assert(inlining_progress() || _late_inlines.length() == 0, "");
|
||||
}
|
||||
// Remove processed elements.
|
||||
_late_inlines.remove_till(_late_inlines_pos);
|
||||
_late_inlines_pos = 0;
|
||||
|
||||
assert(inlining_progress() || _late_inlines.length() == 0, "no progress");
|
||||
|
||||
bool needs_cleanup = do_cleanup() || over_inlining_cutoff();
|
||||
|
||||
|
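Illustration (not part of the patch): inline_incrementally_one() above now handles at most one call site per invocation; it walks the list, stops at the first generator that makes progress, remembers how far it got in _late_inlines_pos, and strips the processed prefix with remove_till(). A simplified standalone sketch of that loop shape, using std::deque instead of GrowableArray:

    #include <deque>
    #include <cstdio>

    struct Task { bool makes_progress; };

    std::deque<Task> worklist;

    // Process entries until one succeeds, then stop so the caller can run a
    // cleanup pass (IGVN in the real code) before the next round.
    bool process_one_round() {
      size_t pos = 0;
      bool progress = false;
      for (size_t i = 0; i < worklist.size(); i++) {
        pos = i + 1;                    // _late_inlines_pos in the patch
        if (worklist[i].makes_progress) {
          progress = true;
          break;                        // one call site at a time
        }
      }
      // Drop everything up to the recorded position, keeping the order of the
      // remaining entries (GrowableArray::remove_till in the patch).
      worklist.erase(worklist.begin(), worklist.begin() + pos);
      return progress;
    }

    int main() {
      worklist = { {false}, {true}, {true} };
      int rounds = 0;
      while (process_one_round()) { rounds++; }  // cleanup would go between rounds
      std::printf("rounds: %d, left: %zu\n", rounds, worklist.size());
      return 0;
    }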
@ -1896,6 +1954,7 @@ void Compile::inline_incrementally_cleanup(PhaseIterGVN& igvn) {
|
|||
igvn = PhaseIterGVN(initial_gvn());
|
||||
igvn.optimize();
|
||||
}
|
||||
print_method(PHASE_INCREMENTAL_INLINE_CLEANUP, 3);
|
||||
}
|
||||
|
||||
// Perform incremental inlining until bound on number of live nodes is reached
|
||||
|
@ -1919,6 +1978,18 @@ void Compile::inline_incrementally(PhaseIterGVN& igvn) {
|
|||
}
|
||||
|
||||
if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
|
||||
bool do_print_inlining = print_inlining() || print_intrinsics();
|
||||
if (do_print_inlining || log() != NULL) {
|
||||
// Print inlining message for candidates that we couldn't inline for lack of space.
|
||||
for (int i = 0; i < _late_inlines.length(); i++) {
|
||||
CallGenerator* cg = _late_inlines.at(i);
|
||||
const char* msg = "live nodes > LiveNodeCountInliningCutoff";
|
||||
if (do_print_inlining) {
|
||||
cg->print_inlining_late(msg);
|
||||
}
|
||||
log_late_inline_failure(cg, msg);
|
||||
}
|
||||
}
|
||||
break; // finish
|
||||
}
|
||||
}
|
||||
|
@ -1929,7 +2000,6 @@ void Compile::inline_incrementally(PhaseIterGVN& igvn) {
|
|||
while (inline_incrementally_one()) {
|
||||
assert(!failing(), "inconsistent");
|
||||
}
|
||||
|
||||
if (failing()) return;
|
||||
|
||||
inline_incrementally_cleanup(igvn);
|
||||
|
@ -1937,6 +2007,10 @@ void Compile::inline_incrementally(PhaseIterGVN& igvn) {
|
|||
print_method(PHASE_INCREMENTAL_INLINE_STEP, 3);
|
||||
|
||||
if (failing()) return;
|
||||
|
||||
if (_late_inlines.length() == 0) {
|
||||
break; // no more progress
|
||||
}
|
||||
}
|
||||
assert( igvn._worklist.size() == 0, "should be done with igvn" );
|
||||
|
||||
|
@ -1955,6 +2029,27 @@ void Compile::inline_incrementally(PhaseIterGVN& igvn) {
|
|||
set_inlining_incrementally(false);
|
||||
}
|
||||
|
||||
void Compile::process_late_inline_calls_no_inline(PhaseIterGVN& igvn) {
|
||||
// "inlining_incrementally() == false" is used to signal that no inlining is allowed
|
||||
// (see LateInlineVirtualCallGenerator::do_late_inline_check() for details).
|
||||
// Tracking and verification of modified nodes is disabled by setting "_modified_nodes == NULL"
|
||||
// as if "inlining_incrementally() == true" were set.
|
||||
assert(inlining_incrementally() == false, "not allowed");
|
||||
assert(_modified_nodes == NULL, "not allowed");
|
||||
assert(_late_inlines.length() > 0, "sanity");
|
||||
|
||||
while (_late_inlines.length() > 0) {
|
||||
for_igvn()->clear();
|
||||
initial_gvn()->replace_with(&igvn);
|
||||
|
||||
while (inline_incrementally_one()) {
|
||||
assert(!failing(), "inconsistent");
|
||||
}
|
||||
if (failing()) return;
|
||||
|
||||
inline_incrementally_cleanup(igvn);
|
||||
}
|
||||
}
|
||||
|
||||
bool Compile::optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode) {
|
||||
if (_loop_opts_cnt > 0) {
|
||||
|
@ -2235,10 +2330,20 @@ void Compile::Optimize() {
|
|||
}
|
||||
|
||||
DEBUG_ONLY( _modified_nodes = NULL; )
|
||||
|
||||
assert(igvn._worklist.size() == 0, "not empty");
|
||||
|
||||
assert(_late_inlines.length() == 0 || IncrementalInlineMH || IncrementalInlineVirtual, "not empty");
|
||||
|
||||
if (_late_inlines.length() > 0) {
|
||||
// More opportunities to optimize virtual and MH calls.
|
||||
// Though it's maybe too late to perform inlining, strength-reducing them to direct calls is still an option.
|
||||
process_late_inline_calls_no_inline(igvn);
|
||||
}
|
||||
} // (End scope of igvn; run destructor if necessary for asserts.)
|
||||
|
||||
process_print_inlining();
|
||||
|
||||
// A method with only infinite loops has no edges entering loops from root
|
||||
{
|
||||
TracePhase tp("graphReshape", &timers[_t_graphReshaping]);
|
||||
|
@ -2254,8 +2359,6 @@ void Compile::Optimize() {
|
|||
|
||||
void Compile::inline_vector_reboxing_calls() {
|
||||
if (C->_vector_reboxing_late_inlines.length() > 0) {
|
||||
PhaseGVN* gvn = C->initial_gvn();
|
||||
|
||||
_late_inlines_pos = C->_late_inlines.length();
|
||||
while (_vector_reboxing_late_inlines.length() > 0) {
|
||||
CallGenerator* cg = _vector_reboxing_late_inlines.pop();
|
||||
|
@ -3261,25 +3364,17 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f
|
|||
}
|
||||
|
||||
case Op_Proj: {
|
||||
if (OptimizeStringConcat) {
|
||||
ProjNode* p = n->as_Proj();
|
||||
if (p->_is_io_use) {
|
||||
if (OptimizeStringConcat || IncrementalInline) {
|
||||
ProjNode* proj = n->as_Proj();
|
||||
if (proj->_is_io_use) {
|
||||
assert(proj->_con == TypeFunc::I_O || proj->_con == TypeFunc::Memory, "");
|
||||
// Separate projections were used for the exception path which
|
||||
// are normally removed by a late inline. If it wasn't inlined
|
||||
// then they will hang around and should just be replaced with
|
||||
// the original one.
|
||||
Node* proj = NULL;
|
||||
// Replace with just one
|
||||
for (SimpleDUIterator i(p->in(0)); i.has_next(); i.next()) {
|
||||
Node *use = i.get();
|
||||
if (use->is_Proj() && p != use && use->as_Proj()->_con == p->_con) {
|
||||
proj = use;
|
||||
break;
|
||||
}
|
||||
}
|
||||
assert(proj != NULL || p->_con == TypeFunc::I_O, "io may be dropped at an infinite loop");
|
||||
if (proj != NULL) {
|
||||
p->subsume_by(proj, this);
|
||||
// the original one. Merge them.
|
||||
Node* non_io_proj = proj->in(0)->as_Multi()->proj_out_or_null(proj->_con, false /*is_io_use*/);
|
||||
if (non_io_proj != NULL) {
|
||||
proj->subsume_by(non_io_proj , this);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
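Illustration (not part of the patch): the reshaping hunk above deals with calls that kept their extra exception-path projections because the late inline never happened; the leftover _is_io_use projection is folded into the regular projection with the same index. A standalone sketch of the lookup-and-merge idea, with simplified types rather than C2 nodes:

    #include <vector>
    #include <cassert>

    // An output edge of a multi-output node, identified by a slot number plus
    // a flag marking the duplicate reserved for the exception path.
    struct Proj {
      int  slot;
      bool is_io_use;
    };

    struct MultiNode {
      std::vector<Proj*> outs;

      // Counterpart of the new MultiNode::proj_out_or_null(slot, is_io_use):
      // find the projection matching both keys, or return null.
      Proj* proj_out_or_null(int slot, bool is_io_use) const {
        for (Proj* p : outs) {
          if (p->slot == slot && p->is_io_use == is_io_use) return p;
        }
        return nullptr;
      }
    };

    int main() {
      Proj regular{1, false};
      Proj extra{1, true};          // left over from a late inline that never ran
      MultiNode call{{&regular, &extra}};
      // Fold the leftover into the canonical projection (the patch does this
      // with subsume_by(), which rewires all of extra's users to regular).
      Proj* canonical = call.proj_out_or_null(extra.slot, /*is_io_use=*/false);
      assert(canonical == &regular);
      return 0;
    }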
@ -4141,12 +4236,7 @@ Compile::PrintInliningBuffer& Compile::print_inlining_current() {
|
|||
|
||||
void Compile::print_inlining_update(CallGenerator* cg) {
|
||||
if (print_inlining() || print_intrinsics()) {
|
||||
if (!cg->is_late_inline()) {
|
||||
if (print_inlining_current().cg() != NULL) {
|
||||
print_inlining_push();
|
||||
}
|
||||
print_inlining_commit();
|
||||
} else {
|
||||
if (cg->is_late_inline()) {
|
||||
if (print_inlining_current().cg() != cg &&
|
||||
(print_inlining_current().cg() != NULL ||
|
||||
print_inlining_current().ss()->size() != 0)) {
|
||||
|
@ -4154,6 +4244,11 @@ void Compile::print_inlining_update(CallGenerator* cg) {
|
|||
}
|
||||
print_inlining_commit();
|
||||
print_inlining_current().set_cg(cg);
|
||||
} else {
|
||||
if (print_inlining_current().cg() != NULL) {
|
||||
print_inlining_push();
|
||||
}
|
||||
print_inlining_commit();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -4161,7 +4256,7 @@ void Compile::print_inlining_update(CallGenerator* cg) {
|
|||
void Compile::print_inlining_move_to(CallGenerator* cg) {
|
||||
// We resume inlining at a late inlining call site. Locate the
|
||||
// corresponding inlining buffer so that we can update it.
|
||||
if (print_inlining()) {
|
||||
if (print_inlining() || print_intrinsics()) {
|
||||
for (int i = 0; i < _print_inlining_list->length(); i++) {
|
||||
if (_print_inlining_list->adr_at(i)->cg() == cg) {
|
||||
_print_inlining_idx = i;
|
||||
|
@ -4173,7 +4268,7 @@ void Compile::print_inlining_move_to(CallGenerator* cg) {
|
|||
}
|
||||
|
||||
void Compile::print_inlining_update_delayed(CallGenerator* cg) {
|
||||
if (print_inlining()) {
|
||||
if (print_inlining() || print_intrinsics()) {
|
||||
assert(_print_inlining_stream->size() > 0, "missing inlining msg");
|
||||
assert(print_inlining_current().cg() == cg, "wrong entry");
|
||||
// replace message with new message
|
||||
|
@ -4188,22 +4283,8 @@ void Compile::print_inlining_assert_ready() {
|
|||
}
|
||||
|
||||
void Compile::process_print_inlining() {
|
||||
bool do_print_inlining = print_inlining() || print_intrinsics();
|
||||
if (do_print_inlining || log() != NULL) {
|
||||
// Print inlining message for candidates that we couldn't inline
|
||||
// for lack of space
|
||||
for (int i = 0; i < _late_inlines.length(); i++) {
|
||||
CallGenerator* cg = _late_inlines.at(i);
|
||||
if (!cg->is_mh_late_inline()) {
|
||||
const char* msg = "live nodes > LiveNodeCountInliningCutoff";
|
||||
if (do_print_inlining) {
|
||||
cg->print_inlining_late(msg);
|
||||
}
|
||||
log_late_inline_failure(cg, msg);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (do_print_inlining) {
|
||||
assert(_late_inlines.length() == 0, "not drained yet");
|
||||
if (print_inlining() || print_intrinsics()) {
|
||||
ResourceMark rm;
|
||||
stringStream ss;
|
||||
assert(_print_inlining_list != NULL, "process_print_inlining should be called only once.");
|
||||
|
|
|
@ -864,12 +864,12 @@ class Compile : public Phase {
|
|||
bool should_delay_vector_reboxing_inlining(ciMethod* call_method, JVMState* jvms);
|
||||
|
||||
// Helper functions to identify inlining potential at call-site
|
||||
ciMethod* optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
|
||||
ciMethod* optimize_virtual_call(ciMethod* caller, ciInstanceKlass* klass,
|
||||
ciKlass* holder, ciMethod* callee,
|
||||
const TypeOopPtr* receiver_type, bool is_virtual,
|
||||
bool &call_does_dispatch, int &vtable_index,
|
||||
bool check_access = true);
|
||||
ciMethod* optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
|
||||
ciMethod* optimize_inlining(ciMethod* caller, ciInstanceKlass* klass,
|
||||
ciMethod* callee, const TypeOopPtr* receiver_type,
|
||||
bool check_access = true);
|
||||
|
||||
|
@ -911,6 +911,8 @@ class Compile : public Phase {
|
|||
void update_dead_node_list(Unique_Node_List &useful);
|
||||
void remove_useless_nodes (Unique_Node_List &useful);
|
||||
|
||||
void remove_useless_node(Node* dead);
|
||||
|
||||
WarmCallInfo* warm_calls() const { return _warm_calls; }
|
||||
void set_warm_calls(WarmCallInfo* l) { _warm_calls = l; }
|
||||
WarmCallInfo* pop_warm_call();
|
||||
|
@ -941,9 +943,11 @@ class Compile : public Phase {
|
|||
|
||||
const GrowableArray<BufferBlob*>& native_invokers() const { return _native_invokers; }
|
||||
|
||||
void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful);
|
||||
void remove_useless_nodes (GrowableArray<Node*>& node_list, Unique_Node_List &useful);
|
||||
|
||||
void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful);
|
||||
void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Node* dead);
|
||||
|
||||
void process_print_inlining();
|
||||
void dump_print_inlining();
|
||||
|
||||
|
@ -974,6 +978,8 @@ class Compile : public Phase {
|
|||
void inline_vector_reboxing_calls();
|
||||
bool has_vbox_nodes();
|
||||
|
||||
void process_late_inline_calls_no_inline(PhaseIterGVN& igvn);
|
||||
|
||||
// Matching, CFG layout, allocation, code generation
|
||||
PhaseCFG* cfg() { return _cfg; }
|
||||
bool has_java_calls() const { return _java_calls > 0; }
|
||||
|
|
|
@ -148,7 +148,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
|
|||
// MethodHandle.invoke* are native methods which obviously don't
|
||||
// have bytecodes and so normal inlining fails.
|
||||
if (callee->is_method_handle_intrinsic()) {
|
||||
CallGenerator* cg = CallGenerator::for_method_handle_call(jvms, caller, callee);
|
||||
CallGenerator* cg = CallGenerator::for_method_handle_call(jvms, caller, callee, allow_inline);
|
||||
return cg;
|
||||
}
|
||||
|
||||
|
@ -274,7 +274,8 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
|
|||
} else {
|
||||
// Generate virtual call for class check failure path
|
||||
// in case of polymorphic virtual call site.
|
||||
miss_cg = CallGenerator::for_virtual_call(callee, vtable_index);
|
||||
miss_cg = (IncrementalInlineVirtual ? CallGenerator::for_late_inline_virtual(callee, vtable_index, prof_factor)
|
||||
: CallGenerator::for_virtual_call(callee, vtable_index));
|
||||
}
|
||||
if (miss_cg != NULL) {
|
||||
if (next_hit_cg != NULL) {
|
||||
|
@ -341,15 +342,15 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
|
|||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} // call_does_dispatch && bytecode == Bytecodes::_invokeinterface
|
||||
|
||||
// Nothing claimed the intrinsic, we go with straight-forward inlining
|
||||
// for already discovered intrinsic.
|
||||
if (allow_inline && allow_intrinsics && cg_intrinsic != NULL) {
|
||||
if (allow_intrinsics && cg_intrinsic != NULL) {
|
||||
assert(cg_intrinsic->does_virtual_dispatch(), "sanity");
|
||||
return cg_intrinsic;
|
||||
}
|
||||
} // allow_inline
|
||||
|
||||
// There was no special inlining tactic, or it bailed out.
|
||||
// Use a more generic tactic, like a simple call.
|
||||
|
@ -359,7 +360,11 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
|
|||
print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
|
||||
}
|
||||
C->log_inline_failure(msg);
|
||||
if (IncrementalInlineVirtual && allow_inline) {
|
||||
return CallGenerator::for_late_inline_virtual(callee, vtable_index, prof_factor); // attempt to inline through virtual call later
|
||||
} else {
|
||||
return CallGenerator::for_virtual_call(callee, vtable_index);
|
||||
}
|
||||
} else {
|
||||
// Class Hierarchy Analysis or Type Profile reveals a unique target,
|
||||
// or it is a static or special call.
|
||||
|
@ -560,7 +565,7 @@ void Parse::do_call() {
|
|||
// finalize() won't be compiled as vtable calls (IC call
|
||||
// resolution will catch the illegal call) and the few legal calls
|
||||
// on array types won't be either.
|
||||
callee = C->optimize_virtual_call(method(), bci(), klass, holder, orig_callee,
|
||||
callee = C->optimize_virtual_call(method(), klass, holder, orig_callee,
|
||||
receiver_type, is_virtual,
|
||||
call_does_dispatch, vtable_index); // out-parameters
|
||||
speculative_receiver_type = receiver_type != NULL ? receiver_type->speculative_type() : NULL;
|
||||
|
@ -1069,7 +1074,7 @@ void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) {
|
|||
#endif //PRODUCT
|
||||
|
||||
|
||||
ciMethod* Compile::optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
|
||||
ciMethod* Compile::optimize_virtual_call(ciMethod* caller, ciInstanceKlass* klass,
|
||||
ciKlass* holder, ciMethod* callee,
|
||||
const TypeOopPtr* receiver_type, bool is_virtual,
|
||||
bool& call_does_dispatch, int& vtable_index,
|
||||
|
@ -1079,7 +1084,7 @@ ciMethod* Compile::optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKl
|
|||
vtable_index = Method::invalid_vtable_index;
|
||||
|
||||
// Choose call strategy.
|
||||
ciMethod* optimized_virtual_method = optimize_inlining(caller, bci, klass, callee,
|
||||
ciMethod* optimized_virtual_method = optimize_inlining(caller, klass, callee,
|
||||
receiver_type, check_access);
|
||||
|
||||
// Have the call been sufficiently improved such that it is no longer a virtual?
|
||||
|
@ -1094,7 +1099,7 @@ ciMethod* Compile::optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKl
|
|||
}
|
||||
|
||||
// Identify possible target method and inlining style
|
||||
ciMethod* Compile::optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
|
||||
ciMethod* Compile::optimize_inlining(ciMethod* caller, ciInstanceKlass* klass,
|
||||
ciMethod* callee, const TypeOopPtr* receiver_type,
|
||||
bool check_access) {
|
||||
// only use for virtual or interface calls
|
||||
|
@ -1185,11 +1190,6 @@ ciMethod* Compile::optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass*
|
|||
// such method can be changed when its class is redefined.
|
||||
ciMethod* exact_method = callee->resolve_invoke(calling_klass, actual_receiver);
|
||||
if (exact_method != NULL) {
|
||||
if (PrintOpto) {
|
||||
tty->print(" Calling method via exact type @%d --- ", bci);
|
||||
exact_method->print_name();
|
||||
tty->cr();
|
||||
}
|
||||
return exact_method;
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -63,6 +63,16 @@ ProjNode* MultiNode::proj_out_or_null(uint which_proj) const {
   return NULL;
 }

+ProjNode* MultiNode::proj_out_or_null(uint which_proj, bool is_io_use) const {
+  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
+    ProjNode* proj = fast_out(i)->isa_Proj();
+    if (proj != NULL && (proj->_con == which_proj) && (proj->_is_io_use == is_io_use)) {
+      return proj;
+    }
+  }
+  return NULL;
+}
+
 // Get a named projection
 ProjNode* MultiNode::proj_out(uint which_proj) const {
   ProjNode* p = proj_out_or_null(which_proj);
|
|
|
@ -48,7 +48,7 @@ public:
|
|||
virtual uint ideal_reg() const { return NotAMachineReg; }
|
||||
ProjNode* proj_out(uint which_proj) const; // Get a named projection
|
||||
ProjNode* proj_out_or_null(uint which_proj) const;
|
||||
|
||||
ProjNode* proj_out_or_null(uint which_proj, bool is_io_use) const;
|
||||
};
|
||||
|
||||
//------------------------------ProjNode---------------------------------------
|
||||
|
|
|
@ -29,6 +29,7 @@
|
|||
#include "memory/allocation.inline.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "opto/ad.hpp"
|
||||
#include "opto/callGenerator.hpp"
|
||||
#include "opto/castnode.hpp"
|
||||
#include "opto/cfgnode.hpp"
|
||||
#include "opto/connode.hpp"
|
||||
|
@ -554,9 +555,15 @@ Node *Node::clone() const {
|
|||
to[i] = from[i]->clone();
|
||||
}
|
||||
}
|
||||
// cloning CallNode may need to clone JVMState
|
||||
if (n->is_Call()) {
|
||||
// cloning CallNode may need to clone JVMState
|
||||
n->as_Call()->clone_jvms(C);
|
||||
// CallGenerator is linked to the original node.
|
||||
CallGenerator* cg = n->as_Call()->generator();
|
||||
if (cg != NULL) {
|
||||
CallGenerator* cloned_cg = cg->with_call_node(n->as_Call());
|
||||
n->as_Call()->set_generator(cloned_cg);
|
||||
}
|
||||
}
|
||||
if (n->is_SafePoint()) {
|
||||
n->as_SafePoint()->clone_replaced_nodes();
|
||||
|
@ -1403,18 +1410,6 @@ static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) {
|
|||
igvn->hash_delete(dead);
|
||||
igvn->_worklist.remove(dead);
|
||||
igvn->set_type(dead, Type::TOP);
|
||||
if (dead->is_macro()) {
|
||||
igvn->C->remove_macro_node(dead);
|
||||
}
|
||||
if (dead->is_expensive()) {
|
||||
igvn->C->remove_expensive_node(dead);
|
||||
}
|
||||
if (dead->for_post_loop_opts_igvn()) {
|
||||
igvn->C->remove_from_post_loop_opts_igvn(dead);
|
||||
}
|
||||
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
|
||||
bs->unregister_potential_barrier_node(dead);
|
||||
igvn->C->record_dead_node(dead->_idx);
|
||||
// Kill all inputs to the dead guy
|
||||
for (uint i=0; i < dead->req(); i++) {
|
||||
Node *n = dead->in(i); // Get input to dead guy
|
||||
|
@ -1437,7 +1432,7 @@ static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) {
|
|||
}
|
||||
}
|
||||
}
|
||||
igvn->C->remove_modified_node(dead);
|
||||
igvn->C->remove_useless_node(dead);
|
||||
} // (dead->outcnt() == 0)
|
||||
} // while (nstack.size() > 0) for outputs
|
||||
return;
|
||||
|
|
|
@ -1117,11 +1117,13 @@ void PhaseIterGVN::verify_PhaseIterGVN() {
|
|||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
while (modified_list->size()) {
|
||||
if (modified_list != NULL) {
|
||||
while (modified_list->size() > 0) {
|
||||
Node* n = modified_list->pop();
|
||||
n->dump();
|
||||
assert(false, "VerifyIterativeGVN: new modified node was added");
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
#endif /* PRODUCT */
|
||||
|
@ -1409,26 +1411,7 @@ void PhaseIterGVN::remove_globally_dead_node( Node *dead ) {
|
|||
_stack.pop();
|
||||
// Remove dead node from iterative worklist
|
||||
_worklist.remove(dead);
|
||||
C->remove_modified_node(dead);
|
||||
// Constant node that has no out-edges and has only one in-edge from
|
||||
// root is usually dead. However, sometimes reshaping walk makes
|
||||
// it reachable by adding use edges. So, we will NOT count Con nodes
|
||||
// as dead to be conservative about the dead node count at any
|
||||
// given time.
|
||||
if (!dead->is_Con()) {
|
||||
C->record_dead_node(dead->_idx);
|
||||
}
|
||||
if (dead->is_macro()) {
|
||||
C->remove_macro_node(dead);
|
||||
}
|
||||
if (dead->is_expensive()) {
|
||||
C->remove_expensive_node(dead);
|
||||
}
|
||||
if (dead->for_post_loop_opts_igvn()) {
|
||||
C->remove_from_post_loop_opts_igvn(dead);
|
||||
}
|
||||
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
|
||||
bs->unregister_potential_barrier_node(dead);
|
||||
C->remove_useless_node(dead);
|
||||
}
|
||||
} // while (_stack.is_nonempty())
|
||||
}
|
||||
|
|
|
@ -60,6 +60,7 @@ enum CompilerPhaseType {
|
|||
PHASE_MATCHING,
|
||||
PHASE_INCREMENTAL_INLINE,
|
||||
PHASE_INCREMENTAL_INLINE_STEP,
|
||||
PHASE_INCREMENTAL_INLINE_CLEANUP,
|
||||
PHASE_INCREMENTAL_BOXING_INLINE,
|
||||
PHASE_CALL_CATCH_CLEANUP,
|
||||
PHASE_INSERT_BARRIER,
|
||||
|
@ -111,6 +112,7 @@ class CompilerPhaseTypeHelper {
|
|||
case PHASE_MATCHING: return "After matching";
|
||||
case PHASE_INCREMENTAL_INLINE: return "Incremental Inline";
|
||||
case PHASE_INCREMENTAL_INLINE_STEP: return "Incremental Inline Step";
|
||||
case PHASE_INCREMENTAL_INLINE_CLEANUP: return "Incremental Inline Cleanup";
|
||||
case PHASE_INCREMENTAL_BOXING_INLINE: return "Incremental Boxing Inline";
|
||||
case PHASE_CALL_CATCH_CLEANUP: return "Call catch cleanup";
|
||||
case PHASE_INSERT_BARRIER: return "Insert barrier";
|
||||
|
|
|
@@ -252,6 +252,14 @@ public:
     _len--;
   }

+  // Remove all elements up to the index (exclusive). The order is preserved.
+  void remove_till(int idx) {
+    for (int i = 0, j = idx; j < length(); i++, j++) {
+      at_put(i, at(j));
+    }
+    trunc_to(length() - idx);
+  }
+
   // The order is changed.
   void delete_at(int index) {
     assert(0 <= index && index < _len, "illegal index");
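Illustration (not part of the patch): remove_till() added above drops the first idx elements and shifts the rest down without changing their order, which is exactly what the inline_incrementally_one() loop needs. A small standalone check of the same contract, using std::vector as a stand-in for the HotSpot GrowableArray:

    #include <vector>
    #include <cassert>

    // Same contract as the new GrowableArray::remove_till(idx):
    // drop elements [0, idx) and keep the order of the remaining ones.
    template <typename T>
    void remove_till(std::vector<T>& v, int idx) {
      for (size_t i = 0, j = idx; j < v.size(); i++, j++) {
        v[i] = v[j];
      }
      v.resize(v.size() - idx);
    }

    int main() {
      std::vector<int> v{10, 20, 30, 40, 50};
      remove_till(v, 2);
      assert((v == std::vector<int>{30, 40, 50}));
      return 0;
    }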