commit 32024b5144
Author: Bharadwaj Yadavalli
Date:   2014-04-17 13:50:26 -04:00

175 changed files with 10280 additions and 1205 deletions

View file

@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2947,7 +2947,50 @@ void BytecodeInterpreter::pd_layout_interpreterState(interpreterState istate,
istate->_last_Java_fp = last_Java_fp;
}
int AbstractInterpreter::layout_activation(Method* method,
// Computes monitor_size and top_frame_size in bytes.
static void frame_size_helper(int max_stack,
int monitors,
int& monitor_size,
int& top_frame_size) {
monitor_size = frame::interpreter_frame_monitor_size_in_bytes() * monitors;
top_frame_size = round_to(frame::interpreter_frame_cinterpreterstate_size_in_bytes()
+ monitor_size
+ max_stack * Interpreter::stackElementSize
+ 2 * Interpreter::stackElementSize,
frame::alignment_in_bytes)
+ frame::top_ijava_frame_abi_size;
}
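The round_to() calls above are plain align-up arithmetic. A minimal self-contained sketch of that helper (an illustrative stand-in, not HotSpot's implementation):

    #include <cassert>
    #include <cstdint>

    // Illustrative stand-in for round_to(): align x up to a power-of-two boundary.
    static inline intptr_t round_to_sketch(intptr_t x, intptr_t alignment) {
      assert(((alignment & (alignment - 1)) == 0) && "power of two expected");
      return (x + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      // A 100-byte frame body rounded to a 16-byte boundary becomes 112 bytes.
      assert(round_to_sketch(100, 16) == 112);
      return 0;
    }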
// Returns number of stackElementWords needed for the interpreter frame with the
// given sections.
int AbstractInterpreter::size_activation(int max_stack,
int temps,
int extra_args,
int monitors,
int callee_params,
int callee_locals,
bool is_top_frame) {
int monitor_size = 0;
int top_frame_size = 0;
frame_size_helper(max_stack, monitors, monitor_size, top_frame_size);
int frame_size;
if (is_top_frame) {
frame_size = top_frame_size;
} else {
frame_size = round_to(frame::interpreter_frame_cinterpreterstate_size_in_bytes()
+ monitor_size
+ (temps - callee_params + callee_locals) * Interpreter::stackElementSize
+ 2 * Interpreter::stackElementSize,
frame::alignment_in_bytes)
+ frame::parent_ijava_frame_abi_size;
assert(extra_args == 0, "non-zero for top_frame only");
}
return frame_size / Interpreter::stackElementSize;
}
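frame_size_helper() works in bytes while size_activation() must report stackElementWords, hence the final division. A worked conversion with hypothetical sizes, assuming 8-byte stack elements:

    #include <cassert>

    int main() {
      const int stackElementSize = 8;  // hypothetical bytes per interpreter stack slot
      int top_frame_size = 352;        // hypothetical byte size from frame_size_helper()
      assert(top_frame_size % stackElementSize == 0);   // guaranteed by the rounding above
      assert(top_frame_size / stackElementSize == 44);  // slots reported to the caller
      return 0;
    }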
void AbstractInterpreter::layout_activation(Method* method,
int temps, // Number of slots on java expression stack in use.
int popframe_args,
int monitors, // Number of active monitors.
@@ -2967,29 +3010,9 @@ int AbstractInterpreter::layout_activation(Method* method,
// both the abi scratch area and a place to hold a result from a
// callee on its way to the caller's stack.
int monitor_size = frame::interpreter_frame_monitor_size_in_bytes() * monitors;
int frame_size;
int top_frame_size = round_to(frame::interpreter_frame_cinterpreterstate_size_in_bytes()
+ monitor_size
+ (method->max_stack() *Interpreter::stackElementWords * BytesPerWord)
+ 2*BytesPerWord,
frame::alignment_in_bytes)
+ frame::top_ijava_frame_abi_size;
if (is_top_frame) {
frame_size = top_frame_size;
} else {
frame_size = round_to(frame::interpreter_frame_cinterpreterstate_size_in_bytes()
+ monitor_size
+ ((temps - callee_params + callee_locals) *
Interpreter::stackElementWords * BytesPerWord)
+ 2*BytesPerWord,
frame::alignment_in_bytes)
+ frame::parent_ijava_frame_abi_size;
assert(popframe_args==0, "non-zero for top_frame only");
}
// If we actually have a frame to lay out, we must now fill in all the pieces.
if (interpreter_frame != NULL) {
int monitor_size = 0;
int top_frame_size = 0;
frame_size_helper(method->max_stack(), monitors, monitor_size, top_frame_size);
intptr_t sp = (intptr_t)interpreter_frame->sp();
intptr_t fp = *(intptr_t *)sp;
@@ -3040,13 +3063,11 @@ int AbstractInterpreter::layout_activation(Method* method,
stack,
stack_base,
monitor_base,
(intptr_t*)(((intptr_t)fp)-top_frame_size),
(intptr_t*)(((intptr_t)fp) - top_frame_size),
is_top_frame);
BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address,
interpreter_frame->fp());
}
return frame_size/BytesPerWord;
}
#endif // CC_INTERP
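The hunk above splits the old layout_activation() into a pure size query (size_activation) and a fill-in pass that now returns void. A self-contained toy of that two-phase contract, with hypothetical sizing and illustrative names, showing why the fill-in pass no longer needs an interpreter_frame == NULL branch:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Phase 1 stand-in: a pure size query, no frame pointer needed.
    static int size_activation_sketch(int temps, int monitors) {
      return temps + 2 * monitors + 4;  // hypothetical slot count
    }

    // Phase 2 stand-in: fill in a frame that was already sized by phase 1.
    static void layout_activation_sketch(std::vector<intptr_t>& frame) {
      for (auto& slot : frame) slot = 0;
    }

    int main() {
      int slots = size_activation_sketch(3, 1);  // deopt code asks for the size first
      std::vector<intptr_t> frame(slots);        // ...reserves the space...
      layout_activation_sketch(frame);           // ...then has the frame filled in
      assert(frame.size() == 9);
      return 0;
    }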

View file

@@ -1,6 +1,6 @@
//
// Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
// Copyright 2012, 2013 SAP AG. All rights reserved.
// Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
// Copyright 2012, 2014 SAP AG. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@@ -1362,8 +1362,8 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
MacroAssembler _masm(&cbuf);
const long framesize = ((long)C->frame_slots()) << LogBytesPerInt;
assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");
const long framesize = C->frame_size_in_bytes();
assert(framesize % (2 * wordSize) == 0, "must preserve 2*wordSize alignment");
const bool method_is_frameless = false /* TODO: PPC port C->is_frameless_method()*/;
@@ -1388,19 +1388,22 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
// careful, because some VM calls (such as call site linkage) can
// use several kilobytes of stack. But the stack safety zone should
// account for that. See bugs 4446381, 4468289, 4497237.
if (C->need_stack_bang(framesize) && UseStackBanging) {
int bangsize = C->bang_size_in_bytes();
assert(bangsize >= framesize || bangsize <= 0, "stack bang size incorrect");
if (C->need_stack_bang(bangsize) && UseStackBanging) {
// Unfortunately we cannot use the function provided in
// assembler.cpp as we have to emulate the pipes. So I had to
// insert the code of generate_stack_overflow_check(), see
// assembler.cpp for some illuminative comments.
const int page_size = os::vm_page_size();
int bang_end = StackShadowPages*page_size;
int bang_end = StackShadowPages * page_size;
// This is how far the previous frame's stack banging extended.
const int bang_end_safe = bang_end;
if (framesize > page_size) {
bang_end += framesize;
if (bangsize > page_size) {
bang_end += bangsize;
}
int bang_offset = bang_end_safe;
@@ -1446,7 +1449,7 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
unsigned int bytes = (unsigned int)framesize;
long offset = Assembler::align_addr(bytes, frame::alignment_in_bytes);
ciMethod *currMethod = C -> method();
ciMethod *currMethod = C->method();
// Optimized version for most common case.
if (UsePower6SchedulerPPC64 &&

View file

@@ -1334,21 +1334,42 @@ bool AbstractInterpreter::can_be_compiled(methodHandle m) {
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
const int max_alignment_size = 2;
const int abi_scratch = frame::abi_reg_args_size;
return method->max_locals() + method->max_stack() + frame::interpreter_frame_monitor_size() + max_alignment_size + abi_scratch;
return method->max_locals() + method->max_stack() +
frame::interpreter_frame_monitor_size() + max_alignment_size + abi_scratch;
}
// Fills a skeletal interpreter frame generated during deoptimizations
// and returns the frame size in slots.
// Returns number of stackElementWords needed for the interpreter frame with the
// given sections.
// This overestimates the stack by one slot in case of alignments.
int AbstractInterpreter::size_activation(int max_stack,
int temps,
int extra_args,
int monitors,
int callee_params,
int callee_locals,
bool is_top_frame) {
// Note: This calculation must exactly parallel the frame setup
// in AbstractInterpreterGenerator::generate_method_entry.
assert(Interpreter::stackElementWords == 1, "sanity");
const int max_alignment_space = StackAlignmentInBytes / Interpreter::stackElementSize;
const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :
(frame::abi_minframe_size / Interpreter::stackElementSize);
const int size =
max_stack +
(callee_locals - callee_params) +
monitors * frame::interpreter_frame_monitor_size() +
max_alignment_space +
abi_scratch +
frame::ijava_state_size / Interpreter::stackElementSize;
// Fixed size of an interpreter frame, align to 16-byte.
return (size & -2);
}
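The final size & -2 clears the low bit of the slot count; with 8-byte slots that rounds the frame down to a 16-byte multiple, which is safe only because the calculation above overestimates by one slot. A worked instance with hypothetical numbers:

    #include <cassert>

    int main() {
      int size = 37;            // slots of 8 bytes each (hypothetical)
      int aligned = size & -2;  // 36 slots == 288 bytes, a 16-byte multiple
      assert(aligned % 2 == 0 && aligned <= size && size - aligned <= 1);
      return 0;
    }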
// Fills a skeletal interpreter frame generated during deoptimizations.
//
// Parameters:
//
// interpreter_frame == NULL:
// Only calculate the size of an interpreter activation, no actual layout.
// Note: This calculation must exactly parallel the frame setup
// in TemplateInterpreter::generate_normal_entry. But it does not
// account for the SP alignment, which might further increase the
// frame size, depending on FP.
//
// interpreter_frame != NULL:
// set up the method, locals, and monitors.
// The frame interpreter_frame, if not NULL, is guaranteed to be the
@@ -1365,33 +1386,21 @@ int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
// the arguments off advance the esp by dummy popframe_extra_args slots.
// Popping off those will establish the stack layout as it was before the call.
//
int AbstractInterpreter::layout_activation(Method* method,
void AbstractInterpreter::layout_activation(Method* method,
int tempcount,
int popframe_extra_args,
int moncount,
int caller_actual_parameters,
int callee_param_count,
int callee_locals,
int callee_locals_count,
frame* caller,
frame* interpreter_frame,
bool is_top_frame,
bool is_bottom_frame) {
const int max_alignment_space = 2;
const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :
(frame::abi_minframe_size / Interpreter::stackElementSize) ;
const int conservative_framesize_in_slots =
method->max_stack() + callee_locals - callee_param_count +
(moncount * frame::interpreter_frame_monitor_size()) + max_alignment_space +
abi_scratch + frame::ijava_state_size / Interpreter::stackElementSize;
(frame::abi_minframe_size / Interpreter::stackElementSize);
assert(!is_top_frame || conservative_framesize_in_slots * 8 > frame::abi_reg_args_size + frame::ijava_state_size, "frame too small");
if (interpreter_frame == NULL) {
// Since we don't know the exact alignment, we return the conservative size.
return (conservative_framesize_in_slots & -2);
} else {
// Now we know our caller, calc the exact frame layout and size.
intptr_t* locals_base = (caller->is_interpreted_frame()) ?
caller->interpreter_frame_esp() + caller_actual_parameters :
caller->sp() + method->max_locals() - 1 + (frame::abi_minframe_size / Interpreter::stackElementSize) ;
@@ -1400,7 +1409,7 @@ int AbstractInterpreter::layout_activation(Method* method,
intptr_t* monitor = monitor_base - (moncount * frame::interpreter_frame_monitor_size());
intptr_t* esp_base = monitor - 1;
intptr_t* esp = esp_base - tempcount - popframe_extra_args;
intptr_t* sp = (intptr_t *) (((intptr_t) (esp_base- callee_locals + callee_param_count - method->max_stack()- abi_scratch)) & -StackAlignmentInBytes);
intptr_t* sp = (intptr_t *) (((intptr_t) (esp_base - callee_locals_count + callee_param_count - method->max_stack()- abi_scratch)) & -StackAlignmentInBytes);
intptr_t* sender_sp = caller->sp() + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize;
intptr_t* top_frame_sp = is_top_frame ? sp : sp + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize;
@@ -1413,12 +1422,6 @@ int AbstractInterpreter::layout_activation(Method* method,
if (!is_bottom_frame) {
interpreter_frame->interpreter_frame_set_sender_sp(sender_sp);
}
int framesize_in_slots = caller->sp() - sp;
assert(!is_top_frame ||framesize_in_slots >= (frame::abi_reg_args_size / Interpreter::stackElementSize) + frame::ijava_state_size / Interpreter::stackElementSize, "frame too small");
assert(framesize_in_slots <= conservative_framesize_in_slots, "exact frame size must be smaller than the conservative size!");
return framesize_in_slots;
}
}
// =============================================================================

View file

@@ -630,11 +630,20 @@ class Assembler : public AbstractAssembler {
}
protected:
// Insert a nop if the previous is cbcond
void insert_nop_after_cbcond() {
if (UseCBCond && cbcond_before()) {
nop();
}
}
// Delay slot helpers
// cti is called when emitting a control-transfer instruction,
// BEFORE doing the emitting.
// Only effective when assertion-checking is enabled.
void cti() {
// A cbcond instruction immediately followed by a CTI
// instruction introduces pipeline stalls; we need to avoid that.
no_cbcond_before();
#ifdef CHECK_DELAY
assert_not_delayed("cti should not be in delay slot");
#endif
@@ -658,7 +667,6 @@ class Assembler : public AbstractAssembler {
void no_cbcond_before() {
assert(offset() == 0 || !cbcond_before(), "cbcond should not follow another cbcond");
}
public:
bool use_cbcond(Label& L) {

View file

@@ -54,33 +54,33 @@ inline void Assembler::emit_data(int x, RelocationHolder const& rspec) {
inline void Assembler::add(Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::add(Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bpr_op2) | wdisp16(intptr_t(d), intptr_t(pc())) | predict(p) | rs1(s1), rt); has_delay_slot(); }
inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, Label& L) { bpr( c, a, p, s1, target(L)); }
inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt ) { v9_only(); insert_nop_after_cbcond(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bpr_op2) | wdisp16(intptr_t(d), intptr_t(pc())) | predict(p) | rs1(s1), rt); has_delay_slot(); }
inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, Label& L) { insert_nop_after_cbcond(); bpr( c, a, p, s1, target(L)); }
inline void Assembler::fb( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
inline void Assembler::fb( Condition c, bool a, Label& L ) { fb(c, a, target(L)); }
inline void Assembler::fb( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); insert_nop_after_cbcond(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
inline void Assembler::fb( Condition c, bool a, Label& L ) { insert_nop_after_cbcond(); fb(c, a, target(L)); }
inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fbp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) { fbp(c, a, cc, p, target(L)); }
inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); insert_nop_after_cbcond(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fbp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) { insert_nop_after_cbcond(); fbp(c, a, cc, p, target(L)); }
inline void Assembler::br( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(br_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
inline void Assembler::br( Condition c, bool a, Label& L ) { br(c, a, target(L)); }
inline void Assembler::br( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); insert_nop_after_cbcond(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(br_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
inline void Assembler::br( Condition c, bool a, Label& L ) { insert_nop_after_cbcond(); br(c, a, target(L)); }
inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) { bp(c, a, cc, p, target(L)); }
inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); insert_nop_after_cbcond(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) { insert_nop_after_cbcond(); bp(c, a, cc, p, target(L)); }
// compare and branch
inline void Assembler::cbcond(Condition c, CC cc, Register s1, Register s2, Label& L) { cti(); no_cbcond_before(); emit_data(op(branch_op) | cond_cbcond(c) | op2(bpr_op2) | branchcc(cc) | wdisp10(intptr_t(target(L)), intptr_t(pc())) | rs1(s1) | rs2(s2)); }
inline void Assembler::cbcond(Condition c, CC cc, Register s1, int simm5, Label& L) { cti(); no_cbcond_before(); emit_data(op(branch_op) | cond_cbcond(c) | op2(bpr_op2) | branchcc(cc) | wdisp10(intptr_t(target(L)), intptr_t(pc())) | rs1(s1) | immed(true) | simm(simm5, 5)); }
inline void Assembler::call( address d, relocInfo::relocType rt ) { cti(); emit_data( op(call_op) | wdisp(intptr_t(d), intptr_t(pc()), 30), rt); has_delay_slot(); assert(rt != relocInfo::virtual_call_type, "must use virtual_call_Relocation::spec"); }
inline void Assembler::call( Label& L, relocInfo::relocType rt ) { call( target(L), rt); }
inline void Assembler::call( address d, relocInfo::relocType rt ) { insert_nop_after_cbcond(); cti(); emit_data( op(call_op) | wdisp(intptr_t(d), intptr_t(pc()), 30), rt); has_delay_slot(); assert(rt != relocInfo::virtual_call_type, "must use virtual_call_Relocation::spec"); }
inline void Assembler::call( Label& L, relocInfo::relocType rt ) { insert_nop_after_cbcond(); call( target(L), rt); }
inline void Assembler::flush( Register s1, Register s2) { emit_int32( op(arith_op) | op3(flush_op3) | rs1(s1) | rs2(s2)); }
inline void Assembler::flush( Register s1, int simm13a) { emit_data( op(arith_op) | op3(flush_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::jmpl( Register s1, Register s2, Register d ) { cti(); emit_int32( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { cti(); emit_data( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); has_delay_slot(); }
inline void Assembler::jmpl( Register s1, Register s2, Register d ) { insert_nop_after_cbcond(); cti(); emit_int32( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { insert_nop_after_cbcond(); cti(); emit_data( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); has_delay_slot(); }
inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); }
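Every branch and call emitter above now leads with insert_nop_after_cbcond(). A self-contained toy of the hazard rule being enforced (a CTI must not immediately follow a cbcond); all names are illustrative, not HotSpot's:

    #include <cassert>
    #include <string>
    #include <vector>

    struct ToyAssembler {
      std::vector<std::string> stream;

      bool cbcond_before() const {
        return !stream.empty() && stream.back() == "cbcond";
      }
      void insert_nop_after_cbcond() {
        if (cbcond_before()) stream.push_back("nop");  // break the cbcond->CTI pair
      }
      void cbcond() { stream.push_back("cbcond"); }
      void br() {  // stands for any control-transfer instruction
        insert_nop_after_cbcond();
        stream.push_back("br");
      }
    };

    int main() {
      ToyAssembler a;
      a.cbcond();
      a.br();  // the separating nop is inserted automatically
      assert((a.stream == std::vector<std::string>{"cbcond", "nop", "br"}));
      return 0;
    }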

View file

@@ -152,7 +152,7 @@ LIR_Opr LIR_Assembler::osrBufferPointer() {
}
int LIR_Assembler::initial_frame_size_in_bytes() {
int LIR_Assembler::initial_frame_size_in_bytes() const {
return in_bytes(frame_map()->framesize_in_bytes());
}
@@ -182,7 +182,7 @@ void LIR_Assembler::osr_entry() {
int number_of_locks = entry_state->locks_size();
// Create a frame for the compiled activation.
__ build_frame(initial_frame_size_in_bytes());
__ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
// OSR buffer is
//

View file

@@ -55,9 +55,9 @@ void C1_MacroAssembler::explicit_null_check(Register base) {
}
void C1_MacroAssembler::build_frame(int frame_size_in_bytes) {
generate_stack_overflow_check(frame_size_in_bytes);
void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
generate_stack_overflow_check(bang_size_in_bytes);
// Create the frame.
save_frame_c1(frame_size_in_bytes);
}

View file

@@ -2183,8 +2183,45 @@ void BytecodeInterpreter::pd_layout_interpreterState(interpreterState istate, ad
istate->_last_Java_pc = (intptr_t*) last_Java_pc;
}
static int frame_size_helper(int max_stack,
int moncount,
int callee_param_size,
int callee_locals_size,
bool is_top_frame,
int& monitor_size,
int& full_frame_words) {
int extra_locals_size = callee_locals_size - callee_param_size;
monitor_size = (sizeof(BasicObjectLock) * moncount) / wordSize;
full_frame_words = size_activation_helper(extra_locals_size, max_stack, monitor_size);
int short_frame_words = size_activation_helper(extra_locals_size, max_stack, monitor_size);
int frame_words = is_top_frame ? full_frame_words : short_frame_words;
int AbstractInterpreter::layout_activation(Method* method,
return frame_words;
}
int AbstractInterpreter::size_activation(int max_stack,
int tempcount,
int extra_args,
int moncount,
int callee_param_size,
int callee_locals_size,
bool is_top_frame) {
assert(extra_args == 0, "NEED TO FIX");
// NOTE: return size is in words not bytes
// Calculate the amount our frame will be adjusted by the callee. For top frame
// this is zero.
// NOTE: ia64 seems to do this wrong (or at least backwards) in that it
// calculates the extra locals based on itself. Not what the callee does
// to it. So it ignores last_frame_adjust value. Seems suspicious as far
// as getting sender_sp correct.
int unused_monitor_size = 0;
int unused_full_frame_words = 0;
return frame_size_helper(max_stack, moncount, callee_param_size, callee_locals_size, is_top_frame,
unused_monitor_size, unused_full_frame_words);
}
void AbstractInterpreter::layout_activation(Method* method,
int tempcount, // Number of slots on java expression stack in use
int popframe_extra_args,
int moncount, // Number of active monitors
@@ -2195,39 +2232,26 @@ int AbstractInterpreter::layout_activation(Method* method,
frame* interpreter_frame,
bool is_top_frame,
bool is_bottom_frame) {
assert(popframe_extra_args == 0, "NEED TO FIX");
// NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
// does as far as allocating an interpreter frame.
// If interpreter_frame!=NULL, set up the method, locals, and monitors.
// The frame interpreter_frame, if not NULL, is guaranteed to be the right size,
// as determined by a previous call to this method.
// Set up the method, locals, and monitors.
// The frame interpreter_frame is guaranteed to be the right size,
// as determined by a previous call to the size_activation() method.
// It is also guaranteed to be walkable even though it is in a skeletal state
// NOTE: return size is in words not bytes
// NOTE: tempcount is the current size of the java expression stack. For topmost
// frames we will allocate a full-sized expression stack and not the cut-back
// version that non-top frames have.
// Calculate the amount our frame will be adjusted by the callee. For top frame
// this is zero.
// NOTE: ia64 seems to do this wrong (or at least backwards) in that it
// calculates the extra locals based on itself. Not what the callee does
// to it. So it ignores last_frame_adjust value. Seems suspicious as far
// as getting sender_sp correct.
int extra_locals_size = callee_locals_size - callee_param_size;
int monitor_size = (sizeof(BasicObjectLock) * moncount) / wordSize;
int full_frame_words = size_activation_helper(extra_locals_size, method->max_stack(), monitor_size);
int short_frame_words = size_activation_helper(extra_locals_size, method->max_stack(), monitor_size);
int frame_words = is_top_frame ? full_frame_words : short_frame_words;
int monitor_size = 0;
int full_frame_words = 0;
int frame_words = frame_size_helper(method->max_stack(), moncount, callee_param_size, callee_locals_size,
is_top_frame, monitor_size, full_frame_words);
/*
if we actually have a frame to layout we must now fill in all the pieces. This means both
We must now fill in all the pieces of the frame. This means both
the interpreterState and the registers.
*/
if (interpreter_frame != NULL) {
// MUCHO HACK
@@ -2294,9 +2318,6 @@ int AbstractInterpreter::layout_activation(Method* method,
is_top_frame);
BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address, interpreter_frame->fp());
}
return frame_words;
}
#endif // CC_INTERP

View file

@@ -3531,7 +3531,7 @@ void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp,
// was post-decremented.) Skip this address by starting at i=1, and
// touch a few more pages below. N.B. It is important to touch all
// the way down to and including i=StackShadowPages.
for (int i = 1; i <= StackShadowPages; i++) {
for (int i = 1; i < StackShadowPages; i++) {
set((-i*offset)+STACK_BIAS, Rscratch);
st(G0, Rtsp, Rscratch);
}

View file

@@ -233,6 +233,7 @@ inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, reloc
}
inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
insert_nop_after_cbcond();
br(c, a, p, target(L));
}
@@ -248,6 +249,7 @@ inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relo
}
inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
insert_nop_after_cbcond();
brx(c, a, p, target(L));
}
@@ -269,6 +271,7 @@ inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, reloc
}
inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
insert_nop_after_cbcond();
fb(c, a, p, target(L));
}
@@ -318,6 +321,7 @@ inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
}
inline void MacroAssembler::call( Label& L, relocInfo::relocType rt ) {
insert_nop_after_cbcond();
MacroAssembler::call( target(L), rt);
}

View file

@@ -3355,13 +3355,16 @@ static void make_new_frames(MacroAssembler* masm, bool deopt) {
Register O4array_size = O4;
Label loop;
// Before we make new frames, check to see if stack is available.
// Do this after the caller's return address is on top of stack
#ifdef ASSERT
// Compilers generate code that bang the stack by as much as the
// interpreter would need. So this stack banging should never
// trigger a fault. Verify that it does not on non product builds.
if (UseStackBanging) {
// Get total frame size for interpreted frames
__ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
__ bang_stack_size(O4, O3, G3_scratch);
}
#endif
__ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
__ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
@@ -3409,9 +3412,11 @@ void SharedRuntime::generate_deopt_blob() {
ResourceMark rm;
// setup code generation tools
int pad = VerifyThread ? 512 : 0;// Extra slop space for more verify code
#ifdef ASSERT
if (UseStackBanging) {
pad += StackShadowPages*16 + 32;
}
#endif
#ifdef _LP64
CodeBuffer buffer("deopt_blob", 2100+pad, 512);
#else
@@ -3632,9 +3637,11 @@ void SharedRuntime::generate_uncommon_trap_blob() {
ResourceMark rm;
// setup code generation tools
int pad = VerifyThread ? 512 : 0;
#ifdef ASSERT
if (UseStackBanging) {
pad += StackShadowPages*16 + 32;
}
#endif
#ifdef _LP64
CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
#else

View file

@@ -1193,15 +1193,16 @@ void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
st->print_cr("Verify_Thread"); st->print("\t");
}
size_t framesize = C->frame_slots() << LogBytesPerInt;
size_t framesize = C->frame_size_in_bytes();
int bangsize = C->bang_size_in_bytes();
// Calls to C2R adapters often do not accept exceptional returns.
// We require that their callers must bang for them. But be careful, because
// some VM calls (such as call site linkage) can use several kilobytes of
// stack. But the stack safety zone should account for that.
// See bugs 4446381, 4468289, 4497237.
if (C->need_stack_bang(framesize)) {
st->print_cr("! stack bang"); st->print("\t");
if (C->need_stack_bang(bangsize)) {
st->print_cr("! stack bang (%d bytes)", bangsize); st->print("\t");
}
if (Assembler::is_simm13(-framesize)) {
@@ -1225,17 +1226,18 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
__ verify_thread();
size_t framesize = C->frame_slots() << LogBytesPerInt;
size_t framesize = C->frame_size_in_bytes();
assert(framesize >= 16*wordSize, "must have room for reg. save area");
assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");
int bangsize = C->bang_size_in_bytes();
// Calls to C2R adapters often do not accept exceptional returns.
// We require that their callers must bang for them. But be careful, because
// some VM calls (such as call site linkage) can use several kilobytes of
// stack. But the stack safety zone should account for that.
// See bugs 4446381, 4468289, 4497237.
if (C->need_stack_bang(framesize)) {
__ generate_stack_overflow_check(framesize);
if (C->need_stack_bang(bangsize)) {
__ generate_stack_overflow_check(bangsize);
}
if (Assembler::is_simm13(-framesize)) {
@@ -1268,7 +1270,7 @@ int MachPrologNode::reloc() const {
void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
Compile* C = ra_->C;
if( do_polling() && ra_->C->is_method_compilation() ) {
if(do_polling() && ra_->C->is_method_compilation()) {
st->print("SETHI #PollAddr,L0\t! Load Polling address\n\t");
#ifdef _LP64
st->print("LDX [L0],G0\t!Poll for Safepointing\n\t");
@@ -1277,8 +1279,12 @@ void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
#endif
}
if( do_polling() )
if(do_polling()) {
if (UseCBCond && !ra_->C->is_method_compilation()) {
st->print("NOP\n\t");
}
st->print("RET\n\t");
}
st->print("RESTORE");
}
@@ -1291,15 +1297,20 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
__ verify_thread();
// If this does safepoint polling, then do it here
if( do_polling() && ra_->C->is_method_compilation() ) {
if(do_polling() && ra_->C->is_method_compilation()) {
AddressLiteral polling_page(os::get_polling_page());
__ sethi(polling_page, L0);
__ relocate(relocInfo::poll_return_type);
__ ld_ptr( L0, 0, G0 );
__ ld_ptr(L0, 0, G0);
}
// If this is a return, then stuff the restore in the delay slot
if( do_polling() ) {
if(do_polling()) {
if (UseCBCond && !ra_->C->is_method_compilation()) {
// Insert extra padding for the case when the epilogue is preceded by
// a cbcond jump, which can't be followed by a CTI instruction
__ nop();
}
__ ret();
__ delayed()->restore();
} else {
@@ -2538,7 +2549,7 @@ encode %{
enc_class call_epilog %{
if( VerifyStackAtCalls ) {
MacroAssembler _masm(&cbuf);
int framesize = ra_->C->frame_slots() << LogBytesPerInt;
int framesize = ra_->C->frame_size_in_bytes();
Register temp_reg = G3;
__ add(SP, framesize, temp_reg);
__ cmp(temp_reg, FP);
@@ -3330,7 +3341,18 @@ op_attrib op_cost(1); // Required cost attribute
//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute
ins_attrib ins_size(32); // Required size attribute (in bits)
ins_attrib ins_avoid_back_to_back(0); // instruction should not be generated back to back
// avoid_back_to_back attribute is an expression that must return
// one of the following values defined in MachNode:
// AVOID_NONE - instruction can be placed anywhere
// AVOID_BEFORE - instruction cannot be placed after an
// instruction with MachNode::AVOID_AFTER
// AVOID_AFTER - the next instruction cannot be the one
// with MachNode::AVOID_BEFORE
// AVOID_BEFORE_AND_AFTER - BEFORE and AFTER attributes at
// the same time
ins_attrib ins_avoid_back_to_back(MachNode::AVOID_NONE);
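A self-contained sketch of the pairwise rule the comment above describes, assuming the four values are bit flags so AVOID_BEFORE_AND_AFTER is the OR of the other two (illustrative encodings, not necessarily MachNode's):

    #include <cassert>

    enum AvoidFlags {
      AVOID_NONE = 0,
      AVOID_BEFORE = 1,
      AVOID_AFTER = 2,
      AVOID_BEFORE_AND_AFTER = AVOID_BEFORE | AVOID_AFTER
    };

    // A nop must separate two instructions when the first must not be followed
    // back-to-back and the second must not follow back-to-back.
    static bool needs_separating_nop(int prev, int cur) {
      return (prev & AVOID_AFTER) != 0 && (cur & AVOID_BEFORE) != 0;
    }

    int main() {
      assert(needs_separating_nop(AVOID_BEFORE_AND_AFTER, AVOID_BEFORE));  // e.g. cbcond then branch
      assert(!needs_separating_nop(AVOID_NONE, AVOID_BEFORE));
      return 0;
    }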
ins_attrib ins_short_branch(0); // Required flag: is this instruction a
// non-matching short branch variant of some
// long branch?
@@ -6630,6 +6652,7 @@ instruct encodeHeapOop(iRegN dst, iRegP src) %{
ins_encode %{
__ encode_heap_oop($src$$Register, $dst$$Register);
%}
ins_avoid_back_to_back(Universe::narrow_oop_base() == NULL ? AVOID_NONE : AVOID_BEFORE);
ins_pipe(ialu_reg);
%}
@@ -9199,6 +9222,7 @@ instruct branch(label labl) %{
__ ba(*L);
__ delayed()->nop();
%}
ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(br);
%}
@@ -9217,7 +9241,7 @@ instruct branch_short(label labl) %{
__ ba_short(*L);
%}
ins_short_branch(1);
ins_avoid_back_to_back(1);
ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
ins_pipe(cbcond_reg_imm);
%}
@@ -9231,6 +9255,7 @@ instruct branchCon(cmpOp cmp, flagsReg icc, label labl) %{
format %{ "BP$cmp $icc,$labl" %}
// Prim = bits 24-22, Secnd = bits 31-30
ins_encode( enc_bp( labl, cmp, icc ) );
ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(br_cc);
%}
@@ -9242,6 +9267,7 @@ instruct branchConU(cmpOpU cmp, flagsRegU icc, label labl) %{
format %{ "BP$cmp $icc,$labl" %}
// Prim = bits 24-22, Secnd = bits 31-30
ins_encode( enc_bp( labl, cmp, icc ) );
ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(br_cc);
%}
@@ -9260,6 +9286,7 @@ instruct branchConP(cmpOpP cmp, flagsRegP pcc, label labl) %{
__ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L);
__ delayed()->nop();
%}
ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(br_cc);
%}
@@ -9278,6 +9305,7 @@ instruct branchConF(cmpOpF cmp, flagsRegF fcc, label labl) %{
__ fbp( (Assembler::Condition)($cmp$$cmpcode), false, (Assembler::CC)($fcc$$reg), predict_taken, *L);
__ delayed()->nop();
%}
ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(br_fcc);
%}
@@ -9290,6 +9318,7 @@ instruct branchLoopEnd(cmpOp cmp, flagsReg icc, label labl) %{
format %{ "BP$cmp $icc,$labl\t! Loop end" %}
// Prim = bits 24-22, Secnd = bits 31-30
ins_encode( enc_bp( labl, cmp, icc ) );
ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(br_cc);
%}
@@ -9302,6 +9331,7 @@ instruct branchLoopEndU(cmpOpU cmp, flagsRegU icc, label labl) %{
format %{ "BP$cmp $icc,$labl\t! Loop end" %}
// Prim = bits 24-22, Secnd = bits 31-30
ins_encode( enc_bp( labl, cmp, icc ) );
ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(br_cc);
%}
@@ -9552,7 +9582,7 @@ instruct cmpI_reg_branch_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flag
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
%}
ins_short_branch(1);
ins_avoid_back_to_back(1);
ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
ins_pipe(cbcond_reg_reg);
%}
@@ -9570,7 +9600,7 @@ instruct cmpI_imm_branch_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flag
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
%}
ins_short_branch(1);
ins_avoid_back_to_back(1);
ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
ins_pipe(cbcond_reg_imm);
%}
@@ -9588,7 +9618,7 @@ instruct cmpU_reg_branch_short(cmpOpU cmp, iRegI op1, iRegI op2, label labl, fla
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
%}
ins_short_branch(1);
ins_avoid_back_to_back(1);
ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
ins_pipe(cbcond_reg_reg);
%}
@@ -9606,7 +9636,7 @@ instruct cmpU_imm_branch_short(cmpOpU cmp, iRegI op1, immI5 op2, label labl, fla
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
%}
ins_short_branch(1);
ins_avoid_back_to_back(1);
ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
ins_pipe(cbcond_reg_imm);
%}
@@ -9624,7 +9654,7 @@ instruct cmpL_reg_branch_short(cmpOp cmp, iRegL op1, iRegL op2, label labl, flag
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$Register, *L);
%}
ins_short_branch(1);
ins_avoid_back_to_back(1);
ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
ins_pipe(cbcond_reg_reg);
%}
@@ -9642,7 +9672,7 @@ instruct cmpL_imm_branch_short(cmpOp cmp, iRegL op1, immL5 op2, label labl, flag
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$constant, *L);
%}
ins_short_branch(1);
ins_avoid_back_to_back(1);
ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
ins_pipe(cbcond_reg_imm);
%}
@@ -9665,7 +9695,7 @@ instruct cmpP_reg_branch_short(cmpOpP cmp, iRegP op1, iRegP op2, label labl, fla
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, $op2$$Register, *L);
%}
ins_short_branch(1);
ins_avoid_back_to_back(1);
ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
ins_pipe(cbcond_reg_reg);
%}
@@ -9687,7 +9717,7 @@ instruct cmpP_null_branch_short(cmpOpP cmp, iRegP op1, immP0 null, label labl, f
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, G0, *L);
%}
ins_short_branch(1);
ins_avoid_back_to_back(1);
ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
ins_pipe(cbcond_reg_reg);
%}
@@ -9705,7 +9735,7 @@ instruct cmpN_reg_branch_short(cmpOp cmp, iRegN op1, iRegN op2, label labl, flag
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
%}
ins_short_branch(1);
ins_avoid_back_to_back(1);
ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
ins_pipe(cbcond_reg_reg);
%}
@@ -9723,7 +9753,7 @@ instruct cmpN_null_branch_short(cmpOp cmp, iRegN op1, immN0 null, label labl, fl
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, G0, *L);
%}
ins_short_branch(1);
ins_avoid_back_to_back(1);
ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
ins_pipe(cbcond_reg_reg);
%}
@@ -9742,7 +9772,7 @@ instruct cmpI_reg_branchLoopEnd_short(cmpOp cmp, iRegI op1, iRegI op2, label lab
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
%}
ins_short_branch(1);
ins_avoid_back_to_back(1);
ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
ins_pipe(cbcond_reg_reg);
%}
@@ -9760,7 +9790,7 @@ instruct cmpI_imm_branchLoopEnd_short(cmpOp cmp, iRegI op1, immI5 op2, label lab
__ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
%}
ins_short_branch(1);
ins_avoid_back_to_back(1);
ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
ins_pipe(cbcond_reg_imm);
%}
@@ -9777,6 +9807,7 @@ instruct branchCon_regI(cmpOp_reg cmp, iRegI op1, immI0 zero, label labl) %{
ins_cost(BRANCH_COST);
format %{ "BR$cmp $op1,$labl" %}
ins_encode( enc_bpr( labl, cmp, op1 ) );
ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(br_reg);
%}
@@ -9789,6 +9820,7 @@ instruct branchCon_regP(cmpOp_reg cmp, iRegP op1, immP0 null, label labl) %{
ins_cost(BRANCH_COST);
format %{ "BR$cmp $op1,$labl" %}
ins_encode( enc_bpr( labl, cmp, op1 ) );
ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(br_reg);
%}
@@ -9801,6 +9833,7 @@ instruct branchCon_regL(cmpOp_reg cmp, iRegL op1, immL0 zero, label labl) %{
ins_cost(BRANCH_COST);
format %{ "BR$cmp $op1,$labl" %}
ins_encode( enc_bpr( labl, cmp, op1 ) );
ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(br_reg);
%}
@@ -9841,6 +9874,7 @@ instruct branchCon_long(cmpOp cmp, flagsRegL xcc, label labl) %{
__ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
__ delayed()->nop();
%}
ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(br_cc);
%}
@@ -9968,6 +10002,7 @@ instruct CallStaticJavaDirect( method meth ) %{
ins_cost(CALL_COST);
format %{ "CALL,static ; NOP ==> " %}
ins_encode( Java_Static_Call( meth ), call_epilog );
ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(simple_call);
%}
@@ -10004,6 +10039,7 @@ instruct CallRuntimeDirect(method meth, l7RegP l7) %{
format %{ "CALL,runtime" %}
ins_encode( Java_To_Runtime( meth ),
call_epilog, adjust_long_from_native_call );
ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(simple_call);
%}
@@ -10016,6 +10052,7 @@ instruct CallLeafDirect(method meth, l7RegP l7) %{
ins_encode( Java_To_Runtime( meth ),
call_epilog,
adjust_long_from_native_call );
ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(simple_call);
%}
@@ -10028,6 +10065,7 @@ instruct CallLeafNoFPDirect(method meth, l7RegP l7) %{
ins_encode( Java_To_Runtime( meth ),
call_epilog,
adjust_long_from_native_call );
ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(simple_call);
%}
@@ -10041,6 +10079,7 @@ instruct TailCalljmpInd(g3RegP jump_target, inline_cache_regP method_oop) %{
ins_cost(CALL_COST);
format %{ "Jmp $jump_target ; NOP \t! $method_oop holds method oop" %}
ins_encode(form_jmpl(jump_target));
ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(tail_call);
%}
@@ -10072,6 +10111,7 @@ instruct tailjmpInd(g1RegP jump_target, i0RegP ex_oop) %{
// opcode(Assembler::jmpl_op3, Assembler::arith_op);
// The hack duplicates the exception oop into G3, so that CreateEx can use it there.
// ins_encode( form3_rs1_simm13_rd( jump_target, 0x00, R_G0 ), move_return_pc_to_o1() );
ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(tail_call);
%}
@@ -10102,6 +10142,7 @@ instruct RethrowException()
// use the following format syntax
format %{ "Jmp rethrow_stub" %}
ins_encode(enc_rethrow);
ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(tail_call);
%}
@@ -10130,6 +10171,7 @@ instruct partialSubtypeCheck( o0RegP index, o1RegP sub, o2RegP super, flagsRegP
ins_cost(DEFAULT_COST*10);
format %{ "CALL PartialSubtypeCheck\n\tNOP" %}
ins_encode( enc_PartialSubtypeCheck() );
ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(partial_subtype_check_pipe);
%}
@@ -10139,6 +10181,7 @@ instruct partialSubtypeCheck_vs_zero( flagsRegP pcc, o1RegP sub, o2RegP super, i
ins_cost(DEFAULT_COST*10);
format %{ "CALL PartialSubtypeCheck\n\tNOP\t# (sets condition codes)" %}
ins_encode( enc_PartialSubtypeCheck() );
ins_avoid_back_to_back(AVOID_BEFORE);
ins_pipe(partial_subtype_check_pipe);
%}

View file

@@ -1567,34 +1567,20 @@ int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
monitor_size) + call_stub_size;
}
int AbstractInterpreter::layout_activation(Method* method,
int tempcount,
int popframe_extra_args,
int moncount,
int caller_actual_parameters,
int callee_param_count,
int callee_local_count,
frame* caller,
frame* interpreter_frame,
bool is_top_frame,
bool is_bottom_frame) {
int AbstractInterpreter::size_activation(int max_stack,
int temps,
int extra_args,
int monitors,
int callee_params,
int callee_locals,
bool is_top_frame) {
// Note: This calculation must exactly parallel the frame setup
// in InterpreterGenerator::generate_fixed_frame.
// If f!=NULL, set up the following variables:
// - Lmethod
// - Llocals
// - Lmonitors (to the indicated number of monitors)
// - Lesp (to the indicated number of temps)
// The frame f (if not NULL) on entry is a description of the caller of the frame
// we are about to layout. We are guaranteed that we will be able to fill in a
// new interpreter frame as its callee (i.e. the stack space is allocated and
// the amount was determined by an earlier call to this method with f == NULL).
// On return f (if not NULL) will describe the interpreter frame we just laid out.
int monitor_size = moncount * frame::interpreter_frame_monitor_size();
int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
int monitor_size = monitors * frame::interpreter_frame_monitor_size();
assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");
//
// Note: if you look closely this appears to be doing something much different
// than generate_fixed_frame. What is happening is this. On sparc we have to do
@@ -1619,17 +1605,45 @@ int AbstractInterpreter::layout_activation(Method* method,
// there is no sense in messing with working code.
//
int rounded_cls = round_to((callee_local_count - callee_param_count), WordsPerLong);
int rounded_cls = round_to((callee_locals - callee_params), WordsPerLong);
assert(rounded_cls == round_to(rounded_cls, WordsPerLong), "must align");
int raw_frame_size = size_activation_helper(rounded_cls, method->max_stack(),
monitor_size);
int raw_frame_size = size_activation_helper(rounded_cls, max_stack, monitor_size);
return raw_frame_size;
}
void AbstractInterpreter::layout_activation(Method* method,
int tempcount,
int popframe_extra_args,
int moncount,
int caller_actual_parameters,
int callee_param_count,
int callee_local_count,
frame* caller,
frame* interpreter_frame,
bool is_top_frame,
bool is_bottom_frame) {
// Set up the following variables:
// - Lmethod
// - Llocals
// - Lmonitors (to the indicated number of monitors)
// - Lesp (to the indicated number of temps)
// The frame caller on entry is a description of the caller of the
// frame we are about to lay out. We are guaranteed that we will be
// able to fill in a new interpreter frame as its callee (i.e. the
// stack space is allocated and the amount was determined by an
// earlier call to the size_activation() method). On return, caller
// will describe the interpreter frame we just laid out.
if (interpreter_frame != NULL) {
// The skeleton frame must already look like an interpreter frame
// even if not fully filled out.
assert(interpreter_frame->is_interpreted_frame(), "Must be interpreted frame");
int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
int monitor_size = moncount * frame::interpreter_frame_monitor_size();
assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");
intptr_t* fp = interpreter_frame->fp();
JavaThread* thread = JavaThread::current();
@@ -1756,9 +1770,6 @@ int AbstractInterpreter::layout_activation(Method* method,
assert(lo < monitors && montop <= hi, "monitors in bounds");
assert(lo <= esp && esp < monitors, "esp in bounds");
#endif // ASSERT
}
return raw_frame_size;
}
//----------------------------------------------------------------------------------------------------

View file

@@ -288,7 +288,7 @@ void LIR_Assembler::osr_entry() {
// build frame
ciMethod* m = compilation()->method();
__ build_frame(initial_frame_size_in_bytes());
__ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
// OSR buffer is
//
@@ -376,7 +376,7 @@ void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
}
// This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() {
int LIR_Assembler::initial_frame_size_in_bytes() const {
// if rounding, must let FrameMap know!
// The frame_map records size in slots (32bit word)

View file

@@ -349,13 +349,14 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
}
void C1_MacroAssembler::build_frame(int frame_size_in_bytes) {
void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
// Make sure there is enough stack space for this method's activation.
// Note that we do this before doing an enter(). This matches the
// ordering of C2's stack overflow check / rsp decrement and allows
// the SharedRuntime stack overflow handling to be consistent
// between the two compilers.
generate_stack_overflow_check(frame_size_in_bytes);
generate_stack_overflow_check(bang_size_in_bytes);
push(rbp);
#ifdef TIERED

View file

@@ -2342,7 +2342,58 @@ void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill,
"Stack top out of range");
}
int AbstractInterpreter::layout_activation(Method* method,
static int frame_size_helper(int max_stack,
int tempcount,
int moncount,
int callee_param_count,
int callee_locals,
bool is_top_frame,
int& monitor_size,
int& full_frame_size) {
int extra_locals_size = (callee_locals - callee_param_count) * BytesPerWord;
monitor_size = sizeof(BasicObjectLock) * moncount;
// First calculate the frame size without any java expression stack
int short_frame_size = size_activation_helper(extra_locals_size,
monitor_size);
// Now with full size expression stack
full_frame_size = short_frame_size + max_stack * BytesPerWord;
// and now with only the live portion of the expression stack
short_frame_size = short_frame_size + tempcount * BytesPerWord;
// the size of the activation right now; only the top frame is full size
int frame_size = (is_top_frame ? full_frame_size : short_frame_size);
return frame_size;
}
int AbstractInterpreter::size_activation(int max_stack,
int tempcount,
int extra_args,
int moncount,
int callee_param_count,
int callee_locals,
bool is_top_frame) {
assert(extra_args == 0, "FIX ME");
// NOTE: return size is in words not bytes
// Calculate the amount our frame will be adjusted by the callee. For top frame
// this is zero.
// NOTE: ia64 seems to do this wrong (or at least backwards) in that it
// calculates the extra locals based on itself. Not what the callee does
// to it. So it ignores last_frame_adjust value. Seems suspicious as far
// as getting sender_sp correct.
int unused_monitor_size = 0;
int unused_full_frame_size = 0;
return frame_size_helper(max_stack, tempcount, moncount, callee_param_count, callee_locals,
is_top_frame, unused_monitor_size, unused_full_frame_size)/BytesPerWord;
}
void AbstractInterpreter::layout_activation(Method* method,
int tempcount, //
int popframe_extra_args,
int moncount,
@@ -2357,40 +2408,19 @@ int AbstractInterpreter::layout_activation(Method* method,
assert(popframe_extra_args == 0, "FIX ME");
// NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
// does as far as allocating an interpreter frame.
// If interpreter_frame!=NULL, set up the method, locals, and monitors.
// The frame interpreter_frame, if not NULL, is guaranteed to be the right size,
// as determined by a previous call to this method.
// Set up the method, locals, and monitors.
// The frame interpreter_frame is guaranteed to be the right size,
// as determined by a previous call to the size_activation() method.
// It is also guaranteed to be walkable even though it is in a skeletal state
// NOTE: return size is in words not bytes
// NOTE: tempcount is the current size of the java expression stack. For topmost
// frames we will allocate a full-sized expression stack and not the cut-back
// version that non-top frames have.
// Calculate the amount our frame will be adjusted by the callee. For top frame
// this is zero.
int monitor_size = 0;
int full_frame_size = 0;
int frame_size = frame_size_helper(method->max_stack(), tempcount, moncount, callee_param_count, callee_locals,
is_top_frame, monitor_size, full_frame_size);
// NOTE: ia64 seems to do this wrong (or at least backwards) in that it
// calculates the extra locals based on itself. Not what the callee does
// to it. So it ignores last_frame_adjust value. Seems suspicious as far
// as getting sender_sp correct.
int extra_locals_size = (callee_locals - callee_param_count) * BytesPerWord;
int monitor_size = sizeof(BasicObjectLock) * moncount;
// First calculate the frame size without any java expression stack
int short_frame_size = size_activation_helper(extra_locals_size,
monitor_size);
// Now with full size expression stack
int full_frame_size = short_frame_size + method->max_stack() * BytesPerWord;
// and now with only the live portion of the expression stack
short_frame_size = short_frame_size + tempcount * BytesPerWord;
// the size of the activation right now; only the top frame is full size
int frame_size = (is_top_frame ? full_frame_size : short_frame_size);
if (interpreter_frame != NULL) {
#ifdef ASSERT
assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
#endif
@@ -2453,8 +2483,6 @@ int AbstractInterpreter::layout_activation(Method* method,
is_top_frame);
// BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address, interpreter_frame->fp());
}
return frame_size/BytesPerWord;
}
bool AbstractInterpreter::can_be_compiled(methodHandle m) {

View file

@@ -1051,7 +1051,7 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) {
// was post-decremented.) Skip this address by starting at i=1, and
// touch a few more pages below. N.B. It is important to touch all
// the way down to and including i=StackShadowPages.
for (int i = 1; i <= StackShadowPages; i++) {
for (int i = 1; i < StackShadowPages; i++) {
// this could be any sized move but this can be a debugging crumb
// so the bigger the better.
movptr(Address(tmp, (-i*os::vm_page_size())), size );
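A worked picture of the offsets the corrected loop touches, assuming 4 KiB pages and a hypothetical StackShadowPages of 4 (the page at i == 0 was already banged, per the comment above):

    #include <cstdio>

    int main() {
      const int page_size = 4096;      // stand-in for os::vm_page_size()
      const int StackShadowPages = 4;  // hypothetical configuration
      for (int i = 1; i < StackShadowPages; i++) {
        std::printf("touch [tmp - %d]\n", i * page_size);  // pages 1, 2, 3 below tmp
      }
      return 0;
    }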
@@ -6093,7 +6093,7 @@ void MacroAssembler::reinit_heapbase() {
// C2 compiled method's prolog code.
void MacroAssembler::verified_entry(int framesize, bool stack_bang, bool fp_mode_24b) {
void MacroAssembler::verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b) {
// WARNING: Initial instruction MUST be 5 bytes or longer so that
// NativeJump::patch_verified_entry will be able to patch out the entry
@@ -6101,18 +6101,20 @@ void MacroAssembler::verified_entry(int framesize, bool stack_bang, bool fp_mode
// the frame allocation can be either 3 or 6 bytes. So if we don't do
// stack bang then we must use the 6 byte frame allocation even if
// we have no frame. :-(
assert(stack_bang_size >= framesize || stack_bang_size <= 0, "stack bang size incorrect");
assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
// Remove word for return addr
framesize -= wordSize;
stack_bang_size -= wordSize;
// Calls to C2R adapters often do not accept exceptional returns.
// We require that their callers must bang for them. But be careful, because
// some VM calls (such as call site linkage) can use several kilobytes of
// stack. But the stack safety zone should account for that.
// See bugs 4446381, 4468289, 4497237.
if (stack_bang) {
generate_stack_overflow_check(framesize);
if (stack_bang_size > 0) {
generate_stack_overflow_check(stack_bang_size);
// We always push rbp, so that on return to interpreter rbp will be
// restored correctly and we can correct the stack.
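The bool-to-int change gives verified_entry() the contract checked by the assert above: a positive stack_bang_size must cover at least the frame, and a non-positive value means the caller opted out of banging. A minimal sketch of that contract, using a hypothetical stand-in:

    #include <cassert>

    // Hypothetical stand-in that models verified_entry()'s new precondition.
    static void verified_entry_sketch(int framesize, int stack_bang_size) {
      assert(stack_bang_size >= framesize || stack_bang_size <= 0);
      if (stack_bang_size > 0) {
        // generate_stack_overflow_check(stack_bang_size) would be emitted here
      }
    }

    int main() {
      verified_entry_sketch(256, 4096);  // bang covers the frame plus deopt frames
      verified_entry_sketch(256, 0);     // caller decided no bang is needed
      return 0;
    }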

View file

@@ -1170,7 +1170,7 @@ public:
void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
// C2 compiled method's prolog code.
void verified_entry(int framesize, bool stack_bang, bool fp_mode_24b);
void verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b);
// clear memory of size 'cnt' qwords, starting at 'base'.
void clear_mem(Register base, Register cnt, Register rtmp);

View file

@@ -3014,11 +3014,15 @@ void SharedRuntime::generate_deopt_blob() {
// restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
__ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
// Stack bang to make sure there's enough room for these interpreter frames.
#ifdef ASSERT
// Compilers generate code that bang the stack by as much as the
// interpreter would need. So this stack banging should never
// trigger a fault. Verify that it does not on non product builds.
if (UseStackBanging) {
__ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
__ bang_stack_size(rbx, rcx);
}
#endif
// Load array of frame pcs into ECX
__ movptr(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
@@ -3240,12 +3244,15 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
__ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
// Stack bang to make sure there's enough room for these interpreter frames.
#ifdef ASSERT
// Compilers generate code that bang the stack by as much as the
// interpreter would need. So this stack banging should never
// trigger a fault. Verify that it does not on non product builds.
if (UseStackBanging) {
__ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
__ bang_stack_size(rbx, rcx);
}
#endif
// Load array of frame pcs into ECX
__ movl(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));

View file

@@ -3484,11 +3484,15 @@ void SharedRuntime::generate_deopt_blob() {
// restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
__ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
// Stack bang to make sure there's enough room for these interpreter frames.
#ifdef ASSERT
// Compilers generate code that bang the stack by as much as the
// interpreter would need. So this stack banging should never
// trigger a fault. Verify that it does not on non product builds.
if (UseStackBanging) {
__ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
__ bang_stack_size(rbx, rcx);
}
#endif
// Load address of array of frame pcs into rcx
__ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
@ -3682,11 +3686,15 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
__ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
// Stack bang to make sure there's enough room for these interpreter frames.
#ifdef ASSERT
// Compilers generate code that bangs the stack by as much as the
// interpreter would need, so this stack banging should never
// trigger a fault. Verify that it does not on non-product builds.
if (UseStackBanging) {
__ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
__ bang_stack_size(rbx, rcx);
}
#endif
// Load address of array of frame pcs into rcx (address*)
__ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));

View file

@ -0,0 +1,124 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "ci/ciMethod.hpp"
#include "interpreter/interpreter.hpp"
#include "runtime/frame.inline.hpp"
#ifndef CC_INTERP
// asm based interpreter deoptimization helpers
int AbstractInterpreter::size_activation(int max_stack,
int temps,
int extra_args,
int monitors,
int callee_params,
int callee_locals,
bool is_top_frame) {
// Note: This calculation must exactly parallel the frame setup
// in AbstractInterpreterGenerator::generate_method_entry.
// fixed size of an interpreter frame:
int overhead = frame::sender_sp_offset -
frame::interpreter_frame_initial_sp_offset;
// Our locals were accounted for by the caller (or last_frame_adjust
// on the transition). Since the callee parameters already account
// for the callee's params, we only need to account for the extra
// locals.
int size = overhead +
(callee_locals - callee_params)*Interpreter::stackElementWords +
monitors * frame::interpreter_frame_monitor_size() +
temps * Interpreter::stackElementWords + extra_args;
return size;
}
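
The sizing above is plain word arithmetic, so a standalone sketch can make it concrete. This is illustrative only: the constants below are assumed stand-ins for the real frame:: and Interpreter:: values, not HotSpot's actual layout.

#include <iostream>

// Assumed stand-ins, not HotSpot's real offsets.
const int kOverheadWords     = 6; // frame::sender_sp_offset - initial_sp_offset
const int kMonitorSizeWords  = 2; // frame::interpreter_frame_monitor_size()
const int kStackElementWords = 1; // Interpreter::stackElementWords

int size_activation_words(int temps, int extra_args, int monitors,
                          int callee_params, int callee_locals) {
  // Callee parameters already live in the caller's frame; only the
  // locals beyond the parameters need new space.
  return kOverheadWords
       + (callee_locals - callee_params) * kStackElementWords
       + monitors * kMonitorSizeWords
       + temps * kStackElementWords
       + extra_args;
}

int main() {
  // 3 expression-stack temps, 1 monitor, callee with 4 locals of
  // which 2 are parameters: 6 + 2 + 2 + 3 + 0 = 13 words.
  std::cout << size_activation_words(3, 0, 1, 2, 4) << "\n";
  return 0;
}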
void AbstractInterpreter::layout_activation(Method* method,
int tempcount,
int popframe_extra_args,
int moncount,
int caller_actual_parameters,
int callee_param_count,
int callee_locals,
frame* caller,
frame* interpreter_frame,
bool is_top_frame,
bool is_bottom_frame) {
// The frame interpreter_frame is guaranteed to be the right size,
// as determined by a previous call to the size_activation() method.
// It is also guaranteed to be walkable even though it is in a
// skeletal state.
int max_locals = method->max_locals() * Interpreter::stackElementWords;
int extra_locals = (method->max_locals() - method->size_of_parameters()) *
Interpreter::stackElementWords;
#ifdef ASSERT
if (!EnableInvokeDynamic) {
// @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
// Probably, since deoptimization doesn't work yet.
assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
}
assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
#endif
interpreter_frame->interpreter_frame_set_method(method);
// NOTE the difference in using sender_sp and
// interpreter_frame_sender_sp interpreter_frame_sender_sp is
// the original sp of the caller (the unextended_sp) and
// sender_sp is fp+8/16 (32bit/64bit) XXX
intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
#ifdef ASSERT
if (caller->is_interpreted_frame()) {
assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
}
#endif
interpreter_frame->interpreter_frame_set_locals(locals);
BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
BasicObjectLock* monbot = montop - moncount;
interpreter_frame->interpreter_frame_set_monitor_end(monbot);
// Set last_sp
intptr_t* esp = (intptr_t*) monbot -
tempcount*Interpreter::stackElementWords -
popframe_extra_args;
interpreter_frame->interpreter_frame_set_last_sp(esp);
// All frames but the initial (oldest) interpreter frame we fill in have
// a value for sender_sp that allows walking the stack but isn't
// truly correct. Correct the value here.
if (extra_locals != 0 &&
interpreter_frame->sender_sp() ==
interpreter_frame->interpreter_frame_sender_sp()) {
interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() +
extra_locals);
}
*interpreter_frame->interpreter_frame_cache_addr() =
method->constants()->cache();
}
#endif // CC_INTERP

View file

@ -1686,91 +1686,6 @@ int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
return overhead_size + method_stack + stub_code;
}
// asm based interpreter deoptimization helpers
int AbstractInterpreter::layout_activation(Method* method,
int tempcount,
int popframe_extra_args,
int moncount,
int caller_actual_parameters,
int callee_param_count,
int callee_locals,
frame* caller,
frame* interpreter_frame,
bool is_top_frame,
bool is_bottom_frame) {
// Note: This calculation must exactly parallel the frame setup
// in AbstractInterpreterGenerator::generate_method_entry.
// If interpreter_frame!=NULL, set up the method, locals, and monitors.
// The frame interpreter_frame, if not NULL, is guaranteed to be the right size,
// as determined by a previous call to this method.
// It is also guaranteed to be walkable even though it is in a skeletal state
// NOTE: return size is in words not bytes
// fixed size of an interpreter frame:
int max_locals = method->max_locals() * Interpreter::stackElementWords;
int extra_locals = (method->max_locals() - method->size_of_parameters()) *
Interpreter::stackElementWords;
int overhead = frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset;
// Our locals were accounted for by the caller (or last_frame_adjust on the transition).
// Since the callee parameters already account for the callee's params, we only need to account for
// the extra locals.
int size = overhead +
((callee_locals - callee_param_count)*Interpreter::stackElementWords) +
(moncount*frame::interpreter_frame_monitor_size()) +
tempcount*Interpreter::stackElementWords + popframe_extra_args;
if (interpreter_frame != NULL) {
#ifdef ASSERT
if (!EnableInvokeDynamic)
// @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
// Probably, since deoptimization doesn't work yet.
assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
#endif
interpreter_frame->interpreter_frame_set_method(method);
// NOTE the difference in using sender_sp and interpreter_frame_sender_sp
// interpreter_frame_sender_sp is the original sp of the caller (the unextended_sp)
// and sender_sp is fp+8
intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
#ifdef ASSERT
if (caller->is_interpreted_frame()) {
assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
}
#endif
interpreter_frame->interpreter_frame_set_locals(locals);
BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
BasicObjectLock* monbot = montop - moncount;
interpreter_frame->interpreter_frame_set_monitor_end(monbot);
// Set last_sp
intptr_t* rsp = (intptr_t*) monbot -
tempcount*Interpreter::stackElementWords -
popframe_extra_args;
interpreter_frame->interpreter_frame_set_last_sp(rsp);
// All frames but the initial (oldest) interpreter frame we fill in have a
// value for sender_sp that allows walking the stack but isn't
// truly correct. Correct the value here.
if (extra_locals != 0 &&
interpreter_frame->sender_sp() == interpreter_frame->interpreter_frame_sender_sp() ) {
interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() + extra_locals);
}
*interpreter_frame->interpreter_frame_cache_addr() =
method->constants()->cache();
}
return size;
}
//------------------------------------------------------------------------------------------------------------------------
// Exceptions

View file

@ -1695,87 +1695,6 @@ int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
return (overhead_size + method_stack + stub_code);
}
int AbstractInterpreter::layout_activation(Method* method,
int tempcount,
int popframe_extra_args,
int moncount,
int caller_actual_parameters,
int callee_param_count,
int callee_locals,
frame* caller,
frame* interpreter_frame,
bool is_top_frame,
bool is_bottom_frame) {
// Note: This calculation must exactly parallel the frame setup
// in AbstractInterpreterGenerator::generate_method_entry.
// If interpreter_frame!=NULL, set up the method, locals, and monitors.
// The frame interpreter_frame, if not NULL, is guaranteed to be the
// right size, as determined by a previous call to this method.
// It is also guaranteed to be walkable even though it is in a skeletal state
// fixed size of an interpreter frame:
int max_locals = method->max_locals() * Interpreter::stackElementWords;
int extra_locals = (method->max_locals() - method->size_of_parameters()) *
Interpreter::stackElementWords;
int overhead = frame::sender_sp_offset -
frame::interpreter_frame_initial_sp_offset;
// Our locals were accounted for by the caller (or last_frame_adjust
// on the transition). Since the callee parameters already account
// for the callee's params, we only need to account for the extra
// locals.
int size = overhead +
(callee_locals - callee_param_count)*Interpreter::stackElementWords +
moncount * frame::interpreter_frame_monitor_size() +
tempcount * Interpreter::stackElementWords + popframe_extra_args;
if (interpreter_frame != NULL) {
#ifdef ASSERT
if (!EnableInvokeDynamic)
// @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
// Probably, since deoptimization doesn't work yet.
assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
#endif
interpreter_frame->interpreter_frame_set_method(method);
// NOTE the difference in using sender_sp and
// interpreter_frame_sender_sp interpreter_frame_sender_sp is
// the original sp of the caller (the unextended_sp) and
// sender_sp is fp+16 XXX
intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
#ifdef ASSERT
if (caller->is_interpreted_frame()) {
assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
}
#endif
interpreter_frame->interpreter_frame_set_locals(locals);
BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
BasicObjectLock* monbot = montop - moncount;
interpreter_frame->interpreter_frame_set_monitor_end(monbot);
// Set last_sp
intptr_t* esp = (intptr_t*) monbot -
tempcount*Interpreter::stackElementWords -
popframe_extra_args;
interpreter_frame->interpreter_frame_set_last_sp(esp);
// All frames but the initial (oldest) interpreter frame we fill in have
// a value for sender_sp that allows walking the stack but isn't
// truly correct. Correct the value here.
if (extra_locals != 0 &&
interpreter_frame->sender_sp() ==
interpreter_frame->interpreter_frame_sender_sp()) {
interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() +
extra_locals);
}
*interpreter_frame->interpreter_frame_cache_addr() =
method->constants()->cache();
}
return size;
}
//-----------------------------------------------------------------------------
// Exceptions

View file

@ -512,14 +512,15 @@ void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
Compile* C = ra_->C;
int framesize = C->frame_slots() << LogBytesPerInt;
int framesize = C->frame_size_in_bytes();
int bangsize = C->bang_size_in_bytes();
assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
// Remove wordSize for return addr which is already pushed.
framesize -= wordSize;
if (C->need_stack_bang(framesize)) {
if (C->need_stack_bang(bangsize)) {
framesize -= wordSize;
st->print("# stack bang");
st->print("# stack bang (%d bytes)", bangsize);
st->print("\n\t");
st->print("PUSH EBP\t# Save EBP");
if (framesize) {
@ -563,9 +564,10 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
MacroAssembler _masm(&cbuf);
int framesize = C->frame_slots() << LogBytesPerInt;
int framesize = C->frame_size_in_bytes();
int bangsize = C->bang_size_in_bytes();
__ verified_entry(framesize, C->need_stack_bang(framesize), C->in_24_bit_fp_mode());
__ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, C->in_24_bit_fp_mode());
C->set_frame_complete(cbuf.insts_size());
@ -589,7 +591,7 @@ int MachPrologNode::reloc() const {
#ifndef PRODUCT
void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
Compile *C = ra_->C;
int framesize = C->frame_slots() << LogBytesPerInt;
int framesize = C->frame_size_in_bytes();
assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
// Remove two words for return addr and rbp,
framesize -= 2*wordSize;
@ -629,7 +631,7 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
}
int framesize = C->frame_slots() << LogBytesPerInt;
int framesize = C->frame_size_in_bytes();
assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
// Remove two words for return addr and rbp,
framesize -= 2*wordSize;
@ -663,7 +665,7 @@ uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
if (C->max_vector_size() > 16) size += 3; // vzeroupper
if (do_polling() && C->is_method_compilation()) size += 6;
int framesize = C->frame_slots() << LogBytesPerInt;
int framesize = C->frame_size_in_bytes();
assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
// Remove two words for return addr and rbp,
framesize -= 2*wordSize;

View file

@ -713,14 +713,15 @@ void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
Compile* C = ra_->C;
int framesize = C->frame_slots() << LogBytesPerInt;
int framesize = C->frame_size_in_bytes();
int bangsize = C->bang_size_in_bytes();
assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
// Remove wordSize for return addr which is already pushed.
framesize -= wordSize;
if (C->need_stack_bang(framesize)) {
if (C->need_stack_bang(bangsize)) {
framesize -= wordSize;
st->print("# stack bang");
st->print("# stack bang (%d bytes)", bangsize);
st->print("\n\t");
st->print("pushq rbp\t# Save rbp");
if (framesize) {
@ -751,9 +752,10 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
MacroAssembler _masm(&cbuf);
int framesize = C->frame_slots() << LogBytesPerInt;
int framesize = C->frame_size_in_bytes();
int bangsize = C->bang_size_in_bytes();
__ verified_entry(framesize, C->need_stack_bang(framesize), false);
__ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, false);
C->set_frame_complete(cbuf.insts_size());
@ -786,7 +788,7 @@ void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const
st->cr(); st->print("\t");
}
int framesize = C->frame_slots() << LogBytesPerInt;
int framesize = C->frame_size_in_bytes();
assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
// Remove word for return adr already pushed
// and RBP
@ -822,7 +824,7 @@ void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
__ vzeroupper();
}
int framesize = C->frame_slots() << LogBytesPerInt;
int framesize = C->frame_size_in_bytes();
assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
// Remove word for return adr already pushed
// and RBP

View file

@ -916,7 +916,22 @@ InterpreterFrame *InterpreterFrame::build(int size, TRAPS) {
return (InterpreterFrame *) fp;
}
int AbstractInterpreter::layout_activation(Method* method,
int AbstractInterpreter::size_activation(int max_stack,
int tempcount,
int extra_args,
int moncount,
int callee_param_count,
int callee_locals,
bool is_top_frame) {
int header_words = InterpreterFrame::header_words;
int monitor_words = moncount * frame::interpreter_frame_monitor_size();
int stack_words = is_top_frame ? max_stack : tempcount;
int callee_extra_locals = callee_locals - callee_param_count;
return header_words + monitor_words + stack_words + callee_extra_locals;
}
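
A hedged model of the distinction the Zero port draws above: only the topmost frame reserves the full expression stack, while deeper frames keep just the slots actually live. The header and monitor word counts are assumed values, not InterpreterFrame's real ones.

// Illustrative only; header_words and the monitor size are assumptions.
int zero_size_activation_words(int max_stack, int tempcount, int moncount,
                               int callee_params, int callee_locals,
                               bool is_top_frame) {
  const int header_words  = 4;            // assumed InterpreterFrame::header_words
  const int monitor_words = moncount * 2; // assumed 2 words per monitor
  // Only the topmost frame may still grow its expression stack.
  int stack_words = is_top_frame ? max_stack : tempcount;
  return header_words + monitor_words + stack_words
       + (callee_locals - callee_params);
}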
void AbstractInterpreter::layout_activation(Method* method,
int tempcount,
int popframe_extra_args,
int moncount,
@ -935,21 +950,15 @@ int AbstractInterpreter::layout_activation(Method* method,
// does (the full InterpreterFrame::build, that is, not the
// one that creates empty frames for the deoptimizer).
//
// If interpreter_frame is not NULL then it will be filled in.
// Its size is determined by a previous call to this method,
// so it should be correct.
// interpreter_frame will be filled in. Its size is determined by
// a previous call to the size_activation() method.
//
// Note that tempcount is the current size of the expression
// stack. For topmost frames we will allocate a full-sized
// expression stack and not the trimmed version that non-top
// frames have.
int header_words = InterpreterFrame::header_words;
int monitor_words = moncount * frame::interpreter_frame_monitor_size();
int stack_words = is_top_frame ? method->max_stack() : tempcount;
int callee_extra_locals = callee_locals - callee_param_count;
if (interpreter_frame) {
intptr_t *locals = interpreter_frame->fp() + method->max_locals();
interpreterState istate = interpreter_frame->get_interpreterState();
intptr_t *monitor_base = (intptr_t*) istate;
@ -966,8 +975,6 @@ int AbstractInterpreter::layout_activation(Method* method,
monitor_base,
NULL,
is_top_frame);
}
return header_words + monitor_words + stack_words + callee_extra_locals;
}
void BytecodeInterpreter::layout_interpreterState(interpreterState istate,

View file

@ -25,6 +25,7 @@
package com.sun.hotspot.tools.compiler;
import java.io.PrintStream;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;
@ -40,6 +41,7 @@ public class CallSite {
private int endNodes;
private int endLiveNodes;
private double timeStamp;
private long inlineId;
CallSite() {
}
@ -94,7 +96,7 @@ public class CallSite {
public void print(PrintStream stream, int indent) {
emit(stream, indent);
String m = getMethod().getHolder().replace('/', '.') + "::" + getMethod().getName();
String m = getMethod().getHolder() + "::" + getMethod().getName();
if (getReason() == null) {
stream.print(" @ " + getBci() + " " + m + " (" + getMethod().getBytes() + " bytes)");
@ -214,4 +216,45 @@ public class CallSite {
return timeStamp;
}
private boolean matches(CallSite other) {
// Every late inline call site has a unique inline id. If the
// call site we're looking for has one then use it; otherwise rely
// on method name and bci.
if (other.inlineId != 0) {
return inlineId == other.inlineId;
}
return method.equals(other.method) && bci == other.bci;
}
public CallSite findCallSite(ArrayDeque<CallSite> sites) {
// Locate a late inline call site. Multiple chains of
// identical call sites with the same method name/bci are
// possible, so we have to try them all until we find the late
// inline call site that has a matching inline id.
CallSite site = sites.pop();
for (CallSite c : calls) {
if (c.matches(site)) {
if (!sites.isEmpty()) {
CallSite res = c.findCallSite(sites);
if (res != null) {
sites.push(site);
return res;
}
} else {
sites.push(site);
return c;
}
}
}
sites.push(site);
return null;
}
public long getInlineId() {
return inlineId;
}
public void setInlineId(long inlineId) {
this.inlineId = inlineId;
}
}
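
For readers following the matching logic, here is a hedged C++ model of matches() and findCallSite() (the tool itself is Java; this sketch only mirrors the algorithm): prefer the inline id when the target carries one, fall back to method/bci, and restore the popped chain element on every return path.

#include <deque>
#include <string>
#include <vector>

struct Site {
  std::string method;
  int bci = 0;
  long inline_id = 0;        // 0 means "no late-inline id"
  std::vector<Site*> calls;  // call sites inlined below this one

  bool matches(const Site& other) const {
    if (other.inline_id != 0) return inline_id == other.inline_id;
    return method == other.method && bci == other.bci;
  }
};

// chain holds the wanted sites, outermost first. Returns the matching
// leaf site or nullptr; the chain is left unchanged either way.
Site* find_call_site(Site* root, std::deque<Site*>& chain) {
  Site* want = chain.front();
  chain.pop_front();
  for (Site* c : root->calls) {
    if (c->matches(*want)) {
      if (chain.empty()) {
        chain.push_front(want);
        return c;
      }
      if (Site* res = find_call_site(c, chain)) {
        chain.push_front(want);
        return res;
      }
    }
  }
  chain.push_front(want);
  return nullptr;
}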

View file

@ -31,6 +31,7 @@ package com.sun.hotspot.tools.compiler;
import java.io.FileReader;
import java.io.Reader;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
@ -144,9 +145,12 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
private Stack<CallSite> scopes = new Stack<CallSite>();
private Compilation compile;
private CallSite site;
private CallSite methodHandleSite;
private Stack<Phase> phaseStack = new Stack<Phase>();
private UncommonTrapEvent currentTrap;
private Stack<CallSite> late_inline_scope;
private Stack<CallSite> lateInlineScope;
private boolean lateInlining;
long parseLong(String l) {
try {
@ -330,18 +334,61 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
}
methods.put(id, m);
} else if (qname.equals("call")) {
site = new CallSite(bci, method(search(atts, "method")));
if (methodHandleSite != null) {
methodHandleSite = null;
}
Method m = method(search(atts, "method"));
if (lateInlining && scopes.size() == 0) {
// re-attempting already seen call site (late inlining for MH invokes)
if (m != site.getMethod()) {
if (bci != site.getBci()) {
System.out.println(m + " bci: " + bci);
System.out.println(site.getMethod() + " bci: " + site.getBci());
throw new InternalError("bci mismatch after late inlining");
}
site.setMethod(m);
}
} else {
site = new CallSite(bci, m);
}
site.setCount(Integer.parseInt(search(atts, "count", "0")));
String receiver = atts.getValue("receiver");
if (receiver != null) {
site.setReceiver(type(receiver));
site.setReceiver_count(Integer.parseInt(search(atts, "receiver_count")));
}
int methodHandle = Integer.parseInt(search(atts, "method_handle_intrinsic", "0"));
if (lateInlining && scopes.size() == 0) {
// The call was added before this round of late inlining
} else if (methodHandle == 0) {
scopes.peek().add(site);
} else {
// A method handle call site can be followed by another
// call (in case it is inlined). If that happens we
// discard the method handle call site, so we keep
// track of it but don't add it to the list yet.
methodHandleSite = site;
}
} else if (qname.equals("regalloc")) {
compile.setAttempts(Integer.parseInt(search(atts, "attempts")));
} else if (qname.equals("inline_fail")) {
if (methodHandleSite != null) {
scopes.peek().add(methodHandleSite);
methodHandleSite = null;
}
if (lateInlining && scopes.size() == 0) {
site.setReason(search(atts, "reason"));
lateInlining = false;
} else {
scopes.peek().last().setReason(search(atts, "reason"));
}
} else if (qname.equals("inline_success")) {
if (methodHandleSite != null) {
throw new InternalError("method handle site should have been replaced");
}
if (lateInlining && scopes.size() == 0) {
site.setReason(null);
}
} else if (qname.equals("failure")) {
failureReason = search(atts, "reason");
} else if (qname.equals("task_done")) {
@ -371,22 +418,30 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
// ignore for now
}
} else if (qname.equals("late_inline")) {
late_inline_scope = new Stack<CallSite>();
long inlineId = Long.parseLong(search(atts, "inline_id"));
lateInlineScope = new Stack<CallSite>();
site = new CallSite(-999, method(search(atts, "method")));
late_inline_scope.push(site);
site.setInlineId(inlineId);
lateInlineScope.push(site);
} else if (qname.equals("jvms")) {
// <jvms bci='4' method='java/io/DataInputStream readChar ()C' bytes='40' count='5815' iicount='20815'/>
if (currentTrap != null) {
currentTrap.addJVMS(atts.getValue("method"), Integer.parseInt(atts.getValue("bci")));
} else if (late_inline_scope != null) {
} else if (lateInlineScope != null) {
bci = Integer.parseInt(search(atts, "bci"));
site = new CallSite(bci, method(search(atts, "method")));
late_inline_scope.push(site);
lateInlineScope.push(site);
} else {
// Ignore <eliminate_allocation type='667'>,
// <eliminate_lock lock='1'>,
// <replace_string_concat arguments='2' string_alloc='0' multiple='0'>
}
} else if (qname.equals("inline_id")) {
if (methodHandleSite != null) {
throw new InternalError("method handle site should have been replaced");
}
long id = Long.parseLong(search(atts, "id"));
site.setInlineId(id);
} else if (qname.equals("nmethod")) {
String id = makeId(atts);
NMethod nm = new NMethod(Double.parseDouble(search(atts, "stamp")),
@ -396,8 +451,18 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
nmethods.put(id, nm);
events.add(nm);
} else if (qname.equals("parse")) {
if (methodHandleSite != null) {
throw new InternalError("method handle site should have been replaced");
}
Method m = method(search(atts, "method"));
if (scopes.size() == 0) {
if (lateInlining && scopes.size() == 0) {
if (site.getMethod() != m) {
System.out.println(site.getMethod());
System.out.println(m);
throw new InternalError("Unexpected method mismatch during late inlining");
}
}
if (scopes.size() == 0 && !lateInlining) {
compile.setMethod(m);
scopes.push(site);
} else {
@ -427,14 +492,19 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
if (qname.equals("parse")) {
indent -= 2;
scopes.pop();
if (scopes.size() == 0) {
lateInlining = false;
}
} else if (qname.equals("uncommon_trap")) {
currentTrap = null;
} else if (qname.equals("late_inline")) {
// Populate late inlining info.
// late_inline scopes are specified in reverse order:
if (scopes.size() != 0) {
throw new InternalError("scopes should be empty for late inline");
}
// late inline scopes are specified in reverse order:
// compiled method should be on top of stack.
CallSite caller = late_inline_scope.pop();
CallSite caller = lateInlineScope.pop();
Method m = compile.getMethod();
if (m != caller.getMethod()) {
System.out.println(m);
@ -444,28 +514,42 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
// late_inline contains caller+bci info, convert it
// to bci+callee info used by LogCompilation.
site = compile.getLateInlineCall();
CallSite lateInlineSite = compile.getLateInlineCall();
ArrayDeque<CallSite> thisCallScopes = new ArrayDeque<CallSite>();
do {
bci = caller.getBci();
// Next inlined call.
caller = late_inline_scope.pop();
caller = lateInlineScope.pop();
CallSite callee = new CallSite(bci, caller.getMethod());
site.add(callee);
site = callee;
} while (!late_inline_scope.empty());
callee.setInlineId(caller.getInlineId());
thisCallScopes.addLast(callee);
lateInlineSite.add(callee);
lateInlineSite = callee;
} while (!lateInlineScope.empty());
site = compile.getCall().findCallSite(thisCallScopes);
if (site == null) {
System.out.println(caller.getMethod() + " bci: " + bci);
throw new InternalError("couldn't find call site");
}
lateInlining = true;
if (caller.getBci() != -999) {
System.out.println(caller.getMethod());
throw new InternalError("broken late_inline info");
}
if (site.getMethod() != caller.getMethod()) {
if (site.getInlineId() == caller.getInlineId()) {
site.setMethod(caller.getMethod());
} else {
System.out.println(site.getMethod());
System.out.println(caller.getMethod());
throw new InternalError("call site and late_inline info don't match");
}
}
// late_inline is followed by parse with scopes.size() == 0,
// 'site' will be pushed to scopes.
late_inline_scope = null;
lateInlineScope = null;
} else if (qname.equals("task")) {
types.clear();
methods.clear();

View file

@ -51,15 +51,15 @@ public class Method implements Constants {
String format(int osr_bci) {
if (osr_bci >= 0) {
return getHolder().replace('/', '.') + "::" + getName() + " @ " + osr_bci + " (" + getBytes() + " bytes)";
return getHolder() + "::" + getName() + " @ " + osr_bci + " (" + getBytes() + " bytes)";
} else {
return getHolder().replace('/', '.') + "::" + getName() + " (" + getBytes() + " bytes)";
return getHolder() + "::" + getName() + " (" + getBytes() + " bytes)";
}
}
@Override
public String toString() {
return getHolder().replace('/', '.') + "::" + getName() + " (" + getBytes() + " bytes)";
return getHolder() + "::" + getName() + " (" + getBytes() + " bytes)";
}
public String getHolder() {
@ -117,4 +117,14 @@ public class Method implements Constants {
public void setFlags(String flags) {
this.flags = flags;
}
@Override
public boolean equals(Object o) {
if (o instanceof Method) {
Method other = (Method)o;
return holder.equals(other.holder) && name.equals(other.name) &&
arguments.equals(other.arguments) && returnType.equals(other.returnType);
}
return false;
}
}

View file

@ -1613,21 +1613,20 @@ void ArchDesc::declareClasses(FILE *fp) {
// Each instruction attribute results in a virtual call of same name.
// The ins_cost is not handled here.
Attribute *attr = instr->_attribs;
bool avoid_back_to_back = false;
Attribute *avoid_back_to_back_attr = NULL;
while (attr != NULL) {
if (strcmp (attr->_ident, "ins_cost") != 0 &&
if (strcmp (attr->_ident, "ins_is_TrapBasedCheckNode") == 0) {
fprintf(fp, " virtual bool is_TrapBasedCheckNode() const { return %s; }\n", attr->_val);
} else if (strcmp (attr->_ident, "ins_cost") != 0 &&
strncmp(attr->_ident, "ins_field_", 10) != 0 &&
// Must match function in node.hpp: return type bool, no prefix "ins_".
strcmp (attr->_ident, "ins_is_TrapBasedCheckNode") != 0 &&
strcmp (attr->_ident, "ins_short_branch") != 0) {
fprintf(fp, " virtual int %s() const { return %s; }\n", attr->_ident, attr->_val);
}
// Check value for ins_avoid_back_to_back, and if it is true (1), set the flag
if (!strcmp(attr->_ident, "ins_avoid_back_to_back") != 0 && attr->int_val(*this) != 0)
avoid_back_to_back = true;
if (strcmp (attr->_ident, "ins_is_TrapBasedCheckNode") == 0)
fprintf(fp, " virtual bool is_TrapBasedCheckNode() const { return %s; }\n", attr->_val);
if (strcmp(attr->_ident, "ins_avoid_back_to_back") == 0) {
avoid_back_to_back_attr = attr;
}
attr = (Attribute *)attr->_next;
}
@ -1799,11 +1798,11 @@ void ArchDesc::declareClasses(FILE *fp) {
}
// flag: if this instruction should not be generated back to back.
if ( avoid_back_to_back ) {
if ( node_flags_set ) {
fprintf(fp," | Flag_avoid_back_to_back");
if (avoid_back_to_back_attr != NULL) {
if (node_flags_set) {
fprintf(fp," | (%s)", avoid_back_to_back_attr->_val);
} else {
fprintf(fp,"init_flags(Flag_avoid_back_to_back");
fprintf(fp,"init_flags((%s)", avoid_back_to_back_attr->_val);
node_flags_set = true;
}
}

View file

@ -968,6 +968,7 @@ void CodeBuffer::verify_section_allocation() {
void CodeBuffer::log_section_sizes(const char* name) {
if (xtty != NULL) {
ttyLocker ttyl;
// log info about buffer usage
xtty->print_cr("<blob name='%s' size='%d'>", name, _total_size);
for (int n = (int) CodeBuffer::SECT_FIRST; n < (int) CodeBuffer::SECT_LIMIT; n++) {

View file

@ -546,6 +546,7 @@ Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* metho
, _code(buffer_blob)
, _has_access_indexed(false)
, _current_instruction(NULL)
, _interpreter_frame_size(0)
#ifndef PRODUCT
, _last_instruction_printed(NULL)
#endif // PRODUCT

View file

@ -88,6 +88,7 @@ class Compilation: public StackObj {
CodeOffsets _offsets;
CodeBuffer _code;
bool _has_access_indexed;
int _interpreter_frame_size; // Stack space needed in case of a deoptimization
// compilation helpers
void initialize();
@ -262,6 +263,18 @@ class Compilation: public StackObj {
// Dump inlining replay data to the stream.
void dump_inline_data(outputStream* out) { /* do nothing now */ }
// How much stack space would the interpreter need in case of a
// deoptimization (worst case)
void update_interpreter_frame_size(int size) {
if (_interpreter_frame_size < size) {
_interpreter_frame_size = size;
}
}
int interpreter_frame_size() const {
return _interpreter_frame_size;
}
};

View file

@ -227,8 +227,38 @@ void CodeEmitInfo::add_register_oop(LIR_Opr opr) {
_oop_map->set_oop(name);
}
// Mirror the stack size calculation in the deopt code
// How much stack space would we need at this point in the program in
// case of deoptimization?
int CodeEmitInfo::interpreter_frame_size() const {
ValueStack* state = _stack;
int size = 0;
int callee_parameters = 0;
int callee_locals = 0;
int extra_args = state->scope()->method()->max_stack() - state->stack_size();
while (state != NULL) {
int locks = state->locks_size();
int temps = state->stack_size();
bool is_top_frame = (state == _stack);
ciMethod* method = state->scope()->method();
int frame_size = BytesPerWord * Interpreter::size_activation(method->max_stack(),
temps + callee_parameters,
extra_args,
locks,
callee_parameters,
callee_locals,
is_top_frame);
size += frame_size;
callee_parameters = method->size_of_parameters();
callee_locals = method->max_locals();
extra_args = 0;
state = state->caller_state();
}
return size + Deoptimization::last_frame_adjust(0, callee_locals) * BytesPerWord;
}
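
The loop above starts at the innermost state and walks outward through caller_state(), so each method's parameter and local counts feed the next iteration as callee figures. A simplified standalone model of that accumulation, where FrameState and frame_words are invented stand-ins for ValueStack/ciMethod and Interpreter::size_activation():

#include <vector>

struct FrameState {  // invented stand-in for a ValueStack/ciMethod pair
  int stack_size;    // live expression-stack slots
  int locks;         // active monitors
  int num_params;    // size_of_parameters()
  int max_locals;    // max_locals()
  int max_stack;     // max_stack()
};

// Assumed per-frame word count standing in for Interpreter::size_activation().
int frame_words(int temps, int extra_args, int locks,
                int callee_params, int callee_locals) {
  const int overhead = 6;  // assumed fixed overhead in words
  return overhead + temps + extra_args + 2 * locks
       + (callee_locals - callee_params);
}

// states[0] is the innermost frame, followed by its callers outward.
int worst_case_interpreter_bytes(const std::vector<FrameState>& states) {
  const int bytes_per_word = 8;  // assumed 64-bit target
  int size = 0, callee_params = 0, callee_locals = 0;
  int extra_args = states[0].max_stack - states[0].stack_size;
  for (const FrameState& f : states) {
    size += bytes_per_word * frame_words(f.stack_size + callee_params,
                                         extra_args, f.locks,
                                         callee_params, callee_locals);
    callee_params = f.num_params;  // this frame is the next one's callee
    callee_locals = f.max_locals;
    extra_args = 0;                // only the top frame pads to max_stack
  }
  return size;  // the real code also adds last_frame_adjust(...) * BytesPerWord
}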
// Implementation of IR

View file

@ -284,6 +284,8 @@ class CodeEmitInfo: public CompilationResourceObj {
bool is_method_handle_invoke() const { return _is_method_handle_invoke; }
void set_is_method_handle_invoke(bool x) { _is_method_handle_invoke = x; }
int interpreter_frame_size() const;
};

View file

@ -190,6 +190,13 @@ address LIR_Assembler::pc() const {
return _masm->pc();
}
// To bang the stack of this compiled method we use the stack size
// that the interpreter would need in case of a deoptimization. This
// removes the need to bang the stack in the deoptimization blob, which
// in turn simplifies stack overflow handling.
int LIR_Assembler::bang_size_in_bytes() const {
return MAX2(initial_frame_size_in_bytes(), _compilation->interpreter_frame_size());
}
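
A minimal sketch of the resulting policy, with std::max standing in for MAX2: bang by whichever is larger, the compiled frame or the worst-case interpreter frames it could deoptimize into, so the deopt blob itself never has to bang.

#include <algorithm>

// Illustrative; both inputs are byte counts computed elsewhere.
int bang_size_in_bytes(int compiled_frame_bytes, int interpreter_frames_bytes) {
  return std::max(compiled_frame_bytes, interpreter_frames_bytes);
}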
void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
for (int i = 0; i < info_list->length(); i++) {
@ -797,7 +804,7 @@ void LIR_Assembler::emit_op2(LIR_Op2* op) {
void LIR_Assembler::build_frame() {
_masm->build_frame(initial_frame_size_in_bytes());
_masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
}

View file

@ -132,7 +132,8 @@ class LIR_Assembler: public CompilationResourceObj {
int code_offset() const;
address pc() const;
int initial_frame_size_in_bytes();
int initial_frame_size_in_bytes() const;
int bang_size_in_bytes() const;
// test for constants which can be encoded directly in instructions
static bool is_small_constant(LIR_Opr opr);

View file

@ -2451,6 +2451,9 @@ void LinearScan::compute_oop_map(IntervalWalker* iw, const LIR_OpVisitState &vis
CodeEmitInfo* info = visitor.info_at(i);
OopMap* oop_map = first_oop_map;
// compute worst case interpreter size in case of a deoptimization
_compilation->update_interpreter_frame_size(info->interpreter_frame_size());
if (info->stack()->locks_size() != first_info->stack()->locks_size()) {
// this info has a different number of locks than the precomputed oop map
// (possible for lock and unlock instructions) -> compute oop map with

View file

@ -39,7 +39,7 @@ class C1_MacroAssembler: public MacroAssembler {
void explicit_null_check(Register base);
void inline_cache_check(Register receiver, Register iCache);
void build_frame(int frame_size_in_bytes);
void build_frame(int frame_size_in_bytes, int bang_size_in_bytes);
void remove_frame(int frame_size_in_bytes);
void unverified_entry(Register receiver, Register ic_klass);

View file

@ -237,3 +237,9 @@ void ciKlass::print_impl(outputStream* st) {
void ciKlass::print_name_on(outputStream* st) {
name()->print_symbol_on(st);
}
const char* ciKlass::external_name() const {
GUARDED_VM_ENTRY(
return get_Klass()->external_name();
)
}

View file

@ -125,6 +125,8 @@ public:
virtual ciKlass* exact_klass() = 0;
void print_name_on(outputStream* st);
const char* external_name() const;
};
#endif // SHARE_VM_CI_CIKLASS_HPP

View file

@ -80,6 +80,7 @@ ciMethod::ciMethod(methodHandle h_m) : ciMetadata(h_m()) {
_code_size = h_m()->code_size();
_intrinsic_id = h_m()->intrinsic_id();
_handler_count = h_m()->exception_table_length();
_size_of_parameters = h_m()->size_of_parameters();
_uses_monitors = h_m()->access_flags().has_monitor_bytecodes();
_balanced_monitors = !_uses_monitors || h_m()->access_flags().is_monitor_matching();
_is_c1_compilable = !h_m()->is_not_c1_compilable();

View file

@ -71,6 +71,7 @@ class ciMethod : public ciMetadata {
int _interpreter_invocation_count;
int _interpreter_throwout_count;
int _instructions_size;
int _size_of_parameters;
bool _uses_monitors;
bool _balanced_monitors;
@ -166,6 +167,7 @@ class ciMethod : public ciMetadata {
int exception_table_length() const { check_is_loaded(); return _handler_count; }
int interpreter_invocation_count() const { check_is_loaded(); return _interpreter_invocation_count; }
int interpreter_throwout_count() const { check_is_loaded(); return _interpreter_throwout_count; }
int size_of_parameters() const { check_is_loaded(); return _size_of_parameters; }
// Code size for inlining decisions.
int code_size_for_inlining();
@ -241,7 +243,6 @@ class ciMethod : public ciMetadata {
ciField* get_field_at_bci( int bci, bool &will_link);
ciMethod* get_method_at_bci(int bci, bool &will_link, ciSignature* *declared_signature);
// Given a certain calling environment, find the monomorphic target
// for the call. Return NULL if the call is not monomorphic in
// its calling environment.

View file

@ -123,6 +123,10 @@ void ciSymbol::print_symbol_on(outputStream *st) {
GUARDED_VM_ENTRY(get_symbol()->print_symbol_on(st);)
}
const char* ciSymbol::as_klass_external_name() const {
GUARDED_VM_ENTRY(return get_symbol()->as_klass_external_name(););
}
// ------------------------------------------------------------------
// ciSymbol::make_impl
//

View file

@ -90,6 +90,7 @@ public:
void print_symbol() {
print_symbol_on(tty);
}
const char* as_klass_external_name() const;
// Make a ciSymbol from a C string.
// Consider adding to vmSymbols.hpp instead of using this constructor.

View file

@ -106,7 +106,7 @@ int CompileLog::identify(ciBaseObject* obj) {
if (mobj->is_klass()) {
ciKlass* klass = mobj->as_klass();
begin_elem("klass id='%d'", id);
name(klass->name());
name(klass);
if (!klass->is_loaded()) {
print(" unloaded='1'");
} else {
@ -171,6 +171,15 @@ void CompileLog::name(ciSymbol* name) {
print("'");
}
void CompileLog::name(ciKlass* k) {
print(" name='");
if (!k->is_loaded()) {
text()->print(k->name()->as_klass_external_name());
} else {
text()->print(k->external_name());
}
print("'");
}
// ------------------------------------------------------------------
// CompileLog::clear_identities

View file

@ -28,6 +28,7 @@
#include "utilities/xmlstream.hpp"
class ciBaseObject;
class ciKlass;
class ciObject;
class ciMetadata;
class ciSymbol;
@ -72,6 +73,7 @@ class CompileLog : public xmlStream {
void name(ciSymbol* s); // name='s'
void name(Symbol* s) { xmlStream::name(s); }
void name(ciKlass* k);
// Output an object description, return obj->ident().
int identify(ciBaseObject* obj);

View file

@ -181,30 +181,16 @@ class AbstractInterpreter: AllStatic {
// Deoptimization should reexecute this bytecode
static bool bytecode_should_reexecute(Bytecodes::Code code);
// share implementation of size_activation and layout_activation:
static int size_activation(Method* method,
// deoptimization support
static int size_activation(int max_stack,
int temps,
int popframe_args,
int extra_args,
int monitors,
int caller_actual_parameters,
int callee_params,
int callee_locals,
bool is_top_frame,
bool is_bottom_frame) {
return layout_activation(method,
temps,
popframe_args,
monitors,
caller_actual_parameters,
callee_params,
callee_locals,
(frame*)NULL,
(frame*)NULL,
is_top_frame,
is_bottom_frame);
}
bool is_top_frame);
static int layout_activation(Method* method,
static void layout_activation(Method* method,
int temps,
int popframe_args,
int monitors,

View file

@ -266,14 +266,17 @@ CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
private:
// unique id for log compilation
jlong _unique_id;
protected:
CallGenerator* _inline_cg;
virtual bool do_late_inline_check(JVMState* jvms) { return true; }
public:
LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
DirectCallGenerator(method, true), _inline_cg(inline_cg) {}
DirectCallGenerator(method, true), _inline_cg(inline_cg), _unique_id(0) {}
virtual bool is_late_inline() const { return true; }
@ -283,6 +286,8 @@ class LateInlineCallGenerator : public DirectCallGenerator {
virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
Compile *C = Compile::current();
C->log_inline_id(this);
// Record that this call site should be revisited once the main
// parse is finished.
if (!is_mh_late_inline()) {
@ -304,6 +309,14 @@ class LateInlineCallGenerator : public DirectCallGenerator {
C->print_inlining_move_to(this);
C->print_inlining_update_delayed(this);
}
virtual void set_unique_id(jlong id) {
_unique_id = id;
}
virtual jlong unique_id() const {
return _unique_id;
}
};
void LateInlineCallGenerator::do_late_inline() {
@ -368,6 +381,8 @@ void LateInlineCallGenerator::do_late_inline() {
C->print_inlining_move_to(this);
C->log_late_inline(this);
// This check is done here because the for_method_handle_inline() method
// needs the jvms for the inlined state.
if (!do_late_inline_check(jvms)) {
@ -375,17 +390,6 @@ void LateInlineCallGenerator::do_late_inline() {
return;
}
CompileLog* log = C->log();
if (log != NULL) {
log->head("late_inline method='%d'", log->identify(method()));
JVMState* p = jvms;
while (p != NULL) {
log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
p = p->caller();
}
log->tail("late_inline");
}
// Setup default node notes to be picked up by the inlining
Node_Notes* old_nn = C->default_node_notes();
if (old_nn != NULL) {
@ -438,11 +442,12 @@ class LateInlineMHCallGenerator : public LateInlineCallGenerator {
virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
JVMState* new_jvms = LateInlineCallGenerator::generate(jvms, parent_parser);
Compile* C = Compile::current();
if (_input_not_const) {
// inlining won't be possible so no need to enqueue right now.
call_node()->set_generator(this);
} else {
Compile::current()->add_late_inline(this);
C->add_late_inline(this);
}
return new_jvms;
}
@ -483,6 +488,9 @@ class LateInlineStringCallGenerator : public LateInlineCallGenerator {
virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
Compile *C = Compile::current();
C->log_inline_id(this);
C->add_string_late_inline(this);
JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser);
@ -505,6 +513,8 @@ class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {
virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
Compile *C = Compile::current();
C->log_inline_id(this);
C->add_boxing_late_inline(this);
JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser);
@ -786,6 +796,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
} else {
const char* msg = "receiver not constant";
if (PrintInlining) C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
C->log_inline_failure(msg);
}
}
break;
@ -858,6 +869,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
} else {
const char* msg = "member_name not constant";
if (PrintInlining) C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
C->log_inline_failure(msg);
}
}
break;

View file

@ -84,6 +84,9 @@ class CallGenerator : public ResourceObj {
virtual CallStaticJavaNode* call_node() const { ShouldNotReachHere(); return NULL; }
virtual void set_unique_id(jlong id) { fatal("unique id only for late inlines"); };
virtual jlong unique_id() const { fatal("unique id only for late inlines"); return 0; };
// Note: It is possible for a CG to be both inline and virtual.
// (The hashCode intrinsic does a vtable check and an inlined fast path.)

View file

@ -608,6 +608,39 @@ void JVMState::adapt_position(int delta) {
}
}
// Mirror the stack size calculation in the deopt code
// How much stack space would we need at this point in the program in
// case of deoptimization?
int JVMState::interpreter_frame_size() const {
const JVMState* jvms = this;
int size = 0;
int callee_parameters = 0;
int callee_locals = 0;
int extra_args = method()->max_stack() - stk_size();
while (jvms != NULL) {
int locks = jvms->nof_monitors();
int temps = jvms->stk_size();
bool is_top_frame = (jvms == this);
ciMethod* method = jvms->method();
int frame_size = BytesPerWord * Interpreter::size_activation(method->max_stack(),
temps + callee_parameters,
extra_args,
locks,
callee_parameters,
callee_locals,
is_top_frame);
size += frame_size;
callee_parameters = method->size_of_parameters();
callee_locals = method->max_locals();
extra_args = 0;
jvms = jvms->caller();
}
return size + Deoptimization::last_frame_adjust(0, callee_locals) * BytesPerWord;
}
//=============================================================================
uint CallNode::cmp( const Node &n ) const
{ return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }

View file

@ -300,6 +300,7 @@ public:
JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
void set_map_deep(SafePointNode *map);// reset map for all callers
void adapt_position(int delta); // Adapt offsets in in-array after adding an edge.
int interpreter_frame_size() const;
#ifndef PRODUCT
void format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;

View file

@ -440,6 +440,14 @@ int Compile::frame_size_in_words() const {
return words;
}
// To bang the stack of this compiled method we use the stack size
// that the interpreter would need in case of a deoptimization. This
// removes the need to bang the stack in the deoptimization blob, which
// in turn simplifies stack overflow handling.
int Compile::bang_size_in_bytes() const {
return MAX2(_interpreter_frame_size, frame_size_in_bytes());
}
// ============================================================================
//------------------------------CompileWrapper---------------------------------
class CompileWrapper : public StackObj {
@ -664,7 +672,8 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
_print_inlining_list(NULL),
_print_inlining_stream(NULL),
_print_inlining_idx(0),
_preserve_jvm_state(0) {
_preserve_jvm_state(0),
_interpreter_frame_size(0) {
C = this;
CompileWrapper cw(this);
@ -969,7 +978,8 @@ Compile::Compile( ciEnv* ci_env,
_print_inlining_stream(NULL),
_print_inlining_idx(0),
_preserve_jvm_state(0),
_allowed_reasons(0) {
_allowed_reasons(0),
_interpreter_frame_size(0) {
C = this;
#ifndef PRODUCT
@ -3078,8 +3088,12 @@ void Compile::final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_
Node* m = n->in(i);
++i;
if (m != NULL && !frc._visited.test_set(m->_idx)) {
if (m->is_SafePoint() && m->as_SafePoint()->jvms() != NULL)
if (m->is_SafePoint() && m->as_SafePoint()->jvms() != NULL) {
// compute worst case interpreter size in case of a deoptimization
update_interpreter_frame_size(m->as_SafePoint()->jvms()->interpreter_frame_size());
sfpt.push(m);
}
cnt = m->req();
nstack.push(n, i); // put on stack parent and next input's index
n = m;
@ -3851,7 +3865,7 @@ void Compile::print_inlining_assert_ready() {
void Compile::dump_inlining() {
bool do_print_inlining = print_inlining() || print_intrinsics();
if (do_print_inlining) {
if (do_print_inlining || log() != NULL) {
// Print inlining message for candidates that we couldn't inline
// for lack of space
for (int i = 0; i < _late_inlines.length(); i++) {
@ -3861,6 +3875,7 @@ void Compile::dump_inlining() {
if (do_print_inlining) {
cg->print_inlining_late(msg);
}
log_late_inline_failure(cg, msg);
}
}
}
@ -3871,6 +3886,48 @@ void Compile::dump_inlining() {
}
}
void Compile::log_late_inline(CallGenerator* cg) {
if (log() != NULL) {
log()->head("late_inline method='%d' inline_id='" JLONG_FORMAT "'", log()->identify(cg->method()),
cg->unique_id());
JVMState* p = cg->call_node()->jvms();
while (p != NULL) {
log()->elem("jvms bci='%d' method='%d'", p->bci(), log()->identify(p->method()));
p = p->caller();
}
log()->tail("late_inline");
}
}
void Compile::log_late_inline_failure(CallGenerator* cg, const char* msg) {
log_late_inline(cg);
if (log() != NULL) {
log()->inline_fail(msg);
}
}
void Compile::log_inline_id(CallGenerator* cg) {
if (log() != NULL) {
// The LogCompilation tool needs a unique way to identify late
// inline call sites. This id must be unique for this call site in
// this compilation. Try to keep it unique across compilations as
// well, since that is convenient when grepping through the log
// file.
// Distinguish OSR compilations from others in case CICountOSR is
// on.
jlong id = ((jlong)unique()) + (((jlong)compile_id()) << 33) + (CICountOSR && is_osr_compilation() ? ((jlong)1) << 32 : 0);
cg->set_unique_id(id);
log()->elem("inline_id id='" JLONG_FORMAT "'", id);
}
}
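
A hedged model of the id layout built above: the node counter occupies the low bits, bit 32 flags an OSR compilation when CICountOSR is on, and the compile id sits at bit 33 and up, which keeps ids distinct within a compilation and practically distinct across compilations.

#include <cassert>
#include <cstdint>

// Mirrors the packing expression above with standalone types.
int64_t make_inline_id(uint32_t unique, uint32_t compile_id, bool is_osr) {
  return (int64_t)unique
       + ((int64_t)compile_id << 33)
       + (is_osr ? (int64_t)1 << 32 : 0);
}

int main() {
  int64_t a = make_inline_id(7, 42, false);
  int64_t b = make_inline_id(7, 42, true);  // same site, OSR compilation
  assert(a != b);                           // the OSR bit keeps them apart
  return 0;
}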
void Compile::log_inline_failure(const char* msg) {
if (C->log() != NULL) {
C->log()->inline_fail(msg);
}
}
// Dump inlining replay data to the stream.
// Don't change thread state and acquire any locks.
void Compile::dump_inline_data(outputStream* out) {
@ -4048,8 +4105,8 @@ void Compile::remove_speculative_types(PhaseIterGVN &igvn) {
worklist.push(root());
for (uint next = 0; next < worklist.size(); ++next) {
Node *n = worklist.at(next);
const Type* t = igvn.type(n);
assert(t == t->remove_speculative(), "no more speculative types");
const Type* t = igvn.type_or_null(n);
assert((t == NULL) || (t == t->remove_speculative()), "no more speculative types");
if (n->is_Type()) {
t = n->as_Type()->type();
assert(t == t->remove_speculative(), "no more speculative types");

View file

@ -440,6 +440,8 @@ class Compile : public Phase {
void print_inlining_push();
PrintInliningBuffer& print_inlining_current();
void log_late_inline_failure(CallGenerator* cg, const char* msg);
public:
outputStream* print_inlining_stream() const {
@ -459,6 +461,10 @@ class Compile : public Phase {
print_inlining_stream()->print(ss.as_string());
}
void log_late_inline(CallGenerator* cg);
void log_inline_id(CallGenerator* cg);
void log_inline_failure(const char* msg);
void* replay_inline_data() const { return _replay_inline_data; }
// Dump inlining replay data to the stream.
@ -478,6 +484,7 @@ class Compile : public Phase {
RegMask _FIRST_STACK_mask; // All stack slots usable for spills (depends on frame layout)
Arena* _indexSet_arena; // control IndexSet allocation within PhaseChaitin
void* _indexSet_free_block_list; // free list of IndexSet bit blocks
int _interpreter_frame_size;
uint _node_bundling_limit;
Bundle* _node_bundling_base; // Information for instruction bundling
@ -935,6 +942,7 @@ class Compile : public Phase {
PhaseRegAlloc* regalloc() { return _regalloc; }
int frame_slots() const { return _frame_slots; }
int frame_size_in_words() const; // frame_slots in units of the polymorphic 'words'
int frame_size_in_bytes() const { return _frame_slots << LogBytesPerInt; }
RegMask& FIRST_STACK_mask() { return _FIRST_STACK_mask; }
Arena* indexSet_arena() { return _indexSet_arena; }
void* indexSet_free_block_list() { return _indexSet_free_block_list; }
@ -946,6 +954,13 @@ class Compile : public Phase {
bool need_stack_bang(int frame_size_in_bytes) const;
bool need_register_stack_bang() const;
void update_interpreter_frame_size(int size) {
if (_interpreter_frame_size < size) {
_interpreter_frame_size = size;
}
}
int bang_size_in_bytes() const;
void set_matcher(Matcher* m) { _matcher = m; }
//void set_regalloc(PhaseRegAlloc* ra) { _regalloc = ra; }
void set_indexSet_arena(Arena* a) { _indexSet_arena = a; }

View file

@ -104,6 +104,9 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
log->print(" receiver2='%d' receiver2_count='%d'", r2id, profile.receiver_count(1));
}
}
if (callee->is_method_handle_intrinsic()) {
log->print(" method_handle_intrinsic='1'");
}
log->end_elem();
}
@ -296,6 +299,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
if (call_does_dispatch) {
const char* msg = "virtual call";
if (PrintInlining) print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
C->log_inline_failure(msg);
return CallGenerator::for_virtual_call(callee, vtable_index);
} else {
// Class Hierarchy Analysis or Type Profile reveals a unique target,

View file

@ -711,7 +711,7 @@ void ConnectionGraph::add_final_edges(Node *n) {
Node *val = n->in(MemNode::ValueIn);
PointsToNode* ptn = ptnode_adr(val->_idx);
assert(ptn != NULL, "node should be registered");
ptn->set_escape_state(PointsToNode::GlobalEscape);
set_escape_state(ptn, PointsToNode::GlobalEscape);
// Add edge to object for unsafe access with offset.
PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
assert(adr_ptn != NULL, "node should be registered");

View file

@ -1150,6 +1150,7 @@ void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_
// Now force out all loop-invariant dominating tests. The optimizer
// finds some, but we _know_ they are all useless.
peeled_dom_test_elim(loop,old_new);
loop->record_for_igvn();
}
//------------------------------is_invariant-----------------------------

View file

@ -3172,17 +3172,16 @@ bool PhaseIdealLoop::verify_dominance(Node* n, Node* use, Node* LCA, Node* early
bool had_error = false;
#ifdef ASSERT
if (early != C->root()) {
// Make sure that there's a dominance path from use to LCA
Node* d = use;
while (d != LCA) {
d = idom(d);
// Make sure that there's a dominance path from LCA to early
Node* d = LCA;
while (d != early) {
if (d == C->root()) {
tty->print_cr("*** Use %d isn't dominated by def %s", use->_idx, n->_idx);
n->dump();
use->dump();
dump_bad_graph("Bad graph detected in compute_lca_of_uses", n, early, LCA);
tty->print_cr("*** Use %d isn't dominated by def %d ***", use->_idx, n->_idx);
had_error = true;
break;
}
d = idom(d);
}
}
#endif
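
The corrected assert walks immediate dominators upward from LCA and must reach early before falling off at the root. A standalone model over an assumed idom array:

#include <vector>

// idom[n] is the immediate dominator of node n; idom[root] == root.
// True if 'early' dominates 'lca': the upward walk from lca along
// immediate dominators reaches early before hitting the root.
bool dominance_path_exists(const std::vector<int>& idom,
                           int lca, int early, int root) {
  int d = lca;
  while (d != early) {
    if (d == root) {
      return false;  // fell off the top: early does not dominate lca
    }
    d = idom[d];
  }
  return true;
}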
@ -3435,6 +3434,13 @@ void PhaseIdealLoop::build_loop_late_post( Node *n ) {
_igvn._worklist.push(n); // Maybe we'll normalize it, if no more loops.
}
#ifdef ASSERT
if (_verify_only && !n->is_CFG()) {
// Check def-use domination.
compute_lca_of_uses(n, get_ctrl(n), true /* verify */);
}
#endif
// CFG and pinned nodes already handled
if( n->in(0) ) {
if( n->in(0)->is_top() ) return; // Dead?

View file

@ -2700,6 +2700,7 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
// Inhibit more partial peeling on this loop
new_head_clone->set_partial_peel_loop();
C->set_major_progress();
loop->record_for_igvn();
#if !defined(PRODUCT)
if (TracePartialPeeling) {

View file

@ -210,7 +210,14 @@ public:
bool may_be_short_branch() const { return (flags() & Flag_may_be_short_branch) != 0; }
// Avoid scheduling some instructions back to back on some CPUs.
bool avoid_back_to_back() const { return (flags() & Flag_avoid_back_to_back) != 0; }
enum AvoidBackToBackFlag { AVOID_NONE = 0,
AVOID_BEFORE = Flag_avoid_back_to_back_before,
AVOID_AFTER = Flag_avoid_back_to_back_after,
AVOID_BEFORE_AND_AFTER = AVOID_BEFORE | AVOID_AFTER };
bool avoid_back_to_back(AvoidBackToBackFlag flag_value) const {
return (flags() & flag_value) == flag_value;
}
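
Splitting the old boolean into _before and _after bits turns the query into a subset test on a mask. A minimal sketch with assumed bit values:

// Assumed bit values standing in for Node::Flag_avoid_back_to_back_*.
enum AvoidBackToBackFlag {
  AVOID_NONE             = 0,
  AVOID_BEFORE           = 1 << 0,
  AVOID_AFTER            = 1 << 1,
  AVOID_BEFORE_AND_AFTER = AVOID_BEFORE | AVOID_AFTER
};

// True only if every bit of 'query' is set in 'flags' (subset test),
// matching the (flags() & flag_value) == flag_value idiom above.
bool avoid_back_to_back(unsigned flags, AvoidBackToBackFlag query) {
  return (flags & query) == (unsigned)query;
}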
// instruction implemented with a call
bool has_call() const { return (flags() & Flag_has_call) != 0; }

View file

@ -653,8 +653,9 @@ public:
Flag_is_cisc_alternate = Flag_is_Con << 1,
Flag_is_dead_loop_safe = Flag_is_cisc_alternate << 1,
Flag_may_be_short_branch = Flag_is_dead_loop_safe << 1,
Flag_avoid_back_to_back = Flag_may_be_short_branch << 1,
Flag_has_call = Flag_avoid_back_to_back << 1,
Flag_avoid_back_to_back_before = Flag_may_be_short_branch << 1,
Flag_avoid_back_to_back_after = Flag_avoid_back_to_back_before << 1,
Flag_has_call = Flag_avoid_back_to_back_after << 1,
Flag_is_expensive = Flag_has_call << 1,
_max_flags = (Flag_is_expensive << 1) - 1 // allow flags combination
};

View file

@ -165,8 +165,13 @@ bool Compile::need_stack_bang(int frame_size_in_bytes) const {
// Determine if we need to generate a stack overflow check.
// Do it if the method is not a stub function and
// has java calls or has frame size > vm_page_size/8.
// The debug VM checks that deoptimization doesn't trigger an
// unexpected stack overflow (compiled method stack banging should
// guarantee it doesn't happen) so we always need the stack bang in
// a debug VM.
return (UseStackBanging && stub_function() == NULL &&
(has_java_calls() || frame_size_in_bytes > os::vm_page_size()>>3));
(has_java_calls() || frame_size_in_bytes > os::vm_page_size()>>3
DEBUG_ONLY(|| true)));
}
bool Compile::need_register_stack_bang() const {
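
The DEBUG_ONLY(|| true) clause above makes the predicate unconditionally true in a debug VM while compiling away to nothing in a product build. A simplified sketch of the macro's effect, assuming the usual HotSpot convention (the real definition lives in utilities/macros.hpp):

// DEBUG_ONLY keeps its argument in ASSERT (debug) builds and drops it
// in product builds.
#ifdef ASSERT
#define DEBUG_ONLY(code) code
#else
#define DEBUG_ONLY(code)
#endif

// Product build: return UseStackBanging && ... && (has_java_calls() || big_frame);
// Debug build:   return UseStackBanging && ... && (has_java_calls() || big_frame || true);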
@@ -411,7 +416,7 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
blk_size += nop_size;
}
}
if (mach->avoid_back_to_back()) {
if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
// Nop is inserted between "avoid back to back" instructions.
// ScheduleAndBundle() can rearrange nodes in a block,
// check for all offsets inside this block.
@@ -439,7 +444,7 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
last_call_adr = blk_starts[i]+blk_size;
}
// Remember end of avoid_back_to_back offset
if (nj->is_Mach() && nj->as_Mach()->avoid_back_to_back()) {
if (nj->is_Mach() && nj->as_Mach()->avoid_back_to_back(MachNode::AVOID_AFTER)) {
last_avoid_back_to_back_adr = blk_starts[i]+blk_size;
}
}
@@ -525,11 +530,11 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
int new_size = replacement->size(_regalloc);
int diff = br_size - new_size;
assert(diff >= (int)nop_size, "short_branch size should be smaller");
// Conservatively take into accound padding between
// Conservatively take into account padding between
// avoid_back_to_back branches. Previous branch could be
// converted into avoid_back_to_back branch during next
// rounds.
if (needs_padding && replacement->avoid_back_to_back()) {
if (needs_padding && replacement->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
jmp_offset[i] += nop_size;
diff -= nop_size;
}
@@ -548,7 +553,7 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
}
} // (mach->may_be_short_branch())
if (mach != NULL && (mach->may_be_short_branch() ||
mach->avoid_back_to_back())) {
mach->avoid_back_to_back(MachNode::AVOID_AFTER))) {
last_may_be_short_branch_adr = blk_starts[i] + jmp_offset[i] + jmp_size[i];
}
blk_starts[i+1] -= adjust_block_start;
@@ -1313,7 +1318,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset) {
padding = nop_size;
}
if (padding == 0 && mach->avoid_back_to_back() &&
if (padding == 0 && mach->avoid_back_to_back(MachNode::AVOID_BEFORE) &&
current_offset == last_avoid_back_to_back_offset) {
// Avoid placing some instructions back to back.
padding = nop_size;
@@ -1407,7 +1412,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
int new_size = replacement->size(_regalloc);
assert((br_size - new_size) >= (int)nop_size, "short_branch size should be smaller");
// Insert padding between avoid_back_to_back branches.
if (needs_padding && replacement->avoid_back_to_back()) {
if (needs_padding && replacement->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
MachNode *nop = new (this) MachNopNode();
block->insert_node(nop, j++);
_cfg->map_node_to_block(nop, block);
@@ -1515,7 +1520,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
last_call_offset = current_offset;
}
if (n->is_Mach() && n->as_Mach()->avoid_back_to_back()) {
if (n->is_Mach() && n->as_Mach()->avoid_back_to_back(MachNode::AVOID_AFTER)) {
// Avoid placing some instructions back to back.
last_avoid_back_to_back_offset = current_offset;
}

View file

@@ -831,13 +831,36 @@ PhaseIterGVN::PhaseIterGVN( PhaseGVN *gvn ) : PhaseGVN(gvn),
}
}
/**
* Initialize worklist for each node.
*/
void PhaseIterGVN::init_worklist(Node* first) {
Unique_Node_List to_process;
to_process.push(first);
while (to_process.size() > 0) {
Node* n = to_process.pop();
if (!_worklist.member(n)) {
_worklist.push(n);
uint cnt = n->req();
for(uint i = 0; i < cnt; i++) {
Node* m = n->in(i);
if (m != NULL) {
to_process.push(m);
}
}
}
}
}
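
The rewrite above replaces the old recursive walk (removed further down) with an explicit pending list, so the depth of the ideal graph no longer consumes native stack. A generic sketch of the same recursion-to-iteration transformation, using plain STL types for illustration:

#include <vector>

// Depth-first visit over node inputs driven by an explicit stack instead
// of the C call stack; 'seen' plays the role of the
// _worklist.member()/_worklist.push() pair in the code above.
struct N { std::vector<N*> in; bool seen = false; };

static void visit_all(N* root) {
  std::vector<N*> pending;
  pending.push_back(root);
  while (!pending.empty()) {
    N* n = pending.back();
    pending.pop_back();
    if (n->seen) continue;
    n->seen = true;
    for (N* m : n->in) {
      if (m != nullptr) pending.push_back(m);
    }
  }
}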
#ifndef PRODUCT
void PhaseIterGVN::verify_step(Node* n) {
if (VerifyIterativeGVN) {
_verify_window[_verify_counter % _verify_window_size] = n;
++_verify_counter;
ResourceMark rm;
ResourceArea *area = Thread::current()->resource_area();
ResourceArea* area = Thread::current()->resource_area();
VectorSet old_space(area), new_space(area);
if (C->unique() < 1000 ||
0 == _verify_counter % (C->unique() < 10000 ? 10 : 100)) {
@@ -855,83 +878,12 @@ void PhaseIterGVN::verify_step(Node* n) {
// Typical fanout is 1-2, so this call visits about 6 nodes.
Node::verify_recur(n, verify_depth, old_space, new_space);
}
}
#endif
//------------------------------init_worklist----------------------------------
// Initialize worklist for each node.
void PhaseIterGVN::init_worklist( Node *n ) {
if( _worklist.member(n) ) return;
_worklist.push(n);
uint cnt = n->req();
for( uint i =0 ; i < cnt; i++ ) {
Node *m = n->in(i);
if( m ) init_worklist(m);
}
}
//------------------------------optimize---------------------------------------
void PhaseIterGVN::optimize() {
debug_only(uint num_processed = 0;);
#ifndef PRODUCT
{
_verify_counter = 0;
_verify_full_passes = 0;
for ( int i = 0; i < _verify_window_size; i++ ) {
_verify_window[i] = NULL;
}
}
#endif
#ifdef ASSERT
Node* prev = NULL;
uint rep_cnt = 0;
#endif
uint loop_count = 0;
// Pull from worklist; transform node;
// If node has changed: update edge info and put uses on worklist.
while( _worklist.size() ) {
if (C->check_node_count(NodeLimitFudgeFactor * 2,
"out of nodes optimizing method")) {
return;
}
Node *n = _worklist.pop();
if (++loop_count >= K * C->live_nodes()) {
debug_only(n->dump(4);)
assert(false, "infinite loop in PhaseIterGVN::optimize");
C->record_method_not_compilable("infinite loop in PhaseIterGVN::optimize");
return;
}
#ifdef ASSERT
if (n == prev) {
if (++rep_cnt > 3) {
n->dump(4);
assert(false, "loop in Ideal transformation");
}
} else {
rep_cnt = 0;
}
prev = n;
#endif
if (TraceIterativeGVN && Verbose) {
tty->print(" Pop ");
NOT_PRODUCT( n->dump(); )
debug_only(if( (num_processed++ % 100) == 0 ) _worklist.print_set();)
}
if (n->outcnt() != 0) {
#ifndef PRODUCT
uint wlsize = _worklist.size();
const Type* oldtype = type_or_null(n);
#endif //PRODUCT
Node *nn = transform_old(n);
#ifndef PRODUCT
void PhaseIterGVN::trace_PhaseIterGVN(Node* n, Node* nn, const Type* oldtype) {
if (TraceIterativeGVN) {
uint wlsize = _worklist.size();
const Type* newtype = type_or_null(n);
if (nn != n) {
// print old node
@@ -968,17 +920,22 @@ void PhaseIterGVN::optimize() {
}
tty->print_cr(" }");
}
}
if( VerifyIterativeGVN && nn != n ) {
verify_step((Node*) NULL); // ignore n, it might be subsumed
}
#endif
} else if (!n->is_top()) {
remove_dead_node(n);
if (nn != n) {
// ignore n, it might be subsumed
verify_step((Node*) NULL);
}
}
}
#ifndef PRODUCT
void PhaseIterGVN::init_verifyPhaseIterGVN() {
_verify_counter = 0;
_verify_full_passes = 0;
for (int i = 0; i < _verify_window_size; i++) {
_verify_window[i] = NULL;
}
}
void PhaseIterGVN::verify_PhaseIterGVN() {
C->verify_graph_edges();
if( VerifyOpto && allow_progress() ) {
// Must turn off allow_progress to enable assert and break recursion
@@ -998,21 +955,78 @@ void PhaseIterGVN::optimize() {
igvn2.set_allow_progress(true);
}
}
if ( VerifyIterativeGVN && PrintOpto ) {
if ( _verify_counter == _verify_full_passes )
if (VerifyIterativeGVN && PrintOpto) {
if (_verify_counter == _verify_full_passes) {
tty->print_cr("VerifyIterativeGVN: %d transforms and verify passes",
_verify_full_passes);
else
} else {
tty->print_cr("VerifyIterativeGVN: %d transforms, %d full verify passes",
_verify_counter, _verify_full_passes);
}
#endif
}
}
#endif /* PRODUCT */
#ifdef ASSERT
/**
* Dumps information that can help to debug the problem. A debug
* build fails with an assert.
*/
void PhaseIterGVN::dump_infinite_loop_info(Node* n) {
n->dump(4);
_worklist.dump();
assert(false, "infinite loop in PhaseIterGVN::optimize");
}
/**
* Prints out information about IGVN if the 'verbose' option is used.
*/
void PhaseIterGVN::trace_PhaseIterGVN_verbose(Node* n, int num_processed) {
if (TraceIterativeGVN && Verbose) {
tty->print(" Pop ");
n->dump();
if ((num_processed % 100) == 0) {
_worklist.print_set();
}
}
}
#endif /* ASSERT */
void PhaseIterGVN::optimize() {
DEBUG_ONLY(uint num_processed = 0;)
NOT_PRODUCT(init_verifyPhaseIterGVN();)
uint loop_count = 0;
// Pull from worklist and transform the node. If the node has changed,
// update edge info and put uses on worklist.
while(_worklist.size()) {
if (C->check_node_count(NodeLimitFudgeFactor * 2, "Out of nodes")) {
return;
}
Node* n = _worklist.pop();
if (++loop_count >= K * C->live_nodes()) {
DEBUG_ONLY(dump_infinite_loop_info(n);)
C->record_method_not_compilable("infinite loop in PhaseIterGVN::optimize");
return;
}
DEBUG_ONLY(trace_PhaseIterGVN_verbose(n, num_processed++);)
if (n->outcnt() != 0) {
NOT_PRODUCT(const Type* oldtype = type_or_null(n));
// Do the transformation
Node* nn = transform_old(n);
NOT_PRODUCT(trace_PhaseIterGVN(n, nn, oldtype);)
} else if (!n->is_top()) {
remove_dead_node(n);
}
}
NOT_PRODUCT(verify_PhaseIterGVN();)
}
//------------------register_new_node_with_optimizer---------------------------
// Register a new node with the optimizer. Update the types array, the def-use
// info. Put on worklist.
/**
* Register a new node with the optimizer. Update the types array, the def-use
* info. Put on worklist.
*/
Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
set_type_bottom(n);
_worklist.push(n);
@@ -1038,32 +1052,29 @@ Node *PhaseIterGVN::transform( Node *n ) {
return transform_old(n);
}
//------------------------------transform_old----------------------------------
Node *PhaseIterGVN::transform_old( Node *n ) {
#ifndef PRODUCT
debug_only(uint loop_count = 0;);
set_transforms();
#endif
Node *PhaseIterGVN::transform_old(Node* n) {
DEBUG_ONLY(uint loop_count = 0;);
NOT_PRODUCT(set_transforms());
// Remove 'n' from hash table in case it gets modified
_table.hash_delete(n);
if( VerifyIterativeGVN ) {
assert( !_table.find_index(n->_idx), "found duplicate entry in table");
if (VerifyIterativeGVN) {
assert(!_table.find_index(n->_idx), "found duplicate entry in table");
}
// Apply the Ideal call in a loop until it no longer applies
Node *k = n;
Node* k = n;
DEBUG_ONLY(dead_loop_check(k);)
DEBUG_ONLY(bool is_new = (k->outcnt() == 0);)
Node *i = k->Ideal(this, /*can_reshape=*/true);
Node* i = k->Ideal(this, /*can_reshape=*/true);
assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes");
#ifndef PRODUCT
if( VerifyIterativeGVN )
verify_step(k);
if( i && VerifyOpto ) {
if( !allow_progress() ) {
if (i->is_Add() && i->outcnt() == 1) {
if (i && VerifyOpto ) {
if (!allow_progress()) {
if (i->is_Add() && (i->outcnt() == 1)) {
// Switched input to left side because this is the only use
} else if( i->is_If() && (i->in(0) == NULL) ) {
} else if (i->is_If() && (i->in(0) == NULL)) {
// This IF is dead because it is dominated by an equivalent IF When
// dominating if changed, info is not propagated sparsely to 'this'
// Propagating this info further will spuriously identify other
@@ -1071,35 +1082,38 @@ Node *PhaseIterGVN::transform_old( Node *n ) {
return i;
} else
set_progress();
} else
} else {
set_progress();
}
}
#endif
while( i ) {
while (i != NULL) {
#ifndef PRODUCT
debug_only( if( loop_count >= K ) i->dump(4); )
assert(loop_count < K, "infinite loop in PhaseIterGVN::transform");
debug_only( loop_count++; )
if (loop_count >= K) {
dump_infinite_loop_info(i);
}
loop_count++;
#endif
assert((i->_idx >= k->_idx) || i->is_top(), "Idealize should return new nodes, use Identity to return old nodes");
// Made a change; put users of original Node on worklist
add_users_to_worklist( k );
add_users_to_worklist(k);
// Replacing root of transform tree?
if( k != i ) {
if (k != i) {
// Make users of old Node now use new.
subsume_node( k, i );
subsume_node(k, i);
k = i;
}
DEBUG_ONLY(dead_loop_check(k);)
// Try idealizing again
DEBUG_ONLY(is_new = (k->outcnt() == 0);)
i = k->Ideal(this, /*can_reshape=*/true);
assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes");
assert(i != k || is_new || (i->outcnt() > 0), "don't return dead nodes");
#ifndef PRODUCT
if( VerifyIterativeGVN )
verify_step(k);
if( i && VerifyOpto ) set_progress();
if (i && VerifyOpto) {
set_progress();
}
#endif
}
@@ -1107,48 +1121,49 @@ Node *PhaseIterGVN::transform_old( Node *n ) {
ensure_type_or_null(k);
// See what kind of values 'k' takes on at runtime
const Type *t = k->Value(this);
const Type* t = k->Value(this);
assert(t != NULL, "value sanity");
// Since I just called 'Value' to compute the set of run-time values
// for this Node, and 'Value' is non-local (and therefore expensive) I'll
// cache Value. Later requests for the local phase->type of this Node can
// use the cached Value instead of suffering with 'bottom_type'.
if (t != type_or_null(k)) {
NOT_PRODUCT( set_progress(); )
NOT_PRODUCT( inc_new_values();)
if (type_or_null(k) != t) {
#ifndef PRODUCT
inc_new_values();
set_progress();
#endif
set_type(k, t);
// If k is a TypeNode, capture any more-precise type permanently into Node
k->raise_bottom_type(t);
// Move users of node to worklist
add_users_to_worklist( k );
add_users_to_worklist(k);
}
// If 'k' computes a constant, replace it with a constant
if( t->singleton() && !k->is_Con() ) {
NOT_PRODUCT( set_progress(); )
Node *con = makecon(t); // Make a constant
add_users_to_worklist( k );
subsume_node( k, con ); // Everybody using k now uses con
if (t->singleton() && !k->is_Con()) {
NOT_PRODUCT(set_progress();)
Node* con = makecon(t); // Make a constant
add_users_to_worklist(k);
subsume_node(k, con); // Everybody using k now uses con
return con;
}
// Now check for Identities
i = k->Identity(this); // Look for a nearby replacement
if( i != k ) { // Found? Return replacement!
NOT_PRODUCT( set_progress(); )
add_users_to_worklist( k );
subsume_node( k, i ); // Everybody using k now uses i
if (i != k) { // Found? Return replacement!
NOT_PRODUCT(set_progress();)
add_users_to_worklist(k);
subsume_node(k, i); // Everybody using k now uses i
return i;
}
// Global Value Numbering
i = hash_find_insert(k); // Check for pre-existing node
if( i && (i != k) ) {
if (i && (i != k)) {
// Return the pre-existing node if it isn't dead
NOT_PRODUCT( set_progress(); )
add_users_to_worklist( k );
subsume_node( k, i ); // Everybody using k now uses i
NOT_PRODUCT(set_progress();)
add_users_to_worklist(k);
subsume_node(k, i); // Everybody using k now uses i
return i;
}
@@ -1514,6 +1529,21 @@ void PhaseCCP::do_transform() {
C->set_root( transform(C->root())->as_Root() );
assert( C->top(), "missing TOP node" );
assert( C->root(), "missing root" );
// Eagerly remove CastPP nodes here. CastPP nodes might not be
// removed in the subsequent IGVN phase if a node that changes
// in(1) of a CastPP is processed prior to the CastPP node.
for (uint i = 0; i < _worklist.size(); i++) {
Node* n = _worklist.at(i);
if (n->is_ConstraintCast()) {
Node* nn = n->Identity(this);
if (nn != n) {
replace_node(n, nn);
--i;
}
}
}
}
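
Note the --i above: replace_node() may remove the current entry from the worklist, shifting later entries down one slot, so the index steps back to re-examine the slot that now holds a different node. The same scan-with-removal pattern in miniature, on a plain vector with an illustrative predicate:

#include <vector>

static void scan_and_remove(std::vector<int>& work) {
  for (int i = 0; i < (int)work.size(); i++) {
    if (work[i] < 0) {                // stands in for n->is_ConstraintCast()
      work.erase(work.begin() + i);   // stands in for the replacement
      --i;                            // re-examine the slot just refilled
    }
  }
}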
//------------------------------transform--------------------------------------

View file

@@ -440,6 +440,17 @@ public:
// and dominator info to a fixed point.
void optimize();
#ifndef PRODUCT
void trace_PhaseIterGVN(Node* n, Node* nn, const Type* old_type);
void init_verifyPhaseIterGVN();
void verify_PhaseIterGVN();
#endif
#ifdef ASSERT
void dump_infinite_loop_info(Node* n);
void trace_PhaseIterGVN_verbose(Node* n, int num_processed);
#endif
// Register a new node with the iter GVN pass without transforming it.
// Used when we need to restructure a Region/Phi area and all the Regions
// and Phis need to complete this one big transform before any other

View file

@@ -53,7 +53,8 @@ void AdvancedThresholdPolicy::initialize() {
}
set_c1_count(MAX2(count / 3, 1));
set_c2_count(MAX2(count - count / 3, 1));
set_c2_count(MAX2(count - c1_count(), 1));
FLAG_SET_ERGO(intx, CICompilerCount, c1_count() + c2_count());
// Some inlining tuning
#ifdef X86
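
The switch from count - count / 3 to count - c1_count() matters once the integer division rounds to zero; with the old expression the two counts could exceed the thread budget that CICompilerCount is now derived from. A worked example:

  count = 2:  old: c1 = MAX2(2/3, 1) = 1,  c2 = MAX2(2 - 2/3, 1) = 2   // c1 + c2 = 3 > count
              new: c1 = 1,                 c2 = MAX2(2 - c1, 1) = 1    // c1 + c2 = 2 == count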

View file

@@ -2402,6 +2402,10 @@ bool Arguments::check_vm_args_consistency() {
const int num_min_compiler_threads = (TieredCompilation && (TieredStopAtLevel >= CompLevel_full_optimization)) ? 2 : 1;
status &=verify_min_value(CICompilerCount, num_min_compiler_threads, "CICompilerCount");
if (!FLAG_IS_DEFAULT(CICompilerCount) && !FLAG_IS_DEFAULT(CICompilerCountPerCPU) && CICompilerCountPerCPU) {
warning("The VM option CICompilerCountPerCPU overrides CICompilerCount.");
}
return status;
}

View file

@@ -182,6 +182,7 @@ void NonTieredCompPolicy::initialize() {
// max(log2(8)-1,1) = 2 compiler threads on an 8-way machine.
// May help big-app startup time.
_compiler_count = MAX2(log2_intptr(os::active_processor_count())-1,1);
FLAG_SET_ERGO(intx, CICompilerCount, _compiler_count);
} else {
_compiler_count = CICompilerCount;
}

View file

@@ -420,15 +420,9 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
// frame[number_of_frames - 1 ] = on_stack_size(youngest)
// frame[number_of_frames - 2 ] = on_stack_size(sender(youngest))
// frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest)))
int caller_parms = callee_parameters;
if ((index == array->frames() - 1) && caller_was_method_handle) {
caller_parms = 0;
}
frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(caller_parms,
callee_parameters,
frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters,
callee_locals,
index == 0,
index == array->frames() - 1,
popframe_extra_args);
// This pc doesn't have to be perfect just good enough to identify the frame
// as interpreted so the skeleton frame will be walkable
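
The indexing comment above, made concrete for a three-frame array: the youngest frame (index 0) fills the last slot, so the array reads oldest to youngest:

  number_of_frames = 3:
    index 0 (youngest)          -> frame_sizes[3 - 1 - 0] = frame_sizes[2]
    index 1 (sender(youngest))  -> frame_sizes[3 - 1 - 1] = frame_sizes[1]
    index 2 (oldest)            -> frame_sizes[3 - 1 - 2] = frame_sizes[0]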

View file

@@ -775,10 +775,13 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
// going to be unwound. Dispatch to a shared runtime stub
// which will cause the StackOverflowError to be fabricated
// and processed.
// For stack overflow in deoptimization blob, cleanup thread.
if (thread->deopt_mark() != NULL) {
Deoptimization::cleanup_deopt_info(thread, NULL);
}
// Stack overflow should never occur during deoptimization:
// the compiled method bangs the stack by as much as the
// interpreter would need in case of a deoptimization. The
// deoptimization blob and uncommon trap blob bang the stack
// in a debug VM to verify the correctness of the compiled
// method stack banging.
assert(thread->deopt_mark() == NULL, "no stack overflow from deopt blob/uncommon trap");
Events::log_exception(thread, "StackOverflowError at " INTPTR_FORMAT, pc);
return StubRoutines::throw_StackOverflowError_entry();
}

View file

@@ -142,7 +142,8 @@ void SimpleThresholdPolicy::initialize() {
count = MAX2(log2_intptr(os::active_processor_count()), 1) * 3 / 2;
}
set_c1_count(MAX2(count / 3, 1));
set_c2_count(MAX2(count - count / 3, 1));
set_c2_count(MAX2(count - c1_count(), 1));
FLAG_SET_ERGO(intx, CICompilerCount, c1_count() + c2_count());
}
void SimpleThresholdPolicy::set_carry_if_necessary(InvocationCounter *counter) {

View file

@@ -418,24 +418,20 @@ void vframeArrayElement::unpack_on_stack(int caller_actual_parameters,
}
int vframeArrayElement::on_stack_size(int caller_actual_parameters,
int callee_parameters,
int vframeArrayElement::on_stack_size(int callee_parameters,
int callee_locals,
bool is_top_frame,
bool is_bottom_frame,
int popframe_extra_stack_expression_els) const {
assert(method()->max_locals() == locals()->size(), "just checking");
int locks = monitors() == NULL ? 0 : monitors()->number_of_monitors();
int temps = expressions()->size();
return Interpreter::size_activation(method(),
return Interpreter::size_activation(method()->max_stack(),
temps + callee_parameters,
popframe_extra_stack_expression_els,
locks,
caller_actual_parameters,
callee_parameters,
callee_locals,
is_top_frame,
is_bottom_frame);
is_top_frame);
}

View file

@@ -85,10 +85,8 @@ class vframeArrayElement : public _ValueObj {
// Returns the on stack word size for this frame
// callee_parameters is the number of callee locals residing inside this frame
int on_stack_size(int caller_actual_parameters,
int callee_parameters,
int on_stack_size(int callee_parameters,
int callee_locals,
bool is_bottom_frame,
bool is_top_frame,
int popframe_extra_stack_expression_els) const;

View file

@@ -147,6 +147,9 @@ class GenericGrowableArray : public ResourceObj {
}
};
template<class E> class GrowableArrayIterator;
template<class E, class UnaryPredicate> class GrowableArrayFilterIterator;
template<class E> class GrowableArray : public GenericGrowableArray {
friend class VMStructs;
@@ -243,6 +246,14 @@ template<class E> class GrowableArray : public GenericGrowableArray {
return _data[_len-1];
}
GrowableArrayIterator<E> begin() const {
return GrowableArrayIterator<E>(this, 0);
}
GrowableArrayIterator<E> end() const {
return GrowableArrayIterator<E>(this, length());
}
void push(const E& elem) { append(elem); }
E pop() {
@@ -412,4 +423,83 @@ template<class E> void GrowableArray<E>::print() {
tty->print("}\n");
}
// Custom STL-style iterator to iterate over GrowableArrays
// It is constructed by invoking GrowableArray::begin() and GrowableArray::end()
template<class E> class GrowableArrayIterator : public StackObj {
friend class GrowableArray<E>;
template<class F, class UnaryPredicate> friend class GrowableArrayFilterIterator;
private:
const GrowableArray<E>* _array; // GrowableArray we iterate over
int _position; // The current position in the GrowableArray
// Private constructor used in GrowableArray::begin() and GrowableArray::end()
GrowableArrayIterator(const GrowableArray<E>* array, int position) : _array(array), _position(position) {
assert(0 <= position && position <= _array->length(), "illegal position");
}
public:
GrowableArrayIterator<E>& operator++() { ++_position; return *this; }
E operator*() { return _array->at(_position); }
bool operator==(const GrowableArrayIterator<E>& rhs) {
assert(_array == rhs._array, "iterator belongs to different array");
return _position == rhs._position;
}
bool operator!=(const GrowableArrayIterator<E>& rhs) {
assert(_array == rhs._array, "iterator belongs to different array");
return _position != rhs._position;
}
};
// Custom STL-style iterator to iterate over elements of a GrowableArray that satisfy a given predicate
template<class E, class UnaryPredicate> class GrowableArrayFilterIterator : public StackObj {
friend class GrowableArray<E>;
private:
const GrowableArray<E>* _array; // GrowableArray we iterate over
int _position; // Current position in the GrowableArray
UnaryPredicate _predicate; // Unary predicate the elements of the GrowableArray should satisfy
public:
GrowableArrayFilterIterator(const GrowableArrayIterator<E>& begin, UnaryPredicate filter_predicate)
: _array(begin._array), _position(begin._position), _predicate(filter_predicate) {
// Advance to first element satisfying the predicate
while(_position != _array->length() && !_predicate(_array->at(_position))) {
++_position;
}
}
GrowableArrayFilterIterator<E, UnaryPredicate>& operator++() {
do {
// Advance to next element satisfying the predicate
++_position;
} while(_position != _array->length() && !_predicate(_array->at(_position)));
return *this;
}
E operator*() { return _array->at(_position); }
bool operator==(const GrowableArrayIterator<E>& rhs) {
assert(_array == rhs._array, "iterator belongs to different array");
return _position == rhs._position;
}
bool operator!=(const GrowableArrayIterator<E>& rhs) {
assert(_array == rhs._array, "iterator belongs to different array");
return _position != rhs._position;
}
bool operator==(const GrowableArrayFilterIterator<E, UnaryPredicate>& rhs) {
assert(_array == rhs._array, "iterator belongs to different array");
return _position == rhs._position;
}
bool operator!=(const GrowableArrayFilterIterator<E, UnaryPredicate>& rhs) {
assert(_array == rhs._array, "iterator belongs to different array");
return _position != rhs._position;
}
};
#endif // SHARE_VM_UTILITIES_GROWABLEARRAY_HPP
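
A short usage sketch of the new iterators; the IsOdd predicate is a hypothetical functor supplied for illustration, and a ResourceMark is assumed to be active so the array can allocate from the resource area:

struct IsOdd {
  bool operator()(int e) { return (e & 1) != 0; }
};

void iterate_example() {
  ResourceMark rm;
  GrowableArray<int>* a = new GrowableArray<int>();
  a->append(1); a->append(2); a->append(3);

  // Plain iteration over all elements.
  for (GrowableArrayIterator<int> it = a->begin(); it != a->end(); ++it) {
    tty->print("%d ", *it);
  }

  // Filtered iteration: visits only elements satisfying the predicate.
  GrowableArrayFilterIterator<int, IsOdd> f(a->begin(), IsOdd());
  while (f != a->end()) {
    tty->print("%d ", *f);
    ++f;
  }
}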

View file

@@ -396,10 +396,10 @@ void xmlStream::method(methodHandle method) {
}
void xmlStream::method_text(methodHandle method) {
ResourceMark rm;
assert_if_no_error(inside_attrs(), "printing attributes");
if (method.is_null()) return;
//method->print_short_name(text());
method->method_holder()->name()->print_symbol_on(text());
text()->print(method->method_holder()->external_name());
print_raw(" "); // " " is easier for tools to parse than "::"
method->name()->print_symbol_on(text());
print_raw(" "); // separator

View file

@@ -134,7 +134,8 @@ needs_compact3 = \
runtime/InternalApi/ThreadCpuTimesDeadlock.java \
serviceability/threads/TestFalseDeadLock.java \
compiler/tiered/NonTieredLevelsTest.java \
compiler/tiered/TieredLevelsTest.java
compiler/tiered/TieredLevelsTest.java \
compiler/intrinsics/bmi/verifycode
# Compact 2 adds full VM tests
compact2 = \

View file

@@ -0,0 +1,84 @@
/*
* Copyright 2014 SAP AG. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/**
* @test
* @bug 8038048
* @summary assert(null_obj->escape_state() == PointsToNode::NoEscape,etc)
* @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+DoEscapeAnalysis -XX:-TieredCompilation -Xbatch TestUnsafePutAddressNullObjMustNotEscape
* @author Richard Reingruber richard DOT reingruber AT sap DOT com
*/
import java.lang.reflect.Field;
import sun.misc.Unsafe;
public class TestUnsafePutAddressNullObjMustNotEscape {
public static Unsafe usafe;
public static long mem;
public static long checksum;
public static void main(String[] args) throws Exception {
System.out.println("EXECUTING test.");
{
System.out.println("Acquiring sun.misc.Unsafe.theUnsafe using reflection.");
getUnsafe();
System.out.println("Allocating raw memory.");
mem = (usafe.allocateMemory(1024) + 8L) & ~7L;
System.out.println("Triggering JIT compilation of the test method");
triggerJitCompilationOfTestMethod();
}
System.out.println("SUCCESSFULLY passed test.");
}
public static void triggerJitCompilationOfTestMethod() {
long sum = 0;
for (int ii = 50000; ii >= 0; ii--) {
sum = testMethod();
}
checksum = sum;
}
public static class IDGen {
private static long id;
public long nextId() {
return id++;
}
}
public static long testMethod() {
// dummy alloc to trigger escape analysis
IDGen gen = new IDGen();
// StoreP of null_obj to raw mem triggers assertion in escape analysis
usafe.putAddress(mem, 0L);
return gen.nextId();
}
private static void getUnsafe() throws Exception {
Field field = sun.misc.Unsafe.class.getDeclaredField("theUnsafe");
field.setAccessible(true);
usafe = (sun.misc.Unsafe) field.get(null);
}
}

View file

@@ -0,0 +1,57 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8031321
* @library /testlibrary /testlibrary/whitebox /compiler/whitebox ..
* @build AddnTestI
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
* -XX:+IgnoreUnrecognizedVMOptions -XX:+UseBMI1Instructions AddnTestI
*/
import java.lang.reflect.Method;
public class AddnTestI extends BmiIntrinsicBase.BmiTestCase {
protected AddnTestI(Method method) {
super(method);
// from intel manual VEX.NDS.LZ.0F38.W0 F2 /r, example c4e260f2c2
instrMask = new byte[]{
(byte) 0xFF,
(byte) 0x1F,
(byte) 0x00,
(byte) 0xFF};
instrPattern = new byte[]{
(byte) 0xC4, // prefix for 3-byte VEX instruction
(byte) 0x02, // 00010 implied 0F 38 leading opcode bytes
(byte) 0x00,
(byte) 0xF2};
}
public static void main(String[] args) throws Exception {
BmiIntrinsicBase.verifyTestCase(AddnTestI::new, TestAndnI.AndnIExpr.class.getDeclaredMethods());
BmiIntrinsicBase.verifyTestCase(AddnTestI::new, TestAndnI.AndnICommutativeExpr.class.getDeclaredMethods());
}
}
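
The mask/pattern pair can be checked against the example encoding quoted in the constructor comment (c4 e2 60 f2 c2): each code byte is ANDed with its mask byte and compared with the pattern byte, so the VEX payload bits that vary with register allocation (masked by 0x1F and 0x00) are ignored:

  code      c4  e2  60  f2
  mask    & ff  1f  00  ff
          = c4  02  00  f2    == pattern {c4, 02, 00, f2}  -> match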

View file

@@ -0,0 +1,47 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8031321
* @library /testlibrary /testlibrary/whitebox /compiler/whitebox ..
* @build AddnTestL
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
* -XX:+IgnoreUnrecognizedVMOptions -XX:+UseBMI1Instructions AddnTestL
*/
import java.lang.reflect.Method;
public class AddnTestL extends AddnTestI {
protected AddnTestL(Method method) {
super(method);
isLongOperation = true;
}
public static void main(String[] args) throws Exception {
BmiIntrinsicBase.verifyTestCase(AddnTestL::new, TestAndnL.AndnLExpr.class.getDeclaredMethods());
BmiIntrinsicBase.verifyTestCase(AddnTestL::new, TestAndnL.AndnLCommutativeExpr.class.getDeclaredMethods());
}
}

View file

@@ -0,0 +1,59 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8031321
* @library /testlibrary /testlibrary/whitebox /compiler/whitebox ..
* @build BlsiTestI
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
* -XX:+IgnoreUnrecognizedVMOptions -XX:+UseBMI1Instructions BlsiTestI
*/
import java.lang.reflect.Method;
public class BlsiTestI extends BmiIntrinsicBase.BmiTestCase {
protected BlsiTestI(Method method) {
super(method);
//from intel manual VEX.NDD.LZ.0F38.W0 F3 /3
instrMask = new byte[]{
(byte) 0xFF,
(byte) 0x1F,
(byte) 0x00,
(byte) 0xFF,
(byte) 0b0011_1000};
instrPattern = new byte[]{
(byte) 0xC4, // prefix for 3-byte VEX instruction
(byte) 0x02, // 00010 implied 0F 38 leading opcode bytes
(byte) 0x00,
(byte) 0xF3,
(byte) 0b0001_1000}; // bits 543 == 011 (3)
}
public static void main(String[] args) throws Exception {
BmiIntrinsicBase.verifyTestCase(BlsiTestI::new, TestBlsiI.BlsiIExpr.class.getDeclaredMethods());
BmiIntrinsicBase.verifyTestCase(BlsiTestI::new, TestBlsiI.BlsiICommutativeExpr.class.getDeclaredMethods());
}
}
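
The fifth mask byte, 0b0011_1000, isolates the ModRM reg field (bits 5..3), which carries the /digit opcode extension from the manual; the fifth pattern byte then pins that field to the expected extension:

  BLSI   (/3): pattern 0b0001_1000, reg field 011 == 3
  BLSMSK (/2): pattern 0b0001_0000, reg field 010 == 2
  BLSR   (/1): pattern 0b0000_1000, reg field 001 == 1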

View file

@@ -0,0 +1,47 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8031321
* @library /testlibrary /testlibrary/whitebox /compiler/whitebox ..
* @build BlsiTestL
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
* -XX:+IgnoreUnrecognizedVMOptions -XX:+UseBMI1Instructions BlsiTestL
*/
import java.lang.reflect.Method;
public class BlsiTestL extends BlsiTestI {
protected BlsiTestL(Method method) {
super(method);
isLongOperation = true;
}
public static void main(String[] args) throws Exception {
BmiIntrinsicBase.verifyTestCase(BlsiTestL::new, TestBlsiL.BlsiLExpr.class.getDeclaredMethods());
BmiIntrinsicBase.verifyTestCase(BlsiTestL::new, TestBlsiL.BlsiLCommutativeExpr.class.getDeclaredMethods());
}
}

View file

@@ -0,0 +1,59 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8031321
* @library /testlibrary /testlibrary/whitebox /compiler/whitebox ..
* @build BlsmskTestI
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
* -XX:+IgnoreUnrecognizedVMOptions -XX:+UseBMI1Instructions BlsmskTestI
*/
import java.lang.reflect.Method;
public class BlsmskTestI extends BmiIntrinsicBase.BmiTestCase {
protected BlsmskTestI(Method method) {
super(method);
//from intel manual VEX.NDD.LZ.0F38.W0 F3 /2
instrMask = new byte[]{
(byte) 0xFF,
(byte) 0x1F,
(byte) 0x00,
(byte) 0xFF,
(byte) 0b0011_1000};
instrPattern = new byte[]{
(byte) 0xC4, // prefix for 3-byte VEX instruction
(byte) 0x02, // 00010 implied 0F 38 leading opcode bytes
(byte) 0x00,
(byte) 0xF3,
(byte) 0b0001_0000}; // bits 543 == 010 (2)
}
public static void main(String[] args) throws Exception {
BmiIntrinsicBase.verifyTestCase(BlsmskTestI::new, TestBlsmskI.BlsmskIExpr.class.getDeclaredMethods());
BmiIntrinsicBase.verifyTestCase(BlsmskTestI::new, TestBlsmskI.BlsmskICommutativeExpr.class.getDeclaredMethods());
}
}

View file

@@ -0,0 +1,47 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8031321
* @library /testlibrary /testlibrary/whitebox /compiler/whitebox ..
* @build BlsmskTestL
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
* -XX:+IgnoreUnrecognizedVMOptions -XX:+UseBMI1Instructions BlsmskTestL
*/
import java.lang.reflect.Method;
public class BlsmskTestL extends BlsmskTestI {
protected BlsmskTestL(Method method) {
super(method);
isLongOperation = true;
}
public static void main(String[] args) throws Exception {
BmiIntrinsicBase.verifyTestCase(BlsmskTestL::new, TestBlsmskL.BlsmskLExpr.class.getDeclaredMethods());
BmiIntrinsicBase.verifyTestCase(BlsmskTestL::new, TestBlsmskL.BlsmskLCommutativeExpr.class.getDeclaredMethods());
}
}

View file

@@ -0,0 +1,59 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8031321
* @library /testlibrary /testlibrary/whitebox /compiler/whitebox ..
* @build BlsrTestI
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
* -XX:+IgnoreUnrecognizedVMOptions -XX:+UseBMI1Instructions BlsrTestI
*/
import java.lang.reflect.Method;
public class BlsrTestI extends BmiIntrinsicBase.BmiTestCase {
protected BlsrTestI(Method method) {
super(method);
//from intel manual VEX.NDD.LZ.0F38.W0 F3 /1
instrMask = new byte[]{
(byte) 0xFF,
(byte) 0x1F,
(byte) 0x00,
(byte) 0xFF,
(byte) 0b0011_1000};
instrPattern = new byte[]{
(byte) 0xC4, // prefix for 3-byte VEX instruction
(byte) 0x02, // 00010 implied 0F 38 leading opcode bytes
(byte) 0x00,
(byte) 0xF3,
(byte) 0b0000_1000}; // bits 543 == 001 (1)
}
public static void main(String[] args) throws Exception {
BmiIntrinsicBase.verifyTestCase(BlsrTestI::new, TestBlsrI.BlsrIExpr.class.getDeclaredMethods());
BmiIntrinsicBase.verifyTestCase(BlsrTestI::new, TestBlsrI.BlsrICommutativeExpr.class.getDeclaredMethods());
}
}

View file

@@ -0,0 +1,47 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8031321
* @library /testlibrary /testlibrary/whitebox /compiler/whitebox ..
* @build BlsrTestL
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
* -XX:+IgnoreUnrecognizedVMOptions -XX:+UseBMI1Instructions BlsrTestL
*/
import java.lang.reflect.Method;
public class BlsrTestL extends BlsrTestI {
protected BlsrTestL(Method method) {
super(method);
isLongOperation = true;
}
public static void main(String[] args) throws Exception {
BmiIntrinsicBase.verifyTestCase(BlsrTestL::new, TestBlsrL.BlsrLExpr.class.getDeclaredMethods());
BmiIntrinsicBase.verifyTestCase(BlsrTestL::new, TestBlsrL.BlsrLCommutativeExpr.class.getDeclaredMethods());
}
}

View file

@@ -0,0 +1,186 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
import com.oracle.java.testlibrary.Asserts;
import com.oracle.java.testlibrary.Platform;
import com.oracle.java.testlibrary.Utils;
import sun.hotspot.code.NMethod;
import sun.hotspot.cpuinfo.CPUInfo;
import java.lang.reflect.Executable;
import java.lang.reflect.Method;
import java.util.concurrent.Callable;
import java.util.function.Function;
public class BmiIntrinsicBase extends CompilerWhiteBoxTest {
protected BmiIntrinsicBase(BmiTestCase testCase) {
super(testCase);
}
public static void verifyTestCase(Function<Method, BmiTestCase> constructor, Method... methods) throws Exception {
for (Method method : methods) {
new BmiIntrinsicBase(constructor.apply(method)).test();
}
}
@Override
protected void test() throws Exception {
BmiTestCase bmiTestCase = (BmiTestCase) testCase;
if (!(Platform.isX86() || Platform.isX64())) {
System.out.println("Unsupported platform, test SKIPPED");
return;
}
if (!Platform.isServer()) {
System.out.println("Not server VM, test SKIPPED");
return;
}
if (!CPUInfo.hasFeature(bmiTestCase.getCpuFlag())) {
System.out.println("Unsupported hardware, no required CPU flag " + bmiTestCase.getCpuFlag() + " , test SKIPPED");
return;
}
if (!Boolean.valueOf(getVMOption(bmiTestCase.getVMFlag()))) {
System.out.println("VM flag " + bmiTestCase.getVMFlag() + " disabled, test SKIPPED");
return;
}
System.out.println(testCase.name());
switch (MODE) {
case "compiled mode":
case "mixed mode":
if (TIERED_COMPILATION && TIERED_STOP_AT_LEVEL != CompilerWhiteBoxTest.COMP_LEVEL_MAX) {
System.out.println("TieredStopAtLevel value (" + TIERED_STOP_AT_LEVEL + ") is too low, test SKIPPED");
return;
}
deoptimize();
compileAtLevelAndCheck(CompilerWhiteBoxTest.COMP_LEVEL_MAX);
break;
case "interpreted mode": // test is not applicable in this mode;
System.err.println("Warning: This test is not applicable in mode: " + MODE);
break;
default:
throw new AssertionError("Test bug, unknown VM mode: " + MODE);
}
}
protected void compileAtLevelAndCheck(int level) {
WHITE_BOX.enqueueMethodForCompilation(method, level);
waitBackgroundCompilation();
checkCompilation(method, level);
checkEmittedCode(method);
}
protected void checkCompilation(Executable executable, int level) {
if (!WHITE_BOX.isMethodCompiled(executable)) {
throw new AssertionError("Test bug, expected compilation (level): " + level + ", but not compiled" + WHITE_BOX.isMethodCompilable(executable, level));
}
final int compilationLevel = WHITE_BOX.getMethodCompilationLevel(executable);
if (compilationLevel != level) {
throw new AssertionError("Test bug, expected compilation (level): " + level + ", but level: " + compilationLevel);
}
}
protected void checkEmittedCode(Executable executable) {
final byte[] nativeCode = NMethod.get(executable, false).insts;
if (!((BmiTestCase) testCase).verifyPositive(nativeCode)) {
throw new AssertionError(testCase.name() + ": expected CPU instructions not found: " + Utils.toHexString(nativeCode));
} else {
System.out.println("CPU instructions found, PASSED");
}
}
abstract static class BmiTestCase implements CompilerWhiteBoxTest.TestCase {
private final Method method;
protected byte[] instrMask;
protected byte[] instrPattern;
protected boolean isLongOperation;
public BmiTestCase(Method method) {
this.method = method;
}
@Override
public String name() {
return method.toGenericString();
}
@Override
public Executable getExecutable() {
return method;
}
@Override
public Callable<Integer> getCallable() {
return null;
}
@Override
public boolean isOsr() {
return false;
}
protected int countCpuInstructions(byte[] nativeCode) {
int count = 0;
int patternSize = Math.min(instrMask.length, instrPattern.length);
boolean found;
Asserts.assertGreaterThan(patternSize, 0);
for (int i = 0, n = nativeCode.length - patternSize; i < n; i++) {
found = true;
for (int j = 0; j < patternSize; j++) {
if ((nativeCode[i + j] & instrMask[j]) != instrPattern[j]) {
found = false;
break;
}
}
if (found) {
++count;
i += patternSize - 1;
}
}
return count;
}
public boolean verifyPositive(byte[] nativeCode) {
final int cnt = countCpuInstructions(nativeCode);
if (Platform.isX86()) {
return cnt >= (isLongOperation ? 2 : 1);
} else {
return Platform.isX64() && cnt >= 1;
}
}
protected String getCpuFlag() {
return "bmi1";
}
protected String getVMFlag() {
return "UseBMI1Instructions";
}
}
}

View file

@@ -0,0 +1,59 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8031321
* @library /testlibrary /testlibrary/whitebox /compiler/whitebox ..
* @build LZcntTestI
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
* -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCountLeadingZerosInstruction LZcntTestI
*/
import java.lang.reflect.Method;
public class LZcntTestI extends BmiIntrinsicBase.BmiTestCase {
protected LZcntTestI(Method method) {
super(method);
instrMask = new byte[]{(byte) 0xFF, (byte) 0xFF, (byte) 0xFF};
instrPattern = new byte[]{(byte) 0xF3, (byte) 0x0F, (byte) 0xBD};
}
public static void main(String[] args) throws Exception {
// java.lang.Integer and java.lang.Long should be loaded so that the methods using them can be compiled
System.out.println("class java.lang.Integer should be loaded. Proof: " + Integer.class);
BmiIntrinsicBase.verifyTestCase(LZcntTestI::new, TestLzcntI.LzcntIExpr.class.getDeclaredMethods());
}
@Override
protected String getVMFlag() {
return "UseCountLeadingZerosInstruction";
}
@Override
protected String getCpuFlag() {
return "lzcnt";
}
}

View file

@@ -0,0 +1,54 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8031321
* @library /testlibrary /testlibrary/whitebox /compiler/whitebox ..
* @build LZcntTestL
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
* -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCountLeadingZerosInstruction LZcntTestL
*/
import com.oracle.java.testlibrary.Platform;
import java.lang.reflect.Method;
public class LZcntTestL extends LZcntTestI {
protected LZcntTestL(Method method) {
super(method);
isLongOperation = true;
if (Platform.isX64()) {
instrMask = new byte[]{(byte) 0xFF, (byte) 0x00, (byte) 0xFF, (byte) 0xFF};
instrPattern = new byte[]{(byte) 0xF3, (byte) 0x00, (byte) 0x0F, (byte) 0xBD};
}
}
public static void main(String[] args) throws Exception {
// java.lang.Integer and java.lang.Long should be loaded so that the methods using them can be compiled
System.out.println("class java.lang.Long should be loaded. Proof: " + Long.class);
BmiIntrinsicBase.verifyTestCase(LZcntTestL::new, TestLzcntL.LzcntLExpr.class.getDeclaredMethods());
}
}

View file

@@ -0,0 +1,54 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8031321
* @library /testlibrary /testlibrary/whitebox /compiler/whitebox ..
* @build TZcntTestI
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
* -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCountTrailingZerosInstruction TZcntTestI
*/
import java.lang.reflect.Method;
public class TZcntTestI extends BmiIntrinsicBase.BmiTestCase {
protected TZcntTestI(Method method) {
super(method);
instrMask = new byte[]{(byte) 0xFF, (byte) 0xFF, (byte) 0xFF};
instrPattern = new byte[]{(byte) 0xF3, (byte) 0x0F, (byte) 0xBC};
}
public static void main(String[] args) throws Exception {
// java.lang.Integer and java.lang.Long should be loaded so that the methods using them can be compiled
System.out.println("class java.lang.Integer should be loaded. Proof: " + Integer.class);
BmiIntrinsicBase.verifyTestCase(TZcntTestI::new, TestTzcntI.TzcntIExpr.class.getDeclaredMethods());
}
@Override
protected String getVMFlag() {
return "UseCountTrailingZerosInstruction";
}
}

View file

@@ -0,0 +1,55 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8031321
* @library /testlibrary /testlibrary/whitebox /compiler/whitebox ..
* @build TZcntTestL
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
* -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCountTrailingZerosInstruction TZcntTestL
*/
import com.oracle.java.testlibrary.Platform;
import java.lang.reflect.Method;
public class TZcntTestL extends TZcntTestI {
protected TZcntTestL(Method method) {
super(method);
isLongOperation = true;
if (Platform.isX64()) {
instrMask = new byte[]{(byte) 0xFF, (byte) 0x00, (byte) 0xFF, (byte) 0xFF};
instrPattern = new byte[]{(byte) 0xF3, (byte) 0x00, (byte) 0x0F, (byte) 0xBC};
}
}
public static void main(String[] args) throws Exception {
        // java.lang.Long must be loaded so that the methods using it can be compiled
        System.out.println("class java.lang.Long should be loaded. Proof: " + Long.class);
BmiIntrinsicBase.verifyTestCase(TZcntTestL::new, TestTzcntL.TzcntLExpr.class.getDeclaredMethods());
}
}
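
On x64 the 4-byte mask widens the window to allow a REX prefix between the mandatory F3 prefix and the 0F BC opcode; the 0x00 mask byte wildcards whatever REX byte appears there. A self-contained illustration (the concrete encoding is illustrative):

class TzcntRexDemo {
    public static void main(String[] args) {
        byte[] mask    = { (byte) 0xFF, (byte) 0x00, (byte) 0xFF, (byte) 0xFF };
        byte[] pattern = { (byte) 0xF3, (byte) 0x00, (byte) 0x0F, (byte) 0xBC };
        // F3 48 0F BC: tzcnt r64, r/m64 -- REX.W (0x48) is wildcarded.
        byte[] code    = { (byte) 0xF3, (byte) 0x48, (byte) 0x0F, (byte) 0xBC };
        boolean ok = true;
        for (int i = 0; i < pattern.length; i++) {
            ok &= (code[i] & mask[i] & 0xFF) == (pattern[i] & 0xFF);
        }
        System.out.println("matches: " + ok); // expected: true
    }
}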

View file

@@ -0,0 +1,198 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
import com.oracle.java.testlibrary.*;
import com.oracle.java.testlibrary.cli.*;
import java.util.function.BooleanSupplier;
/**
* Base for all RTM-related CLI tests.
*/
public abstract class RTMGenericCommandLineOptionTest
extends CommandLineOptionTest {
protected static final String RTM_INSTR_ERROR
= "RTM instructions are not available on this CPU";
protected static final String RTM_UNSUPPORTED_VM_ERROR
= "RTM locking optimization is not supported in this VM";
protected static final String RTM_ABORT_RATIO_WARNING
= "RTMAbortRatio must be in the range 0 to 100, resetting it to 50";
protected static final String RTM_FOR_STACK_LOCKS_WARNING
= "UseRTMForStackLocks flag should be off when UseRTMLocking "
+ "flag is off";
protected static final String RTM_COUNT_INCR_WARNING
= "RTMTotalCountIncrRate must be a power of 2, resetting it to 64";
protected static final String RTM_BIASED_LOCKING_WARNING
= "Biased locking is not supported with RTM locking; "
+ "ignoring UseBiasedLocking flag";
protected final String optionName;
protected final String errorMessage;
protected final String experimentalOptionError;
protected final boolean isExperimental;
protected final boolean isBoolean;
protected final String defaultValue;
protected final String[] optionValues;
/**
     * Constructs a new generic RTM CLI test for option {@code optionName},
     * which has default value {@code defaultValue}. Test cases use the
     * option values passed via {@code optionValues} to verify correct
     * option processing.
     *
     * A test constructed with this ctor will run on any CPU, regardless of
     * its architecture and of supported/unsupported features.
*
* @param predicate predicate responsible for test's preconditions check
* @param optionName name of option to be tested
* @param isBoolean {@code true} if option is binary
* @param isExperimental {@code true} if option is experimental
* @param defaultValue default value of tested option
* @param optionValues different option values
*/
public RTMGenericCommandLineOptionTest(BooleanSupplier predicate,
String optionName, boolean isBoolean, boolean isExperimental,
String defaultValue, String... optionValues) {
super(predicate);
this.optionName = optionName;
this.isExperimental = isExperimental;
this.isBoolean = isBoolean;
this.defaultValue = defaultValue;
this.optionValues = optionValues;
this.errorMessage = CommandLineOptionTest.
getUnrecognizedOptionErrorMessage(optionName);
this.experimentalOptionError = CommandLineOptionTest.
getExperimentalOptionErrorMessage(optionName);
}
@Override
public void runTestCases() throws Throwable {
if (Platform.isX86() || Platform.isX64()) {
if (Platform.isServer() && !Platform.isEmbedded()) {
runX86SupportedVMTestCases();
} else {
runX86UnsupportedVMTestCases();
}
} else {
runNonX86TestCases();
}
}
/**
* Runs test cases on X86 CPU if VM supports RTM locking.
* @throws Throwable
*/
protected void runX86SupportedVMTestCases() throws Throwable {
runGenericX86TestCases();
}
/**
     * Runs test cases on X86 CPU if VM does not support RTM locking.
* @throws Throwable
*/
protected void runX86UnsupportedVMTestCases() throws Throwable {
runGenericX86TestCases();
}
/**
* Runs test cases on non-X86 CPU.
* @throws Throwable
*/
protected void runNonX86TestCases() throws Throwable {
CommandLineOptionTest.verifySameJVMStartup(
new String[] { errorMessage }, null, ExitCode.FAIL,
prepareOptionValue(defaultValue));
}
/**
* Runs generic X86 test cases.
* @throws Throwable
*/
protected void runGenericX86TestCases() throws Throwable {
verifyJVMStartup();
verifyOptionValues();
}
protected void verifyJVMStartup() throws Throwable {
String optionValue = prepareOptionValue(defaultValue);
if (isExperimental) {
// verify that option is experimental
CommandLineOptionTest.verifySameJVMStartup(
new String[] { experimentalOptionError },
new String[] { errorMessage }, ExitCode.FAIL,
optionValue);
            // verify that it can be passed if experimental options
            // are unlocked
CommandLineOptionTest.verifySameJVMStartup(null,
new String[] {
experimentalOptionError,
errorMessage
},
ExitCode.OK,
CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
optionValue);
} else {
            // verify that the option can be passed
CommandLineOptionTest.verifySameJVMStartup(null,
new String[]{errorMessage}, ExitCode.OK, optionValue);
}
}
protected void verifyOptionValues() throws Throwable {
// verify default value
if (isExperimental) {
CommandLineOptionTest.verifyOptionValueForSameVM(optionName,
defaultValue,
CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS);
} else {
CommandLineOptionTest.verifyOptionValueForSameVM(optionName,
defaultValue);
}
// verify other specified option values
if (optionValues == null) {
return;
}
for (String value : optionValues) {
if (isExperimental) {
CommandLineOptionTest.verifyOptionValueForSameVM(optionName,
value,
CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
prepareOptionValue(value));
} else {
CommandLineOptionTest.verifyOptionValueForSameVM(optionName,
value, prepareOptionValue(value));
}
}
}
protected String prepareOptionValue(String value) {
if (isBoolean) {
return CommandLineOptionTest.prepareBooleanFlag(optionName,
Boolean.valueOf(value));
} else {
return String.format("-XX:%s=%s", optionName, value);
}
}
}
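
prepareOptionValue maps a raw value string to a command-line flag: boolean options are delegated to prepareBooleanFlag (presumably yielding the -XX:+Name / -XX:-Name form), all others use -XX:Name=value. A minimal sketch of the non-boolean branch, without the test library:

class OptionValueSketch {
    public static void main(String[] args) {
        String optionName = "RTMRetryCount"; // an option tested later in this change
        String value = "10";
        // Mirrors the non-boolean branch of prepareOptionValue.
        System.out.println(String.format("-XX:%s=%s", optionName, value));
        // prints: -XX:RTMRetryCount=10
    }
}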

View file

@@ -0,0 +1,154 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
import java.util.List;
import java.util.LinkedList;
import com.oracle.java.testlibrary.ExitCode;
import com.oracle.java.testlibrary.cli.*;
import com.oracle.java.testlibrary.cli.predicate.AndPredicate;
import rtm.predicate.SupportedCPU;
import rtm.predicate.SupportedVM;
/**
* Base for all RTM-related CLI tests on options whose processing depends
* on UseRTMLocking value.
*
 * Since the UseRTMLocking option can only be used when both the CPU and the VM
 * support RTM locking, this test is skipped on all unsupported configurations.
*/
public abstract class RTMLockingAwareTest
extends RTMGenericCommandLineOptionTest {
protected final String warningMessage;
protected final String[] correctValues;
protected final String[] incorrectValues;
/**
     * Constructs a new test for option {@code optionName} that should be
     * executed only on CPUs with RTM support.
     * The test is executed using the set of correct values from
     * {@code correctValues} and the set of incorrect values from
     * {@code incorrectValues}.
*
* @param optionName name of option to be tested
* @param isBoolean {@code true} if tested option is binary
* @param isExperimental {@code true} if tested option is experimental
* @param defaultValue default value of tested option
     * @param correctValues array of correct values that should not emit
     *        {@code warningMessage} to VM output
     * @param incorrectValues array of incorrect values that should emit
     *        {@code warningMessage} to VM output
* @param warningMessage warning message associated with tested option
*/
protected RTMLockingAwareTest(String optionName, boolean isBoolean,
boolean isExperimental, String defaultValue,
String[] correctValues, String[] incorrectValues,
String warningMessage) {
super(new AndPredicate(new SupportedCPU(), new SupportedVM()),
optionName, isBoolean, isExperimental, defaultValue);
this.correctValues = correctValues;
this.incorrectValues = incorrectValues;
this.warningMessage = warningMessage;
}
@Override
protected void verifyJVMStartup() throws Throwable {
// Run generic sanity checks
super.verifyJVMStartup();
        // Verify how option values are processed depending on the
        // UseRTMLocking value.
if (correctValues != null) {
for (String correctValue : correctValues) {
                // For correct values no warnings are expected,
                // regardless of UseRTMLocking.
verifyStartupWarning(correctValue, true, false);
verifyStartupWarning(correctValue, false, false);
}
}
if (incorrectValues != null) {
for (String incorrectValue : incorrectValues) {
                // For incorrect values a warning is expected
                // only with -XX:+UseRTMLocking.
verifyStartupWarning(incorrectValue, true, true);
verifyStartupWarning(incorrectValue, false, false);
}
}
}
@Override
protected void verifyOptionValues() throws Throwable {
super.verifyOptionValues();
        // Verify how option values are set up after processing,
        // depending on the UseRTMLocking value.
if (correctValues != null) {
for (String correctValue : correctValues) {
                // A correct value can be set regardless of UseRTMLocking.
verifyOptionValues(correctValue, false, correctValue);
verifyOptionValues(correctValue, true, correctValue);
}
}
if (incorrectValues != null) {
for (String incorrectValue : incorrectValues) {
                // With -XX:+UseRTMLocking, an incorrect value is replaced by
                // the default value.
verifyOptionValues(incorrectValue, false, incorrectValue);
verifyOptionValues(incorrectValue, true, defaultValue);
}
}
}
private void verifyStartupWarning(String value, boolean useRTMLocking,
boolean isWarningExpected) throws Throwable {
        String[] warnings = new String[] { warningMessage };
List<String> options = new LinkedList<>();
options.add(CommandLineOptionTest.prepareBooleanFlag("UseRTMLocking",
useRTMLocking));
if (isExperimental) {
options.add(CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS);
}
options.add(prepareOptionValue(value));
CommandLineOptionTest.verifySameJVMStartup(
(isWarningExpected ? warnings : null),
(isWarningExpected ? null : warnings),
ExitCode.OK, options.toArray(new String[options.size()]));
}
private void verifyOptionValues(String value, boolean useRTMLocking,
String expectedValue) throws Throwable {
List<String> options = new LinkedList<>();
options.add(CommandLineOptionTest.prepareBooleanFlag("UseRTMLocking",
useRTMLocking));
if (isExperimental) {
options.add(CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS);
}
options.add(prepareOptionValue(value));
CommandLineOptionTest.verifyOptionValueForSameVM(optionName,
expectedValue, options.toArray(new String[options.size()]));
}
}
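
For each value, verifyStartupWarning and verifyOptionValues launch the VM with a UseRTMLocking flag, the experimental-unlock flag when needed, and the option under test. A sketch of the argument list they assemble; the expansion of UNLOCK_EXPERIMENTAL_VM_OPTIONS to -XX:+UnlockExperimentalVMOptions is an assumption here:

import java.util.LinkedList;
import java.util.List;

class RtmStartupArgsSketch {
    public static void main(String[] args) {
        // Mirrors the list built for an experimental option with value "101"
        // and useRTMLocking == true.
        List<String> options = new LinkedList<>();
        options.add("-XX:+UseRTMLocking");
        options.add("-XX:+UnlockExperimentalVMOptions"); // assumed constant value
        options.add("-XX:RTMAbortRatio=101");            // prepareOptionValue("101")
        System.out.println(String.join(" ", options));
    }
}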

View file

@@ -0,0 +1,85 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
import com.oracle.java.testlibrary.*;
import com.oracle.java.testlibrary.cli.*;
import java.util.function.BooleanSupplier;
public abstract class TestPrintPreciseRTMLockingStatisticsBase
extends RTMGenericCommandLineOptionTest {
protected static final String DEFAULT_VALUE = "false";
protected TestPrintPreciseRTMLockingStatisticsBase(
BooleanSupplier predicate) {
super(predicate, "PrintPreciseRTMLockingStatistics", true, false,
TestPrintPreciseRTMLockingStatisticsBase.DEFAULT_VALUE);
}
@Override
protected void runNonX86TestCases() throws Throwable {
verifyJVMStartup();
verifyOptionValues();
}
@Override
protected void verifyJVMStartup() throws Throwable {
if (Platform.isServer()) {
if (!Platform.isDebugBuild()) {
String errorMessage = CommandLineOptionTest.
getDiagnosticOptionErrorMessage(optionName);
// verify that option is actually diagnostic
CommandLineOptionTest.verifySameJVMStartup(
new String[] { errorMessage }, null, ExitCode.FAIL,
prepareOptionValue("true"));
CommandLineOptionTest.verifySameJVMStartup(null,
new String[] { errorMessage }, ExitCode.OK,
CommandLineOptionTest.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
prepareOptionValue("true"));
} else {
CommandLineOptionTest.verifySameJVMStartup(
null, null, ExitCode.OK, prepareOptionValue("true"));
}
} else {
String errorMessage = CommandLineOptionTest.
getUnrecognizedOptionErrorMessage(optionName);
CommandLineOptionTest.verifySameJVMStartup(
new String[]{errorMessage}, null, ExitCode.FAIL,
CommandLineOptionTest.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
prepareOptionValue("true"));
}
}
@Override
protected void verifyOptionValues() throws Throwable {
if (Platform.isServer()) {
// Verify default value
CommandLineOptionTest.verifyOptionValueForSameVM(optionName,
TestPrintPreciseRTMLockingStatisticsBase.DEFAULT_VALUE,
CommandLineOptionTest.UNLOCK_DIAGNOSTIC_VM_OPTIONS);
}
}
}

View file

@@ -0,0 +1,73 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 8031320
* @summary Verify PrintPreciseRTMLockingStatistics on CPUs with
 *          rtm support and on VM with rtm locking support.
* @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
* @build TestPrintPreciseRTMLockingStatisticsOptionOnSupportedConfig
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI
* TestPrintPreciseRTMLockingStatisticsOptionOnSupportedConfig
*/
import com.oracle.java.testlibrary.cli.*;
import com.oracle.java.testlibrary.cli.predicate.AndPredicate;
import rtm.predicate.SupportedCPU;
import rtm.predicate.SupportedVM;
public class TestPrintPreciseRTMLockingStatisticsOptionOnSupportedConfig
extends TestPrintPreciseRTMLockingStatisticsBase {
private TestPrintPreciseRTMLockingStatisticsOptionOnSupportedConfig() {
super(new AndPredicate(new SupportedVM(), new SupportedCPU()));
}
@Override
protected void verifyOptionValues() throws Throwable {
super.verifyOptionValues();
// verify default value
CommandLineOptionTest.verifyOptionValueForSameVM(optionName,
TestPrintPreciseRTMLockingStatisticsBase.DEFAULT_VALUE,
CommandLineOptionTest.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
"-XX:+UseRTMLocking");
CommandLineOptionTest.verifyOptionValueForSameVM(optionName,
TestPrintPreciseRTMLockingStatisticsBase.DEFAULT_VALUE,
CommandLineOptionTest.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
"-XX:-UseRTMLocking", prepareOptionValue("true"));
        // verify that the option can be turned on
CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "true",
CommandLineOptionTest.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
"-XX:+UseRTMLocking", prepareOptionValue("true"));
}
public static void main(String args[]) throws Throwable {
new TestPrintPreciseRTMLockingStatisticsOptionOnSupportedConfig()
.test();
}
}

View file

@@ -0,0 +1,54 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 8031320
* @summary Verify PrintPreciseRTMLockingStatistics on CPUs without
* rtm support and/or unsupported VM.
* @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
* @build TestPrintPreciseRTMLockingStatisticsOptionOnUnsupportedConfig
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI
* TestPrintPreciseRTMLockingStatisticsOptionOnUnsupportedConfig
*/
import com.oracle.java.testlibrary.cli.predicate.AndPredicate;
import com.oracle.java.testlibrary.cli.predicate.NotPredicate;
import rtm.predicate.SupportedCPU;
import rtm.predicate.SupportedVM;
public class TestPrintPreciseRTMLockingStatisticsOptionOnUnsupportedConfig
extends TestPrintPreciseRTMLockingStatisticsBase {
private TestPrintPreciseRTMLockingStatisticsOptionOnUnsupportedConfig() {
super(new NotPredicate(new AndPredicate(new SupportedCPU(),
new SupportedVM())));
}
public static void main(String args[]) throws Throwable {
new TestPrintPreciseRTMLockingStatisticsOptionOnUnsupportedConfig()
.test();
}
}

View file

@@ -0,0 +1,54 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 8031320
* @summary Verify RTMAbortRatio option processing on CPU with rtm
* support and on VM with rtm locking support.
* @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
* @build TestRTMAbortRatioOptionOnSupportedConfig
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI TestRTMAbortRatioOptionOnSupportedConfig
*/
public class TestRTMAbortRatioOptionOnSupportedConfig
extends RTMLockingAwareTest {
private static final String DEFAULT_VALUE = "50";
private TestRTMAbortRatioOptionOnSupportedConfig() {
super("RTMAbortRatio", false, true,
TestRTMAbortRatioOptionOnSupportedConfig.DEFAULT_VALUE,
/* correct values */
new String[] { "0", "20", "100" },
/* incorrect values */
new String[] { "-1", "101" },
RTMGenericCommandLineOptionTest.RTM_ABORT_RATIO_WARNING);
}
public static void main(String args[]) throws Throwable {
new TestRTMAbortRatioOptionOnSupportedConfig().test();
}
}

View file

@@ -0,0 +1,57 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 8031320
* @summary Verify RTMAbortRatio option processing on CPU without rtm
* support or on VM that does not support rtm locking.
* @library /testlibrary /testlibrary/whitebox /compiler/testlibrary
* @build TestRTMAbortRatioOptionOnUnsupportedConfig
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI TestRTMAbortRatioOptionOnUnsupportedConfig
*/
import com.oracle.java.testlibrary.cli.predicate.AndPredicate;
import com.oracle.java.testlibrary.cli.predicate.NotPredicate;
import rtm.predicate.SupportedCPU;
import rtm.predicate.SupportedVM;
public class TestRTMAbortRatioOptionOnUnsupportedConfig
extends RTMGenericCommandLineOptionTest {
private static final String DEFAULT_VALUE = "50";
private TestRTMAbortRatioOptionOnUnsupportedConfig() {
super(new NotPredicate(new AndPredicate(new SupportedVM(),
new SupportedCPU())),
"RTMAbortRatio", false, true,
TestRTMAbortRatioOptionOnUnsupportedConfig.DEFAULT_VALUE,
"0", "10", "100", "200");
}
public static void main(String args[]) throws Throwable {
new TestRTMAbortRatioOptionOnUnsupportedConfig().test();
}
}

View file

@@ -0,0 +1,47 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 8031320
* @summary Verify processing of RTMAbortThreshold option.
* @library /testlibrary
* @build TestRTMAbortThresholdOption
* @run main/othervm TestRTMAbortThresholdOption
*/
public class TestRTMAbortThresholdOption
extends RTMGenericCommandLineOptionTest {
private static final String DEFAULT_VALUE = "1000";
private TestRTMAbortThresholdOption() {
super(Boolean.TRUE::booleanValue, "RTMAbortThreshold", false, true,
TestRTMAbortThresholdOption.DEFAULT_VALUE,
"0", "42", "100", "10000");
}
public static void main(String args[]) throws Throwable {
new TestRTMAbortThresholdOption().test();
}
}

View file

@@ -0,0 +1,46 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 8031320
* @summary Verify processing of RTMLockingCalculationDelay option.
* @library /testlibrary
* @build TestRTMLockingCalculationDelayOption
* @run main/othervm TestRTMLockingCalculationDelayOption
*/
public class TestRTMLockingCalculationDelayOption
extends RTMGenericCommandLineOptionTest {
private static final String DEFAULT_VALUE = "0";
private TestRTMLockingCalculationDelayOption() {
super(Boolean.TRUE::booleanValue, "RTMLockingCalculationDelay", false,
true, TestRTMLockingCalculationDelayOption.DEFAULT_VALUE);
}
    public static void main(String[] args) throws Throwable {
new TestRTMLockingCalculationDelayOption().test();
}
}

View file

@@ -0,0 +1,46 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 8031320
* @summary Verify processing of RTMLockingThreshold option.
* @library /testlibrary
* @build TestRTMLockingThresholdOption
* @run main/othervm TestRTMLockingThresholdOption
*/
public class TestRTMLockingThresholdOption
extends RTMGenericCommandLineOptionTest {
private static final String DEFAULT_VALUE = "10000";
private TestRTMLockingThresholdOption() {
super(Boolean.TRUE::booleanValue, "RTMLockingThreshold", false, true,
TestRTMLockingThresholdOption.DEFAULT_VALUE);
}
public static void main(String args[]) throws Throwable {
new TestRTMLockingThresholdOption().test();
}
}

View file

@@ -0,0 +1,46 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 8031320
* @summary Verify processing of RTMRetryCount option.
* @library /testlibrary
* @build TestRTMRetryCountOption
* @run main/othervm TestRTMRetryCountOption
*/
public class TestRTMRetryCountOption extends RTMGenericCommandLineOptionTest {
private static final String DEFAULT_VALUE = "5";
private TestRTMRetryCountOption() {
super(Boolean.TRUE::booleanValue, "RTMRetryCount", false, false,
TestRTMRetryCountOption.DEFAULT_VALUE,
"0", "10", "100", "1000");
}
public static void main(String args[]) throws Throwable {
new TestRTMRetryCountOption().test();
}
}

Some files were not shown because too many files have changed in this diff