Mirror of https://github.com/openjdk/jdk.git (synced 2025-09-23 20:44:41 +02:00)
Commit 80b04bb48e (Merge)
10 changed files with 367 additions and 66 deletions
@@ -279,7 +279,7 @@ class StubGenerator: public StubCodeGenerator {
     __ stmxcsr(mxcsr_save);
     __ movl(rax, mxcsr_save);
     __ andl(rax, MXCSR_MASK); // Only check control and mask bits
-    ExternalAddress mxcsr_std(StubRoutines::x86::mxcsr_std());
+    ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
     __ cmp32(rax, mxcsr_std);
     __ jcc(Assembler::equal, skip_ldmx);
     __ ldmxcsr(mxcsr_std);

@@ -729,17 +729,18 @@ class StubGenerator: public StubCodeGenerator {

     if (CheckJNICalls) {
       Label ok_ret;
+      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
       __ push(rax);
       __ subptr(rsp, wordSize); // allocate a temp location
       __ stmxcsr(mxcsr_save);
       __ movl(rax, mxcsr_save);
       __ andl(rax, MXCSR_MASK); // Only check control and mask bits
-      __ cmpl(rax, *(int *)(StubRoutines::x86::mxcsr_std()));
+      __ cmp32(rax, mxcsr_std);
       __ jcc(Assembler::equal, ok_ret);

       __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");

-      __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));
+      __ ldmxcsr(mxcsr_std);

       __ bind(ok_ret);
       __ addptr(rsp, wordSize);

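Context for the hunk above: the stub saves MXCSR to a stack temporary, masks off the sticky exception-flag bits, and compares the remaining control/mask bits against the canonical value now published through StubRoutines::addr_mxcsr_std(). A minimal standalone C++ sketch of the same check follows; it is not HotSpot code, and the 0xFFC0 mask is an assumption standing in for the stub's MXCSR_MASK constant.

```cpp
// Standalone sketch of the MXCSR verification, assuming an x86 toolchain
// that provides _mm_getcsr()/_mm_setcsr() (not HotSpot code).
#include <cstdio>
#include <xmmintrin.h>

int main() {
  const unsigned int kMxcsrStd  = 0x1F80;  // round to nearest, all exceptions masked
  const unsigned int kCheckMask = 0xFFC0;  // ignore the six sticky exception flags (assumed)

  unsigned int mxcsr = _mm_getcsr();
  if ((mxcsr & kCheckMask) != kMxcsrStd) {
    std::printf("MXCSR changed by native code: 0x%04X\n", mxcsr & kCheckMask);
    _mm_setcsr(kMxcsrStd);                 // restore the standard value
  } else {
    std::printf("MXCSR is at the standard value 0x1F80\n");
  }
  return 0;
}
```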
@@ -3767,12 +3768,35 @@ class StubGenerator: public StubCodeGenerator {
     return stub->entry_point();
   }

+  void create_control_words() {
+    // Round to nearest, 53-bit mode, exceptions masked
+    StubRoutines::_fpu_cntrl_wrd_std   = 0x027F;
+    // Round to zero, 53-bit mode, exceptions masked
+    StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F;
+    // Round to nearest, 24-bit mode, exceptions masked
+    StubRoutines::_fpu_cntrl_wrd_24    = 0x007F;
+    // Round to nearest, 64-bit mode, exceptions masked
+    StubRoutines::_fpu_cntrl_wrd_64    = 0x037F;
+    // Round to nearest, exceptions masked
+    StubRoutines::_mxcsr_std           = 0x1F80;
+    // Note: the following two constants are 80-bit values
+    //       layout is critical for correct loading by FPU.
+    // Bias for strict fp multiply/divide
+    StubRoutines::_fpu_subnormal_bias1[0] = 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
+    StubRoutines::_fpu_subnormal_bias1[1] = 0x80000000;
+    StubRoutines::_fpu_subnormal_bias1[2] = 0x03ff;
+    // Un-Bias for strict fp multiply/divide
+    StubRoutines::_fpu_subnormal_bias2[0] = 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
+    StubRoutines::_fpu_subnormal_bias2[1] = 0x80000000;
+    StubRoutines::_fpu_subnormal_bias2[2] = 0x7bff;
+  }
+
   // Initialization
   void generate_initial() {
     // Generates all stubs and initializes the entry points

-    // This platform-specific stub is needed by generate_call_stub()
-    StubRoutines::x86::_mxcsr_std = generate_fp_mask("mxcsr_std", 0x0000000000001F80);
+    // These platform-specific settings are needed by generate_call_stub()
+    create_control_words();

     // entry points that exist in all platforms Note: This is code
     // that could be shared among different platforms - however the

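The constants installed by create_control_words() are plain x87 FCW / MXCSR bit patterns. As a rough illustration (ordinary C++, not HotSpot code), the sketch below decodes the four FCW values using the documented layout: bits 0-5 are exception masks, bits 9:8 select precision, bits 11:10 select rounding.

```cpp
// Decode the x87 control-word constants from the hunk above (illustrative only).
#include <cstdio>

static void decode_fcw(const char* name, unsigned int fcw) {
  static const char* pc[] = { "24-bit", "reserved", "53-bit", "64-bit" };
  static const char* rc[] = { "nearest", "down", "up", "toward zero" };
  std::printf("%-22s 0x%04X  exception masks=0x%02X  precision=%s  rounding=%s\n",
              name, fcw, fcw & 0x3F, pc[(fcw >> 8) & 3], rc[(fcw >> 10) & 3]);
}

int main() {
  decode_fcw("_fpu_cntrl_wrd_std",   0x027F);
  decode_fcw("_fpu_cntrl_wrd_trunc", 0x0D7F);
  decode_fcw("_fpu_cntrl_wrd_24",    0x007F);
  decode_fcw("_fpu_cntrl_wrd_64",    0x037F);
  return 0;
}
```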
@@ -42,4 +42,3 @@ address StubRoutines::x86::_float_sign_mask = NULL;
 address StubRoutines::x86::_float_sign_flip = NULL;
 address StubRoutines::x86::_double_sign_mask = NULL;
 address StubRoutines::x86::_double_sign_flip = NULL;
-address StubRoutines::x86::_mxcsr_std = NULL;

@@ -52,7 +52,6 @@ class x86 {
   static address _float_sign_flip;
   static address _double_sign_mask;
   static address _double_sign_flip;
-  static address _mxcsr_std;

  public:

@@ -106,11 +105,6 @@ class x86 {
     return _double_sign_flip;
   }

-  static address mxcsr_std()
-  {
-    return _mxcsr_std;
-  }
-
 # include "stubRoutines_x86.hpp"

 };

@@ -138,6 +138,16 @@ bool BCEscapeAnalyzer::is_arg_stack(ArgumentMap vars){
   return false;
 }

+// return true if all argument elements of vars are returned
+bool BCEscapeAnalyzer::returns_all(ArgumentMap vars) {
+  for (int i = 0; i < _arg_size; i++) {
+    if (vars.contains(i) && !_arg_returned.test(i)) {
+      return false;
+    }
+  }
+  return true;
+}
+
 void BCEscapeAnalyzer::clear_bits(ArgumentMap vars, VectorSet &bm) {
   for (int i = 0; i < _arg_size; i++) {
     if (vars.contains(i)) {

@@ -166,6 +176,11 @@ void BCEscapeAnalyzer::set_global_escape(ArgumentMap vars, bool merge) {
     if (vars.contains_unknown() || vars.contains_vars()) {
       _return_allocated = false;
     }
+    if (_return_local && vars.contains_vars() && !returns_all(vars)) {
+      // Return result should be invalidated if args in new
+      // state are not recorded in return state.
+      _return_local = false;
+    }
   }
 }

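The new returns_all() predicate and its use in set_global_escape() enforce one invariant: the cached "results are local/returned" summary stays valid only if every argument reachable from the current state is also recorded in the return state. A small standalone model of that check, using std::bitset in place of HotSpot's ArgumentMap/VectorSet (an assumption made only for illustration):

```cpp
// Standalone model of the returns_all() invariant; std::bitset stands in for
// HotSpot's ArgumentMap/VectorSet (illustration only, not HotSpot code).
#include <bitset>
#include <cstdio>

constexpr int kMaxArgs = 8;

bool returns_all(const std::bitset<kMaxArgs>& vars,
                 const std::bitset<kMaxArgs>& arg_returned,
                 int arg_size) {
  for (int i = 0; i < arg_size; i++) {
    if (vars.test(i) && !arg_returned.test(i)) {
      return false;  // an escaping argument is not part of the return state
    }
  }
  return true;
}

int main() {
  std::bitset<kMaxArgs> returned;  returned.set(0);                  // only arg 0 is returned
  std::bitset<kMaxArgs> escaping;  escaping.set(0); escaping.set(1); // args 0 and 1 escape
  // Prints "returns_all = false": arg 1 escapes but was never recorded as
  // returned, so a cached "_return_local" style summary must be invalidated.
  std::printf("returns_all = %s\n", returns_all(escaping, returned, 2) ? "true" : "false");
  return 0;
}
```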
@@ -80,6 +80,7 @@ class BCEscapeAnalyzer : public ResourceObj {
   void set_returned(ArgumentMap vars);
   bool is_argument(ArgumentMap vars);
   bool is_arg_stack(ArgumentMap vars);
+  bool returns_all(ArgumentMap vars);
   void clear_bits(ArgumentMap vars, VectorSet &bs);
   void set_method_escape(ArgumentMap vars);
   void set_global_escape(ArgumentMap vars, bool merge = false);

@@ -2305,26 +2305,26 @@ void Matcher::validate_null_checks( ) {
 // atomic instruction acting as a store_load barrier without any
 // intervening volatile load, and thus we don't need a barrier here.
 // We retain the Node to act as a compiler ordering barrier.
-bool Matcher::post_store_load_barrier(const Node *vmb) {
-  Compile *C = Compile::current();
-  assert( vmb->is_MemBar(), "" );
-  assert( vmb->Opcode() != Op_MemBarAcquire, "" );
-  const MemBarNode *mem = (const MemBarNode*)vmb;
+bool Matcher::post_store_load_barrier(const Node* vmb) {
+  Compile* C = Compile::current();
+  assert(vmb->is_MemBar(), "");
+  assert(vmb->Opcode() != Op_MemBarAcquire, "");
+  const MemBarNode* membar = vmb->as_MemBar();

-  // Get the Proj node, ctrl, that can be used to iterate forward
-  Node *ctrl = NULL;
-  DUIterator_Fast imax, i = mem->fast_outs(imax);
-  while( true ) {
-    ctrl = mem->fast_out(i); // Throw out-of-bounds if proj not found
-    assert( ctrl->is_Proj(), "only projections here" );
-    ProjNode *proj = (ProjNode*)ctrl;
-    if( proj->_con == TypeFunc::Control &&
-        !C->node_arena()->contains(ctrl) ) // Unmatched old-space only
+  // Get the Ideal Proj node, ctrl, that can be used to iterate forward
+  Node* ctrl = NULL;
+  for (DUIterator_Fast imax, i = membar->fast_outs(imax); i < imax; i++) {
+    Node* p = membar->fast_out(i);
+    assert(p->is_Proj(), "only projections here");
+    if ((p->as_Proj()->_con == TypeFunc::Control) &&
+        !C->node_arena()->contains(p)) { // Unmatched old-space only
+      ctrl = p;
       break;
-    i++;
+    }
   }
+  assert((ctrl != NULL), "missing control projection");

-  for( DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++ ) {
+  for (DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++) {
     Node *x = ctrl->fast_out(j);
     int xop = x->Opcode();

@@ -2336,37 +2336,36 @@ bool Matcher::post_store_load_barrier(const Node *vmb) {
     // that a monitor exit operation contains a serializing instruction.

     if (xop == Op_MemBarVolatile ||
-        xop == Op_FastLock ||
         xop == Op_CompareAndSwapL ||
         xop == Op_CompareAndSwapP ||
         xop == Op_CompareAndSwapN ||
-        xop == Op_CompareAndSwapI)
+        xop == Op_CompareAndSwapI) {
       return true;
+    }
+
+    // Op_FastLock previously appeared in the Op_* list above.
+    // With biased locking we're no longer guaranteed that a monitor
+    // enter operation contains a serializing instruction.
+    if ((xop == Op_FastLock) && !UseBiasedLocking) {
+      return true;
+    }

     if (x->is_MemBar()) {
       // We must retain this membar if there is an upcoming volatile
-      // load, which will be preceded by acquire membar.
-      if (xop == Op_MemBarAcquire)
+      // load, which will be followed by acquire membar.
+      if (xop == Op_MemBarAcquire) {
         return false;
-      // For other kinds of barriers, check by pretending we
-      // are them, and seeing if we can be removed.
-      else
-        return post_store_load_barrier((const MemBarNode*)x);
+      } else {
+        // For other kinds of barriers, check by pretending we
+        // are them, and seeing if we can be removed.
+        return post_store_load_barrier(x->as_MemBar());
+      }
     }

-    // Delicate code to detect case of an upcoming fastlock block
-    if( x->is_If() && x->req() > 1 &&
-        !C->node_arena()->contains(x) ) { // Unmatched old-space only
-      Node *iff = x;
-      Node *bol = iff->in(1);
-      // The iff might be some random subclass of If or bol might be Con-Top
-      if (!bol->is_Bool()) return false;
-      assert( bol->req() > 1, "" );
-      return (bol->in(1)->Opcode() == Op_FastUnlock);
-    }
     // probably not necessary to check for these
-    if (x->is_Call() || x->is_SafePoint() || x->is_block_proj())
+    if (x->is_Call() || x->is_SafePoint() || x->is_block_proj()) {
       return false;
+    }
   }
   return false;
 }

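For background on why this forward search is worth doing: on x86 a locked read-modify-write already orders earlier stores against later loads, so the StoreLoad barrier that normally follows a volatile store can be dropped when the next memory operation is such an instruction. A hedged, standalone C++11 sketch of that idea (not the matcher code itself):

```cpp
// Illustrative sketch (not HotSpot code): a volatile-style store immediately
// followed by an atomic RMW needs no extra StoreLoad fence on x86, because
// the locked RMW is itself a full two-way barrier.
#include <atomic>

std::atomic<int> flag{0};
std::atomic<int> lock_word{0};

void publish_then_lock() {
  flag.store(1, std::memory_order_seq_cst);        // the "volatile" store
  // A trailing StoreLoad fence would normally go here; the CAS below already
  // provides that ordering, so a separate fence instruction can be elided.
  int expected = 0;
  lock_word.compare_exchange_strong(expected, 1,   // plays the role of Op_CompareAndSwapI
                                    std::memory_order_seq_cst);
}

int main() {
  publish_then_lock();
  return 0;
}
```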
@@ -294,25 +294,7 @@ void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
   // If reference is volatile, prevent following volatiles ops from
   // floating up before the volatile write.
   if (is_vol) {
-    // First place the specific membar for THIS volatile index. This first
-    // membar is dependent on the store, keeping any other membars generated
-    // below from floating up past the store.
-    int adr_idx = C->get_alias_index(adr_type);
-    insert_mem_bar_volatile(Op_MemBarVolatile, adr_idx, store);
-
-    // Now place a membar for AliasIdxBot for the unknown yet-to-be-parsed
-    // volatile alias indices. Skip this if the membar is redundant.
-    if (adr_idx != Compile::AliasIdxBot) {
-      insert_mem_bar_volatile(Op_MemBarVolatile, Compile::AliasIdxBot, store);
-    }
-
-    // Finally, place alias-index-specific membars for each volatile index
-    // that isn't the adr_idx membar. Typically there's only 1 or 2.
-    for( int i = Compile::AliasIdxRaw; i < C->num_alias_types(); i++ ) {
-      if (i != adr_idx && C->alias_type(i)->is_volatile()) {
-        insert_mem_bar_volatile(Op_MemBarVolatile, i, store);
-      }
-    }
+    insert_mem_bar(Op_MemBarVolatile); // Use fat membar
   }

   // If the field is final, the rules of Java say we are in <init> or <clinit>.

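The replacement hunk drops the per-alias-slice barriers in favor of a single "fat" MemBarVolatile covering all memory. At the machine level that barrier amounts to a full StoreLoad fence after the volatile store; a rough standalone C++ sketch of the resulting ordering (assumptions: x86-64, std::atomic standing in for the Java volatile field):

```cpp
// Sketch of what the single "fat" barrier amounts to for a volatile field
// write (illustrative only; std::atomic stands in for the Java field).
#include <atomic>

int payload = 0;                 // ordinary field written before the volatile one
std::atomic<int> v_field{0};     // stands in for the Java volatile field

void volatile_write(int x) {
  payload = x;                                          // ordinary store, may not sink below
  v_field.store(x, std::memory_order_release);          // the volatile store itself
  std::atomic_thread_fence(std::memory_order_seq_cst);  // the "fat" StoreLoad barrier
}

int main() {
  volatile_write(42);
  return 0;
}
```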