Mirror of https://github.com/openjdk/jdk.git, synced 2025-08-28 07:14:30 +02:00

Merge commit 55f6f35697
37 changed files with 190 additions and 1105 deletions
@@ -57,7 +57,6 @@ class Assembler : public AbstractAssembler {
     fbp_op2   = 5,
     br_op2    = 2,
     bp_op2    = 1,
-    cb_op2    = 7, // V8
     sethi_op2 = 4
   };
 
@@ -145,7 +144,6 @@ class Assembler : public AbstractAssembler {
     ldsh_op3   = 0x0a,
     ldx_op3    = 0x0b,
 
-    ldstub_op3 = 0x0d,
     stx_op3    = 0x0e,
     swap_op3   = 0x0f,
 
@@ -163,15 +161,6 @@ class Assembler : public AbstractAssembler {
 
     prefetch_op3 = 0x2d,
 
-
-    ldc_op3   = 0x30,
-    ldcsr_op3 = 0x31,
-    lddc_op3  = 0x33,
-    stc_op3   = 0x34,
-    stcsr_op3 = 0x35,
-    stdcq_op3 = 0x36,
-    stdc_op3  = 0x37,
-
     casa_op3  = 0x3c,
     casxa_op3 = 0x3e,
 
@@ -574,17 +563,11 @@ class Assembler : public AbstractAssembler {
   static void vis3_only() { assert( VM_Version::has_vis3(), "This instruction only works on SPARC with VIS3"); }
 
   // instruction only in v9
-  static void v9_only() { assert( VM_Version::v9_instructions_work(), "This instruction only works on SPARC V9"); }
+  static void v9_only() { } // do nothing
 
-  // instruction only in v8
-  static void v8_only() { assert( VM_Version::v8_instructions_work(), "This instruction only works on SPARC V8"); }
-
   // instruction deprecated in v9
   static void v9_dep() { } // do nothing for now
 
-  // some float instructions only exist for single prec. on v8
-  static void v8_s_only(FloatRegisterImpl::Width w) { if (w != FloatRegisterImpl::S) v9_only(); }
-
   // v8 has no CC field
   static void v8_no_cc(CC cc) { if (cc) v9_only(); }
 
@@ -730,11 +713,6 @@ public:
   inline void bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
   inline void bp( Condition c, bool a, CC cc, Predict p, Label& L );
 
-  // pp 121 (V8)
-
-  inline void cb( Condition c, bool a, address d, relocInfo::relocType rt = relocInfo::none );
-  inline void cb( Condition c, bool a, Label& L );
-
   // pp 149
 
   inline void call( address d, relocInfo::relocType rt = relocInfo::runtime_call_type );
@@ -775,8 +753,8 @@ public:
 
   // pp 157
 
-  void fcmp(  FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { v8_no_cc(cc); emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x50 + w) | fs2(s2, w)); }
-  void fcmpe( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { v8_no_cc(cc); emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x54 + w) | fs2(s2, w)); }
+  void fcmp(  FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x50 + w) | fs2(s2, w)); }
+  void fcmpe( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x54 + w) | fs2(s2, w)); }
 
   // pp 159
 
@@ -794,21 +772,11 @@ public:
 
   // pp 162
 
-  void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w); emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x00 + w) | fs2(s, w)); }
+  void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x00 + w) | fs2(s, w)); }
 
-  void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w); emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(s, w)); }
+  void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(s, w)); }
 
-  // page 144 sparc v8 architecture (double prec works on v8 if the source and destination registers are the same). fnegs is the only instruction available
-  // on v8 to do negation of single, double and quad precision floats.
-
-  void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { if (VM_Version::v9_instructions_work()) emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(sd, w)); else emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x05) | fs2(sd, w)); }
-
-  void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w); emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(s, w)); }
-
-  // page 144 sparc v8 architecture (double prec works on v8 if the source and destination registers are the same). fabss is the only instruction available
-  // on v8 to do abs operation on single/double/quad precision floats.
-
-  void fabs( FloatRegisterImpl::Width w, FloatRegister sd ) { if (VM_Version::v9_instructions_work()) emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(sd, w)); else emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x09) | fs2(sd, w)); }
+  void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(s, w)); }
 
   // pp 163
 
@@ -839,11 +807,6 @@ public:
   void impdep1( int id1, int const19a ) { v9_only(); emit_int32( op(arith_op) | fcn(id1) | op3(impdep1_op3) | u_field(const19a, 18, 0)); }
   void impdep2( int id1, int const19a ) { v9_only(); emit_int32( op(arith_op) | fcn(id1) | op3(impdep2_op3) | u_field(const19a, 18, 0)); }
 
-  // pp 149 (v8)
-
-  void cpop1( int opc, int cr1, int cr2, int crd ) { v8_only(); emit_int32( op(arith_op) | fcn(crd) | op3(impdep1_op3) | u_field(cr1, 18, 14) | opf(opc) | u_field(cr2, 4, 0)); }
-  void cpop2( int opc, int cr1, int cr2, int crd ) { v8_only(); emit_int32( op(arith_op) | fcn(crd) | op3(impdep2_op3) | u_field(cr1, 18, 14) | opf(opc) | u_field(cr2, 4, 0)); }
-
   // pp 170
 
   void jmpl( Register s1, Register s2, Register d );
@@ -860,16 +823,6 @@ public:
   inline void ldxfsr( Register s1, Register s2 );
   inline void ldxfsr( Register s1, int simm13a);
 
-  // pp 94 (v8)
-
-  inline void ldc(   Register s1, Register s2, int crd );
-  inline void ldc(   Register s1, int simm13a, int crd);
-  inline void lddc(  Register s1, Register s2, int crd );
-  inline void lddc(  Register s1, int simm13a, int crd);
-  inline void ldcsr( Register s1, Register s2, int crd );
-  inline void ldcsr( Register s1, int simm13a, int crd);
-
-
   // 173
 
   void ldfa( FloatRegisterImpl::Width w, Register s1, Register s2, int ia, FloatRegister d ) { v9_only(); emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3 | alt_bit_op3, w) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
@@ -910,18 +863,6 @@ public:
   void lduwa( Register s1, int simm13a,         Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(lduw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   void ldxa(  Register s1, Register s2, int ia, Register d ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(ldx_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
   void ldxa(  Register s1, int simm13a,         Register d ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(ldx_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-  void ldda(  Register s1, Register s2, int ia, Register d ) { v9_dep(); emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-  void ldda(  Register s1, int simm13a,         Register d ) { v9_dep(); emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-
-  // pp 179
-
-  inline void ldstub( Register s1, Register s2, Register d );
-  inline void ldstub( Register s1, int simm13a, Register d);
-
-  // pp 180
-
-  void ldstuba( Register s1, Register s2, int ia, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldstub_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-  void ldstuba( Register s1, int simm13a,         Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldstub_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
 
   // pp 181
 
@@ -992,11 +933,6 @@ public:
   void smulcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
   void smulcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
 
-  // pp 199
-
-  void mulscc( Register s1, Register s2, Register d ) { v9_dep(); emit_int32( op(arith_op) | rd(d) | op3(mulscc_op3) | rs1(s1) | rs2(s2) ); }
-  void mulscc( Register s1, int simm13a, Register d ) { v9_dep(); emit_int32( op(arith_op) | rd(d) | op3(mulscc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-
   // pp 201
 
   void nop() { emit_int32( op(branch_op) | op2(sethi_op2) ); }
@@ -1116,17 +1052,6 @@ public:
   void stda( Register d, Register s1, Register s2, int ia ) { emit_int32( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
   void stda( Register d, Register s1, int simm13a         ) { emit_int32( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
 
-  // pp 97 (v8)
-
-  inline void stc(   int crd, Register s1, Register s2 );
-  inline void stc(   int crd, Register s1, int simm13a);
-  inline void stdc(  int crd, Register s1, Register s2 );
-  inline void stdc(  int crd, Register s1, int simm13a);
-  inline void stcsr( int crd, Register s1, Register s2 );
-  inline void stcsr( int crd, Register s1, int simm13a);
-  inline void stdcq( int crd, Register s1, Register s2 );
-  inline void stdcq( int crd, Register s1, int simm13a);
-
   // pp 230
 
   void sub( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sub_op3 ) | rs1(s1) | rs2(s2) ); }
@@ -1153,20 +1078,16 @@ public:
 
   void taddcc(   Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(taddcc_op3  ) | rs1(s1) | rs2(s2) ); }
   void taddcc(   Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(taddcc_op3  ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-  void taddcctv( Register s1, Register s2, Register d ) { v9_dep(); emit_int32( op(arith_op) | rd(d) | op3(taddcctv_op3) | rs1(s1) | rs2(s2) ); }
-  void taddcctv( Register s1, int simm13a, Register d ) { v9_dep(); emit_int32( op(arith_op) | rd(d) | op3(taddcctv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
 
   // pp 235
 
   void tsubcc(   Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcc_op3  ) | rs1(s1) | rs2(s2) ); }
   void tsubcc(   Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcc_op3  ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-  void tsubcctv( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcctv_op3) | rs1(s1) | rs2(s2) ); }
-  void tsubcctv( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcctv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
 
   // pp 237
 
-  void trap( Condition c, CC cc, Register s1, Register s2 ) { v8_no_cc(cc); emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | rs2(s2)); }
-  void trap( Condition c, CC cc, Register s1, int trapa   ) { v8_no_cc(cc); emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | immed(true) | u_field(trapa, 6, 0)); }
+  void trap( Condition c, CC cc, Register s1, Register s2 ) { emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | rs2(s2)); }
+  void trap( Condition c, CC cc, Register s1, int trapa   ) { emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | immed(true) | u_field(trapa, 6, 0)); }
   // simple uncond. trap
   void trap( int trapa ) { trap( always, icc, G0, trapa ); }
 
@@ -63,9 +63,6 @@ inline void Assembler::fb( Condition c, bool a, Label& L ) { fb(c, a, target(L))
 inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fbp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
 inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) { fbp(c, a, cc, p, target(L)); }
 
-inline void Assembler::cb( Condition c, bool a, address d, relocInfo::relocType rt ) { v8_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(cb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
-inline void Assembler::cb( Condition c, bool a, Label& L ) { cb(c, a, target(L)); }
-
 inline void Assembler::br( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(br_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
 inline void Assembler::br( Condition c, bool a, Label& L ) { br(c, a, target(L)); }
 
@@ -88,18 +85,9 @@ inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHol
 inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); }
 
-inline void Assembler::ldfsr(  Register s1, Register s2) { v9_dep();  emit_int32( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldfsr(  Register s1, int simm13a) { v9_dep();  emit_data( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 inline void Assembler::ldxfsr( Register s1, Register s2) { v9_only(); emit_int32( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldxfsr( Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
-inline void Assembler::ldc(   Register s1, Register s2, int crd) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(ldc_op3  ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldc(   Register s1, int simm13a, int crd) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(ldc_op3  ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::lddc(  Register s1, Register s2, int crd) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::lddc(  Register s1, int simm13a, int crd) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::ldcsr( Register s1, Register s2, int crd) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldcsr( Register s1, int simm13a, int crd) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-
 inline void Assembler::ldsb( Register s1, Register s2, Register d) { emit_int32( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldsb( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
@@ -119,9 +107,6 @@ inline void Assembler::ldx( Register s1, int simm13a, Register d) { v9_only();
 inline void Assembler::ldd( Register s1, Register s2, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldd( Register s1, int simm13a, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
-inline void Assembler::ldstub( Register s1, Register s2, Register d) { emit_int32( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldstub( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-
 inline void Assembler::rett( Register s1, Register s2 ) { cti(); emit_int32( op(arith_op) | op3(rett_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
 inline void Assembler::rett( Register s1, int simm13a, relocInfo::relocType rt) { cti(); emit_data( op(arith_op) | op3(rett_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rt); has_delay_slot(); }
 
@@ -132,8 +117,6 @@ inline void Assembler::sethi( int imm22a, Register d, RelocationHolder const& rs
 inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2) { emit_int32( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
-inline void Assembler::stfsr(  Register s1, Register s2) { v9_dep();  emit_int32( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stfsr(  Register s1, int simm13a) { v9_dep();  emit_data( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 inline void Assembler::stxfsr( Register s1, Register s2) { v9_only(); emit_int32( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::stxfsr( Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
@@ -152,17 +135,6 @@ inline void Assembler::stx( Register d, Register s1, int simm13a) { v9_only();
 inline void Assembler::std( Register d, Register s1, Register s2) { v9_dep(); assert(d->is_even(), "not even"); emit_int32( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::std( Register d, Register s1, int simm13a) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
-// v8 p 99
-
-inline void Assembler::stc(   int crd, Register s1, Register s2) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(stc_op3  ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stc(   int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stc_op3  ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::stdc(  int crd, Register s1, Register s2) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stdc(  int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::stcsr( int crd, Register s1, Register s2) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stcsr( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::stdcq( int crd, Register s1, Register s2) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stdcq( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-
 // pp 231
 
 inline void Assembler::swap( Register s1, Register s2, Register d) { v9_dep(); emit_int32( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | rs2(s2) ); }
@@ -597,13 +597,6 @@ void LIR_Assembler::emit_op3(LIR_Op3* op) {
 
       __ sra(Rdividend, 31, Rscratch);
       __ wry(Rscratch);
-      if (!VM_Version::v9_instructions_work()) {
-        // v9 doesn't require these nops
-        __ nop();
-        __ nop();
-        __ nop();
-        __ nop();
-      }
 
       add_debug_info_for_div0_here(op->info());
 
@@ -652,10 +645,6 @@ void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
       case lir_cond_lessEqual:    acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual   : Assembler::f_lessOrEqual);    break;
       case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
       default :                   ShouldNotReachHere();
-    };
-
-    if (!VM_Version::v9_instructions_work()) {
-      __ nop();
     }
     __ fb( acond, false, Assembler::pn, *(op->label()));
   } else {
@@ -725,9 +714,6 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
       Label L;
       // result must be 0 if value is NaN; test by comparing value to itself
       __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
-      if (!VM_Version::v9_instructions_work()) {
-        __ nop();
-      }
       __ fb(Assembler::f_unordered, true, Assembler::pn, L);
       __ delayed()->st(G0, addr); // annuled if contents of rsrc is not NaN
       __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
@@ -1909,7 +1895,7 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
       switch (code) {
         case lir_add: __ add  (lreg, rreg, res); break;
         case lir_sub: __ sub  (lreg, rreg, res); break;
-        case lir_mul: __ mult (lreg, rreg, res); break;
+        case lir_mul: __ mulx (lreg, rreg, res); break;
         default: ShouldNotReachHere();
       }
     }
@@ -1924,7 +1910,7 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
       switch (code) {
         case lir_add: __ add  (lreg, simm13, res); break;
         case lir_sub: __ sub  (lreg, simm13, res); break;
-        case lir_mul: __ mult (lreg, simm13, res); break;
+        case lir_mul: __ mulx (lreg, simm13, res); break;
         default: ShouldNotReachHere();
       }
     } else {
@@ -1936,7 +1922,7 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
       switch (code) {
        case lir_add: __ add  (lreg, (int)con, res); break;
        case lir_sub: __ sub  (lreg, (int)con, res); break;
-       case lir_mul: __ mult (lreg, (int)con, res); break;
+       case lir_mul: __ mulx (lreg, (int)con, res); break;
        default: ShouldNotReachHere();
       }
     }
@@ -3234,7 +3220,6 @@ void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type,
     Register base = mem_addr->base()->as_register();
     if (src->is_register() && dest->is_address()) {
       // G4 is high half, G5 is low half
-      if (VM_Version::v9_instructions_work()) {
       // clear the top bits of G5, and scale up G4
       __ srl (src->as_register_lo(), 0, G5);
       __ sllx(src->as_register_hi(), 32, G4);
@@ -3246,19 +3231,8 @@ void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type,
       } else {
         __ stx(G4, base, idx);
       }
-      } else {
-        __ mov (src->as_register_hi(), G4);
-        __ mov (src->as_register_lo(), G5);
-        null_check_offset = __ offset();
-        if (idx == noreg) {
-          __ std(G4, base, disp);
-        } else {
-          __ std(G4, base, idx);
-        }
-      }
     } else if (src->is_address() && dest->is_register()) {
       null_check_offset = __ offset();
-      if (VM_Version::v9_instructions_work()) {
       if (idx == noreg) {
         __ ldx(base, disp, G5);
       } else {
@@ -3266,16 +3240,6 @@ void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type,
       }
       __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
       __ mov (G5, dest->as_register_lo());     // copy low half into lo
-      } else {
-        if (idx == noreg) {
-          __ ldd(base, disp, G4);
-        } else {
-          __ ldd(base, idx, G4);
-        }
-        // G4 is high half, G5 is low half
-        __ mov (G4, dest->as_register_hi());
-        __ mov (G5, dest->as_register_lo());
-      }
     } else {
       Unimplemented();
     }
@@ -108,7 +108,7 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
 
   // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
   assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-  casx_under_lock(mark_addr.base(), Rmark, Rscratch, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+  cas_ptr(mark_addr.base(), Rmark, Rscratch);
   // if compare/exchange succeeded we found an unlocked object and we now have locked it
   // hence we are done
   cmp(Rmark, Rscratch);
@@ -149,7 +149,7 @@ void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rb
 
   // Check if it is still a light weight lock, this is is true if we see
   // the stack address of the basicLock in the markOop of the object
-  casx_under_lock(mark_addr.base(), Rbox, Rmark, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+  cas_ptr(mark_addr.base(), Rbox, Rmark);
   cmp(Rbox, Rmark);
 
   brx(Assembler::notEqual, false, Assembler::pn, slow_case);
@@ -276,7 +276,7 @@ void C1_MacroAssembler::initialize_object(
     sub(var_size_in_bytes, hdr_size_in_bytes, t2); // compute size of body
     initialize_body(t1, t2);
 #ifndef _LP64
-  } else if (VM_Version::v9_instructions_work() && con_size_in_bytes < threshold * 2) {
+  } else if (con_size_in_bytes < threshold * 2) {
     // on v9 we can do double word stores to fill twice as much space.
     assert(hdr_size_in_bytes % 8 == 0, "double word aligned");
     assert(con_size_in_bytes % 8 == 0, "double word aligned");
@@ -30,5 +30,4 @@
 
 void Compile::pd_compiler2_init() {
   guarantee(CodeEntryAlignment >= InteriorEntryAlignment, "" );
-  guarantee( VM_Version::v9_instructions_work(), "Server compiler does not run on V8 systems" );
 }
@@ -30,8 +30,7 @@
   }
 
   static const char* pd_cpu_opts() {
-    return (VM_Version::v9_instructions_work()?
-            (VM_Version::v8_instructions_work()? "" : "v9only") : "v8only");
+    return "v9only";
   }
 
 #endif // CPU_SPARC_VM_DISASSEMBLER_SPARC_HPP
@@ -110,8 +110,5 @@ define_pd_global(uintx, CMSYoungGenPerWorker, 16*M); // default max size of CMS
                                                                   \
   product(uintx, ArraycopyDstPrefetchDistance, 0,                 \
           "Distance to prefetch destination array in arracopy")   \
-                                                                  \
-  develop(intx, V8AtomicOperationUnderLockSpinCount, 50,          \
-          "Number of times to spin wait on a v8 atomic operation lock") \
 
 #endif // CPU_SPARC_VM_GLOBALS_SPARC_HPP
@@ -1210,8 +1210,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object)
     st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
     // compare and exchange object_addr, markOop | 1, stack address of basicLock
     assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-    casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
-      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+    cas_ptr(mark_addr.base(), mark_reg, temp_reg);
 
     // if the compare and exchange succeeded we are done (we saw an unlocked object)
     cmp_and_brx_short(mark_reg, temp_reg, Assembler::equal, Assembler::pt, done);
@@ -1291,8 +1290,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
     // we expect to see the stack address of the basicLock in case the
     // lock is still a light weight lock (lock_reg)
     assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-    casx_under_lock(mark_addr.base(), lock_reg, displaced_header_reg,
-      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+    cas_ptr(mark_addr.base(), lock_reg, displaced_header_reg);
     cmp(lock_reg, displaced_header_reg);
     brx(Assembler::equal, true, Assembler::pn, done);
     delayed()->st_ptr(G0, lockobj_addr); // free entry
@@ -118,7 +118,6 @@ int MacroAssembler::patched_branch(int dest_pos, int inst, int inst_pos) {
     case bp_op2:  m = wdisp( word_aligned_ones, 0, 19); v = wdisp( dest_pos, inst_pos, 19); break;
     case fb_op2:  m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
     case br_op2:  m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
-    case cb_op2:  m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
     case bpr_op2: {
       if (is_cbcond(inst)) {
         m = wdisp10(word_aligned_ones, 0);
@@ -149,7 +148,6 @@ int MacroAssembler::branch_destination(int inst, int pos) {
     case bp_op2:  r = inv_wdisp( inst, pos, 19); break;
     case fb_op2:  r = inv_wdisp( inst, pos, 22); break;
     case br_op2:  r = inv_wdisp( inst, pos, 22); break;
-    case cb_op2:  r = inv_wdisp( inst, pos, 22); break;
     case bpr_op2: {
       if (is_cbcond(inst)) {
         r = inv_wdisp10(inst, pos);
@@ -325,12 +323,6 @@ void MacroAssembler::breakpoint_trap() {
   trap(ST_RESERVED_FOR_USER_0);
 }
 
-// flush windows (except current) using flushw instruction if avail.
-void MacroAssembler::flush_windows() {
-  if (VM_Version::v9_instructions_work()) flushw();
-  else                                    flush_windows_trap();
-}
-
 // Write serialization page so VM thread can do a pseudo remote membar
 // We use the current thread pointer to calculate a thread specific
 // offset to write to within the page. This minimizes bus traffic
@@ -358,88 +350,6 @@ void MacroAssembler::leave() {
   Unimplemented();
 }
 
-void MacroAssembler::mult(Register s1, Register s2, Register d) {
-  if(VM_Version::v9_instructions_work()) {
-    mulx (s1, s2, d);
-  } else {
-    smul (s1, s2, d);
-  }
-}
-
-void MacroAssembler::mult(Register s1, int simm13a, Register d) {
-  if(VM_Version::v9_instructions_work()) {
-    mulx (s1, simm13a, d);
-  } else {
-    smul (s1, simm13a, d);
-  }
-}
-
-
-#ifdef ASSERT
-void MacroAssembler::read_ccr_v8_assert(Register ccr_save) {
-  const Register s1 = G3_scratch;
-  const Register s2 = G4_scratch;
-  Label get_psr_test;
-  // Get the condition codes the V8 way.
-  read_ccr_trap(s1);
-  mov(ccr_save, s2);
-  // This is a test of V8 which has icc but not xcc
-  // so mask off the xcc bits
-  and3(s2, 0xf, s2);
-  // Compare condition codes from the V8 and V9 ways.
-  subcc(s2, s1, G0);
-  br(Assembler::notEqual, true, Assembler::pt, get_psr_test);
-  delayed()->breakpoint_trap();
-  bind(get_psr_test);
-}
-
-void MacroAssembler::write_ccr_v8_assert(Register ccr_save) {
-  const Register s1 = G3_scratch;
-  const Register s2 = G4_scratch;
-  Label set_psr_test;
-  // Write out the saved condition codes the V8 way
-  write_ccr_trap(ccr_save, s1, s2);
-  // Read back the condition codes using the V9 instruction
-  rdccr(s1);
-  mov(ccr_save, s2);
-  // This is a test of V8 which has icc but not xcc
-  // so mask off the xcc bits
-  and3(s2, 0xf, s2);
-  and3(s1, 0xf, s1);
-  // Compare the V8 way with the V9 way.
-  subcc(s2, s1, G0);
-  br(Assembler::notEqual, true, Assembler::pt, set_psr_test);
-  delayed()->breakpoint_trap();
-  bind(set_psr_test);
-}
-#else
-#define read_ccr_v8_assert(x)
-#define write_ccr_v8_assert(x)
-#endif // ASSERT
-
-void MacroAssembler::read_ccr(Register ccr_save) {
-  if (VM_Version::v9_instructions_work()) {
-    rdccr(ccr_save);
-    // Test code sequence used on V8. Do not move above rdccr.
-    read_ccr_v8_assert(ccr_save);
-  } else {
-    read_ccr_trap(ccr_save);
-  }
-}
-
-void MacroAssembler::write_ccr(Register ccr_save) {
-  if (VM_Version::v9_instructions_work()) {
-    // Test code sequence used on V8. Do not move below wrccr.
-    write_ccr_v8_assert(ccr_save);
-    wrccr(ccr_save);
-  } else {
-    const Register temp_reg1 = G3_scratch;
-    const Register temp_reg2 = G4_scratch;
-    write_ccr_trap(ccr_save, temp_reg1, temp_reg2);
-  }
-}
-
 
 // Calls to C land
 
 #ifdef ASSERT
@@ -465,8 +375,8 @@ void MacroAssembler::get_thread() {
 #ifdef ASSERT
   AddressLiteral last_get_thread_addrlit(&last_get_thread);
   set(last_get_thread_addrlit, L3);
-  inc(L4, get_pc(L4) + 2 * BytesPerInstWord); // skip getpc() code + inc + st_ptr to point L4 at call
+  rdpc(L4);
+  inc(L4, 3 * BytesPerInstWord); // skip rdpc + inc + st_ptr to point L4 at call
   st_ptr(L4, L3, 0);
 #endif
   call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
   delayed()->nop();
@@ -1327,7 +1237,7 @@ void RegistersForDebugging::print(outputStream* s) {
 
 void RegistersForDebugging::save_registers(MacroAssembler* a) {
   a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
-  a->flush_windows();
+  a->flushw();
   int i;
   for (i = 0; i < 8; ++i) {
     a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1); a->st_ptr( L1, O0, i_offset(i));
@@ -1338,7 +1248,7 @@ void RegistersForDebugging::save_registers(MacroAssembler* a) {
   for (i = 0; i < 32; ++i) {
     a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
   }
-  for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
+  for (i = 0; i < 64; i += 2) {
     a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
   }
 }
@@ -1350,7 +1260,7 @@ void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
   for (int j = 0; j < 32; ++j) {
     a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
   }
-  for (int k = 0; k < (VM_Version::v9_instructions_work() ? 64 : 32); k += 2) {
+  for (int k = 0; k < 64; k += 2) {
     a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
   }
 }
@@ -1465,8 +1375,6 @@ address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL };
 // the high bits of the O-regs if they contain Long values. Acts as a 'leaf'
 // call.
 void MacroAssembler::verify_oop_subroutine() {
-  assert( VM_Version::v9_instructions_work(), "VerifyOops not supported for V8" );
-
   // Leaf call; no frame.
   Label succeed, fail, null_or_fail;
 
@@ -1870,25 +1778,16 @@ void MacroAssembler::lcmp( Register Ra_hi, Register Ra_low,
   // And the equals case for the high part does not need testing,
   // since that triplet is reached only after finding the high halves differ.
 
-  if (VM_Version::v9_instructions_work()) {
-    mov(-1, Rresult);
-    ba(done); delayed()-> movcc(greater, false, icc, 1, Rresult);
-  } else {
-    br(less,    true, pt, done); delayed()-> set(-1, Rresult);
-    br(greater, true, pt, done); delayed()-> set( 1, Rresult);
-  }
+  mov(-1, Rresult);
+  ba(done);
+  delayed()->movcc(greater, false, icc, 1, Rresult);
 
   bind(check_low_parts);
 
-  if (VM_Version::v9_instructions_work()) {
-    mov(                               -1, Rresult);
-    movcc(equal,           false, icc,  0, Rresult);
-    movcc(greaterUnsigned, false, icc,  1, Rresult);
-  } else {
-    set(-1, Rresult);
-    br(equal,           true, pt, done); delayed()->set( 0, Rresult);
-    br(greaterUnsigned, true, pt, done); delayed()->set( 1, Rresult);
-  }
+  mov(                               -1, Rresult);
+  movcc(equal,           false, icc,  0, Rresult);
+  movcc(greaterUnsigned, false, icc,  1, Rresult);
   bind(done);
 }
 
@ -2117,119 +2016,24 @@ void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in
|
||||||
void MacroAssembler::float_cmp( bool is_float, int unordered_result,
|
void MacroAssembler::float_cmp( bool is_float, int unordered_result,
|
||||||
FloatRegister Fa, FloatRegister Fb,
|
FloatRegister Fa, FloatRegister Fb,
|
||||||
Register Rresult) {
|
Register Rresult) {
|
||||||
|
if (is_float) {
|
||||||
|
fcmp(FloatRegisterImpl::S, fcc0, Fa, Fb);
|
||||||
|
} else {
|
||||||
|
fcmp(FloatRegisterImpl::D, fcc0, Fa, Fb);
|
||||||
|
}
|
||||||
|
|
||||||
fcmp(is_float ? FloatRegisterImpl::S : FloatRegisterImpl::D, fcc0, Fa, Fb);
|
if (unordered_result == 1) {
|
||||||
|
|
||||||
Condition lt = unordered_result == -1 ? f_unorderedOrLess : f_less;
|
|
||||||
Condition eq = f_equal;
|
|
||||||
Condition gt = unordered_result == 1 ? f_unorderedOrGreater : f_greater;
|
|
||||||
|
|
||||||
if (VM_Version::v9_instructions_work()) {
|
|
||||||
|
|
||||||
mov( -1, Rresult);
|
mov( -1, Rresult);
|
||||||
movcc(eq, true, fcc0, 0, Rresult);
|
movcc(f_equal, true, fcc0, 0, Rresult);
|
||||||
movcc(gt, true, fcc0, 1, Rresult);
|
movcc(f_unorderedOrGreater, true, fcc0, 1, Rresult);
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
Label done;
|
mov( -1, Rresult);
|
||||||
|
movcc(f_equal, true, fcc0, 0, Rresult);
|
||||||
set( -1, Rresult );
|
movcc(f_greater, true, fcc0, 1, Rresult);
|
||||||
//fb(lt, true, pn, done); delayed()->set( -1, Rresult );
|
|
||||||
fb( eq, true, pn, done); delayed()->set( 0, Rresult );
|
|
||||||
fb( gt, true, pn, done); delayed()->set( 1, Rresult );
|
|
||||||
|
|
||||||
bind (done);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
-
-void MacroAssembler::fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
-{
-  if (VM_Version::v9_instructions_work()) {
-    Assembler::fneg(w, s, d);
-  } else {
-    if (w == FloatRegisterImpl::S) {
-      Assembler::fneg(w, s, d);
-    } else if (w == FloatRegisterImpl::D) {
-      // number() does a sanity check on the alignment.
-      assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
-             ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
-
-      Assembler::fneg(FloatRegisterImpl::S, s, d);
-      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
-    } else {
-      assert(w == FloatRegisterImpl::Q, "Invalid float register width");
-
-      // number() does a sanity check on the alignment.
-      assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
-             ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
-
-      Assembler::fneg(FloatRegisterImpl::S, s, d);
-      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
-      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
-      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
-    }
-  }
-}
-
-void MacroAssembler::fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
-{
-  if (VM_Version::v9_instructions_work()) {
-    Assembler::fmov(w, s, d);
-  } else {
-    if (w == FloatRegisterImpl::S) {
-      Assembler::fmov(w, s, d);
-    } else if (w == FloatRegisterImpl::D) {
-      // number() does a sanity check on the alignment.
-      assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
-             ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
-
-      Assembler::fmov(FloatRegisterImpl::S, s, d);
-      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
-    } else {
-      assert(w == FloatRegisterImpl::Q, "Invalid float register width");
-
-      // number() does a sanity check on the alignment.
-      assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
-             ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
-
-      Assembler::fmov(FloatRegisterImpl::S, s, d);
-      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
-      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
-      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
-    }
-  }
-}
-
-void MacroAssembler::fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
-{
-  if (VM_Version::v9_instructions_work()) {
-    Assembler::fabs(w, s, d);
-  } else {
-    if (w == FloatRegisterImpl::S) {
-      Assembler::fabs(w, s, d);
-    } else if (w == FloatRegisterImpl::D) {
-      // number() does a sanity check on the alignment.
-      assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
-             ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
-
-      Assembler::fabs(FloatRegisterImpl::S, s, d);
-      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
-    } else {
-      assert(w == FloatRegisterImpl::Q, "Invalid float register width");
-
-      // number() does a sanity check on the alignment.
-      assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
-             ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
-
-      Assembler::fabs(FloatRegisterImpl::S, s, d);
-      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
-      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
-      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
-    }
-  }
-}
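Aside (illustrative, not HotSpot code): every deleted D/Q fallback above follows one pattern, because negate and abs only touch the sign bit in the leading 32-bit word of a multi-word float. A standalone sketch of the quad case, treating the register as four raw words:

    #include <cstdint>

    // Model of the removed V8 quad negate: flip the sign bit of word 0
    // (what the single-precision fneg did) and copy words 1..3 unchanged
    // (the fmov successor chain).
    void fneg_quad_model(const uint32_t s[4], uint32_t d[4]) {
      d[0] = s[0] ^ 0x80000000u;   // Assembler::fneg(S, s, d)
      for (int i = 1; i < 4; i++)
        d[i] = s[i];               // Assembler::fmov(S, successor chain)
    }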
 void MacroAssembler::save_all_globals_into_locals() {
   mov(G1,L1);
   mov(G2,L2);
@@ -2250,135 +2054,6 @@ void MacroAssembler::restore_globals_from_locals() {
   mov(L7,G7);
 }

-// Use for 64 bit operation.
-void MacroAssembler::casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
-{
-  // store ptr_reg as the new top value
-#ifdef _LP64
-  casx(top_ptr_reg, top_reg, ptr_reg);
-#else
-  cas_under_lock(top_ptr_reg, top_reg, ptr_reg, lock_addr, use_call_vm);
-#endif // _LP64
-}
-
-// [RGV] This routine does not handle 64 bit operations.
-//       use casx_under_lock() or casx directly!!!
-void MacroAssembler::cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
-{
-  // store ptr_reg as the new top value
-  if (VM_Version::v9_instructions_work()) {
-    cas(top_ptr_reg, top_reg, ptr_reg);
-  } else {
-
-    // If the register is not an out nor global, it is not visible
-    // after the save. Allocate a register for it, save its
-    // value in the register save area (the save may not flush
-    // registers to the save area).
-
-    Register top_ptr_reg_after_save;
-    Register top_reg_after_save;
-    Register ptr_reg_after_save;
-
-    if (top_ptr_reg->is_out() || top_ptr_reg->is_global()) {
-      top_ptr_reg_after_save = top_ptr_reg->after_save();
-    } else {
-      Address reg_save_addr = top_ptr_reg->address_in_saved_window();
-      top_ptr_reg_after_save = L0;
-      st(top_ptr_reg, reg_save_addr);
-    }
-
-    if (top_reg->is_out() || top_reg->is_global()) {
-      top_reg_after_save = top_reg->after_save();
-    } else {
-      Address reg_save_addr = top_reg->address_in_saved_window();
-      top_reg_after_save = L1;
-      st(top_reg, reg_save_addr);
-    }
-
-    if (ptr_reg->is_out() || ptr_reg->is_global()) {
-      ptr_reg_after_save = ptr_reg->after_save();
-    } else {
-      Address reg_save_addr = ptr_reg->address_in_saved_window();
-      ptr_reg_after_save = L2;
-      st(ptr_reg, reg_save_addr);
-    }
-
-    const Register& lock_reg = L3;
-    const Register& lock_ptr_reg = L4;
-    const Register& value_reg = L5;
-    const Register& yield_reg = L6;
-    const Register& yieldall_reg = L7;
-
-    save_frame();
-
-    if (top_ptr_reg_after_save == L0) {
-      ld(top_ptr_reg->address_in_saved_window().after_save(), top_ptr_reg_after_save);
-    }
-
-    if (top_reg_after_save == L1) {
-      ld(top_reg->address_in_saved_window().after_save(), top_reg_after_save);
-    }
-
-    if (ptr_reg_after_save == L2) {
-      ld(ptr_reg->address_in_saved_window().after_save(), ptr_reg_after_save);
-    }
-
-    Label(retry_get_lock);
-    Label(not_same);
-    Label(dont_yield);
-
-    assert(lock_addr, "lock_address should be non null for v8");
-    set((intptr_t)lock_addr, lock_ptr_reg);
-    // Initialize yield counter
-    mov(G0,yield_reg);
-    mov(G0, yieldall_reg);
-    set(StubRoutines::Sparc::locked, lock_reg);
-
-    bind(retry_get_lock);
-    cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dont_yield);
-
-    if(use_call_vm) {
-      Untested("Need to verify global reg consistancy");
-      call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::yield_all), yieldall_reg);
-    } else {
-      // Save the regs and make space for a C call
-      save(SP, -96, SP);
-      save_all_globals_into_locals();
-      call(CAST_FROM_FN_PTR(address,os::yield_all));
-      delayed()->mov(yieldall_reg, O0);
-      restore_globals_from_locals();
-      restore();
-    }
-
-    // reset the counter
-    mov(G0,yield_reg);
-    add(yieldall_reg, 1, yieldall_reg);
-
-    bind(dont_yield);
-    // try to get lock
-    Assembler::swap(lock_ptr_reg, 0, lock_reg);
-
-    // did we get the lock?
-    cmp(lock_reg, StubRoutines::Sparc::unlocked);
-    br(Assembler::notEqual, true, Assembler::pn, retry_get_lock);
-    delayed()->add(yield_reg,1,yield_reg);
-
-    // yes, got lock.  do we have the same top?
-    ld(top_ptr_reg_after_save, 0, value_reg);
-    cmp_and_br_short(value_reg, top_reg_after_save, Assembler::notEqual, Assembler::pn, not_same);
-
-    // yes, same top.
-    st(ptr_reg_after_save, top_ptr_reg_after_save, 0);
-    membar(Assembler::StoreStore);
-
-    bind(not_same);
-    mov(value_reg, ptr_reg_after_save);
-    st(lock_reg, lock_ptr_reg, 0); // unlock
-
-    restore();
-  }
-}
-
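Aside (illustrative only): the removed cas_under_lock emulated compare-and-swap by serializing all CAS users through one global spin lock. A rough standalone C++ model of that slow path, with hypothetical names:

    #include <atomic>

    static std::atomic_flag cas_lock = ATOMIC_FLAG_INIT;

    // Take the lock, compare, conditionally store, and always return what
    // was observed -- mirroring the swap/ld/st/unlock sequence deleted above.
    intptr_t cas_under_lock_model(intptr_t* addr, intptr_t cmp, intptr_t set) {
      while (cas_lock.test_and_set(std::memory_order_acquire)) { /* spin */ }
      intptr_t observed = *addr;                   // ld(top_ptr..., value_reg)
      if (observed == cmp) *addr = set;            // st(ptr_reg, top_ptr..., 0)
      cas_lock.clear(std::memory_order_release);   // st(lock_reg, ...): unlock
      return observed;                             // mov(value_reg, ...)
    }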
 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
                                                       Register tmp,
                                                       int offset) {
@@ -2970,7 +2645,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
            markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
            mark_reg);
   or3(G2_thread, mark_reg, temp_reg);
-  casn(mark_addr.base(), mark_reg, temp_reg);
+  cas_ptr(mark_addr.base(), mark_reg, temp_reg);
   // If the biasing toward our thread failed, this means that
   // another thread succeeded in biasing it toward itself and we
   // need to revoke that bias. The revocation will occur in the
@@ -2998,7 +2673,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
   load_klass(obj_reg, temp_reg);
   ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
   or3(G2_thread, temp_reg, temp_reg);
-  casn(mark_addr.base(), mark_reg, temp_reg);
+  cas_ptr(mark_addr.base(), mark_reg, temp_reg);
   // If the biasing toward our thread failed, this means that
   // another thread succeeded in biasing it toward itself and we
   // need to revoke that bias. The revocation will occur in the
@@ -3027,7 +2702,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
   // bits in this situation. Should attempt to preserve them.
   load_klass(obj_reg, temp_reg);
   ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
-  casn(mark_addr.base(), mark_reg, temp_reg);
+  cas_ptr(mark_addr.base(), mark_reg, temp_reg);
   // Fall through to the normal CAS-based lock, because no matter what
   // the result of the above CAS, some thread must have succeeded in
   // removing the bias bit from the object's header.
@@ -3058,15 +2733,6 @@ void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg,
 }

-
-// CASN -- 32-64 bit switch hitter similar to the synthetic CASN provided by
-// Solaris/SPARC's "as".  Another apt name would be cas_ptr()
-
-void MacroAssembler::casn (Register addr_reg, Register cmp_reg, Register set_reg ) {
-  casx_under_lock (addr_reg, cmp_reg, set_reg, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
-}
-
 // compiler_lock_object() and compiler_unlock_object() are direct transliterations
 // of i486.ad fast_lock() and fast_unlock().  See those methods for detailed comments.
 // The code could be tightened up considerably.
@@ -3129,8 +2795,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
   // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
   assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-  casx_under_lock(mark_addr.base(), Rmark, Rscratch,
-                  (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+  cas_ptr(mark_addr.base(), Rmark, Rscratch);
   // if compare/exchange succeeded we found an unlocked object and we now have locked it
   // hence we are done
@@ -3176,7 +2841,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
   mov(Rbox, Rscratch);
   or3(Rmark, markOopDesc::unlocked_value, Rmark);
   assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-  casn(mark_addr.base(), Rmark, Rscratch);
+  cas_ptr(mark_addr.base(), Rmark, Rscratch);
   cmp(Rmark, Rscratch);
   brx(Assembler::equal, false, Assembler::pt, done);
   delayed()->sub(Rscratch, SP, Rscratch);
@@ -3207,7 +2872,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
   // Invariant: if we acquire the lock then _recursions should be 0.
   add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
   mov(G2_thread, Rscratch);
-  casn(Rmark, G0, Rscratch);
+  cas_ptr(Rmark, G0, Rscratch);
   cmp(Rscratch, G0);
   // Intentional fall-through into done
 } else {
@@ -3240,7 +2905,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
   mov(0, Rscratch);
   or3(Rmark, markOopDesc::unlocked_value, Rmark);
   assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-  casn(mark_addr.base(), Rmark, Rscratch);
+  cas_ptr(mark_addr.base(), Rmark, Rscratch);
   // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
   cmp(Rscratch, Rmark);
   brx(Assembler::notZero, false, Assembler::pn, Recursive);
@@ -3266,7 +2931,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
   // the fast-path stack-lock code from the interpreter and always passed
   // control to the "slow" operators in synchronizer.cpp.

-  // RScratch contains the fetched obj->mark value from the failed CASN.
+  // RScratch contains the fetched obj->mark value from the failed CAS.
 #ifdef _LP64
   sub(Rscratch, STACK_BIAS, Rscratch);
 #endif
@@ -3300,7 +2965,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
   // Invariant: if we acquire the lock then _recursions should be 0.
   add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
   mov(G2_thread, Rscratch);
-  casn(Rmark, G0, Rscratch);
+  cas_ptr(Rmark, G0, Rscratch);
   cmp(Rscratch, G0);
   // ST box->displaced_header = NonZero.
   // Any non-zero value suffices:
@@ -3336,8 +3001,7 @@ void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
   // Check if it is still a light weight lock, this is is true if we see
   // the stack address of the basicLock in the markOop of the object
   assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-  casx_under_lock(mark_addr.base(), Rbox, Rmark,
-                  (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+  cas_ptr(mark_addr.base(), Rbox, Rmark);
   ba(done);
   delayed()->cmp(Rbox, Rmark);
   bind(done);
@@ -3398,7 +3062,7 @@ void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
   delayed()->andcc(G0, G0, G0);
   add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
   mov(G2_thread, Rscratch);
-  casn(Rmark, G0, Rscratch);
+  cas_ptr(Rmark, G0, Rscratch);
   // invert icc.zf and goto done
   br_notnull(Rscratch, false, Assembler::pt, done);
   delayed()->cmp(G0, G0);
@@ -3440,7 +3104,7 @@ void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
   // A prototype implementation showed excellent results, although
   // the scavenger and timeout code was rather involved.

-  casn(mark_addr.base(), Rbox, Rscratch);
+  cas_ptr(mark_addr.base(), Rbox, Rscratch);
   cmp(Rbox, Rscratch);
   // Intentional fall through into done ...

@@ -3540,7 +3204,8 @@ void MacroAssembler::eden_allocate(
   if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
     // No allocation in the shared eden.
-    ba_short(slow_case);
+    ba(slow_case);
+    delayed()->nop();
   } else {
     // get eden boundaries
     // note: we need both top & top_addr!
@@ -3583,7 +3248,7 @@ void MacroAssembler::eden_allocate(
   // Compare obj with the value at top_addr; if still equal, swap the value of
   // end with the value at top_addr. If not equal, read the value at top_addr
   // into end.
-  casx_under_lock(top_addr, obj, end, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+  cas_ptr(top_addr, obj, end);
   // if someone beat us on the allocation, try again, otherwise continue
   cmp(obj, end);
   brx(Assembler::notEqual, false, Assembler::pn, retry);

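Aside (sketch, not HotSpot code): the eden hunk above is a classic CAS bump-pointer loop. The same shape in standalone C++:

    #include <atomic>
    #include <cstddef>

    // cas_ptr(top_addr, obj, end): install new_top only if top is still obj;
    // on contention (brx notEqual -> retry) try again.
    char* eden_allocate_model(std::atomic<char*>& top, char* limit, size_t size) {
      for (;;) {
        char* obj = top.load(std::memory_order_relaxed);
        char* new_top = obj + size;
        if (new_top > limit) return nullptr;          // take the slow case
        if (top.compare_exchange_weak(obj, new_top))
          return obj;                                 // we won the race
      }
    }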
@@ -1056,13 +1056,6 @@ public:

   void breakpoint_trap();
   void breakpoint_trap(Condition c, CC cc);
-  void flush_windows_trap();
-  void clean_windows_trap();
-  void get_psr_trap();
-  void set_psr_trap();
-
-  // V8/V9 flush_windows
-  void flush_windows();
-
   // Support for serializing memory accesses between threads
   void serialize_memory(Register thread, Register tmp1, Register tmp2);
@@ -1071,14 +1064,6 @@ public:
   void enter();
   void leave();

-  // V8/V9 integer multiply
-  void mult(Register s1, Register s2, Register d);
-  void mult(Register s1, int simm13a, Register d);
-
-  // V8/V9 read and write of condition codes.
-  void read_ccr(Register d);
-  void write_ccr(Register s);
-
   // Manipulation of C++ bools
   // These are idioms to flag the need for care with accessing bools but on
   // this platform we assume byte size
@@ -1162,21 +1147,6 @@ public:
   // check_and_forward_exception to handle exceptions when it is safe
   void check_and_forward_exception(Register scratch_reg);

-private:
-  // For V8
-  void read_ccr_trap(Register ccr_save);
-  void write_ccr_trap(Register ccr_save1, Register scratch1, Register scratch2);
-
-#ifdef ASSERT
-  // For V8 debugging.  Uses V8 instruction sequence and checks
-  // result with V9 insturctions rdccr and wrccr.
-  // Uses Gscatch and Gscatch2
-  void read_ccr_v8_assert(Register ccr_save);
-  void write_ccr_v8_assert(Register ccr_save);
-#endif // ASSERT
-
-public:
-
   // Write to card table for - register is destroyed afterwards.
   void card_table_write(jbyte* byte_map_base, Register tmp, Register obj);
@@ -1314,20 +1284,9 @@ public:
                   FloatRegister Fa, FloatRegister Fb,
                   Register Rresult);

-  void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
-  void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { Assembler::fneg(w, sd); }
-  void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
-  void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
-
   void save_all_globals_into_locals();
   void restore_globals_from_locals();

-  void casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
-                       address lock_addr=0, bool use_call_vm=false);
-  void cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
-                      address lock_addr=0, bool use_call_vm=false);
-  void casn (Register addr_reg, Register cmp_reg, Register set_reg) ;
-
   // These set the icc condition code to equal if the lock succeeded
   // and notEqual if it failed and requires a slow case
   void compiler_lock_object(Register Roop, Register Rmark, Register Rbox,

@@ -229,10 +229,7 @@ inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Registe
 // Use the right branch for the platform

 inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
-  if (VM_Version::v9_instructions_work())
-    Assembler::bp(c, a, icc, p, d, rt);
-  else
-    Assembler::br(c, a, d, rt);
+  Assembler::bp(c, a, icc, p, d, rt);
 }

 inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
@@ -268,10 +265,7 @@ inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, Label& L
 }

 inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
-  if (VM_Version::v9_instructions_work())
-    fbp(c, a, fcc0, p, d, rt);
-  else
-    Assembler::fb(c, a, d, rt);
+  fbp(c, a, fcc0, p, d, rt);
 }

 inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
@@ -334,7 +328,7 @@ inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder co
 // prefetch instruction
 inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
-  if (VM_Version::v9_instructions_work())
   Assembler::bp( never, true, xcc, pt, d, rt );
 }
 inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }
@@ -344,15 +338,7 @@ inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }
 // returns delta from gotten pc to addr after
 inline int MacroAssembler::get_pc( Register d ) {
   int x = offset();
-  if (VM_Version::v9_instructions_work())
   rdpc(d);
-  else {
-    Label lbl;
-    Assembler::call(lbl, relocInfo::none);  // No relocation as this is call to pc+0x8
-    if (d == O7) delayed()->nop();
-    else         delayed()->mov(O7, d);
-    bind(lbl);
-  }
   return offset() - x;
 }

@@ -646,22 +632,19 @@ inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, const Address& a, Fl
 // returns if membar generates anything, obviously this code should mirror
 // membar below.
 inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
-  if( !os::is_MP() ) return false;  // Not needed on single CPU
-  if( VM_Version::v9_instructions_work() ) {
-    const Membar_mask_bits effective_mask =
-        Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
-    return (effective_mask != 0);
-  } else {
-    return true;
-  }
+  if (!os::is_MP())
+    return false;  // Not needed on single CPU
+  const Membar_mask_bits effective_mask =
+      Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
+  return (effective_mask != 0);
 }

 inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
   // Uniprocessors do not need memory barriers
-  if (!os::is_MP()) return;
+  if (!os::is_MP())
+    return;
   // Weakened for current Sparcs and TSO.  See the v9 manual, sections 8.4.3,
   // 8.4.4.3, a.31 and a.50.
-  if( VM_Version::v9_instructions_work() ) {
   // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
   // of the mmask subfield of const7a that does anything that isn't done
   // implicitly is StoreLoad.
@@ -670,18 +653,6 @@ inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
   if (effective_mask != 0) {
     Assembler::membar(effective_mask);
   }
-  } else {
-    // stbar is the closest there is on v8.  Equivalent to membar(StoreStore).  We
-    // do not issue the stbar because to my knowledge all v8 machines implement TSO,
-    // which guarantees that all stores behave as if an stbar were issued just after
-    // each one of them.  On these machines, stbar ought to be a nop.  There doesn't
-    // appear to be an equivalent of membar(StoreLoad) on v8: TSO doesn't require it,
-    // it can't be specified by stbar, nor have I come up with a way to simulate it.
-    //
-    // Addendum.  Dave says that ldstub guarantees a write buffer flush to coherent
-    // space.  Put one here to be on the safe side.
-    Assembler::ldstub(SP, 0, G0);
-  }
 }

 inline void MacroAssembler::prefetch(const Address& a, PrefetchFcn f, int offset) {

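Aside (illustrative): the membar weakening above relies on TSO already ordering load-load, load-store and store-store; only StoreLoad needs an explicit barrier. The bit values below are placeholders, not the real Membar_mask_bits encoding:

    enum MembarBits { LoadLoad = 1, StoreLoad = 2, LoadStore = 4, StoreStore = 8 };

    bool membar_has_effect_model(int const7a) {
      // Strip the bits TSO gives us for free; emit a barrier only if
      // something (i.e. a StoreLoad request) is left.
      int effective_mask = const7a & ~(LoadLoad | LoadStore | StoreStore);
      return effective_mask != 0;
    }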
@@ -162,7 +162,7 @@ void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
   int i1 = ((int*)code_buffer)[1];
   int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord);
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
-         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
+         *contention_addr == nop_instruction(),
          "must not interfere with original call");
   // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
   n_call->set_long_at(1*BytesPerInstWord, i1);
@@ -181,7 +181,7 @@ void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
   // Make sure the first-patched instruction, which may co-exist
   // briefly with the call, will do something harmless.
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
-         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
+         *contention_addr == nop_instruction(),
          "must not interfere with original call");
 }
@@ -933,11 +933,7 @@ void NativeJump::patch_verified_entry(address entry, address verified_entry, add
   int code_size = 1 * BytesPerInstWord;
   CodeBuffer cb(verified_entry, code_size + 1);
   MacroAssembler* a = new MacroAssembler(&cb);
-  if (VM_Version::v9_instructions_work()) {
   a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
-  } else {
-    a->lduw(G0, 0, O7); // "ld" must agree with code in the signal handler
-  }
   ICache::invalidate_range(verified_entry, code_size);
 }
@@ -1024,7 +1020,7 @@ void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer)
   int i1 = ((int*)code_buffer)[1];
   int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord);
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
-         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
+         *contention_addr == nop_instruction(),
          "must not interfere with original call");
   // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
   h_jump->set_long_at(1*BytesPerInstWord, i1);
@@ -1043,6 +1039,6 @@ void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer)
   // Make sure the first-patched instruction, which may co-exist
   // briefly with the call, will do something harmless.
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
-         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
+         *contention_addr == nop_instruction(),
          "must not interfere with original call");
 }

@@ -70,8 +70,7 @@ class NativeInstruction VALUE_OBJ_CLASS_SPEC {
   bool is_zombie() {
     int x = long_at(0);
     return is_op3(x,
-                  VM_Version::v9_instructions_work() ?
-                    Assembler::ldsw_op3 : Assembler::lduw_op3,
+                  Assembler::ldsw_op3,
                   Assembler::ldst_op)
            && Assembler::inv_rs1(x) == G0
            && Assembler::inv_rd(x) == O7;

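Aside (sketch): is_zombie above decodes a single instruction word. The field positions below follow the standard SPARC format-3 layout (op in bits 31:30, rd 29:25, op3 24:19, rs1 18:14); the numeric values are assumptions for illustration, not HotSpot constants:

    #include <cstdint>

    bool is_zombie_model(uint32_t x) {
      const uint32_t ldst_op  = 3;     // op field used by load/store formats
      const uint32_t ldsw_op3 = 0x08;  // assumed op3 encoding of ldsw
      const uint32_t G0 = 0, O7 = 15;  // integer register numbers
      return ((x >> 30) & 0x3)  == ldst_op
          && ((x >> 19) & 0x3f) == ldsw_op3
          && ((x >> 14) & 0x1f) == G0    // rs1 == G0
          && ((x >> 25) & 0x1f) == O7;   // rd  == O7
    }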
@@ -249,12 +249,10 @@ class FloatRegisterImpl: public AbstractRegisterImpl {

     case D:
       assert(c < 64 && (c & 1) == 0, "bad double float register");
-      assert(c < 32 || VM_Version::v9_instructions_work(), "V9 float work only on V9 platform");
       return (c & 0x1e) | ((c & 0x20) >> 5);

     case Q:
       assert(c < 64 && (c & 3) == 0, "bad quad float register");
-      assert(c < 32 || VM_Version::v9_instructions_work(), "V9 float work only on V9 platform");
       return (c & 0x1c) | ((c & 0x20) >> 5);
   }
   ShouldNotReachHere();

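Aside: the D encoding above folds bit 5 of the register number into bit 0, which is how SPARC V9 squeezes %f32..%f62 into a 5-bit field. A standalone check of the arithmetic:

    #include <cassert>

    int encode_double_freg(int c) {
      assert(c < 64 && (c & 1) == 0);          // "bad double float register"
      return (c & 0x1e) | ((c & 0x20) >> 5);   // e.g. c = 32 (%f32) encodes as 1
    }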
@@ -2459,7 +2459,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

   // Finally just about ready to make the JNI call

-  __ flush_windows();
+  __ flushw();
   if (inner_frame_created) {
     __ restore();
   } else {

@@ -2778,10 +2778,7 @@ enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
   Register Rold = reg_to_register_object($old$$reg);
   Register Rnew = reg_to_register_object($new$$reg);

-  // casx_under_lock picks 1 of 3 encodings:
-  // For 32-bit pointers you get a 32-bit CAS
-  // For 64-bit pointers you get a 64-bit CASX
-  __ casn(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold
+  __ cas_ptr(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold
   __ cmp( Rold, Rnew );
 %}
@@ -3067,7 +3064,7 @@ enc_class enc_Array_Equals(o0RegP ary1, o1RegP ary2, g3RegP tmp1, notemp_iRegI r
   AddressLiteral last_rethrow_addrlit(&last_rethrow);
   __ sethi(last_rethrow_addrlit, L1);
   Address addr(L1, last_rethrow_addrlit.low10());
-  __ get_pc(L2);
+  __ rdpc(L2);
   __ inc(L2, 3 * BytesPerInstWord);  // skip this & 2 more insns to point at jump_to
   __ st_ptr(L2, addr);
   __ restore();

@@ -566,7 +566,7 @@ class StubGenerator: public StubCodeGenerator {
     StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows");
     address start = __ pc();

-    __ flush_windows();
+    __ flushw();
     __ retl(false);
     __ delayed()->add( FP, STACK_BIAS, O0 );
     // The returned value must be a stack pointer whose register save area
@@ -575,64 +575,6 @@ class StubGenerator: public StubCodeGenerator {
     return start;
   }

-  // Helper functions for v8 atomic operations.
-  //
-  void get_v8_oop_lock_ptr(Register lock_ptr_reg, Register mark_oop_reg, Register scratch_reg) {
-    if (mark_oop_reg == noreg) {
-      address lock_ptr = (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr();
-      __ set((intptr_t)lock_ptr, lock_ptr_reg);
-    } else {
-      assert(scratch_reg != noreg, "just checking");
-      address lock_ptr = (address)StubRoutines::Sparc::_v8_oop_lock_cache;
-      __ set((intptr_t)lock_ptr, lock_ptr_reg);
-      __ and3(mark_oop_reg, StubRoutines::Sparc::v8_oop_lock_mask_in_place, scratch_reg);
-      __ add(lock_ptr_reg, scratch_reg, lock_ptr_reg);
-    }
-  }
-
-  void generate_v8_lock_prologue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
-
-    get_v8_oop_lock_ptr(lock_ptr_reg, mark_oop_reg, scratch_reg);
-    __ set(StubRoutines::Sparc::locked, lock_reg);
-    // Initialize yield counter
-    __ mov(G0,yield_reg);
-
-    __ BIND(retry);
-    __ cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dontyield);
-
-    // This code can only be called from inside the VM, this
-    // stub is only invoked from Atomic::add().  We do not
-    // want to use call_VM, because _last_java_sp and such
-    // must already be set.
-    //
-    // Save the regs and make space for a C call
-    __ save(SP, -96, SP);
-    __ save_all_globals_into_locals();
-    BLOCK_COMMENT("call os::naked_sleep");
-    __ call(CAST_FROM_FN_PTR(address, os::naked_sleep));
-    __ delayed()->nop();
-    __ restore_globals_from_locals();
-    __ restore();
-    // reset the counter
-    __ mov(G0,yield_reg);
-
-    __ BIND(dontyield);
-
-    // try to get lock
-    __ swap(lock_ptr_reg, 0, lock_reg);
-
-    // did we get the lock?
-    __ cmp(lock_reg, StubRoutines::Sparc::unlocked);
-    __ br(Assembler::notEqual, true, Assembler::pn, retry);
-    __ delayed()->add(yield_reg,1,yield_reg);
-
-    // yes, got lock. do the operation here.
-  }
-
-  void generate_v8_lock_epilogue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
-    __ st(lock_reg, lock_ptr_reg, 0); // unlock
-  }
-
   // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
   //
   // Arguments:
@@ -656,33 +598,14 @@ class StubGenerator: public StubCodeGenerator {
       __ mov(O0, O3);       // scratch copy of exchange value
       __ ld(O1, 0, O2);     // observe the previous value
       // try to replace O2 with O3
-      __ cas_under_lock(O1, O2, O3,
-          (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
+      __ cas(O1, O2, O3);
       __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);

       __ retl(false);
       __ delayed()->mov(O2, O0);  // report previous value to caller
     } else {
-      if (VM_Version::v9_instructions_work()) {
       __ retl(false);
       __ delayed()->swap(O1, 0, O0);
-      } else {
-        const Register& lock_reg = O2;
-        const Register& lock_ptr_reg = O3;
-        const Register& yield_reg = O4;
-
-        Label retry;
-        Label dontyield;
-
-        generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
-        // got the lock, do the swap
-        __ swap(O1, 0, O0);
-
-        generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
-        __ retl(false);
-        __ delayed()->nop();
-      }
     }

     return start;
@@ -701,15 +624,12 @@ class StubGenerator: public StubCodeGenerator {
   //
   // O0: the value previously stored in dest
   //
-  // Overwrites (v8): O3,O4,O5
-  //
   address generate_atomic_cmpxchg() {
     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
     address start = __ pc();

     // cmpxchg(dest, compare_value, exchange_value)
-    __ cas_under_lock(O1, O2, O0,
-        (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
+    __ cas(O1, O2, O0);
     __ retl(false);
     __ delayed()->nop();
@@ -728,17 +648,12 @@ class StubGenerator: public StubCodeGenerator {
   //
   // O1:O0: the value previously stored in dest
   //
-  // This only works on V9, on V8 we don't generate any
-  // code and just return NULL.
-  //
   // Overwrites: G1,G2,G3
   //
   address generate_atomic_cmpxchg_long() {
     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
     address start = __ pc();

-    if (!VM_Version::supports_cx8())
-      return NULL;;
     __ sllx(O0, 32, O0);
     __ srl(O1, 0, O1);
     __ or3(O0,O1,O0);   // O0 holds 64-bit value from compare_value
@@ -765,15 +680,13 @@ class StubGenerator: public StubCodeGenerator {
   //
   // O0: the new value stored in dest
   //
-  // Overwrites (v9): O3
-  // Overwrites (v8): O3,O4,O5
+  // Overwrites: O3
   //
   address generate_atomic_add() {
     StubCodeMark mark(this, "StubRoutines", "atomic_add");
     address start = __ pc();
     __ BIND(_atomic_add_stub);

-    if (VM_Version::v9_instructions_work()) {
     Label(retry);
     __ BIND(retry);
@@ -783,29 +696,6 @@ class StubGenerator: public StubCodeGenerator {
       __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
       __ retl(false);
       __ delayed()->add(O0, O2, O0); // note that cas made O2==O3
-    } else {
-      const Register& lock_reg = O2;
-      const Register& lock_ptr_reg = O3;
-      const Register& value_reg = O4;
-      const Register& yield_reg = O5;
-
-      Label(retry);
-      Label(dontyield);
-
-      generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
-      // got lock, do the increment
-      __ ld(O1, 0, value_reg);
-      __ add(O0, value_reg, value_reg);
-      __ st(value_reg, O1, 0);
-
-      // %%% only for RMO and PSO
-      __ membar(Assembler::StoreStore);
-
-      generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
-
-      __ retl(false);
-      __ delayed()->mov(value_reg, O0);
-    }

     return start;
   }
@@ -841,7 +731,7 @@ class StubGenerator: public StubCodeGenerator {
     __ mov(G3, L3);
     __ mov(G4, L4);
     __ mov(G5, L5);
-    for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
+    for (i = 0; i < 64; i += 2) {
      __ stf(FloatRegisterImpl::D, as_FloatRegister(i), preserve_addr, i * wordSize);
     }
@@ -855,7 +745,7 @@ class StubGenerator: public StubCodeGenerator {
    __ mov(L3, G3);
    __ mov(L4, G4);
    __ mov(L5, G5);
-    for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
+    for (i = 0; i < 64; i += 2) {
      __ ldf(FloatRegisterImpl::D, preserve_addr, as_FloatRegister(i), i * wordSize);
    }

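Aside (sketch): generate_atomic_add above is the usual CAS retry loop; with std::atomic the same logic reads:

    #include <atomic>

    int atomic_add_model(std::atomic<int>& dest, int add_value) {
      int observed = dest.load(std::memory_order_relaxed);   // ld(O1, 0, O2)
      // On failure compare_exchange_weak reloads 'observed' and we retry,
      // mirroring cmp_and_br_short(O2, O3, notEqual, pn, retry).
      while (!dest.compare_exchange_weak(observed, observed + add_value)) { }
      return observed + add_value;   // delayed()->add(O0, O2, O0)
    }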
@@ -52,7 +52,3 @@ address StubRoutines::Sparc::_stop_subroutine_entry = NULL;
 address StubRoutines::Sparc::_flush_callers_register_windows_entry = CAST_FROM_FN_PTR(address, bootstrap_flush_windows);

 address StubRoutines::Sparc::_partial_subtype_check = NULL;
-
-int StubRoutines::Sparc::_atomic_memory_operation_lock = StubRoutines::Sparc::unlocked;
-
-int StubRoutines::Sparc::_v8_oop_lock_cache[StubRoutines::Sparc::nof_v8_oop_lock_cache_entries];
@@ -47,46 +47,14 @@ enum /* platform_dependent_constants */ {
 class Sparc {
   friend class StubGenerator;

- public:
-  enum { nof_instance_allocators = 10 };
-
-  // allocator lock values
-  enum {
-    unlocked = 0,
-    locked   = 1
-  };
-
-  enum {
-    v8_oop_lock_ignore_bits = 2,
-    v8_oop_lock_bits = 4,
-    nof_v8_oop_lock_cache_entries = 1 << (v8_oop_lock_bits+v8_oop_lock_ignore_bits),
-    v8_oop_lock_mask = right_n_bits(v8_oop_lock_bits),
-    v8_oop_lock_mask_in_place = v8_oop_lock_mask << v8_oop_lock_ignore_bits
-  };
-
-  static int _v8_oop_lock_cache[nof_v8_oop_lock_cache_entries];
-
 private:
   static address _test_stop_entry;
   static address _stop_subroutine_entry;
   static address _flush_callers_register_windows_entry;

-  static int _atomic_memory_operation_lock;
-
   static address _partial_subtype_check;

 public:
-  // %%% global lock for everyone who needs to use atomic_compare_and_exchange
-  // %%%  or atomic_increment -- should probably use more locks for more
-  // %%%  scalability-- for instance one for each eden space or group of
-
-  // address of the lock for atomic_compare_and_exchange
-  static int* atomic_memory_operation_lock_addr() { return &_atomic_memory_operation_lock; }
-
-  // accessor and mutator for _atomic_memory_operation_lock
-  static int atomic_memory_operation_lock() { return _atomic_memory_operation_lock; }
-  static void set_atomic_memory_operation_lock(int value) { _atomic_memory_operation_lock = value; }
-
   // test assembler stop routine by setting registers
   static void (*test_stop_entry()) () { return CAST_TO_FN_PTR(void (*)(void), _test_stop_entry); }

@@ -1054,7 +1054,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
   // flush the windows now. We don't care about the current (protection) frame
   // only the outer frames

-  __ flush_windows();
+  __ flushw();

   // mark windows as flushed
   Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
@@ -1338,14 +1338,13 @@ void TemplateTable::lneg() {

 void TemplateTable::fneg() {
   transition(ftos, ftos);
-  __ fneg(FloatRegisterImpl::S, Ftos_f);
+  __ fneg(FloatRegisterImpl::S, Ftos_f, Ftos_f);
 }


 void TemplateTable::dneg() {
   transition(dtos, dtos);
-  // v8 has fnegd if source and dest are the same
-  __ fneg(FloatRegisterImpl::D, Ftos_f);
+  __ fneg(FloatRegisterImpl::D, Ftos_f, Ftos_f);
 }
@@ -1470,31 +1469,17 @@ void TemplateTable::convert() {
     __ st_long(Otos_l, __ d_tmp);
     __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);

-    if (VM_Version::v9_instructions_work()) {
     if (bytecode() == Bytecodes::_l2f) {
       __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
     } else {
       __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
     }
-    } else {
-      __ call_VM_leaf(
-        Lscratch,
-        bytecode() == Bytecodes::_l2f
-          ? CAST_FROM_FN_PTR(address, SharedRuntime::l2f)
-          : CAST_FROM_FN_PTR(address, SharedRuntime::l2d)
-      );
-    }
     break;

   case Bytecodes::_f2i: {
     Label isNaN;
     // result must be 0 if value is NaN; test by comparing value to itself
     __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
-    // According to the v8 manual, you have to have a non-fp instruction
-    // between fcmp and fb.
-    if (!VM_Version::v9_instructions_work()) {
-      __ nop();
-    }
     __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
     __ delayed()->clr(Otos_i);     // NaN
     __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
@@ -1537,16 +1522,7 @@ void TemplateTable::convert() {
     break;

   case Bytecodes::_d2f:
-    if (VM_Version::v9_instructions_work()) {
     __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
-    }
-    else {
-      // must uncache tos
-      __ push_d();
-      __ pop_i(O0);
-      __ pop_i(O1);
-      __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::d2f));
-    }
     break;

   default: ShouldNotReachHere();
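Aside (sketch): the f2i case tests for NaN by comparing the value with itself, NaN being the only value unordered with itself, and returns 0 in that case. A model of just that NaN path, assuming the value otherwise fits in an int (the range clamping lives outside this hunk):

    #include <cstdint>

    int32_t f2i_nan_model(float f) {
      if (f != f) return 0;          // fb(f_unordered) -> delayed()->clr(Otos_i)
      return (int32_t)f;             // ftoi(FloatRegisterImpl::S, ...)
    }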
|
@ -1956,17 +1932,8 @@ void TemplateTable::fast_binaryswitch() {
|
||||||
__ ld( Rarray, Rscratch, Rscratch );
|
__ ld( Rarray, Rscratch, Rscratch );
|
||||||
// (Rscratch is already in the native byte-ordering.)
|
// (Rscratch is already in the native byte-ordering.)
|
||||||
__ cmp( Rkey, Rscratch );
|
__ cmp( Rkey, Rscratch );
|
||||||
if ( VM_Version::v9_instructions_work() ) {
|
|
||||||
__ movcc( Assembler::less, false, Assembler::icc, Rh, Rj ); // j = h if (key < array[h].fast_match())
|
__ movcc( Assembler::less, false, Assembler::icc, Rh, Rj ); // j = h if (key < array[h].fast_match())
|
||||||
__ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri ); // i = h if (key >= array[h].fast_match())
|
__ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri ); // i = h if (key >= array[h].fast_match())
|
||||||
}
|
|
||||||
else {
|
|
||||||
Label end_of_if;
|
|
||||||
__ br( Assembler::less, true, Assembler::pt, end_of_if );
|
|
||||||
__ delayed()->mov( Rh, Rj ); // if (<) Rj = Rh
|
|
||||||
__ mov( Rh, Ri ); // else i = h
|
|
||||||
__ bind(end_of_if); // }
|
|
||||||
}
|
|
||||||
|
|
||||||
// while (i+1 < j)
|
// while (i+1 < j)
|
||||||
__ bind( entry );
|
__ bind( entry );
|
||||||
|

@@ -3418,9 +3385,7 @@ void TemplateTable::_new() {
     // has been allocated.
     __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);

-    __ casx_under_lock(RtopAddr, RoldTopValue, RnewTopValue,
-      VM_Version::v9_instructions_work() ? NULL :
-      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+    __ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue);

     // if someone beat us on the allocation, try again, otherwise continue
     __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);
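
With V8's lock-emulated CAS gone, allocation becomes a plain compare-and-swap on the top-of-eden pointer, retried if another thread wins the race. A lock-free sketch of that retry loop in portable C++ (heap_top and heap_end are illustrative stand-ins for eden's bounds, not HotSpot names):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    static std::atomic<uintptr_t> heap_top;   // current allocation top
    static uintptr_t heap_end;                // end of the allocatable region

    static void* try_allocate(size_t size) {
      uintptr_t old_top = heap_top.load(std::memory_order_relaxed);
      for (;;) {
        uintptr_t new_top = old_top + size;
        if (new_top > heap_end) return nullptr;  // caller takes the slow path
        // On failure, compare_exchange_weak reloads old_top with the current
        // value, so the loop retries, like branching back to 'retry' above.
        if (heap_top.compare_exchange_weak(old_top, new_top,
                                           std::memory_order_relaxed)) {
          return reinterpret_cast<void*>(old_top);
        }
      }
    }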

@@ -3701,14 +3666,7 @@ void TemplateTable::monitorenter() {

   __ verify_oop(O4);          // verify each monitor's oop
   __ tst(O4); // is this entry unused?
-  if (VM_Version::v9_instructions_work())
     __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);
-  else {
-    Label L;
-    __ br( Assembler::zero, true, Assembler::pn, L );
-    __ delayed()->mov(O3, O1); // remember this one if match
-    __ bind(L);
-  }

   __ cmp(O4, O0); // check if current entry is for same object
   __ brx( Assembler::equal, false, Assembler::pn, exit );

@@ -75,23 +75,14 @@ void VM_Version::initialize() {
     FLAG_SET_DEFAULT(AllocatePrefetchStyle, 1);
   }

-  if (has_v9()) {
+  guarantee(VM_Version::has_v9(), "only SPARC v9 is supported");
+
   assert(ArraycopySrcPrefetchDistance < 4096, "invalid value");
   if (ArraycopySrcPrefetchDistance >= 4096)
     ArraycopySrcPrefetchDistance = 4064;
   assert(ArraycopyDstPrefetchDistance < 4096, "invalid value");
   if (ArraycopyDstPrefetchDistance >= 4096)
     ArraycopyDstPrefetchDistance = 4064;
-  } else {
-    if (ArraycopySrcPrefetchDistance > 0) {
-      warning("prefetch instructions are not available on this CPU");
-      FLAG_SET_DEFAULT(ArraycopySrcPrefetchDistance, 0);
-    }
-    if (ArraycopyDstPrefetchDistance > 0) {
-      warning("prefetch instructions are not available on this CPU");
-      FLAG_SET_DEFAULT(ArraycopyDstPrefetchDistance, 0);
-    }
-  }

   UseSSE = 0; // Only on x86 and x64

@@ -177,10 +177,6 @@ public:
     return AllocatePrefetchDistance > 0 ? AllocatePrefetchStyle : 0;
   }

-  // Legacy
-  static bool v8_instructions_work() { return has_v8() && !has_v9(); }
-  static bool v9_instructions_work() { return has_v9(); }
-
   // Assembler testing
   static void allow_all();
   static void revert();

@@ -1429,6 +1429,8 @@ static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType
   assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
          "possible collision");

+  __ block_comment("unpack_array_argument {");
+
   // Pass the length, ptr pair
   Label is_null, done;
   VMRegPair tmp;

@@ -1453,6 +1455,8 @@ static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType
   move_ptr(masm, tmp, body_arg);
   move32_64(masm, tmp, length_arg);
   __ bind(done);
+
+  __ block_comment("} unpack_array_argument");
 }
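
The added block_comment calls follow the paired "{" / "}" bracketing idiom, so the disassembly of the generated code shows where the unpacking sequence begins and ends. A toy emitter illustrating the convention (the Emitter type and its methods are hypothetical, not HotSpot's API):

    #include <cstdio>

    struct Emitter {
      // Emits an annotation into the disassembly listing, not an instruction.
      void block_comment(const char* s) { std::printf(";; %s\n", s); }
      void emit(const char* insn)       { std::printf("  %s\n", insn); }
    };

    void emit_unpack_sequence(Emitter& e) {
      e.block_comment("unpack_array_argument {");
      e.emit("...");   // null check, then pass the length/pointer pair
      e.block_comment("} unpack_array_argument");
    }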

@@ -2170,13 +2174,16 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     }
   }

-  // point c_arg at the first arg that is already loaded in case we
-  // need to spill before we call out
-  int c_arg = total_c_args - total_in_args;
+  int c_arg;

   // Pre-load a static method's oop into r14.  Used both by locking code and
   // the normal JNI call code.
-  if (method->is_static() && !is_critical_native) {
+  if (!is_critical_native) {
+    // point c_arg at the first arg that is already loaded in case we
+    // need to spill before we call out
+    c_arg = total_c_args - total_in_args;
+
+    if (method->is_static()) {

     // load oop into a register
     __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));

@@ -2192,6 +2199,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     // and protect the arg if we must spill
     c_arg--;
   }
+  } else {
+    // For JNI critical methods we need to save all registers in save_args.
+    c_arg = 0;
+  }

   // Change state to native (we save the return address in the thread, since it might not
   // be pushed on the stack when we do a stack traversal). It is enough that the pc()

@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "runtime/os.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-#include <asm-sparc/traps.h>
-
-void MacroAssembler::read_ccr_trap(Register ccr_save) {
-  // No implementation
-  breakpoint_trap();
-}
-
-void MacroAssembler::write_ccr_trap(Register ccr_save, Register scratch1, Register scratch2) {
-  // No implementation
-  breakpoint_trap();
-}
-
-void MacroAssembler::flush_windows_trap() { trap(SP_TRAP_FWIN); }
-void MacroAssembler::clean_windows_trap() { trap(SP_TRAP_CWIN); }
-
-// Use software breakpoint trap until we figure out how to do this on Linux
-void MacroAssembler::get_psr_trap() { trap(SP_TRAP_SBPT); }
-void MacroAssembler::set_psr_trap() { trap(SP_TRAP_SBPT); }

@@ -169,7 +169,6 @@ inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong*
       : "memory");
   return rv;
 #else
-  assert(VM_Version::v9_instructions_work(), "cas only supported on v9");
   volatile jlong_accessor evl, cvl, rv;
   evl.long_value = exchange_value;
   cvl.long_value = compare_value;

@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.inline.hpp"
-#include "runtime/os.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-#include <sys/trap.h>          // For trap numbers
-#include <v9/sys/psr_compat.h> // For V8 compatibility
-
-void MacroAssembler::read_ccr_trap(Register ccr_save) {
-  // Execute a trap to get the PSR, mask and shift
-  // to get the condition codes.
-  get_psr_trap();
-  nop();
-  set(PSR_ICC, ccr_save);
-  and3(O0, ccr_save, ccr_save);
-  srl(ccr_save, PSR_ICC_SHIFT, ccr_save);
-}
-
-void MacroAssembler::write_ccr_trap(Register ccr_save, Register scratch1, Register scratch2) {
-  // Execute a trap to get the PSR, shift back
-  // the condition codes, mask the condition codes
-  // back into and PSR and trap to write back the
-  // PSR.
-  sll(ccr_save, PSR_ICC_SHIFT, scratch2);
-  get_psr_trap();
-  nop();
-  set(~PSR_ICC, scratch1);
-  and3(O0, scratch1, O0);
-  or3(O0, scratch2, O0);
-  set_psr_trap();
-  nop();
-}
-
-void MacroAssembler::flush_windows_trap() { trap(ST_FLUSH_WINDOWS); }
-void MacroAssembler::clean_windows_trap() { trap(ST_CLEAN_WINDOWS); }
-void MacroAssembler::get_psr_trap()   { trap(ST_GETPSR); }
-void MacroAssembler::set_psr_trap()   { trap(ST_SETPSR); }

@@ -60,21 +60,10 @@ inline jlong Atomic::load(volatile jlong* src) { return *src; }

 #else

-extern "C" void _Atomic_move_long_v8(volatile jlong* src, volatile jlong* dst);
 extern "C" void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst);

 inline void Atomic_move_long(volatile jlong* src, volatile jlong* dst) {
-#ifdef COMPILER2
-  // Compiler2 does not support v8, it is used only for v9.
   _Atomic_move_long_v9(src, dst);
-#else
-  // The branch is cheaper than emulated LDD.
-  if (VM_Version::v9_instructions_work()) {
-    _Atomic_move_long_v9(src, dst);
-  } else {
-    _Atomic_move_long_v8(src, dst);
-  }
-#endif
 }

 inline jlong Atomic::load(volatile jlong* src) {
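
On 32-bit builds the point of _Atomic_move_long_v9 is that the jlong moves with one indivisible 64-bit load and one indivisible 64-bit store (ldx/stx on v9), so no reader can observe a torn, half-written value. A sketch of the same guarantee in portable C++, with std::atomic standing in for the hand-written assembly:

    #include <atomic>
    #include <cstdint>

    // Copy a 64-bit value using single indivisible load and store operations.
    inline void atomic_move_long(const std::atomic<int64_t>& src,
                                 std::atomic<int64_t>& dst) {
      dst.store(src.load(std::memory_order_relaxed), std::memory_order_relaxed);
    }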

@@ -209,7 +198,6 @@ inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong*
       : "memory");
   return rv;
 #else //_LP64
-  assert(VM_Version::v9_instructions_work(), "cas only supported on v9");
   volatile jlong_accessor evl, cvl, rv;
   evl.long_value = exchange_value;
   cvl.long_value = compare_value;

@@ -318,7 +306,6 @@ inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong*
   // Return 64 bit value in %o0
   return _Atomic_cas64((intptr_t)exchange_value, (intptr_t *)dest, (intptr_t)compare_value);
 #else  // _LP64
-  assert (VM_Version::v9_instructions_work(), "only supported on v9");
   // Return 64 bit value in %o0,%o1 by hand
   return _Atomic_casl(exchange_value, dest, compare_value);
 #endif // _LP64

@@ -152,23 +152,6 @@
   .nonvolatile
   .end

-  // Support for jlong Atomic::load and Atomic::store on v8.
-  //
-  // void _Atomic_move_long_v8(volatile jlong* src, volatile jlong* dst)
-  //
-  // Arguments:
-  //      src:  O0
-  //      dest: O1
-  //
-  // Overwrites O2 and O3
-
-  .inline _Atomic_move_long_v8,2
-  .volatile
-  ldd     [%o0], %o2
-  std     %o2, [%o1]
-  .nonvolatile
-  .end
-
   // Support for jlong Atomic::load and Atomic::store on v9.
   //
   // void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst)

@@ -235,6 +235,9 @@ bool InstructForm::is_parm(FormDict &globals) {
   return false;
 }

+bool InstructForm::is_ideal_negD() const {
+  return (_matrule && _matrule->_rChild && strcmp(_matrule->_rChild->_opType, "NegD") == 0);
+}
+
 // Return 'true' if this instruction matches an ideal 'Copy*' node
 int InstructForm::is_ideal_copy() const {

@@ -533,6 +536,12 @@ bool InstructForm::rematerialize(FormDict &globals, RegisterForm *registers ) {
   if( data_type != Form::none )
     rematerialize = true;

+  // Ugly: until a better fix is implemented, disable rematerialization for
+  // negD nodes because they are proved to be problematic.
+  if (is_ideal_negD()) {
+    return false;
+  }
+
   // Constants
   if( _components.count() == 1 && _components[0]->is(Component::USE_DEF) )
     rematerialize = true;

@@ -147,6 +147,7 @@ public:
   virtual int         is_empty_encoding() const; // _size=0 and/or _insencode empty
   virtual int         is_tls_instruction() const; // tlsLoadP rule or ideal ThreadLocal
   virtual int         is_ideal_copy() const;    // node matches ideal 'Copy*'
+  virtual bool        is_ideal_negD() const;    // node matches ideal 'NegD'
   virtual bool        is_ideal_if()   const;    // node matches ideal 'If'
   virtual bool        is_ideal_fastlock() const; // node matches 'FastLock'
   virtual bool        is_ideal_membar() const;  // node matches ideal 'MemBarXXX'

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -406,10 +406,10 @@
   develop(intx, WarmCallMaxSize, 999999,                                    \
           "size of the largest inlinable method")                           \
                                                                             \
-  product(intx, MaxNodeLimit, 65000,                                        \
+  product(intx, MaxNodeLimit, 80000,                                        \
           "Maximum number of nodes")                                        \
                                                                             \
-  product(intx, NodeLimitFudgeFactor, 1000,                                 \
+  product(intx, NodeLimitFudgeFactor, 2000,                                 \
           "Fudge Factor for certain optimizations")                         \
                                                                             \
   product(bool, UseJumpTables, true,                                        \

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -435,6 +435,9 @@ void PhaseChaitin::Register_Allocate() {
     // Insert un-coalesced copies.  Visit all Phis.  Where inputs to a Phi do
     // not match the Phi itself, insert a copy.
     coalesce.insert_copies(_matcher);
+    if (C->failing()) {
+      return;
+    }
   }

   // After aggressive coalesce, attempt a first cut at coloring.

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -240,6 +240,8 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
   _unique = C->unique();

   for( uint i=0; i<_phc._cfg._num_blocks; i++ ) {
+    C->check_node_count(NodeLimitFudgeFactor, "out of nodes in coalesce");
+    if (C->failing()) return;
     Block *b = _phc._cfg._blocks[i];
     uint cnt = b->num_preds();  // Number of inputs to the Phi
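
Both this hunk and the matcher hunk below use the same bailout pattern: before each iteration that may create nodes, reserve headroom against the node limit and unwind cleanly if the budget is exhausted, instead of failing later. A condensed sketch of the pattern (the Compile struct and function below are simplified stand-ins for HotSpot's Compile interface, not its real layout):

    struct Compile {
      int         node_count     = 0;
      int         node_limit     = 80000;     // cf. MaxNodeLimit
      const char* failure_reason = nullptr;

      // Record a bailout if fewer than 'margin' nodes remain in the budget.
      void check_node_count(int margin, const char* reason) {
        if (node_count + margin > node_limit) failure_reason = reason;
      }
      bool failing() const { return failure_reason != nullptr; }
    };

    void insert_copies_sketch(Compile* C, int num_blocks) {
      for (int i = 0; i < num_blocks; i++) {
        C->check_node_count(2000 /* NodeLimitFudgeFactor */, "out of nodes in coalesce");
        if (C->failing()) return;   // graceful bailout; callers re-check failing()
        // ... process block i, possibly creating new nodes ...
      }
    }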

@@ -985,6 +985,8 @@ Node *Matcher::xform( Node *n, int max_stack ) {
   mstack.push(n, Visit, NULL, -1); // set NULL as parent to indicate root

   while (mstack.is_nonempty()) {
+    C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions");
+    if (C->failing()) return NULL;
     n = mstack.node();          // Leave node on stack
     Node_State nstate = mstack.state();
     if (nstate == Visit) {

@@ -3241,24 +3241,10 @@ JVM_ENTRY(jobject, JVM_CurrentClassLoader(JNIEnv *env))
 JVM_END


-// Utility object for collecting method holders walking down the stack
-class KlassLink: public ResourceObj {
- public:
-  KlassHandle klass;
-  KlassLink*  next;
-
-  KlassLink(KlassHandle k) { klass = k; next = NULL; }
-};
-
-
 JVM_ENTRY(jobjectArray, JVM_GetClassContext(JNIEnv *env))
   JVMWrapper("JVM_GetClassContext");
   ResourceMark rm(THREAD);
   JvmtiVMObjectAllocEventCollector oam;
-  // Collect linked list of (handles to) method holders
-  KlassLink* first = NULL;
-  KlassLink* last  = NULL;
-  int depth = 0;
   vframeStream vfst(thread);

   if (SystemDictionary::reflect_CallerSensitive_klass() != NULL) {

@@ -3272,32 +3258,23 @@ JVM_ENTRY(jobjectArray, JVM_GetClassContext(JNIEnv *env))
   }

   // Collect method holders
+  GrowableArray<KlassHandle>* klass_array = new GrowableArray<KlassHandle>();
   for (; !vfst.at_end(); vfst.security_next()) {
     Method* m = vfst.method();
     // Native frames are not returned
     if (!m->is_ignored_by_security_stack_walk() && !m->is_native()) {
       Klass* holder = m->method_holder();
       assert(holder->is_klass(), "just checking");
-      depth++;
-      KlassLink* l = new KlassLink(KlassHandle(thread, holder));
-      if (first == NULL) {
-        first = last = l;
-      } else {
-        last->next = l;
-        last = l;
-      }
+      klass_array->append(holder);
     }
   }

   // Create result array of type [Ljava/lang/Class;
-  objArrayOop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), depth, CHECK_NULL);
+  objArrayOop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), klass_array->length(), CHECK_NULL);
   // Fill in mirrors corresponding to method holders
-  int index = 0;
-  while (first != NULL) {
-    result->obj_at_put(index++, first->klass()->java_mirror());
-    first = first->next;
-  }
-  assert(index == depth, "just checking");
+  for (int i = 0; i < klass_array->length(); i++) {
+    result->obj_at_put(i, klass_array->at(i)->java_mirror());
+  }

   return (jobjectArray) JNIHandles::make_local(env, result);
 JVM_END
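
The rewrite replaces a hand-rolled linked list plus a manually maintained depth counter with a growable array whose length directly sizes the result, removing the index/depth consistency check. The shape of the change, sketched with std::vector (Klass and java_mirror here are simplified stand-ins for the VM types, not the real ones):

    #include <vector>

    struct Klass {
      void* java_mirror() const { return nullptr; }  // placeholder body
    };

    std::vector<void*> class_context(const std::vector<Klass*>& holders) {
      std::vector<Klass*> klass_array;          // replaces first/last/depth
      for (Klass* holder : holders) {
        klass_array.push_back(holder);          // one append per method holder
      }
      std::vector<void*> result;
      result.reserve(klass_array.size());       // length is known; no counter to verify
      for (Klass* k : klass_array) {
        result.push_back(k->java_mirror());
      }
      return result;
    }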

@@ -1885,21 +1885,6 @@ bool Arguments::check_vm_args_consistency() {
   // Note: Needs platform-dependent factoring.
   bool status = true;

-#if ( (defined(COMPILER2) && defined(SPARC)))
-  // NOTE: The call to VM_Version_init depends on the fact that VM_Version_init
-  // on sparc doesn't require generation of a stub as is the case on, e.g.,
-  // x86.  Normally, VM_Version_init must be called from init_globals in
-  // init.cpp, which is called by the initial java thread *after* arguments
-  // have been parsed.  VM_Version_init gets called twice on sparc.
-  extern void VM_Version_init();
-  VM_Version_init();
-  if (!VM_Version::has_v9()) {
-    jio_fprintf(defaultStream::error_stream(),
-                "V8 Machine detected, Server requires V9\n");
-    status = false;
-  }
-#endif /* COMPILER2 && SPARC */
-
   // Allow both -XX:-UseStackBanging and -XX:-UseBoundThreads in non-product
   // builds so the cost of stack banging can be measured.
 #if (defined(PRODUCT) && defined(SOLARIS))