Joseph Provino 2013-07-16 12:20:08 -04:00
commit b573023374
2314 changed files with 78229 additions and 36237 deletions

View file

@@ -57,7 +57,6 @@ class Assembler : public AbstractAssembler {
fbp_op2 = 5,
br_op2 = 2,
bp_op2 = 1,
cb_op2 = 7, // V8
sethi_op2 = 4
};
@@ -145,7 +144,6 @@ class Assembler : public AbstractAssembler {
ldsh_op3 = 0x0a,
ldx_op3 = 0x0b,
ldstub_op3 = 0x0d,
stx_op3 = 0x0e,
swap_op3 = 0x0f,
@@ -163,15 +161,6 @@ class Assembler : public AbstractAssembler {
prefetch_op3 = 0x2d,
ldc_op3 = 0x30,
ldcsr_op3 = 0x31,
lddc_op3 = 0x33,
stc_op3 = 0x34,
stcsr_op3 = 0x35,
stdcq_op3 = 0x36,
stdc_op3 = 0x37,
casa_op3 = 0x3c,
casxa_op3 = 0x3e,
@@ -574,17 +563,11 @@ class Assembler : public AbstractAssembler {
static void vis3_only() { assert( VM_Version::has_vis3(), "This instruction only works on SPARC with VIS3"); }
// instruction only in v9
static void v9_only() { assert( VM_Version::v9_instructions_work(), "This instruction only works on SPARC V9"); }
// instruction only in v8
static void v8_only() { assert( VM_Version::v8_instructions_work(), "This instruction only works on SPARC V8"); }
static void v9_only() { } // do nothing
// instruction deprecated in v9
static void v9_dep() { } // do nothing for now
// some float instructions only exist for single prec. on v8
static void v8_s_only(FloatRegisterImpl::Width w) { if (w != FloatRegisterImpl::S) v9_only(); }
// v8 has no CC field
static void v8_no_cc(CC cc) { if (cc) v9_only(); }
@@ -730,11 +713,6 @@ public:
inline void bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
inline void bp( Condition c, bool a, CC cc, Predict p, Label& L );
// pp 121 (V8)
inline void cb( Condition c, bool a, address d, relocInfo::relocType rt = relocInfo::none );
inline void cb( Condition c, bool a, Label& L );
// pp 149
inline void call( address d, relocInfo::relocType rt = relocInfo::runtime_call_type );
@@ -775,8 +753,8 @@ public:
// pp 157
void fcmp( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { v8_no_cc(cc); emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x50 + w) | fs2(s2, w)); }
void fcmpe( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { v8_no_cc(cc); emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x54 + w) | fs2(s2, w)); }
void fcmp( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x50 + w) | fs2(s2, w)); }
void fcmpe( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x54 + w) | fs2(s2, w)); }
// pp 159
@@ -794,21 +772,11 @@ public:
// pp 162
void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w); emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x00 + w) | fs2(s, w)); }
void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x00 + w) | fs2(s, w)); }
void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w); emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(s, w)); }
void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(s, w)); }
// page 144 sparc v8 architecture (double prec works on v8 if the source and destination registers are the same). fnegs is the only instruction available
// on v8 to do negation of single, double and quad precision floats.
void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { if (VM_Version::v9_instructions_work()) emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(sd, w)); else emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x05) | fs2(sd, w)); }
void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w); emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(s, w)); }
// page 144 sparc v8 architecture (double prec works on v8 if the source and destination registers are the same). fabss is the only instruction available
// on v8 to do abs operation on single/double/quad precision floats.
void fabs( FloatRegisterImpl::Width w, FloatRegister sd ) { if (VM_Version::v9_instructions_work()) emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(sd, w)); else emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x09) | fs2(sd, w)); }
void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(s, w)); }
// pp 163
@@ -839,11 +807,6 @@ public:
void impdep1( int id1, int const19a ) { v9_only(); emit_int32( op(arith_op) | fcn(id1) | op3(impdep1_op3) | u_field(const19a, 18, 0)); }
void impdep2( int id1, int const19a ) { v9_only(); emit_int32( op(arith_op) | fcn(id1) | op3(impdep2_op3) | u_field(const19a, 18, 0)); }
// pp 149 (v8)
void cpop1( int opc, int cr1, int cr2, int crd ) { v8_only(); emit_int32( op(arith_op) | fcn(crd) | op3(impdep1_op3) | u_field(cr1, 18, 14) | opf(opc) | u_field(cr2, 4, 0)); }
void cpop2( int opc, int cr1, int cr2, int crd ) { v8_only(); emit_int32( op(arith_op) | fcn(crd) | op3(impdep2_op3) | u_field(cr1, 18, 14) | opf(opc) | u_field(cr2, 4, 0)); }
// pp 170
void jmpl( Register s1, Register s2, Register d );
@@ -860,16 +823,6 @@ public:
inline void ldxfsr( Register s1, Register s2 );
inline void ldxfsr( Register s1, int simm13a);
// pp 94 (v8)
inline void ldc( Register s1, Register s2, int crd );
inline void ldc( Register s1, int simm13a, int crd);
inline void lddc( Register s1, Register s2, int crd );
inline void lddc( Register s1, int simm13a, int crd);
inline void ldcsr( Register s1, Register s2, int crd );
inline void ldcsr( Register s1, int simm13a, int crd);
// 173
void ldfa( FloatRegisterImpl::Width w, Register s1, Register s2, int ia, FloatRegister d ) { v9_only(); emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3 | alt_bit_op3, w) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
@@ -910,18 +863,6 @@ public:
void lduwa( Register s1, int simm13a, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(lduw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void ldxa( Register s1, Register s2, int ia, Register d ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(ldx_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
void ldxa( Register s1, int simm13a, Register d ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(ldx_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void ldda( Register s1, Register s2, int ia, Register d ) { v9_dep(); emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
void ldda( Register s1, int simm13a, Register d ) { v9_dep(); emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
// pp 179
inline void ldstub( Register s1, Register s2, Register d );
inline void ldstub( Register s1, int simm13a, Register d);
// pp 180
void ldstuba( Register s1, Register s2, int ia, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldstub_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
void ldstuba( Register s1, int simm13a, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldstub_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
// pp 181
@@ -992,11 +933,6 @@ public:
void smulcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
void smulcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
// pp 199
void mulscc( Register s1, Register s2, Register d ) { v9_dep(); emit_int32( op(arith_op) | rd(d) | op3(mulscc_op3) | rs1(s1) | rs2(s2) ); }
void mulscc( Register s1, int simm13a, Register d ) { v9_dep(); emit_int32( op(arith_op) | rd(d) | op3(mulscc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
// pp 201
void nop() { emit_int32( op(branch_op) | op2(sethi_op2) ); }
@@ -1116,17 +1052,6 @@ public:
void stda( Register d, Register s1, Register s2, int ia ) { emit_int32( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
void stda( Register d, Register s1, int simm13a ) { emit_int32( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
// pp 97 (v8)
inline void stc( int crd, Register s1, Register s2 );
inline void stc( int crd, Register s1, int simm13a);
inline void stdc( int crd, Register s1, Register s2 );
inline void stdc( int crd, Register s1, int simm13a);
inline void stcsr( int crd, Register s1, Register s2 );
inline void stcsr( int crd, Register s1, int simm13a);
inline void stdcq( int crd, Register s1, Register s2 );
inline void stdcq( int crd, Register s1, int simm13a);
// pp 230
void sub( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sub_op3 ) | rs1(s1) | rs2(s2) ); }
@@ -1153,20 +1078,16 @@ public:
void taddcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(taddcc_op3 ) | rs1(s1) | rs2(s2) ); }
void taddcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(taddcc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void taddcctv( Register s1, Register s2, Register d ) { v9_dep(); emit_int32( op(arith_op) | rd(d) | op3(taddcctv_op3) | rs1(s1) | rs2(s2) ); }
void taddcctv( Register s1, int simm13a, Register d ) { v9_dep(); emit_int32( op(arith_op) | rd(d) | op3(taddcctv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
// pp 235
void tsubcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcc_op3 ) | rs1(s1) | rs2(s2) ); }
void tsubcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void tsubcctv( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcctv_op3) | rs1(s1) | rs2(s2) ); }
void tsubcctv( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcctv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
// pp 237
void trap( Condition c, CC cc, Register s1, Register s2 ) { v8_no_cc(cc); emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | rs2(s2)); }
void trap( Condition c, CC cc, Register s1, int trapa ) { v8_no_cc(cc); emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | immed(true) | u_field(trapa, 6, 0)); }
void trap( Condition c, CC cc, Register s1, Register s2 ) { emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | rs2(s2)); }
void trap( Condition c, CC cc, Register s1, int trapa ) { emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | immed(true) | u_field(trapa, 6, 0)); }
// simple uncond. trap
void trap( int trapa ) { trap( always, icc, G0, trapa ); }

View file

@@ -63,9 +63,6 @@ inline void Assembler::fb( Condition c, bool a, Label& L ) { fb(c, a, target(L))
inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fbp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) { fbp(c, a, cc, p, target(L)); }
inline void Assembler::cb( Condition c, bool a, address d, relocInfo::relocType rt ) { v8_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(cb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
inline void Assembler::cb( Condition c, bool a, Label& L ) { cb(c, a, target(L)); }
inline void Assembler::br( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(br_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
inline void Assembler::br( Condition c, bool a, Label& L ) { br(c, a, target(L)); }
@@ -88,18 +85,9 @@ inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHol
inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); }
inline void Assembler::ldfsr( Register s1, Register s2) { v9_dep(); emit_int32( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldfsr( Register s1, int simm13a) { v9_dep(); emit_data( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldxfsr( Register s1, Register s2) { v9_only(); emit_int32( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldxfsr( Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldc( Register s1, Register s2, int crd) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(ldc_op3 ) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldc( Register s1, int simm13a, int crd) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(ldc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::lddc( Register s1, Register s2, int crd) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | rs2(s2) ); }
inline void Assembler::lddc( Register s1, int simm13a, int crd) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldcsr( Register s1, Register s2, int crd) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldcsr( Register s1, int simm13a, int crd) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldsb( Register s1, Register s2, Register d) { emit_int32( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldsb( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
@@ -119,9 +107,6 @@ inline void Assembler::ldx( Register s1, int simm13a, Register d) { v9_only();
inline void Assembler::ldd( Register s1, Register s2, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldd( Register s1, int simm13a, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldstub( Register s1, Register s2, Register d) { emit_int32( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldstub( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::rett( Register s1, Register s2 ) { cti(); emit_int32( op(arith_op) | op3(rett_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
inline void Assembler::rett( Register s1, int simm13a, relocInfo::relocType rt) { cti(); emit_data( op(arith_op) | op3(rett_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rt); has_delay_slot(); }
@@ -132,8 +117,6 @@ inline void Assembler::sethi( int imm22a, Register d, RelocationHolder const& rs
inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2) { emit_int32( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stfsr( Register s1, Register s2) { v9_dep(); emit_int32( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stfsr( Register s1, int simm13a) { v9_dep(); emit_data( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stxfsr( Register s1, Register s2) { v9_only(); emit_int32( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stxfsr( Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
@@ -152,17 +135,6 @@ inline void Assembler::stx( Register d, Register s1, int simm13a) { v9_only();
inline void Assembler::std( Register d, Register s1, Register s2) { v9_dep(); assert(d->is_even(), "not even"); emit_int32( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::std( Register d, Register s1, int simm13a) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
// v8 p 99
inline void Assembler::stc( int crd, Register s1, Register s2) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stc( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stdc( int crd, Register s1, Register s2) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stdc( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stcsr( int crd, Register s1, Register s2) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stcsr( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stdcq( int crd, Register s1, Register s2) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stdcq( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
// pp 231
inline void Assembler::swap( Register s1, Register s2, Register d) { v9_dep(); emit_int32( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | rs2(s2) ); }

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -597,13 +597,6 @@ void LIR_Assembler::emit_op3(LIR_Op3* op) {
__ sra(Rdividend, 31, Rscratch);
__ wry(Rscratch);
if (!VM_Version::v9_instructions_work()) {
// v9 doesn't require these nops
__ nop();
__ nop();
__ nop();
__ nop();
}
add_debug_info_for_div0_here(op->info());
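The sra/wry pair above builds the 64-bit dividend that SPARC's 32-bit divide expects: Y holds the sign word, the source register the low word. A rough C++ model of what the hardware then computes (illustration only, not HotSpot code):

    #include <stdint.h>
    int32_t sdiv_model(int32_t dividend, int32_t divisor) {
      int32_t y = dividend >> 31;                              // what wry(Rscratch) installs: sra replicates the sign bit
      int64_t wide = ((int64_t)y << 32) | (uint32_t)dividend;  // Y:rs1 forms the 64-bit dividend
      return (int32_t)(wide / divisor);                        // the sdiv result
    }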
@@ -652,10 +645,6 @@ void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
case lir_cond_lessEqual: acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual : Assembler::f_lessOrEqual); break;
case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
default : ShouldNotReachHere();
};
if (!VM_Version::v9_instructions_work()) {
__ nop();
}
__ fb( acond, false, Assembler::pn, *(op->label()));
} else {
@@ -725,9 +714,6 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
Label L;
// result must be 0 if value is NaN; test by comparing value to itself
__ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
if (!VM_Version::v9_instructions_work()) {
__ nop();
}
__ fb(Assembler::f_unordered, true, Assembler::pn, L);
__ delayed()->st(G0, addr); // annulled if contents of rsrc is not NaN
__ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
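The self-compare works because NaN is the only value unordered with itself; a small illustrative model of the f2i convention the stub enforces (not the stub itself):

    int f2i_model(float x) {
      if (x != x) return 0;   // fcmp above: x unordered with x means NaN, so the result is 0
      return (int)x;          // otherwise fall through to the ftoi conversion
    }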
@@ -1909,7 +1895,7 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
switch (code) {
case lir_add: __ add (lreg, rreg, res); break;
case lir_sub: __ sub (lreg, rreg, res); break;
case lir_mul: __ mult (lreg, rreg, res); break;
case lir_mul: __ mulx (lreg, rreg, res); break;
default: ShouldNotReachHere();
}
}
@@ -1924,7 +1910,7 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
switch (code) {
case lir_add: __ add (lreg, simm13, res); break;
case lir_sub: __ sub (lreg, simm13, res); break;
case lir_mul: __ mult (lreg, simm13, res); break;
case lir_mul: __ mulx (lreg, simm13, res); break;
default: ShouldNotReachHere();
}
} else {
@@ -1936,7 +1922,7 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
switch (code) {
case lir_add: __ add (lreg, (int)con, res); break;
case lir_sub: __ sub (lreg, (int)con, res); break;
case lir_mul: __ mult (lreg, (int)con, res); break;
case lir_mul: __ mulx (lreg, (int)con, res); break;
default: ShouldNotReachHere();
}
}
@@ -2960,6 +2946,9 @@ void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
}
}
void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
fatal("CRC32 intrinsic is not implemented on this platform");
}
void LIR_Assembler::emit_lock(LIR_OpLock* op) {
Register obj = op->obj_opr()->as_register();
@@ -3234,48 +3223,26 @@ void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type,
Register base = mem_addr->base()->as_register();
if (src->is_register() && dest->is_address()) {
// G4 is high half, G5 is low half
if (VM_Version::v9_instructions_work()) {
  // clear the top bits of G5, and scale up G4
  __ srl (src->as_register_lo(), 0, G5);
  __ sllx(src->as_register_hi(), 32, G4);
  // combine the two halves into the 64 bits of G4
  __ or3(G4, G5, G4);
  null_check_offset = __ offset();
  if (idx == noreg) {
    __ stx(G4, base, disp);
  } else {
    __ stx(G4, base, idx);
  }
} else {
  __ mov (src->as_register_hi(), G4);
  __ mov (src->as_register_lo(), G5);
  null_check_offset = __ offset();
  if (idx == noreg) {
    __ std(G4, base, disp);
  } else {
    __ std(G4, base, idx);
  }
}
// clear the top bits of G5, and scale up G4
__ srl (src->as_register_lo(), 0, G5);
__ sllx(src->as_register_hi(), 32, G4);
// combine the two halves into the 64 bits of G4
__ or3(G4, G5, G4);
null_check_offset = __ offset();
if (idx == noreg) {
  __ stx(G4, base, disp);
} else {
  __ stx(G4, base, idx);
}
} else if (src->is_address() && dest->is_register()) {
null_check_offset = __ offset();
if (VM_Version::v9_instructions_work()) {
  if (idx == noreg) {
    __ ldx(base, disp, G5);
  } else {
    __ ldx(base, idx, G5);
  }
  __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
  __ mov (G5, dest->as_register_lo()); // copy low half into lo
} else {
  if (idx == noreg) {
    __ ldd(base, disp, G4);
  } else {
    __ ldd(base, idx, G4);
  }
  // G4 is high half, G5 is low half
  __ mov (G4, dest->as_register_hi());
  __ mov (G5, dest->as_register_lo());
}
if (idx == noreg) {
  __ ldx(base, disp, G5);
} else {
  __ ldx(base, idx, G5);
}
__ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
__ mov (G5, dest->as_register_lo()); // copy low half into lo
} else {
Unimplemented();
}
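The srl/sllx/or3 sequence kept by this hunk packs two 32-bit halves into one 64-bit register; in plain C++ (illustration only):

    #include <stdint.h>
    uint64_t combine_halves(uint32_t hi, uint32_t lo) {
      uint64_t g5 = lo;                  // srl(lo, 0, G5): zero-extend the low half
      uint64_t g4 = (uint64_t)hi << 32;  // sllx(hi, 32, G4): position the high half
      return g4 | g5;                    // or3(G4, G5, G4)
    }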

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -784,6 +784,10 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
set_no_result(x);
}
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
fatal("CRC32 intrinsic is not implemented on this platform");
}
// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
void LIRGenerator::do_Convert(Convert* x) {

View file

@@ -108,7 +108,7 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
// compare object markOop with Rmark and if equal exchange Rscratch with object markOop
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
casx_under_lock(mark_addr.base(), Rmark, Rscratch, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
cas_ptr(mark_addr.base(), Rmark, Rscratch);
// if compare/exchange succeeded we found an unlocked object and we now have locked it
// hence we are done
cmp(Rmark, Rscratch);
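The fast path above is a displaced-header stack lock; sketched with a hypothetical std::atomic stand-in for cas_ptr (types simplified, not HotSpot's):

    #include <atomic>
    #include <cstdint>
    struct Object { std::atomic<intptr_t> mark; };
    bool fast_lock(Object* obj, intptr_t* box) {
      intptr_t displaced = obj->mark.load();  // Rmark: the mark we expect (unlocked)
      *box = displaced;                       // stash the displaced header in the on-stack box
      intptr_t expected = displaced;
      // cas_ptr: install the box address iff the mark is still the one we read
      return obj->mark.compare_exchange_strong(expected, (intptr_t)box);
    }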
@@ -149,7 +149,7 @@ void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rb
// Check if it is still a light weight lock, this is true if we see
// the stack address of the basicLock in the markOop of the object
casx_under_lock(mark_addr.base(), Rbox, Rmark, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
cas_ptr(mark_addr.base(), Rbox, Rmark);
cmp(Rbox, Rmark);
brx(Assembler::notEqual, false, Assembler::pn, slow_case);
@@ -276,7 +276,7 @@ void C1_MacroAssembler::initialize_object(
sub(var_size_in_bytes, hdr_size_in_bytes, t2); // compute size of body
initialize_body(t1, t2);
#ifndef _LP64
} else if (VM_Version::v9_instructions_work() && con_size_in_bytes < threshold * 2) {
} else if (con_size_in_bytes < threshold * 2) {
// on v9 we can do double word stores to fill twice as much space.
assert(hdr_size_in_bytes % 8 == 0, "double word aligned");
assert(con_size_in_bytes % 8 == 0, "double word aligned");
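A sketch of the doubleword fill this branch enables (loop shape assumed; the real code may unroll):

    // Zero the body with 8-byte stx of G0, valid because both sizes are
    // asserted double word aligned above.
    for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += BytesPerLong) {
      stx(G0, obj, i);   // each stx clears two 32-bit words
    }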

View file

@@ -49,8 +49,9 @@ define_pd_global(intx, FreqInlineSize, 325 );
define_pd_global(bool, ResizeTLAB, true );
define_pd_global(intx, ReservedCodeCacheSize, 32*M );
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
define_pd_global(uintx,CodeCacheMinBlockLength, 1);
define_pd_global(uintx,MetaspaceSize, 12*M );
define_pd_global(uintx, CodeCacheMinBlockLength, 1);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(uintx, MetaspaceSize, 12*M );
define_pd_global(bool, NeverActAsServerClassMachine, true );
define_pd_global(intx, NewSizeThreadIncrease, 16*K );
define_pd_global(uint64_t,MaxRAM, 1ULL*G);

View file

@@ -86,7 +86,8 @@ define_pd_global(intx, CodeCacheExpansionSize, 32*K);
// Ergonomics related flags
define_pd_global(uint64_t,MaxRAM, 4ULL*G);
#endif
define_pd_global(uintx,CodeCacheMinBlockLength, 4);
define_pd_global(uintx, CodeCacheMinBlockLength, 4);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
// Heap related flags
define_pd_global(uintx,MetaspaceSize, ScaleForWordSize(16*M));

View file

@@ -30,5 +30,4 @@
void Compile::pd_compiler2_init() {
guarantee(CodeEntryAlignment >= InteriorEntryAlignment, "" );
guarantee( VM_Version::v9_instructions_work(), "Server compiler does not run on V8 systems" );
}

View file

@@ -30,8 +30,7 @@
}
static const char* pd_cpu_opts() {
return (VM_Version::v9_instructions_work()?
(VM_Version::v8_instructions_work()? "" : "v9only") : "v8only");
return "v9only";
}
#endif // CPU_SPARC_VM_DISASSEMBLER_SPARC_HPP

View file

@@ -252,6 +252,11 @@ bool frame::safe_for_sender(JavaThread *thread) {
return false;
}
// Could be a zombie method
if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
return false;
}
// It should be safe to construct the sender though it might not be valid
frame sender(_SENDER_SP, younger_sp, adjusted_stack);
@@ -294,10 +299,10 @@ bool frame::safe_for_sender(JavaThread *thread) {
return jcw_safe;
}
// If the frame size is 0 something is bad because every nmethod has a non-zero frame size
// If the frame size is 0 (or less) something is bad because every nmethod has a non-zero frame size
// because you must allocate window space
if (sender_blob->frame_size() == 0) {
if (sender_blob->frame_size() <= 0) {
assert(!sender_blob->is_nmethod(), "should count return address at least");
return false;
}
@@ -670,7 +675,7 @@ bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
// validate ConstantPoolCache*
ConstantPoolCache* cp = *interpreter_frame_cache_addr();
if (cp == NULL || !cp->is_metadata()) return false;
if (cp == NULL || !cp->is_metaspace_object()) return false;
// validate locals

View file

@@ -110,8 +110,5 @@ define_pd_global(uintx, CMSYoungGenPerWorker, 16*M); // default max size of CMS
\
product(uintx, ArraycopyDstPrefetchDistance, 0, \
"Distance to prefetch destination array in arracopy") \
\
develop(intx, V8AtomicOperationUnderLockSpinCount, 50, \
"Number of times to spin wait on a v8 atomic operation lock") \
#endif // CPU_SPARC_VM_GLOBALS_SPARC_HPP

View file

@@ -1210,8 +1210,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object)
st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
// compare and exchange object_addr, markOop | 1, stack address of basicLock
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
(address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
cas_ptr(mark_addr.base(), mark_reg, temp_reg);
// if the compare and exchange succeeded we are done (we saw an unlocked object)
cmp_and_brx_short(mark_reg, temp_reg, Assembler::equal, Assembler::pt, done);
@@ -1291,8 +1290,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
// we expect to see the stack address of the basicLock in case the
// lock is still a light weight lock (lock_reg)
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
casx_under_lock(mark_addr.base(), lock_reg, displaced_header_reg,
(address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
cas_ptr(mark_addr.base(), lock_reg, displaced_header_reg);
cmp(lock_reg, displaced_header_reg);
brx(Assembler::equal, true, Assembler::pn, done);
delayed()->st_ptr(G0, lockobj_addr); // free entry

View file

@@ -118,7 +118,6 @@ int MacroAssembler::patched_branch(int dest_pos, int inst, int inst_pos) {
case bp_op2: m = wdisp( word_aligned_ones, 0, 19); v = wdisp( dest_pos, inst_pos, 19); break;
case fb_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
case br_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
case cb_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
case bpr_op2: {
if (is_cbcond(inst)) {
m = wdisp10(word_aligned_ones, 0);
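The m/v pairs computed in patched_branch above follow the usual mask-and-merge patching idiom; the final combine presumably looks like this (sketch):

    int patch_field(int inst, int m, int v) {
      return (inst & ~m) | v;   // clear the displacement field, then install the new bits
    }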
@@ -149,7 +148,6 @@ int MacroAssembler::branch_destination(int inst, int pos) {
case bp_op2: r = inv_wdisp( inst, pos, 19); break;
case fb_op2: r = inv_wdisp( inst, pos, 22); break;
case br_op2: r = inv_wdisp( inst, pos, 22); break;
case cb_op2: r = inv_wdisp( inst, pos, 22); break;
case bpr_op2: {
if (is_cbcond(inst)) {
r = inv_wdisp10(inst, pos);
@@ -325,12 +323,6 @@ void MacroAssembler::breakpoint_trap() {
trap(ST_RESERVED_FOR_USER_0);
}
// flush windows (except current) using flushw instruction if avail.
void MacroAssembler::flush_windows() {
if (VM_Version::v9_instructions_work()) flushw();
else flush_windows_trap();
}
// Write serialization page so VM thread can do a pseudo remote membar
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
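A hedged sketch of the serialization store described above (helper names assumed from HotSpot sources of this era, not verified against this revision):

    void serialize_memory_sketch(MacroAssembler* a, Register thread, Register tmp1, Register tmp2) {
      a->srl(thread, os::get_serialize_page_shift_count(), tmp2);  // thread-specific hash
      a->set(os::vm_page_size() - sizeof(int), tmp1);              // offset mask (may not fit simm13)
      a->and3(tmp2, tmp1, tmp2);                                   // clamp the offset into the page
      a->set((intptr_t)os::get_memory_serialize_page(), tmp1);     // page base address
      a->st(G0, tmp1, tmp2);                                       // the serializing store
    }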
@@ -358,88 +350,6 @@ void MacroAssembler::leave() {
Unimplemented();
}
void MacroAssembler::mult(Register s1, Register s2, Register d) {
if(VM_Version::v9_instructions_work()) {
mulx (s1, s2, d);
} else {
smul (s1, s2, d);
}
}
void MacroAssembler::mult(Register s1, int simm13a, Register d) {
if(VM_Version::v9_instructions_work()) {
mulx (s1, simm13a, d);
} else {
smul (s1, simm13a, d);
}
}
#ifdef ASSERT
void MacroAssembler::read_ccr_v8_assert(Register ccr_save) {
const Register s1 = G3_scratch;
const Register s2 = G4_scratch;
Label get_psr_test;
// Get the condition codes the V8 way.
read_ccr_trap(s1);
mov(ccr_save, s2);
// This is a test of V8 which has icc but not xcc
// so mask off the xcc bits
and3(s2, 0xf, s2);
// Compare condition codes from the V8 and V9 ways.
subcc(s2, s1, G0);
br(Assembler::notEqual, true, Assembler::pt, get_psr_test);
delayed()->breakpoint_trap();
bind(get_psr_test);
}
void MacroAssembler::write_ccr_v8_assert(Register ccr_save) {
const Register s1 = G3_scratch;
const Register s2 = G4_scratch;
Label set_psr_test;
// Write out the saved condition codes the V8 way
write_ccr_trap(ccr_save, s1, s2);
// Read back the condition codes using the V9 instruction
rdccr(s1);
mov(ccr_save, s2);
// This is a test of V8 which has icc but not xcc
// so mask off the xcc bits
and3(s2, 0xf, s2);
and3(s1, 0xf, s1);
// Compare the V8 way with the V9 way.
subcc(s2, s1, G0);
br(Assembler::notEqual, true, Assembler::pt, set_psr_test);
delayed()->breakpoint_trap();
bind(set_psr_test);
}
#else
#define read_ccr_v8_assert(x)
#define write_ccr_v8_assert(x)
#endif // ASSERT
void MacroAssembler::read_ccr(Register ccr_save) {
if (VM_Version::v9_instructions_work()) {
rdccr(ccr_save);
// Test code sequence used on V8. Do not move above rdccr.
read_ccr_v8_assert(ccr_save);
} else {
read_ccr_trap(ccr_save);
}
}
void MacroAssembler::write_ccr(Register ccr_save) {
if (VM_Version::v9_instructions_work()) {
// Test code sequence used on V8. Do not move below wrccr.
write_ccr_v8_assert(ccr_save);
wrccr(ccr_save);
} else {
const Register temp_reg1 = G3_scratch;
const Register temp_reg2 = G4_scratch;
write_ccr_trap(ccr_save, temp_reg1, temp_reg2);
}
}
// Calls to C land
#ifdef ASSERT
@@ -465,8 +375,8 @@ void MacroAssembler::get_thread() {
#ifdef ASSERT
AddressLiteral last_get_thread_addrlit(&last_get_thread);
set(last_get_thread_addrlit, L3);
inc(L4, get_pc(L4) + 2 * BytesPerInstWord); // skip getpc() code + inc + st_ptr to point L4 at call
st_ptr(L4, L3, 0);
rdpc(L4);
inc(L4, 3 * BytesPerInstWord); // skip rdpc + inc + st_ptr to point L4 at call
st_ptr(L4, L3, 0);
#endif
call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
delayed()->nop();
@@ -1251,12 +1161,6 @@ void MacroAssembler::align(int modulus) {
while (offset() % modulus != 0) nop();
}
void MacroAssembler::safepoint() {
relocate(breakpoint_Relocation::spec(breakpoint_Relocation::safepoint));
}
void RegistersForDebugging::print(outputStream* s) {
FlagSetting fs(Debugging, true);
int j;
@@ -1327,7 +1231,7 @@ void RegistersForDebugging::print(outputStream* s) {
void RegistersForDebugging::save_registers(MacroAssembler* a) {
a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
a->flush_windows();
a->flushw();
int i;
for (i = 0; i < 8; ++i) {
a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1); a->st_ptr( L1, O0, i_offset(i));
@@ -1338,7 +1242,7 @@ void RegistersForDebugging::save_registers(MacroAssembler* a) {
for (i = 0; i < 32; ++i) {
a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
}
for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
for (i = 0; i < 64; i += 2) {
a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
}
}
@@ -1350,7 +1254,7 @@ void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
for (int j = 0; j < 32; ++j) {
a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
}
for (int k = 0; k < (VM_Version::v9_instructions_work() ? 64 : 32); k += 2) {
for (int k = 0; k < 64; k += 2) {
a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
}
}
@@ -1465,8 +1369,6 @@ address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL };
// the high bits of the O-regs if they contain Long values. Acts as a 'leaf'
// call.
void MacroAssembler::verify_oop_subroutine() {
assert( VM_Version::v9_instructions_work(), "VerifyOops not supported for V8" );
// Leaf call; no frame.
Label succeed, fail, null_or_fail;
@@ -1870,26 +1772,17 @@ void MacroAssembler::lcmp( Register Ra_hi, Register Ra_low,
// And the equals case for the high part does not need testing,
// since that triplet is reached only after finding the high halves differ.
if (VM_Version::v9_instructions_work()) {
mov(-1, Rresult);
ba(done); delayed()-> movcc(greater, false, icc, 1, Rresult);
} else {
br(less, true, pt, done); delayed()-> set(-1, Rresult);
br(greater, true, pt, done); delayed()-> set( 1, Rresult);
}
mov(-1, Rresult);
ba(done);
delayed()->movcc(greater, false, icc, 1, Rresult);
bind( check_low_parts );
bind(check_low_parts);
if (VM_Version::v9_instructions_work()) {
mov( -1, Rresult);
movcc(equal, false, icc, 0, Rresult);
movcc(greaterUnsigned, false, icc, 1, Rresult);
} else {
set(-1, Rresult);
br(equal, true, pt, done); delayed()->set( 0, Rresult);
br(greaterUnsigned, true, pt, done); delayed()->set( 1, Rresult);
}
bind( done );
mov( -1, Rresult);
movcc(equal, false, icc, 0, Rresult);
movcc(greaterUnsigned, false, icc, 1, Rresult);
bind(done);
}
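The two-phase compare above, modeled in plain C++ (illustration): signed order on the high halves decides first; equal highs fall through to an unsigned compare of the low halves:

    #include <stdint.h>
    int lcmp_model(int32_t a_hi, uint32_t a_lo, int32_t b_hi, uint32_t b_lo) {
      if (a_hi != b_hi) return a_hi < b_hi ? -1 : 1;  // high halves differ: signed result
      if (a_lo == b_lo) return 0;                     // check_low_parts, equal case
      return a_lo < b_lo ? -1 : 1;                    // low halves compare unsigned
    }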
void MacroAssembler::lneg( Register Rhi, Register Rlow ) {
@@ -2117,119 +2010,24 @@ void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in
void MacroAssembler::float_cmp( bool is_float, int unordered_result,
FloatRegister Fa, FloatRegister Fb,
Register Rresult) {
fcmp(is_float ? FloatRegisterImpl::S : FloatRegisterImpl::D, fcc0, Fa, Fb);
Condition lt = unordered_result == -1 ? f_unorderedOrLess : f_less;
Condition eq = f_equal;
Condition gt = unordered_result == 1 ? f_unorderedOrGreater : f_greater;
if (VM_Version::v9_instructions_work()) {
  mov(-1, Rresult);
  movcc(eq, true, fcc0, 0, Rresult);
  movcc(gt, true, fcc0, 1, Rresult);
} else {
  Label done;
  set( -1, Rresult );
  //fb(lt, true, pn, done); delayed()->set( -1, Rresult );
  fb( eq, true, pn, done); delayed()->set( 0, Rresult );
  fb( gt, true, pn, done); delayed()->set( 1, Rresult );
  bind (done);
}
if (is_float) {
  fcmp(FloatRegisterImpl::S, fcc0, Fa, Fb);
} else {
  fcmp(FloatRegisterImpl::D, fcc0, Fa, Fb);
}
if (unordered_result == 1) {
  mov( -1, Rresult);
  movcc(f_equal, true, fcc0, 0, Rresult);
  movcc(f_unorderedOrGreater, true, fcc0, 1, Rresult);
} else {
  mov( -1, Rresult);
  movcc(f_equal, true, fcc0, 0, Rresult);
  movcc(f_greater, true, fcc0, 1, Rresult);
}
}
void MacroAssembler::fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
{
if (VM_Version::v9_instructions_work()) {
Assembler::fneg(w, s, d);
} else {
if (w == FloatRegisterImpl::S) {
Assembler::fneg(w, s, d);
} else if (w == FloatRegisterImpl::D) {
// number() does a sanity check on the alignment.
assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
Assembler::fneg(FloatRegisterImpl::S, s, d);
Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
} else {
assert(w == FloatRegisterImpl::Q, "Invalid float register width");
// number() does a sanity check on the alignment.
assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
Assembler::fneg(FloatRegisterImpl::S, s, d);
Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
}
}
}
void MacroAssembler::fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
{
if (VM_Version::v9_instructions_work()) {
Assembler::fmov(w, s, d);
} else {
if (w == FloatRegisterImpl::S) {
Assembler::fmov(w, s, d);
} else if (w == FloatRegisterImpl::D) {
// number() does a sanity check on the alignment.
assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
Assembler::fmov(FloatRegisterImpl::S, s, d);
Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
} else {
assert(w == FloatRegisterImpl::Q, "Invalid float register width");
// number() does a sanity check on the alignment.
assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
Assembler::fmov(FloatRegisterImpl::S, s, d);
Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
}
}
}
void MacroAssembler::fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
{
if (VM_Version::v9_instructions_work()) {
Assembler::fabs(w, s, d);
} else {
if (w == FloatRegisterImpl::S) {
Assembler::fabs(w, s, d);
} else if (w == FloatRegisterImpl::D) {
// number() does a sanity check on the alignment.
assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
Assembler::fabs(FloatRegisterImpl::S, s, d);
Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
} else {
assert(w == FloatRegisterImpl::Q, "Invalid float register width");
// number() does a sanity check on the alignment.
assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
Assembler::fabs(FloatRegisterImpl::S, s, d);
Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
}
}
}
void MacroAssembler::save_all_globals_into_locals() {
mov(G1,L1);
mov(G2,L2);
@@ -2250,135 +2048,6 @@ void MacroAssembler::restore_globals_from_locals() {
mov(L7,G7);
}
// Use for 64 bit operation.
void MacroAssembler::casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
{
// store ptr_reg as the new top value
#ifdef _LP64
casx(top_ptr_reg, top_reg, ptr_reg);
#else
cas_under_lock(top_ptr_reg, top_reg, ptr_reg, lock_addr, use_call_vm);
#endif // _LP64
}
// [RGV] This routine does not handle 64 bit operations.
// use casx_under_lock() or casx directly!!!
void MacroAssembler::cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
{
// store ptr_reg as the new top value
if (VM_Version::v9_instructions_work()) {
cas(top_ptr_reg, top_reg, ptr_reg);
} else {
// If the register is not an out nor global, it is not visible
// after the save. Allocate a register for it, save its
// value in the register save area (the save may not flush
// registers to the save area).
Register top_ptr_reg_after_save;
Register top_reg_after_save;
Register ptr_reg_after_save;
if (top_ptr_reg->is_out() || top_ptr_reg->is_global()) {
top_ptr_reg_after_save = top_ptr_reg->after_save();
} else {
Address reg_save_addr = top_ptr_reg->address_in_saved_window();
top_ptr_reg_after_save = L0;
st(top_ptr_reg, reg_save_addr);
}
if (top_reg->is_out() || top_reg->is_global()) {
top_reg_after_save = top_reg->after_save();
} else {
Address reg_save_addr = top_reg->address_in_saved_window();
top_reg_after_save = L1;
st(top_reg, reg_save_addr);
}
if (ptr_reg->is_out() || ptr_reg->is_global()) {
ptr_reg_after_save = ptr_reg->after_save();
} else {
Address reg_save_addr = ptr_reg->address_in_saved_window();
ptr_reg_after_save = L2;
st(ptr_reg, reg_save_addr);
}
const Register& lock_reg = L3;
const Register& lock_ptr_reg = L4;
const Register& value_reg = L5;
const Register& yield_reg = L6;
const Register& yieldall_reg = L7;
save_frame();
if (top_ptr_reg_after_save == L0) {
ld(top_ptr_reg->address_in_saved_window().after_save(), top_ptr_reg_after_save);
}
if (top_reg_after_save == L1) {
ld(top_reg->address_in_saved_window().after_save(), top_reg_after_save);
}
if (ptr_reg_after_save == L2) {
ld(ptr_reg->address_in_saved_window().after_save(), ptr_reg_after_save);
}
Label(retry_get_lock);
Label(not_same);
Label(dont_yield);
assert(lock_addr, "lock_address should be non null for v8");
set((intptr_t)lock_addr, lock_ptr_reg);
// Initialize yield counter
mov(G0,yield_reg);
mov(G0, yieldall_reg);
set(StubRoutines::Sparc::locked, lock_reg);
bind(retry_get_lock);
cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dont_yield);
if(use_call_vm) {
Untested("Need to verify global reg consistancy");
call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::yield_all), yieldall_reg);
} else {
// Save the regs and make space for a C call
save(SP, -96, SP);
save_all_globals_into_locals();
call(CAST_FROM_FN_PTR(address,os::yield_all));
delayed()->mov(yieldall_reg, O0);
restore_globals_from_locals();
restore();
}
// reset the counter
mov(G0,yield_reg);
add(yieldall_reg, 1, yieldall_reg);
bind(dont_yield);
// try to get lock
Assembler::swap(lock_ptr_reg, 0, lock_reg);
// did we get the lock?
cmp(lock_reg, StubRoutines::Sparc::unlocked);
br(Assembler::notEqual, true, Assembler::pn, retry_get_lock);
delayed()->add(yield_reg,1,yield_reg);
// yes, got lock. do we have the same top?
ld(top_ptr_reg_after_save, 0, value_reg);
cmp_and_br_short(value_reg, top_reg_after_save, Assembler::notEqual, Assembler::pn, not_same);
// yes, same top.
st(ptr_reg_after_save, top_ptr_reg_after_save, 0);
membar(Assembler::StoreStore);
bind(not_same);
mov(value_reg, ptr_reg_after_save);
st(lock_reg, lock_ptr_reg, 0); // unlock
restore();
}
}
RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
Register tmp,
int offset) {
@@ -2970,7 +2639,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
mark_reg);
or3(G2_thread, mark_reg, temp_reg);
casn(mark_addr.base(), mark_reg, temp_reg);
cas_ptr(mark_addr.base(), mark_reg, temp_reg);
// If the biasing toward our thread failed, this means that
// another thread succeeded in biasing it toward itself and we
// need to revoke that bias. The revocation will occur in the
@@ -2998,7 +2667,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
or3(G2_thread, temp_reg, temp_reg);
casn(mark_addr.base(), mark_reg, temp_reg);
cas_ptr(mark_addr.base(), mark_reg, temp_reg);
// If the biasing toward our thread failed, this means that
// another thread succeeded in biasing it toward itself and we
// need to revoke that bias. The revocation will occur in the
@@ -3027,7 +2696,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
// bits in this situation. Should attempt to preserve them.
load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
casn(mark_addr.base(), mark_reg, temp_reg);
cas_ptr(mark_addr.base(), mark_reg, temp_reg);
// Fall through to the normal CAS-based lock, because no matter what
// the result of the above CAS, some thread must have succeeded in
// removing the bias bit from the object's header.
@@ -3058,15 +2727,6 @@ void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg,
}
// CASN -- 32-64 bit switch hitter similar to the synthetic CASN provided by
// Solaris/SPARC's "as". Another apt name would be cas_ptr()
void MacroAssembler::casn (Register addr_reg, Register cmp_reg, Register set_reg ) {
casx_under_lock (addr_reg, cmp_reg, set_reg, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
}
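cas_ptr, which replaces casn throughout the renames below, presumably reduces to the bare pointer-width CAS now that the v8 lock fallback is gone (sketch of the assumed shape, not the verbatim replacement):

    inline void MacroAssembler::cas_ptr(Register addr, Register cmp, Register set) {
    #ifdef _LP64
      casx(addr, cmp, set);  // 64-bit compare-and-swap
    #else
      cas(addr, cmp, set);   // 32-bit compare-and-swap (also a v9 instruction)
    #endif
    }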
// compiler_lock_object() and compiler_unlock_object() are direct transliterations
// of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments.
// The code could be tightened up considerably.
@@ -3129,8 +2789,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
// compare object markOop with Rmark and if equal exchange Rscratch with object markOop
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
casx_under_lock(mark_addr.base(), Rmark, Rscratch,
(address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
cas_ptr(mark_addr.base(), Rmark, Rscratch);
// if compare/exchange succeeded we found an unlocked object and we now have locked it
// hence we are done
@@ -3176,7 +2835,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
mov(Rbox, Rscratch);
or3(Rmark, markOopDesc::unlocked_value, Rmark);
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
casn(mark_addr.base(), Rmark, Rscratch);
cas_ptr(mark_addr.base(), Rmark, Rscratch);
cmp(Rmark, Rscratch);
brx(Assembler::equal, false, Assembler::pt, done);
delayed()->sub(Rscratch, SP, Rscratch);
@@ -3207,7 +2866,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
// Invariant: if we acquire the lock then _recursions should be 0.
add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
mov(G2_thread, Rscratch);
casn(Rmark, G0, Rscratch);
cas_ptr(Rmark, G0, Rscratch);
cmp(Rscratch, G0);
// Intentional fall-through into done
} else {
@@ -3240,7 +2899,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
mov(0, Rscratch);
or3(Rmark, markOopDesc::unlocked_value, Rmark);
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
casn(mark_addr.base(), Rmark, Rscratch);
cas_ptr(mark_addr.base(), Rmark, Rscratch);
// prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
cmp(Rscratch, Rmark);
brx(Assembler::notZero, false, Assembler::pn, Recursive);
@@ -3266,7 +2925,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
// the fast-path stack-lock code from the interpreter and always passed
// control to the "slow" operators in synchronizer.cpp.
// RScratch contains the fetched obj->mark value from the failed CASN.
// RScratch contains the fetched obj->mark value from the failed CAS.
#ifdef _LP64
sub(Rscratch, STACK_BIAS, Rscratch);
#endif
@@ -3300,7 +2959,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
// Invariant: if we acquire the lock then _recursions should be 0.
add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
mov(G2_thread, Rscratch);
casn(Rmark, G0, Rscratch);
cas_ptr(Rmark, G0, Rscratch);
cmp(Rscratch, G0);
// ST box->displaced_header = NonZero.
// Any non-zero value suffices:
@@ -3336,8 +2995,7 @@ void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
// Check if it is still a light weight lock, this is true if we see
// the stack address of the basicLock in the markOop of the object
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
casx_under_lock(mark_addr.base(), Rbox, Rmark,
(address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
cas_ptr(mark_addr.base(), Rbox, Rmark);
ba(done);
delayed()->cmp(Rbox, Rmark);
bind(done);
@@ -3398,7 +3056,7 @@ void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
delayed()->andcc(G0, G0, G0);
add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
mov(G2_thread, Rscratch);
casn(Rmark, G0, Rscratch);
cas_ptr(Rmark, G0, Rscratch);
// invert icc.zf and goto done
br_notnull(Rscratch, false, Assembler::pt, done);
delayed()->cmp(G0, G0);
@@ -3440,7 +3098,7 @@ void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
// A prototype implementation showed excellent results, although
// the scavenger and timeout code was rather involved.
casn(mark_addr.base(), Rbox, Rscratch);
cas_ptr(mark_addr.base(), Rbox, Rscratch);
cmp(Rbox, Rscratch);
// Intentional fall through into done ...
@@ -3540,7 +3198,8 @@ void MacroAssembler::eden_allocate(
if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
// No allocation in the shared eden.
ba_short(slow_case);
ba(slow_case);
delayed()->nop();
} else {
// get eden boundaries
// note: we need both top & top_addr!
@@ -3583,7 +3242,7 @@
// Compare obj with the value at top_addr; if still equal, swap the value of
// end with the value at top_addr. If not equal, read the value at top_addr
// into end.
casx_under_lock(top_addr, obj, end, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
cas_ptr(top_addr, obj, end);
// if someone beat us on the allocation, try again, otherwise continue
cmp(obj, end);
brx(Assembler::notEqual, false, Assembler::pn, retry);
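The retry loop above is bump-pointer allocation racing on eden's top pointer; modeled with a hypothetical atomic (illustration only):

    #include <atomic>
    #include <cstddef>
    typedef char* HeapWordPtr;  // stand-in for HotSpot's HeapWord*
    HeapWordPtr cas_allocate(std::atomic<HeapWordPtr>* top_addr, size_t size, HeapWordPtr eden_end) {
      for (;;) {
        HeapWordPtr obj = top_addr->load();            // current top
        HeapWordPtr end = obj + size;
        if (end > eden_end) return nullptr;            // no room: caller takes the slow case
        // cas_ptr: swap end into *top_addr iff it still equals obj; retry if we lost the race
        if (top_addr->compare_exchange_weak(obj, end)) return obj;
      }
    }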

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -963,7 +963,7 @@ public:
inline void sub(Register s1, RegisterOrConstant s2, Register d, int offset = 0);
using Assembler::swap;
inline void swap(Address& a, Register d, int offset = 0);
inline void swap(const Address& a, Register d, int offset = 0);
// address pseudos: make these names unlike instruction names to avoid confusion
inline intptr_t load_pc_address( Register reg, int bytes_to_skip );
@@ -1056,13 +1056,6 @@ public:
void breakpoint_trap();
void breakpoint_trap(Condition c, CC cc);
void flush_windows_trap();
void clean_windows_trap();
void get_psr_trap();
void set_psr_trap();
// V8/V9 flush_windows
void flush_windows();
// Support for serializing memory accesses between threads
void serialize_memory(Register thread, Register tmp1, Register tmp2);
@@ -1071,14 +1064,6 @@ public:
void enter();
void leave();
// V8/V9 integer multiply
void mult(Register s1, Register s2, Register d);
void mult(Register s1, int simm13a, Register d);
// V8/V9 read and write of condition codes.
void read_ccr(Register d);
void write_ccr(Register s);
// Manipulation of C++ bools
// These are idioms to flag the need for care with accessing bools but on
// this platform we assume byte size
@@ -1162,21 +1147,6 @@ public:
// check_and_forward_exception to handle exceptions when it is safe
void check_and_forward_exception(Register scratch_reg);
private:
// For V8
void read_ccr_trap(Register ccr_save);
void write_ccr_trap(Register ccr_save1, Register scratch1, Register scratch2);
#ifdef ASSERT
// For V8 debugging. Uses V8 instruction sequence and checks
// result with V9 instructions rdccr and wrccr.
// Uses Gscratch and Gscratch2
void read_ccr_v8_assert(Register ccr_save);
void write_ccr_v8_assert(Register ccr_save);
#endif // ASSERT
public:
// Write to card table for - register is destroyed afterwards.
void card_table_write(jbyte* byte_map_base, Register tmp, Register obj);
@@ -1314,20 +1284,9 @@ public:
FloatRegister Fa, FloatRegister Fb,
Register Rresult);
void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { Assembler::fneg(w, sd); }
void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
void save_all_globals_into_locals();
void restore_globals_from_locals();
void casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
address lock_addr=0, bool use_call_vm=false);
void cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
address lock_addr=0, bool use_call_vm=false);
void casn (Register addr_reg, Register cmp_reg, Register set_reg) ;
// These set the icc condition code to equal if the lock succeeded
// and notEqual if it failed and requires a slow case
void compiler_lock_object(Register Roop, Register Rmark, Register Rbox,

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -229,10 +229,7 @@ inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Registe
// Use the right branch for the platform
inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
if (VM_Version::v9_instructions_work())
Assembler::bp(c, a, icc, p, d, rt);
else
Assembler::br(c, a, d, rt);
Assembler::bp(c, a, icc, p, d, rt);
}
inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
@@ -268,10 +265,7 @@ inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, Label& L
}
inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
if (VM_Version::v9_instructions_work())
fbp(c, a, fcc0, p, d, rt);
else
Assembler::fb(c, a, d, rt);
fbp(c, a, fcc0, p, d, rt);
}
inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
@ -334,7 +328,7 @@ inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder co
// prefetch instruction
inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
if (VM_Version::v9_instructions_work())
Assembler::bp( never, true, xcc, pt, d, rt );
Assembler::bp( never, true, xcc, pt, d, rt );
}
inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }
@ -344,15 +338,7 @@ inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }
// returns delta from gotten pc to addr after
inline int MacroAssembler::get_pc( Register d ) {
int x = offset();
if (VM_Version::v9_instructions_work())
rdpc(d);
else {
Label lbl;
Assembler::call(lbl, relocInfo::none); // No relocation as this is call to pc+0x8
if (d == O7) delayed()->nop();
else delayed()->mov(O7, d);
bind(lbl);
}
rdpc(d);
return offset() - x;
}
@ -646,41 +632,26 @@ inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, const Address& a, Fl
// returns whether membar generates anything; obviously this code should mirror
// membar below.
inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
if( !os::is_MP() ) return false; // Not needed on single CPU
if( VM_Version::v9_instructions_work() ) {
const Membar_mask_bits effective_mask =
Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
return (effective_mask != 0);
} else {
return true;
}
if (!os::is_MP())
return false; // Not needed on single CPU
const Membar_mask_bits effective_mask =
Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
return (effective_mask != 0);
}
inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
// Uniprocessors do not need memory barriers
if (!os::is_MP()) return;
if (!os::is_MP())
return;
// Weakened for current Sparcs and TSO. See the v9 manual, sections 8.4.3,
// 8.4.4.3, a.31 and a.50.
if( VM_Version::v9_instructions_work() ) {
// Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
// of the mmask subfield of const7a that does anything that isn't done
// implicitly is StoreLoad.
const Membar_mask_bits effective_mask =
Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
if ( effective_mask != 0 ) {
Assembler::membar( effective_mask );
}
} else {
// stbar is the closest there is on v8. Equivalent to membar(StoreStore). We
// do not issue the stbar because to my knowledge all v8 machines implement TSO,
// which guarantees that all stores behave as if an stbar were issued just after
// each one of them. On these machines, stbar ought to be a nop. There doesn't
// appear to be an equivalent of membar(StoreLoad) on v8: TSO doesn't require it,
// it can't be specified by stbar, nor have I come up with a way to simulate it.
//
// Addendum. Dave says that ldstub guarantees a write buffer flush to coherent
// space. Put one here to be on the safe side.
Assembler::ldstub(SP, 0, G0);
// Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
// of the mmask subfield of const7a that does anything that isn't done
// implicitly is StoreLoad.
const Membar_mask_bits effective_mask =
Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
if (effective_mask != 0) {
Assembler::membar(effective_mask);
}
}
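// For reference, a minimal sketch (names from the enclosing code) of how the
// TSO weakening above reduces a full fence request:
//   Membar_mask_bits req = Membar_mask_bits(LoadLoad | StoreStore | StoreLoad);
//   Membar_mask_bits eff = Membar_mask_bits(req & ~(LoadLoad | LoadStore | StoreStore));
//   // eff == StoreLoad, so only a membar #StoreLoad is emitted.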
@ -748,7 +719,7 @@ inline void MacroAssembler::sub(Register s1, RegisterOrConstant s2, Register d,
if (offset != 0) sub(d, offset, d);
}
inline void MacroAssembler::swap(Address& a, Register d, int offset) {
inline void MacroAssembler::swap(const Address& a, Register d, int offset) {
relocate(a.rspec(offset));
if (a.has_index()) { assert(offset == 0, ""); swap(a.base(), a.index(), d ); }
else { swap(a.base(), a.disp() + offset, d); }

View file

@ -162,7 +162,7 @@ void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
int i1 = ((int*)code_buffer)[1];
int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord);
assert(inv_op(*contention_addr) == Assembler::arith_op ||
*contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
*contention_addr == nop_instruction(),
"must not interfere with original call");
// The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
n_call->set_long_at(1*BytesPerInstWord, i1);
@ -181,7 +181,7 @@ void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
// Make sure the first-patched instruction, which may co-exist
// briefly with the call, will do something harmless.
assert(inv_op(*contention_addr) == Assembler::arith_op ||
*contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
*contention_addr == nop_instruction(),
"must not interfere with original call");
}
@ -933,11 +933,7 @@ void NativeJump::patch_verified_entry(address entry, address verified_entry, add
int code_size = 1 * BytesPerInstWord;
CodeBuffer cb(verified_entry, code_size + 1);
MacroAssembler* a = new MacroAssembler(&cb);
if (VM_Version::v9_instructions_work()) {
a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
} else {
a->lduw(G0, 0, O7); // "ld" must agree with code in the signal handler
}
a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
ICache::invalidate_range(verified_entry, code_size);
}
@ -1024,7 +1020,7 @@ void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer)
int i1 = ((int*)code_buffer)[1];
int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord);
assert(inv_op(*contention_addr) == Assembler::arith_op ||
*contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
*contention_addr == nop_instruction(),
"must not interfere with original call");
// The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
h_jump->set_long_at(1*BytesPerInstWord, i1);
@ -1043,6 +1039,6 @@ void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer)
// Make sure the first-patched instruction, which may co-exist
// briefly with the call, will do something harmless.
assert(inv_op(*contention_addr) == Assembler::arith_op ||
*contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
*contention_addr == nop_instruction(),
"must not interfere with original call");
}

View file

@ -70,8 +70,7 @@ class NativeInstruction VALUE_OBJ_CLASS_SPEC {
bool is_zombie() {
int x = long_at(0);
return is_op3(x,
VM_Version::v9_instructions_work() ?
Assembler::ldsw_op3 : Assembler::lduw_op3,
Assembler::ldsw_op3,
Assembler::ldst_op)
&& Assembler::inv_rs1(x) == G0
&& Assembler::inv_rd(x) == O7;

View file

@ -249,12 +249,10 @@ class FloatRegisterImpl: public AbstractRegisterImpl {
case D:
assert(c < 64 && (c & 1) == 0, "bad double float register");
assert(c < 32 || VM_Version::v9_instructions_work(), "V9 float work only on V9 platform");
return (c & 0x1e) | ((c & 0x20) >> 5);
case Q:
assert(c < 64 && (c & 3) == 0, "bad quad float register");
assert(c < 32 || VM_Version::v9_instructions_work(), "V9 float work only on V9 platform");
return (c & 0x1c) | ((c & 0x20) >> 5);
}
ShouldNotReachHere();
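// Worked example of the D encoding above: double register F32 has c == 32, so
// (32 & 0x1e) | ((32 & 0x20) >> 5) == 0 | 1 == 1; bit 5 of the register number
// is folded into bit 0, as the V9 encoding requires.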

View file

@ -193,36 +193,6 @@ address Relocation::pd_get_address_from_code() {
return *(address*)addr();
}
int Relocation::pd_breakpoint_size() {
// minimum breakpoint size, in short words
return NativeIllegalInstruction::instruction_size / sizeof(short);
}
void Relocation::pd_swap_in_breakpoint(address x, short* instrs, int instrlen) {
Untested("pd_swap_in_breakpoint");
// %%% probably do not need a general instrlen; just use the trap size
if (instrs != NULL) {
assert(instrlen * sizeof(short) == NativeIllegalInstruction::instruction_size, "enough instrlen in reloc. data");
for (int i = 0; i < instrlen; i++) {
instrs[i] = ((short*)x)[i];
}
}
NativeIllegalInstruction::insert(x);
}
void Relocation::pd_swap_out_breakpoint(address x, short* instrs, int instrlen) {
Untested("pd_swap_out_breakpoint");
assert(instrlen * sizeof(short) == sizeof(int), "enough buf");
union { int l; short s[1]; } u;
for (int i = 0; i < instrlen; i++) {
u.s[i] = instrs[i];
}
NativeInstruction* ni = nativeInstruction_at(x);
ni->set_long_at(0, u.l);
}
void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
}

View file

@ -2459,7 +2459,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Finally just about ready to make the JNI call
__ flush_windows();
__ flushw();
if (inner_frame_created) {
__ restore();
} else {

View file

@ -2778,10 +2778,7 @@ enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
Register Rold = reg_to_register_object($old$$reg);
Register Rnew = reg_to_register_object($new$$reg);
// casx_under_lock picks 1 of 3 encodings:
// For 32-bit pointers you get a 32-bit CAS
// For 64-bit pointers you get a 64-bit CASX
__ casn(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold
__ cas_ptr(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold
__ cmp( Rold, Rnew );
%}
@ -3067,7 +3064,7 @@ enc_class enc_Array_Equals(o0RegP ary1, o1RegP ary2, g3RegP tmp1, notemp_iRegI r
AddressLiteral last_rethrow_addrlit(&last_rethrow);
__ sethi(last_rethrow_addrlit, L1);
Address addr(L1, last_rethrow_addrlit.low10());
__ get_pc(L2);
__ rdpc(L2);
__ inc(L2, 3 * BytesPerInstWord); // skip this & 2 more insns to point at jump_to
__ st_ptr(L2, addr);
__ restore();

View file

@ -410,6 +410,51 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
// Safefetch stubs.
void generate_safefetch(const char* name, int size, address* entry,
address* fault_pc, address* continuation_pc) {
// safefetch signatures:
// int SafeFetch32(int* adr, int errValue);
// intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
//
// arguments:
// o0 = adr
// o1 = errValue
//
// result:
// o0 = *adr or errValue
StubCodeMark mark(this, "StubRoutines", name);
// Entry point, pc or function descriptor.
__ align(CodeEntryAlignment);
*entry = __ pc();
__ mov(O0, G1); // g1 = o0
__ mov(O1, O0); // o0 = o1
// Load *adr into O0, may fault.
*fault_pc = __ pc();
switch (size) {
case 4:
// int32_t
__ ldsw(G1, 0, O0); // o0 = [g1]
break;
case 8:
// int64_t
__ ldx(G1, 0, O0); // o0 = [g1]
break;
default:
ShouldNotReachHere();
}
// return errValue or *adr
*continuation_pc = __ pc();
// By convention with the trap handler we ensure there is a non-CTI
// instruction in the trap shadow.
__ nop();
__ retl();
__ delayed()->nop();
}
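// A minimal usage sketch for the stub above, assuming the standard SafeFetch
// entry points whose signatures are quoted at the top of the generator:
//   int probe(int* adr) {
//     return SafeFetch32(adr, -1); // yields -1 instead of faulting if adr is bad
//   }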
//------------------------------------------------------------------------------------------------------------------------
// Continuation point for throwing of implicit exceptions that are not handled in
@ -566,7 +611,7 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows");
address start = __ pc();
__ flush_windows();
__ flushw();
__ retl(false);
__ delayed()->add( FP, STACK_BIAS, O0 );
// The returned value must be a stack pointer whose register save area
@ -575,67 +620,9 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
// Helper functions for v8 atomic operations.
//
void get_v8_oop_lock_ptr(Register lock_ptr_reg, Register mark_oop_reg, Register scratch_reg) {
if (mark_oop_reg == noreg) {
address lock_ptr = (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr();
__ set((intptr_t)lock_ptr, lock_ptr_reg);
} else {
assert(scratch_reg != noreg, "just checking");
address lock_ptr = (address)StubRoutines::Sparc::_v8_oop_lock_cache;
__ set((intptr_t)lock_ptr, lock_ptr_reg);
__ and3(mark_oop_reg, StubRoutines::Sparc::v8_oop_lock_mask_in_place, scratch_reg);
__ add(lock_ptr_reg, scratch_reg, lock_ptr_reg);
}
}
void generate_v8_lock_prologue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
get_v8_oop_lock_ptr(lock_ptr_reg, mark_oop_reg, scratch_reg);
__ set(StubRoutines::Sparc::locked, lock_reg);
// Initialize yield counter
__ mov(G0,yield_reg);
__ BIND(retry);
__ cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dontyield);
// This code can only be called from inside the VM, this
// stub is only invoked from Atomic::add(). We do not
// want to use call_VM, because _last_java_sp and such
// must already be set.
//
// Save the regs and make space for a C call
__ save(SP, -96, SP);
__ save_all_globals_into_locals();
BLOCK_COMMENT("call os::naked_sleep");
__ call(CAST_FROM_FN_PTR(address, os::naked_sleep));
__ delayed()->nop();
__ restore_globals_from_locals();
__ restore();
// reset the counter
__ mov(G0,yield_reg);
__ BIND(dontyield);
// try to get lock
__ swap(lock_ptr_reg, 0, lock_reg);
// did we get the lock?
__ cmp(lock_reg, StubRoutines::Sparc::unlocked);
__ br(Assembler::notEqual, true, Assembler::pn, retry);
__ delayed()->add(yield_reg,1,yield_reg);
// yes, got lock. do the operation here.
}
void generate_v8_lock_epilogue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
__ st(lock_reg, lock_ptr_reg, 0); // unlock
}
// Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
//
// Arguments :
// Arguments:
//
// exchange_value: O0
// dest: O1
@ -656,33 +643,14 @@ class StubGenerator: public StubCodeGenerator {
__ mov(O0, O3); // scratch copy of exchange value
__ ld(O1, 0, O2); // observe the previous value
// try to replace O2 with O3
__ cas_under_lock(O1, O2, O3,
(address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
__ cas(O1, O2, O3);
__ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
__ retl(false);
__ delayed()->mov(O2, O0); // report previous value to caller
} else {
if (VM_Version::v9_instructions_work()) {
__ retl(false);
__ delayed()->swap(O1, 0, O0);
} else {
const Register& lock_reg = O2;
const Register& lock_ptr_reg = O3;
const Register& yield_reg = O4;
Label retry;
Label dontyield;
generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
// got the lock, do the swap
__ swap(O1, 0, O0);
generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
__ retl(false);
__ delayed()->nop();
}
__ retl(false);
__ delayed()->swap(O1, 0, O0);
}
return start;
@ -691,7 +659,7 @@ class StubGenerator: public StubCodeGenerator {
// Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
//
// Arguments :
// Arguments:
//
// exchange_value: O0
// dest: O1
@ -701,15 +669,12 @@ class StubGenerator: public StubCodeGenerator {
//
// O0: the value previously stored in dest
//
// Overwrites (v8): O3,O4,O5
//
address generate_atomic_cmpxchg() {
StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
address start = __ pc();
// cmpxchg(dest, compare_value, exchange_value)
__ cas_under_lock(O1, O2, O0,
(address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
__ cas(O1, O2, O0);
__ retl(false);
__ delayed()->nop();
@ -718,7 +683,7 @@ class StubGenerator: public StubCodeGenerator {
// Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
//
// Arguments :
// Arguments:
//
// exchange_value: O1:O0
// dest: O2
@ -728,17 +693,12 @@ class StubGenerator: public StubCodeGenerator {
//
// O1:O0: the value previously stored in dest
//
// This only works on V9, on V8 we don't generate any
// code and just return NULL.
//
// Overwrites: G1,G2,G3
//
address generate_atomic_cmpxchg_long() {
StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
address start = __ pc();
if (!VM_Version::supports_cx8())
return NULL;
__ sllx(O0, 32, O0);
__ srl(O1, 0, O1);
__ or3(O0,O1,O0); // O0 holds 64-bit value from compare_value
@ -756,7 +716,7 @@ class StubGenerator: public StubCodeGenerator {
// Support for jint Atomic::add(jint add_value, volatile jint* dest).
//
// Arguments :
// Arguments:
//
// add_value: O0 (e.g., +1 or -1)
// dest: O1
@ -765,47 +725,22 @@ class StubGenerator: public StubCodeGenerator {
//
// O0: the new value stored in dest
//
// Overwrites (v9): O3
// Overwrites (v8): O3,O4,O5
// Overwrites: O3
//
address generate_atomic_add() {
StubCodeMark mark(this, "StubRoutines", "atomic_add");
address start = __ pc();
__ BIND(_atomic_add_stub);
if (VM_Version::v9_instructions_work()) {
Label(retry);
__ BIND(retry);
Label(retry);
__ BIND(retry);
__ lduw(O1, 0, O2);
__ add(O0, O2, O3);
__ cas(O1, O2, O3);
__ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
__ retl(false);
__ delayed()->add(O0, O2, O0); // note that cas made O2==O3
} else {
const Register& lock_reg = O2;
const Register& lock_ptr_reg = O3;
const Register& value_reg = O4;
const Register& yield_reg = O5;
Label(retry);
Label(dontyield);
generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
// got lock, do the increment
__ ld(O1, 0, value_reg);
__ add(O0, value_reg, value_reg);
__ st(value_reg, O1, 0);
// %%% only for RMO and PSO
__ membar(Assembler::StoreStore);
generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
__ retl(false);
__ delayed()->mov(value_reg, O0);
}
__ lduw(O1, 0, O2);
__ add(O0, O2, O3);
__ cas(O1, O2, O3);
__ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
__ retl(false);
__ delayed()->add(O0, O2, O0); // note that cas made O2==O3
return start;
}
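// Equivalent C++ sketch of the lock-free add loop above (cas() stands in for
// the casa instruction and its name is hypothetical):
//   jint atomic_add(jint add_value, volatile jint* dest) {
//     jint old;
//     do { old = *dest; } while (cas(dest, old, old + add_value) != old);
//     return old + add_value; // matches the delayed add(O0, O2, O0)
//   }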
@ -841,7 +776,7 @@ class StubGenerator: public StubCodeGenerator {
__ mov(G3, L3);
__ mov(G4, L4);
__ mov(G5, L5);
for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
for (i = 0; i < 64; i += 2) {
__ stf(FloatRegisterImpl::D, as_FloatRegister(i), preserve_addr, i * wordSize);
}
@ -855,7 +790,7 @@ class StubGenerator: public StubCodeGenerator {
__ mov(L3, G3);
__ mov(L4, G4);
__ mov(L5, G5);
for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
for (i = 0; i < 64; i += 2) {
__ ldf(FloatRegisterImpl::D, preserve_addr, as_FloatRegister(i), i * wordSize);
}
@ -3425,6 +3360,14 @@ class StubGenerator: public StubCodeGenerator {
// Don't initialize the platform math functions since sparc
// doesn't have intrinsics for these operations.
// Safefetch stubs.
generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
&StubRoutines::_safefetch32_fault_pc,
&StubRoutines::_safefetch32_continuation_pc);
generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
&StubRoutines::_safefetchN_fault_pc,
&StubRoutines::_safefetchN_continuation_pc);
}

View file

@ -52,7 +52,3 @@ address StubRoutines::Sparc::_stop_subroutine_entry = NULL;
address StubRoutines::Sparc::_flush_callers_register_windows_entry = CAST_FROM_FN_PTR(address, bootstrap_flush_windows);
address StubRoutines::Sparc::_partial_subtype_check = NULL;
int StubRoutines::Sparc::_atomic_memory_operation_lock = StubRoutines::Sparc::unlocked;
int StubRoutines::Sparc::_v8_oop_lock_cache[StubRoutines::Sparc::nof_v8_oop_lock_cache_entries];

View file

@ -47,46 +47,14 @@ enum /* platform_dependent_constants */ {
class Sparc {
friend class StubGenerator;
public:
enum { nof_instance_allocators = 10 };
// allocator lock values
enum {
unlocked = 0,
locked = 1
};
enum {
v8_oop_lock_ignore_bits = 2,
v8_oop_lock_bits = 4,
nof_v8_oop_lock_cache_entries = 1 << (v8_oop_lock_bits+v8_oop_lock_ignore_bits),
v8_oop_lock_mask = right_n_bits(v8_oop_lock_bits),
v8_oop_lock_mask_in_place = v8_oop_lock_mask << v8_oop_lock_ignore_bits
};
static int _v8_oop_lock_cache[nof_v8_oop_lock_cache_entries];
private:
static address _test_stop_entry;
static address _stop_subroutine_entry;
static address _flush_callers_register_windows_entry;
static int _atomic_memory_operation_lock;
static address _partial_subtype_check;
public:
// %%% global lock for everyone who needs to use atomic_compare_and_exchange
// %%% or atomic_increment -- should probably use more locks for more
// %%% scalability-- for instance one for each eden space or group of
// address of the lock for atomic_compare_and_exchange
static int* atomic_memory_operation_lock_addr() { return &_atomic_memory_operation_lock; }
// accessor and mutator for _atomic_memory_operation_lock
static int atomic_memory_operation_lock() { return _atomic_memory_operation_lock; }
static void set_atomic_memory_operation_lock(int value) { _atomic_memory_operation_lock = value; }
// test assembler stop routine by setting registers
static void (*test_stop_entry()) () { return CAST_TO_FN_PTR(void (*)(void), _test_stop_entry); }

View file

@ -1054,7 +1054,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// flush the windows now. We don't care about the current (protection) frame
// only the outer frames
__ flush_windows();
__ flushw();
// mark windows as flushed
Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());

View file

@ -1338,14 +1338,13 @@ void TemplateTable::lneg() {
void TemplateTable::fneg() {
transition(ftos, ftos);
__ fneg(FloatRegisterImpl::S, Ftos_f);
__ fneg(FloatRegisterImpl::S, Ftos_f, Ftos_f);
}
void TemplateTable::dneg() {
transition(dtos, dtos);
// v8 has fnegd if source and dest are the same
__ fneg(FloatRegisterImpl::D, Ftos_f);
__ fneg(FloatRegisterImpl::D, Ftos_f, Ftos_f);
}
@ -1470,19 +1469,10 @@ void TemplateTable::convert() {
__ st_long(Otos_l, __ d_tmp);
__ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);
if (VM_Version::v9_instructions_work()) {
if (bytecode() == Bytecodes::_l2f) {
__ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
} else {
__ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
}
if (bytecode() == Bytecodes::_l2f) {
__ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
} else {
__ call_VM_leaf(
Lscratch,
bytecode() == Bytecodes::_l2f
? CAST_FROM_FN_PTR(address, SharedRuntime::l2f)
: CAST_FROM_FN_PTR(address, SharedRuntime::l2d)
);
__ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
}
break;
@ -1490,11 +1480,6 @@ void TemplateTable::convert() {
Label isNaN;
// result must be 0 if value is NaN; test by comparing value to itself
__ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
// According to the v8 manual, you have to have a non-fp instruction
// between fcmp and fb.
if (!VM_Version::v9_instructions_work()) {
__ nop();
}
__ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
__ delayed()->clr(Otos_i); // NaN
__ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
@ -1537,16 +1522,7 @@ void TemplateTable::convert() {
break;
case Bytecodes::_d2f:
if (VM_Version::v9_instructions_work()) {
__ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
}
else {
// must uncache tos
__ push_d();
__ pop_i(O0);
__ pop_i(O1);
__ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::d2f));
}
break;
default: ShouldNotReachHere();
@ -1956,17 +1932,8 @@ void TemplateTable::fast_binaryswitch() {
__ ld( Rarray, Rscratch, Rscratch );
// (Rscratch is already in the native byte-ordering.)
__ cmp( Rkey, Rscratch );
if ( VM_Version::v9_instructions_work() ) {
__ movcc( Assembler::less, false, Assembler::icc, Rh, Rj ); // j = h if (key < array[h].fast_match())
__ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri ); // i = h if (key >= array[h].fast_match())
}
else {
Label end_of_if;
__ br( Assembler::less, true, Assembler::pt, end_of_if );
__ delayed()->mov( Rh, Rj ); // if (<) Rj = Rh
__ mov( Rh, Ri ); // else i = h
__ bind(end_of_if); // }
}
__ movcc( Assembler::less, false, Assembler::icc, Rh, Rj ); // j = h if (key < array[h].fast_match())
__ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri ); // i = h if (key >= array[h].fast_match())
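// The two conditional moves implement a branchless binary-search step; in C++
// terms (names hypothetical):
//   int h = (i + j) >> 1;
//   if (key < array[h].fast_match()) j = h; else i = h;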
// while (i+1 < j)
__ bind( entry );
@ -3418,9 +3385,7 @@ void TemplateTable::_new() {
// has been allocated.
__ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);
__ casx_under_lock(RtopAddr, RoldTopValue, RnewTopValue,
VM_Version::v9_instructions_work() ? NULL :
(address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
__ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue);
// if someone beat us on the allocation, try again, otherwise continue
__ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);
@ -3701,14 +3666,7 @@ void TemplateTable::monitorenter() {
__ verify_oop(O4); // verify each monitor's oop
__ tst(O4); // is this entry unused?
if (VM_Version::v9_instructions_work())
__ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);
else {
Label L;
__ br( Assembler::zero, true, Assembler::pn, L );
__ delayed()->mov(O3, O1); // remember this one if it matches
__ bind(L);
}
__ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);
__ cmp(O4, O0); // check if current entry is for same object
__ brx( Assembler::equal, false, Assembler::pn, exit );

View file

@ -75,23 +75,14 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(AllocatePrefetchStyle, 1);
}
if (has_v9()) {
assert(ArraycopySrcPrefetchDistance < 4096, "invalid value");
if (ArraycopySrcPrefetchDistance >= 4096)
ArraycopySrcPrefetchDistance = 4064;
assert(ArraycopyDstPrefetchDistance < 4096, "invalid value");
if (ArraycopyDstPrefetchDistance >= 4096)
ArraycopyDstPrefetchDistance = 4064;
} else {
if (ArraycopySrcPrefetchDistance > 0) {
warning("prefetch instructions are not available on this CPU");
FLAG_SET_DEFAULT(ArraycopySrcPrefetchDistance, 0);
}
if (ArraycopyDstPrefetchDistance > 0) {
warning("prefetch instructions are not available on this CPU");
FLAG_SET_DEFAULT(ArraycopyDstPrefetchDistance, 0);
}
}
guarantee(VM_Version::has_v9(), "only SPARC v9 is supported");
assert(ArraycopySrcPrefetchDistance < 4096, "invalid value");
if (ArraycopySrcPrefetchDistance >= 4096)
ArraycopySrcPrefetchDistance = 4064;
assert(ArraycopyDstPrefetchDistance < 4096, "invalid value");
if (ArraycopyDstPrefetchDistance >= 4096)
ArraycopyDstPrefetchDistance = 4064;
UseSSE = 0; // Only on x86 and x64

View file

@ -177,10 +177,6 @@ public:
return AllocatePrefetchDistance > 0 ? AllocatePrefetchStyle : 0;
}
// Legacy
static bool v8_instructions_work() { return has_v8() && !has_v9(); }
static bool v9_instructions_work() { return has_v9(); }
// Assembler testing
static void allow_all();
static void revert();

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1673,6 +1673,11 @@ void Assembler::movdqa(XMMRegister dst, XMMRegister src) {
emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_66);
}
void Assembler::movdqa(XMMRegister dst, Address src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_66);
}
void Assembler::movdqu(XMMRegister dst, Address src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_F3);
@ -2286,6 +2291,38 @@ void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
emit_int8(imm8);
}
void Assembler::pextrd(Register dst, XMMRegister src, int imm8) {
assert(VM_Version::supports_sse4_1(), "");
int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, false);
emit_int8(0x16);
emit_int8((unsigned char)(0xC0 | encode));
emit_int8(imm8);
}
void Assembler::pextrq(Register dst, XMMRegister src, int imm8) {
assert(VM_Version::supports_sse4_1(), "");
int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, true);
emit_int8(0x16);
emit_int8((unsigned char)(0xC0 | encode));
emit_int8(imm8);
}
void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) {
assert(VM_Version::supports_sse4_1(), "");
int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, false);
emit_int8(0x22);
emit_int8((unsigned char)(0xC0 | encode));
emit_int8(imm8);
}
void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) {
assert(VM_Version::supports_sse4_1(), "");
int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, true);
emit_int8(0x22);
emit_int8((unsigned char)(0xC0 | encode));
emit_int8(imm8);
}
void Assembler::pmovzxbw(XMMRegister dst, Address src) {
assert(VM_Version::supports_sse4_1(), "");
InstructionMark im(this);
@ -3691,6 +3728,16 @@ void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src) {
emit_int8((unsigned char)(0xC0 | encode));
}
// Carry-Less Multiplication Quadword
void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) {
assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), "");
bool vector256 = false;
int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
emit_int8(0x44);
emit_int8((unsigned char)(0xC0 | encode));
emit_int8((unsigned char)mask);
}
void Assembler::vzeroupper() {
assert(VM_Version::supports_avx(), "");
(void)vex_prefix_and_encode(xmm0, xmm0, xmm0, VEX_SIMD_NONE);

View file

@ -1266,6 +1266,7 @@ private:
// Move Aligned Double Quadword
void movdqa(XMMRegister dst, XMMRegister src);
void movdqa(XMMRegister dst, Address src);
// Move Unaligned Double Quadword
void movdqu(Address dst, XMMRegister src);
@ -1404,6 +1405,14 @@ private:
void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
void pcmpestri(XMMRegister xmm1, Address src, int imm8);
// SSE 4.1 extract
void pextrd(Register dst, XMMRegister src, int imm8);
void pextrq(Register dst, XMMRegister src, int imm8);
// SSE 4.1 insert
void pinsrd(XMMRegister dst, Register src, int imm8);
void pinsrq(XMMRegister dst, Register src, int imm8);
// SSE4.1 packed move
void pmovzxbw(XMMRegister dst, XMMRegister src);
void pmovzxbw(XMMRegister dst, Address src);
@ -1764,6 +1773,9 @@ private:
// duplicate 4-bytes integer data from src into 8 locations in dest
void vpbroadcastd(XMMRegister dst, XMMRegister src);
// Carry-Less Multiplication Quadword
void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask);
// AVX instruction which is used to clear upper 128 bits of YMM registers and
// to avoid transaction penalty between AVX and SSE states. There is no
// penalty if legacy SSE instructions are encoded using VEX prefix because

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -3512,6 +3512,22 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ bind(*stub->continuation());
}
void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
assert(op->crc()->is_single_cpu(), "crc must be register");
assert(op->val()->is_single_cpu(), "byte value must be register");
assert(op->result_opr()->is_single_cpu(), "result must be register");
Register crc = op->crc()->as_register();
Register val = op->val()->as_register();
Register res = op->result_opr()->as_register();
assert_different_registers(val, crc, res);
__ lea(res, ExternalAddress(StubRoutines::crc_table_addr()));
__ notl(crc); // ~crc
__ update_byte_crc32(crc, val, res);
__ notl(crc); // ~crc
__ mov(res, crc);
}
void LIR_Assembler::emit_lock(LIR_OpLock* op) {
Register obj = op->obj_opr()->as_register(); // may not be an oop

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -932,6 +932,81 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
__ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
// Make all state_for calls early since they can emit code
LIR_Opr result = rlock_result(x);
int flags = 0;
switch (x->id()) {
case vmIntrinsics::_updateCRC32: {
LIRItem crc(x->argument_at(0), this);
LIRItem val(x->argument_at(1), this);
crc.load_item();
val.load_item();
__ update_crc32(crc.result(), val.result(), result);
break;
}
case vmIntrinsics::_updateBytesCRC32:
case vmIntrinsics::_updateByteBufferCRC32: {
bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);
LIRItem crc(x->argument_at(0), this);
LIRItem buf(x->argument_at(1), this);
LIRItem off(x->argument_at(2), this);
LIRItem len(x->argument_at(3), this);
buf.load_item();
off.load_nonconstant();
LIR_Opr index = off.result();
int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
if (off.result()->is_constant()) {
index = LIR_OprFact::illegalOpr;
offset += off.result()->as_jint();
}
LIR_Opr base_op = buf.result();
#ifndef _LP64
if (!is_updateBytes) { // buf is a raw long address for updateByteBuffer
base_op = new_register(T_INT);
__ convert(Bytecodes::_l2i, buf.result(), base_op);
}
#else
if (index->is_valid()) {
LIR_Opr tmp = new_register(T_LONG);
__ convert(Bytecodes::_i2l, index, tmp);
index = tmp;
}
#endif
LIR_Address* a = new LIR_Address(base_op,
index,
LIR_Address::times_1,
offset,
T_BYTE);
BasicTypeList signature(3);
signature.append(T_INT);
signature.append(T_ADDRESS);
signature.append(T_INT);
CallingConvention* cc = frame_map()->c_calling_convention(&signature);
const LIR_Opr result_reg = result_register_for(x->type());
LIR_Opr addr = new_pointer_register();
__ leal(LIR_OprFact::address(a), addr);
crc.load_item_force(cc->at(0));
__ move(addr, cc->at(1));
len.load_item_force(cc->at(2));
__ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
__ move(result_reg, result);
break;
}
default: {
ShouldNotReachHere();
}
}
}
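// Sketch of the buffer addressing built above (names from the enclosing code):
//   addr = buf + (is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0)
//        + (off constant ? off : index)
// i.e. a constant offset folds into the displacement, otherwise the (possibly
// i2l-converted) index register is used.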
// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s

View file

@ -50,8 +50,9 @@ define_pd_global(intx, InitialCodeCacheSize, 160*K);
define_pd_global(intx, ReservedCodeCacheSize, 32*M );
define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
define_pd_global(uintx,CodeCacheMinBlockLength, 1);
define_pd_global(uintx,MetaspaceSize, 12*M );
define_pd_global(uintx, CodeCacheMinBlockLength, 1);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(uintx, MetaspaceSize, 12*M );
define_pd_global(bool, NeverActAsServerClassMachine, true );
define_pd_global(uint64_t,MaxRAM, 1ULL*G);
define_pd_global(bool, CICompileOSR, true );

View file

@ -85,7 +85,8 @@ define_pd_global(bool, OptoScheduling, false);
define_pd_global(bool, OptoBundling, false);
define_pd_global(intx, ReservedCodeCacheSize, 48*M);
define_pd_global(uintx,CodeCacheMinBlockLength, 4);
define_pd_global(uintx, CodeCacheMinBlockLength, 4);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
// Heap related flags
define_pd_global(uintx,MetaspaceSize, ScaleForWordSize(16*M));

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,6 +33,7 @@
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/monitorChunk.hpp"
#include "runtime/os.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
@ -54,16 +55,22 @@ bool frame::safe_for_sender(JavaThread *thread) {
address sp = (address)_sp;
address fp = (address)_fp;
address unextended_sp = (address)_unextended_sp;
// sp must be within the stack
bool sp_safe = (sp <= thread->stack_base()) &&
(sp >= thread->stack_base() - thread->stack_size());
// consider stack guards when trying to determine "safe" stack pointers
static size_t stack_guard_size = os::uses_stack_guard_pages() ? (StackYellowPages + StackRedPages) * os::vm_page_size() : 0;
size_t usable_stack_size = thread->stack_size() - stack_guard_size;
// sp must be within the usable part of the stack (not in guards)
bool sp_safe = (sp < thread->stack_base()) &&
(sp >= thread->stack_base() - usable_stack_size);
if (!sp_safe) {
return false;
}
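// For example, with 4K pages, StackYellowPages == 2 and StackRedPages == 1,
// stack_guard_size is 12K, so an sp pointing into the guard zone is rejected here.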
// unextended sp must be within the stack and above or equal sp
bool unextended_sp_safe = (unextended_sp <= thread->stack_base()) &&
bool unextended_sp_safe = (unextended_sp < thread->stack_base()) &&
(unextended_sp >= sp);
if (!unextended_sp_safe) {
@ -71,7 +78,8 @@ bool frame::safe_for_sender(JavaThread *thread) {
}
// an fp must be within the stack and above (but not equal) sp
bool fp_safe = (fp <= thread->stack_base()) && (fp > sp);
// the second evaluation on fp+ handles the situation where fp is -1
bool fp_safe = (fp < thread->stack_base() && (fp > sp) && (((fp + (return_addr_offset * sizeof(void*))) < thread->stack_base())));
// We know sp/unextended_sp are safe; only fp is questionable here
@ -86,6 +94,13 @@ bool frame::safe_for_sender(JavaThread *thread) {
// other generic buffer blobs are more problematic so we just assume they are
// ok. adapter blobs never have a frame complete and are never ok.
// check for a valid frame_size, otherwise we are unlikely to get a valid sender_pc
if (!Interpreter::contains(_pc) && _cb->frame_size() <= 0) {
//assert(0, "Invalid frame_size");
return false;
}
if (!_cb->is_frame_complete_at(_pc)) {
if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
return false;
@ -107,7 +122,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
address jcw = (address)entry_frame_call_wrapper();
bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > fp);
bool jcw_safe = (jcw < thread->stack_base()) && ( jcw > fp);
return jcw_safe;
@ -134,12 +149,6 @@ bool frame::safe_for_sender(JavaThread *thread) {
sender_pc = (address) *(sender_sp-1);
}
// We must always be able to find a recognizable pc
CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
if (sender_pc == NULL || sender_blob == NULL) {
return false;
}
// If the potential sender is the interpreter then we can do some more checking
if (Interpreter::contains(sender_pc)) {
@ -149,7 +158,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// is really a frame pointer.
intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
bool saved_fp_safe = ((address)saved_fp <= thread->stack_base()) && (saved_fp > sender_sp);
bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp);
if (!saved_fp_safe) {
return false;
@ -163,6 +172,17 @@ bool frame::safe_for_sender(JavaThread *thread) {
}
// We must always be able to find a recognizable pc
CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
if (sender_pc == NULL || sender_blob == NULL) {
return false;
}
// Could be a zombie method
if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
return false;
}
// Could just be some random pointer within the codeBlob
if (!sender_blob->code_contains(sender_pc)) {
return false;
@ -174,10 +194,9 @@ bool frame::safe_for_sender(JavaThread *thread) {
}
// Could be the call_stub
if (StubRoutines::returns_to_call_stub(sender_pc)) {
intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
bool saved_fp_safe = ((address)saved_fp <= thread->stack_base()) && (saved_fp > sender_sp);
bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp);
if (!saved_fp_safe) {
return false;
@ -190,15 +209,24 @@ bool frame::safe_for_sender(JavaThread *thread) {
// Validate the JavaCallWrapper an entry frame must have
address jcw = (address)sender.entry_frame_call_wrapper();
bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > (address)sender.fp());
bool jcw_safe = (jcw < thread->stack_base()) && ( jcw > (address)sender.fp());
return jcw_safe;
}
// If the frame size is 0 something is bad because every nmethod has a non-zero frame size
if (sender_blob->is_nmethod()) {
nmethod* nm = sender_blob->as_nmethod_or_null();
if (nm != NULL) {
if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc)) {
return false;
}
}
}
// If the frame size is 0 (or less) something is bad, because every nmethod has
// a non-zero frame size: the return address counts against the callee's frame.
if (sender_blob->frame_size() == 0) {
if (sender_blob->frame_size() <= 0) {
assert(!sender_blob->is_nmethod(), "should count return address at least");
return false;
}
@ -208,7 +236,9 @@ bool frame::safe_for_sender(JavaThread *thread) {
// should not be anything but the call stub (already covered), the interpreter (already covered)
// or an nmethod.
assert(sender_blob->is_nmethod(), "Impossible call chain");
if (!sender_blob->is_nmethod()) {
return false;
}
// Could put some more validation for the potential non-interpreted sender
// frame we'd create by calling sender if I could think of any. Wait for next crash in forte...
@ -557,7 +587,7 @@ bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
// validate ConstantPoolCache*
ConstantPoolCache* cp = *interpreter_frame_cache_addr();
if (cp == NULL || !cp->is_metadata()) return false;
if (cp == NULL || !cp->is_metaspace_object()) return false;
// validate locals

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,7 +55,7 @@ define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, InlineFrequencyCount, 100);
define_pd_global(intx, InlineSmallCode, 1000);
define_pd_global(intx, StackYellowPages, 2);
define_pd_global(intx, StackYellowPages, NOT_WINDOWS(2) WINDOWS_ONLY(3));
define_pd_global(intx, StackRedPages, 1);
#ifdef AMD64
// Very large C++ stack frames using solaris-amd64 optimized builds
@ -96,6 +96,9 @@ define_pd_global(uintx, CMSYoungGenPerWorker, 64*M); // default max size of CMS
product(intx, UseAVX, 99, \
"Highest supported AVX instructions set on x86/x64") \
\
product(bool, UseCLMUL, false, \
"Control whether CLMUL instructions can be used on x86/x64") \
\
diagnostic(bool, UseIncDec, true, \
"Use INC, DEC instructions on x86") \
\

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,6 +39,8 @@
address generate_empty_entry(void);
address generate_accessor_entry(void);
address generate_Reference_get_entry();
address generate_CRC32_update_entry();
address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind);
void lock_method(void);
void generate_stack_overflow_check(void);

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -2794,6 +2794,15 @@ void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src) {
}
}
void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src) {
if (reachable(src)) {
Assembler::movdqa(dst, as_Address(src));
} else {
lea(rscratch1, src);
Assembler::movdqa(dst, Address(rscratch1, 0));
}
}
void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
if (reachable(src)) {
Assembler::movsd(dst, as_Address(src));
@ -6388,6 +6397,193 @@ void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
bind(L_done);
}
/**
* Emits code to update CRC-32 with a byte value according to constants in table
*
* @param [in,out] crc   Register containing the crc.
* @param [in]     val   Register containing the byte to fold into the CRC.
* @param [in]     table Register containing the table of crc constants.
*
* uint32_t crc;
* val = crc_table[(val ^ crc) & 0xFF];
* crc = val ^ (crc >> 8);
*
*/
void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
xorl(val, crc);
andl(val, 0xFF);
shrl(crc, 8); // unsigned shift
xorl(crc, Address(table, val, Address::times_4, 0));
}
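// The same per-byte step as a plain C++ loop (table name hypothetical; the
// surrounding code performs the ~crc inversion before and after):
//   uint32_t crc32_update(uint32_t crc, const uint8_t* buf, size_t len,
//                         const uint32_t* crc_table) {
//     for (size_t i = 0; i < len; i++)
//       crc = crc_table[(crc ^ buf[i]) & 0xFF] ^ (crc >> 8);
//     return crc;
//   }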
/**
* Fold 128-bit data chunk
*/
void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
vpclmulhdq(xtmp, xK, xcrc); // [123:64]
vpclmulldq(xcrc, xK, xcrc); // [63:0]
vpxor(xcrc, xcrc, Address(buf, offset), false /* vector256 */);
pxor(xcrc, xtmp);
}
void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) {
vpclmulhdq(xtmp, xK, xcrc);
vpclmulldq(xcrc, xK, xcrc);
pxor(xcrc, xbuf);
pxor(xcrc, xtmp);
}
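// Sketch of the algebra behind the folds (exact constant exponents elided):
// with the accumulator A split into hi64/lo64 halves and K_hi/K_lo the two
// precomputed constants in xK, A' = hi64(A)*K_hi xor lo64(A)*K_lo xor data
// is congruent, modulo the CRC polynomial, to A advanced past one 128-bit chunk.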
/**
* 8-bit folds to compute 32-bit CRC
*
* uint64_t xcrc;
* timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8);
*/
void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) {
movdl(tmp, xcrc);
andl(tmp, 0xFF);
movdl(xtmp, Address(table, tmp, Address::times_4, 0));
psrldq(xcrc, 1); // unsigned shift one byte
pxor(xcrc, xtmp);
}
/**
* uint32_t crc;
* timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
*/
void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
movl(tmp, crc);
andl(tmp, 0xFF);
shrl(crc, 8);
xorl(crc, Address(table, tmp, Address::times_4, 0));
}
/**
* @param crc register containing existing CRC (32-bit)
* @param buf register pointing to input byte buffer (byte*)
* @param len register containing number of bytes
* @param table register that will contain address of CRC table
* @param tmp scratch register
*/
void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) {
assert_different_registers(crc, buf, len, table, tmp, rax);
Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;
lea(table, ExternalAddress(StubRoutines::crc_table_addr()));
notl(crc); // ~crc
cmpl(len, 16);
jcc(Assembler::less, L_tail);
// Align buffer to 16 bytes
movl(tmp, buf);
andl(tmp, 0xF);
jccb(Assembler::zero, L_aligned);
subl(tmp, 16);
addl(len, tmp);
align(4);
BIND(L_align_loop);
movsbl(rax, Address(buf, 0)); // load byte with sign extension
update_byte_crc32(crc, rax, table);
increment(buf);
incrementl(tmp);
jccb(Assembler::less, L_align_loop);
BIND(L_aligned);
movl(tmp, len); // save
shrl(len, 4);
jcc(Assembler::zero, L_tail_restore);
// Fold crc into first bytes of vector
movdqa(xmm1, Address(buf, 0));
movdl(rax, xmm1);
xorl(crc, rax);
pinsrd(xmm1, crc, 0);
addptr(buf, 16);
subl(len, 4); // len > 0
jcc(Assembler::less, L_fold_tail);
movdqa(xmm2, Address(buf, 0));
movdqa(xmm3, Address(buf, 16));
movdqa(xmm4, Address(buf, 32));
addptr(buf, 48);
subl(len, 3);
jcc(Assembler::lessEqual, L_fold_512b);
// Fold total 512 bits of polynomial on each iteration,
// 128 bits per each of 4 parallel streams.
movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32));
align(32);
BIND(L_fold_512b_loop);
fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0);
fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16);
fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32);
fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48);
addptr(buf, 64);
subl(len, 4);
jcc(Assembler::greater, L_fold_512b_loop);
// Fold 512 bits to 128 bits.
BIND(L_fold_512b);
movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16));
fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2);
fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3);
fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4);
// Fold the rest of 128 bits data chunks
BIND(L_fold_tail);
addl(len, 3);
jccb(Assembler::lessEqual, L_fold_128b);
movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16));
BIND(L_fold_tail_loop);
fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0);
addptr(buf, 16);
decrementl(len);
jccb(Assembler::greater, L_fold_tail_loop);
// Fold 128 bits in xmm1 down into 32 bits in crc register.
BIND(L_fold_128b);
movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()));
vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
vpand(xmm3, xmm0, xmm2, false /* vector256 */);
vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
psrldq(xmm1, 8);
psrldq(xmm2, 4);
pxor(xmm0, xmm1);
pxor(xmm0, xmm2);
// 8 8-bit folds to compute 32-bit CRC.
for (int j = 0; j < 4; j++) {
fold_8bit_crc32(xmm0, table, xmm1, rax);
}
movdl(crc, xmm0); // mov 32 bits to general register
for (int j = 0; j < 4; j++) {
fold_8bit_crc32(crc, table, rax);
}
BIND(L_tail_restore);
movl(len, tmp); // restore
BIND(L_tail);
andl(len, 0xf);
jccb(Assembler::zero, L_exit);
// Fold the rest of bytes
align(4);
BIND(L_tail_loop);
movsbl(rax, Address(buf, 0)); // load byte with sign extension
update_byte_crc32(crc, rax, table);
increment(buf);
decrementl(len);
jccb(Assembler::greater, L_tail_loop);
BIND(L_exit);
notl(crc); // ~crc
}
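// Outline of kernel_crc32 above: (1) byte-fold until buf is 16-byte aligned;
// (2) fold 512 bits per iteration across four 128-bit streams; (3) collapse
// 512 -> 128 bits and fold any remaining 16-byte chunks; (4) reduce 128 -> 32
// bits with carry-less multiplies and 8-bit table folds; (5) byte-fold the tail.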
#undef BIND
#undef BLOCK_COMMENT

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -899,6 +899,11 @@ public:
void movdqu(XMMRegister dst, XMMRegister src) { Assembler::movdqu(dst, src); }
void movdqu(XMMRegister dst, AddressLiteral src);
// Move Aligned Double Quadword
void movdqa(XMMRegister dst, Address src) { Assembler::movdqa(dst, src); }
void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); }
void movdqa(XMMRegister dst, AddressLiteral src);
void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); }
void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); }
@ -1027,6 +1032,16 @@ public:
Assembler::vinsertf128h(dst, nds, src);
}
// Carry-Less Multiplication Quadword
void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
// 0x00 - multiply lower 64 bits [0:63]
Assembler::vpclmulqdq(dst, nds, src, 0x00);
}
void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
// 0x11 - multiply upper 64 bits [64:127]
Assembler::vpclmulqdq(dst, nds, src, 0x11);
}
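// Per the PCLMULQDQ encoding, imm8 bit 0 selects the low/high qword of src and
// bit 4 the low/high qword of nds; hence 0x00 multiplies the two low qwords
// and 0x11 the two high qwords, as the wrappers above hard-code.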
// Data
void cmov32( Condition cc, Register dst, Address src);
@ -1143,6 +1158,16 @@ public:
XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
XMMRegister tmp4, Register tmp5, Register result);
// CRC32 code for java.util.zip.CRC32::updateBytes() instrinsic.
void update_byte_crc32(Register crc, Register val, Register table);
void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
// Fold 128-bit data chunk
void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
// Fold 8-bit data
void fold_8bit_crc32(Register crc, Register table, Register tmp);
void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
#undef VIRTUAL
};

View file

@ -177,30 +177,6 @@ address Relocation::pd_get_address_from_code() {
return *pd_address_in_code();
}
int Relocation::pd_breakpoint_size() {
// minimum breakpoint size, in short words
return NativeIllegalInstruction::instruction_size / sizeof(short);
}
void Relocation::pd_swap_in_breakpoint(address x, short* instrs, int instrlen) {
Untested("pd_swap_in_breakpoint");
if (instrs != NULL) {
assert(instrlen * sizeof(short) == NativeIllegalInstruction::instruction_size, "enough instrlen in reloc. data");
for (int i = 0; i < instrlen; i++) {
instrs[i] = ((short*)x)[i];
}
}
NativeIllegalInstruction::insert(x);
}
void Relocation::pd_swap_out_breakpoint(address x, short* instrs, int instrlen) {
Untested("pd_swap_out_breakpoint");
assert(NativeIllegalInstruction::instruction_size == sizeof(short), "right address unit for update");
NativeInstruction* ni = nativeInstruction_at(x);
*(short*)ni->addr_at(0) = instrs[0];
}
void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
#ifdef _LP64
if (!Assembler::is_polling_page_far()) {

View file

@ -1429,6 +1429,8 @@ static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType
assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
"possible collision");
__ block_comment("unpack_array_argument {");
// Pass the length, ptr pair
Label is_null, done;
VMRegPair tmp;
@ -1453,6 +1455,8 @@ static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType
move_ptr(masm, tmp, body_arg);
move32_64(masm, tmp, length_arg);
__ bind(done);
__ block_comment("} unpack_array_argument");
}
@ -2170,27 +2174,34 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
}
}
// point c_arg at the first arg that is already loaded in case we
// need to spill before we call out
int c_arg = total_c_args - total_in_args;
int c_arg;
// Pre-load a static method's oop into r14. Used both by locking code and
// the normal JNI call code.
if (method->is_static() && !is_critical_native) {
if (!is_critical_native) {
// point c_arg at the first arg that is already loaded in case we
// need to spill before we call out
c_arg = total_c_args - total_in_args;
// load oop into a register
__ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
if (method->is_static()) {
// Now handlize the static class mirror; it's known not-null.
__ movptr(Address(rsp, klass_offset), oop_handle_reg);
map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
// load oop into a register
__ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
// Now get the handle
__ lea(oop_handle_reg, Address(rsp, klass_offset));
// store the klass handle as second argument
__ movptr(c_rarg1, oop_handle_reg);
// and protect the arg if we must spill
c_arg--;
// Now handlize the static class mirror; it's known not-null.
__ movptr(Address(rsp, klass_offset), oop_handle_reg);
map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
// Now get the handle
__ lea(oop_handle_reg, Address(rsp, klass_offset));
// store the klass handle as second argument
__ movptr(c_rarg1, oop_handle_reg);
// and protect the arg if we must spill
c_arg--;
}
} else {
// For JNI critical methods we need to save all registers in save_args.
c_arg = 0;
}
// Change state to native (we save the return address in the thread, since it might not

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -83,7 +83,7 @@ class StubGenerator: public StubCodeGenerator {
private:
#ifdef PRODUCT
#define inc_counter_np(counter) (0)
#define inc_counter_np(counter) ((void)0)
#else
void inc_counter_np_(int& counter) {
__ incrementl(ExternalAddress((address)&counter));
@ -2713,6 +2713,92 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
/**
* Arguments:
*
* Inputs:
* rsp(4) - int crc
* rsp(8) - byte* buf
* rsp(12) - int length
*
* Output:
* rax - int crc result
*/
address generate_updateBytesCRC32() {
assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions");
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");
address start = __ pc();
const Register crc = rdx; // crc
const Register buf = rsi; // source java byte array address
const Register len = rcx; // length
const Register table = rdi; // crc_table address (reuse register)
const Register tmp = rbx;
assert_different_registers(crc, buf, len, table, tmp, rax);
BLOCK_COMMENT("Entry:");
__ enter(); // required for proper stackwalking of RuntimeStub frame
__ push(rsi);
__ push(rdi);
__ push(rbx);
Address crc_arg(rbp, 8 + 0);
Address buf_arg(rbp, 8 + 4);
Address len_arg(rbp, 8 + 8);
// Load up:
__ movl(crc, crc_arg);
__ movptr(buf, buf_arg);
__ movl(len, len_arg);
__ kernel_crc32(crc, buf, len, table, tmp);
__ movl(rax, crc);
__ pop(rbx);
__ pop(rdi);
__ pop(rsi);
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
return start;
}
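For orientation, a minimal C++ sketch (not part of this patch) of the byte-at-a-time, table-driven CRC32 that the kernel_crc32 fast path replaces; it assumes the zlib-style crc_table embedded below and the standard pre/post inversion required by java.util.zip.CRC32 semantics (handled inside the macro-assembler code in the patch):

#include <stdint.h>
#include <stddef.h>

// Reference model only; 'table' stands in for StubRoutines::x86::_crc_table.
static uint32_t crc32_reference(uint32_t crc, const uint8_t* buf, size_t len,
                                const uint32_t table[256]) {
  crc = ~crc;                                         // pre-invert
  while (len-- > 0) {
    crc = table[(crc ^ *buf++) & 0xff] ^ (crc >> 8);  // one table step per byte
  }
  return ~crc;                                        // post-invert
}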
// Safefetch stubs.
void generate_safefetch(const char* name, int size, address* entry,
address* fault_pc, address* continuation_pc) {
// safefetch signatures:
// int SafeFetch32(int* adr, int errValue);
// intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
StubCodeMark mark(this, "StubRoutines", name);
// Entry point, pc or function descriptor.
*entry = __ pc();
__ movl(rax, Address(rsp, 0x8));
__ movl(rcx, Address(rsp, 0x4));
// Load *adr into eax, may fault.
*fault_pc = __ pc();
switch (size) {
case 4:
// int32_t
__ movl(rax, Address(rcx, 0));
break;
case 8:
// int64_t
Unimplemented();
break;
default:
ShouldNotReachHere();
}
// Return errValue or *adr.
*continuation_pc = __ pc();
__ ret(0);
}
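A sketch of how callers are expected to reach this stub; the real SafeFetch32 wrapper lives in the shared stubRoutines code and is not shown in this hunk, so the wrapper below is illustrative only:

// Hypothetical wrapper around the generated entry point:
typedef int (*SafeFetch32Stub)(int* adr, int errValue);
inline int SafeFetch32(int* adr, int errValue) {
  return CAST_TO_FN_PTR(SafeFetch32Stub, StubRoutines::_safefetch32_entry)(adr, errValue);
}
// If the load at *fault_pc traps, the VM's signal handler redirects the PC to
// *continuation_pc; rax was pre-loaded with errValue, so the caller sees that
// value instead of a crash.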
public:
// Information about frame layout at time of blocking runtime call.
@ -2887,6 +2973,12 @@ class StubGenerator: public StubCodeGenerator {
// Build this early so it's available for the interpreter
StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
if (UseCRC32Intrinsics) {
// set the table address before generating the stubs that use it
StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
}
}
@ -2919,6 +3011,14 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt();
}
// Safefetch stubs.
generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
&StubRoutines::_safefetch32_fault_pc,
&StubRoutines::_safefetch32_continuation_pc);
StubRoutines::_safefetchN_entry = StubRoutines::_safefetch32_entry;
StubRoutines::_safefetchN_fault_pc = StubRoutines::_safefetch32_fault_pc;
StubRoutines::_safefetchN_continuation_pc = StubRoutines::_safefetch32_continuation_pc;
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -81,7 +81,7 @@ class StubGenerator: public StubCodeGenerator {
private:
#ifdef PRODUCT
#define inc_counter_np(counter) (0)
#define inc_counter_np(counter) ((void)0)
#else
void inc_counter_np_(int& counter) {
// This can destroy rscratch1 if counter is far from the code cache
@ -3357,7 +3357,45 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
// Safefetch stubs.
void generate_safefetch(const char* name, int size, address* entry,
address* fault_pc, address* continuation_pc) {
// safefetch signatures:
// int SafeFetch32(int* adr, int errValue);
// intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
//
// arguments:
// c_rarg0 = adr
// c_rarg1 = errValue
//
// result:
// rax = *adr or errValue
StubCodeMark mark(this, "StubRoutines", name);
// Entry point, pc or function descriptor.
*entry = __ pc();
// Load *adr into c_rarg1, may fault.
*fault_pc = __ pc();
switch (size) {
case 4:
// int32_t
__ movl(c_rarg1, Address(c_rarg0, 0));
break;
case 8:
// int64_t
__ movq(c_rarg1, Address(c_rarg0, 0));
break;
default:
ShouldNotReachHere();
}
// return errValue or *adr
*continuation_pc = __ pc();
__ movq(rax, c_rarg1);
__ ret(0);
}
// This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time
// to hide instruction latency
@ -3584,7 +3622,45 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
/**
* Arguments:
*
* Inputs:
* c_rarg0 - int crc
* c_rarg1 - byte* buf
* c_rarg2 - int length
*
* Output:
* rax - int crc result
*/
address generate_updateBytesCRC32() {
assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions");
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");
address start = __ pc();
// Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
// Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
// rscratch1: r10
const Register crc = c_rarg0; // crc
const Register buf = c_rarg1; // source java byte array address
const Register len = c_rarg2; // length
const Register table = c_rarg3; // crc_table address (reuse register)
const Register tmp = r11;
assert_different_registers(crc, buf, len, table, tmp, rax);
BLOCK_COMMENT("Entry:");
__ enter(); // required for proper stackwalking of RuntimeStub frame
__ kernel_crc32(crc, buf, len, table, tmp);
__ movl(rax, crc);
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
return start;
}
#undef __
#define __ masm->
@ -3736,6 +3812,11 @@ class StubGenerator: public StubCodeGenerator {
CAST_FROM_FN_PTR(address,
SharedRuntime::
throw_StackOverflowError));
if (UseCRC32Intrinsics) {
// set the table address before generating the stubs that use it
StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
}
}
void generate_all() {
@ -3790,6 +3871,14 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
}
// Safefetch stubs.
generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
&StubRoutines::_safefetch32_fault_pc,
&StubRoutines::_safefetch32_continuation_pc);
generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
&StubRoutines::_safefetchN_fault_pc,
&StubRoutines::_safefetchN_continuation_pc);
}
public:

View file

@ -0,0 +1,130 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
// Implementation of the platform-specific part of StubRoutines - for
// a description of how to extend it, see the stubRoutines.hpp file.
address StubRoutines::x86::_verify_mxcsr_entry = NULL;
address StubRoutines::x86::_key_shuffle_mask_addr = NULL;
uint64_t StubRoutines::x86::_crc_by128_masks[] =
{
/* The fields in this structure are arranged so that they can be
* picked up two at a time with 128-bit loads.
*
* Because of the flipped bit order of this CRC polynomial,
* the constant for X**N is left-shifted by 1. This is because
* a 64 x 64 polynomial multiply produces a 127-bit result
* whose highest term is always aligned to bit 0 of the container.
* Pre-shifting by one fixes this, at the cost of potentially making
* a 32-bit constant no longer fit in a 32-bit container (thus the
* use of uint64_t, though this is also the operand size of the
* carry-less multiply instruction).
*
* In addition, the flipped bit order and the highest-term-at-lowest-bit
* multiply change the constants used. The 96-bit result will be
* aligned to the high-term end of the target 128-bit container,
* not the low-term end; that is, instead of a 512-bit or 576-bit fold,
* it is a 480 (=512-32) or 544 (=512+64-32) bit fold.
*
* This causes additional problems in the 128-to-64-bit reduction; see the
* code for details. By storing a mask in the otherwise unused half of
* a 128-bit constant, bits can be cleared before multiplication without
* storing and reloading. Note that staying on a 128-bit datapath means
* that some data is uselessly stored and some unused data is intersected
* with an irrelevant constant.
*/
((uint64_t) 0xffffffffUL), /* low of K_M_64 */
((uint64_t) 0xb1e6b092U << 1), /* high of K_M_64 */
((uint64_t) 0xba8ccbe8U << 1), /* low of K_160_96 */
((uint64_t) 0x6655004fU << 1), /* high of K_160_96 */
((uint64_t) 0xaa2215eaU << 1), /* low of K_544_480 */
((uint64_t) 0xe3720acbU << 1) /* high of K_544_480 */
};
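To make the pre-shift concrete, here is a naive reference model (not in the patch) of a 64 x 64 carry-less multiply: both operands have degree <= 63, so the product has degree <= 126 and bit 127 of the 128-bit result is always zero, leaving the highest term one position short of the container's top bit.

#include <stdint.h>

// Naive PCLMULQDQ model: XOR-accumulate shifted copies of 'a'.
static void clmul64(uint64_t a, uint64_t b, uint64_t* lo, uint64_t* hi) {
  uint64_t l = 0, h = 0;
  for (int i = 0; i < 64; i++) {
    if ((b >> i) & 1) {
      l ^= a << i;
      if (i != 0) h ^= a >> (64 - i);  // bits that spill into the high half
    }
  }
  *lo = l;
  *hi = h;  // bit 63 of *hi (product bit 127) can never be set
}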
/**
* crc_table[] from jdk/src/share/native/java/util/zip/zlib-1.2.5/crc32.h
*/
juint StubRoutines::x86::_crc_table[] =
{
0x00000000UL, 0x77073096UL, 0xee0e612cUL, 0x990951baUL, 0x076dc419UL,
0x706af48fUL, 0xe963a535UL, 0x9e6495a3UL, 0x0edb8832UL, 0x79dcb8a4UL,
0xe0d5e91eUL, 0x97d2d988UL, 0x09b64c2bUL, 0x7eb17cbdUL, 0xe7b82d07UL,
0x90bf1d91UL, 0x1db71064UL, 0x6ab020f2UL, 0xf3b97148UL, 0x84be41deUL,
0x1adad47dUL, 0x6ddde4ebUL, 0xf4d4b551UL, 0x83d385c7UL, 0x136c9856UL,
0x646ba8c0UL, 0xfd62f97aUL, 0x8a65c9ecUL, 0x14015c4fUL, 0x63066cd9UL,
0xfa0f3d63UL, 0x8d080df5UL, 0x3b6e20c8UL, 0x4c69105eUL, 0xd56041e4UL,
0xa2677172UL, 0x3c03e4d1UL, 0x4b04d447UL, 0xd20d85fdUL, 0xa50ab56bUL,
0x35b5a8faUL, 0x42b2986cUL, 0xdbbbc9d6UL, 0xacbcf940UL, 0x32d86ce3UL,
0x45df5c75UL, 0xdcd60dcfUL, 0xabd13d59UL, 0x26d930acUL, 0x51de003aUL,
0xc8d75180UL, 0xbfd06116UL, 0x21b4f4b5UL, 0x56b3c423UL, 0xcfba9599UL,
0xb8bda50fUL, 0x2802b89eUL, 0x5f058808UL, 0xc60cd9b2UL, 0xb10be924UL,
0x2f6f7c87UL, 0x58684c11UL, 0xc1611dabUL, 0xb6662d3dUL, 0x76dc4190UL,
0x01db7106UL, 0x98d220bcUL, 0xefd5102aUL, 0x71b18589UL, 0x06b6b51fUL,
0x9fbfe4a5UL, 0xe8b8d433UL, 0x7807c9a2UL, 0x0f00f934UL, 0x9609a88eUL,
0xe10e9818UL, 0x7f6a0dbbUL, 0x086d3d2dUL, 0x91646c97UL, 0xe6635c01UL,
0x6b6b51f4UL, 0x1c6c6162UL, 0x856530d8UL, 0xf262004eUL, 0x6c0695edUL,
0x1b01a57bUL, 0x8208f4c1UL, 0xf50fc457UL, 0x65b0d9c6UL, 0x12b7e950UL,
0x8bbeb8eaUL, 0xfcb9887cUL, 0x62dd1ddfUL, 0x15da2d49UL, 0x8cd37cf3UL,
0xfbd44c65UL, 0x4db26158UL, 0x3ab551ceUL, 0xa3bc0074UL, 0xd4bb30e2UL,
0x4adfa541UL, 0x3dd895d7UL, 0xa4d1c46dUL, 0xd3d6f4fbUL, 0x4369e96aUL,
0x346ed9fcUL, 0xad678846UL, 0xda60b8d0UL, 0x44042d73UL, 0x33031de5UL,
0xaa0a4c5fUL, 0xdd0d7cc9UL, 0x5005713cUL, 0x270241aaUL, 0xbe0b1010UL,
0xc90c2086UL, 0x5768b525UL, 0x206f85b3UL, 0xb966d409UL, 0xce61e49fUL,
0x5edef90eUL, 0x29d9c998UL, 0xb0d09822UL, 0xc7d7a8b4UL, 0x59b33d17UL,
0x2eb40d81UL, 0xb7bd5c3bUL, 0xc0ba6cadUL, 0xedb88320UL, 0x9abfb3b6UL,
0x03b6e20cUL, 0x74b1d29aUL, 0xead54739UL, 0x9dd277afUL, 0x04db2615UL,
0x73dc1683UL, 0xe3630b12UL, 0x94643b84UL, 0x0d6d6a3eUL, 0x7a6a5aa8UL,
0xe40ecf0bUL, 0x9309ff9dUL, 0x0a00ae27UL, 0x7d079eb1UL, 0xf00f9344UL,
0x8708a3d2UL, 0x1e01f268UL, 0x6906c2feUL, 0xf762575dUL, 0x806567cbUL,
0x196c3671UL, 0x6e6b06e7UL, 0xfed41b76UL, 0x89d32be0UL, 0x10da7a5aUL,
0x67dd4accUL, 0xf9b9df6fUL, 0x8ebeeff9UL, 0x17b7be43UL, 0x60b08ed5UL,
0xd6d6a3e8UL, 0xa1d1937eUL, 0x38d8c2c4UL, 0x4fdff252UL, 0xd1bb67f1UL,
0xa6bc5767UL, 0x3fb506ddUL, 0x48b2364bUL, 0xd80d2bdaUL, 0xaf0a1b4cUL,
0x36034af6UL, 0x41047a60UL, 0xdf60efc3UL, 0xa867df55UL, 0x316e8eefUL,
0x4669be79UL, 0xcb61b38cUL, 0xbc66831aUL, 0x256fd2a0UL, 0x5268e236UL,
0xcc0c7795UL, 0xbb0b4703UL, 0x220216b9UL, 0x5505262fUL, 0xc5ba3bbeUL,
0xb2bd0b28UL, 0x2bb45a92UL, 0x5cb36a04UL, 0xc2d7ffa7UL, 0xb5d0cf31UL,
0x2cd99e8bUL, 0x5bdeae1dUL, 0x9b64c2b0UL, 0xec63f226UL, 0x756aa39cUL,
0x026d930aUL, 0x9c0906a9UL, 0xeb0e363fUL, 0x72076785UL, 0x05005713UL,
0x95bf4a82UL, 0xe2b87a14UL, 0x7bb12baeUL, 0x0cb61b38UL, 0x92d28e9bUL,
0xe5d5be0dUL, 0x7cdcefb7UL, 0x0bdbdf21UL, 0x86d3d2d4UL, 0xf1d4e242UL,
0x68ddb3f8UL, 0x1fda836eUL, 0x81be16cdUL, 0xf6b9265bUL, 0x6fb077e1UL,
0x18b74777UL, 0x88085ae6UL, 0xff0f6a70UL, 0x66063bcaUL, 0x11010b5cUL,
0x8f659effUL, 0xf862ae69UL, 0x616bffd3UL, 0x166ccf45UL, 0xa00ae278UL,
0xd70dd2eeUL, 0x4e048354UL, 0x3903b3c2UL, 0xa7672661UL, 0xd06016f7UL,
0x4969474dUL, 0x3e6e77dbUL, 0xaed16a4aUL, 0xd9d65adcUL, 0x40df0b66UL,
0x37d83bf0UL, 0xa9bcae53UL, 0xdebb9ec5UL, 0x47b2cf7fUL, 0x30b5ffe9UL,
0xbdbdf21cUL, 0xcabac28aUL, 0x53b39330UL, 0x24b4a3a6UL, 0xbad03605UL,
0xcdd70693UL, 0x54de5729UL, 0x23d967bfUL, 0xb3667a2eUL, 0xc4614ab8UL,
0x5d681b02UL, 0x2a6f2b94UL, 0xb40bbe37UL, 0xc30c8ea1UL, 0x5a05df1bUL,
0x2d02ef8dUL
};

View file

@ -0,0 +1,45 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_X86_VM_STUBROUTINES_X86_HPP
#define CPU_X86_VM_STUBROUTINES_X86_HPP
// This file holds the platform specific parts of the StubRoutines
// definition. See stubRoutines.hpp for a description on how to
// extend it.
private:
static address _verify_mxcsr_entry;
// shuffle mask for fixing up 128-bit words consisting of big-endian 32-bit integers
static address _key_shuffle_mask_addr;
// masks and table for CRC32
static uint64_t _crc_by128_masks[];
static juint _crc_table[];
public:
static address verify_mxcsr_entry() { return _verify_mxcsr_entry; }
static address key_shuffle_mask_addr() { return _key_shuffle_mask_addr; }
static address crc_by128_masks_addr() { return (address)_crc_by128_masks; }
#endif // CPU_X86_VM_STUBROUTINES_X86_HPP

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,6 +31,4 @@
// Implementation of the platform-specific part of StubRoutines - for
// a description of how to extend it, see the stubRoutines.hpp file.
address StubRoutines::x86::_verify_mxcsr_entry = NULL;
address StubRoutines::x86::_verify_fpu_cntrl_wrd_entry = NULL;
address StubRoutines::x86::_key_shuffle_mask_addr = NULL;

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,15 +39,12 @@ class x86 {
friend class VMStructs;
private:
static address _verify_mxcsr_entry;
static address _verify_fpu_cntrl_wrd_entry;
// shuffle mask for fixing up 128-bit words consisting of big-endian 32-bit integers
static address _key_shuffle_mask_addr;
public:
static address verify_mxcsr_entry() { return _verify_mxcsr_entry; }
static address verify_fpu_cntrl_wrd_entry() { return _verify_fpu_cntrl_wrd_entry; }
static address key_shuffle_mask_addr() { return _key_shuffle_mask_addr; }
# include "stubRoutines_x86.hpp"
};

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,8 +34,6 @@
address StubRoutines::x86::_get_previous_fp_entry = NULL;
address StubRoutines::x86::_get_previous_sp_entry = NULL;
address StubRoutines::x86::_verify_mxcsr_entry = NULL;
address StubRoutines::x86::_f2i_fixup = NULL;
address StubRoutines::x86::_f2l_fixup = NULL;
address StubRoutines::x86::_d2i_fixup = NULL;
@ -45,4 +43,3 @@ address StubRoutines::x86::_float_sign_flip = NULL;
address StubRoutines::x86::_double_sign_mask = NULL;
address StubRoutines::x86::_double_sign_flip = NULL;
address StubRoutines::x86::_mxcsr_std = NULL;
address StubRoutines::x86::_key_shuffle_mask_addr = NULL;

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,7 +42,6 @@ class x86 {
private:
static address _get_previous_fp_entry;
static address _get_previous_sp_entry;
static address _verify_mxcsr_entry;
static address _f2i_fixup;
static address _f2l_fixup;
@ -54,8 +53,6 @@ class x86 {
static address _double_sign_mask;
static address _double_sign_flip;
static address _mxcsr_std;
// shuffle mask for fixing up 128-bit words consisting of big-endian 32-bit integers
static address _key_shuffle_mask_addr;
public:
@ -69,11 +66,6 @@ class x86 {
return _get_previous_sp_entry;
}
static address verify_mxcsr_entry()
{
return _verify_mxcsr_entry;
}
static address f2i_fixup()
{
return _f2i_fixup;
@ -119,7 +111,7 @@ class x86 {
return _mxcsr_std;
}
static address key_shuffle_mask_addr() { return _key_shuffle_mask_addr; }
# include "stubRoutines_x86.hpp"
};

View file

@ -868,6 +868,120 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
return generate_accessor_entry();
}
/**
* Method entry for static native methods:
* int java.util.zip.CRC32.update(int crc, int b)
*/
address InterpreterGenerator::generate_CRC32_update_entry() {
if (UseCRC32Intrinsics) {
address entry = __ pc();
// rbx: Method*
// rsi: senderSP, must be preserved for slow path; set SP to it on fast path
// rdx: scratch
// rdi: scratch
Label slow_path;
// If we need a safepoint check, generate full interpreter entry.
ExternalAddress state(SafepointSynchronize::address_of_state());
__ cmp32(state, SafepointSynchronize::_not_synchronized);
__ jcc(Assembler::notEqual, slow_path);
// We don't generate a local frame and don't align the stack because
// we call stub code and there is no safepoint on this path.
// Load parameters
const Register crc = rax; // crc
const Register val = rdx; // source java byte value
const Register tbl = rdi; // scratch
// Arguments are reversed on java expression stack
__ movl(val, Address(rsp, wordSize)); // byte value
__ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC
__ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr()));
__ notl(crc); // ~crc
__ update_byte_crc32(crc, val, tbl);
__ notl(crc); // ~crc
// result in rax
// _areturn
__ pop(rdi); // get return address
__ mov(rsp, rsi); // set sp to sender sp
__ jmp(rdi);
// generate a vanilla native entry as the slow path
__ bind(slow_path);
(void) generate_native_entry(false);
return entry;
}
return generate_native_entry(false);
}
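In C terms, the fast path above computes the following (a sketch; update_byte_crc32 performs the table step and the two notl instructions supply the inversions):

static uint32_t crc32_update_one_byte(uint32_t crc, uint8_t b,
                                      const uint32_t* tbl) {
  crc = ~crc;                                // notl(crc)
  crc = tbl[(crc ^ b) & 0xff] ^ (crc >> 8);  // update_byte_crc32
  return ~crc;                               // notl(crc)
}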
/**
* Method entry for static native methods:
* int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
* int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
*/
address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
if (UseCRC32Intrinsics) {
address entry = __ pc();
// rbx: Method*
// rsi: senderSP, must be preserved for slow path; set SP to it on fast path
// rdx: scratch
// rdi: scratch
Label slow_path;
// If we need a safepoint check, generate full interpreter entry.
ExternalAddress state(SafepointSynchronize::address_of_state());
__ cmp32(state, SafepointSynchronize::_not_synchronized);
__ jcc(Assembler::notEqual, slow_path);
// We don't generate a local frame and don't align the stack because
// we call stub code and there is no safepoint on this path.
// Load parameters
const Register crc = rax; // crc
const Register buf = rdx; // source java byte array address
const Register len = rdi; // length
// Arguments are reversed on java expression stack
__ movl(len, Address(rsp, wordSize)); // Length
// Calculate address of start element
if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
__ movptr(buf, Address(rsp, 3*wordSize)); // long buf
__ addptr(buf, Address(rsp, 2*wordSize)); // + offset
__ movl(crc, Address(rsp, 5*wordSize)); // Initial CRC
} else {
__ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array
__ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
__ addptr(buf, Address(rsp, 2*wordSize)); // + offset
__ movl(crc, Address(rsp, 4*wordSize)); // Initial CRC
}
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
// result in rax
// _areturn
__ pop(rdi); // get return address
__ mov(rsp, rsi); // set sp to sender sp
__ jmp(rdi);
// generate a vanilla native entry as the slow path
__ bind(slow_path);
(void) generate_native_entry(false);
return entry;
}
return generate_native_entry(false);
}
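Reading the loads above, the incoming Java expression stack appears to be laid out as follows (one wordSize per slot; the long buf argument occupies two slots, which is why the CRC sits one slot higher for updateByteBuffer):

// rsp + 1*wordSize : len
// rsp + 2*wordSize : off
// rsp + 3*wordSize : buf (byte[] oop, or the address word of the long buf)
// rsp + 4*wordSize : crc (updateBytes) / second slot of the long buf
// rsp + 5*wordSize : crc (updateByteBuffer)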
//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
@ -1501,15 +1615,16 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
// determine code generation flags
bool synchronized = false;
address entry_point = NULL;
InterpreterGenerator* ig_this = (InterpreterGenerator*)this;
switch (kind) {
case Interpreter::zerolocals : break;
case Interpreter::zerolocals_synchronized: synchronized = true; break;
case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break;
case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); break;
case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break;
case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;
case Interpreter::zerolocals : break;
case Interpreter::zerolocals_synchronized: synchronized = true; break;
case Interpreter::native : entry_point = ig_this->generate_native_entry(false); break;
case Interpreter::native_synchronized : entry_point = ig_this->generate_native_entry(true); break;
case Interpreter::empty : entry_point = ig_this->generate_empty_entry(); break;
case Interpreter::accessor : entry_point = ig_this->generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = ig_this->generate_abstract_entry(); break;
case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
@ -1519,9 +1634,15 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : // fall thru
case Interpreter::java_lang_math_pow : // fall thru
case Interpreter::java_lang_math_exp : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
case Interpreter::java_lang_math_exp : entry_point = ig_this->generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
: entry_point = ig_this->generate_Reference_get_entry(); break;
case Interpreter::java_util_zip_CRC32_update
: entry_point = ig_this->generate_CRC32_update_entry(); break;
case Interpreter::java_util_zip_CRC32_updateBytes
: // fall thru
case Interpreter::java_util_zip_CRC32_updateByteBuffer
: entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break;
default:
fatal(err_msg("unexpected method kind: %d", kind));
break;
@ -1529,7 +1650,7 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
if (entry_point) return entry_point;
return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized);
return ig_this->generate_normal_entry(synchronized);
}

View file

@ -840,6 +840,117 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
return generate_accessor_entry();
}
/**
* Method entry for static native methods:
* int java.util.zip.CRC32.update(int crc, int b)
*/
address InterpreterGenerator::generate_CRC32_update_entry() {
if (UseCRC32Intrinsics) {
address entry = __ pc();
// rbx: Method*
// rsi: senderSP, must be preserved for slow path; set SP to it on fast path
// rdx: scratch
// rdi: scratch
Label slow_path;
// If we need a safepoint check, generate full interpreter entry.
ExternalAddress state(SafepointSynchronize::address_of_state());
__ cmp32(state, SafepointSynchronize::_not_synchronized);
__ jcc(Assembler::notEqual, slow_path);
// We don't generate a local frame and don't align the stack because
// we call stub code and there is no safepoint on this path.
// Load parameters
const Register crc = rax; // crc
const Register val = rdx; // source java byte value
const Register tbl = rdi; // scratch
// Arguments are reversed on java expression stack
__ movl(val, Address(rsp, wordSize)); // byte value
__ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC
__ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr()));
__ notl(crc); // ~crc
__ update_byte_crc32(crc, val, tbl);
__ notl(crc); // ~crc
// result in rax
// _areturn
__ pop(rdi); // get return address
__ mov(rsp, rsi); // set sp to sender sp
__ jmp(rdi);
// generate a vanilla native entry as the slow path
__ bind(slow_path);
(void) generate_native_entry(false);
return entry;
}
return generate_native_entry(false);
}
/**
* Method entry for static native methods:
* int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
* int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
*/
address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
if (UseCRC32Intrinsics) {
address entry = __ pc();
// rbx: Method*
// r13: senderSP, must be preserved for slow path; set SP to it on fast path
Label slow_path;
// If we need a safepoint check, generate full interpreter entry.
ExternalAddress state(SafepointSynchronize::address_of_state());
__ cmp32(state, SafepointSynchronize::_not_synchronized);
__ jcc(Assembler::notEqual, slow_path);
// We don't generate a local frame and don't align the stack because
// we call stub code and there is no safepoint on this path.
// Load parameters
const Register crc = c_rarg0; // crc
const Register buf = c_rarg1; // source java byte array address
const Register len = c_rarg2; // length
// Arguments are reversed on java expression stack
__ movl(len, Address(rsp, wordSize)); // Length
// Calculate address of start element
if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
__ movptr(buf, Address(rsp, 3*wordSize)); // long buf
__ addptr(buf, Address(rsp, 2*wordSize)); // + offset
__ movl(crc, Address(rsp, 5*wordSize)); // Initial CRC
} else {
__ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array
__ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
__ addptr(buf, Address(rsp, 2*wordSize)); // + offset
__ movl(crc, Address(rsp, 4*wordSize)); // Initial CRC
}
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
// result in rax
// _areturn
__ pop(rdi); // get return address
__ mov(rsp, r13); // set sp to sender sp
__ jmp(rdi);
// generate a vanilla native entry as the slow path
__ bind(slow_path);
(void) generate_native_entry(false);
return entry;
}
return generate_native_entry(false);
}
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
@ -1510,15 +1621,16 @@ address AbstractInterpreterGenerator::generate_method_entry(
// determine code generation flags
bool synchronized = false;
address entry_point = NULL;
InterpreterGenerator* ig_this = (InterpreterGenerator*)this;
switch (kind) {
case Interpreter::zerolocals : break;
case Interpreter::zerolocals_synchronized: synchronized = true; break;
case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break;
case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); break;
case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break;
case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;
case Interpreter::zerolocals : break;
case Interpreter::zerolocals_synchronized: synchronized = true; break;
case Interpreter::native : entry_point = ig_this->generate_native_entry(false); break;
case Interpreter::native_synchronized : entry_point = ig_this->generate_native_entry(true); break;
case Interpreter::empty : entry_point = ig_this->generate_empty_entry(); break;
case Interpreter::accessor : entry_point = ig_this->generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = ig_this->generate_abstract_entry(); break;
case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
@ -1528,9 +1640,15 @@ address AbstractInterpreterGenerator::generate_method_entry(
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : // fall thru
case Interpreter::java_lang_math_pow : // fall thru
case Interpreter::java_lang_math_exp : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
case Interpreter::java_lang_math_exp : entry_point = ig_this->generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
: entry_point = ig_this->generate_Reference_get_entry(); break;
case Interpreter::java_util_zip_CRC32_update
: entry_point = ig_this->generate_CRC32_update_entry(); break;
case Interpreter::java_util_zip_CRC32_updateBytes
: // fall thru
case Interpreter::java_util_zip_CRC32_updateByteBuffer
: entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break;
default:
fatal(err_msg("unexpected method kind: %d", kind));
break;
@ -1540,8 +1658,7 @@ address AbstractInterpreterGenerator::generate_method_entry(
return entry_point;
}
return ((InterpreterGenerator*) this)->
generate_normal_entry(synchronized);
return ig_this->generate_normal_entry(synchronized);
}
// These should never be compiled since the interpreter will prefer

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -446,6 +446,7 @@ void VM_Version::get_processor_features() {
(supports_avx() ? ", avx" : ""),
(supports_avx2() ? ", avx2" : ""),
(supports_aes() ? ", aes" : ""),
(supports_clmul() ? ", clmul" : ""),
(supports_erms() ? ", erms" : ""),
(supports_mmx_ext() ? ", mmxext" : ""),
(supports_3dnow_prefetch() ? ", 3dnowpref" : ""),
@ -489,6 +490,27 @@ void VM_Version::get_processor_features() {
FLAG_SET_DEFAULT(UseAES, false);
}
// Use CLMUL instructions if available.
if (supports_clmul()) {
if (FLAG_IS_DEFAULT(UseCLMUL)) {
UseCLMUL = true;
}
} else if (UseCLMUL) {
if (!FLAG_IS_DEFAULT(UseCLMUL))
warning("CLMUL instructions not available on this CPU (AVX may also be required)");
FLAG_SET_DEFAULT(UseCLMUL, false);
}
if (UseCLMUL && (UseAVX > 0) && (UseSSE > 2)) {
if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
UseCRC32Intrinsics = true;
}
} else if (UseCRC32Intrinsics) {
if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
warning("CRC32 Intrinsics requires AVX and CLMUL instructions (not available on this CPU)");
FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
}
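In practice this gives the following command-line behavior (a hedged summary; UseCLMUL and UseCRC32Intrinsics are assumed to be ordinary product flags registered elsewhere in this change):

// default:   intrinsic enabled automatically when CLMUL, AVX and SSE > 2 are present
// opt out:   java -XX:-UseCRC32Intrinsics ...
// opt in on unsupported hardware: prints the warning above and resets the flag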
// The AES intrinsic stubs require AES instruction support (of course)
// but also require SSE3 mode for the instructions they use.
if (UseAES && (UseSSE > 2)) {

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -61,7 +61,8 @@ public:
uint32_t value;
struct {
uint32_t sse3 : 1,
: 2,
clmul : 1,
: 1,
monitor : 1,
: 1,
vmx : 1,
@ -249,7 +250,8 @@ protected:
CPU_AVX = (1 << 17),
CPU_AVX2 = (1 << 18),
CPU_AES = (1 << 19),
CPU_ERMS = (1 << 20) // enhanced 'rep movsb/stosb' instructions
CPU_ERMS = (1 << 20), // enhanced 'rep movsb/stosb' instructions
CPU_CLMUL = (1 << 21) // carryless multiply for CRC
} cpuFeatureFlags;
enum {
@ -429,6 +431,8 @@ protected:
result |= CPU_AES;
if (_cpuid_info.sef_cpuid7_ebx.bits.erms != 0)
result |= CPU_ERMS;
if (_cpuid_info.std_cpuid1_ecx.bits.clmul != 0)
result |= CPU_CLMUL;
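For reference, the bit consulted here is CPUID.1:ECX[1] (PCLMULQDQ); outside HotSpot's cpuid plumbing, a standalone check might look like this sketch using GCC's cpuid.h:

#include <cpuid.h>

static bool cpu_has_clmul() {
  unsigned int eax, ebx, ecx, edx;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) return false;
  return (ecx & (1u << 1)) != 0;  // CPUID.1:ECX bit 1 = PCLMULQDQ
}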
// AMD features.
if (is_amd()) {
@ -555,6 +559,7 @@ public:
static bool supports_tsc() { return (_cpuFeatures & CPU_TSC) != 0; }
static bool supports_aes() { return (_cpuFeatures & CPU_AES) != 0; }
static bool supports_erms() { return (_cpuFeatures & CPU_ERMS) != 0; }
static bool supports_clmul() { return (_cpuFeatures & CPU_CLMUL) != 0; }
// Intel features
static bool is_intel_family_core() { return is_intel() &&

View file

@ -52,22 +52,6 @@ address* Relocation::pd_address_in_code() {
return (address *) addr();
}
int Relocation::pd_breakpoint_size() {
ShouldNotCallThis();
}
void Relocation::pd_swap_in_breakpoint(address x,
short* instrs,
int instrlen) {
ShouldNotCallThis();
}
void Relocation::pd_swap_out_breakpoint(address x,
short* instrs,
int instrlen) {
ShouldNotCallThis();
}
void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src,
CodeBuffer* dst) {
ShouldNotCallThis();

View file

@ -58,7 +58,9 @@ define_pd_global(intx, ReservedCodeCacheSize, 32*M );
define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
define_pd_global(uintx, CodeCacheMinBlockLength, 1 );
define_pd_global(uintx, MetaspaceSize, 12*M );
define_pd_global(uintx, CodeCacheMinimumUseSpace, 200*K);
define_pd_global(uintx, MetaspaceSize, 12*M );
define_pd_global(bool, NeverActAsServerClassMachine, true );
define_pd_global(uint64_t, MaxRAM, 1ULL*G);
define_pd_global(bool, CICompileOSR, true );

View file

@ -122,9 +122,7 @@ static int file_open(const char* path, int flag) {
}
static int file_close(int fd) {
int ret;
RESTARTABLE(close(fd), ret);
return ret;
return close(fd);
}
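The RESTARTABLE wrapper is dropped here (and in the attach listener below) deliberately: POSIX leaves the descriptor state unspecified when close() fails with EINTR, so retrying the close can race with another thread that has already reused the same descriptor number. A failed close() is therefore reported once rather than retried.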
static int file_read(int fd, char* buf, int len) {

View file

@ -199,7 +199,7 @@ int BsdAttachListener::init() {
::unlink(initial_path);
int res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr));
if (res == -1) {
RESTARTABLE(::close(listener), res);
::close(listener);
return -1;
}
@ -217,7 +217,7 @@ int BsdAttachListener::init() {
}
}
if (res == -1) {
RESTARTABLE(::close(listener), res);
::close(listener);
::unlink(initial_path);
return -1;
}
@ -345,24 +345,21 @@ BsdAttachOperation* BsdAttachListener::dequeue() {
uid_t puid;
gid_t pgid;
if (::getpeereid(s, &puid, &pgid) != 0) {
int res;
RESTARTABLE(::close(s), res);
::close(s);
continue;
}
uid_t euid = geteuid();
gid_t egid = getegid();
if (puid != euid || pgid != egid) {
int res;
RESTARTABLE(::close(s), res);
::close(s);
continue;
}
// peer credential look okay so we read the request
BsdAttachOperation* op = read_request(s);
if (op == NULL) {
int res;
RESTARTABLE(::close(s), res);
::close(s);
continue;
} else {
return op;
@ -413,7 +410,7 @@ void BsdAttachOperation::complete(jint result, bufferedStream* st) {
}
// done
RESTARTABLE(::close(this->socket()), rc);
::close(this->socket());
// were we externally suspended while we were waiting?
thread->check_and_wait_while_suspended();

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -94,7 +94,7 @@ public:
// flags that support signal based suspend/resume on Bsd are in a
// separate class to avoid confusion with many flags in OSThread that
// are used by VM level suspend/resume.
os::Bsd::SuspendResume sr;
os::SuspendResume sr;
// _ucontext and _siginfo are used by SR_handler() to save thread context,
// and they will later be used to walk the stack or reposition thread PC.

View file

@ -1234,12 +1234,13 @@ bool os::address_is_in_vm(address addr) {
Dl_info dlinfo;
if (libjvm_base_addr == NULL) {
dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo);
libjvm_base_addr = (address)dlinfo.dli_fbase;
if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
libjvm_base_addr = (address)dlinfo.dli_fbase;
}
assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
}
if (dladdr((void *)addr, &dlinfo)) {
if (dladdr((void *)addr, &dlinfo) != 0) {
if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
}
@ -1251,35 +1252,40 @@ bool os::address_is_in_vm(address addr) {
bool os::dll_address_to_function_name(address addr, char *buf,
int buflen, int *offset) {
// buf is not optional, but offset is optional
assert(buf != NULL, "sanity check");
Dl_info dlinfo;
char localbuf[MACH_MAXSYMLEN];
// dladdr will find names of dynamic functions only, but does
// it set dli_fbase with mach_header address when it "fails" ?
if (dladdr((void*)addr, &dlinfo) && dlinfo.dli_sname != NULL) {
if (buf != NULL) {
if(!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
if (dladdr((void*)addr, &dlinfo) != 0) {
// see if we have a matching symbol
if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
}
if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
return true;
}
if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
return true;
} else if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
buf, buflen, offset, dlinfo.dli_fname)) {
return true;
// no matching symbol so try for just file info
if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
buf, buflen, offset, dlinfo.dli_fname)) {
return true;
}
}
}
// Handle non-dymanic manually:
if (dlinfo.dli_fbase != NULL &&
Decoder::decode(addr, localbuf, MACH_MAXSYMLEN, offset, dlinfo.dli_fbase)) {
if(!Decoder::demangle(localbuf, buf, buflen)) {
jio_snprintf(buf, buflen, "%s", localbuf);
// Handle non-dynamic manually:
if (dlinfo.dli_fbase != NULL &&
Decoder::decode(addr, localbuf, MACH_MAXSYMLEN, offset,
dlinfo.dli_fbase)) {
if (!Decoder::demangle(localbuf, buf, buflen)) {
jio_snprintf(buf, buflen, "%s", localbuf);
}
return true;
}
return true;
}
if (buf != NULL) buf[0] = '\0';
buf[0] = '\0';
if (offset != NULL) *offset = -1;
return false;
}
@ -1287,17 +1293,24 @@ bool os::dll_address_to_function_name(address addr, char *buf,
// ported from solaris version
bool os::dll_address_to_library_name(address addr, char* buf,
int buflen, int* offset) {
// buf is not optional, but offset is optional
assert(buf != NULL, "sanity check");
Dl_info dlinfo;
if (dladdr((void*)addr, &dlinfo)){
if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
if (offset) *offset = addr - (address)dlinfo.dli_fbase;
return true;
} else {
if (buf) buf[0] = '\0';
if (offset) *offset = -1;
return false;
if (dladdr((void*)addr, &dlinfo) != 0) {
if (dlinfo.dli_fname != NULL) {
jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
}
if (dlinfo.dli_fbase != NULL && offset != NULL) {
*offset = addr - (address)dlinfo.dli_fbase;
}
return true;
}
buf[0] = '\0';
if (offset) *offset = -1;
return false;
}
// Loads .dll/.so and
@ -1520,49 +1533,50 @@ static bool _print_ascii_file(const char* filename, outputStream* st) {
}
void os::print_dll_info(outputStream *st) {
st->print_cr("Dynamic libraries:");
st->print_cr("Dynamic libraries:");
#ifdef RTLD_DI_LINKMAP
Dl_info dli;
void *handle;
Link_map *map;
Link_map *p;
Dl_info dli;
void *handle;
Link_map *map;
Link_map *p;
if (!dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli)) {
st->print_cr("Error: Cannot print dynamic libraries.");
return;
}
handle = dlopen(dli.dli_fname, RTLD_LAZY);
if (handle == NULL) {
st->print_cr("Error: Cannot print dynamic libraries.");
return;
}
dlinfo(handle, RTLD_DI_LINKMAP, &map);
if (map == NULL) {
st->print_cr("Error: Cannot print dynamic libraries.");
return;
}
if (dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli) == 0 ||
dli.dli_fname == NULL) {
st->print_cr("Error: Cannot print dynamic libraries.");
return;
}
handle = dlopen(dli.dli_fname, RTLD_LAZY);
if (handle == NULL) {
st->print_cr("Error: Cannot print dynamic libraries.");
return;
}
dlinfo(handle, RTLD_DI_LINKMAP, &map);
if (map == NULL) {
st->print_cr("Error: Cannot print dynamic libraries.");
return;
}
while (map->l_prev != NULL)
map = map->l_prev;
while (map->l_prev != NULL)
map = map->l_prev;
while (map != NULL) {
st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
map = map->l_next;
}
while (map != NULL) {
st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
map = map->l_next;
}
dlclose(handle);
dlclose(handle);
#elif defined(__APPLE__)
uint32_t count;
uint32_t i;
uint32_t count;
uint32_t i;
count = _dyld_image_count();
for (i = 1; i < count; i++) {
const char *name = _dyld_get_image_name(i);
intptr_t slide = _dyld_get_image_vmaddr_slide(i);
st->print_cr(PTR_FORMAT " \t%s", slide, name);
}
count = _dyld_image_count();
for (i = 1; i < count; i++) {
const char *name = _dyld_get_image_name(i);
intptr_t slide = _dyld_get_image_vmaddr_slide(i);
st->print_cr(PTR_FORMAT " \t%s", slide, name);
}
#else
st->print_cr("Error: Cannot print dynamic libraries.");
st->print_cr("Error: Cannot print dynamic libraries.");
#endif
}
@ -1707,8 +1721,11 @@ void os::jvm_path(char *buf, jint buflen) {
bool ret = dll_address_to_library_name(
CAST_FROM_FN_PTR(address, os::jvm_path),
dli_fname, sizeof(dli_fname), NULL);
assert(ret != 0, "cannot locate libjvm");
char *rp = realpath(dli_fname, buf);
assert(ret, "cannot locate libjvm");
char *rp = NULL;
if (ret && dli_fname[0] != '\0') {
rp = realpath(dli_fname, buf);
}
if (rp == NULL)
return;
@ -1852,17 +1869,118 @@ static volatile jint pending_signals[NSIG+1] = { 0 };
// Bsd(POSIX) specific hand shaking semaphore.
#ifdef __APPLE__
static semaphore_t sig_sem;
typedef semaphore_t os_semaphore_t;
#define SEM_INIT(sem, value) semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, value)
#define SEM_WAIT(sem) semaphore_wait(sem);
#define SEM_POST(sem) semaphore_signal(sem);
#define SEM_WAIT(sem) semaphore_wait(sem)
#define SEM_POST(sem) semaphore_signal(sem)
#define SEM_DESTROY(sem) semaphore_destroy(mach_task_self(), sem)
#else
static sem_t sig_sem;
typedef sem_t os_semaphore_t;
#define SEM_INIT(sem, value) sem_init(&sem, 0, value)
#define SEM_WAIT(sem) sem_wait(&sem);
#define SEM_POST(sem) sem_post(&sem);
#define SEM_WAIT(sem) sem_wait(&sem)
#define SEM_POST(sem) sem_post(&sem)
#define SEM_DESTROY(sem) sem_destroy(&sem)
#endif
class Semaphore : public StackObj {
public:
Semaphore();
~Semaphore();
void signal();
void wait();
bool trywait();
bool timedwait(unsigned int sec, int nsec);
private:
jlong currenttime() const;
semaphore_t _semaphore;
};
Semaphore::Semaphore() : _semaphore(0) {
SEM_INIT(_semaphore, 0);
}
Semaphore::~Semaphore() {
SEM_DESTROY(_semaphore);
}
void Semaphore::signal() {
SEM_POST(_semaphore);
}
void Semaphore::wait() {
SEM_WAIT(_semaphore);
}
jlong Semaphore::currenttime() const {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * NANOSECS_PER_SEC) + (tv.tv_usec * 1000);
}
#ifdef __APPLE__
bool Semaphore::trywait() {
return timedwait(0, 0);
}
bool Semaphore::timedwait(unsigned int sec, int nsec) {
kern_return_t kr = KERN_ABORTED;
mach_timespec_t waitspec;
waitspec.tv_sec = sec;
waitspec.tv_nsec = nsec;
jlong starttime = currenttime();
kr = semaphore_timedwait(_semaphore, waitspec);
while (kr == KERN_ABORTED) {
jlong totalwait = (sec * NANOSECS_PER_SEC) + nsec;
jlong current = currenttime();
jlong passedtime = current - starttime;
if (passedtime >= totalwait) {
waitspec.tv_sec = 0;
waitspec.tv_nsec = 0;
} else {
jlong waittime = totalwait - (current - starttime);
waitspec.tv_sec = waittime / NANOSECS_PER_SEC;
waitspec.tv_nsec = waittime % NANOSECS_PER_SEC;
}
kr = semaphore_timedwait(_semaphore, waitspec);
}
return kr == KERN_SUCCESS;
}
#else
bool Semaphore::trywait() {
return sem_trywait(&_semaphore) == 0;
}
bool Semaphore::timedwait(unsigned int sec, int nsec) {
struct timespec ts;
unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
while (1) {
int result = sem_timedwait(&_semaphore, &ts);
if (result == 0) {
return true;
} else if (errno == EINTR) {
continue;
} else if (errno == ETIMEDOUT) {
return false;
} else {
return false;
}
}
}
#endif // __APPLE__
static os_semaphore_t sig_sem;
static Semaphore sr_semaphore;
void os::signal_init_pd() {
// Initialize signal structures
::memset((void*)pending_signals, 0, sizeof(pending_signals));
@ -1973,6 +2091,13 @@ void bsd_wrap_code(char* base, size_t size) {
}
}
static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
int err) {
warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
", %d) failed; error='%s' (errno=%d)", addr, size, exec,
strerror(err), err);
}
// NOTE: Bsd kernel does not really reserve the pages for us.
// All it does is to check if there are enough free pages
// left at the time of mmap(). This could be a potential
@ -1981,18 +2106,45 @@ bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
#ifdef __OpenBSD__
// XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD
return ::mprotect(addr, size, prot) == 0;
if (::mprotect(addr, size, prot) == 0) {
return true;
}
#else
uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
return res != (uintptr_t) MAP_FAILED;
if (res != (uintptr_t) MAP_FAILED) {
return true;
}
#endif
}
// Warn about any commit errors we see in non-product builds just
// in case mmap() doesn't work as described on the man page.
NOT_PRODUCT(warn_fail_commit_memory(addr, size, exec, errno);)
return false;
}
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
bool exec) {
return commit_memory(addr, size, exec);
// alignment_hint is ignored on this OS
return pd_commit_memory(addr, size, exec);
}
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
const char* mesg) {
assert(mesg != NULL, "mesg must be specified");
if (!pd_commit_memory(addr, size, exec)) {
// add extra info in product mode for vm_exit_out_of_memory():
PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
}
}
void os::pd_commit_memory_or_exit(char* addr, size_t size,
size_t alignment_hint, bool exec,
const char* mesg) {
// alignment_hint is ignored on this OS
pd_commit_memory_or_exit(addr, size, exec, mesg);
}
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
@ -2047,7 +2199,7 @@ bool os::pd_uncommit_memory(char* addr, size_t size) {
}
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
return os::commit_memory(addr, size);
return os::commit_memory(addr, size, !ExecMem);
}
// If this is a growable mapping, remove the guard pages entirely by
@ -2219,21 +2371,20 @@ char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
}
// The memory is committed
address pc = CALLER_PC;
MemTracker::record_virtual_memory_reserve((address)addr, bytes, pc);
MemTracker::record_virtual_memory_commit((address)addr, bytes, pc);
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
return addr;
}
bool os::release_memory_special(char* base, size_t bytes) {
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
// detaching the SHM segment will also delete it, see reserve_memory_special()
int rslt = shmdt(base);
if (rslt == 0) {
MemTracker::record_virtual_memory_uncommit((address)base, bytes);
MemTracker::record_virtual_memory_release((address)base, bytes);
tkr.record((address)base, bytes);
return true;
} else {
tkr.discard();
return false;
}
@ -2616,9 +2767,6 @@ void os::hint_no_preempt() {}
static void resume_clear_context(OSThread *osthread) {
osthread->set_ucontext(NULL);
osthread->set_siginfo(NULL);
// notify the suspend action is completed, we have now resumed
osthread->sr.clear_suspended();
}
static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
@ -2638,7 +2786,7 @@ static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontex
// its signal handlers run and prevents sigwait()'s use with the
// mutex granting signal.
//
// Currently only ever called on the VMThread
// Currently only ever called on the VMThread or JavaThread
//
static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
// Save and restore errno to avoid confusing native code with EINTR
@ -2647,38 +2795,48 @@ static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
Thread* thread = Thread::current();
OSThread* osthread = thread->osthread();
assert(thread->is_VM_thread(), "Must be VMThread");
// read current suspend action
int action = osthread->sr.suspend_action();
if (action == os::Bsd::SuspendResume::SR_SUSPEND) {
assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
os::SuspendResume::State current = osthread->sr.state();
if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
suspend_save_context(osthread, siginfo, context);
// Notify the suspend action is about to be completed. do_suspend()
// waits until SR_SUSPENDED is set and then returns. We will wait
// here for a resume signal and that completes the suspend-other
// action. do_suspend/do_resume is always called as a pair from
// the same thread - so there are no races
// attempt to switch the state, we assume we had a SUSPEND_REQUEST
os::SuspendResume::State state = osthread->sr.suspended();
if (state == os::SuspendResume::SR_SUSPENDED) {
sigset_t suspend_set; // signals for sigsuspend()
// notify the caller
osthread->sr.set_suspended();
// get current set of blocked signals and unblock resume signal
pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
sigdelset(&suspend_set, SR_signum);
sigset_t suspend_set; // signals for sigsuspend()
sr_semaphore.signal();
// wait here until we are resumed
while (1) {
sigsuspend(&suspend_set);
// get current set of blocked signals and unblock resume signal
pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
sigdelset(&suspend_set, SR_signum);
os::SuspendResume::State result = osthread->sr.running();
if (result == os::SuspendResume::SR_RUNNING) {
sr_semaphore.signal();
break;
} else if (result != os::SuspendResume::SR_SUSPENDED) {
ShouldNotReachHere();
}
}
// wait here until we are resumed
do {
sigsuspend(&suspend_set);
// ignore all returns until we get a resume signal
} while (osthread->sr.suspend_action() != os::Bsd::SuspendResume::SR_CONTINUE);
} else if (state == os::SuspendResume::SR_RUNNING) {
// request was cancelled, continue
} else {
ShouldNotReachHere();
}
resume_clear_context(osthread);
} else if (current == os::SuspendResume::SR_RUNNING) {
// request was cancelled, continue
} else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
// ignore
} else {
assert(action == os::Bsd::SuspendResume::SR_CONTINUE, "unexpected sr action");
// nothing special to do - just leave the handler
// ignore
}
errno = old_errno;
@ -2722,42 +2880,82 @@ static int SR_initialize() {
return 0;
}
static int sr_notify(OSThread* osthread) {
int status = pthread_kill(osthread->pthread_id(), SR_signum);
assert_status(status == 0, status, "pthread_kill");
return status;
}
// "Randomly" selected value for how long we want to spin
// before bailing out on suspending a thread, also how often
// we send a signal to a thread we want to resume
static const int RANDOMLY_LARGE_INTEGER = 1000000;
static const int RANDOMLY_LARGE_INTEGER2 = 100;
// returns true on success and false on error - really an error is fatal
// but this seems the normal response to library errors
static bool do_suspend(OSThread* osthread) {
// mark as suspended and send signal
osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_SUSPEND);
int status = pthread_kill(osthread->pthread_id(), SR_signum);
assert_status(status == 0, status, "pthread_kill");
assert(osthread->sr.is_running(), "thread should be running");
assert(!sr_semaphore.trywait(), "semaphore has invalid state");
// check status and wait until notified of suspension
if (status == 0) {
for (int i = 0; !osthread->sr.is_suspended(); i++) {
os::yield_all(i);
}
osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_NONE);
return true;
}
else {
osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_NONE);
// mark as suspended and send signal
if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
// failed to switch, state wasn't running?
ShouldNotReachHere();
return false;
}
if (sr_notify(osthread) != 0) {
ShouldNotReachHere();
}
// managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
while (true) {
if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
break;
} else {
// timeout
os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
if (cancelled == os::SuspendResume::SR_RUNNING) {
return false;
} else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
// make sure that we consume the signal on the semaphore as well
sr_semaphore.wait();
break;
} else {
ShouldNotReachHere();
return false;
}
}
}
guarantee(osthread->sr.is_suspended(), "Must be suspended");
return true;
}
static void do_resume(OSThread* osthread) {
assert(osthread->sr.is_suspended(), "thread should be suspended");
osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_CONTINUE);
assert(!sr_semaphore.trywait(), "invalid semaphore state");
int status = pthread_kill(osthread->pthread_id(), SR_signum);
assert_status(status == 0, status, "pthread_kill");
// check status and wait until notified of resumption
if (status == 0) {
for (int i = 0; osthread->sr.is_suspended(); i++) {
os::yield_all(i);
if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
// failed to switch to WAKEUP_REQUEST
ShouldNotReachHere();
return;
}
while (true) {
if (sr_notify(osthread) == 0) {
if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
if (osthread->sr.is_running()) {
return;
}
}
} else {
ShouldNotReachHere();
}
}
osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_NONE);
guarantee(osthread->sr.is_running(), "Must be running!");
}
////////////////////////////////////////////////////////////////////////////////
@ -3364,7 +3562,7 @@ jint os::init_2(void)
if (!UseMembar) {
address mem_serialize_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
guarantee( mem_serialize_page != MAP_FAILED, "mmap Failed for memory serialize page");
os::set_memory_serialize_page( mem_serialize_page );
#ifndef PRODUCT
@ -3508,7 +3706,40 @@ bool os::bind_to_processor(uint processor_id) {
return false;
}
void os::SuspendedThreadTask::internal_do_task() {
if (do_suspend(_thread->osthread())) {
SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
do_task(context);
do_resume(_thread->osthread());
}
}
///
class PcFetcher : public os::SuspendedThreadTask {
public:
PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
ExtendedPC result();
protected:
void do_task(const os::SuspendedThreadTaskContext& context);
private:
ExtendedPC _epc;
};
ExtendedPC PcFetcher::result() {
guarantee(is_done(), "task is not done yet.");
return _epc;
}
void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
Thread* thread = context.thread();
OSThread* osthread = thread->osthread();
if (osthread->ucontext() != NULL) {
_epc = os::Bsd::ucontext_get_pc((ucontext_t *) context.ucontext());
} else {
// NULL context is unexpected, double-check this is the VMThread
guarantee(thread->is_VM_thread(), "can only be called for VMThread");
}
}
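PcFetcher is the first client of the SuspendedThreadTask hook above: run() drives internal_do_task(), which suspends the target, hands the captured ucontext to do_task(), and resumes the thread — which is what lets os::get_thread_pc() below collapse to the three-line body shown in this hunk.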
// Suspends the target using the signal mechanism and then grabs the PC before
// resuming the target. Used by the flat-profiler only
@ -3517,22 +3748,9 @@ ExtendedPC os::get_thread_pc(Thread* thread) {
assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
assert(thread->is_VM_thread(), "Can only be called for VMThread");
ExtendedPC epc;
OSThread* osthread = thread->osthread();
if (do_suspend(osthread)) {
if (osthread->ucontext() != NULL) {
epc = os::Bsd::ucontext_get_pc(osthread->ucontext());
} else {
// NULL context is unexpected, double-check this is the VMThread
guarantee(thread->is_VM_thread(), "can only be called for VMThread");
}
do_resume(osthread);
}
// failure means pthread_kill failed for some reason - arguably this is
// a fatal problem, but such problems are ignored elsewhere
return epc;
PcFetcher fetcher(thread);
fetcher.run();
return fetcher.result();
}
int os::Bsd::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime)
@ -3546,20 +3764,20 @@ int os::Bsd::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex,
bool os::find(address addr, outputStream* st) {
Dl_info dlinfo;
memset(&dlinfo, 0, sizeof(dlinfo));
if (dladdr(addr, &dlinfo)) {
if (dladdr(addr, &dlinfo) != 0) {
st->print(PTR_FORMAT ": ", addr);
if (dlinfo.dli_sname != NULL) {
if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
st->print("%s+%#x", dlinfo.dli_sname,
addr - (intptr_t)dlinfo.dli_saddr);
} else if (dlinfo.dli_fname) {
} else if (dlinfo.dli_fbase != NULL) {
st->print("<offset %#x>", addr - (intptr_t)dlinfo.dli_fbase);
} else {
st->print("<absolute address>");
}
if (dlinfo.dli_fname) {
if (dlinfo.dli_fname != NULL) {
st->print(" in %s", dlinfo.dli_fname);
}
if (dlinfo.dli_fbase) {
if (dlinfo.dli_fbase != NULL) {
st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
}
st->cr();
@ -3572,7 +3790,7 @@ bool os::find(address addr, outputStream* st) {
if (!lowest) lowest = (address) dlinfo.dli_fbase;
if (begin < lowest) begin = lowest;
Dl_info dlinfo2;
if (dladdr(end, &dlinfo2) && dlinfo2.dli_saddr != dlinfo.dli_saddr
if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
&& end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
end = (address) dlinfo2.dli_saddr;
Disassembler::decode(begin, end, st);
@ -4517,3 +4735,4 @@ int os::get_core_path(char* buffer, size_t bufferSize) {
return n;
}

@ -145,36 +145,6 @@ class Bsd {
// BsdThreads work-around for 6292965
static int safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime);
// Bsd suspend/resume support - this helper is a shadow of its former
// self now that low-level suspension is barely used, and old workarounds
// for BsdThreads are no longer needed.
class SuspendResume {
private:
volatile int _suspend_action;
volatile jint _state;
public:
// values for suspend_action:
enum {
SR_NONE = 0x00,
SR_SUSPEND = 0x01, // suspend request
SR_CONTINUE = 0x02, // resume request
SR_SUSPENDED = 0x20 // values for _state: + SR_NONE
};
SuspendResume() { _suspend_action = SR_NONE; _state = SR_NONE; }
int suspend_action() const { return _suspend_action; }
void set_suspend_action(int x) { _suspend_action = x; }
// atomic updates for _state
inline void set_suspended();
inline void clear_suspended();
bool is_suspended() { return _state & SR_SUSPENDED; }
#undef SR_SUSPENDED
};
private:
typedef int (*sched_getcpu_func_t)(void);
typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
@ -250,7 +220,7 @@ class PlatformEvent : public CHeapObj<mtInternal> {
int TryPark () ;
int park (jlong millis) ;
void SetAssociation (Thread * a) { _Assoc = a ; }
} ;
};
class PlatformParker : public CHeapObj<mtInternal> {
protected:
@ -268,6 +238,6 @@ class PlatformParker : public CHeapObj<mtInternal> {
status = pthread_mutex_init (_mutex, NULL);
assert_status(status == 0, status, "mutex_init");
}
} ;
};
#endif // OS_BSD_VM_OS_BSD_HPP

@ -178,11 +178,11 @@ inline size_t os::write(int fd, const void *buf, unsigned int nBytes) {
}
inline int os::close(int fd) {
RESTARTABLE_RETURN_INT(::close(fd));
return ::close(fd);
}
inline int os::socket_close(int fd) {
RESTARTABLE_RETURN_INT(::close(fd));
return ::close(fd);
}
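For context, the RESTARTABLE wrapper that keeps disappearing in these hunks retries a call while it fails with EINTR. Its definition is roughly the following — an assumption, since the macro lives in the platform os_*.inline.hpp files and is not shown in this diff:
// Approximate shape of the macro being removed (assumption): retry the
// call as long as it fails with EINTR.
#define RESTARTABLE(_cmd, _result) do { \
    _result = _cmd; \
  } while (((int)_result == OS_ERR) && (errno == EINTR))

// Retrying close() this way is unsafe: even when close() fails with
// EINTR the kernel may already have released the descriptor, so a
// second close() can hit an unrelated file just opened by another
// thread. Hence the switch to a single plain ::close() throughout
// this change.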
inline int os::socket(int domain, int type, int protocol) {
@ -286,20 +286,4 @@ inline int os::set_sock_opt(int fd, int level, int optname,
return ::setsockopt(fd, level, optname, optval, optlen);
}
inline void os::Bsd::SuspendResume::set_suspended() {
jint temp, temp2;
do {
temp = _state;
temp2 = Atomic::cmpxchg(temp | SR_SUSPENDED, &_state, temp);
} while (temp2 != temp);
}
inline void os::Bsd::SuspendResume::clear_suspended() {
jint temp, temp2;
do {
temp = _state;
temp2 = Atomic::cmpxchg(temp & ~SR_SUSPENDED, &_state, temp);
} while (temp2 != temp);
}
#endif // OS_BSD_VM_OS_BSD_INLINE_HPP

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -60,7 +60,7 @@ static char* create_standard_memory(size_t size) {
}
// commit memory
if (!os::commit_memory(mapAddress, size)) {
if (!os::commit_memory(mapAddress, size, !ExecMem)) {
if (PrintMiscellaneous && Verbose) {
warning("Could not commit PerfData memory\n");
}
@ -120,7 +120,7 @@ static void save_memory_to_file(char* addr, size_t size) {
addr += result;
}
RESTARTABLE(::close(fd), result);
result = ::close(fd);
if (PrintMiscellaneous && Verbose) {
if (result == OS_ERR) {
warning("Could not close %s: %s\n", destfile, strerror(errno));
@ -632,7 +632,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
if (PrintMiscellaneous && Verbose) {
warning("could not set shared memory file size: %s\n", strerror(errno));
}
RESTARTABLE(::close(fd), result);
::close(fd);
return -1;
}
@ -656,7 +656,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
if (result != -1) {
return fd;
} else {
RESTARTABLE(::close(fd), result);
::close(fd);
return -1;
}
}
@ -734,9 +734,7 @@ static char* mmap_create_shared(size_t size) {
mapAddress = (char*)::mmap((char*)0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
// attempt to close the file - restart it if it was interrupted,
// but ignore other failures
RESTARTABLE(::close(fd), result);
result = ::close(fd);
assert(result != OS_ERR, "could not close file");
if (mapAddress == MAP_FAILED) {
@ -755,8 +753,7 @@ static char* mmap_create_shared(size_t size) {
(void)::memset((void*) mapAddress, 0, size);
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
return mapAddress;
}
@ -909,7 +906,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
// attempt to close the file - restart if it gets interrupted,
// but ignore other failures
RESTARTABLE(::close(fd), result);
result = ::close(fd);
assert(result != OS_ERR, "could not close file");
if (mapAddress == MAP_FAILED) {
@ -921,8 +918,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
}
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
*addr = mapAddress;
*sizep = size;

@ -199,7 +199,7 @@ int LinuxAttachListener::init() {
::unlink(initial_path);
int res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr));
if (res == -1) {
RESTARTABLE(::close(listener), res);
::close(listener);
return -1;
}
@ -212,7 +212,7 @@ int LinuxAttachListener::init() {
}
}
if (res == -1) {
RESTARTABLE(::close(listener), res);
::close(listener);
::unlink(initial_path);
return -1;
}
@ -340,24 +340,21 @@ LinuxAttachOperation* LinuxAttachListener::dequeue() {
struct ucred cred_info;
socklen_t optlen = sizeof(cred_info);
if (::getsockopt(s, SOL_SOCKET, SO_PEERCRED, (void*)&cred_info, &optlen) == -1) {
int res;
RESTARTABLE(::close(s), res);
::close(s);
continue;
}
uid_t euid = geteuid();
gid_t egid = getegid();
if (cred_info.uid != euid || cred_info.gid != egid) {
int res;
RESTARTABLE(::close(s), res);
::close(s);
continue;
}
// peer credentials look okay so we read the request
LinuxAttachOperation* op = read_request(s);
if (op == NULL) {
int res;
RESTARTABLE(::close(s), res);
::close(s);
continue;
} else {
return op;
@ -408,7 +405,7 @@ void LinuxAttachOperation::complete(jint result, bufferedStream* st) {
}
// done
RESTARTABLE(::close(this->socket()), rc);
::close(this->socket());
// were we externally suspended while we were waiting?
thread->check_and_wait_while_suspended();

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -77,7 +77,7 @@ public:
// flags that support signal based suspend/resume on Linux are in a
// separate class to avoid confusion with many flags in OSThread that
// are used by VM level suspend/resume.
os::Linux::SuspendResume sr;
os::SuspendResume sr;
// _ucontext and _siginfo are used by SR_handler() to save thread context,
// and they will later be used to walk the stack or reposition thread PC.

@ -151,6 +151,9 @@ sigset_t SR_sigset;
/* Used to protect dlsym() calls */
static pthread_mutex_t dl_mutex;
// Declarations
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
#ifdef JAVASE_EMBEDDED
class MemNotifyThread: public Thread {
friend class VMStructs;
@ -1679,12 +1682,13 @@ bool os::address_is_in_vm(address addr) {
Dl_info dlinfo;
if (libjvm_base_addr == NULL) {
dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo);
libjvm_base_addr = (address)dlinfo.dli_fbase;
if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
libjvm_base_addr = (address)dlinfo.dli_fbase;
}
assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
}
if (dladdr((void *)addr, &dlinfo)) {
if (dladdr((void *)addr, &dlinfo) != 0) {
if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
}
@ -1693,24 +1697,30 @@ bool os::address_is_in_vm(address addr) {
bool os::dll_address_to_function_name(address addr, char *buf,
int buflen, int *offset) {
// buf is not optional, but offset is optional
assert(buf != NULL, "sanity check");
Dl_info dlinfo;
if (dladdr((void*)addr, &dlinfo) && dlinfo.dli_sname != NULL) {
if (buf != NULL) {
if(!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
if (dladdr((void*)addr, &dlinfo) != 0) {
// see if we have a matching symbol
if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
}
if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
return true;
}
if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
return true;
} else if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
buf, buflen, offset, dlinfo.dli_fname)) {
return true;
// no matching symbol so try for just file info
if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
buf, buflen, offset, dlinfo.dli_fname)) {
return true;
}
}
}
if (buf != NULL) buf[0] = '\0';
buf[0] = '\0';
if (offset != NULL) *offset = -1;
return false;
}
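The rewritten lookups here all follow the documented dladdr() contract: a nonzero return only means the address falls inside some loaded object, and each Dl_info field must be NULL-checked individually. A standalone sketch of that pattern — describe_address is a hypothetical helper, not part of this change:
// Hypothetical helper mirroring the NULL-checks used above.
// On glibc, compile with -D_GNU_SOURCE to get dladdr().
#include <dlfcn.h>
#include <stdio.h>

static void describe_address(const void* addr) {
  Dl_info info;
  if (dladdr(addr, &info) == 0) {
    printf("%p: not in any loaded object\n", addr);
    return;
  }
  if (info.dli_sname != NULL && info.dli_saddr != NULL) {
    // nearest symbol found: print symbol+offset
    printf("%p: %s+%#lx\n", addr, info.dli_sname,
           (unsigned long) ((const char*) addr - (const char*) info.dli_saddr));
  } else if (info.dli_fbase != NULL) {
    // no symbol, but we know the containing object
    printf("%p: <offset %#lx>\n", addr,
           (unsigned long) ((const char*) addr - (const char*) info.dli_fbase));
  }
  if (info.dli_fname != NULL) {
    printf("  in %s\n", info.dli_fname);
  }
}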
@ -1761,6 +1771,9 @@ static int address_to_library_name_callback(struct dl_phdr_info *info,
bool os::dll_address_to_library_name(address addr, char* buf,
int buflen, int* offset) {
// buf is not optional, but offset is optional
assert(buf != NULL, "sanity check");
Dl_info dlinfo;
struct _address_to_library_name data;
@ -1779,15 +1792,20 @@ bool os::dll_address_to_library_name(address addr, char* buf,
// buf already contains library name
if (offset) *offset = addr - data.base;
return true;
} else if (dladdr((void*)addr, &dlinfo)){
if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
if (offset) *offset = addr - (address)dlinfo.dli_fbase;
return true;
} else {
if (buf) buf[0] = '\0';
if (offset) *offset = -1;
return false;
}
if (dladdr((void*)addr, &dlinfo) != 0) {
if (dlinfo.dli_fname != NULL) {
jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
}
if (dlinfo.dli_fbase != NULL && offset != NULL) {
*offset = addr - (address)dlinfo.dli_fbase;
}
return true;
}
buf[0] = '\0';
if (offset) *offset = -1;
return false;
}
// Loads .dll/.so and
@ -2314,8 +2332,11 @@ void os::jvm_path(char *buf, jint buflen) {
bool ret = dll_address_to_library_name(
CAST_FROM_FN_PTR(address, os::jvm_path),
dli_fname, sizeof(dli_fname), NULL);
assert(ret != 0, "cannot locate libjvm");
char *rp = realpath(dli_fname, buf);
assert(ret, "cannot locate libjvm");
char *rp = NULL;
if (ret && dli_fname[0] != '\0') {
rp = realpath(dli_fname, buf);
}
if (rp == NULL)
return;
@ -2407,6 +2428,57 @@ void* os::user_handler() {
return CAST_FROM_FN_PTR(void*, UserHandler);
}
class Semaphore : public StackObj {
public:
Semaphore();
~Semaphore();
void signal();
void wait();
bool trywait();
bool timedwait(unsigned int sec, int nsec);
private:
sem_t _semaphore;
};
Semaphore::Semaphore() {
sem_init(&_semaphore, 0, 0);
}
Semaphore::~Semaphore() {
sem_destroy(&_semaphore);
}
void Semaphore::signal() {
sem_post(&_semaphore);
}
void Semaphore::wait() {
sem_wait(&_semaphore);
}
bool Semaphore::trywait() {
return sem_trywait(&_semaphore) == 0;
}
bool Semaphore::timedwait(unsigned int sec, int nsec) {
struct timespec ts;
unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
while (1) {
int result = sem_timedwait(&_semaphore, &ts);
if (result == 0) {
return true;
} else if (errno == EINTR) {
continue;
} else if (errno == ETIMEDOUT) {
return false;
} else {
return false;
}
}
}
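Note the pairing this class exists for: every sr_semaphore.signal() issued from the signal handler is meant to be consumed by exactly one wait()/timedwait() in do_suspend()/do_resume() further down — which is why do_suspend() performs an extra sr_semaphore.wait() when a request that timed out turns out to have reached SR_SUSPENDED after all, and why both helpers assert !sr_semaphore.trywait() on entry. The EINTR loop in timedwait() matters here because SR_signum itself can interrupt sem_timedwait().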
extern "C" {
typedef void (*sa_handler_t)(int);
typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
@ -2446,6 +2518,7 @@ static volatile jint pending_signals[NSIG+1] = { 0 };
// Linux(POSIX) specific hand shaking semaphore.
static sem_t sig_sem;
static Semaphore sr_semaphore;
void os::signal_init_pd() {
// Initialize signal structures
@ -2557,11 +2630,49 @@ void linux_wrap_code(char* base, size_t size) {
}
}
static bool recoverable_mmap_error(int err) {
// See if the error is one we can let the caller handle. This
// list of errno values comes from JBS-6843484. I can't find a
// Linux man page that documents this specific set of errno
// values so while this list currently matches Solaris, it may
// change as we gain experience with this failure mode.
switch (err) {
case EBADF:
case EINVAL:
case ENOTSUP:
// let the caller deal with these errors
return true;
default:
// Any remaining errors on this OS can cause our reserved mapping
// to be lost. That can cause confusion where different data
// structures think they have the same memory mapped. The worst
// scenario is if both the VM and a library think they have the
// same memory mapped.
return false;
}
}
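In other words — and this is a gloss, not part of the change — EBADF, EINVAL and ENOTSUP are presumably rejections of the request itself, after which the existing reservation is still intact, whereas a failure like ENOMEM on a MAP_FIXED commit may mean the old mapping was already torn down, leaving an address hole that an unrelated allocation could claim.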
static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
int err) {
warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
", %d) failed; error='%s' (errno=%d)", addr, size, exec,
strerror(err), err);
}
static void warn_fail_commit_memory(char* addr, size_t size,
size_t alignment_hint, bool exec,
int err) {
warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, size,
alignment_hint, exec, strerror(err), err);
}
// NOTE: Linux kernel does not really reserve the pages for us.
// All it does is to check if there are enough free pages
// left at the time of mmap(). This could be a potential
// problem.
bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
int os::Linux::commit_memory_impl(char* addr, size_t size, bool exec) {
int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
@ -2569,9 +2680,32 @@ bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
if (UseNUMAInterleaving) {
numa_make_global(addr, size);
}
return true;
return 0;
}
int err = errno; // save errno from mmap() call above
if (!recoverable_mmap_error(err)) {
warn_fail_commit_memory(addr, size, exec, err);
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "committing reserved memory.");
}
return err;
}
bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
return os::Linux::commit_memory_impl(addr, size, exec) == 0;
}
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
const char* mesg) {
assert(mesg != NULL, "mesg must be specified");
int err = os::Linux::commit_memory_impl(addr, size, exec);
if (err != 0) {
// the caller wants all commit errors to exit with the specified mesg:
warn_fail_commit_memory(addr, size, exec, err);
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
}
return false;
}
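The split into commit_memory_impl() plus two public wrappers gives callers a choice of failure policy. A hypothetical pair of call sites — sketch only, commit_examples and the surrounding names are illustrative:
// Sketch only: hypothetical call sites, not part of this change.
// Assumes os:: wrappers matching the pd_ signatures above.
static void commit_examples(char* addr, size_t size) {
  // Caller that can recover: test the boolean result and back out.
  if (!os::commit_memory(addr, size, !ExecMem)) {
    // e.g. shrink the requested expansion and retry with less
  }

  // Caller for which failure is fatal: the VM exits with the message.
  os::commit_memory_or_exit(addr, size, !ExecMem, "committing Java heap");
}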
// Define MAP_HUGETLB here so we can build HotSpot on old systems.
@ -2584,8 +2718,9 @@ bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
#define MADV_HUGEPAGE 14
#endif
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
bool exec) {
int os::Linux::commit_memory_impl(char* addr, size_t size,
size_t alignment_hint, bool exec) {
int err;
if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
uintptr_t res =
@ -2596,16 +2731,46 @@ bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
if (UseNUMAInterleaving) {
numa_make_global(addr, size);
}
return true;
return 0;
}
err = errno; // save errno from mmap() call above
if (!recoverable_mmap_error(err)) {
// However, it is not clear that this loss of our reserved mapping
// happens with large pages on Linux or that we cannot recover
// from the loss. For now, we just issue a warning and we don't
// call vm_exit_out_of_memory(). This issue is being tracked by
// JBS-8007074.
warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
// vm_exit_out_of_memory(size, OOM_MMAP_ERROR,
// "committing reserved memory.");
}
// Fall through and try to use small pages
}
if (commit_memory(addr, size, exec)) {
err = os::Linux::commit_memory_impl(addr, size, exec);
if (err == 0) {
realign_memory(addr, size, alignment_hint);
return true;
}
return false;
return err;
}
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
bool exec) {
return os::Linux::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
}
void os::pd_commit_memory_or_exit(char* addr, size_t size,
size_t alignment_hint, bool exec,
const char* mesg) {
assert(mesg != NULL, "mesg must be specified");
int err = os::Linux::commit_memory_impl(addr, size, alignment_hint, exec);
if (err != 0) {
// the caller wants all commit errors to exit with the specified mesg:
warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
}
}
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
@ -2623,7 +2788,7 @@ void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
// small pages on top of the SHM segment. This method always works for small pages, so we
// allow that in any case.
if (alignment_hint <= (size_t)os::vm_page_size() || !UseSHM) {
commit_memory(addr, bytes, alignment_hint, false);
commit_memory(addr, bytes, alignment_hint, !ExecMem);
}
}
@ -2876,7 +3041,7 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
::munmap((void*)stack_extent, (uintptr_t)addr - stack_extent);
}
return os::commit_memory(addr, size);
return os::commit_memory(addr, size, !ExecMem);
}
// If this is a growable mapping, remove the guard pages entirely by
@ -2998,7 +3163,7 @@ bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
-1, 0);
if (p != (void *) -1) {
if (p != MAP_FAILED) {
// We don't know if this really is a huge page or not.
FILE *fp = fopen("/proc/self/maps", "r");
if (fp) {
@ -3216,22 +3381,21 @@ char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
}
// The memory is committed
address pc = CALLER_PC;
MemTracker::record_virtual_memory_reserve((address)addr, bytes, pc);
MemTracker::record_virtual_memory_commit((address)addr, bytes, pc);
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
return addr;
}
bool os::release_memory_special(char* base, size_t bytes) {
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
// detaching the SHM segment will also delete it, see reserve_memory_special()
int rslt = shmdt(base);
if (rslt == 0) {
MemTracker::record_virtual_memory_uncommit((address)base, bytes);
MemTracker::record_virtual_memory_release((address)base, bytes);
tkr.record((address)base, bytes);
return true;
} else {
return false;
tkr.discard();
return false;
}
}
@ -3559,9 +3723,6 @@ void os::hint_no_preempt() {}
static void resume_clear_context(OSThread *osthread) {
osthread->set_ucontext(NULL);
osthread->set_siginfo(NULL);
// notify the suspend action is completed, we have now resumed
osthread->sr.clear_suspended();
}
static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
@ -3581,7 +3742,7 @@ static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontex
// its signal handlers run and prevents sigwait()'s use with the
// mutex granting the signal.
//
// Currently only ever called on the VMThread
// Currently only ever called on the VMThread and JavaThreads (PC sampling)
//
static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
// Save and restore errno to avoid confusing native code with EINTR
@ -3590,38 +3751,46 @@ static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
Thread* thread = Thread::current();
OSThread* osthread = thread->osthread();
assert(thread->is_VM_thread(), "Must be VMThread");
// read current suspend action
int action = osthread->sr.suspend_action();
if (action == os::Linux::SuspendResume::SR_SUSPEND) {
assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
os::SuspendResume::State current = osthread->sr.state();
if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
suspend_save_context(osthread, siginfo, context);
// Notify the suspend action is about to be completed. do_suspend()
// waits until SR_SUSPENDED is set and then returns. We will wait
// here for a resume signal and that completes the suspend-other
// action. do_suspend/do_resume is always called as a pair from
// the same thread - so there are no races
// attempt to switch the state, we assume we had a SUSPEND_REQUEST
os::SuspendResume::State state = osthread->sr.suspended();
if (state == os::SuspendResume::SR_SUSPENDED) {
sigset_t suspend_set; // signals for sigsuspend()
// notify the caller
osthread->sr.set_suspended();
// get current set of blocked signals and unblock resume signal
pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
sigdelset(&suspend_set, SR_signum);
sigset_t suspend_set; // signals for sigsuspend()
sr_semaphore.signal();
// wait here until we are resumed
while (1) {
sigsuspend(&suspend_set);
// get current set of blocked signals and unblock resume signal
pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
sigdelset(&suspend_set, SR_signum);
os::SuspendResume::State result = osthread->sr.running();
if (result == os::SuspendResume::SR_RUNNING) {
sr_semaphore.signal();
break;
}
}
// wait here until we are resumed
do {
sigsuspend(&suspend_set);
// ignore all returns until we get a resume signal
} while (osthread->sr.suspend_action() != os::Linux::SuspendResume::SR_CONTINUE);
} else if (state == os::SuspendResume::SR_RUNNING) {
// request was cancelled, continue
} else {
ShouldNotReachHere();
}
resume_clear_context(osthread);
} else if (current == os::SuspendResume::SR_RUNNING) {
// request was cancelled, continue
} else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
// ignore
} else {
assert(action == os::Linux::SuspendResume::SR_CONTINUE, "unexpected sr action");
// nothing special to do - just leave the handler
// ignore
}
errno = old_errno;
@ -3665,42 +3834,82 @@ static int SR_initialize() {
return 0;
}
static int sr_notify(OSThread* osthread) {
int status = pthread_kill(osthread->pthread_id(), SR_signum);
assert_status(status == 0, status, "pthread_kill");
return status;
}
// "Randomly" selected value for how long we want to spin
// before bailing out on suspending a thread, also how often
// we send a signal to a thread we want to resume
static const int RANDOMLY_LARGE_INTEGER = 1000000;
static const int RANDOMLY_LARGE_INTEGER2 = 100;
// returns true on success and false on error - really an error is fatal
// but this seems the normal response to library errors
static bool do_suspend(OSThread* osthread) {
// mark as suspended and send signal
osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_SUSPEND);
int status = pthread_kill(osthread->pthread_id(), SR_signum);
assert_status(status == 0, status, "pthread_kill");
assert(osthread->sr.is_running(), "thread should be running");
assert(!sr_semaphore.trywait(), "semaphore has invalid state");
// check status and wait until notified of suspension
if (status == 0) {
for (int i = 0; !osthread->sr.is_suspended(); i++) {
os::yield_all(i);
}
osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_NONE);
return true;
}
else {
osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_NONE);
// mark as suspended and send signal
if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
// failed to switch, state wasn't running?
ShouldNotReachHere();
return false;
}
if (sr_notify(osthread) != 0) {
ShouldNotReachHere();
}
// managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
while (true) {
if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
break;
} else {
// timeout
os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
if (cancelled == os::SuspendResume::SR_RUNNING) {
return false;
} else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
// make sure that we consume the signal on the semaphore as well
sr_semaphore.wait();
break;
} else {
ShouldNotReachHere();
return false;
}
}
}
guarantee(osthread->sr.is_suspended(), "Must be suspended");
return true;
}
static void do_resume(OSThread* osthread) {
assert(osthread->sr.is_suspended(), "thread should be suspended");
osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_CONTINUE);
assert(!sr_semaphore.trywait(), "invalid semaphore state");
int status = pthread_kill(osthread->pthread_id(), SR_signum);
assert_status(status == 0, status, "pthread_kill");
// check status and wait until notified of resumption
if (status == 0) {
for (int i = 0; osthread->sr.is_suspended(); i++) {
os::yield_all(i);
if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
// failed to switch to WAKEUP_REQUEST
ShouldNotReachHere();
return;
}
while (true) {
if (sr_notify(osthread) == 0) {
if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
if (osthread->sr.is_running()) {
return;
}
}
} else {
ShouldNotReachHere();
}
}
osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_NONE);
guarantee(osthread->sr.is_running(), "Must be running!");
}
////////////////////////////////////////////////////////////////////////////////
@ -4293,7 +4502,7 @@ jint os::init_2(void)
if (!UseMembar) {
address mem_serialize_page = (address) ::mmap(NULL, Linux::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
guarantee( mem_serialize_page != MAP_FAILED, "mmap Failed for memory serialize page");
os::set_memory_serialize_page( mem_serialize_page );
#ifndef PRODUCT
@ -4472,6 +4681,40 @@ bool os::bind_to_processor(uint processor_id) {
///
void os::SuspendedThreadTask::internal_do_task() {
if (do_suspend(_thread->osthread())) {
SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
do_task(context);
do_resume(_thread->osthread());
}
}
class PcFetcher : public os::SuspendedThreadTask {
public:
PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
ExtendedPC result();
protected:
void do_task(const os::SuspendedThreadTaskContext& context);
private:
ExtendedPC _epc;
};
ExtendedPC PcFetcher::result() {
guarantee(is_done(), "task is not done yet.");
return _epc;
}
void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
Thread* thread = context.thread();
OSThread* osthread = thread->osthread();
if (osthread->ucontext() != NULL) {
_epc = os::Linux::ucontext_get_pc((ucontext_t *) context.ucontext());
} else {
// NULL context is unexpected, double-check this is the VMThread
guarantee(thread->is_VM_thread(), "can only be called for VMThread");
}
}
// Suspends the target using the signal mechanism and then grabs the PC before
// resuming the target. Used by the flat-profiler only
ExtendedPC os::get_thread_pc(Thread* thread) {
@ -4479,22 +4722,9 @@ ExtendedPC os::get_thread_pc(Thread* thread) {
assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
assert(thread->is_VM_thread(), "Can only be called for VMThread");
ExtendedPC epc;
OSThread* osthread = thread->osthread();
if (do_suspend(osthread)) {
if (osthread->ucontext() != NULL) {
epc = os::Linux::ucontext_get_pc(osthread->ucontext());
} else {
// NULL context is unexpected, double-check this is the VMThread
guarantee(thread->is_VM_thread(), "can only be called for VMThread");
}
do_resume(osthread);
}
// failure means pthread_kill failed for some reason - arguably this is
// a fatal problem, but such problems are ignored elsewhere
return epc;
PcFetcher fetcher(thread);
fetcher.run();
return fetcher.result();
}
int os::Linux::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime)
@ -4518,20 +4748,20 @@ int os::Linux::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mute
bool os::find(address addr, outputStream* st) {
Dl_info dlinfo;
memset(&dlinfo, 0, sizeof(dlinfo));
if (dladdr(addr, &dlinfo)) {
if (dladdr(addr, &dlinfo) != 0) {
st->print(PTR_FORMAT ": ", addr);
if (dlinfo.dli_sname != NULL) {
if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
st->print("%s+%#x", dlinfo.dli_sname,
addr - (intptr_t)dlinfo.dli_saddr);
} else if (dlinfo.dli_fname) {
} else if (dlinfo.dli_fbase != NULL) {
st->print("<offset %#x>", addr - (intptr_t)dlinfo.dli_fbase);
} else {
st->print("<absolute address>");
}
if (dlinfo.dli_fname) {
if (dlinfo.dli_fname != NULL) {
st->print(" in %s", dlinfo.dli_fname);
}
if (dlinfo.dli_fbase) {
if (dlinfo.dli_fbase != NULL) {
st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
}
st->cr();
@ -4544,7 +4774,7 @@ bool os::find(address addr, outputStream* st) {
if (!lowest) lowest = (address) dlinfo.dli_fbase;
if (begin < lowest) begin = lowest;
Dl_info dlinfo2;
if (dladdr(end, &dlinfo2) && dlinfo2.dli_saddr != dlinfo.dli_saddr
if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
&& end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
end = (address) dlinfo2.dli_saddr;
Disassembler::decode(begin, end, st);
@ -5616,4 +5846,5 @@ void MemNotifyThread::start() {
new MemNotifyThread(fd);
}
}
#endif // JAVASE_EMBEDDED

@ -76,6 +76,10 @@ class Linux {
static julong physical_memory() { return _physical_memory; }
static void initialize_system_info();
static int commit_memory_impl(char* addr, size_t bytes, bool exec);
static int commit_memory_impl(char* addr, size_t bytes,
size_t alignment_hint, bool exec);
static void set_glibc_version(const char *s) { _glibc_version = s; }
static void set_libpthread_version(const char *s) { _libpthread_version = s; }
@ -210,35 +214,6 @@ class Linux {
// LinuxThreads work-around for 6292965
static int safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime);
// Linux suspend/resume support - this helper is a shadow of its former
// self now that low-level suspension is barely used, and old workarounds
// for LinuxThreads are no longer needed.
class SuspendResume {
private:
volatile int _suspend_action;
volatile jint _state;
public:
// values for suspend_action:
enum {
SR_NONE = 0x00,
SR_SUSPEND = 0x01, // suspend request
SR_CONTINUE = 0x02, // resume request
SR_SUSPENDED = 0x20 // values for _state: + SR_NONE
};
SuspendResume() { _suspend_action = SR_NONE; _state = SR_NONE; }
int suspend_action() const { return _suspend_action; }
void set_suspend_action(int x) { _suspend_action = x; }
// atomic updates for _state
inline void set_suspended();
inline void clear_suspended();
bool is_suspended() { return _state & SR_SUSPENDED; }
};
private:
typedef int (*sched_getcpu_func_t)(void);
typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
@ -333,6 +308,6 @@ class PlatformParker : public CHeapObj<mtInternal> {
status = pthread_mutex_init (_mutex, NULL);
assert_status(status == 0, status, "mutex_init");
}
} ;
};
#endif // OS_LINUX_VM_OS_LINUX_HPP

@ -288,20 +288,4 @@ inline int os::set_sock_opt(int fd, int level, int optname,
return ::setsockopt(fd, level, optname, optval, optlen);
}
inline void os::Linux::SuspendResume::set_suspended() {
jint temp, temp2;
do {
temp = _state;
temp2 = Atomic::cmpxchg(temp | SR_SUSPENDED, &_state, temp);
} while (temp2 != temp);
}
inline void os::Linux::SuspendResume::clear_suspended() {
jint temp, temp2;
do {
temp = _state;
temp2 = Atomic::cmpxchg(temp & ~SR_SUSPENDED, &_state, temp);
} while (temp2 != temp);
}
#endif // OS_LINUX_VM_OS_LINUX_INLINE_HPP

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -60,7 +60,7 @@ static char* create_standard_memory(size_t size) {
}
// commit memory
if (!os::commit_memory(mapAddress, size)) {
if (!os::commit_memory(mapAddress, size, !ExecMem)) {
if (PrintMiscellaneous && Verbose) {
warning("Could not commit PerfData memory\n");
}
@ -120,7 +120,7 @@ static void save_memory_to_file(char* addr, size_t size) {
addr += result;
}
RESTARTABLE(::close(fd), result);
result = ::close(fd);
if (PrintMiscellaneous && Verbose) {
if (result == OS_ERR) {
warning("Could not close %s: %s\n", destfile, strerror(errno));
@ -632,7 +632,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
if (PrintMiscellaneous && Verbose) {
warning("could not set shared memory file size: %s\n", strerror(errno));
}
RESTARTABLE(::close(fd), result);
::close(fd);
return -1;
}
@ -656,7 +656,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
if (result != -1) {
return fd;
} else {
RESTARTABLE(::close(fd), result);
::close(fd);
return -1;
}
}
@ -734,9 +734,7 @@ static char* mmap_create_shared(size_t size) {
mapAddress = (char*)::mmap((char*)0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
// attempt to close the file - restart it if it was interrupted,
// but ignore other failures
RESTARTABLE(::close(fd), result);
result = ::close(fd);
assert(result != OS_ERR, "could not close file");
if (mapAddress == MAP_FAILED) {
@ -755,8 +753,7 @@ static char* mmap_create_shared(size_t size) {
(void)::memset((void*) mapAddress, 0, size);
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
return mapAddress;
}
@ -907,9 +904,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
mapAddress = (char*)::mmap((char*)0, size, mmap_prot, MAP_SHARED, fd, 0);
// attempt to close the file - restart if it gets interrupted,
// but ignore other failures
RESTARTABLE(::close(fd), result);
result = ::close(fd);
assert(result != OS_ERR, "could not close file");
if (mapAddress == MAP_FAILED) {
@ -921,8 +916,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
}
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
*addr = mapAddress;
*sizep = size;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -332,12 +332,15 @@ dtrace:helper:ustack:
this->nameSymbol = copyin_ptr(this->constantPool +
this->nameIndex * sizeof (pointer) + SIZE_ConstantPool);
/* The symbol is a CPSlot and has lower bit set to indicate metadata */
this->nameSymbol &= (~1); /* remove metadata lsb */
this->nameSymbolLength = copyin_uint16(this->nameSymbol +
OFFSET_Symbol_length);
this->signatureSymbol = copyin_ptr(this->constantPool +
this->signatureIndex * sizeof (pointer) + SIZE_ConstantPool);
this->signatureSymbol &= (~1); /* remove metadata lsb */
this->signatureSymbolLength = copyin_uint16(this->signatureSymbol +
OFFSET_Symbol_length);

@ -122,9 +122,7 @@ static int file_open(const char* path, int flag) {
}
static int file_close(int fd) {
int ret;
RESTARTABLE(close(fd), ret);
return ret;
return close(fd);
}
static int file_read(int fd, char* buf, int len) {

@ -392,7 +392,7 @@ int SolarisAttachListener::create_door() {
return -1;
}
assert(fd >= 0, "bad file descriptor");
RESTARTABLE(::close(fd), res);
::close(fd);
// attach the door descriptor to the file
if ((res = ::fattach(dd, initial_path)) == -1) {
@ -410,7 +410,7 @@ int SolarisAttachListener::create_door() {
// rename file so that clients can attach
if (dd >= 0) {
if (::rename(initial_path, door_path) == -1) {
RESTARTABLE(::close(dd), res);
::close(dd);
::fdetach(initial_path);
dd = -1;
}
@ -549,7 +549,7 @@ void SolarisAttachOperation::complete(jint res, bufferedStream* st) {
}
// close socket and we're done
RESTARTABLE(::close(this->socket()), rc);
::close(this->socket());
// were we externally suspended while we were waiting?
thread->check_and_wait_while_suspended();

@ -30,15 +30,6 @@
//
#define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
\
product(bool, UseISM, false, \
"Use Intimate Shared Memory (Solaris Only)") \
\
product(bool, UsePermISM, false, \
"Obsolete flag for compatibility (same as UseISM)") \
\
product(bool, UseMPSS, true, \
"Use Multiple Page Size Support (Solaris 9 Only)") \
\
product(bool, UseExtendedFileIO, true, \
"Enable workaround for limitations of stdio FILE structure")

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,10 +41,6 @@ void OSThread::pd_initialize() {
_thread_id = 0;
sigemptyset(&_caller_sigmask);
_current_callback = NULL;
_current_callback_lock = VM_Version::supports_compare_and_exchange() ? NULL
: new Mutex(Mutex::suspend_resume, "Callback_lock", true);
_saved_interrupt_thread_state = _thread_new;
_vm_created_thread = false;
}
@ -52,172 +48,6 @@ void OSThread::pd_initialize() {
void OSThread::pd_destroy() {
}
// Synchronous interrupt support
//
// _current_callback == NULL no pending callback
// == 1 callback_in_progress
// == other value pointer to the pending callback
//
// CAS on v8 is implemented by using a global atomic_memory_operation_lock,
// which is shared by other atomic functions. It is OK for normal uses, but
// dangerous if used after some thread is suspended or if used in signal
// handlers. Instead here we use a special per-thread lock to synchronize
// updating _current_callback if we are running on v8. Note in general trying
// to grab locks after a thread is suspended is not safe, but it is safe for
// updating _current_callback, because synchronous interrupt callbacks are
// currently only used in:
// 1. GetThreadPC_Callback - used by WatcherThread to profile VM thread
// There is no overlap between the callbacks, which means we won't try to
// grab a thread's sync lock after the thread has been suspended while holding
// the same lock.
// used after a thread is suspended
static intptr_t compare_and_exchange_current_callback (
intptr_t callback, intptr_t *addr, intptr_t compare_value, Mutex *sync) {
if (VM_Version::supports_compare_and_exchange()) {
return Atomic::cmpxchg_ptr(callback, addr, compare_value);
} else {
MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
if (*addr == compare_value) {
*addr = callback;
return compare_value;
} else {
return callback;
}
}
}
// used in signal handler
static intptr_t exchange_current_callback(intptr_t callback, intptr_t *addr, Mutex *sync) {
if (VM_Version::supports_compare_and_exchange()) {
return Atomic::xchg_ptr(callback, addr);
} else {
MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
intptr_t cb = *addr;
*addr = callback;
return cb;
}
}
// one interrupt at a time. spin if _current_callback != NULL
int OSThread::set_interrupt_callback(Sync_Interrupt_Callback * cb) {
int count = 0;
while (compare_and_exchange_current_callback(
(intptr_t)cb, (intptr_t *)&_current_callback, (intptr_t)NULL, _current_callback_lock) != NULL) {
while (_current_callback != NULL) {
count++;
#ifdef ASSERT
if ((WarnOnStalledSpinLock > 0) &&
(count % WarnOnStalledSpinLock == 0)) {
warning("_current_callback seems to be stalled: %p", _current_callback);
}
#endif
os::yield_all(count);
}
}
return 0;
}
// reset _current_callback, spin if _current_callback is callback_in_progress
void OSThread::remove_interrupt_callback(Sync_Interrupt_Callback * cb) {
int count = 0;
while (compare_and_exchange_current_callback(
(intptr_t)NULL, (intptr_t *)&_current_callback, (intptr_t)cb, _current_callback_lock) != (intptr_t)cb) {
#ifdef ASSERT
intptr_t p = (intptr_t)_current_callback;
assert(p == (intptr_t)callback_in_progress ||
p == (intptr_t)cb, "wrong _current_callback value");
#endif
while (_current_callback != cb) {
count++;
#ifdef ASSERT
if ((WarnOnStalledSpinLock > 0) &&
(count % WarnOnStalledSpinLock == 0)) {
warning("_current_callback seems to be stalled: %p", _current_callback);
}
#endif
os::yield_all(count);
}
}
}
void OSThread::do_interrupt_callbacks_at_interrupt(InterruptArguments *args) {
Sync_Interrupt_Callback * cb;
cb = (Sync_Interrupt_Callback *)exchange_current_callback(
(intptr_t)callback_in_progress, (intptr_t *)&_current_callback, _current_callback_lock);
if (cb == NULL) {
// signal is delivered too late (thread is masking interrupt signal??).
// there is nothing we need to do because requesting thread has given up.
} else if ((intptr_t)cb == (intptr_t)callback_in_progress) {
fatal("invalid _current_callback state");
} else {
assert(cb->target()->osthread() == this, "wrong target");
cb->execute(args);
cb->leave_callback(); // notify the requester
}
// restore original _current_callback value
intptr_t p;
p = exchange_current_callback((intptr_t)cb, (intptr_t *)&_current_callback, _current_callback_lock);
assert(p == (intptr_t)callback_in_progress, "just checking");
}
// Called by the requesting thread to send a signal to target thread and
// execute "this" callback from the signal handler.
int OSThread::Sync_Interrupt_Callback::interrupt(Thread * target, int timeout) {
// Let signals to the vm_thread go even if the Threads_lock is not acquired
assert(Threads_lock->owned_by_self() || (target == VMThread::vm_thread()),
"must have threads lock to call this");
OSThread * osthread = target->osthread();
// may block if target thread already has a pending callback
osthread->set_interrupt_callback(this);
_target = target;
int rslt = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
assert(rslt == 0, "thr_kill != 0");
bool status = false;
jlong t1 = os::javaTimeMillis();
{ // don't use safepoint check because we might be the watcher thread.
MutexLockerEx ml(_sync, Mutex::_no_safepoint_check_flag);
while (!is_done()) {
status = _sync->wait(Mutex::_no_safepoint_check_flag, timeout);
// status == true if timed out
if (status) break;
// update timeout
jlong t2 = os::javaTimeMillis();
timeout -= t2 - t1;
t1 = t2;
}
}
// reset current_callback
osthread->remove_interrupt_callback(this);
return status;
}
void OSThread::Sync_Interrupt_Callback::leave_callback() {
if (!_sync->owned_by_self()) {
// notify requesting thread
MutexLockerEx ml(_sync, Mutex::_no_safepoint_check_flag);
_is_done = true;
_sync->notify_all();
} else {
// Current thread is interrupted while it is holding the _sync lock, trying
// to grab it again will deadlock. The requester will time out anyway,
// so just return.
_is_done = true;
}
}
// copied from synchronizer.cpp
void OSThread::handle_spinlock_contention(int tries) {
@ -229,3 +59,7 @@ void OSThread::handle_spinlock_contention(int tries) {
os::yield(); // Yield to threads of same or higher priority
}
}
void OSThread::SR_handler(Thread* thread, ucontext_t* uc) {
os::Solaris::SR_handler(thread, uc);
}
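Taken together, these deletions retire the v8-era synchronous-interrupt callbacks: the Solaris suspend path now goes through the shared os::SuspendResume state (see the new os::SuspendResume sr field in the header diff below) and this thin SR_handler forwarder.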

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -72,61 +72,15 @@
// ***************************************************************
public:
class InterruptArguments : StackObj {
private:
Thread* _thread; // the thread to signal was dispatched to
ucontext_t* _ucontext; // the machine context at the time of the signal
public:
InterruptArguments(Thread* thread, ucontext_t* ucontext) {
_thread = thread;
_ucontext = ucontext;
}
Thread* thread() const { return _thread; }
ucontext_t* ucontext() const { return _ucontext; }
};
// There are currently no asynchronous callbacks - and we'd better not
// support them in the future either, as they need to be deallocated from
// the interrupt handler, which is not safe; they also require locks to
// protect the callback queue.
class Sync_Interrupt_Callback : private StackObj {
protected:
volatile bool _is_done;
Monitor* _sync;
Thread* _target;
public:
Sync_Interrupt_Callback(Monitor * sync) {
_is_done = false; _target = NULL; _sync = sync;
}
bool is_done() const { return _is_done; }
Thread* target() const { return _target; }
int interrupt(Thread * target, int timeout);
// override to implement the callback.
virtual void execute(InterruptArguments *args) = 0;
void leave_callback();
};
os::SuspendResume sr;
private:
Sync_Interrupt_Callback * volatile _current_callback;
enum {
callback_in_progress = 1
};
Mutex * _current_callback_lock; // only used on v8
ucontext_t* _ucontext;
public:
int set_interrupt_callback (Sync_Interrupt_Callback * cb);
void remove_interrupt_callback(Sync_Interrupt_Callback * cb);
void do_interrupt_callbacks_at_interrupt(InterruptArguments *args);
ucontext_t* ucontext() const { return _ucontext; }
void set_ucontext(ucontext_t* ptr) { _ucontext = ptr; }
static void SR_handler(Thread* thread, ucontext_t* uc);
// ***************************************************************
// java.lang.Thread.interrupt state.

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,28 +27,6 @@
// Defines the interfaces to Solaris operating systems that vary across platforms
// This is a simple callback that just fetches a PC for an interrupted thread.
// The thread need not be suspended and the fetched PC is just a hint.
// Returned PC and nPC are not necessarily consecutive.
// This one is currently used for profiling the VMThread ONLY!
// Must be synchronous
class GetThreadPC_Callback : public OSThread::Sync_Interrupt_Callback {
private:
ExtendedPC _addr;
public:
GetThreadPC_Callback(Monitor *sync) :
OSThread::Sync_Interrupt_Callback(sync) { }
ExtendedPC addr() const { return _addr; }
void set_addr(ExtendedPC addr) { _addr = addr; }
void execute(OSThread::InterruptArguments *args);
};
// misc
extern "C" {
void signalHandler(int, siginfo_t*, void*);

File diff suppressed because it is too large

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -106,8 +106,8 @@ class Solaris {
static meminfo_func_t _meminfo;
// Large Page Support--mpss.
static bool set_mpss_range(caddr_t start, size_t bytes, size_t align);
// Large Page Support
static bool setup_large_pages(caddr_t start, size_t bytes, size_t align);
static void init_thread_fpu_state(void);
@ -127,7 +127,6 @@ class Solaris {
static void set_SIGinterrupt(int newsig) { _SIGinterrupt = newsig; }
static void set_SIGasync(int newsig) { _SIGasync = newsig; }
public:
// Large Page Support--ISM.
static bool largepage_range(char* addr, size_t size);
@ -145,6 +144,7 @@ class Solaris {
static intptr_t* ucontext_get_sp(ucontext_t* uc);
// ucontext_get_fp() is only used by Solaris X86 (see note below)
static intptr_t* ucontext_get_fp(ucontext_t* uc);
static address ucontext_get_pc(ucontext_t* uc);
// For Analyzer Forte AsyncGetCallTrace profiling support:
// Parameter ret_fp is only used by Solaris X86.
@ -157,6 +157,8 @@ class Solaris {
static void hotspot_sigmask(Thread* thread);
// SR_handler
static void SR_handler(Thread* thread, ucontext_t* uc);
protected:
// Solaris-specific interface goes here
static julong available_memory();
@ -166,10 +168,12 @@ class Solaris {
static int _dev_zero_fd;
static int get_dev_zero_fd() { return _dev_zero_fd; }
static void set_dev_zero_fd(int fd) { _dev_zero_fd = fd; }
static int commit_memory_impl(char* addr, size_t bytes, bool exec);
static int commit_memory_impl(char* addr, size_t bytes,
size_t alignment_hint, bool exec);
static char* mmap_chunk(char *addr, size_t size, int flags, int prot);
static char* anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed);
static bool mpss_sanity_check(bool warn, size_t * page_size);
static bool ism_sanity_check (bool warn, size_t * page_size);
// Workaround for 4352906. thr_stksegment sometimes returns
// a bad value for the primordial thread's stack base when

@ -89,7 +89,7 @@ inline int os::readdir_buf_size(const char *path) {
inline struct dirent* os::readdir(DIR* dirp, dirent* dbuf) {
assert(dirp != NULL, "just checking");
#if defined(_LP64) || defined(_GNU_SOURCE)
#if defined(_LP64) || defined(_GNU_SOURCE) || _FILE_OFFSET_BITS==64
dirent* p;
int status;
@ -98,9 +98,9 @@ inline struct dirent* os::readdir(DIR* dirp, dirent* dbuf) {
return NULL;
} else
return p;
#else // defined(_LP64) || defined(_GNU_SOURCE)
#else // defined(_LP64) || defined(_GNU_SOURCE) || _FILE_OFFSET_BITS==64
return ::readdir_r(dirp, dbuf);
#endif // defined(_LP64) || defined(_GNU_SOURCE)
#endif // defined(_LP64) || defined(_GNU_SOURCE) || _FILE_OFFSET_BITS==64
}
inline int os::closedir(DIR *dirp) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -62,7 +62,7 @@ static char* create_standard_memory(size_t size) {
}
// commit memory
if (!os::commit_memory(mapAddress, size)) {
if (!os::commit_memory(mapAddress, size, !ExecMem)) {
if (PrintMiscellaneous && Verbose) {
warning("Could not commit PerfData memory\n");
}
@ -122,7 +122,7 @@ static void save_memory_to_file(char* addr, size_t size) {
addr += result;
}
RESTARTABLE(::close(fd), result);
result = ::close(fd);
if (PrintMiscellaneous && Verbose) {
if (result == OS_ERR) {
warning("Could not close %s: %s\n", destfile, strerror(errno));
@ -437,7 +437,7 @@ static char* get_user_name(int vmid, TRAPS) {
addr+=result;
}
RESTARTABLE(::close(fd), result);
::close(fd);
// get the user name for the effective user id of the process
char* user_name = get_user_name(psinfo.pr_euid);
@ -669,7 +669,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
if (PrintMiscellaneous && Verbose) {
warning("could not set shared memory file size: %s\n", strerror(errno));
}
RESTARTABLE(::close(fd), result);
::close(fd);
return -1;
}
@ -749,9 +749,7 @@ static char* mmap_create_shared(size_t size) {
mapAddress = (char*)::mmap((char*)0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
// attempt to close the file - restart it if it was interrupted,
// but ignore other failures
RESTARTABLE(::close(fd), result);
result = ::close(fd);
assert(result != OS_ERR, "could not close file");
if (mapAddress == MAP_FAILED) {
@ -770,8 +768,7 @@ static char* mmap_create_shared(size_t size) {
(void)::memset((void*) mapAddress, 0, size);
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
return mapAddress;
}
@ -922,9 +919,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
mapAddress = (char*)::mmap((char*)0, size, mmap_prot, MAP_SHARED, fd, 0);
// attempt to close the file - restart if it gets interrupted,
// but ignore other failures
RESTARTABLE(::close(fd), result);
result = ::close(fd);
assert(result != OS_ERR, "could not close file");
if (mapAddress == MAP_FAILED) {
@ -936,8 +931,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
}
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
*addr = mapAddress;
*sizep = size;

@ -1420,34 +1420,40 @@ static int _locate_module_by_addr(int pid, char * mod_fname, address base_addr,
bool os::dll_address_to_library_name(address addr, char* buf,
int buflen, int* offset) {
// buf is not optional, but offset is optional
assert(buf != NULL, "sanity check");
// NOTE: the reason we don't use SymGetModuleInfo() is that it doesn't
// always return the full path to the DLL file: sometimes it returns the
// path to the corresponding PDB file (debug info), and sometimes it
// returns only a partial path, which makes life painful.
struct _modinfo mi;
mi.addr = addr;
mi.full_path = buf;
mi.buflen = buflen;
int pid = os::current_process_id();
if (enumerate_modules(pid, _locate_module_by_addr, (void *)&mi)) {
// buf already contains path name
if (offset) *offset = addr - mi.base_addr;
return true;
} else {
if (buf) buf[0] = '\0';
if (offset) *offset = -1;
return false;
}
struct _modinfo mi;
mi.addr = addr;
mi.full_path = buf;
mi.buflen = buflen;
int pid = os::current_process_id();
if (enumerate_modules(pid, _locate_module_by_addr, (void *)&mi)) {
// buf already contains path name
if (offset) *offset = addr - mi.base_addr;
return true;
}
buf[0] = '\0';
if (offset) *offset = -1;
return false;
}
bool os::dll_address_to_function_name(address addr, char *buf,
int buflen, int *offset) {
// buf is not optional, but offset is optional
assert(buf != NULL, "sanity check");
if (Decoder::decode(addr, buf, buflen, offset)) {
return true;
}
if (offset != NULL) *offset = -1;
if (buf != NULL) buf[0] = '\0';
buf[0] = '\0';
return false;
}
@ -2317,6 +2323,11 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
#endif
Thread* t = ThreadLocalStorage::get_thread_slow(); // slow & steady
// Handle SafeFetch32 and SafeFetchN exceptions.
if (StubRoutines::is_safefetch_fault(pc)) {
return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
}
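SafeFetch32/SafeFetchN exist to read a word that may be unmapped: if the load faults, the handler redirects execution to a continuation stub and the caller gets its error value back. A usage sketch (the wrapper is hypothetical; the SafeFetch32 prototype matches the assembly comments later in this diff):

extern "C" int SafeFetch32(int* adr, int errValue);

static bool is_readable_word(int* adr) {
  const int errval = 0x5AFEF00D;  // arbitrary sentinel; a real *adr equal
                                  // to it would give a false negative
  // a fault inside SafeFetch32 resumes at the stub's continuation and
  // yields errval instead of crashing the VM
  return SafeFetch32(adr, errval) != errval;
}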
#ifndef _WIN64
// Execution protection violation - win32 running on AMD64 only
// Handled first to avoid misdiagnosis as a "normal" access violation;
@ -2524,7 +2535,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
addr = (address)((uintptr_t)addr &
(~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
os::commit_memory((char *)addr, thread->stack_base() - addr,
false );
!ExecMem);
return EXCEPTION_CONTINUE_EXECUTION;
}
else
@ -2689,6 +2700,19 @@ address os::win32::fast_jni_accessor_wrapper(BasicType type) {
}
#endif
#ifndef PRODUCT
void os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) {
// Install a win32 structured exception handler around the test
// function call so the VM can generate an error dump if needed.
__try {
(*funcPtr)();
} __except(topLevelExceptionFilter(
(_EXCEPTION_POINTERS*)_exception_info())) {
// Nothing to do.
}
}
#endif
// Virtual Memory
int os::vm_page_size() { return os::win32::vm_page_size(); }
@ -2875,7 +2899,7 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
PAGE_READWRITE);
// If reservation failed, return NULL
if (p_buf == NULL) return NULL;
MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, mtNone, CALLER_PC);
os::release_memory(p_buf, bytes + chunk_size);
// we still need to round up to a page boundary (in case we are using large pages)
@ -2941,7 +2965,7 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
// need to create a dummy 'reserve' record to match
// the release.
MemTracker::record_virtual_memory_reserve((address)p_buf,
bytes_to_release, CALLER_PC);
bytes_to_release, mtNone, CALLER_PC);
os::release_memory(p_buf, bytes_to_release);
}
#ifdef ASSERT
@ -2961,9 +2985,10 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
// Although the memory is allocated individually, it is returned as one.
// NMT records it as one block.
address pc = CALLER_PC;
MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, pc);
if ((flags & MEM_COMMIT) != 0) {
MemTracker::record_virtual_memory_commit((address)p_buf, bytes, pc);
MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, mtNone, pc);
} else {
MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, mtNone, pc);
}
// made it this far, success
@ -3154,8 +3179,7 @@ char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {
char * res = (char *)VirtualAlloc(NULL, bytes, flag, prot);
if (res != NULL) {
address pc = CALLER_PC;
MemTracker::record_virtual_memory_reserve((address)res, bytes, pc);
MemTracker::record_virtual_memory_commit((address)res, bytes, pc);
MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc);
}
return res;
@ -3164,14 +3188,21 @@ char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {
bool os::release_memory_special(char* base, size_t bytes) {
assert(base != NULL, "Sanity check");
// Memory allocated via reserve_memory_special() is committed
MemTracker::record_virtual_memory_uncommit((address)base, bytes);
return release_memory(base, bytes);
}
void os::print_statistics() {
}
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
int err = os::get_last_error();
char buf[256];
size_t buf_len = os::lasterror(buf, sizeof(buf));
warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
exec, buf_len != 0 ? buf : "<no_error_string>", err);
}
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
if (bytes == 0) {
// Don't bother the OS with noops.
@ -3186,11 +3217,17 @@ bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
// is always within a reserve covered by a single VirtualAlloc
// in that case we can just do a single commit for the requested size
if (!UseNUMAInterleaving) {
if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) return false;
if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
return false;
}
if (exec) {
DWORD oldprot;
// Windows doc says to use VirtualProtect to get execute permissions
if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) return false;
if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
return false;
}
}
return true;
} else {
@ -3205,12 +3242,20 @@ bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
MEMORY_BASIC_INFORMATION alloc_info;
VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT, PAGE_READWRITE) == NULL)
if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
PAGE_READWRITE) == NULL) {
NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
exec);)
return false;
}
if (exec) {
DWORD oldprot;
if (!VirtualProtect(next_alloc_addr, bytes_to_rq, PAGE_EXECUTE_READWRITE, &oldprot))
if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
PAGE_EXECUTE_READWRITE, &oldprot)) {
NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
exec);)
return false;
}
}
bytes_remaining -= bytes_to_rq;
next_alloc_addr += bytes_to_rq;
@ -3222,7 +3267,24 @@ bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
bool exec) {
return commit_memory(addr, size, exec);
// alignment_hint is ignored on this OS
return pd_commit_memory(addr, size, exec);
}
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
const char* mesg) {
assert(mesg != NULL, "mesg must be specified");
if (!pd_commit_memory(addr, size, exec)) {
warn_fail_commit_memory(addr, size, exec);
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
}
}
void os::pd_commit_memory_or_exit(char* addr, size_t size,
size_t alignment_hint, bool exec,
const char* mesg) {
// alignment_hint is ignored on this OS
pd_commit_memory_or_exit(addr, size, exec, mesg);
}
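The new or_exit variants remove return-code plumbing from callers that cannot recover; a sketch of the two styles side by side (both patterns appear elsewhere in this diff):

// recoverable caller: keep the bool-returning form and fall back
if (!os::commit_memory(addr, size, !ExecMem)) {
  warning("Could not commit memory\n");
  return NULL;
}

// unrecoverable caller: on failure this warns via
// warn_fail_commit_memory() and exits with an OOM message
os::commit_memory_or_exit(addr, size, !ExecMem,
                          "cannot commit protection page");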
bool os::pd_uncommit_memory(char* addr, size_t bytes) {
@ -3240,7 +3302,7 @@ bool os::pd_release_memory(char* addr, size_t bytes) {
}
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
return os::commit_memory(addr, size);
return os::commit_memory(addr, size, !ExecMem);
}
bool os::remove_stack_guard_pages(char* addr, size_t size) {
@ -3264,8 +3326,9 @@ bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
// Strangely enough, on Win32 one can change protection only for committed
// memory; not a big deal anyway, as bytes is at most 64K
if (!is_committed && !commit_memory(addr, bytes, prot == MEM_PROT_RWX)) {
fatal("cannot commit protection page");
if (!is_committed) {
commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
"cannot commit protection page");
}
// One cannot use os::guard_memory() here, as on Win32 guard pages
// have different (one-shot) semantics; from MSDN on PAGE_GUARD:
@ -5048,6 +5111,71 @@ int os::set_sock_opt(int fd, int level, int optname,
return ::setsockopt(fd, level, optname, optval, optlen);
}
// WINDOWS CONTEXT Flags for THREAD_SAMPLING
#if defined(IA32)
# define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
#elif defined (AMD64)
# define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
#endif
// returns true if the thread could be suspended,
// false otherwise
static bool do_suspend(HANDLE* h) {
if (h != NULL) {
if (SuspendThread(*h) != ~0) {
return true;
}
}
return false;
}
// resume the thread
// calling resume on an active thread is a no-op
static void do_resume(HANDLE* h) {
if (h != NULL) {
ResumeThread(*h);
}
}
// retrieve a handle for the tid that is capable of thread suspend/resume
// and context retrieval. The caller validates the returned handle.
void get_thread_handle_for_extended_context(HANDLE* h, OSThread::thread_id_t tid) {
if (h != NULL) {
*h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
}
}
//
// Thread sampling implementation
//
void os::SuspendedThreadTask::internal_do_task() {
CONTEXT ctxt;
HANDLE h = NULL;
// get context capable handle for thread
get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
// sanity
if (h == NULL || h == INVALID_HANDLE_VALUE) {
return;
}
// suspend the thread
if (do_suspend(&h)) {
ctxt.ContextFlags = sampling_context_flags;
// get thread context
GetThreadContext(h, &ctxt);
SuspendedThreadTaskContext context(_thread, &ctxt);
// pass context to Thread Sampling impl
do_task(context);
// resume thread
do_resume(&h);
}
// close handle
CloseHandle(h);
}
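The sampling task is the standard suspend, capture, resume sequence; GetThreadContext is only meaningful while the target stays suspended, which is why the resume happens after do_task(). A condensed sketch of the same flow (error handling trimmed):

HANDLE h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT |
                      THREAD_QUERY_INFORMATION, FALSE, tid);
if (h != NULL && h != INVALID_HANDLE_VALUE) {
  if (SuspendThread(h) != (DWORD)-1) {   // (DWORD)-1 signals failure
    CONTEXT ctxt;
    ctxt.ContextFlags = sampling_context_flags;
    GetThreadContext(h, &ctxt);          // reliable: target is suspended
    // ... hand ctxt to the sampler here ...
    ResumeThread(h);                     // no-op if already running
  }
  CloseHandle(h);
}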
// Kernel32 API
typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void);

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -94,6 +94,10 @@ class win32 {
static address fast_jni_accessor_wrapper(BasicType);
#endif
#ifndef PRODUCT
static void call_test_func_with_wrapper(void (*funcPtr)(void));
#endif
// filter function to ignore faults on the serialization page
static LONG WINAPI serialize_fault_filter(struct _EXCEPTION_POINTERS* e);
};

View file

@ -106,4 +106,10 @@ inline size_t os::write(int fd, const void *buf, unsigned int nBytes) {
inline int os::close(int fd) {
return ::close(fd);
}
#ifndef PRODUCT
#define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) \
os::win32::call_test_func_with_wrapper(f)
#endif
#endif // OS_WINDOWS_VM_OS_WINDOWS_INLINE_HPP
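Presumably the macro exists so shared test code can stay platform-neutral: on Windows it routes the call through the SEH wrapper defined above, while other platforms would define it as a plain call. A hypothetical call site (the test function name is invented for illustration):

extern void my_internal_vm_test();   // hypothetical

// expands to os::win32::call_test_func_with_wrapper(my_internal_vm_test),
// so a crash inside the test still produces a normal VM error dump
CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(my_internal_vm_test);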

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -58,7 +58,7 @@ static char* create_standard_memory(size_t size) {
}
// commit memory
if (!os::commit_memory(mapAddress, size)) {
if (!os::commit_memory(mapAddress, size, !ExecMem)) {
if (PrintMiscellaneous && Verbose) {
warning("Could not commit PerfData memory\n");
}
@ -1498,8 +1498,7 @@ static char* mapping_create_shared(size_t size) {
(void)memset(mapAddress, '\0', size);
// it does not go through the os api, so the operation has to be recorded from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
return (char*) mapAddress;
}
@ -1681,8 +1680,7 @@ static void open_file_mapping(const char* user, int vmid,
}
// it does not go through the os api, so the operation has to be recorded from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
*addrp = (char*)mapAddress;
@ -1836,9 +1834,10 @@ void PerfMemory::detach(char* addr, size_t bytes, TRAPS) {
return;
}
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
remove_file_mapping(addr);
// it does not go through the os api, so the operation has to be recorded from here
MemTracker::record_virtual_memory_release((address)addr, bytes);
tkr.record((address)addr, bytes);
}
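The detach path now obtains a Tracker before the unmap and records against it afterwards, instead of recording after the fact; presumably this keeps NMT's view consistent if another thread maps the same range in between. The pattern, isolated:

// acquire the tracker while the mapping still exists ...
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
remove_file_mapping(addr);          // platform-specific unmap
// ... then record the release against the saved tracker
tkr.record((address)addr, bytes);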
char* PerfMemory::backing_store_filename() {

View file

@ -63,24 +63,6 @@ SYMBOL(fixcw):
popl %eax
ret
.globl SYMBOL(SafeFetch32), SYMBOL(Fetch32PFI), SYMBOL(Fetch32Resume)
.globl SYMBOL(SafeFetchN)
## TODO: avoid exposing Fetch32PFI and Fetch32Resume.
## Instead, the signal handler would call a new SafeFetchTriage(FaultingEIP)
## routine to vet the address. If the address is the faulting LD then
## SafeFetchTriage() would return the resume-at EIP, otherwise null.
ELF_TYPE(SafeFetch32,@function)
.p2align 4,,15
SYMBOL(SafeFetch32):
SYMBOL(SafeFetchN):
movl 0x8(%esp), %eax
movl 0x4(%esp), %ecx
SYMBOL(Fetch32PFI):
movl (%ecx), %eax
SYMBOL(Fetch32Resume):
ret
.globl SYMBOL(SpinPause)
ELF_TYPE(SpinPause,@function)
.p2align 4,,15

View file

@ -46,28 +46,6 @@
.text
.globl SYMBOL(SafeFetch32), SYMBOL(Fetch32PFI), SYMBOL(Fetch32Resume)
.p2align 4,,15
ELF_TYPE(SafeFetch32,@function)
// Prototype: int SafeFetch32 (int * Adr, int ErrValue)
SYMBOL(SafeFetch32):
movl %esi, %eax
SYMBOL(Fetch32PFI):
movl (%rdi), %eax
SYMBOL(Fetch32Resume):
ret
.globl SYMBOL(SafeFetchN), SYMBOL(FetchNPFI), SYMBOL(FetchNResume)
.p2align 4,,15
ELF_TYPE(SafeFetchN,@function)
// Prototype: intptr_t SafeFetchN (intptr_t * Adr, intptr_t ErrValue)
SYMBOL(SafeFetchN):
movq %rsi, %rax
SYMBOL(FetchNPFI):
movq (%rdi), %rax
SYMBOL(FetchNResume):
ret
.globl SYMBOL(SpinPause)
.p2align 4,,15
ELF_TYPE(SpinPause,@function)

View file

@ -385,13 +385,6 @@ enum {
trap_page_fault = 0xE
};
extern "C" void Fetch32PFI () ;
extern "C" void Fetch32Resume () ;
#ifdef AMD64
extern "C" void FetchNPFI () ;
extern "C" void FetchNResume () ;
#endif // AMD64
extern "C" JNIEXPORT int
JVM_handle_bsd_signal(int sig,
siginfo_t* info,
@ -454,16 +447,10 @@ JVM_handle_bsd_signal(int sig,
if (info != NULL && uc != NULL && thread != NULL) {
pc = (address) os::Bsd::ucontext_get_pc(uc);
if (pc == (address) Fetch32PFI) {
uc->context_pc = intptr_t(Fetch32Resume) ;
return 1 ;
if (StubRoutines::is_safefetch_fault(pc)) {
uc->context_pc = intptr_t(StubRoutines::continuation_for_safefetch_fault(pc));
return 1;
}
#ifdef AMD64
if (pc == (address) FetchNPFI) {
uc->context_pc = intptr_t (FetchNResume) ;
return 1 ;
}
#endif // AMD64
// Handle ALL stack overflow variations here
if (sig == SIGSEGV || sig == SIGBUS) {

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,10 +30,16 @@
// currently interrupted by SIGPROF
bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
void* ucontext, bool isInJava) {
assert(Thread::current() == this, "caller must be current thread");
assert(this->is_Java_thread(), "must be JavaThread");
return pd_get_top_frame(fr_addr, ucontext, isInJava);
}
bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) {
return pd_get_top_frame(fr_addr, ucontext, isInJava);
}
bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) {
assert(this->is_Java_thread(), "must be JavaThread");
JavaThread* jt = (JavaThread *)this;
// If we have a last_Java_frame, then we should use it even if

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -61,6 +61,13 @@
bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
bool isInJava);
bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext,
bool isInJava);
private:
bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava);
public:
// These routines are only used on cpu architectures that
// have separate register stacks (Itanium).
static bool register_stack_overflow() { return false; }

View file

@ -169,7 +169,6 @@ inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong*
: "memory");
return rv;
#else
assert(VM_Version::v9_instructions_work(), "cas only supported on v9");
volatile jlong_accessor evl, cvl, rv;
evl.long_value = exchange_value;
cvl.long_value = compare_value;

View file

@ -21,42 +21,6 @@
# questions.
#
# Prototype: int SafeFetch32 (int * adr, int ErrValue)
# The "ld" at Fetch32 is potentially faulting instruction.
# If the instruction traps the trap handler will arrange
# for control to resume at Fetch32Resume.
# By convention with the trap handler we ensure there is a non-CTI
# instruction in the trap shadow.
.globl SafeFetch32, Fetch32PFI, Fetch32Resume
.globl SafeFetchN
.align 32
.type SafeFetch32,@function
SafeFetch32:
mov %o0, %g1
mov %o1, %o0
Fetch32PFI:
# <-- Potentially faulting instruction
ld [%g1], %o0
Fetch32Resume:
nop
retl
nop
.globl SafeFetchN, FetchNPFI, FetchNResume
.type SafeFetchN,@function
.align 32
SafeFetchN:
mov %o0, %g1
mov %o1, %o0
FetchNPFI:
ldn [%g1], %o0
FetchNResume:
nop
retl
nop
# Possibilities:
# -- membar
# -- CAS (SP + BIAS, G0, G0)

View file

@ -366,18 +366,9 @@ intptr_t* os::Linux::ucontext_get_fp(ucontext_t *uc) {
// Utility functions
extern "C" void Fetch32PFI();
extern "C" void Fetch32Resume();
extern "C" void FetchNPFI();
extern "C" void FetchNResume();
inline static bool checkPrefetch(sigcontext* uc, address pc) {
if (pc == (address) Fetch32PFI) {
set_cont_address(uc, address(Fetch32Resume));
return true;
}
if (pc == (address) FetchNPFI) {
set_cont_address(uc, address(FetchNResume));
if (StubRoutines::is_safefetch_fault(pc)) {
set_cont_address(uc, address(StubRoutines::continuation_for_safefetch_fault(pc)));
return true;
}
return false;
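Every platform handler now funnels through the same two StubRoutines queries instead of comparing against per-platform assembly labels. A sketch of what the helpers amount to (the real ones live in the StubRoutines class; the field names here are assumptions):

static bool is_safefetch_fault(address pc) {
  return pc != NULL &&
         (pc == StubRoutines::_safefetch32_fault_pc ||
          pc == StubRoutines::_safefetchN_fault_pc);
}

static address continuation_for_safefetch_fault(address pc) {
  return (pc == StubRoutines::_safefetch32_fault_pc)
             ? StubRoutines::_safefetch32_continuation_pc
             : StubRoutines::_safefetchN_continuation_pc;
}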

View file

@ -42,24 +42,6 @@
.text
.globl SafeFetch32, Fetch32PFI, Fetch32Resume
.globl SafeFetchN
## TODO: avoid exposing Fetch32PFI and Fetch32Resume.
## Instead, the signal handler would call a new SafeFetchTriage(FaultingEIP)
## routine to vet the address. If the address is the faulting LD then
## SafeFetchTriage() would return the resume-at EIP, otherwise null.
.type SafeFetch32,@function
.p2align 4,,15
SafeFetch32:
SafeFetchN:
movl 0x8(%esp), %eax
movl 0x4(%esp), %ecx
Fetch32PFI:
movl (%ecx), %eax
Fetch32Resume:
ret
.globl SpinPause
.type SpinPause,@function
.p2align 4,,15

View file

@ -38,28 +38,6 @@
.text
.globl SafeFetch32, Fetch32PFI, Fetch32Resume
.align 16
.type SafeFetch32,@function
// Prototype: int SafeFetch32 (int * Adr, int ErrValue)
SafeFetch32:
movl %esi, %eax
Fetch32PFI:
movl (%rdi), %eax
Fetch32Resume:
ret
.globl SafeFetchN, FetchNPFI, FetchNResume
.align 16
.type SafeFetchN,@function
// Prototype: intptr_t SafeFetchN (intptr_t * Adr, intptr_t ErrValue)
SafeFetchN:
movq %rsi, %rax
FetchNPFI:
movq (%rdi), %rax
FetchNResume:
ret
.globl SpinPause
.align 16
.type SpinPause,@function

View file

@ -209,13 +209,6 @@ enum {
trap_page_fault = 0xE
};
extern "C" void Fetch32PFI () ;
extern "C" void Fetch32Resume () ;
#ifdef AMD64
extern "C" void FetchNPFI () ;
extern "C" void FetchNResume () ;
#endif // AMD64
extern "C" JNIEXPORT int
JVM_handle_linux_signal(int sig,
siginfo_t* info,
@ -278,14 +271,18 @@ JVM_handle_linux_signal(int sig,
if (info != NULL && uc != NULL && thread != NULL) {
pc = (address) os::Linux::ucontext_get_pc(uc);
if (pc == (address) Fetch32PFI) {
uc->uc_mcontext.gregs[REG_PC] = intptr_t(Fetch32Resume) ;
return 1 ;
if (StubRoutines::is_safefetch_fault(pc)) {
uc->uc_mcontext.gregs[REG_PC] = intptr_t(StubRoutines::continuation_for_safefetch_fault(pc));
return 1;
}
#ifdef AMD64
if (pc == (address) FetchNPFI) {
uc->uc_mcontext.gregs[REG_PC] = intptr_t (FetchNResume) ;
return 1 ;
#ifndef AMD64
// Halt if SI_KERNEL before more crashes get misdiagnosed as Java bugs.
// This can happen in any running code (currently more frequently in
// interpreter code but has been seen in compiled code)
if (sig == SIGSEGV && info->si_addr == 0 && info->si_code == SI_KERNEL) {
fatal("An irrecoverable SI_KERNEL SIGSEGV has occurred due "
"to unstable signal handling in this distribution.");
}
#endif // AMD64

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,8 +32,15 @@ bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
void* ucontext, bool isInJava) {
assert(Thread::current() == this, "caller must be current thread");
assert(this->is_Java_thread(), "must be JavaThread");
return pd_get_top_frame(fr_addr, ucontext, isInJava);
}
bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) {
return pd_get_top_frame(fr_addr, ucontext, isInJava);
}
bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) {
assert(this->is_Java_thread(), "must be JavaThread");
JavaThread* jt = (JavaThread *)this;
// If we have a last_Java_frame, then we should use it even if

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -61,6 +61,11 @@
bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
bool isInJava);
bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava);
private:
bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava);
public:
// These routines are only used on cpu architectures that
// have separate register stacks (Itanium).
static bool register_stack_overflow() { return false; }
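The thread hunks above share one shape: the signal-handler entry point keeps its current-thread assertion, the new profiling entry point does not (a sampler may call it for another thread), and both delegate to a private worker. In outline:

bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr, void* uc, bool isInJava) {
  assert(Thread::current() == this, "caller must be current thread");
  return pd_get_top_frame(fr, uc, isInJava);   // runs on the thread itself
}

bool JavaThread::pd_get_top_frame_for_profiling(frame* fr, void* uc, bool isInJava) {
  // may be invoked from a sampler thread, hence no current-thread assert
  return pd_get_top_frame(fr, uc, isInJava);
}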

View file

@ -1,61 +0,0 @@
/*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/threadLocalStorage.hpp"
#include <sys/trap.h> // For trap numbers
#include <v9/sys/psr_compat.h> // For V8 compatibility
void MacroAssembler::read_ccr_trap(Register ccr_save) {
// Execute a trap to get the PSR, mask and shift
// to get the condition codes.
get_psr_trap();
nop();
set(PSR_ICC, ccr_save);
and3(O0, ccr_save, ccr_save);
srl(ccr_save, PSR_ICC_SHIFT, ccr_save);
}
void MacroAssembler::write_ccr_trap(Register ccr_save, Register scratch1, Register scratch2) {
// Execute a trap to get the PSR, shift back
// the condition codes, mask the condition codes
// back into the PSR, and trap to write back the
// PSR.
sll(ccr_save, PSR_ICC_SHIFT, scratch2);
get_psr_trap();
nop();
set(~PSR_ICC, scratch1);
and3(O0, scratch1, O0);
or3(O0, scratch2, O0);
set_psr_trap();
nop();
}
void MacroAssembler::flush_windows_trap() { trap(ST_FLUSH_WINDOWS); }
void MacroAssembler::clean_windows_trap() { trap(ST_CLEAN_WINDOWS); }
void MacroAssembler::get_psr_trap() { trap(ST_GETPSR); }
void MacroAssembler::set_psr_trap() { trap(ST_SETPSR); }

View file

@ -60,21 +60,10 @@ inline jlong Atomic::load(volatile jlong* src) { return *src; }
#else
extern "C" void _Atomic_move_long_v8(volatile jlong* src, volatile jlong* dst);
extern "C" void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst);
inline void Atomic_move_long(volatile jlong* src, volatile jlong* dst) {
#ifdef COMPILER2
// Compiler2 does not support v8; it is used only with v9.
_Atomic_move_long_v9(src, dst);
#else
// The branch is cheaper than the emulated LDD.
if (VM_Version::v9_instructions_work()) {
_Atomic_move_long_v9(src, dst);
} else {
_Atomic_move_long_v8(src, dst);
}
#endif
}
inline jlong Atomic::load(volatile jlong* src) {
@ -209,7 +198,6 @@ inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong*
: "memory");
return rv;
#else //_LP64
assert(VM_Version::v9_instructions_work(), "cas only supported on v9");
volatile jlong_accessor evl, cvl, rv;
evl.long_value = exchange_value;
cvl.long_value = compare_value;
@ -318,7 +306,6 @@ inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong*
// Return 64 bit value in %o0
return _Atomic_cas64((intptr_t)exchange_value, (intptr_t *)dest, (intptr_t)compare_value);
#else // _LP64
assert (VM_Version::v9_instructions_work(), "only supported on v9");
// Return 64 bit value in %o0,%o1 by hand
return _Atomic_casl(exchange_value, dest, compare_value);
#endif // _LP64
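With v8 support gone, 64-bit moves on 32-bit SPARC no longer branch on VM_Version at runtime: the v9 helper (a single ldx/stx pair) is used unconditionally, and the cas paths drop their "v9 only" asserts because v9 is now assumed. The surviving shape:

inline void Atomic_move_long(volatile jlong* src, volatile jlong* dst) {
  // one 64-bit access via the v9 inline-assembly helper; the old
  // _Atomic_move_long_v8 ldd/std fallback is gone
  _Atomic_move_long_v9(src, dst);
}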

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -194,6 +194,11 @@ intptr_t* os::Solaris::ucontext_get_fp(ucontext_t *uc) {
return NULL;
}
address os::Solaris::ucontext_get_pc(ucontext_t *uc) {
return (address) uc->uc_mcontext.gregs[REG_PC];
}
// For Forte Analyzer AsyncGetCallTrace profiling support - thread
// is currently interrupted by SIGPROF.
//
@ -265,22 +270,6 @@ frame os::current_frame() {
}
}
void GetThreadPC_Callback::execute(OSThread::InterruptArguments *args) {
Thread* thread = args->thread();
ucontext_t* uc = args->ucontext();
intptr_t* sp;
assert(ProfileVM && thread->is_VM_thread(), "just checking");
// Skip the mcontext corruption verification. If things occasionally
// get corrupt, it is ok for profiling - we will just get an unresolved
// function name
ExtendedPC new_addr((address)uc->uc_mcontext.gregs[REG_PC]);
_addr = new_addr;
}
static int threadgetstate(thread_t tid, int *flags, lwpid_t *lwp, stack_t *ss, gregset_t rs, lwpstatus_t *lwpstatus) {
char lwpstatusfile[PROCFILE_LENGTH];
int lwpfd, err;
@ -314,11 +303,6 @@ bool os::is_allocatable(size_t bytes) {
#endif
}
extern "C" void Fetch32PFI () ;
extern "C" void Fetch32Resume () ;
extern "C" void FetchNPFI () ;
extern "C" void FetchNResume () ;
extern "C" JNIEXPORT int
JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
int abort_if_unrecognized) {
@ -358,13 +342,8 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
guarantee(sig != os::Solaris::SIGinterrupt(), "Can not chain VM interrupt signal, try -XX:+UseAltSigs");
if (sig == os::Solaris::SIGasync()) {
if (thread) {
OSThread::InterruptArguments args(thread, uc);
thread->osthread()->do_interrupt_callbacks_at_interrupt(&args);
return true;
} else if (vmthread) {
OSThread::InterruptArguments args(vmthread, uc);
vmthread->osthread()->do_interrupt_callbacks_at_interrupt(&args);
if (thread || vmthread) {
OSThread::SR_handler(t, uc);
return true;
} else if (os::Solaris::chained_handler(sig, info, ucVoid)) {
return true;
@ -395,17 +374,10 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
npc = (address) uc->uc_mcontext.gregs[REG_nPC];
// SafeFetch() support
// Implemented with either a fixed set of addresses such
// as Fetch32*, or with Thread._OnTrap.
if (uc->uc_mcontext.gregs[REG_PC] == intptr_t(Fetch32PFI)) {
uc->uc_mcontext.gregs [REG_PC] = intptr_t(Fetch32Resume) ;
uc->uc_mcontext.gregs [REG_nPC] = intptr_t(Fetch32Resume) + 4 ;
return true ;
}
if (uc->uc_mcontext.gregs[REG_PC] == intptr_t(FetchNPFI)) {
uc->uc_mcontext.gregs [REG_PC] = intptr_t(FetchNResume) ;
uc->uc_mcontext.gregs [REG_nPC] = intptr_t(FetchNResume) + 4 ;
return true ;
if (StubRoutines::is_safefetch_fault(pc)) {
uc->uc_mcontext.gregs[REG_PC] = intptr_t(StubRoutines::continuation_for_safefetch_fault(pc));
uc->uc_mcontext.gregs[REG_nPC] = uc->uc_mcontext.gregs[REG_PC] + 4;
return 1;
}
// Handle ALL stack overflow variations here

View file

@ -152,23 +152,6 @@
.nonvolatile
.end
// Support for jlong Atomic::load and Atomic::store on v8.
//
// void _Atomic_move_long_v8(volatile jlong* src, volatile jlong* dst)
//
// Arguments:
// src: O0
// dest: O1
//
// Overwrites O2 and O3
.inline _Atomic_move_long_v8,2
.volatile
ldd [%o0], %o2
std %o2, [%o1]
.nonvolatile
.end
// Support for jlong Atomic::load and Atomic::store on v9.
//
// void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst)

Some files were not shown because too many files have changed in this diff.