8210764: Update avx512 implementation

Reviewed-by: kvn
Author: Sandhya Viswanathan, 2018-09-24 16:37:28 -07:00; committed by Vladimir Kozlov
parent e5b9edac53
commit 092fe55fb1
21 changed files with 1427 additions and 3162 deletions


@@ -2792,7 +2792,10 @@ void LIR_Assembler::align_backward_branch_target() {
 }
 
-void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
+void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
+  // tmp must be unused
+  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
   if (left->is_single_cpu()) {
     assert(dest->is_single_cpu(), "expect single result reg");
     __ negw(dest->as_register(), left->as_register());


@@ -3265,7 +3265,9 @@ void LIR_Assembler::align_backward_branch_target() {
 }
 
-void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
+void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
+  // tmp must be unused
+  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
   if (left->is_single_cpu()) {
     assert (dest->type() == T_INT, "unexpected result type");


@@ -2840,7 +2840,9 @@ void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
 }
 
-void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
+void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
+  // tmp must be unused
+  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
   assert(left->is_register(), "can only handle registers");
   if (left->is_single_cpu()) {


@@ -2850,7 +2850,9 @@ void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
   ShouldNotCallThis(); // There are no delay slots on ZARCH_64.
 }
 
-void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
+void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
+  // tmp must be unused
+  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
   assert(left->is_register(), "can only handle registers");
   if (left->is_single_cpu()) {


@@ -3024,7 +3024,9 @@ void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
 }
 
-void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
+void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
+  // tmp must be unused
+  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
   assert(left->is_register(), "can only handle registers");
   if (left->is_single_cpu()) {

File diff suppressed because it is too large.


@@ -2097,6 +2097,7 @@ private:
   // Andn packed integers
   void pandn(XMMRegister dst, XMMRegister src);
+  void vpandn(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
 
   // Or packed integers
   void por(XMMRegister dst, XMMRegister src);
@@ -2134,6 +2135,7 @@ private:
   void vextracti32x4(Address dst, XMMRegister src, uint8_t imm8);
   void vextracti64x2(XMMRegister dst, XMMRegister src, uint8_t imm8);
   void vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
+  void vextracti64x4(Address dst, XMMRegister src, uint8_t imm8);
 
   // vextractf forms
   void vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8);
@@ -2144,28 +2146,24 @@ private:
   void vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
   void vextractf64x4(Address dst, XMMRegister src, uint8_t imm8);
 
-  // legacy xmm sourced word/dword replicate
-  void vpbroadcastw(XMMRegister dst, XMMRegister src);
-  void vpbroadcastd(XMMRegister dst, XMMRegister src);
-
   // xmm/mem sourced byte/word/dword/qword replicate
-  void evpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len);
-  void evpbroadcastb(XMMRegister dst, Address src, int vector_len);
-  void evpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len);
-  void evpbroadcastw(XMMRegister dst, Address src, int vector_len);
-  void evpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len);
-  void evpbroadcastd(XMMRegister dst, Address src, int vector_len);
-  void evpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len);
-  void evpbroadcastq(XMMRegister dst, Address src, int vector_len);
+  void vpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len);
+  void vpbroadcastb(XMMRegister dst, Address src, int vector_len);
+  void vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len);
+  void vpbroadcastw(XMMRegister dst, Address src, int vector_len);
+  void vpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len);
+  void vpbroadcastd(XMMRegister dst, Address src, int vector_len);
+  void vpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len);
+  void vpbroadcastq(XMMRegister dst, Address src, int vector_len);
   void evbroadcasti64x2(XMMRegister dst, XMMRegister src, int vector_len);
   void evbroadcasti64x2(XMMRegister dst, Address src, int vector_len);
 
   // scalar single/double precision replicate
-  void evpbroadcastss(XMMRegister dst, XMMRegister src, int vector_len);
-  void evpbroadcastss(XMMRegister dst, Address src, int vector_len);
-  void evpbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len);
-  void evpbroadcastsd(XMMRegister dst, Address src, int vector_len);
+  void vpbroadcastss(XMMRegister dst, XMMRegister src, int vector_len);
+  void vpbroadcastss(XMMRegister dst, Address src, int vector_len);
+  void vpbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len);
+  void vpbroadcastsd(XMMRegister dst, Address src, int vector_len);
 
   // gpr sourced byte/word/dword/qword replicate
   void evpbroadcastb(XMMRegister dst, Register src, int vector_len);
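The xmm/mem-sourced broadcast declarations above lose the EVEX-specific "ev" prefix and gain an explicit vector-length argument; only the gpr-sourced forms keep the evpbroadcast names. A minimal call-site sketch in the usual MacroAssembler style (registers chosen only for illustration; the first line is the same rewrite the stub-generator hunk later in this commit makes):

    __ vpbroadcastd(xmm0, xmm0, Assembler::AVX_512bit);   // was: evpbroadcastd(xmm0, xmm0, AVX_512bit)
    __ evpbroadcastb(xmm1, rax, Assembler::AVX_512bit);   // gpr-sourced form keeps the ev prefix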


@@ -68,7 +68,6 @@ static jlong *float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], (jl
 static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));
-
 
 NEEDS_CLEANUP // remove this definitions ?
 const Register IC_Klass    = rax;   // where the IC klass is cached
 const Register SYNC_header = rax;   // synchronization header
@@ -650,7 +649,7 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
     case T_FLOAT: {
       if (dest->is_single_xmm()) {
-        if (c->is_zero_float()) {
+        if (LP64_ONLY(UseAVX < 2 &&) c->is_zero_float()) {
           __ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg());
         } else {
           __ movflt(dest->as_xmm_float_reg(),
@@ -672,7 +671,7 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
     case T_DOUBLE: {
       if (dest->is_double_xmm()) {
-        if (c->is_zero_double()) {
+        if (LP64_ONLY(UseAVX < 2 &&) c->is_zero_double()) {
           __ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg());
         } else {
           __ movdbl(dest->as_xmm_double_reg(),
@@ -2395,16 +2394,24 @@ void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int
 }
 
-void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
+void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) {
   if (value->is_double_xmm()) {
     switch(code) {
       case lir_abs :
         {
-          if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
-            __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
+#ifdef _LP64
+          if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
+            assert(tmp->is_valid(), "need temporary");
+            __ vpandn(dest->as_xmm_double_reg(), tmp->as_xmm_double_reg(), value->as_xmm_double_reg(), 2);
+          }
+          else
+#endif
+          {
+            if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
+              __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
+            }
+            assert(!tmp->is_valid(), "do not need temporary");
+            __ andpd(dest->as_xmm_double_reg(),
+                     ExternalAddress((address)double_signmask_pool));
           }
-          __ andpd(dest->as_xmm_double_reg(),
-                   ExternalAddress((address)double_signmask_pool));
         }
         break;
@@ -3734,7 +3741,7 @@ void LIR_Assembler::align_backward_branch_target() {
 }
 
-void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
+void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
   if (left->is_single_cpu()) {
     __ negl(left->as_register());
     move_regs(left->as_register(), dest->as_register());
@@ -3759,24 +3766,36 @@ void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
 #endif // _LP64
 
   } else if (dest->is_single_xmm()) {
-    if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
-      __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());
+#ifdef _LP64
+    if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
+      assert(tmp->is_valid(), "need temporary");
+      assert_different_registers(left->as_xmm_float_reg(), tmp->as_xmm_float_reg());
+      __ vpxor(dest->as_xmm_float_reg(), tmp->as_xmm_float_reg(), left->as_xmm_float_reg(), 2);
     }
-    if (UseAVX > 0) {
-      __ vnegatess(dest->as_xmm_float_reg(), dest->as_xmm_float_reg(),
-                   ExternalAddress((address)float_signflip_pool));
-    } else {
+    else
+#endif
+    {
+      assert(!tmp->is_valid(), "do not need temporary");
+      if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
+        __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());
+      }
       __ xorps(dest->as_xmm_float_reg(),
                ExternalAddress((address)float_signflip_pool));
     }
   } else if (dest->is_double_xmm()) {
-    if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
-      __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
+#ifdef _LP64
+    if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
+      assert(tmp->is_valid(), "need temporary");
+      assert_different_registers(left->as_xmm_double_reg(), tmp->as_xmm_double_reg());
+      __ vpxor(dest->as_xmm_double_reg(), tmp->as_xmm_double_reg(), left->as_xmm_double_reg(), 2);
    }
-    if (UseAVX > 0) {
-      __ vnegatesd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg(),
-                   ExternalAddress((address)double_signflip_pool));
-    } else {
+    else
+#endif
+    {
+      assert(!tmp->is_valid(), "do not need temporary");
+      if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
+        __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
+      }
       __ xorpd(dest->as_xmm_double_reg(),
                ExternalAddress((address)double_signflip_pool));
     }
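The constant pools referenced above encode the usual IEEE-754 sign-bit tricks: lir_abs clears the sign bit (andpd/vpandn with a sign mask) and negate flips it (xorps/xorpd/vpxor with a sign-flip mask), which is why the temporary is pre-loaded with -0.0 when AVX512VL is unavailable. A small standalone C++ sketch of the bit manipulation, independent of HotSpot:

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    int main() {
      double x = 3.5, m = -0.0;                 // -0.0 carries only the sign bit
      uint64_t xb, mb;
      std::memcpy(&xb, &x, sizeof xb);
      std::memcpy(&mb, &m, sizeof mb);          // mb == 0x8000000000000000

      uint64_t neg = xb ^ mb;                   // sign flip: what vpxor/xorpd compute
      uint64_t abs = ~mb & xb;                  // sign clear: what vpandn/andpd compute

      double nd, ad;
      std::memcpy(&nd, &neg, sizeof nd);
      std::memcpy(&ad, &abs, sizeof ad);
      std::printf("%g %g\n", nd, ad);           // prints: -3.5 3.5
      return 0;
    }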


@@ -320,7 +320,21 @@ void LIRGenerator::do_NegateOp(NegateOp* x) {
   value.set_destroys_register();
   value.load_item();
   LIR_Opr reg = rlock(x);
-  __ negate(value.result(), reg);
+  LIR_Opr tmp = LIR_OprFact::illegalOpr;
+#ifdef _LP64
+  if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
+    if (x->type()->tag() == doubleTag) {
+      tmp = new_register(T_DOUBLE);
+      __ move(LIR_OprFact::doubleConst(-0.0), tmp);
+    }
+    else if (x->type()->tag() == floatTag) {
+      tmp = new_register(T_FLOAT);
+      __ move(LIR_OprFact::floatConst(-0.0), tmp);
+    }
+  }
+#endif
+  __ negate(value.result(), reg, tmp);
 
   set_result(x, round_item(reg));
 }
@@ -748,8 +762,17 @@ void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
 
       LIR_Opr calc_input = value.result();
       LIR_Opr calc_result = rlock_result(x);
+      LIR_Opr tmp = LIR_OprFact::illegalOpr;
+#ifdef _LP64
+      if (UseAVX > 2 && (!VM_Version::supports_avx512vl()) &&
+          (x->id() == vmIntrinsics::_dabs)) {
+        tmp = new_register(T_DOUBLE);
+        __ move(LIR_OprFact::doubleConst(-0.0), tmp);
+      }
+#endif
+
       switch(x->id()) {
-      case vmIntrinsics::_dabs:   __ abs  (calc_input, calc_result, LIR_OprFact::illegalOpr); break;
+      case vmIntrinsics::_dabs:   __ abs  (calc_input, calc_result, tmp); break;
       case vmIntrinsics::_dsqrt:  __ sqrt (calc_input, calc_result, LIR_OprFact::illegalOpr); break;
       default:                    ShouldNotReachHere();
       }


@@ -119,7 +119,7 @@ define_pd_global(bool, ThreadLocalHandshakes, false);
   product(bool, UseStoreImmI16, true,                                      \
           "Use store immediate 16-bits value instruction on x86")          \
                                                                            \
-  product(intx, UseAVX, 2,                                                 \
+  product(intx, UseAVX, 3,                                                 \
           "Highest supported AVX instructions set on x86/x64")             \
           range(0, 99)                                                     \
                                                                            \

File diff suppressed because it is too large.


@@ -482,10 +482,6 @@ class MacroAssembler: public Assembler {
   // from register xmm0. Otherwise, the value is stored from the FPU stack.
   void store_double(Address dst);
 
-  // Save/restore ZMM (512bit) register on stack.
-  void push_zmm(XMMRegister reg);
-  void pop_zmm(XMMRegister reg);
-
   // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
   void push_fTOS();
@@ -1214,9 +1210,11 @@ public:
   void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
   void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);
 
-  void vpbroadcastw(XMMRegister dst, XMMRegister src);
+  void vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len);
+  void vpbroadcastw(XMMRegister dst, Address src, int vector_len) { Assembler::vpbroadcastw(dst, src, vector_len); }
 
   void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
   void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
 
   void vpmovzxbw(XMMRegister dst, Address src, int vector_len);


@@ -403,7 +403,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
     __ movdl(xmm0, rcx);
     __ movl(rcx, 0xffff);
     __ kmovwl(k1, rcx);
-    __ evpbroadcastd(xmm0, xmm0, Assembler::AVX_512bit);
+    __ vpbroadcastd(xmm0, xmm0, Assembler::AVX_512bit);
     __ evmovdqul(xmm7, xmm0, Assembler::AVX_512bit);
 #ifdef _LP64
     __ evmovdqul(xmm8, xmm0, Assembler::AVX_512bit);


@@ -816,7 +816,10 @@ public:
   static bool supports_avx512cd()   { return (_features & CPU_AVX512CD) != 0; }
   static bool supports_avx512bw()   { return (_features & CPU_AVX512BW) != 0; }
   static bool supports_avx512vl()   { return (_features & CPU_AVX512VL) != 0; }
-  static bool supports_avx512vlbw() { return (supports_avx512bw() && supports_avx512vl()); }
+  static bool supports_avx512vlbw() { return (supports_evex() && supports_avx512bw() && supports_avx512vl()); }
+  static bool supports_avx512vldq() { return (supports_evex() && supports_avx512dq() && supports_avx512vl()); }
+  static bool supports_avx512vlbwdq() { return (supports_evex() && supports_avx512vl() &&
+                                                supports_avx512bw() && supports_avx512dq()); }
   static bool supports_avx512novl() { return (supports_evex() && !supports_avx512vl()); }
   static bool supports_avx512nobw() { return (supports_evex() && !supports_avx512bw()); }
   static bool supports_avx256only() { return (supports_avx2() && !supports_evex()); }
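The new compound predicates gate EVEX forms that need several CPUID bits at once: AVX512VL allows EVEX encodings at 128/256-bit widths, AVX512BW adds byte/word element sizes, and AVX512DQ adds the doubleword/quadword extensions. A hedged sketch of the kind of guard these helpers are intended for (the branch bodies are placeholders, not code from this change):

    if (VM_Version::supports_avx512vlbwdq()) {
      // EVEX byte/word/dword/qword operations are legal at 128- and 256-bit
      // widths, so the upper registers xmm16..xmm31 may be used for them.
    } else {
      // Stay with VEX/legacy encodings and the xmm0..xmm15 registers.
    }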

File diff suppressed because it is too large.


@@ -4101,6 +4101,15 @@ operand regF() %{
   interface(REG_INTER);
 %}
 
+// Float register operands
+operand vlRegF() %{
+  constraint(ALLOC_IN_RC(float_reg_vl));
+  match(RegF);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
 // XMM Double register operands
 operand regD() %{
   predicate( UseSSE>=2 );
@@ -4110,6 +4119,15 @@ operand regD() %{
   interface(REG_INTER);
 %}
 
+// Double register operands
+operand vlRegD() %{
+  constraint(ALLOC_IN_RC(double_reg_vl));
+  match(RegD);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
 // Vectors : note, we use legacy registers to avoid extra (unneeded in 32-bit VM)
 // runtime code generation via reg_class_dynamic.
 operand vecS() %{
@@ -4120,6 +4138,14 @@ operand vecS() %{
   interface(REG_INTER);
 %}
 
+operand legVecS() %{
+  constraint(ALLOC_IN_RC(vectors_reg_legacy));
+  match(VecS);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
 operand vecD() %{
   constraint(ALLOC_IN_RC(vectord_reg_legacy));
   match(VecD);
@@ -4128,6 +4154,14 @@ operand vecD() %{
   interface(REG_INTER);
 %}
 
+operand legVecD() %{
+  constraint(ALLOC_IN_RC(vectord_reg_legacy));
+  match(VecD);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
 operand vecX() %{
   constraint(ALLOC_IN_RC(vectorx_reg_legacy));
   match(VecX);
@@ -4136,6 +4170,14 @@ operand vecX() %{
   interface(REG_INTER);
 %}
 
+operand legVecX() %{
+  constraint(ALLOC_IN_RC(vectorx_reg_legacy));
+  match(VecX);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
 operand vecY() %{
   constraint(ALLOC_IN_RC(vectory_reg_legacy));
   match(VecY);
@@ -4144,6 +4186,14 @@ operand vecY() %{
   interface(REG_INTER);
 %}
 
+operand legVecY() %{
+  constraint(ALLOC_IN_RC(vectory_reg_legacy));
+  match(VecY);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
 //----------Memory Operands----------------------------------------------------
 // Direct Memory Operand
 operand direct(immP addr) %{
@@ -6515,6 +6565,26 @@ instruct storeD(memory mem, regD src) %{
   ins_pipe( pipe_slow );
 %}
 
+// Load Double
+instruct MoveD2VL(vlRegD dst, regD src) %{
+  match(Set dst src);
+  format %{ "movsd $dst,$src\t! load double (8 bytes)" %}
+  ins_encode %{
+    __ movdbl($dst$$XMMRegister, $src$$XMMRegister);
+  %}
+  ins_pipe( fpu_reg_reg );
+%}
+
+// Load Double
+instruct MoveVL2D(regD dst, vlRegD src) %{
+  match(Set dst src);
+  format %{ "movsd $dst,$src\t! load double (8 bytes)" %}
+  ins_encode %{
+    __ movdbl($dst$$XMMRegister, $src$$XMMRegister);
+  %}
+  ins_pipe( fpu_reg_reg );
+%}
+
 // Store XMM register to memory (single-precision floating point)
 // MOVSS instruction
 instruct storeF(memory mem, regF src) %{
@@ -6528,6 +6598,26 @@ instruct storeF(memory mem, regF src) %{
   ins_pipe( pipe_slow );
 %}
 
+// Load Float
+instruct MoveF2VL(vlRegF dst, regF src) %{
+  match(Set dst src);
+  format %{ "movss $dst,$src\t! load float (4 bytes)" %}
+  ins_encode %{
+    __ movflt($dst$$XMMRegister, $src$$XMMRegister);
+  %}
+  ins_pipe( fpu_reg_reg );
+%}
+
+// Load Float
+instruct MoveVL2F(regF dst, vlRegF src) %{
+  match(Set dst src);
+  format %{ "movss $dst,$src\t! load float (4 bytes)" %}
+  ins_encode %{
+    __ movflt($dst$$XMMRegister, $src$$XMMRegister);
+  %}
+  ins_pipe( fpu_reg_reg );
+%}
+
 // Store Float
 instruct storeFPR( memory mem, regFPR1 src) %{
   predicate(UseSSE==0);


@@ -3656,6 +3656,15 @@ operand regF() %{
   interface(REG_INTER);
 %}
 
+// Float register operands
+operand vlRegF() %{
+  constraint(ALLOC_IN_RC(float_reg_vl));
+  match(RegF);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
 // Double register operands
 operand regD() %{
   constraint(ALLOC_IN_RC(double_reg));
@@ -3665,9 +3674,27 @@ operand regD() %{
   interface(REG_INTER);
 %}
 
+// Double register operands
+operand vlRegD() %{
+  constraint(ALLOC_IN_RC(double_reg_vl));
+  match(RegD);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
 // Vectors
 operand vecS() %{
-  constraint(ALLOC_IN_RC(vectors_reg));
+  constraint(ALLOC_IN_RC(vectors_reg_vlbwdq));
+  match(VecS);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Vectors
+operand legVecS() %{
+  constraint(ALLOC_IN_RC(vectors_reg_legacy));
   match(VecS);
 
   format %{ %}
@@ -3675,7 +3702,15 @@ operand vecS() %{
 %}
 
 operand vecD() %{
-  constraint(ALLOC_IN_RC(vectord_reg));
+  constraint(ALLOC_IN_RC(vectord_reg_vlbwdq));
+  match(VecD);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand legVecD() %{
+  constraint(ALLOC_IN_RC(vectord_reg_legacy));
   match(VecD);
 
   format %{ %}
@@ -3683,7 +3718,15 @@ operand vecD() %{
 %}
 
 operand vecX() %{
-  constraint(ALLOC_IN_RC(vectorx_reg));
+  constraint(ALLOC_IN_RC(vectorx_reg_vlbwdq));
+  match(VecX);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand legVecX() %{
+  constraint(ALLOC_IN_RC(vectorx_reg_legacy));
   match(VecX);
 
   format %{ %}
@@ -3691,7 +3734,15 @@ operand vecX() %{
 %}
 
 operand vecY() %{
-  constraint(ALLOC_IN_RC(vectory_reg));
+  constraint(ALLOC_IN_RC(vectory_reg_vlbwdq));
+  match(VecY);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand legVecY() %{
+  constraint(ALLOC_IN_RC(vectory_reg_legacy));
   match(VecY);
 
   format %{ %}
@@ -5287,6 +5338,26 @@ instruct loadF(regF dst, memory mem)
   ins_pipe(pipe_slow); // XXX
 %}
 
+// Load Float
+instruct MoveF2VL(vlRegF dst, regF src) %{
+  match(Set dst src);
+  format %{ "movss $dst,$src\t! load float (4 bytes)" %}
+  ins_encode %{
+    __ movflt($dst$$XMMRegister, $src$$XMMRegister);
+  %}
+  ins_pipe( fpu_reg_reg );
+%}
+
+// Load Float
+instruct MoveVL2F(regF dst, vlRegF src) %{
+  match(Set dst src);
+  format %{ "movss $dst,$src\t! load float (4 bytes)" %}
+  ins_encode %{
+    __ movflt($dst$$XMMRegister, $src$$XMMRegister);
+  %}
+  ins_pipe( fpu_reg_reg );
+%}
+
 // Load Double
 instruct loadD_partial(regD dst, memory mem)
 %{
@@ -5314,6 +5385,26 @@ instruct loadD(regD dst, memory mem)
   ins_pipe(pipe_slow); // XXX
 %}
 
+// Load Double
+instruct MoveD2VL(vlRegD dst, regD src) %{
+  match(Set dst src);
+  format %{ "movsd $dst,$src\t! load double (8 bytes)" %}
+  ins_encode %{
+    __ movdbl($dst$$XMMRegister, $src$$XMMRegister);
+  %}
+  ins_pipe( fpu_reg_reg );
+%}
+
+// Load Double
+instruct MoveVL2D(regD dst, vlRegD src) %{
+  match(Set dst src);
+  format %{ "movsd $dst,$src\t! load double (8 bytes)" %}
+  ins_encode %{
+    __ movdbl($dst$$XMMRegister, $src$$XMMRegister);
+  %}
+  ins_pipe( fpu_reg_reg );
+%}
+
 // Load Effective Address
 instruct leaP8(rRegP dst, indOffset8 mem)
 %{
@@ -10858,7 +10949,7 @@ instruct rep_stos_large(rcx_RegL cnt, rdi_RegP base, regD tmp, rax_RegI zero,
 %}
 
 instruct string_compareL(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI cnt2,
-                         rax_RegI result, regD tmp1, rFlagsReg cr)
+                         rax_RegI result, legVecS tmp1, rFlagsReg cr)
 %{
   predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
@@ -10874,7 +10965,7 @@ instruct string_compareL(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI c
 %}
 
 instruct string_compareU(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI cnt2,
-                         rax_RegI result, regD tmp1, rFlagsReg cr)
+                         rax_RegI result, legVecS tmp1, rFlagsReg cr)
 %{
   predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
@@ -10890,7 +10981,7 @@ instruct string_compareU(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI c
 %}
 
 instruct string_compareLU(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI cnt2,
-                          rax_RegI result, regD tmp1, rFlagsReg cr)
+                          rax_RegI result, legVecS tmp1, rFlagsReg cr)
 %{
   predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
@@ -10906,7 +10997,7 @@ instruct string_compareLU(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI
 %}
 
 instruct string_compareUL(rsi_RegP str1, rdx_RegI cnt1, rdi_RegP str2, rcx_RegI cnt2,
-                          rax_RegI result, regD tmp1, rFlagsReg cr)
+                          rax_RegI result, legVecS tmp1, rFlagsReg cr)
 %{
   predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
@@ -10923,7 +11014,7 @@ instruct string_compareUL(rsi_RegP str1, rdx_RegI cnt1, rdi_RegP str2, rcx_RegI
 
 // fast search of substring with known size.
 instruct string_indexof_conL(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, immI int_cnt2,
-                             rbx_RegI result, regD vec, rax_RegI cnt2, rcx_RegI tmp, rFlagsReg cr)
+                             rbx_RegI result, legVecS vec, rax_RegI cnt2, rcx_RegI tmp, rFlagsReg cr)
 %{
   predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL));
   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
@@ -10952,7 +11043,7 @@ instruct string_indexof_conL(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, immI i
 
 // fast search of substring with known size.
 instruct string_indexof_conU(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, immI int_cnt2,
-                             rbx_RegI result, regD vec, rax_RegI cnt2, rcx_RegI tmp, rFlagsReg cr)
+                             rbx_RegI result, legVecS vec, rax_RegI cnt2, rcx_RegI tmp, rFlagsReg cr)
 %{
   predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU));
   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
@@ -10981,7 +11072,7 @@ instruct string_indexof_conU(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, immI i
 
 // fast search of substring with known size.
 instruct string_indexof_conUL(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, immI int_cnt2,
-                              rbx_RegI result, regD vec, rax_RegI cnt2, rcx_RegI tmp, rFlagsReg cr)
+                              rbx_RegI result, legVecS vec, rax_RegI cnt2, rcx_RegI tmp, rFlagsReg cr)
 %{
   predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL));
   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
@@ -11009,7 +11100,7 @@ instruct string_indexof_conUL(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, immI
 %}
 
 instruct string_indexofL(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, rax_RegI cnt2,
-                         rbx_RegI result, regD vec, rcx_RegI tmp, rFlagsReg cr)
+                         rbx_RegI result, legVecS vec, rcx_RegI tmp, rFlagsReg cr)
 %{
   predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL));
   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
@@ -11026,7 +11117,7 @@ instruct string_indexofL(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, rax_RegI c
 %}
 
 instruct string_indexofU(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, rax_RegI cnt2,
-                         rbx_RegI result, regD vec, rcx_RegI tmp, rFlagsReg cr)
+                         rbx_RegI result, legVecS vec, rcx_RegI tmp, rFlagsReg cr)
 %{
   predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU));
   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
@@ -11043,7 +11134,7 @@ instruct string_indexofU(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, rax_RegI c
 %}
 
 instruct string_indexofUL(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, rax_RegI cnt2,
-                          rbx_RegI result, regD vec, rcx_RegI tmp, rFlagsReg cr)
+                          rbx_RegI result, legVecS vec, rcx_RegI tmp, rFlagsReg cr)
 %{
   predicate(UseSSE42Intrinsics && (((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL));
   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
@@ -11060,7 +11151,7 @@ instruct string_indexofUL(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, rax_RegI
 %}
 
 instruct string_indexofU_char(rdi_RegP str1, rdx_RegI cnt1, rax_RegI ch,
-                              rbx_RegI result, regD vec1, regD vec2, regD vec3, rcx_RegI tmp, rFlagsReg cr)
+                              rbx_RegI result, legVecS vec1, legVecS vec2, legVecS vec3, rcx_RegI tmp, rFlagsReg cr)
 %{
   predicate(UseSSE42Intrinsics);
   match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
@@ -11075,7 +11166,7 @@ instruct string_indexofU_char(rdi_RegP str1, rdx_RegI cnt1, rax_RegI ch,
 
 // fast string equals
 instruct string_equals(rdi_RegP str1, rsi_RegP str2, rcx_RegI cnt, rax_RegI result,
-                       regD tmp1, regD tmp2, rbx_RegI tmp3, rFlagsReg cr)
+                       legVecS tmp1, legVecS tmp2, rbx_RegI tmp3, rFlagsReg cr)
 %{
   match(Set result (StrEquals (Binary str1 str2) cnt));
   effect(TEMP tmp1, TEMP tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL tmp3, KILL cr);
@@ -11091,7 +11182,7 @@ instruct string_equals(rdi_RegP str1, rsi_RegP str2, rcx_RegI cnt, rax_RegI resu
 
 // fast array equals
 instruct array_equalsB(rdi_RegP ary1, rsi_RegP ary2, rax_RegI result,
-                       regD tmp1, regD tmp2, rcx_RegI tmp3, rbx_RegI tmp4, rFlagsReg cr)
+                       legVecS tmp1, legVecS tmp2, rcx_RegI tmp3, rbx_RegI tmp4, rFlagsReg cr)
 %{
   predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
   match(Set result (AryEq ary1 ary2));
@@ -11107,7 +11198,7 @@ instruct array_equalsB(rdi_RegP ary1, rsi_RegP ary2, rax_RegI result,
 %}
 
 instruct array_equalsC(rdi_RegP ary1, rsi_RegP ary2, rax_RegI result,
-                       regD tmp1, regD tmp2, rcx_RegI tmp3, rbx_RegI tmp4, rFlagsReg cr)
+                       legVecS tmp1, legVecS tmp2, rcx_RegI tmp3, rbx_RegI tmp4, rFlagsReg cr)
 %{
   predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
   match(Set result (AryEq ary1 ary2));
@@ -11123,7 +11214,7 @@ instruct array_equalsC(rdi_RegP ary1, rsi_RegP ary2, rax_RegI result,
 %}
 
 instruct has_negatives(rsi_RegP ary1, rcx_RegI len, rax_RegI result,
-                       regD tmp1, regD tmp2, rbx_RegI tmp3, rFlagsReg cr)
+                       legVecS tmp1, legVecS tmp2, rbx_RegI tmp3, rFlagsReg cr)
 %{
   match(Set result (HasNegatives ary1 len));
   effect(TEMP tmp1, TEMP tmp2, USE_KILL ary1, USE_KILL len, KILL tmp3, KILL cr);
@@ -11138,7 +11229,7 @@ instruct has_negatives(rsi_RegP ary1, rcx_RegI len, rax_RegI result,
 %}
 
 // fast char[] to byte[] compression
-instruct string_compress(rsi_RegP src, rdi_RegP dst, rdx_RegI len, regD tmp1, regD tmp2, regD tmp3, regD tmp4,
+instruct string_compress(rsi_RegP src, rdi_RegP dst, rdx_RegI len, legVecS tmp1, legVecS tmp2, legVecS tmp3, legVecS tmp4,
                          rcx_RegI tmp5, rax_RegI result, rFlagsReg cr) %{
   match(Set result (StrCompressedCopy src (Binary dst len)));
   effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL tmp5, KILL cr);
@@ -11154,7 +11245,7 @@ instruct string_compress(rsi_RegP src, rdi_RegP dst, rdx_RegI len, regD tmp1, re
 
 // fast byte[] to char[] inflation
 instruct string_inflate(Universe dummy, rsi_RegP src, rdi_RegP dst, rdx_RegI len,
-                        regD tmp1, rcx_RegI tmp2, rFlagsReg cr) %{
+                        legVecS tmp1, rcx_RegI tmp2, rFlagsReg cr) %{
   match(Set dummy (StrInflatedCopy src (Binary dst len)));
   effect(TEMP tmp1, TEMP tmp2, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);
@@ -11168,7 +11259,7 @@ instruct string_inflate(Universe dummy, rsi_RegP src, rdi_RegP dst, rdx_RegI len
 
 // encode char[] to byte[] in ISO_8859_1
 instruct encode_iso_array(rsi_RegP src, rdi_RegP dst, rdx_RegI len,
-                          regD tmp1, regD tmp2, regD tmp3, regD tmp4,
+                          legVecS tmp1, legVecS tmp2, legVecS tmp3, legVecS tmp4,
                           rcx_RegI tmp5, rax_RegI result, rFlagsReg cr) %{
   match(Set result (EncodeISOArray src (Binary dst len)));
   effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL tmp5, KILL cr);
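The string, array-equality, compress/inflate and ISO-8859-1 encode rules above switch their XMM temporaries from regD to the new legVecS class: these stubs are built on SSE4.2/legacy instructions (pcmpestri, ptest and friends) that have no EVEX encoding, so once UseAVX=3 widens the ordinary vector classes the temporaries must stay pinned to xmm0..xmm15. A hedged illustration of the constraint, with made-up operand names, in the MacroAssembler style the stubs use:

    // pcmpestri exists only in legacy/VEX form; it cannot encode xmm16..xmm31,
    // so the register allocator must hand the stub a legacy register here.
    __ pcmpestri(vec, Address(str2, 0), pcmpmask);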


@@ -472,7 +472,6 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
     case lir_pop:             // input always valid, result and info always invalid
     case lir_return:          // input always valid, result and info always invalid
     case lir_leal:            // input and result always valid, info always invalid
-    case lir_neg:             // input and result always valid, info always invalid
     case lir_monaddr:         // input and result always valid, info always invalid
     case lir_null_check:      // input and info always valid, result always invalid
     case lir_move:            // input and result always valid, may have info
@@ -580,6 +579,7 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
     case lir_rem:
     case lir_sqrt:
     case lir_abs:
+    case lir_neg:
     case lir_logic_and:
     case lir_logic_or:
     case lir_logic_xor:
@@ -1662,7 +1662,6 @@ const char * LIR_Op::name() const {
      case lir_null_check:            s = "null_check";    break;
      case lir_return:                s = "return";        break;
      case lir_safepoint:             s = "safepoint";     break;
-     case lir_neg:                   s = "neg";           break;
      case lir_leal:                  s = "leal";          break;
      case lir_branch:                s = "branch";        break;
      case lir_cond_float_branch:     s = "flt_cond_br";   break;
@@ -1690,6 +1689,7 @@ const char * LIR_Op::name() const {
      case lir_div_strictfp:          s = "div_strictfp";  break;
      case lir_rem:                   s = "rem";           break;
      case lir_abs:                   s = "abs";           break;
+     case lir_neg:                   s = "neg";           break;
      case lir_sqrt:                  s = "sqrt";          break;
      case lir_logic_and:             s = "logic_and";     break;
      case lir_logic_or:              s = "logic_or";      break;


@@ -911,7 +911,6 @@ enum LIR_Code {
       , lir_null_check
       , lir_return
       , lir_leal
-      , lir_neg
       , lir_branch
       , lir_cond_float_branch
       , lir_move
@@ -939,6 +938,7 @@ enum LIR_Code {
       , lir_rem
       , lir_sqrt
       , lir_abs
+      , lir_neg
       , lir_tan
       , lir_log10
       , lir_logic_and
@@ -2075,7 +2075,6 @@ class LIR_List: public CompilationResourceObj {
 
   void branch_destination(Label* lbl)        { append(new LIR_OpLabel(lbl)); }
 
-  void negate(LIR_Opr from, LIR_Opr to)      { append(new LIR_Op1(lir_neg, from, to)); }
   void leal(LIR_Opr from, LIR_Opr result_reg, LIR_PatchCode patch_code = lir_patch_none, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_leal, from, result_reg, T_ILLEGAL, patch_code, info)); }
 
   // result is a stack location for old backend and vreg for UseLinearScan
@@ -2159,6 +2158,7 @@ class LIR_List: public CompilationResourceObj {
                   LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
 
   void abs (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_abs , from, tmp, to)); }
+  void negate(LIR_Opr from, LIR_Opr to, LIR_Opr tmp = LIR_OprFact::illegalOpr) { append(new LIR_Op2(lir_neg, from, tmp, to)); }
   void sqrt(LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_sqrt, from, tmp, to)); }
   void fmad(LIR_Opr from, LIR_Opr from1, LIR_Opr from2, LIR_Opr to) { append(new LIR_Op3(lir_fmad, from, from1, from2, to)); }
  void fmaf(LIR_Opr from, LIR_Opr from1, LIR_Opr from2, LIR_Opr to) { append(new LIR_Op3(lir_fmaf, from, from1, from2, to)); }
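With lir_neg moved into the two-operand group, the optional mask temporary rides in in_opr2 exactly as it already does for lir_abs and lir_sqrt, and negate() keeps a defaulted tmp so existing callers stay source-compatible. A short sketch of how a front end drives the new entry point (variable names are illustrative only):

    // Platforms that need no mask simply omit the third argument:
    lir_list->negate(value_opr, result_opr);

    // x86 with AVX-512 but without AVX512VL passes a temp pre-loaded with -0.0:
    lir_list->negate(value_opr, result_opr, signflip_tmp);

    // Either way the list appends: new LIR_Op2(lir_neg, value_opr, tmp, result_opr)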


@@ -554,10 +554,6 @@ void LIR_Assembler::emit_op1(LIR_Op1* op) {
       pop(op->in_opr());
       break;
 
-    case lir_neg:
-      negate(op->in_opr(), op->result_opr());
-      break;
-
     case lir_leal:
       leal(op->in_opr(), op->result_opr(), op->patch_code(), op->info());
       break;
@@ -750,6 +746,10 @@ void LIR_Assembler::emit_op2(LIR_Op2* op) {
       intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
       break;
 
+    case lir_neg:
+      negate(op->in_opr1(), op->result_opr(), op->in_opr2());
+      break;
+
     case lir_logic_and:
     case lir_logic_or:
     case lir_logic_xor:


@@ -239,7 +239,7 @@ class LIR_Assembler: public CompilationResourceObj {
   void align_backward_branch_target();
   void align_call(LIR_Code code);
 
-  void negate(LIR_Opr left, LIR_Opr dest);
+  void negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp = LIR_OprFact::illegalOpr);
   void leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info);
   void rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info);