Vladimir Kozlov 2011-12-29 11:37:50 -08:00
commit b6828ac542
69 changed files with 4062 additions and 3590 deletions

View file

@ -39,9 +39,16 @@ OS = $(Platform_os_family)
SOURCE.AD = $(OUTDIR)/$(OS)_$(Platform_arch_model).ad
SOURCES.AD = \
ifeq ("${Platform_arch_model}", "${Platform_arch}")
SOURCES.AD = \
$(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
$(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
else
SOURCES.AD = \
$(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
$(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) \
$(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
endif
EXEC = $(OUTDIR)/adlc

View file

@ -39,9 +39,16 @@ OS = $(Platform_os_family)
SOURCE.AD = $(OUTDIR)/$(OS)_$(Platform_arch_model).ad
SOURCES.AD = \
ifeq ("${Platform_arch_model}", "${Platform_arch}")
SOURCES.AD = \
$(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
$(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
else
SOURCES.AD = \
$(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
$(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) \
$(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
endif
EXEC = $(OUTDIR)/adlc

View file

@ -40,9 +40,16 @@ OS = $(Platform_os_family)
SOURCE.AD = $(OUTDIR)/$(OS)_$(Platform_arch_model).ad
SOURCES.AD = \
ifeq ("${Platform_arch_model}", "${Platform_arch}")
SOURCES.AD = \
$(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
$(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
else
SOURCES.AD = \
$(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
$(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) \
$(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
endif
EXEC = $(OUTDIR)/adlc

View file

@ -53,6 +53,17 @@ CPP_INCLUDE_DIRS=\
/I "$(WorkSpace)\src\os\windows\vm" \
/I "$(WorkSpace)\src\cpu\$(Platform_arch)\vm"
!if "$(Platform_arch_model)" == "$(Platform_arch)"
SOURCES_AD=\
$(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad \
$(WorkSpace)/src/os_cpu/windows_$(Platform_arch)/vm/windows_$(Platform_arch_model).ad
!else
SOURCES_AD=\
$(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad \
$(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch).ad \
$(WorkSpace)/src/os_cpu/windows_$(Platform_arch)/vm/windows_$(Platform_arch_model).ad
!endif
# NOTE! If you add any files here, you must also update GENERATED_NAMES_IN_DIR
# and ProjectCreatorIDEOptions in projectcreator.make.
GENERATED_NAMES=\
@ -105,7 +116,6 @@ $(GENERATED_NAMES_IN_DIR): $(Platform_arch_model).ad adlc.exe
$(ADLC) $(ADLCFLAGS) $(Platform_arch_model).ad
mv $(GENERATED_NAMES) $(AdlcOutDir)/
$(Platform_arch_model).ad: $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad $(WorkSpace)/src/os_cpu/windows_$(Platform_arch)/vm/windows_$(Platform_arch_model).ad
$(Platform_arch_model).ad: $(SOURCES_AD)
rm -f $(Platform_arch_model).ad
cat $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad \
$(WorkSpace)/src/os_cpu/windows_$(Platform_arch)/vm/windows_$(Platform_arch_model).ad >$(Platform_arch_model).ad
cat $(SOURCES_AD) >$(Platform_arch_model).ad

View file

@ -3036,10 +3036,8 @@ void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
Label* L_failure,
Label* L_slow_path,
RegisterOrConstant super_check_offset) {
int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::secondary_super_cache_offset_in_bytes());
int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::super_check_offset_offset_in_bytes());
int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
int sco_offset = in_bytes(Klass::super_check_offset_offset());
bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
bool need_slow_path = (must_load_sco ||
@ -3159,10 +3157,8 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
assert(label_nulls <= 1, "at most one NULL in the batch");
// a couple of useful fields in sub_klass:
int ss_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::secondary_supers_offset_in_bytes());
int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::secondary_super_cache_offset_in_bytes());
int ss_offset = in_bytes(Klass::secondary_supers_offset());
int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
// Do a linear scan of the secondary super-klass chain.
// This code is rarely used, so simplicity is a virtue here.
@ -3336,7 +3332,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label);
load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
or3(G2_thread, temp_reg, temp_reg);
xor3(mark_reg, temp_reg, temp_reg);
andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg);
@ -3413,7 +3409,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
// FIXME: due to a lack of registers we currently blow away the age
// bits in this situation. Should attempt to preserve them.
load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
or3(G2_thread, temp_reg, temp_reg);
casn(mark_addr.base(), mark_reg, temp_reg);
// If the biasing toward our thread failed, this means that
@ -3443,7 +3439,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
// FIXME: due to a lack of registers we currently blow away the age
// bits in this situation. Should attempt to preserve them.
load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
casn(mark_addr.base(), mark_reg, temp_reg);
// Fall through to the normal CAS-based lock, because no matter what
// the result of the above CAS, some thread must have succeeded in
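
Most of the platform files in this commit repeat one refactoring, visible in the hunks above: offsets that were hand-assembled as klassOopDesc::header_size() * HeapWordSize plus a *_offset_in_bytes() value become a single accessor returning a ByteSize measured from the klassOop base, unwrapped with in_bytes(). A minimal sketch of that idiom follows; the types and numeric offsets are made-up stand-ins, not the real HotSpot declarations.

#include <cstdio>

// Stand-in for HotSpot's ByteSize: a thin, strongly typed byte offset.
class ByteSize {
  int _value;
 public:
  explicit ByteSize(int v) : _value(v) {}
  friend int in_bytes(ByteSize b) { return b._value; }
};

// Hypothetical Klass with a new-style accessor: the returned offset is already
// measured from the klassOop base, so callers no longer add the header themselves.
struct Klass {
  static const int klassOop_header_bytes      = 16;  // made-up stand-in value
  static const int secondary_super_cache_slot = 24;  // made-up field offset inside Klass
  static ByteSize secondary_super_cache_offset() {
    return ByteSize(klassOop_header_bytes + secondary_super_cache_slot);
  }
};

int main() {
  // New style, as used throughout the diff: one accessor call, one in_bytes().
  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  std::printf("secondary_super_cache offset = %d bytes\n", sc_offset);
  return 0;
}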

View file

@ -302,7 +302,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
assert(_obj != noreg, "must be a valid register");
assert(_oop_index >= 0, "must have oop index");
__ load_heap_oop(_obj, java_lang_Class::klass_offset_in_bytes(), G3);
__ ld_ptr(G3, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc), G3);
__ ld_ptr(G3, in_bytes(instanceKlass::init_thread_offset()), G3);
__ cmp_and_brx_short(G2_thread, G3, Assembler::notEqual, Assembler::pn, call_patch);
// load_klass patches may execute the patched code before it's
@ -471,7 +471,7 @@ void G1UnsafeGetObjSATBBarrierStub::emit_code(LIR_Assembler* ce) {
__ load_klass(src_reg, tmp_reg);
Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset_in_bytes() + sizeof(oopDesc));
Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset());
__ ld(ref_type_adr, tmp_reg);
// _reference_type field is of type ReferenceType (enum)

View file

@ -2202,8 +2202,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
} else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
__ load_klass(dst, tmp);
}
int lh_offset = klassOopDesc::header_size() * HeapWordSize +
Klass::layout_helper_offset_in_bytes();
int lh_offset = in_bytes(Klass::layout_helper_offset());
__ lduw(tmp, lh_offset, tmp2);
@ -2238,12 +2237,10 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ mov(length, len);
__ load_klass(dst, tmp);
int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
objArrayKlass::element_klass_offset_in_bytes());
int ek_offset = in_bytes(objArrayKlass::element_klass_offset());
__ ld_ptr(tmp, ek_offset, super_k);
int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::super_check_offset_offset_in_bytes());
int sco_offset = in_bytes(Klass::super_check_offset_offset());
__ lduw(super_k, sco_offset, chk_off);
__ call_VM_leaf(tmp, copyfunc_addr);
@ -2456,7 +2453,7 @@ void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
op->klass()->as_register() == G5, "must be");
if (op->init_check()) {
__ ld(op->klass()->as_register(),
instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc),
in_bytes(instanceKlass::init_state_offset()),
op->tmp1()->as_register());
add_debug_info_for_null_check_here(op->stub()->info());
__ cmp(op->tmp1()->as_register(), instanceKlass::fully_initialized);
@ -2627,7 +2624,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
} else {
bool need_slow_path = true;
if (k->is_loaded()) {
if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())
if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset()))
need_slow_path = false;
// perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg,
@ -2731,7 +2728,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
__ load_klass(value, klass_RInfo);
// get instance klass
__ ld_ptr(Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)), k_RInfo);
__ ld_ptr(Address(k_RInfo, objArrayKlass::element_klass_offset()), k_RInfo);
// perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL);

View file

@ -181,7 +181,7 @@ void C1_MacroAssembler::try_allocate(
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
assert_different_registers(obj, klass, len, t1, t2);
if (UseBiasedLocking && !len->is_valid()) {
ld_ptr(klass, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes(), t1);
ld_ptr(klass, in_bytes(Klass::prototype_header_offset()), t1);
} else {
set((intx)markOopDesc::prototype(), t1);
}
@ -252,7 +252,7 @@ void C1_MacroAssembler::initialize_object(
#ifdef ASSERT
{
Label ok;
ld(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), t1);
ld(klass, in_bytes(Klass::layout_helper_offset()), t1);
if (var_size_in_bytes != noreg) {
cmp_and_brx_short(t1, var_size_in_bytes, Assembler::equal, Assembler::pt, ok);
} else {

View file

@ -398,14 +398,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
if (id == fast_new_instance_init_check_id) {
// make sure the klass is initialized
__ ld(G5_klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), G3_t1);
__ ld(G5_klass, in_bytes(instanceKlass::init_state_offset()), G3_t1);
__ cmp_and_br_short(G3_t1, instanceKlass::fully_initialized, Assembler::notEqual, Assembler::pn, slow_path);
}
#ifdef ASSERT
// assert object can be fast path allocated
{
Label ok, not_ok;
__ ld(G5_klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), G1_obj_size);
__ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);
// make sure it's an instance (LH > 0)
__ cmp_and_br_short(G1_obj_size, 0, Assembler::lessEqual, Assembler::pn, not_ok);
__ btst(Klass::_lh_instance_slow_path_bit, G1_obj_size);
@ -425,7 +425,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ bind(retry_tlab);
// get the instance size
__ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size);
__ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);
__ tlab_allocate(O0_obj, G1_obj_size, 0, G3_t1, slow_path);
@ -437,7 +437,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ bind(try_eden);
// get the instance size
__ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size);
__ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);
__ eden_allocate(O0_obj, G1_obj_size, 0, G3_t1, G4_t2, slow_path);
__ incr_allocated_bytes(G1_obj_size, G3_t1, G4_t2);
@ -471,8 +471,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
Register G4_length = G4; // Incoming
Register O0_obj = O0; // Outgoing
Address klass_lh(G5_klass, ((klassOopDesc::header_size() * HeapWordSize)
+ Klass::layout_helper_offset_in_bytes()));
Address klass_lh(G5_klass, Klass::layout_helper_offset());
assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
assert(Klass::_lh_header_size_mask == 0xFF, "bytewise");
// Use this offset to pick out an individual byte of the layout_helper:
@ -592,7 +591,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
Label register_finalizer;
Register t = O1;
__ load_klass(O0, t);
__ ld(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), t);
__ ld(t, in_bytes(Klass::access_flags_offset()), t);
__ set(JVM_ACC_HAS_FINALIZER, G3);
__ andcc(G3, t, G0);
__ br(Assembler::notZero, false, Assembler::pt, register_finalizer);

View file

@ -766,7 +766,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// get native function entry point(O0 is a good temp until the very end)
ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::native_function_offset())), O0);
// for static methods insert the mirror argument
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc:: constants_offset())), O1);
__ ld_ptr(Address(O1, 0, constantPoolOopDesc::pool_holder_offset_in_bytes()), O1);
@ -1173,7 +1173,7 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
__ btst(JVM_ACC_SYNCHRONIZED, O1);
__ br( Assembler::zero, false, Assembler::pt, done);
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ delayed()->btst(JVM_ACC_STATIC, O1);
__ ld_ptr(XXX_STATE(_locals), O1);
__ br( Assembler::zero, true, Assembler::pt, got_obj);

View file

@ -1098,7 +1098,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
Address G3_amh_argument ( G3_method_handle, java_lang_invoke_AdapterMethodHandle::argument_offset_in_bytes());
Address G3_amh_conversion(G3_method_handle, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes());
const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
const int java_mirror_offset = in_bytes(Klass::java_mirror_offset());
if (have_entry(ek)) {
__ nop(); // empty stubs make SG sick

View file

@ -6773,6 +6773,16 @@ instruct unnecessary_membar_volatile() %{
ins_pipe(empty);
%}
instruct membar_storestore() %{
match(MemBarStoreStore);
ins_cost(0);
size(0);
format %{ "!MEMBAR-storestore (empty encoding)" %}
ins_encode( );
ins_pipe(empty);
%}
//----------Register Move Instructions-----------------------------------------
instruct roundDouble_nop(regD dst) %{
match(Set dst (RoundDouble dst));
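
The new membar_storestore rule matches the ideal MemBarStoreStore node with an empty encoding, presumably because stores already reach memory in order under SPARC's TSO model, so the node only has to pin the instruction scheduler. A rough GCC-style C++ analogy of a zero-instruction store-store barrier (purely illustrative, not HotSpot code):

// No machine instruction is emitted; the empty asm statement only keeps the
// compiler from reordering memory accesses across this point.
inline void storestore_barrier() {
  asm volatile("" ::: "memory");
}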

View file

@ -3046,8 +3046,7 @@ class StubGenerator: public StubCodeGenerator {
// array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
//
int lh_offset = klassOopDesc::header_size() * HeapWordSize +
Klass::layout_helper_offset_in_bytes();
int lh_offset = in_bytes(Klass::layout_helper_offset());
// Load 32-bits signed value. Use br() instruction with it to check icc.
__ lduw(G3_src_klass, lh_offset, G5_lh);
@ -3194,15 +3193,13 @@ class StubGenerator: public StubCodeGenerator {
G4_dst_klass, G3_src_klass);
// Generate the type check.
int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::super_check_offset_offset_in_bytes());
int sco_offset = in_bytes(Klass::super_check_offset_offset());
__ lduw(G4_dst_klass, sco_offset, sco_temp);
generate_type_check(G3_src_klass, sco_temp, G4_dst_klass,
O5_temp, L_plain_copy);
// Fetch destination element klass from the objArrayKlass header.
int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
objArrayKlass::element_klass_offset_in_bytes());
int ek_offset = in_bytes(objArrayKlass::element_klass_offset());
// the checkcast_copy loop needs two extra arguments:
__ ld_ptr(G4_dst_klass, ek_offset, O4); // dest elem klass

View file

@ -366,7 +366,7 @@ void InterpreterGenerator::lock_method(void) {
// get synchronization object to O0
{ Label done;
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ btst(JVM_ACC_STATIC, O0);
__ br( Assembler::zero, true, Assembler::pt, done);
__ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case
@ -984,7 +984,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// get native function entry point(O0 is a good temp until the very end)
__ delayed()->ld_ptr(Lmethod, in_bytes(methodOopDesc::native_function_offset()), O0);
// for static methods insert the mirror argument
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ ld_ptr(Lmethod, methodOopDesc:: constants_offset(), O1);
__ ld_ptr(O1, constantPoolOopDesc::pool_holder_offset_in_bytes(), O1);

View file

@ -888,7 +888,7 @@ void TemplateTable::aastore() {
// do fast instanceof cache test
__ ld_ptr(O4, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes(), O4);
__ ld_ptr(O4, in_bytes(objArrayKlass::element_klass_offset()), O4);
assert(Otos_i == O0, "just checking");
@ -2031,7 +2031,7 @@ void TemplateTable::_return(TosState state) {
__ access_local_ptr(G3_scratch, Otos_i);
__ load_klass(Otos_i, O2);
__ set(JVM_ACC_HAS_FINALIZER, G3);
__ ld(O2, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), O2);
__ ld(O2, in_bytes(Klass::access_flags_offset()), O2);
__ andcc(G3, O2, G0);
Label skip_register_finalizer;
__ br(Assembler::zero, false, Assembler::pn, skip_register_finalizer);
@ -3350,13 +3350,13 @@ void TemplateTable::_new() {
__ ld_ptr(Rscratch, Roffset, RinstanceKlass);
// make sure klass is fully initialized:
__ ld(RinstanceKlass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), G3_scratch);
__ ld(RinstanceKlass, in_bytes(instanceKlass::init_state_offset()), G3_scratch);
__ cmp(G3_scratch, instanceKlass::fully_initialized);
__ br(Assembler::notEqual, false, Assembler::pn, slow_case);
__ delayed()->ld(RinstanceKlass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), Roffset);
__ delayed()->ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);
// get instance_size in instanceKlass (already aligned)
//__ ld(RinstanceKlass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), Roffset);
//__ ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);
// make sure klass does not have has_finalizer, or is abstract, or interface or java/lang/Class
__ btst(Klass::_lh_instance_slow_path_bit, Roffset);
@ -3483,7 +3483,7 @@ void TemplateTable::_new() {
__ bind(initialize_header);
if (UseBiasedLocking) {
__ ld_ptr(RinstanceKlass, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), G4_scratch);
__ ld_ptr(RinstanceKlass, in_bytes(Klass::prototype_header_offset()), G4_scratch);
} else {
__ set((intptr_t)markOopDesc::prototype(), G4_scratch);
}

File diff suppressed because it is too large.

View file

@ -503,7 +503,31 @@ class Assembler : public AbstractAssembler {
REX_WR = 0x4C,
REX_WRB = 0x4D,
REX_WRX = 0x4E,
REX_WRXB = 0x4F
REX_WRXB = 0x4F,
VEX_3bytes = 0xC4,
VEX_2bytes = 0xC5
};
enum VexPrefix {
VEX_B = 0x20,
VEX_X = 0x40,
VEX_R = 0x80,
VEX_W = 0x80
};
enum VexSimdPrefix {
VEX_SIMD_NONE = 0x0,
VEX_SIMD_66 = 0x1,
VEX_SIMD_F3 = 0x2,
VEX_SIMD_F2 = 0x3
};
enum VexOpcode {
VEX_OPCODE_NONE = 0x0,
VEX_OPCODE_0F = 0x1,
VEX_OPCODE_0F_38 = 0x2,
VEX_OPCODE_0F_3A = 0x3
};
enum WhichOperand {
@ -546,12 +570,99 @@ private:
void prefixq(Address adr);
void prefix(Address adr, Register reg, bool byteinst = false);
void prefixq(Address adr, Register reg);
void prefix(Address adr, XMMRegister reg);
void prefixq(Address adr, Register reg);
void prefixq(Address adr, XMMRegister reg);
void prefetch_prefix(Address src);
void rex_prefix(Address adr, XMMRegister xreg,
VexSimdPrefix pre, VexOpcode opc, bool rex_w);
int rex_prefix_and_encode(int dst_enc, int src_enc,
VexSimdPrefix pre, VexOpcode opc, bool rex_w);
void vex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w,
int nds_enc, VexSimdPrefix pre, VexOpcode opc,
bool vector256);
void vex_prefix(Address adr, int nds_enc, int xreg_enc,
VexSimdPrefix pre, VexOpcode opc,
bool vex_w, bool vector256);
void vex_prefix(XMMRegister dst, XMMRegister nds, Address src,
VexSimdPrefix pre, bool vector256 = false) {
vex_prefix(src, nds->encoding(), dst->encoding(),
pre, VEX_OPCODE_0F, false, vector256);
}
int vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc,
VexSimdPrefix pre, VexOpcode opc,
bool vex_w, bool vector256);
int vex_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src,
VexSimdPrefix pre, bool vector256 = false) {
return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
pre, VEX_OPCODE_0F, false, vector256);
}
void simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr,
VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F,
bool rex_w = false, bool vector256 = false);
void simd_prefix(XMMRegister dst, Address src,
VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
simd_prefix(dst, xnoreg, src, pre, opc);
}
void simd_prefix(Address dst, XMMRegister src, VexSimdPrefix pre) {
simd_prefix(src, dst, pre);
}
void simd_prefix_q(XMMRegister dst, XMMRegister nds, Address src,
VexSimdPrefix pre) {
bool rex_w = true;
simd_prefix(dst, nds, src, pre, VEX_OPCODE_0F, rex_w);
}
int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src,
VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F,
bool rex_w = false, bool vector256 = false);
int simd_prefix_and_encode(XMMRegister dst, XMMRegister src,
VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
return simd_prefix_and_encode(dst, xnoreg, src, pre, opc);
}
// Move/convert 32-bit integer value.
int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, Register src,
VexSimdPrefix pre) {
// It is OK to cast from Register to XMMRegister to pass argument here
// since only encoding is used in simd_prefix_and_encode() and number of
// Gen and Xmm registers are the same.
return simd_prefix_and_encode(dst, nds, as_XMMRegister(src->encoding()), pre);
}
int simd_prefix_and_encode(XMMRegister dst, Register src, VexSimdPrefix pre) {
return simd_prefix_and_encode(dst, xnoreg, src, pre);
}
int simd_prefix_and_encode(Register dst, XMMRegister src,
VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
return simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, pre, opc);
}
// Move/convert 64-bit integer value.
int simd_prefix_and_encode_q(XMMRegister dst, XMMRegister nds, Register src,
VexSimdPrefix pre) {
bool rex_w = true;
return simd_prefix_and_encode(dst, nds, as_XMMRegister(src->encoding()), pre, VEX_OPCODE_0F, rex_w);
}
int simd_prefix_and_encode_q(XMMRegister dst, Register src, VexSimdPrefix pre) {
return simd_prefix_and_encode_q(dst, xnoreg, src, pre);
}
int simd_prefix_and_encode_q(Register dst, XMMRegister src,
VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
bool rex_w = true;
return simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, pre, opc, rex_w);
}
// Helper functions for groups of instructions
void emit_arith_b(int op1, int op2, Register dst, int imm8);
@ -764,6 +875,7 @@ private:
void addss(XMMRegister dst, Address src);
void addss(XMMRegister dst, XMMRegister src);
void andl(Address dst, int32_t imm32);
void andl(Register dst, int32_t imm32);
void andl(Register dst, Address src);
void andl(Register dst, Register src);
@ -774,9 +886,11 @@ private:
void andq(Register dst, Register src);
// Bitwise Logical AND of Packed Double-Precision Floating-Point Values
void andpd(XMMRegister dst, Address src);
void andpd(XMMRegister dst, XMMRegister src);
// Bitwise Logical AND of Packed Single-Precision Floating-Point Values
void andps(XMMRegister dst, XMMRegister src);
void bsfl(Register dst, Register src);
void bsrl(Register dst, Register src);
@ -837,9 +951,11 @@ private:
// Ordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
void comisd(XMMRegister dst, Address src);
void comisd(XMMRegister dst, XMMRegister src);
// Ordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
void comiss(XMMRegister dst, Address src);
void comiss(XMMRegister dst, XMMRegister src);
// Identify processor type and features
void cpuid() {
@ -849,14 +965,19 @@ private:
// Convert Scalar Double-Precision Floating-Point Value to Scalar Single-Precision Floating-Point Value
void cvtsd2ss(XMMRegister dst, XMMRegister src);
void cvtsd2ss(XMMRegister dst, Address src);
// Convert Doubleword Integer to Scalar Double-Precision Floating-Point Value
void cvtsi2sdl(XMMRegister dst, Register src);
void cvtsi2sdl(XMMRegister dst, Address src);
void cvtsi2sdq(XMMRegister dst, Register src);
void cvtsi2sdq(XMMRegister dst, Address src);
// Convert Doubleword Integer to Scalar Single-Precision Floating-Point Value
void cvtsi2ssl(XMMRegister dst, Register src);
void cvtsi2ssl(XMMRegister dst, Address src);
void cvtsi2ssq(XMMRegister dst, Register src);
void cvtsi2ssq(XMMRegister dst, Address src);
// Convert Packed Signed Doubleword Integers to Packed Double-Precision Floating-Point Value
void cvtdq2pd(XMMRegister dst, XMMRegister src);
@ -866,6 +987,7 @@ private:
// Convert Scalar Single-Precision Floating-Point Value to Scalar Double-Precision Floating-Point Value
void cvtss2sd(XMMRegister dst, XMMRegister src);
void cvtss2sd(XMMRegister dst, Address src);
// Convert with Truncation Scalar Double-Precision Floating-Point Value to Doubleword Integer
void cvttsd2sil(Register dst, Address src);
@ -1140,8 +1262,6 @@ private:
void movdq(Register dst, XMMRegister src);
// Move Aligned Double Quadword
void movdqa(Address dst, XMMRegister src);
void movdqa(XMMRegister dst, Address src);
void movdqa(XMMRegister dst, XMMRegister src);
// Move Unaligned Double Quadword
@ -1261,10 +1381,18 @@ private:
void orq(Register dst, Address src);
void orq(Register dst, Register src);
// Pack with unsigned saturation
void packuswb(XMMRegister dst, XMMRegister src);
void packuswb(XMMRegister dst, Address src);
// SSE4.2 string instructions
void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
void pcmpestri(XMMRegister xmm1, Address src, int imm8);
// SSE4.1 packed move
void pmovzxbw(XMMRegister dst, XMMRegister src);
void pmovzxbw(XMMRegister dst, Address src);
#ifndef _LP64 // no 32bit push/pop on amd64
void popl(Address dst);
#endif
@ -1292,6 +1420,7 @@ private:
// POR - Bitwise logical OR
void por(XMMRegister dst, XMMRegister src);
void por(XMMRegister dst, Address src);
// Shuffle Packed Doublewords
void pshufd(XMMRegister dst, XMMRegister src, int mode);
@ -1313,6 +1442,11 @@ private:
// Interleave Low Bytes
void punpcklbw(XMMRegister dst, XMMRegister src);
void punpcklbw(XMMRegister dst, Address src);
// Interleave Low Doublewords
void punpckldq(XMMRegister dst, XMMRegister src);
void punpckldq(XMMRegister dst, Address src);
#ifndef _LP64 // no 32bit push/pop on amd64
void pushl(Address src);
@ -1429,6 +1563,13 @@ private:
void xchgq(Register reg, Address adr);
void xchgq(Register dst, Register src);
// Get Value of Extended Control Register
void xgetbv() {
emit_byte(0x0F);
emit_byte(0x01);
emit_byte(0xD0);
}
void xorl(Register dst, int32_t imm32);
void xorl(Register dst, Address src);
void xorl(Register dst, Register src);
@ -1437,14 +1578,44 @@ private:
void xorq(Register dst, Register src);
// Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
void xorpd(XMMRegister dst, Address src);
void xorpd(XMMRegister dst, XMMRegister src);
// Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
void xorps(XMMRegister dst, Address src);
void xorps(XMMRegister dst, XMMRegister src);
void set_byte_if_not_zero(Register dst); // sets reg to 1 if not zero, otherwise 0
// AVX 3-operands instructions (encoded with VEX prefix)
void vaddsd(XMMRegister dst, XMMRegister nds, Address src);
void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vaddss(XMMRegister dst, XMMRegister nds, Address src);
void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vandpd(XMMRegister dst, XMMRegister nds, Address src);
void vandps(XMMRegister dst, XMMRegister nds, Address src);
void vdivsd(XMMRegister dst, XMMRegister nds, Address src);
void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vdivss(XMMRegister dst, XMMRegister nds, Address src);
void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vmulsd(XMMRegister dst, XMMRegister nds, Address src);
void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vmulss(XMMRegister dst, XMMRegister nds, Address src);
void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vsubsd(XMMRegister dst, XMMRegister nds, Address src);
void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vsubss(XMMRegister dst, XMMRegister nds, Address src);
void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vxorpd(XMMRegister dst, XMMRegister nds, Address src);
void vxorps(XMMRegister dst, XMMRegister nds, Address src);
protected:
// Next instructions require address alignment 16 bytes SSE mode.
// They should be called only from corresponding MacroAssembler instructions.
void andpd(XMMRegister dst, Address src);
void andps(XMMRegister dst, Address src);
void xorpd(XMMRegister dst, Address src);
void xorps(XMMRegister dst, Address src);
};
@ -2175,9 +2346,15 @@ class MacroAssembler: public Assembler {
void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
void andpd(XMMRegister dst, AddressLiteral src);
void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
void andps(XMMRegister dst, AddressLiteral src);
void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
void comiss(XMMRegister dst, AddressLiteral src);
void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
void comisd(XMMRegister dst, AddressLiteral src);
@ -2211,62 +2388,62 @@ private:
void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); }
void movss(XMMRegister dst, AddressLiteral src);
void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); }
void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); }
void movlpd(XMMRegister dst, AddressLiteral src);
public:
void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); }
void addsd(XMMRegister dst, AddressLiteral src) { Assembler::addsd(dst, as_Address(src)); }
void addsd(XMMRegister dst, AddressLiteral src);
void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); }
void addss(XMMRegister dst, AddressLiteral src) { Assembler::addss(dst, as_Address(src)); }
void addss(XMMRegister dst, AddressLiteral src);
void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); }
void divsd(XMMRegister dst, AddressLiteral src) { Assembler::divsd(dst, as_Address(src)); }
void divsd(XMMRegister dst, AddressLiteral src);
void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); }
void divss(XMMRegister dst, AddressLiteral src) { Assembler::divss(dst, as_Address(src)); }
void divss(XMMRegister dst, AddressLiteral src);
void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); }
void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); }
void movsd(XMMRegister dst, AddressLiteral src) { Assembler::movsd(dst, as_Address(src)); }
void movsd(XMMRegister dst, AddressLiteral src);
void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); }
void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); }
void mulsd(XMMRegister dst, AddressLiteral src) { Assembler::mulsd(dst, as_Address(src)); }
void mulsd(XMMRegister dst, AddressLiteral src);
void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); }
void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); }
void mulss(XMMRegister dst, AddressLiteral src) { Assembler::mulss(dst, as_Address(src)); }
void mulss(XMMRegister dst, AddressLiteral src);
void sqrtsd(XMMRegister dst, XMMRegister src) { Assembler::sqrtsd(dst, src); }
void sqrtsd(XMMRegister dst, Address src) { Assembler::sqrtsd(dst, src); }
void sqrtsd(XMMRegister dst, AddressLiteral src) { Assembler::sqrtsd(dst, as_Address(src)); }
void sqrtsd(XMMRegister dst, AddressLiteral src);
void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); }
void sqrtss(XMMRegister dst, AddressLiteral src) { Assembler::sqrtss(dst, as_Address(src)); }
void sqrtss(XMMRegister dst, AddressLiteral src);
void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); }
void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); }
void subsd(XMMRegister dst, AddressLiteral src) { Assembler::subsd(dst, as_Address(src)); }
void subsd(XMMRegister dst, AddressLiteral src);
void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); }
void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); }
void subss(XMMRegister dst, AddressLiteral src) { Assembler::subss(dst, as_Address(src)); }
void subss(XMMRegister dst, AddressLiteral src);
void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); }
void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); }
void ucomiss(XMMRegister dst, AddressLiteral src);
void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); }
void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); }
void ucomisd(XMMRegister dst, AddressLiteral src);
// Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
@ -2279,6 +2456,53 @@ public:
void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); }
void xorps(XMMRegister dst, AddressLiteral src);
// AVX 3-operands instructions
void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); }
void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); }
void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vandpd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vandpd(dst, nds, src); }
void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vandps(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vandps(dst, nds, src); }
void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); }
void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); }
void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); }
void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); }
void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); }
void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); }
void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vxorpd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vxorpd(dst, nds, src); }
void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vxorps(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vxorps(dst, nds, src); }
void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src);
// Data
void cmov32( Condition cc, Register dst, Address src);
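
The VEX_2bytes/VEX_3bytes markers, the VexPrefix bits, and the VexSimdPrefix/VexOpcode enums added above track the documented AVX VEX prefix layout: 0xC5 opens a two-byte form carrying R, vvvv, L and pp, while 0xC4 opens a three-byte form that adds X, B, the opcode map and W. A hedged sketch of the byte layout the new vex_prefix helpers presumably emit (an illustration of the encoding itself, not the HotSpot emitter):

#include <cstdint>
#include <vector>

// Emit a VEX prefix into 'out'. Field meanings follow the Intel SDM:
//   rex_r/x/b : extensions of the ModRM reg/index/base fields (stored inverted)
//   vex_w     : operand-size / opcode-extension bit
//   nds_enc   : the extra source register (vvvv, stored inverted)
//   vector256 : VEX.L, 0 = 128-bit, 1 = 256-bit
//   pp        : implied SIMD prefix (0 = none, 1 = 0x66, 2 = 0xF3, 3 = 0xF2)
//   opc       : opcode map (1 = 0x0F, 2 = 0x0F 38, 3 = 0x0F 3A)
static void emit_vex(std::vector<uint8_t>& out,
                     bool rex_r, bool rex_x, bool rex_b, bool vex_w,
                     int nds_enc, bool vector256, int pp, int opc) {
  uint8_t vvvv = (~nds_enc) & 0xF;  // already inverted, ready to place in bits 6:3
  if (!rex_x && !rex_b && !vex_w && opc == 1) {
    // 2-byte form: C5 | ~R vvvv L pp
    out.push_back(0xC5);
    out.push_back((rex_r ? 0 : 0x80) | (vvvv << 3) | (vector256 ? 4 : 0) | pp);
  } else {
    // 3-byte form: C4 | ~R ~X ~B mmmmm | W vvvv L pp
    out.push_back(0xC4);
    out.push_back((rex_r ? 0 : 0x80) | (rex_x ? 0 : 0x40) | (rex_b ? 0 : 0x20) | opc);
    out.push_back((vex_w ? 0x80 : 0) | (vvvv << 3) | (vector256 ? 4 : 0) | pp);
  }
}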

View file

@ -86,6 +86,7 @@ inline void Assembler::prefix(Address adr, Register reg, bool byteinst) {}
inline void Assembler::prefixq(Address adr, Register reg) {}
inline void Assembler::prefix(Address adr, XMMRegister reg) {}
inline void Assembler::prefixq(Address adr, XMMRegister reg) {}
#else
inline void Assembler::emit_long64(jlong x) {
*(jlong*) _code_pos = x;

View file

@ -320,7 +320,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
// begin_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
__ load_heap_oop_not_null(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
__ get_thread(tmp);
__ cmpptr(tmp, Address(tmp2, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc)));
__ cmpptr(tmp, Address(tmp2, instanceKlass::init_thread_offset()));
__ pop(tmp2);
__ pop(tmp);
__ jcc(Assembler::notEqual, call_patch);
@ -519,7 +519,7 @@ void G1UnsafeGetObjSATBBarrierStub::emit_code(LIR_Assembler* ce) {
__ load_klass(tmp_reg, src_reg);
Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset_in_bytes() + sizeof(oopDesc));
Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset());
__ cmpl(ref_type_adr, REF_NONE);
__ jcc(Assembler::equal, _continuation);

View file

@ -1558,7 +1558,7 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
if (op->init_check()) {
__ cmpl(Address(op->klass()->as_register(),
instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)),
instanceKlass::init_state_offset()),
instanceKlass::fully_initialized);
add_debug_info_for_null_check_here(op->stub()->info());
__ jcc(Assembler::notEqual, *op->stub()->entry());
@ -1730,7 +1730,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
#else
__ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
#endif // _LP64
if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() != k->super_check_offset()) {
if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
__ jcc(Assembler::notEqual, *failure_target);
// successful cast, fall through to profile or jump
} else {
@ -1842,7 +1842,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
__ load_klass(klass_RInfo, value);
// get instance klass (it's already uncompressed)
__ movptr(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
__ movptr(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset()));
// perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
@ -3289,8 +3289,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
} else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
__ load_klass(tmp, dst);
}
int lh_offset = klassOopDesc::header_size() * HeapWordSize +
Klass::layout_helper_offset_in_bytes();
int lh_offset = in_bytes(Klass::layout_helper_offset());
Address klass_lh_addr(tmp, lh_offset);
jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
__ cmpl(klass_lh_addr, objArray_lh);
@ -3307,9 +3306,9 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
#ifndef _LP64
__ movptr(tmp, dst_klass_addr);
__ movptr(tmp, Address(tmp, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
__ movptr(tmp, Address(tmp, objArrayKlass::element_klass_offset()));
__ push(tmp);
__ movl(tmp, Address(tmp, Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc)));
__ movl(tmp, Address(tmp, Klass::super_check_offset_offset()));
__ push(tmp);
__ push(length);
__ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
@ -3333,15 +3332,15 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
// Allocate abi space for args but be sure to keep stack aligned
__ subptr(rsp, 6*wordSize);
__ load_klass(c_rarg3, dst);
__ movptr(c_rarg3, Address(c_rarg3, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
__ movptr(c_rarg3, Address(c_rarg3, objArrayKlass::element_klass_offset()));
store_parameter(c_rarg3, 4);
__ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc)));
__ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset()));
__ call(RuntimeAddress(copyfunc_addr));
__ addptr(rsp, 6*wordSize);
#else
__ load_klass(c_rarg4, dst);
__ movptr(c_rarg4, Address(c_rarg4, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
__ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc)));
__ movptr(c_rarg4, Address(c_rarg4, objArrayKlass::element_klass_offset()));
__ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
__ call(RuntimeAddress(copyfunc_addr));
#endif

View file

@ -150,7 +150,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
assert_different_registers(obj, klass, len);
if (UseBiasedLocking && !len->is_valid()) {
assert_different_registers(obj, klass, len, t1, t2);
movptr(t1, Address(klass, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
movptr(t1, Address(klass, Klass::prototype_header_offset()));
movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
} else {
// This assumes that all prototype bits fit in an int32_t

View file

@ -1011,7 +1011,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
if (id == fast_new_instance_init_check_id) {
// make sure the klass is initialized
__ cmpl(Address(klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized);
__ cmpl(Address(klass, instanceKlass::init_state_offset()), instanceKlass::fully_initialized);
__ jcc(Assembler::notEqual, slow_path);
}
@ -1019,7 +1019,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
// assert object can be fast path allocated
{
Label ok, not_ok;
__ movl(obj_size, Address(klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
__ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
__ cmpl(obj_size, 0); // make sure it's an instance (LH > 0)
__ jcc(Assembler::lessEqual, not_ok);
__ testl(obj_size, Klass::_lh_instance_slow_path_bit);
@ -1040,7 +1040,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ bind(retry_tlab);
// get the instance size (size is positive so movl is fine for 64bit)
__ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
__ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
__ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
@ -1052,7 +1052,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ bind(try_eden);
// get the instance size (size is positive so movl is fine for 64bit)
__ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
__ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
__ eden_allocate(obj, obj_size, 0, t1, slow_path);
__ incr_allocated_bytes(thread, obj_size, 0);
@ -1119,7 +1119,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
{
Label ok;
Register t0 = obj;
__ movl(t0, Address(klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
__ movl(t0, Address(klass, Klass::layout_helper_offset()));
__ sarl(t0, Klass::_lh_array_tag_shift);
int tag = ((id == new_type_array_id)
? Klass::_lh_array_tag_type_value
@ -1153,7 +1153,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
// get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
// since size is positive movl does right thing on 64bit
__ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
__ movl(t1, Address(klass, Klass::layout_helper_offset()));
// since size is positive movl does right thing on 64bit
__ movl(arr_size, length);
assert(t1 == rcx, "fixed register usage");
@ -1167,7 +1167,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size
__ initialize_header(obj, klass, length, t1, t2);
__ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte)));
__ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
__ andptr(t1, Klass::_lh_header_size_mask);
@ -1180,7 +1180,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ bind(try_eden);
// get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
// since size is positive movl does right thing on 64bit
__ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
__ movl(t1, Address(klass, Klass::layout_helper_offset()));
// since size is positive movl does right thing on 64bit
__ movl(arr_size, length);
assert(t1 == rcx, "fixed register usage");
@ -1195,7 +1195,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ incr_allocated_bytes(thread, arr_size, 0);
__ initialize_header(obj, klass, length, t1, t2);
__ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte)));
__ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
__ andptr(t1, Klass::_lh_header_size_mask);
@ -1267,7 +1267,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
Label register_finalizer;
Register t = rsi;
__ load_klass(t, rax);
__ movl(t, Address(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
__ movl(t, Address(t, Klass::access_flags_offset()));
__ testl(t, JVM_ACC_HAS_FINALIZER);
__ jcc(Assembler::notZero, register_finalizer);
__ ret(0);
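
Several allocation stubs above read the array header size out of the klass layout_helper by loading the byte at _lh_header_size_shift / BitsPerByte and masking with _lh_header_size_mask. A small C++ restatement of that extraction; the mask and byte alignment come from the asserts in the diff, while the shift value and the sample input are assumptions:

#include <cstdio>

// The header-size field of layout_helper is byte-aligned and one byte wide,
// per the asserts in the stubs above. The shift value here is an assumption;
// the real constant is Klass::_lh_header_size_shift.
static const int lh_header_size_shift = 16;
static const int lh_header_size_mask  = 0xFF;
static_assert(lh_header_size_shift % 8 == 0, "bytewise, as the stub asserts");

// The same computation the movb + andptr pair performs in the stubs above.
static int array_header_size(int layout_helper) {
  return (layout_helper >> lh_header_size_shift) & lh_header_size_mask;
}

int main() {
  int lh = (16 << lh_header_size_shift) | 3;  // made-up value: 16-byte header, log2 element size 3
  std::printf("array header size = %d bytes\n", array_header_size(lh));
  return 0;
}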

View file

@ -511,7 +511,7 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
// get synchronization object
Label done;
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ movl(rax, access_flags);
__ testl(rax, JVM_ACC_STATIC);
__ movptr(rax, Address(locals, 0)); // get receiver (assume this is frequent case)
@ -763,7 +763,7 @@ void InterpreterGenerator::lock_method(void) {
#endif // ASSERT
// get synchronization object
{ Label done;
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ movl(rax, access_flags);
__ movptr(rdi, STATE(_locals)); // prepare to get receiver (assume common case)
__ testl(rax, JVM_ACC_STATIC);
@ -1180,7 +1180,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// pass mirror handle if static call
{ Label L;
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ movl(t, Address(method, methodOopDesc::access_flags_offset()));
__ testl(t, JVM_ACC_STATIC);
__ jcc(Assembler::zero, L);

View file

@ -1160,7 +1160,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
Address rcx_amh_conversion( rcx_recv, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes() );
Address vmarg; // __ argument_address(vmargslot)
const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
const int java_mirror_offset = in_bytes(Klass::java_mirror_offset());
if (have_entry(ek)) {
__ nop(); // empty stubs make SG sick

View file

@ -237,9 +237,21 @@ int NativeMovRegMem::instruction_start() const {
int off = 0;
u_char instr_0 = ubyte_at(off);
// See comment in Assembler::locate_operand() about VEX prefixes.
if (instr_0 == instruction_VEX_prefix_2bytes) {
assert((UseAVX > 0), "shouldn't have VEX prefix");
NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
return 2;
}
if (instr_0 == instruction_VEX_prefix_3bytes) {
assert((UseAVX > 0), "shouldn't have VEX prefix");
NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
return 3;
}
// First check to see if we have a (prefixed or not) xor
if ( instr_0 >= instruction_prefix_wide_lo && // 0x40
instr_0 <= instruction_prefix_wide_hi) { // 0x4f
if (instr_0 >= instruction_prefix_wide_lo && // 0x40
instr_0 <= instruction_prefix_wide_hi) { // 0x4f
off++;
instr_0 = ubyte_at(off);
}
@ -256,13 +268,13 @@ int NativeMovRegMem::instruction_start() const {
instr_0 = ubyte_at(off);
}
if ( instr_0 == instruction_code_xmm_ss_prefix || // 0xf3
if ( instr_0 == instruction_code_xmm_ss_prefix || // 0xf3
instr_0 == instruction_code_xmm_sd_prefix) { // 0xf2
off++;
instr_0 = ubyte_at(off);
}
if ( instr_0 >= instruction_prefix_wide_lo && // 0x40
if ( instr_0 >= instruction_prefix_wide_lo && // 0x40
instr_0 <= instruction_prefix_wide_hi) { // 0x4f
off++;
instr_0 = ubyte_at(off);

View file

@ -287,6 +287,9 @@ class NativeMovRegMem: public NativeInstruction {
instruction_code_xmm_store = 0x11,
instruction_code_xmm_lpd = 0x12,
instruction_VEX_prefix_2bytes = Assembler::VEX_2bytes,
instruction_VEX_prefix_3bytes = Assembler::VEX_3bytes,
instruction_size = 4,
instruction_offset = 0,
data_offset = 2,

View file

@ -53,6 +53,7 @@ REGISTER_DEFINITION(Register, r14);
REGISTER_DEFINITION(Register, r15);
#endif // AMD64
REGISTER_DEFINITION(XMMRegister, xnoreg);
REGISTER_DEFINITION(XMMRegister, xmm0 );
REGISTER_DEFINITION(XMMRegister, xmm1 );
REGISTER_DEFINITION(XMMRegister, xmm2 );
@ -115,6 +116,7 @@ REGISTER_DEFINITION(Register, r12_heapbase);
REGISTER_DEFINITION(Register, r15_thread);
#endif // AMD64
REGISTER_DEFINITION(MMXRegister, mnoreg );
REGISTER_DEFINITION(MMXRegister, mmx0 );
REGISTER_DEFINITION(MMXRegister, mmx1 );
REGISTER_DEFINITION(MMXRegister, mmx2 );

View file

@ -1374,8 +1374,7 @@ class StubGenerator: public StubCodeGenerator {
// L_success, L_failure, NULL);
assert_different_registers(sub_klass, temp);
int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::secondary_super_cache_offset_in_bytes());
int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
// if the pointers are equal, we are done (e.g., String[] elements)
__ cmpptr(sub_klass, super_klass_addr);
@ -1787,8 +1786,7 @@ class StubGenerator: public StubCodeGenerator {
// array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
//
int lh_offset = klassOopDesc::header_size() * HeapWordSize +
Klass::layout_helper_offset_in_bytes();
int lh_offset = in_bytes(Klass::layout_helper_offset());
Address src_klass_lh_addr(rcx_src_klass, lh_offset);
// Handle objArrays completely differently...
@ -1914,10 +1912,8 @@ class StubGenerator: public StubCodeGenerator {
// live at this point: rcx_src_klass, dst[_pos], src[_pos]
{
// Handy offsets:
int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
objArrayKlass::element_klass_offset_in_bytes());
int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::super_check_offset_offset_in_bytes());
int ek_offset = in_bytes(objArrayKlass::element_klass_offset());
int sco_offset = in_bytes(Klass::super_check_offset_offset());
Register rsi_dst_klass = rsi;
Register rdi_temp = rdi;

View file

@ -2261,8 +2261,7 @@ class StubGenerator: public StubCodeGenerator {
// The ckoff and ckval must be mutually consistent,
// even though caller generates both.
{ Label L;
int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::super_check_offset_offset_in_bytes());
int sco_offset = in_bytes(Klass::super_check_offset_offset());
__ cmpl(ckoff, Address(ckval, sco_offset));
__ jcc(Assembler::equal, L);
__ stop("super_check_offset inconsistent");
@ -2572,8 +2571,7 @@ class StubGenerator: public StubCodeGenerator {
// array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
//
const int lh_offset = klassOopDesc::header_size() * HeapWordSize +
Klass::layout_helper_offset_in_bytes();
const int lh_offset = in_bytes(Klass::layout_helper_offset());
// Handle objArrays completely differently...
const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
@ -2722,15 +2720,13 @@ class StubGenerator: public StubCodeGenerator {
assert_clean_int(count, sco_temp);
// Generate the type check.
const int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::super_check_offset_offset_in_bytes());
const int sco_offset = in_bytes(Klass::super_check_offset_offset());
__ movl(sco_temp, Address(r11_dst_klass, sco_offset));
assert_clean_int(sco_temp, rax);
generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy);
// Fetch destination element klass from the objArrayKlass header.
int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
objArrayKlass::element_klass_offset_in_bytes());
int ek_offset = in_bytes(objArrayKlass::element_klass_offset());
__ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset));
__ movl( sco_temp, Address(r11_dst_klass, sco_offset));
assert_clean_int(sco_temp, rax);

View file

@ -552,7 +552,7 @@ void InterpreterGenerator::lock_method(void) {
#endif // ASSERT
// get synchronization object
{ Label done;
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ movl(rax, access_flags);
__ testl(rax, JVM_ACC_STATIC);
__ movptr(rax, Address(rdi, Interpreter::local_offset_in_bytes(0))); // get receiver (assume this is frequent case)
@ -1012,7 +1012,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// pass mirror handle if static call
{ Label L;
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ movl(t, Address(method, methodOopDesc::access_flags_offset()));
__ testl(t, JVM_ACC_STATIC);
__ jcc(Assembler::zero, L);


@ -505,8 +505,7 @@ void InterpreterGenerator::lock_method(void) {
// get synchronization object
{
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() +
Klass::java_mirror_offset_in_bytes();
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
Label done;
__ movl(rax, access_flags);
__ testl(rax, JVM_ACC_STATIC);
@ -1006,8 +1005,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// pass mirror handle if static call
{
Label L;
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() +
Klass::java_mirror_offset_in_bytes();
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ movl(t, Address(method, methodOopDesc::access_flags_offset()));
__ testl(t, JVM_ACC_STATIC);
__ jcc(Assembler::zero, L);


@ -980,7 +980,7 @@ void TemplateTable::aastore() {
__ load_klass(rbx, rax);
// Move superklass into EAX
__ load_klass(rax, rdx);
__ movptr(rax, Address(rax, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes()));
__ movptr(rax, Address(rax, objArrayKlass::element_klass_offset()));
// Compress array+index*wordSize+12 into a single register. Frees ECX.
__ lea(rdx, element_address);
@ -2033,7 +2033,7 @@ void TemplateTable::_return(TosState state) {
assert(state == vtos, "only valid state");
__ movptr(rax, aaddress(0));
__ load_klass(rdi, rax);
__ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
__ movl(rdi, Address(rdi, Klass::access_flags_offset()));
__ testl(rdi, JVM_ACC_HAS_FINALIZER);
Label skip_register_finalizer;
__ jcc(Assembler::zero, skip_register_finalizer);
@ -3188,11 +3188,11 @@ void TemplateTable::_new() {
// make sure klass is initialized & doesn't have finalizer
// make sure klass is fully initialized
__ cmpl(Address(rcx, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized);
__ cmpl(Address(rcx, instanceKlass::init_state_offset()), instanceKlass::fully_initialized);
__ jcc(Assembler::notEqual, slow_case);
// get instance_size in instanceKlass (scaled to a count of bytes)
__ movl(rdx, Address(rcx, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
__ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
// test to see if it has a finalizer or is malformed in some way
__ testl(rdx, Klass::_lh_instance_slow_path_bit);
__ jcc(Assembler::notZero, slow_case);
@ -3293,7 +3293,7 @@ void TemplateTable::_new() {
__ bind(initialize_header);
if (UseBiasedLocking) {
__ pop(rcx); // get saved klass back in the register.
__ movptr(rbx, Address(rcx, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
__ movptr(rbx, Address(rcx, Klass::prototype_header_offset()));
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
} else {
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),


@ -1004,8 +1004,7 @@ void TemplateTable::aastore() {
// Move superklass into rax
__ load_klass(rax, rdx);
__ movptr(rax, Address(rax,
sizeof(oopDesc) +
objArrayKlass::element_klass_offset_in_bytes()));
objArrayKlass::element_klass_offset()));
// Compress array + index*oopSize + 12 into a single register. Frees rcx.
__ lea(rdx, element_address);
@ -2067,7 +2066,7 @@ void TemplateTable::_return(TosState state) {
assert(state == vtos, "only valid state");
__ movptr(c_rarg1, aaddress(0));
__ load_klass(rdi, c_rarg1);
__ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
__ movl(rdi, Address(rdi, Klass::access_flags_offset()));
__ testl(rdi, JVM_ACC_HAS_FINALIZER);
Label skip_register_finalizer;
__ jcc(Assembler::zero, skip_register_finalizer);
@ -3236,15 +3235,14 @@ void TemplateTable::_new() {
// make sure klass is initialized & doesn't have finalizer
// make sure klass is fully initialized
__ cmpl(Address(rsi,
instanceKlass::init_state_offset_in_bytes() +
sizeof(oopDesc)),
instanceKlass::init_state_offset()),
instanceKlass::fully_initialized);
__ jcc(Assembler::notEqual, slow_case);
// get instance_size in instanceKlass (scaled to a count of bytes)
__ movl(rdx,
Address(rsi,
Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
Klass::layout_helper_offset()));
// test to see if it has a finalizer or is malformed in some way
__ testl(rdx, Klass::_lh_instance_slow_path_bit);
__ jcc(Assembler::notZero, slow_case);
@ -3337,7 +3335,7 @@ void TemplateTable::_new() {
// initialize object header only.
__ bind(initialize_header);
if (UseBiasedLocking) {
__ movptr(rscratch1, Address(rsi, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
__ movptr(rscratch1, Address(rsi, Klass::prototype_header_offset()));
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1);
} else {
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),


@ -50,7 +50,7 @@ const char* VM_Version::_features_str = "";
VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };
static BufferBlob* stub_blob;
static const int stub_size = 400;
static const int stub_size = 500;
extern "C" {
typedef void (*getPsrInfo_stub_t)(void*);
@ -73,7 +73,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT);
Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
Label ext_cpuid1, ext_cpuid5, done;
Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, done;
StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub");
# define __ _masm->
@ -229,6 +229,41 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ movl(Address(rsi, 8), rcx);
__ movl(Address(rsi,12), rdx);
//
// Check if OS has enabled XGETBV instruction to access XCR0
// (OSXSAVE feature flag) and CPU supports AVX
//
__ andl(rcx, 0x18000000);
__ cmpl(rcx, 0x18000000);
__ jccb(Assembler::notEqual, sef_cpuid);
//
// XCR0, XFEATURE_ENABLED_MASK register
//
__ xorl(rcx, rcx); // zero for XCR0 register
__ xgetbv();
__ lea(rsi, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset())));
__ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rdx);
//
// cpuid(0x7) Structured Extended Features
//
__ bind(sef_cpuid);
__ movl(rax, 7);
__ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x7) supported?
__ jccb(Assembler::greater, ext_cpuid);
__ xorl(rcx, rcx);
__ cpuid();
__ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
__ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx);
//
// Extended cpuid(0x80000000)
//
__ bind(ext_cpuid);
__ movl(rax, 0x80000000);
__ cpuid();
__ cmpl(rax, 0x80000000); // Is cpuid(0x80000001) supported?
@ -359,13 +394,19 @@ void VM_Version::get_processor_features() {
if (UseSSE < 1)
_cpuFeatures &= ~CPU_SSE;
if (UseAVX < 2)
_cpuFeatures &= ~CPU_AVX2;
if (UseAVX < 1)
_cpuFeatures &= ~CPU_AVX;
if (logical_processors_per_package() == 1) {
// HT processor could be installed on a system which doesn't support HT.
_cpuFeatures &= ~CPU_HT;
}
char buf[256];
jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
cores_per_cpu(), threads_per_core(),
cpu_family(), _model, _stepping,
(supports_cmov() ? ", cmov" : ""),
@ -379,6 +420,8 @@ void VM_Version::get_processor_features() {
(supports_sse4_1() ? ", sse4.1" : ""),
(supports_sse4_2() ? ", sse4.2" : ""),
(supports_popcnt() ? ", popcnt" : ""),
(supports_avx() ? ", avx" : ""),
(supports_avx2() ? ", avx2" : ""),
(supports_mmx_ext() ? ", mmxext" : ""),
(supports_3dnow_prefetch() ? ", 3dnowpref" : ""),
(supports_lzcnt() ? ", lzcnt": ""),
@ -389,17 +432,24 @@ void VM_Version::get_processor_features() {
// UseSSE is set to the smaller of what hardware supports and what
// the command line requires. I.e., you cannot set UseSSE to 2 on
// older Pentiums which do not support it.
if( UseSSE > 4 ) UseSSE=4;
if( UseSSE < 0 ) UseSSE=0;
if( !supports_sse4_1() ) // Drop to 3 if no SSE4 support
if (UseSSE > 4) UseSSE=4;
if (UseSSE < 0) UseSSE=0;
if (!supports_sse4_1()) // Drop to 3 if no SSE4 support
UseSSE = MIN2((intx)3,UseSSE);
if( !supports_sse3() ) // Drop to 2 if no SSE3 support
if (!supports_sse3()) // Drop to 2 if no SSE3 support
UseSSE = MIN2((intx)2,UseSSE);
if( !supports_sse2() ) // Drop to 1 if no SSE2 support
if (!supports_sse2()) // Drop to 1 if no SSE2 support
UseSSE = MIN2((intx)1,UseSSE);
if( !supports_sse () ) // Drop to 0 if no SSE support
if (!supports_sse ()) // Drop to 0 if no SSE support
UseSSE = 0;
if (UseAVX > 2) UseAVX=2;
if (UseAVX < 0) UseAVX=0;
if (!supports_avx2()) // Drop to 1 if no AVX2 support
UseAVX = MIN2((intx)1,UseAVX);
if (!supports_avx ()) // Drop to 0 if no AVX support
UseAVX = 0;
// On new cpus instructions which update whole XMM register should be used
// to prevent partial register stall due to dependencies on high half.
//
@ -534,6 +584,9 @@ void VM_Version::get_processor_features() {
if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
UsePopCountInstruction = true;
}
} else if (UsePopCountInstruction) {
warning("POPCNT instruction is not available on this CPU");
FLAG_SET_DEFAULT(UsePopCountInstruction, false);
}
#ifdef COMPILER2
@ -605,7 +658,11 @@ void VM_Version::get_processor_features() {
if (PrintMiscellaneous && Verbose) {
tty->print_cr("Logical CPUs per core: %u",
logical_processors_per_package());
tty->print_cr("UseSSE=%d",UseSSE);
tty->print("UseSSE=%d",UseSSE);
if (UseAVX > 0) {
tty->print(" UseAVX=%d",UseAVX);
}
tty->cr();
tty->print("Allocation");
if (AllocatePrefetchStyle <= 0 || UseSSE == 0 && !supports_3dnow_prefetch()) {
tty->print_cr(": no prefetching");


@ -78,7 +78,10 @@ public:
sse4_2 : 1,
: 2,
popcnt : 1,
: 8;
: 3,
osxsave : 1,
avx : 1,
: 3;
} bits;
};
@ -176,6 +179,34 @@ public:
} bits;
};
union SefCpuid7Eax {
uint32_t value;
};
union SefCpuid7Ebx {
uint32_t value;
struct {
uint32_t fsgsbase : 1,
: 2,
bmi1 : 1,
: 1,
avx2 : 1,
: 2,
bmi2 : 1,
: 23;
} bits;
};
union XemXcr0Eax {
uint32_t value;
struct {
uint32_t x87 : 1,
sse : 1,
ymm : 1,
: 29;
} bits;
};
protected:
static int _cpu;
static int _model;
@ -200,7 +231,9 @@ protected:
CPU_SSE4_1 = (1 << 11),
CPU_SSE4_2 = (1 << 12),
CPU_POPCNT = (1 << 13),
CPU_LZCNT = (1 << 14)
CPU_LZCNT = (1 << 14),
CPU_AVX = (1 << 15),
CPU_AVX2 = (1 << 16)
} cpuFeatureFlags;
// cpuid information block. All info derived from executing cpuid with
@ -228,6 +261,12 @@ protected:
uint32_t dcp_cpuid4_ecx; // unused currently
uint32_t dcp_cpuid4_edx; // unused currently
// cpuid function 7 (structured extended features)
SefCpuid7Eax sef_cpuid7_eax;
SefCpuid7Ebx sef_cpuid7_ebx;
uint32_t sef_cpuid7_ecx; // unused currently
uint32_t sef_cpuid7_edx; // unused currently
// cpuid function 0xB (processor topology)
// ecx = 0
uint32_t tpl_cpuidB0_eax;
@ -275,6 +314,10 @@ protected:
uint32_t ext_cpuid8_ebx; // reserved
ExtCpuid8Ecx ext_cpuid8_ecx;
uint32_t ext_cpuid8_edx; // reserved
// extended control register XCR0 (the XFEATURE_ENABLED_MASK register)
XemXcr0Eax xem_xcr0_eax;
uint32_t xem_xcr0_edx; // reserved
};
// The actual cpuid info block
@ -328,6 +371,14 @@ protected:
result |= CPU_SSE4_2;
if (_cpuid_info.std_cpuid1_ecx.bits.popcnt != 0)
result |= CPU_POPCNT;
if (_cpuid_info.std_cpuid1_ecx.bits.avx != 0 &&
_cpuid_info.std_cpuid1_ecx.bits.osxsave != 0 &&
_cpuid_info.xem_xcr0_eax.bits.sse != 0 &&
_cpuid_info.xem_xcr0_eax.bits.ymm != 0) {
result |= CPU_AVX;
if (_cpuid_info.sef_cpuid7_ebx.bits.avx2 != 0)
result |= CPU_AVX2;
}
// AMD features.
if (is_amd()) {
@ -350,12 +401,14 @@ public:
static ByteSize std_cpuid0_offset() { return byte_offset_of(CpuidInfo, std_max_function); }
static ByteSize std_cpuid1_offset() { return byte_offset_of(CpuidInfo, std_cpuid1_eax); }
static ByteSize dcp_cpuid4_offset() { return byte_offset_of(CpuidInfo, dcp_cpuid4_eax); }
static ByteSize sef_cpuid7_offset() { return byte_offset_of(CpuidInfo, sef_cpuid7_eax); }
static ByteSize ext_cpuid1_offset() { return byte_offset_of(CpuidInfo, ext_cpuid1_eax); }
static ByteSize ext_cpuid5_offset() { return byte_offset_of(CpuidInfo, ext_cpuid5_eax); }
static ByteSize ext_cpuid8_offset() { return byte_offset_of(CpuidInfo, ext_cpuid8_eax); }
static ByteSize tpl_cpuidB0_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB0_eax); }
static ByteSize tpl_cpuidB1_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB1_eax); }
static ByteSize tpl_cpuidB2_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB2_eax); }
static ByteSize xem_xcr0_offset() { return byte_offset_of(CpuidInfo, xem_xcr0_eax); }
// Initialization
static void initialize();
@ -447,6 +500,8 @@ public:
static bool supports_sse4_1() { return (_cpuFeatures & CPU_SSE4_1) != 0; }
static bool supports_sse4_2() { return (_cpuFeatures & CPU_SSE4_2) != 0; }
static bool supports_popcnt() { return (_cpuFeatures & CPU_POPCNT) != 0; }
static bool supports_avx() { return (_cpuFeatures & CPU_AVX) != 0; }
static bool supports_avx2() { return (_cpuFeatures & CPU_AVX2) != 0; }
//
// AMD features
//
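Usage sketch (hypothetical call site, not part of the patch): the new accessors let platform code pick a code path per feature level.

    if (VM_Version::supports_avx2()) {
      // 256-bit AVX2 path
    } else if (VM_Version::supports_avx()) {
      // 128-bit VEX-encoded AVX path
    } else {
      // legacy SSE path
    }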


@ -0,0 +1,777 @@
//
// Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//
// X86 Common Architecture Description File
source %{
// Float masks come from different places depending on platform.
#ifdef _LP64
static address float_signmask() { return StubRoutines::x86::float_sign_mask(); }
static address float_signflip() { return StubRoutines::x86::float_sign_flip(); }
static address double_signmask() { return StubRoutines::x86::double_sign_mask(); }
static address double_signflip() { return StubRoutines::x86::double_sign_flip(); }
#else
static address float_signmask() { return (address)float_signmask_pool; }
static address float_signflip() { return (address)float_signflip_pool; }
static address double_signmask() { return (address)double_signmask_pool; }
static address double_signflip() { return (address)double_signflip_pool; }
#endif
%}
// INSTRUCTIONS -- Platform independent definitions (same for 32- and 64-bit)
instruct addF_reg(regF dst, regF src) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (AddF dst src));
format %{ "addss $dst, $src" %}
ins_cost(150);
ins_encode %{
__ addss($dst$$XMMRegister, $src$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct addF_mem(regF dst, memory src) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (AddF dst (LoadF src)));
format %{ "addss $dst, $src" %}
ins_cost(150);
ins_encode %{
__ addss($dst$$XMMRegister, $src$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct addF_imm(regF dst, immF con) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (AddF dst con));
format %{ "addss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
ins_cost(150);
ins_encode %{
__ addss($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct vaddF_reg(regF dst, regF src1, regF src2) %{
predicate(UseAVX > 0);
match(Set dst (AddF src1 src2));
format %{ "vaddss $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vaddss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vaddF_mem(regF dst, regF src1, memory src2) %{
predicate(UseAVX > 0);
match(Set dst (AddF src1 (LoadF src2)));
format %{ "vaddss $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vaddss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct vaddF_imm(regF dst, regF src, immF con) %{
predicate(UseAVX > 0);
match(Set dst (AddF src con));
format %{ "vaddss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
ins_cost(150);
ins_encode %{
__ vaddss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct addD_reg(regD dst, regD src) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (AddD dst src));
format %{ "addsd $dst, $src" %}
ins_cost(150);
ins_encode %{
__ addsd($dst$$XMMRegister, $src$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct addD_mem(regD dst, memory src) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (AddD dst (LoadD src)));
format %{ "addsd $dst, $src" %}
ins_cost(150);
ins_encode %{
__ addsd($dst$$XMMRegister, $src$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct addD_imm(regD dst, immD con) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (AddD dst con));
format %{ "addsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
ins_cost(150);
ins_encode %{
__ addsd($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct vaddD_reg(regD dst, regD src1, regD src2) %{
predicate(UseAVX > 0);
match(Set dst (AddD src1 src2));
format %{ "vaddsd $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vaddsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vaddD_mem(regD dst, regD src1, memory src2) %{
predicate(UseAVX > 0);
match(Set dst (AddD src1 (LoadD src2)));
format %{ "vaddsd $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vaddsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct vaddD_imm(regD dst, regD src, immD con) %{
predicate(UseAVX > 0);
match(Set dst (AddD src con));
format %{ "vaddsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
ins_cost(150);
ins_encode %{
__ vaddsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct subF_reg(regF dst, regF src) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (SubF dst src));
format %{ "subss $dst, $src" %}
ins_cost(150);
ins_encode %{
__ subss($dst$$XMMRegister, $src$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct subF_mem(regF dst, memory src) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (SubF dst (LoadF src)));
format %{ "subss $dst, $src" %}
ins_cost(150);
ins_encode %{
__ subss($dst$$XMMRegister, $src$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct subF_imm(regF dst, immF con) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (SubF dst con));
format %{ "subss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
ins_cost(150);
ins_encode %{
__ subss($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct vsubF_reg(regF dst, regF src1, regF src2) %{
predicate(UseAVX > 0);
match(Set dst (SubF src1 src2));
format %{ "vsubss $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vsubss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vsubF_mem(regF dst, regF src1, memory src2) %{
predicate(UseAVX > 0);
match(Set dst (SubF src1 (LoadF src2)));
format %{ "vsubss $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vsubss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct vsubF_imm(regF dst, regF src, immF con) %{
predicate(UseAVX > 0);
match(Set dst (SubF src con));
format %{ "vsubss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
ins_cost(150);
ins_encode %{
__ vsubss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct subD_reg(regD dst, regD src) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (SubD dst src));
format %{ "subsd $dst, $src" %}
ins_cost(150);
ins_encode %{
__ subsd($dst$$XMMRegister, $src$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct subD_mem(regD dst, memory src) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (SubD dst (LoadD src)));
format %{ "subsd $dst, $src" %}
ins_cost(150);
ins_encode %{
__ subsd($dst$$XMMRegister, $src$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct subD_imm(regD dst, immD con) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (SubD dst con));
format %{ "subsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
ins_cost(150);
ins_encode %{
__ subsd($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct vsubD_reg(regD dst, regD src1, regD src2) %{
predicate(UseAVX > 0);
match(Set dst (SubD src1 src2));
format %{ "vsubsd $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vsubsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vsubD_mem(regD dst, regD src1, memory src2) %{
predicate(UseAVX > 0);
match(Set dst (SubD src1 (LoadD src2)));
format %{ "vsubsd $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vsubsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct vsubD_imm(regD dst, regD src, immD con) %{
predicate(UseAVX > 0);
match(Set dst (SubD src con));
format %{ "vsubsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
ins_cost(150);
ins_encode %{
__ vsubsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct mulF_reg(regF dst, regF src) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (MulF dst src));
format %{ "mulss $dst, $src" %}
ins_cost(150);
ins_encode %{
__ mulss($dst$$XMMRegister, $src$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct mulF_mem(regF dst, memory src) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (MulF dst (LoadF src)));
format %{ "mulss $dst, $src" %}
ins_cost(150);
ins_encode %{
__ mulss($dst$$XMMRegister, $src$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct mulF_imm(regF dst, immF con) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (MulF dst con));
format %{ "mulss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
ins_cost(150);
ins_encode %{
__ mulss($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct vmulF_reg(regF dst, regF src1, regF src2) %{
predicate(UseAVX > 0);
match(Set dst (MulF src1 src2));
format %{ "vmulss $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vmulss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vmulF_mem(regF dst, regF src1, memory src2) %{
predicate(UseAVX > 0);
match(Set dst (MulF src1 (LoadF src2)));
format %{ "vmulss $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vmulss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct vmulF_imm(regF dst, regF src, immF con) %{
predicate(UseAVX > 0);
match(Set dst (MulF src con));
format %{ "vmulss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
ins_cost(150);
ins_encode %{
__ vmulss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct mulD_reg(regD dst, regD src) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (MulD dst src));
format %{ "mulsd $dst, $src" %}
ins_cost(150);
ins_encode %{
__ mulsd($dst$$XMMRegister, $src$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct mulD_mem(regD dst, memory src) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (MulD dst (LoadD src)));
format %{ "mulsd $dst, $src" %}
ins_cost(150);
ins_encode %{
__ mulsd($dst$$XMMRegister, $src$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct mulD_imm(regD dst, immD con) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (MulD dst con));
format %{ "mulsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
ins_cost(150);
ins_encode %{
__ mulsd($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct vmulD_reg(regD dst, regD src1, regD src2) %{
predicate(UseAVX > 0);
match(Set dst (MulD src1 src2));
format %{ "vmulsd $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vmulsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vmulD_mem(regD dst, regD src1, memory src2) %{
predicate(UseAVX > 0);
match(Set dst (MulD src1 (LoadD src2)));
format %{ "vmulsd $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vmulsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct vmulD_imm(regD dst, regD src, immD con) %{
predicate(UseAVX > 0);
match(Set dst (MulD src con));
format %{ "vmulsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
ins_cost(150);
ins_encode %{
__ vmulsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct divF_reg(regF dst, regF src) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (DivF dst src));
format %{ "divss $dst, $src" %}
ins_cost(150);
ins_encode %{
__ divss($dst$$XMMRegister, $src$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct divF_mem(regF dst, memory src) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (DivF dst (LoadF src)));
format %{ "divss $dst, $src" %}
ins_cost(150);
ins_encode %{
__ divss($dst$$XMMRegister, $src$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct divF_imm(regF dst, immF con) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (DivF dst con));
format %{ "divss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
ins_cost(150);
ins_encode %{
__ divss($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct vdivF_reg(regF dst, regF src1, regF src2) %{
predicate(UseAVX > 0);
match(Set dst (DivF src1 src2));
format %{ "vdivss $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vdivss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vdivF_mem(regF dst, regF src1, memory src2) %{
predicate(UseAVX > 0);
match(Set dst (DivF src1 (LoadF src2)));
format %{ "vdivss $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vdivss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct vdivF_imm(regF dst, regF src, immF con) %{
predicate(UseAVX > 0);
match(Set dst (DivF src con));
format %{ "vdivss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
ins_cost(150);
ins_encode %{
__ vdivss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct divD_reg(regD dst, regD src) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (DivD dst src));
format %{ "divsd $dst, $src" %}
ins_cost(150);
ins_encode %{
__ divsd($dst$$XMMRegister, $src$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct divD_mem(regD dst, memory src) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (DivD dst (LoadD src)));
format %{ "divsd $dst, $src" %}
ins_cost(150);
ins_encode %{
__ divsd($dst$$XMMRegister, $src$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct divD_imm(regD dst, immD con) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (DivD dst con));
format %{ "divsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
ins_cost(150);
ins_encode %{
__ divsd($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct vdivD_reg(regD dst, regD src1, regD src2) %{
predicate(UseAVX > 0);
match(Set dst (DivD src1 src2));
format %{ "vdivsd $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vdivsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vdivD_mem(regD dst, regD src1, memory src2) %{
predicate(UseAVX > 0);
match(Set dst (DivD src1 (LoadD src2)));
format %{ "vdivsd $dst, $src1, $src2" %}
ins_cost(150);
ins_encode %{
__ vdivsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct vdivD_imm(regD dst, regD src, immD con) %{
predicate(UseAVX > 0);
match(Set dst (DivD src con));
format %{ "vdivsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
ins_cost(150);
ins_encode %{
__ vdivsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct absF_reg(regF dst) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (AbsF dst));
ins_cost(150);
format %{ "andps $dst, [0x7fffffff]\t# abs float by sign masking" %}
ins_encode %{
__ andps($dst$$XMMRegister, ExternalAddress(float_signmask()));
%}
ins_pipe(pipe_slow);
%}
instruct vabsF_reg(regF dst, regF src) %{
predicate(UseAVX > 0);
match(Set dst (AbsF src));
ins_cost(150);
format %{ "vandps $dst, $src, [0x7fffffff]\t# abs float by sign masking" %}
ins_encode %{
__ vandps($dst$$XMMRegister, $src$$XMMRegister,
ExternalAddress(float_signmask()));
%}
ins_pipe(pipe_slow);
%}
instruct absD_reg(regD dst) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (AbsD dst));
ins_cost(150);
format %{ "andpd $dst, [0x7fffffffffffffff]\t"
"# abs double by sign masking" %}
ins_encode %{
__ andpd($dst$$XMMRegister, ExternalAddress(double_signmask()));
%}
ins_pipe(pipe_slow);
%}
instruct vabsD_reg(regD dst, regD src) %{
predicate(UseAVX > 0);
match(Set dst (AbsD src));
ins_cost(150);
format %{ "vandpd $dst, $src, [0x7fffffffffffffff]\t"
"# abs double by sign masking" %}
ins_encode %{
__ vandpd($dst$$XMMRegister, $src$$XMMRegister,
ExternalAddress(double_signmask()));
%}
ins_pipe(pipe_slow);
%}
instruct negF_reg(regF dst) %{
predicate((UseSSE>=1) && (UseAVX == 0));
match(Set dst (NegF dst));
ins_cost(150);
format %{ "xorps $dst, [0x80000000]\t# neg float by sign flipping" %}
ins_encode %{
__ xorps($dst$$XMMRegister, ExternalAddress(float_signflip()));
%}
ins_pipe(pipe_slow);
%}
instruct vnegF_reg(regF dst, regF src) %{
predicate(UseAVX > 0);
match(Set dst (NegF src));
ins_cost(150);
format %{ "vxorps $dst, $src, [0x80000000]\t# neg float by sign flipping" %}
ins_encode %{
__ vxorps($dst$$XMMRegister, $src$$XMMRegister,
ExternalAddress(float_signflip()));
%}
ins_pipe(pipe_slow);
%}
instruct negD_reg(regD dst) %{
predicate((UseSSE>=2) && (UseAVX == 0));
match(Set dst (NegD dst));
ins_cost(150);
format %{ "xorpd $dst, [0x8000000000000000]\t"
"# neg double by sign flipping" %}
ins_encode %{
__ xorpd($dst$$XMMRegister, ExternalAddress(double_signflip()));
%}
ins_pipe(pipe_slow);
%}
instruct vnegD_reg(regD dst, regD src) %{
predicate(UseAVX > 0);
match(Set dst (NegD src));
ins_cost(150);
format %{ "vxorpd $dst, $src, [0x8000000000000000]\t"
"# neg double by sign flipping" %}
ins_encode %{
__ vxorpd($dst$$XMMRegister, $src$$XMMRegister,
ExternalAddress(double_signflip()));
%}
ins_pipe(pipe_slow);
%}
instruct sqrtF_reg(regF dst, regF src) %{
predicate(UseSSE>=1);
match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
format %{ "sqrtss $dst, $src" %}
ins_cost(150);
ins_encode %{
__ sqrtss($dst$$XMMRegister, $src$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct sqrtF_mem(regF dst, memory src) %{
predicate(UseSSE>=1);
match(Set dst (ConvD2F (SqrtD (ConvF2D (LoadF src)))));
format %{ "sqrtss $dst, $src" %}
ins_cost(150);
ins_encode %{
__ sqrtss($dst$$XMMRegister, $src$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct sqrtF_imm(regF dst, immF con) %{
predicate(UseSSE>=1);
match(Set dst (ConvD2F (SqrtD (ConvF2D con))));
format %{ "sqrtss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
ins_cost(150);
ins_encode %{
__ sqrtss($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
instruct sqrtD_reg(regD dst, regD src) %{
predicate(UseSSE>=2);
match(Set dst (SqrtD src));
format %{ "sqrtsd $dst, $src" %}
ins_cost(150);
ins_encode %{
__ sqrtsd($dst$$XMMRegister, $src$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct sqrtD_mem(regD dst, memory src) %{
predicate(UseSSE>=2);
match(Set dst (SqrtD (LoadD src)));
format %{ "sqrtsd $dst, $src" %}
ins_cost(150);
ins_encode %{
__ sqrtsd($dst$$XMMRegister, $src$$Address);
%}
ins_pipe(pipe_slow);
%}
instruct sqrtD_imm(regD dst, immD con) %{
predicate(UseSSE>=2);
match(Set dst (SqrtD con));
format %{ "sqrtsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
ins_cost(150);
ins_encode %{
__ sqrtsd($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}
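The predicates above partition the rules so that exactly one encoding can match: the plain SSE forms require UseAVX == 0, while the v-prefixed forms require UseAVX > 0. A minimal sketch of the difference in the emitted MacroAssembler calls (register choices are illustrative):

    __ addss(xmm0, xmm1);          // SSE: xmm0 = xmm0 + xmm1 (destructive, dst doubles as src1)
    __ vaddss(xmm0, xmm1, xmm2);   // AVX/VEX: xmm0 = xmm1 + xmm2 (non-destructive three-operand form)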

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -627,6 +627,7 @@ bool InstructForm::is_wide_memory_kill(FormDict &globals) const {
if( strcmp(_matrule->_opType,"MemBarAcquire") == 0 ) return true;
if( strcmp(_matrule->_opType,"MemBarReleaseLock") == 0 ) return true;
if( strcmp(_matrule->_opType,"MemBarAcquireLock") == 0 ) return true;
if( strcmp(_matrule->_opType,"MemBarStoreStore") == 0 ) return true;
return false;
}
@ -3978,7 +3979,8 @@ bool MatchRule::is_ideal_membar() const {
!strcmp(_opType,"MemBarAcquireLock") ||
!strcmp(_opType,"MemBarReleaseLock") ||
!strcmp(_opType,"MemBarVolatile" ) ||
!strcmp(_opType,"MemBarCPUOrder" ) ;
!strcmp(_opType,"MemBarCPUOrder" ) ||
!strcmp(_opType,"MemBarStoreStore" );
}
bool MatchRule::is_ideal_loadPC() const {


@ -61,6 +61,7 @@ AbstractAssembler::AbstractAssembler(CodeBuffer* code) {
_code_limit = cs->limit();
_code_pos = cs->end();
_oop_recorder= code->oop_recorder();
DEBUG_ONLY( _short_branch_delta = 0; )
if (_code_begin == NULL) {
vm_exit_out_of_memory(0, err_msg("CodeCache: no room for %s",
code->name()));


@ -241,6 +241,33 @@ class AbstractAssembler : public ResourceObj {
// Make it return true on platforms which need to verify
// instruction boundaries for some operations.
inline static bool pd_check_instruction_mark();
// Add delta to short branch distance to verify that it still fit into imm8.
int _short_branch_delta;
int short_branch_delta() const { return _short_branch_delta; }
void set_short_branch_delta() { _short_branch_delta = 32; }
void clear_short_branch_delta() { _short_branch_delta = 0; }
class ShortBranchVerifier: public StackObj {
private:
AbstractAssembler* _assm;
public:
ShortBranchVerifier(AbstractAssembler* assm) : _assm(assm) {
assert(assm->short_branch_delta() == 0, "overlapping instructions");
_assm->set_short_branch_delta();
}
~ShortBranchVerifier() {
_assm->clear_short_branch_delta();
}
};
#else
// Dummy in product.
class ShortBranchVerifier: public StackObj {
public:
ShortBranchVerifier(AbstractAssembler* assm) {}
};
#endif
// Label functions
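A hedged usage sketch of the RAII guard declared above (the enclosing method is hypothetical; real callers are MacroAssembler routines that emit short imm8 branches):

    void MacroAssembler::some_macro_sketch() {   // hypothetical caller
      ShortBranchVerifier sbv(this);             // asserts no enclosing verifier is active (overlapping instructions)
                                                 // and adds a 32-byte delta to short-branch range checks
      // ... emit code that may use jccb/jmpb ...
    }                                            // delta cleared when sbv goes out of scope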


@ -854,6 +854,9 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
if (opTypeCheck->_info_for_exception) do_info(opTypeCheck->_info_for_exception);
if (opTypeCheck->_info_for_patch) do_info(opTypeCheck->_info_for_patch);
if (opTypeCheck->_object->is_valid()) do_input(opTypeCheck->_object);
if (op->code() == lir_store_check && opTypeCheck->_object->is_valid()) {
do_temp(opTypeCheck->_object);
}
if (opTypeCheck->_array->is_valid()) do_input(opTypeCheck->_array);
if (opTypeCheck->_tmp1->is_valid()) do_temp(opTypeCheck->_tmp1);
if (opTypeCheck->_tmp2->is_valid()) do_temp(opTypeCheck->_tmp2);


@ -1256,8 +1256,7 @@ void LIRGenerator::do_getClass(Intrinsic* x) {
info = state_for(x);
}
__ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_OBJECT), result, info);
__ move_wide(new LIR_Address(result, Klass::java_mirror_offset_in_bytes() +
klassOopDesc::klass_part_offset_in_bytes(), T_OBJECT), result);
__ move_wide(new LIR_Address(result, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
}


@ -122,18 +122,32 @@ void CE_Eliminator::block_do(BlockBegin* block) {
if (sux != f_goto->default_sux()) return;
// check if at least one word was pushed on sux_state
// inlining depths must match
ValueStack* if_state = if_->state();
ValueStack* sux_state = sux->state();
if (sux_state->stack_size() <= if_->state()->stack_size()) return;
if (if_state->scope()->level() > sux_state->scope()->level()) {
while (sux_state->scope() != if_state->scope()) {
if_state = if_state->caller_state();
assert(if_state != NULL, "states do not match up");
}
} else if (if_state->scope()->level() < sux_state->scope()->level()) {
while (sux_state->scope() != if_state->scope()) {
sux_state = sux_state->caller_state();
assert(sux_state != NULL, "states do not match up");
}
}
if (sux_state->stack_size() <= if_state->stack_size()) return;
// check if phi function is present at end of successor stack and that
// only this phi was pushed on the stack
Value sux_phi = sux_state->stack_at(if_->state()->stack_size());
Value sux_phi = sux_state->stack_at(if_state->stack_size());
if (sux_phi == NULL || sux_phi->as_Phi() == NULL || sux_phi->as_Phi()->block() != sux) return;
if (sux_phi->type()->size() != sux_state->stack_size() - if_->state()->stack_size()) return;
if (sux_phi->type()->size() != sux_state->stack_size() - if_state->stack_size()) return;
// get the values that were pushed in the true- and false-branch
Value t_value = t_goto->state()->stack_at(if_->state()->stack_size());
Value f_value = f_goto->state()->stack_at(if_->state()->stack_size());
Value t_value = t_goto->state()->stack_at(if_state->stack_size());
Value f_value = f_goto->state()->stack_at(if_state->stack_size());
// backend does not support floats
assert(t_value->type()->base() == f_value->type()->base(), "incompatible types");
@ -180,11 +194,7 @@ void CE_Eliminator::block_do(BlockBegin* block) {
Goto* goto_ = new Goto(sux, state_before, if_->is_safepoint() || t_goto->is_safepoint() || f_goto->is_safepoint());
// prepare state for Goto
ValueStack* goto_state = if_->state();
while (sux_state->scope() != goto_state->scope()) {
goto_state = goto_state->caller_state();
assert(goto_state != NULL, "states do not match up");
}
ValueStack* goto_state = if_state;
goto_state = goto_state->copy(ValueStack::StateAfter, goto_state->bci());
goto_state->push(result->type(), result);
assert(goto_state->is_same(sux_state), "states must match now");


@ -73,7 +73,7 @@ class arrayKlass: public Klass {
oop* adr_component_mirror() { return (oop*)&this->_component_mirror;}
// Compiler/Interpreter offset
static ByteSize component_mirror_offset() { return byte_offset_of(arrayKlass, _component_mirror); }
static ByteSize component_mirror_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(arrayKlass, _component_mirror)); }
virtual klassOop java_super() const;//{ return SystemDictionary::Object_klass(); }


@ -405,7 +405,7 @@ class instanceKlass: public Klass {
ReferenceType reference_type() const { return _reference_type; }
void set_reference_type(ReferenceType t) { _reference_type = t; }
static int reference_type_offset_in_bytes() { return offset_of(instanceKlass, _reference_type); }
static ByteSize reference_type_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(instanceKlass, _reference_type)); }
// find local field, returns true if found
bool find_local_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const;
@ -616,8 +616,8 @@ class instanceKlass: public Klass {
void set_breakpoints(BreakpointInfo* bps) { _breakpoints = bps; };
// support for stub routines
static int init_state_offset_in_bytes() { return offset_of(instanceKlass, _init_state); }
static int init_thread_offset_in_bytes() { return offset_of(instanceKlass, _init_thread); }
static ByteSize init_state_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(instanceKlass, _init_state)); }
static ByteSize init_thread_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(instanceKlass, _init_thread)); }
// subclass/subinterface checks
bool implements_interface(klassOop k) const;


@ -144,7 +144,7 @@ klassOop Klass::base_create_klass_oop(KlassHandle& klass, int size,
}
kl->set_secondary_supers(NULL);
oop_store_without_check((oop*) &kl->_primary_supers[0], k);
kl->set_super_check_offset(primary_supers_offset_in_bytes() + sizeof(oopDesc));
kl->set_super_check_offset(in_bytes(primary_supers_offset()));
}
kl->set_java_mirror(NULL);


@ -313,7 +313,7 @@ class Klass : public Klass_vtbl {
// Can this klass be a primary super? False for interfaces and arrays of
// interfaces. False also for arrays or classes with long super chains.
bool can_be_primary_super() const {
const juint secondary_offset = secondary_super_cache_offset_in_bytes() + sizeof(oopDesc);
const juint secondary_offset = in_bytes(secondary_super_cache_offset());
return super_check_offset() != secondary_offset;
}
virtual bool can_be_primary_super_slow() const;
@ -323,7 +323,7 @@ class Klass : public Klass_vtbl {
if (!can_be_primary_super()) {
return primary_super_limit();
} else {
juint d = (super_check_offset() - (primary_supers_offset_in_bytes() + sizeof(oopDesc))) / sizeof(klassOop);
juint d = (super_check_offset() - in_bytes(primary_supers_offset())) / sizeof(klassOop);
assert(d < primary_super_limit(), "oob");
assert(_primary_supers[d] == as_klassOop(), "proper init");
return d;
@ -373,15 +373,15 @@ class Klass : public Klass_vtbl {
virtual void set_alloc_size(juint n) = 0;
// Compiler support
static int super_offset_in_bytes() { return offset_of(Klass, _super); }
static int super_check_offset_offset_in_bytes() { return offset_of(Klass, _super_check_offset); }
static int primary_supers_offset_in_bytes(){ return offset_of(Klass, _primary_supers); }
static int secondary_super_cache_offset_in_bytes() { return offset_of(Klass, _secondary_super_cache); }
static int secondary_supers_offset_in_bytes() { return offset_of(Klass, _secondary_supers); }
static int java_mirror_offset_in_bytes() { return offset_of(Klass, _java_mirror); }
static int modifier_flags_offset_in_bytes(){ return offset_of(Klass, _modifier_flags); }
static int layout_helper_offset_in_bytes() { return offset_of(Klass, _layout_helper); }
static int access_flags_offset_in_bytes() { return offset_of(Klass, _access_flags); }
static ByteSize super_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(Klass, _super)); }
static ByteSize super_check_offset_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(Klass, _super_check_offset)); }
static ByteSize primary_supers_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(Klass, _primary_supers)); }
static ByteSize secondary_super_cache_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(Klass, _secondary_super_cache)); }
static ByteSize secondary_supers_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(Klass, _secondary_supers)); }
static ByteSize java_mirror_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(Klass, _java_mirror)); }
static ByteSize modifier_flags_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(Klass, _modifier_flags)); }
static ByteSize layout_helper_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(Klass, _layout_helper)); }
static ByteSize access_flags_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(Klass, _access_flags)); }
// Unpacking layout_helper:
enum {
@ -478,7 +478,7 @@ class Klass : public Klass_vtbl {
bool is_subtype_of(klassOop k) const {
juint off = k->klass_part()->super_check_offset();
klassOop sup = *(klassOop*)( (address)as_klassOop() + off );
const juint secondary_offset = secondary_super_cache_offset_in_bytes() + sizeof(oopDesc);
const juint secondary_offset = in_bytes(secondary_super_cache_offset());
if (sup == k) {
return true;
} else if (off != secondary_offset) {
@ -674,7 +674,7 @@ class Klass : public Klass_vtbl {
// are potential problems in setting the bias pattern for
// JVM-internal oops.
inline void set_prototype_header(markOop header);
static int prototype_header_offset_in_bytes() { return offset_of(Klass, _prototype_header); }
static ByteSize prototype_header_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(Klass, _prototype_header)); }
int biased_lock_revocation_count() const { return (int) _biased_lock_revocation_count; }
// Atomically increments biased_lock_revocation_count and returns updated value


@ -38,14 +38,8 @@
class klassOopDesc : public oopDesc {
public:
// size operation
static int header_size() { return sizeof(klassOopDesc)/HeapWordSize; }
// support for code generation
static int klass_part_offset_in_bytes() { return sizeof(klassOopDesc); }
// returns the Klass part containing dispatching behavior
Klass* klass_part() const { return (Klass*)((address)this + klass_part_offset_in_bytes()); }
Klass* klass_part() const { return (Klass*)((address)this + sizeof(klassOopDesc)); }
// Convenience wrapper
inline oop java_mirror() const;


@ -47,7 +47,7 @@ class objArrayKlass : public arrayKlass {
oop* bottom_klass_addr() { return (oop*)&_bottom_klass; }
// Compiler/Interpreter offset
static int element_klass_offset_in_bytes() { return offset_of(objArrayKlass, _element_klass); }
static ByteSize element_klass_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(objArrayKlass, _element_klass)); }
// Dispatched operation
bool can_be_primary_super_slow() const;


@ -791,6 +791,10 @@ public:
// are defined in graphKit.cpp, which sets up the bidirectional relation.)
InitializeNode* initialization();
// Return the corresponding storestore barrier (or null if none).
// Walks out edges to find it...
MemBarStoreStoreNode* storestore();
// Convenience for initialization->maybe_set_complete(phase)
bool maybe_set_complete(PhaseGVN* phase);
};


@ -166,6 +166,7 @@ macro(MemBarCPUOrder)
macro(MemBarRelease)
macro(MemBarReleaseLock)
macro(MemBarVolatile)
macro(MemBarStoreStore)
macro(MergeMem)
macro(MinI)
macro(ModD)


@ -1282,12 +1282,11 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
if( tk ) {
// If we are referencing a field within a Klass, we need
// to assume the worst case of an Object. Both exact and
// inexact types must flatten to the same alias class.
// Since the flattened result for a klass is defined to be
// precisely java.lang.Object, use a constant ptr.
// inexact types must flatten to the same alias class so
// use NotNull as the PTR.
if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
tj = tk = TypeKlassPtr::make(TypePtr::Constant,
tj = tk = TypeKlassPtr::make(TypePtr::NotNull,
TypeKlassPtr::OBJECT->klass(),
offset);
}
@ -1307,10 +1306,12 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
// these 2 disparate memories into the same alias class. Since the
// primary supertype array is read-only, there's no chance of confusion
// where we bypass an array load and an array store.
uint off2 = offset - Klass::primary_supers_offset_in_bytes();
if( offset == Type::OffsetBot ||
off2 < Klass::primary_super_limit()*wordSize ) {
offset = sizeof(oopDesc) +Klass::secondary_super_cache_offset_in_bytes();
int primary_supers_offset = in_bytes(Klass::primary_supers_offset());
if (offset == Type::OffsetBot ||
(offset >= primary_supers_offset &&
offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
offset = in_bytes(Klass::secondary_super_cache_offset());
tj = tk = TypeKlassPtr::make( TypePtr::NotNull, tk->klass(), offset );
}
}
@ -1489,13 +1490,13 @@ Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_cr
alias_type(idx)->set_rewritable(false);
}
if (flat->isa_klassptr()) {
if (flat->offset() == Klass::super_check_offset_offset_in_bytes() + (int)sizeof(oopDesc))
if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
alias_type(idx)->set_rewritable(false);
if (flat->offset() == Klass::modifier_flags_offset_in_bytes() + (int)sizeof(oopDesc))
if (flat->offset() == in_bytes(Klass::modifier_flags_offset()))
alias_type(idx)->set_rewritable(false);
if (flat->offset() == Klass::access_flags_offset_in_bytes() + (int)sizeof(oopDesc))
if (flat->offset() == in_bytes(Klass::access_flags_offset()))
alias_type(idx)->set_rewritable(false);
if (flat->offset() == Klass::java_mirror_offset_in_bytes() + (int)sizeof(oopDesc))
if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
alias_type(idx)->set_rewritable(false);
}
// %%% (We would like to finalize JavaThread::threadObj_offset(),
@ -2521,7 +2522,7 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc ) {
break;
}
}
assert(p != NULL, "must be found");
assert(proj != NULL, "must be found");
p->subsume_by(proj);
}
}


@ -1595,6 +1595,7 @@ bool ConnectionGraph::compute_escape() {
GrowableArray<Node*> alloc_worklist;
GrowableArray<Node*> addp_worklist;
GrowableArray<Node*> ptr_cmp_worklist;
GrowableArray<Node*> storestore_worklist;
PhaseGVN* igvn = _igvn;
// Push all useful nodes onto CG list and set their type.
@ -1618,6 +1619,11 @@ bool ConnectionGraph::compute_escape() {
(n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
// Compare pointers nodes
ptr_cmp_worklist.append(n);
} else if (n->is_MemBarStoreStore()) {
// Collect all MemBarStoreStore nodes so that depending on the
// escape status of the associated Allocate node some of them
// may be eliminated.
storestore_worklist.append(n);
}
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
Node* m = n->fast_out(i); // Get user
@ -1724,11 +1730,20 @@ bool ConnectionGraph::compute_escape() {
uint alloc_length = alloc_worklist.length();
for (uint next = 0; next < alloc_length; ++next) {
Node* n = alloc_worklist.at(next);
if (ptnode_adr(n->_idx)->escape_state() == PointsToNode::NoEscape) {
PointsToNode::EscapeState es = ptnode_adr(n->_idx)->escape_state();
if (es == PointsToNode::NoEscape) {
has_non_escaping_obj = true;
if (n->is_Allocate()) {
find_init_values(n, &visited, igvn);
// The object allocated by this Allocate node will never be
// seen by another thread. Mark it so that when it is
// expanded no MemBarStoreStore is added.
n->as_Allocate()->initialization()->set_does_not_escape();
}
} else if ((es == PointsToNode::ArgEscape) && n->is_Allocate()) {
// Same as above. Mark this Allocate node so that when it is
// expanded no MemBarStoreStore is added.
n->as_Allocate()->initialization()->set_does_not_escape();
}
}
@ -1874,6 +1889,25 @@ bool ConnectionGraph::compute_escape() {
igvn->hash_delete(_pcmp_eq);
}
// For MemBarStoreStore nodes added in library_call.cpp, check
// escape status of associated AllocateNode and optimize out
// MemBarStoreStore node if the allocated object never escapes.
while (storestore_worklist.length() != 0) {
Node *n = storestore_worklist.pop();
MemBarStoreStoreNode *storestore = n ->as_MemBarStoreStore();
Node *alloc = storestore->in(MemBarNode::Precedent)->in(0);
assert (alloc->is_Allocate(), "storestore should point to AllocateNode");
PointsToNode::EscapeState es = ptnode_adr(alloc->_idx)->escape_state();
if (es == PointsToNode::NoEscape || es == PointsToNode::ArgEscape) {
MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
_igvn->register_new_node_with_optimizer(mb);
_igvn->replace_node(storestore, mb);
}
}
#ifndef PRODUCT
if (PrintEscapeAnalysis) {
dump(); // Dump ConnectionGraph
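For context, a sketch (not from the patch, names illustrative) of the publication pattern a MemBarStoreStore protects; once escape analysis proves the allocation is NoEscape or ArgEscape no other thread can observe the object, so the barrier is downgraded to MemBarCPUOrder as above.

    obj->field = value;   // initializing stores to the freshly allocated object
    // MemBarStoreStore: keep the stores above ordered before the publishing store below
    shared_ref = obj;     // store that could make obj visible to another thread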


@ -2304,9 +2304,9 @@ Node* GraphKit::gen_subtype_check(Node* subklass, Node* superklass) {
// will always succeed. We could leave a dependency behind to ensure this.
// First load the super-klass's check-offset
Node *p1 = basic_plus_adr( superklass, superklass, sizeof(oopDesc) + Klass::super_check_offset_offset_in_bytes() );
Node *p1 = basic_plus_adr( superklass, superklass, in_bytes(Klass::super_check_offset_offset()) );
Node *chk_off = _gvn.transform( new (C, 3) LoadINode( NULL, memory(p1), p1, _gvn.type(p1)->is_ptr() ) );
int cacheoff_con = sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes();
int cacheoff_con = in_bytes(Klass::secondary_super_cache_offset());
bool might_be_cache = (find_int_con(chk_off, cacheoff_con) == cacheoff_con);
// Load from the sub-klass's super-class display list, or a 1-word cache of
@ -2934,7 +2934,7 @@ Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
}
}
constant_value = Klass::_lh_neutral_value; // put in a known value
Node* lhp = basic_plus_adr(klass_node, klass_node, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc));
Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
return make_load(NULL, lhp, TypeInt::INT, T_INT);
}
@ -3337,6 +3337,19 @@ InitializeNode* AllocateNode::initialization() {
return NULL;
}
// Trace Allocate -> Proj[Parm] -> MemBarStoreStore
MemBarStoreStoreNode* AllocateNode::storestore() {
ProjNode* rawoop = proj_out(AllocateNode::RawAddress);
if (rawoop == NULL) return NULL;
for (DUIterator_Fast imax, i = rawoop->fast_outs(imax); i < imax; i++) {
Node* storestore = rawoop->fast_out(i);
if (storestore->is_MemBarStoreStore()) {
return storestore->as_MemBarStoreStore();
}
}
return NULL;
}
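Sketch of the IR shape assumed here and in escape.cpp: the barrier hangs off the allocation's raw-address projection through its Precedent edge, so either end can reach the other.

    // Allocate
    //   '--> Proj[RawAddress] --(Precedent)--> MemBarStoreStore
    //
    // graphKit:   alloc->proj_out(AllocateNode::RawAddress) -> ... -> is_MemBarStoreStore()
    // escape.cpp: storestore->in(MemBarNode::Precedent)->in(0)->is_Allocate()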
//----------------------------- loop predicates ---------------------------
//------------------------------add_predicate_impl----------------------------


@ -2165,8 +2165,7 @@ void LibraryCallKit::insert_g1_pre_barrier(Node* base_oop, Node* offset, Node* p
IdealKit ideal(this);
#define __ ideal.
const int reference_type_offset = instanceKlass::reference_type_offset_in_bytes() +
sizeof(oopDesc);
const int reference_type_offset = in_bytes(instanceKlass::reference_type_offset());
Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset);
@ -2806,7 +2805,7 @@ bool LibraryCallKit::inline_unsafe_allocate() {
// Note: The argument might still be an illegal value like
// Serializable.class or Object[].class. The runtime will handle it.
// But we must make an explicit check for initialization.
Node* insp = basic_plus_adr(kls, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc));
Node* insp = basic_plus_adr(kls, in_bytes(instanceKlass::init_state_offset()));
Node* inst = make_load(NULL, insp, TypeInt::INT, T_INT);
Node* bits = intcon(instanceKlass::fully_initialized);
Node* test = _gvn.transform( new (C, 3) SubINode(inst, bits) );
@ -2954,7 +2953,7 @@ bool LibraryCallKit::inline_native_isInterrupted() {
//---------------------------load_mirror_from_klass----------------------------
// Given a klass oop, load its java mirror (a java.lang.Class oop).
Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
Node* p = basic_plus_adr(klass, Klass::java_mirror_offset_in_bytes() + sizeof(oopDesc));
Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
return make_load(NULL, p, TypeInstPtr::MIRROR, T_OBJECT);
}
@ -2994,7 +2993,7 @@ Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
// Branch around if the given klass has the given modifier bit set.
// Like generate_guard, adds a new path onto the region.
Node* modp = basic_plus_adr(kls, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc));
Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT);
Node* mask = intcon(modifier_mask);
Node* bits = intcon(modifier_bits);
@ -3115,7 +3114,7 @@ bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
break;
case vmIntrinsics::_getModifiers:
p = basic_plus_adr(kls, Klass::modifier_flags_offset_in_bytes() + sizeof(oopDesc));
p = basic_plus_adr(kls, in_bytes(Klass::modifier_flags_offset()));
query_value = make_load(NULL, p, TypeInt::INT, T_INT);
break;
@ -3155,7 +3154,7 @@ bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
// A guard was added. If the guard is taken, it was an array.
phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror())));
// If we fall through, it's a plain class. Get its _super.
p = basic_plus_adr(kls, Klass::super_offset_in_bytes() + sizeof(oopDesc));
p = basic_plus_adr(kls, in_bytes(Klass::super_offset()));
kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL) );
null_ctl = top();
kls = null_check_oop(kls, &null_ctl);
@ -3173,7 +3172,7 @@ bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
if (generate_array_guard(kls, region) != NULL) {
// Be sure to pin the oop load to the guard edge just created:
Node* is_array_ctrl = region->in(region->req()-1);
Node* cma = basic_plus_adr(kls, in_bytes(arrayKlass::component_mirror_offset()) + sizeof(oopDesc));
Node* cma = basic_plus_adr(kls, in_bytes(arrayKlass::component_mirror_offset()));
Node* cmo = make_load(is_array_ctrl, cma, TypeInstPtr::MIRROR, T_OBJECT);
phi->add_req(cmo);
}
@@ -3181,7 +3180,7 @@ bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
break;
case vmIntrinsics::_getClassAccessFlags:
p = basic_plus_adr(kls, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc));
p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
query_value = make_load(NULL, p, TypeInt::INT, T_INT);
break;
@@ -4194,12 +4193,17 @@ void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, b
Node* raw_obj = alloc_obj->in(1);
assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
AllocateNode* alloc = NULL;
if (ReduceBulkZeroing) {
// We will be completely responsible for initializing this object -
// mark Initialize node as complete.
AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
// The object was just allocated - there should not be any stores!
guarantee(alloc != NULL && alloc->maybe_set_complete(&_gvn), "");
// Mark as complete_with_arraycopy so that on AllocateNode
// expansion, we know this AllocateNode is initialized by an array
// copy and a StoreStore barrier exists after the array copy.
alloc->initialization()->set_complete_with_arraycopy();
}
// Copy the fastest available way.
@@ -4261,7 +4265,18 @@ void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, b
}
// Do not let reads from the cloned object float above the arraycopy.
insert_mem_bar(Op_MemBarCPUOrder);
if (alloc != NULL) {
// Do not let stores that initialize this object be reordered with
// a subsequent store that would make this object accessible by
// other threads.
// Record what AllocateNode this StoreStore protects so that
// escape analysis can go from the MemBarStoreStoreNode to the
// AllocateNode and eliminate the MemBarStoreStoreNode if possible
// based on the escape status of the AllocateNode.
insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
} else {
insert_mem_bar(Op_MemBarCPUOrder);
}
}
//------------------------inline_native_clone----------------------------
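To make the intent of the new barrier concrete, here is a rough Java-level sketch of the publication pattern the MemBarStoreStore guards in the clone intrinsic. The class and field names below are invented for illustration only and are not part of this change.

class Point implements Cloneable {
    int x, y;
    Point(int x, int y) { this.x = x; this.y = y; }
    @Override public Point clone() {
        try { return (Point) super.clone(); }
        catch (CloneNotSupportedException e) { throw new AssertionError(e); }
    }
}

class Publisher {
    static Point shared;        // read by other threads without synchronization

    static void publishCopy(Point p) {
        Point c = p.clone();    // the compiled clone fills the new object with a block copy
        // Without a StoreStore barrier after that copy, the publishing store below could
        // become visible before the copied fields, letting another thread see a
        // non-null 'shared' whose fields still read as zero.
        shared = c;
    }
}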
@@ -4857,7 +4872,7 @@ LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
PreserveJVMState pjvms(this);
set_control(not_subtype_ctrl);
// (At this point we can assume disjoint_bases, since types differ.)
int ek_offset = objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc);
int ek_offset = in_bytes(objArrayKlass::element_klass_offset());
Node* p1 = basic_plus_adr(dest_klass, ek_offset);
Node* n1 = LoadKlassNode::make(_gvn, immutable_memory(), p1, TypeRawPtr::BOTTOM);
Node* dest_elem_klass = _gvn.transform(n1);
@@ -5004,7 +5019,16 @@ LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
// the membar also.
//
// Do not let reads from the cloned object float above the arraycopy.
if (InsertMemBarAfterArraycopy || alloc != NULL)
if (alloc != NULL) {
// Do not let stores that initialize this object be reordered with
// a subsequent store that would make this object accessible by
// other threads.
// Record what AllocateNode this StoreStore protects so that
// escape analysis can go from the MemBarStoreStoreNode to the
// AllocateNode and eliminate the MemBarStoreStoreNode if possible
// based on the escape status of the AllocateNode.
insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
} else if (InsertMemBarAfterArraycopy)
insert_mem_bar(Op_MemBarCPUOrder);
}
@@ -5308,7 +5332,7 @@ LibraryCallKit::generate_checkcast_arraycopy(const TypePtr* adr_type,
// for the target array. This is an optimistic check. It will
// look in each non-null element's class, at the desired klass's
// super_check_offset, for the desired klass.
int sco_offset = Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc);
int sco_offset = in_bytes(Klass::super_check_offset_offset());
Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
Node* n3 = new(C, 3) LoadINode(NULL, memory(p3), p3, _gvn.type(p3)->is_ptr());
Node* check_offset = ConvI2X(_gvn.transform(n3));

View file

@@ -1088,6 +1088,12 @@ void PhaseMacroExpand::expand_allocate_common(
Node* klass_node = alloc->in(AllocateNode::KlassNode);
Node* initial_slow_test = alloc->in(AllocateNode::InitialTest);
Node* storestore = alloc->storestore();
if (storestore != NULL) {
// Break this link that is no longer useful and confuses register allocation
storestore->set_req(MemBarNode::Precedent, top());
}
assert(ctrl != NULL, "must have control");
// We need a Region and corresponding Phi's to merge the slow-path and fast-path results.
// they will not be used if "always_slow" is set
@@ -1289,10 +1295,66 @@ void PhaseMacroExpand::expand_allocate_common(
0, new_alloc_bytes, T_LONG);
}
InitializeNode* init = alloc->initialization();
fast_oop_rawmem = initialize_object(alloc,
fast_oop_ctrl, fast_oop_rawmem, fast_oop,
klass_node, length, size_in_bytes);
// If initialization is performed by an array copy, any required
// MemBarStoreStore was already added. If the object does not
// escape, no MemBarStoreStore is needed. Otherwise we need a
// MemBarStoreStore so that stores that initialize this object
// can't be reordered with a subsequent store that makes this
// object accessible by other threads.
if (init == NULL || (!init->is_complete_with_arraycopy() && !init->does_not_escape())) {
if (init == NULL || init->req() < InitializeNode::RawStores) {
// No InitializeNode or no stores captured by zeroing
// elimination. Simply add the MemBarStoreStore after object
// initialization.
MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot, fast_oop_rawmem);
transform_later(mb);
mb->init_req(TypeFunc::Memory, fast_oop_rawmem);
mb->init_req(TypeFunc::Control, fast_oop_ctrl);
fast_oop_ctrl = new (C, 1) ProjNode(mb,TypeFunc::Control);
transform_later(fast_oop_ctrl);
fast_oop_rawmem = new (C, 1) ProjNode(mb,TypeFunc::Memory);
transform_later(fast_oop_rawmem);
} else {
// Add the MemBarStoreStore after the InitializeNode so that
// all stores performing the initialization that were moved
// before the InitializeNode happen before the storestore
// barrier.
Node* init_ctrl = init->proj_out(TypeFunc::Control);
Node* init_mem = init->proj_out(TypeFunc::Memory);
MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot);
transform_later(mb);
Node* ctrl = new (C, 1) ProjNode(init,TypeFunc::Control);
transform_later(ctrl);
Node* mem = new (C, 1) ProjNode(init,TypeFunc::Memory);
transform_later(mem);
// The MemBarStoreStore depends on control and memory coming
// from the InitializeNode
mb->init_req(TypeFunc::Memory, mem);
mb->init_req(TypeFunc::Control, ctrl);
ctrl = new (C, 1) ProjNode(mb,TypeFunc::Control);
transform_later(ctrl);
mem = new (C, 1) ProjNode(mb,TypeFunc::Memory);
transform_later(mem);
// All nodes that depended on the InitializeNode for control
// and memory must now depend on the MemBarNode that itself
// depends on the InitializeNode
_igvn.replace_node(init_ctrl, ctrl);
_igvn.replace_node(init_mem, mem);
}
}
if (C->env()->dtrace_extended_probes()) {
// Slow-path call
int size = TypeFunc::Parms + 2;
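For the does_not_escape() case handled above, a minimal hypothetical Java sketch (the names are made up; whether a given allocation is actually proven non-escaping depends on the escape analysis results for the compiled code):

class NoEscapeDemo {
    static int sumOfSquares(int n) {
        int[] tmp = new int[n];        // this allocation never leaves the method
        for (int i = 0; i < n; i++) {
            tmp[i] = i * i;            // initializing stores
        }
        int s = 0;
        for (int v : tmp) {
            s += v;
        }
        // Only the scalar sum escapes, so no other thread can ever observe 'tmp'
        // and the StoreStore barrier added at allocation expansion is unnecessary.
        return s;
    }
}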
@@ -1326,6 +1388,7 @@ void PhaseMacroExpand::expand_allocate_common(
result_phi_rawmem->init_req(fast_result_path, fast_oop_rawmem);
} else {
slow_region = ctrl;
result_phi_i_o = i_o; // Rename it to use in the following code.
}
// Generate slow-path call
@@ -1350,6 +1413,10 @@ void PhaseMacroExpand::expand_allocate_common(
copy_call_debug_info((CallNode *) alloc, call);
if (!always_slow) {
call->set_cnt(PROB_UNLIKELY_MAG(4)); // Same effect as RC_UNCOMMON.
} else {
// Hook i_o projection to avoid its elimination during allocation
// replacement (when only a slow call is generated).
call->set_req(TypeFunc::I_O, result_phi_i_o);
}
_igvn.replace_node(alloc, call);
transform_later(call);
@@ -1366,8 +1433,10 @@ void PhaseMacroExpand::expand_allocate_common(
//
extract_call_projections(call);
// An allocate node has separate memory projections for the uses on the control and i_o paths
// Replace uses of the control memory projection with result_phi_rawmem (unless we are only generating a slow call)
// An allocate node has separate memory projections for the uses on
// the control and i_o paths. Replace the control memory projection with
// result_phi_rawmem (unless we are only generating a slow call, in which
// case both memory projections are combined)
if (!always_slow && _memproj_fallthrough != NULL) {
for (DUIterator_Fast imax, i = _memproj_fallthrough->fast_outs(imax); i < imax; i++) {
Node *use = _memproj_fallthrough->fast_out(i);
@@ -1378,8 +1447,8 @@ void PhaseMacroExpand::expand_allocate_common(
--i;
}
}
// Now change uses of _memproj_catchall to use _memproj_fallthrough and delete _memproj_catchall so
// we end up with a call that has only 1 memory projection
// Now change uses of _memproj_catchall to use _memproj_fallthrough and delete
// _memproj_catchall so we end up with a call that has only 1 memory projection.
if (_memproj_catchall != NULL ) {
if (_memproj_fallthrough == NULL) {
_memproj_fallthrough = new (C, 1) ProjNode(call, TypeFunc::Memory);
@@ -1393,17 +1462,18 @@ void PhaseMacroExpand::expand_allocate_common(
// back up iterator
--i;
}
assert(_memproj_catchall->outcnt() == 0, "all uses must be deleted");
_igvn.remove_dead_node(_memproj_catchall);
}
// An allocate node has separate i_o projections for the uses on the control and i_o paths
// Replace uses of the control i_o projection with result_phi_i_o (unless we are only generating a slow call)
if (_ioproj_fallthrough == NULL) {
_ioproj_fallthrough = new (C, 1) ProjNode(call, TypeFunc::I_O);
transform_later(_ioproj_fallthrough);
} else if (!always_slow) {
// An allocate node has separate i_o projections for the uses on the control
// and i_o paths. Always replace the control i_o projection with result i_o
// otherwise the incoming i_o becomes dead when only a slow call is generated
// (this differs from the memory projections, where both projections are
// combined in that case).
if (_ioproj_fallthrough != NULL) {
for (DUIterator_Fast imax, i = _ioproj_fallthrough->fast_outs(imax); i < imax; i++) {
Node *use = _ioproj_fallthrough->fast_out(i);
_igvn.hash_delete(use);
imax -= replace_input(use, _ioproj_fallthrough, result_phi_i_o);
_igvn._worklist.push(use);
@@ -1411,9 +1481,13 @@ void PhaseMacroExpand::expand_allocate_common(
--i;
}
}
// Now change uses of _ioproj_catchall to use _ioproj_fallthrough and delete _ioproj_catchall so
// we end up with a call that has only 1 control projection
// Now change uses of _ioproj_catchall to use _ioproj_fallthrough and delete
// _ioproj_catchall so we end up with a call that has only 1 i_o projection.
if (_ioproj_catchall != NULL ) {
if (_ioproj_fallthrough == NULL) {
_ioproj_fallthrough = new (C, 1) ProjNode(call, TypeFunc::I_O);
transform_later(_ioproj_fallthrough);
}
for (DUIterator_Fast imax, i = _ioproj_catchall->fast_outs(imax); i < imax; i++) {
Node *use = _ioproj_catchall->fast_out(i);
_igvn.hash_delete(use);
@@ -1422,11 +1496,18 @@ void PhaseMacroExpand::expand_allocate_common(
// back up iterator
--i;
}
assert(_ioproj_catchall->outcnt() == 0, "all uses must be deleted");
_igvn.remove_dead_node(_ioproj_catchall);
}
// if we generated only a slow call, we are done
if (always_slow)
if (always_slow) {
// Now we can unhook i_o.
call->set_req(TypeFunc::I_O, top());
if (result_phi_i_o->outcnt() == 0)
_igvn.remove_dead_node(result_phi_i_o);
return;
}
if (_fallthroughcatchproj != NULL) {
@@ -1470,7 +1551,7 @@ PhaseMacroExpand::initialize_object(AllocateNode* alloc,
Node* mark_node = NULL;
// For now only enable fast locking for non-array types
if (UseBiasedLocking && (length == NULL)) {
mark_node = make_load(control, rawmem, klass_node, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), TypeRawPtr::BOTTOM, T_ADDRESS);
mark_node = make_load(control, rawmem, klass_node, in_bytes(Klass::prototype_header_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
} else {
mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype()));
}
@@ -1958,7 +2039,7 @@ void PhaseMacroExpand::expand_lock_node(LockNode *lock) {
#endif
klass_node->init_req(0, ctrl);
}
Node *proto_node = make_load(ctrl, mem, klass_node, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), TypeX_X, TypeX_X->basic_type());
Node *proto_node = make_load(ctrl, mem, klass_node, in_bytes(Klass::prototype_header_offset()), TypeX_X, TypeX_X->basic_type());
Node* thread = transform_later(new (C, 1) ThreadLocalNode());
Node* cast_thread = transform_later(new (C, 2) CastP2XNode(ctrl, thread));

View file

@@ -1365,31 +1365,36 @@ static bool match_into_reg( const Node *n, Node *m, Node *control, int i, bool s
const Type *t = m->bottom_type();
if( t->singleton() ) {
if (t->singleton()) {
// Never force constants into registers. Allow them to match as
// constants or registers. Copies of the same value will share
// the same register. See find_shared_node.
return false;
} else { // Not a constant
// Stop recursion if they have different Controls.
// Slot 0 of constants is not really a Control.
if( control && m->in(0) && control != m->in(0) ) {
Node* m_control = m->in(0);
// Control of the load's memory can post-dominate the load's control.
// So use it, since the load can't float above its memory.
Node* mem_control = (m->is_Load()) ? m->in(MemNode::Memory)->in(0) : NULL;
if (control && m_control && control != m_control && control != mem_control) {
// Actually, we can live with the most conservative control we
// find, if it post-dominates the others. This allows us to
// pick up load/op/store trees where the load can float a little
// above the store.
Node *x = control;
const uint max_scan = 6; // Arbitrary scan cutoff
const uint max_scan = 6; // Arbitrary scan cutoff
uint j;
for( j=0; j<max_scan; j++ ) {
if( x->is_Region() ) // Bail out at merge points
for (j=0; j<max_scan; j++) {
if (x->is_Region()) // Bail out at merge points
return true;
x = x->in(0);
if( x == m->in(0) ) // Does 'control' post-dominate
if (x == m_control) // Does 'control' post-dominate
break; // m->in(0)? If so, we can use it
if (x == mem_control) // Does 'control' post-dominate
break; // mem_control? If so, we can use it
}
if( j == max_scan ) // No post-domination before scan end?
if (j == max_scan) // No post-domination before scan end?
return true; // Then break the match tree up
}
if (m->is_DecodeN() && Matcher::narrow_oop_use_complex_address()) {

View file

@@ -1473,19 +1473,19 @@ Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) {
const Type*
LoadNode::load_array_final_field(const TypeKlassPtr *tkls,
ciKlass* klass) const {
if (tkls->offset() == Klass::modifier_flags_offset_in_bytes() + (int)sizeof(oopDesc)) {
if (tkls->offset() == in_bytes(Klass::modifier_flags_offset())) {
// The field is Klass::_modifier_flags. Return its (constant) value.
// (Folds up the 2nd indirection in aClassConstant.getModifiers().)
assert(this->Opcode() == Op_LoadI, "must load an int from _modifier_flags");
return TypeInt::make(klass->modifier_flags());
}
if (tkls->offset() == Klass::access_flags_offset_in_bytes() + (int)sizeof(oopDesc)) {
if (tkls->offset() == in_bytes(Klass::access_flags_offset())) {
// The field is Klass::_access_flags. Return its (constant) value.
// (Folds up the 2nd indirection in Reflection.getClassAccessFlags(aClassConstant).)
assert(this->Opcode() == Op_LoadI, "must load an int from _access_flags");
return TypeInt::make(klass->access_flags());
}
if (tkls->offset() == Klass::layout_helper_offset_in_bytes() + (int)sizeof(oopDesc)) {
if (tkls->offset() == in_bytes(Klass::layout_helper_offset())) {
// The field is Klass::_layout_helper. Return its constant value if known.
assert(this->Opcode() == Op_LoadI, "must load an int from _layout_helper");
return TypeInt::make(klass->layout_helper());
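As a hypothetical Java-level example of code that benefits from these folds (per the comments in this hunk, the second indirection in aClassConstant.getModifiers() collapses to a constant when the Class operand is a compile-time constant):

class FoldDemo {
    static boolean stringIsFinal() {
        // String.class is a constant, so the load of Klass::_modifier_flags can fold
        // and this test can reduce to a constant after compilation.
        return java.lang.reflect.Modifier.isFinal(String.class.getModifiers());
    }
}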
@@ -1636,14 +1636,14 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
// We are loading a field from a Klass metaobject whose identity
// is known at compile time (the type is "exact" or "precise").
// Check for fields we know are maintained as constants by the VM.
if (tkls->offset() == Klass::super_check_offset_offset_in_bytes() + (int)sizeof(oopDesc)) {
if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
// The field is Klass::_super_check_offset. Return its (constant) value.
// (Folds up type checking code.)
assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
return TypeInt::make(klass->super_check_offset());
}
// Compute index into primary_supers array
juint depth = (tkls->offset() - (Klass::primary_supers_offset_in_bytes() + (int)sizeof(oopDesc))) / sizeof(klassOop);
juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(klassOop);
// Check for overflowing; use unsigned compare to handle the negative case.
if( depth < ciKlass::primary_super_limit() ) {
// The field is an element of Klass::_primary_supers. Return its (constant) value.
@@ -1654,14 +1654,14 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
}
const Type* aift = load_array_final_field(tkls, klass);
if (aift != NULL) return aift;
if (tkls->offset() == in_bytes(arrayKlass::component_mirror_offset()) + (int)sizeof(oopDesc)
if (tkls->offset() == in_bytes(arrayKlass::component_mirror_offset())
&& klass->is_array_klass()) {
// The field is arrayKlass::_component_mirror. Return its (constant) value.
// (Folds up aClassConstant.getComponentType, common in Arrays.copyOf.)
assert(Opcode() == Op_LoadP, "must load an oop from _component_mirror");
return TypeInstPtr::make(klass->as_array_klass()->component_mirror());
}
if (tkls->offset() == Klass::java_mirror_offset_in_bytes() + (int)sizeof(oopDesc)) {
if (tkls->offset() == in_bytes(Klass::java_mirror_offset())) {
// The field is Klass::_java_mirror. Return its (constant) value.
// (Folds up the 2nd indirection in anObjConstant.getClass().)
assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
@@ -1679,7 +1679,7 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
if( inner->is_instance_klass() &&
!inner->as_instance_klass()->flags().is_interface() ) {
// Compute index into primary_supers array
juint depth = (tkls->offset() - (Klass::primary_supers_offset_in_bytes() + (int)sizeof(oopDesc))) / sizeof(klassOop);
juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(klassOop);
// Check for overflowing; use unsigned compare to handle the negative case.
if( depth < ciKlass::primary_super_limit() &&
depth <= klass->super_depth() ) { // allow self-depth checks to handle self-check case
@@ -1695,7 +1695,7 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
// If the type is enough to determine that the thing is not an array,
// we can give the layout_helper a positive interval type.
// This will help short-circuit some reflective code.
if (tkls->offset() == Klass::layout_helper_offset_in_bytes() + (int)sizeof(oopDesc)
if (tkls->offset() == in_bytes(Klass::layout_helper_offset())
&& !klass->is_array_klass() // not directly typed as an array
&& !klass->is_interface() // specifically not Serializable & Cloneable
&& !klass->is_java_lang_Object() // not the supertype of all T[]
@@ -1938,7 +1938,7 @@ const Type *LoadNode::klass_value_common( PhaseTransform *phase ) const {
if( !klass->is_loaded() )
return _type; // Bail out if not loaded
if( klass->is_obj_array_klass() &&
(uint)tkls->offset() == objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)) {
tkls->offset() == in_bytes(objArrayKlass::element_klass_offset())) {
ciKlass* elem = klass->as_obj_array_klass()->element_klass();
// // Always returning precise element type is incorrect,
// // e.g., element type could be object and array may contain strings
@@ -1949,7 +1949,7 @@ const Type *LoadNode::klass_value_common( PhaseTransform *phase ) const {
return TypeKlassPtr::make(tkls->ptr(), elem, 0/*offset*/);
}
if( klass->is_instance_klass() && tkls->klass_is_exact() &&
(uint)tkls->offset() == Klass::super_offset_in_bytes() + sizeof(oopDesc)) {
tkls->offset() == in_bytes(Klass::super_offset())) {
ciKlass* sup = klass->as_instance_klass()->super();
// The field is Klass::_super. Return its (constant) value.
// (Folds up the 2nd indirection in aClassConstant.getSuperClass().)
@@ -2013,11 +2013,11 @@ Node* LoadNode::klass_identity_common(PhaseTransform *phase ) {
tkls->klass()->is_array_klass())
&& adr2->is_AddP()
) {
int mirror_field = Klass::java_mirror_offset_in_bytes();
int mirror_field = in_bytes(Klass::java_mirror_offset());
if (offset == java_lang_Class::array_klass_offset_in_bytes()) {
mirror_field = in_bytes(arrayKlass::component_mirror_offset());
}
if (tkls->offset() == mirror_field + (int)sizeof(oopDesc)) {
if (tkls->offset() == mirror_field) {
return adr2->in(AddPNode::Base);
}
}
@@ -2721,6 +2721,7 @@ MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) {
case Op_MemBarVolatile: return new(C, len) MemBarVolatileNode(C, atp, pn);
case Op_MemBarCPUOrder: return new(C, len) MemBarCPUOrderNode(C, atp, pn);
case Op_Initialize: return new(C, len) InitializeNode(C, atp, pn);
case Op_MemBarStoreStore: return new(C, len) MemBarStoreStoreNode(C, atp, pn);
default: ShouldNotReachHere(); return NULL;
}
}
@@ -2870,7 +2871,7 @@ Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
//---------------------------InitializeNode------------------------------------
InitializeNode::InitializeNode(Compile* C, int adr_type, Node* rawoop)
: _is_complete(Incomplete),
: _is_complete(Incomplete), _does_not_escape(false),
MemBarNode(C, adr_type, rawoop)
{
init_class_id(Class_Initialize);

View file

@@ -918,6 +918,15 @@ public:
virtual int Opcode() const;
};
class MemBarStoreStoreNode: public MemBarNode {
public:
MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
: MemBarNode(C, alias_idx, precedent) {
init_class_id(Class_MemBarStoreStore);
}
virtual int Opcode() const;
};
// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility?
class MemBarVolatileNode: public MemBarNode {
@@ -950,6 +959,8 @@ class InitializeNode: public MemBarNode {
};
int _is_complete;
bool _does_not_escape;
public:
enum {
Control = TypeFunc::Control,
@@ -989,6 +1000,9 @@ public:
void set_complete(PhaseGVN* phase);
void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }
bool does_not_escape() { return _does_not_escape; }
void set_does_not_escape() { _does_not_escape = true; }
#ifdef ASSERT
// ensure all non-degenerate stores are ordered and non-overlapping
bool stores_are_sane(PhaseTransform* phase);

View file

@@ -97,6 +97,7 @@ class MachSpillCopyNode;
class MachTempNode;
class Matcher;
class MemBarNode;
class MemBarStoreStoreNode;
class MemNode;
class MergeMemNode;
class MultiNode;
@@ -564,7 +565,8 @@ public:
DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2)
DEFINE_CLASS_ID(Start, Multi, 2)
DEFINE_CLASS_ID(MemBar, Multi, 3)
DEFINE_CLASS_ID(Initialize, MemBar, 0)
DEFINE_CLASS_ID(Initialize, MemBar, 0)
DEFINE_CLASS_ID(MemBarStoreStore, MemBar, 1)
DEFINE_CLASS_ID(Mach, Node, 1)
DEFINE_CLASS_ID(MachReturn, Mach, 0)
@@ -744,6 +746,7 @@ public:
DEFINE_CLASS_QUERY(MachTemp)
DEFINE_CLASS_QUERY(Mem)
DEFINE_CLASS_QUERY(MemBar)
DEFINE_CLASS_QUERY(MemBarStoreStore)
DEFINE_CLASS_QUERY(MergeMem)
DEFINE_CLASS_QUERY(Multi)
DEFINE_CLASS_QUERY(MultiBranch)

View file

@@ -1911,7 +1911,7 @@ void Parse::call_register_finalizer() {
Node* klass_addr = basic_plus_adr( receiver, receiver, oopDesc::klass_offset_in_bytes() );
Node* klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), klass_addr, TypeInstPtr::KLASS) );
Node* access_flags_addr = basic_plus_adr(klass, klass, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc));
Node* access_flags_addr = basic_plus_adr(klass, klass, in_bytes(Klass::access_flags_offset()));
Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT);
Node* mask = _gvn.transform(new (C, 3) AndINode(access_flags, intcon(JVM_ACC_HAS_FINALIZER)));

View file

@@ -200,7 +200,7 @@ void Parse::array_store_check() {
// Come here for polymorphic array klasses
// Extract the array element class
int element_klass_offset = objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc);
int element_klass_offset = in_bytes(objArrayKlass::element_klass_offset());
Node *p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset);
Node *a_e_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p2, tak) );
@@ -220,7 +220,7 @@ void Parse::emit_guard_for_new(ciInstanceKlass* klass) {
_gvn.set_type(merge, Type::CONTROL);
Node* kls = makecon(TypeKlassPtr::make(klass));
Node* init_thread_offset = _gvn.MakeConX(instanceKlass::init_thread_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes());
Node* init_thread_offset = _gvn.MakeConX(in_bytes(instanceKlass::init_thread_offset()));
Node* adr_node = basic_plus_adr(kls, kls, init_thread_offset);
Node* init_thread = make_load(NULL, adr_node, TypeRawPtr::BOTTOM, T_ADDRESS);
Node *tst = Bool( CmpP( init_thread, cur_thread), BoolTest::eq);
@@ -228,7 +228,7 @@ void Parse::emit_guard_for_new(ciInstanceKlass* klass) {
set_control(IfTrue(iff));
merge->set_req(1, IfFalse(iff));
Node* init_state_offset = _gvn.MakeConX(instanceKlass::init_state_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes());
Node* init_state_offset = _gvn.MakeConX(in_bytes(instanceKlass::init_state_offset()));
adr_node = basic_plus_adr(kls, kls, init_state_offset);
Node* init_state = make_load(NULL, adr_node, TypeInt::INT, T_INT);
Node* being_init = _gvn.intcon(instanceKlass::being_initialized);

View file

@@ -156,20 +156,19 @@ bool AdvancedThresholdPolicy::is_method_profiled(methodOop method) {
// Called with the queue locked and with at least one element
CompileTask* AdvancedThresholdPolicy::select_task(CompileQueue* compile_queue) {
CompileTask *max_task = NULL;
methodOop max_method;
methodHandle max_method;
jlong t = os::javaTimeMillis();
// Iterate through the queue and find a method with a maximum rate.
for (CompileTask* task = compile_queue->first(); task != NULL;) {
CompileTask* next_task = task->next();
methodOop method = (methodOop)JNIHandles::resolve(task->method_handle());
methodDataOop mdo = method->method_data();
update_rate(t, method);
methodHandle method = (methodOop)JNIHandles::resolve(task->method_handle());
update_rate(t, method());
if (max_task == NULL) {
max_task = task;
max_method = method;
} else {
// If a method has been stale for some time, remove it from the queue.
if (is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method)) {
if (is_stale(t, TieredCompileTaskTimeout, method()) && !is_old(method())) {
if (PrintTieredEvents) {
print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel)task->comp_level());
}
@@ -181,7 +180,7 @@ CompileTask* AdvancedThresholdPolicy::select_task(CompileQueue* compile_queue) {
}
// Select a method with a higher rate
if (compare_methods(method, max_method)) {
if (compare_methods(method(), max_method())) {
max_task = task;
max_method = method;
}
@@ -190,7 +189,7 @@ CompileTask* AdvancedThresholdPolicy::select_task(CompileQueue* compile_queue) {
}
if (max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile
&& is_method_profiled(max_method)) {
&& is_method_profiled(max_method())) {
max_task->set_comp_level(CompLevel_limited_profile);
if (PrintTieredEvents) {
print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());

View file

@@ -1000,6 +1000,13 @@ void Arguments::set_mode_flags(Mode mode) {
UseInterpreter = false;
BackgroundCompilation = false;
ClipInlining = false;
// Be much more aggressive in tiered mode with -Xcomp and exercise C2 more.
// We will first compile a level 3 version (C1 with full profiling), then do one invocation of it and
// compile a level 4 (C2) version and then continue executing it.
if (TieredCompilation) {
Tier3InvokeNotifyFreqLog = 0;
Tier4InvocationThreshold = 0;
}
break;
}
}
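A minimal, hypothetical way to observe the effect of these two overrides (the class name is made up, and the exact log output depends on the JVM build):

// Run with: java -Xcomp -XX:+TieredCompilation -XX:+PrintCompilation XcompTieredDemo
// With the overrides above, a method should show up in the PrintCompilation log first at
// level 3 (C1 with full profiling) and then, after roughly one invocation, at level 4 (C2).
public class XcompTieredDemo {
    static int work(int n) {
        int s = 0;
        for (int i = 0; i < n; i++) {
            s += i * i;
        }
        return s;
    }
    public static void main(String[] args) {
        int acc = 0;
        for (int i = 0; i < 10; i++) {
            acc += work(1000);
        }
        System.out.println(acc);
    }
}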

View file

@@ -527,6 +527,9 @@ class CommandLineFlags {
product(intx, UseSSE, 99, \
"Highest supported SSE instructions set on x86/x64") \
\
product(intx, UseAVX, 99, \
"Highest supported AVX instructions set on x86/x64") \
\
product(intx, UseVIS, 99, \
"Highest supported VIS instructions set on Sparc") \
\

View file

@@ -213,17 +213,11 @@ void SharkIntrinsics::do_Object_getClass() {
SharkType::oop_type(),
"klass");
Value *klass_part = builder()->CreateAddressOfStructEntry(
klass,
in_ByteSize(klassOopDesc::klass_part_offset_in_bytes()),
SharkType::klass_type(),
"klass_part");
state()->push(
SharkValue::create_jobject(
builder()->CreateValueOfStructEntry(
klass_part,
in_ByteSize(Klass::java_mirror_offset_in_bytes()),
klass,
Klass::java_mirror_offset(),
SharkType::oop_type(),
"java_mirror"),
true));

View file

@@ -745,15 +745,9 @@ void SharkTopLevelBlock::call_register_finalizer(Value *receiver) {
SharkType::oop_type(),
"klass");
Value *klass_part = builder()->CreateAddressOfStructEntry(
klass,
in_ByteSize(klassOopDesc::klass_part_offset_in_bytes()),
SharkType::klass_type(),
"klass_part");
Value *access_flags = builder()->CreateValueOfStructEntry(
klass_part,
in_ByteSize(Klass::access_flags_offset_in_bytes()),
klass,
Klass::access_flags_offset(),
SharkType::jint_type(),
"access_flags");

View file

@@ -0,0 +1,60 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 7123108
* @summary C1 crashes with assert(if_state != NULL) failed: states do not match up
*
* @run main/othervm -Xcomp Test7123108
*/
public class Test7123108 {
static class Test_Class_0 {
final static byte var_2 = 67;
byte var_3;
}
Object var_25 = "kgfpyhcms";
static long var_27 = 6899666748616086528L;
static float func_1()
{
return 0.0F;
}
private void test()
{
"dlwq".charAt(((short)'x' > var_27 | func_1() <= (((Test_Class_0)var_25).var_3) ? true : true) ? Test_Class_0.var_2 & (short)-1.1173839E38F : 'Y');
}
public static void main(String[] args)
{
Test7123108 t = new Test7123108();
try {
t.test();
} catch (Throwable e) { }
}
}