7118863: Move sizeof(klassOopDesc) into the *Klass::*_offset_in_bytes() functions

Moved sizeof(klassOopDesc), changed the return type to ByteSize and removed the _in_bytes suffix.

Reviewed-by: never, bdelsart, coleenp, jrose
This commit is contained in:
Stefan Karlsson 2011-12-07 11:35:03 +01:00
parent dc542c9909
commit e057d60ca1
40 changed files with 165 additions and 213 deletions

View file

@@ -3036,10 +3036,8 @@ void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
Label* L_failure, Label* L_failure,
Label* L_slow_path, Label* L_slow_path,
RegisterOrConstant super_check_offset) { RegisterOrConstant super_check_offset) {
int sc_offset = (klassOopDesc::header_size() * HeapWordSize + int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
Klass::secondary_super_cache_offset_in_bytes()); int sco_offset = in_bytes(Klass::super_check_offset_offset());
int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::super_check_offset_offset_in_bytes());
bool must_load_sco = (super_check_offset.constant_or_zero() == -1); bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
bool need_slow_path = (must_load_sco || bool need_slow_path = (must_load_sco ||
@@ -3159,10 +3157,8 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
assert(label_nulls <= 1, "at most one NULL in the batch"); assert(label_nulls <= 1, "at most one NULL in the batch");
// a couple of useful fields in sub_klass: // a couple of useful fields in sub_klass:
int ss_offset = (klassOopDesc::header_size() * HeapWordSize + int ss_offset = in_bytes(Klass::secondary_supers_offset());
Klass::secondary_supers_offset_in_bytes()); int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::secondary_super_cache_offset_in_bytes());
// Do a linear scan of the secondary super-klass chain. // Do a linear scan of the secondary super-klass chain.
// This code is rarely used, so simplicity is a virtue here. // This code is rarely used, so simplicity is a virtue here.
@@ -3336,7 +3332,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label); cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label);
load_klass(obj_reg, temp_reg); load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg); ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
or3(G2_thread, temp_reg, temp_reg); or3(G2_thread, temp_reg, temp_reg);
xor3(mark_reg, temp_reg, temp_reg); xor3(mark_reg, temp_reg, temp_reg);
andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg); andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg);
@@ -3413,7 +3409,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
// FIXME: due to a lack of registers we currently blow away the age // FIXME: due to a lack of registers we currently blow away the age
// bits in this situation. Should attempt to preserve them. // bits in this situation. Should attempt to preserve them.
load_klass(obj_reg, temp_reg); load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg); ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
or3(G2_thread, temp_reg, temp_reg); or3(G2_thread, temp_reg, temp_reg);
casn(mark_addr.base(), mark_reg, temp_reg); casn(mark_addr.base(), mark_reg, temp_reg);
// If the biasing toward our thread failed, this means that // If the biasing toward our thread failed, this means that
@@ -3443,7 +3439,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
// FIXME: due to a lack of registers we currently blow away the age // FIXME: due to a lack of registers we currently blow away the age
// bits in this situation. Should attempt to preserve them. // bits in this situation. Should attempt to preserve them.
load_klass(obj_reg, temp_reg); load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg); ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
casn(mark_addr.base(), mark_reg, temp_reg); casn(mark_addr.base(), mark_reg, temp_reg);
// Fall through to the normal CAS-based lock, because no matter what // Fall through to the normal CAS-based lock, because no matter what
// the result of the above CAS, some thread must have succeeded in // the result of the above CAS, some thread must have succeeded in

View file

@@ -302,7 +302,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
assert(_obj != noreg, "must be a valid register"); assert(_obj != noreg, "must be a valid register");
assert(_oop_index >= 0, "must have oop index"); assert(_oop_index >= 0, "must have oop index");
__ load_heap_oop(_obj, java_lang_Class::klass_offset_in_bytes(), G3); __ load_heap_oop(_obj, java_lang_Class::klass_offset_in_bytes(), G3);
__ ld_ptr(G3, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc), G3); __ ld_ptr(G3, in_bytes(instanceKlass::init_thread_offset()), G3);
__ cmp_and_brx_short(G2_thread, G3, Assembler::notEqual, Assembler::pn, call_patch); __ cmp_and_brx_short(G2_thread, G3, Assembler::notEqual, Assembler::pn, call_patch);
// load_klass patches may execute the patched code before it's // load_klass patches may execute the patched code before it's
@@ -471,7 +471,7 @@ void G1UnsafeGetObjSATBBarrierStub::emit_code(LIR_Assembler* ce) {
__ load_klass(src_reg, tmp_reg); __ load_klass(src_reg, tmp_reg);
Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset_in_bytes() + sizeof(oopDesc)); Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset());
__ ld(ref_type_adr, tmp_reg); __ ld(ref_type_adr, tmp_reg);
// _reference_type field is of type ReferenceType (enum) // _reference_type field is of type ReferenceType (enum)

View file

@@ -2202,8 +2202,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
} else if (!(flags & LIR_OpArrayCopy::dst_objarray)) { } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
__ load_klass(dst, tmp); __ load_klass(dst, tmp);
} }
int lh_offset = klassOopDesc::header_size() * HeapWordSize + int lh_offset = in_bytes(Klass::layout_helper_offset());
Klass::layout_helper_offset_in_bytes();
__ lduw(tmp, lh_offset, tmp2); __ lduw(tmp, lh_offset, tmp2);
@@ -2238,12 +2237,10 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ mov(length, len); __ mov(length, len);
__ load_klass(dst, tmp); __ load_klass(dst, tmp);
int ek_offset = (klassOopDesc::header_size() * HeapWordSize + int ek_offset = in_bytes(objArrayKlass::element_klass_offset());
objArrayKlass::element_klass_offset_in_bytes());
__ ld_ptr(tmp, ek_offset, super_k); __ ld_ptr(tmp, ek_offset, super_k);
int sco_offset = (klassOopDesc::header_size() * HeapWordSize + int sco_offset = in_bytes(Klass::super_check_offset_offset());
Klass::super_check_offset_offset_in_bytes());
__ lduw(super_k, sco_offset, chk_off); __ lduw(super_k, sco_offset, chk_off);
__ call_VM_leaf(tmp, copyfunc_addr); __ call_VM_leaf(tmp, copyfunc_addr);
@@ -2456,7 +2453,7 @@ void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
op->klass()->as_register() == G5, "must be"); op->klass()->as_register() == G5, "must be");
if (op->init_check()) { if (op->init_check()) {
__ ld(op->klass()->as_register(), __ ld(op->klass()->as_register(),
instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), in_bytes(instanceKlass::init_state_offset()),
op->tmp1()->as_register()); op->tmp1()->as_register());
add_debug_info_for_null_check_here(op->stub()->info()); add_debug_info_for_null_check_here(op->stub()->info());
__ cmp(op->tmp1()->as_register(), instanceKlass::fully_initialized); __ cmp(op->tmp1()->as_register(), instanceKlass::fully_initialized);
@@ -2627,7 +2624,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
} else { } else {
bool need_slow_path = true; bool need_slow_path = true;
if (k->is_loaded()) { if (k->is_loaded()) {
if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()) if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset()))
need_slow_path = false; need_slow_path = false;
// perform the fast part of the checking logic // perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg, __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg,
@@ -2731,7 +2728,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
__ load_klass(value, klass_RInfo); __ load_klass(value, klass_RInfo);
// get instance klass // get instance klass
__ ld_ptr(Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)), k_RInfo); __ ld_ptr(Address(k_RInfo, objArrayKlass::element_klass_offset()), k_RInfo);
// perform the fast part of the checking logic // perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL); __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL);

View file

@@ -181,7 +181,7 @@ void C1_MacroAssembler::try_allocate(
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) { void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
assert_different_registers(obj, klass, len, t1, t2); assert_different_registers(obj, klass, len, t1, t2);
if (UseBiasedLocking && !len->is_valid()) { if (UseBiasedLocking && !len->is_valid()) {
ld_ptr(klass, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes(), t1); ld_ptr(klass, in_bytes(Klass::prototype_header_offset()), t1);
} else { } else {
set((intx)markOopDesc::prototype(), t1); set((intx)markOopDesc::prototype(), t1);
} }
@@ -252,7 +252,7 @@ void C1_MacroAssembler::initialize_object(
#ifdef ASSERT #ifdef ASSERT
{ {
Label ok; Label ok;
ld(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), t1); ld(klass, in_bytes(Klass::layout_helper_offset()), t1);
if (var_size_in_bytes != noreg) { if (var_size_in_bytes != noreg) {
cmp_and_brx_short(t1, var_size_in_bytes, Assembler::equal, Assembler::pt, ok); cmp_and_brx_short(t1, var_size_in_bytes, Assembler::equal, Assembler::pt, ok);
} else { } else {

View file

@@ -398,14 +398,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
if (id == fast_new_instance_init_check_id) { if (id == fast_new_instance_init_check_id) {
// make sure the klass is initialized // make sure the klass is initialized
__ ld(G5_klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), G3_t1); __ ld(G5_klass, in_bytes(instanceKlass::init_state_offset()), G3_t1);
__ cmp_and_br_short(G3_t1, instanceKlass::fully_initialized, Assembler::notEqual, Assembler::pn, slow_path); __ cmp_and_br_short(G3_t1, instanceKlass::fully_initialized, Assembler::notEqual, Assembler::pn, slow_path);
} }
#ifdef ASSERT #ifdef ASSERT
// assert object can be fast path allocated // assert object can be fast path allocated
{ {
Label ok, not_ok; Label ok, not_ok;
__ ld(G5_klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), G1_obj_size); __ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);
// make sure it's an instance (LH > 0) // make sure it's an instance (LH > 0)
__ cmp_and_br_short(G1_obj_size, 0, Assembler::lessEqual, Assembler::pn, not_ok); __ cmp_and_br_short(G1_obj_size, 0, Assembler::lessEqual, Assembler::pn, not_ok);
__ btst(Klass::_lh_instance_slow_path_bit, G1_obj_size); __ btst(Klass::_lh_instance_slow_path_bit, G1_obj_size);
@@ -425,7 +425,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ bind(retry_tlab); __ bind(retry_tlab);
// get the instance size // get the instance size
__ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size); __ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);
__ tlab_allocate(O0_obj, G1_obj_size, 0, G3_t1, slow_path); __ tlab_allocate(O0_obj, G1_obj_size, 0, G3_t1, slow_path);
@@ -437,7 +437,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ bind(try_eden); __ bind(try_eden);
// get the instance size // get the instance size
__ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size); __ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);
__ eden_allocate(O0_obj, G1_obj_size, 0, G3_t1, G4_t2, slow_path); __ eden_allocate(O0_obj, G1_obj_size, 0, G3_t1, G4_t2, slow_path);
__ incr_allocated_bytes(G1_obj_size, G3_t1, G4_t2); __ incr_allocated_bytes(G1_obj_size, G3_t1, G4_t2);
@@ -471,8 +471,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
Register G4_length = G4; // Incoming Register G4_length = G4; // Incoming
Register O0_obj = O0; // Outgoing Register O0_obj = O0; // Outgoing
Address klass_lh(G5_klass, ((klassOopDesc::header_size() * HeapWordSize) Address klass_lh(G5_klass, Klass::layout_helper_offset());
+ Klass::layout_helper_offset_in_bytes()));
assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise"); assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
assert(Klass::_lh_header_size_mask == 0xFF, "bytewise"); assert(Klass::_lh_header_size_mask == 0xFF, "bytewise");
// Use this offset to pick out an individual byte of the layout_helper: // Use this offset to pick out an individual byte of the layout_helper:
@@ -592,7 +591,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
Label register_finalizer; Label register_finalizer;
Register t = O1; Register t = O1;
__ load_klass(O0, t); __ load_klass(O0, t);
__ ld(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), t); __ ld(t, in_bytes(Klass::access_flags_offset()), t);
__ set(JVM_ACC_HAS_FINALIZER, G3); __ set(JVM_ACC_HAS_FINALIZER, G3);
__ andcc(G3, t, G0); __ andcc(G3, t, G0);
__ br(Assembler::notZero, false, Assembler::pt, register_finalizer); __ br(Assembler::notZero, false, Assembler::pt, register_finalizer);

View file

@@ -766,7 +766,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// get native function entry point(O0 is a good temp until the very end) // get native function entry point(O0 is a good temp until the very end)
ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::native_function_offset())), O0); ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::native_function_offset())), O0);
// for static methods insert the mirror argument // for static methods insert the mirror argument
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes(); const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc:: constants_offset())), O1); __ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc:: constants_offset())), O1);
__ ld_ptr(Address(O1, 0, constantPoolOopDesc::pool_holder_offset_in_bytes()), O1); __ ld_ptr(Address(O1, 0, constantPoolOopDesc::pool_holder_offset_in_bytes()), O1);
@@ -1173,7 +1173,7 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
__ btst(JVM_ACC_SYNCHRONIZED, O1); __ btst(JVM_ACC_SYNCHRONIZED, O1);
__ br( Assembler::zero, false, Assembler::pt, done); __ br( Assembler::zero, false, Assembler::pt, done);
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes(); const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ delayed()->btst(JVM_ACC_STATIC, O1); __ delayed()->btst(JVM_ACC_STATIC, O1);
__ ld_ptr(XXX_STATE(_locals), O1); __ ld_ptr(XXX_STATE(_locals), O1);
__ br( Assembler::zero, true, Assembler::pt, got_obj); __ br( Assembler::zero, true, Assembler::pt, got_obj);

View file

@@ -1098,7 +1098,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
Address G3_amh_argument ( G3_method_handle, java_lang_invoke_AdapterMethodHandle::argument_offset_in_bytes()); Address G3_amh_argument ( G3_method_handle, java_lang_invoke_AdapterMethodHandle::argument_offset_in_bytes());
Address G3_amh_conversion(G3_method_handle, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes()); Address G3_amh_conversion(G3_method_handle, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes());
const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes(); const int java_mirror_offset = in_bytes(Klass::java_mirror_offset());
if (have_entry(ek)) { if (have_entry(ek)) {
__ nop(); // empty stubs make SG sick __ nop(); // empty stubs make SG sick

View file

@@ -3046,8 +3046,7 @@ class StubGenerator: public StubCodeGenerator {
// array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
// //
int lh_offset = klassOopDesc::header_size() * HeapWordSize + int lh_offset = in_bytes(Klass::layout_helper_offset());
Klass::layout_helper_offset_in_bytes();
// Load 32-bits signed value. Use br() instruction with it to check icc. // Load 32-bits signed value. Use br() instruction with it to check icc.
__ lduw(G3_src_klass, lh_offset, G5_lh); __ lduw(G3_src_klass, lh_offset, G5_lh);
@@ -3194,15 +3193,13 @@ class StubGenerator: public StubCodeGenerator {
G4_dst_klass, G3_src_klass); G4_dst_klass, G3_src_klass);
// Generate the type check. // Generate the type check.
int sco_offset = (klassOopDesc::header_size() * HeapWordSize + int sco_offset = in_bytes(Klass::super_check_offset_offset());
Klass::super_check_offset_offset_in_bytes());
__ lduw(G4_dst_klass, sco_offset, sco_temp); __ lduw(G4_dst_klass, sco_offset, sco_temp);
generate_type_check(G3_src_klass, sco_temp, G4_dst_klass, generate_type_check(G3_src_klass, sco_temp, G4_dst_klass,
O5_temp, L_plain_copy); O5_temp, L_plain_copy);
// Fetch destination element klass from the objArrayKlass header. // Fetch destination element klass from the objArrayKlass header.
int ek_offset = (klassOopDesc::header_size() * HeapWordSize + int ek_offset = in_bytes(objArrayKlass::element_klass_offset());
objArrayKlass::element_klass_offset_in_bytes());
// the checkcast_copy loop needs two extra arguments: // the checkcast_copy loop needs two extra arguments:
__ ld_ptr(G4_dst_klass, ek_offset, O4); // dest elem klass __ ld_ptr(G4_dst_klass, ek_offset, O4); // dest elem klass

View file

@@ -366,7 +366,7 @@ void InterpreterGenerator::lock_method(void) {
// get synchronization object to O0 // get synchronization object to O0
{ Label done; { Label done;
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes(); const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ btst(JVM_ACC_STATIC, O0); __ btst(JVM_ACC_STATIC, O0);
__ br( Assembler::zero, true, Assembler::pt, done); __ br( Assembler::zero, true, Assembler::pt, done);
__ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case
@@ -984,7 +984,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// get native function entry point(O0 is a good temp until the very end) // get native function entry point(O0 is a good temp until the very end)
__ delayed()->ld_ptr(Lmethod, in_bytes(methodOopDesc::native_function_offset()), O0); __ delayed()->ld_ptr(Lmethod, in_bytes(methodOopDesc::native_function_offset()), O0);
// for static methods insert the mirror argument // for static methods insert the mirror argument
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes(); const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ ld_ptr(Lmethod, methodOopDesc:: constants_offset(), O1); __ ld_ptr(Lmethod, methodOopDesc:: constants_offset(), O1);
__ ld_ptr(O1, constantPoolOopDesc::pool_holder_offset_in_bytes(), O1); __ ld_ptr(O1, constantPoolOopDesc::pool_holder_offset_in_bytes(), O1);

View file

@@ -888,7 +888,7 @@ void TemplateTable::aastore() {
// do fast instanceof cache test // do fast instanceof cache test
__ ld_ptr(O4, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes(), O4); __ ld_ptr(O4, in_bytes(objArrayKlass::element_klass_offset()), O4);
assert(Otos_i == O0, "just checking"); assert(Otos_i == O0, "just checking");
@@ -2031,7 +2031,7 @@ void TemplateTable::_return(TosState state) {
__ access_local_ptr(G3_scratch, Otos_i); __ access_local_ptr(G3_scratch, Otos_i);
__ load_klass(Otos_i, O2); __ load_klass(Otos_i, O2);
__ set(JVM_ACC_HAS_FINALIZER, G3); __ set(JVM_ACC_HAS_FINALIZER, G3);
__ ld(O2, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), O2); __ ld(O2, in_bytes(Klass::access_flags_offset()), O2);
__ andcc(G3, O2, G0); __ andcc(G3, O2, G0);
Label skip_register_finalizer; Label skip_register_finalizer;
__ br(Assembler::zero, false, Assembler::pn, skip_register_finalizer); __ br(Assembler::zero, false, Assembler::pn, skip_register_finalizer);
@@ -3350,13 +3350,13 @@ void TemplateTable::_new() {
__ ld_ptr(Rscratch, Roffset, RinstanceKlass); __ ld_ptr(Rscratch, Roffset, RinstanceKlass);
// make sure klass is fully initialized: // make sure klass is fully initialized:
__ ld(RinstanceKlass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), G3_scratch); __ ld(RinstanceKlass, in_bytes(instanceKlass::init_state_offset()), G3_scratch);
__ cmp(G3_scratch, instanceKlass::fully_initialized); __ cmp(G3_scratch, instanceKlass::fully_initialized);
__ br(Assembler::notEqual, false, Assembler::pn, slow_case); __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
__ delayed()->ld(RinstanceKlass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), Roffset); __ delayed()->ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);
// get instance_size in instanceKlass (already aligned) // get instance_size in instanceKlass (already aligned)
//__ ld(RinstanceKlass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), Roffset); //__ ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);
// make sure klass does not have has_finalizer, or is abstract, or interface or java/lang/Class // make sure klass does not have has_finalizer, or is abstract, or interface or java/lang/Class
__ btst(Klass::_lh_instance_slow_path_bit, Roffset); __ btst(Klass::_lh_instance_slow_path_bit, Roffset);
@@ -3483,7 +3483,7 @@ void TemplateTable::_new() {
__ bind(initialize_header); __ bind(initialize_header);
if (UseBiasedLocking) { if (UseBiasedLocking) {
__ ld_ptr(RinstanceKlass, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), G4_scratch); __ ld_ptr(RinstanceKlass, in_bytes(Klass::prototype_header_offset()), G4_scratch);
} else { } else {
__ set((intptr_t)markOopDesc::prototype(), G4_scratch); __ set((intptr_t)markOopDesc::prototype(), G4_scratch);
} }

View file

@@ -4920,7 +4920,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
null_check_offset = offset(); null_check_offset = offset();
} }
movl(tmp_reg, klass_addr); movl(tmp_reg, klass_addr);
xorl(swap_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); xorl(swap_reg, Address(tmp_reg, Klass::prototype_header_offset()));
andl(swap_reg, ~((int) markOopDesc::age_mask_in_place)); andl(swap_reg, ~((int) markOopDesc::age_mask_in_place));
if (need_tmp_reg) { if (need_tmp_reg) {
pop(tmp_reg); pop(tmp_reg);
@@ -5007,7 +5007,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
} }
get_thread(tmp_reg); get_thread(tmp_reg);
movl(swap_reg, klass_addr); movl(swap_reg, klass_addr);
orl(tmp_reg, Address(swap_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); orl(tmp_reg, Address(swap_reg, Klass::prototype_header_offset()));
movl(swap_reg, saved_mark_addr); movl(swap_reg, saved_mark_addr);
if (os::is_MP()) { if (os::is_MP()) {
lock(); lock();
@@ -5045,7 +5045,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
push(tmp_reg); push(tmp_reg);
} }
movl(tmp_reg, klass_addr); movl(tmp_reg, klass_addr);
movl(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); movl(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset()));
if (os::is_MP()) { if (os::is_MP()) {
lock(); lock();
} }
@@ -8234,10 +8234,8 @@ void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
assert(label_nulls <= 1, "at most one NULL in the batch"); assert(label_nulls <= 1, "at most one NULL in the batch");
int sc_offset = (klassOopDesc::header_size() * HeapWordSize + int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
Klass::secondary_super_cache_offset_in_bytes()); int sco_offset = in_bytes(Klass::super_check_offset_offset());
int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::super_check_offset_offset_in_bytes());
Address super_check_offset_addr(super_klass, sco_offset); Address super_check_offset_addr(super_klass, sco_offset);
// Hacked jcc, which "knows" that L_fallthrough, at least, is in // Hacked jcc, which "knows" that L_fallthrough, at least, is in
@@ -8335,10 +8333,8 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
assert(label_nulls <= 1, "at most one NULL in the batch"); assert(label_nulls <= 1, "at most one NULL in the batch");
// a couple of useful fields in sub_klass: // a couple of useful fields in sub_klass:
int ss_offset = (klassOopDesc::header_size() * HeapWordSize + int ss_offset = in_bytes(Klass::secondary_supers_offset());
Klass::secondary_supers_offset_in_bytes()); int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::secondary_super_cache_offset_in_bytes());
Address secondary_supers_addr(sub_klass, ss_offset); Address secondary_supers_addr(sub_klass, ss_offset);
Address super_cache_addr( sub_klass, sc_offset); Address super_cache_addr( sub_klass, sc_offset);
@@ -9010,20 +9006,20 @@ void MacroAssembler::load_prototype_header(Register dst, Register src) {
if (Universe::narrow_oop_shift() != 0) { if (Universe::narrow_oop_shift() != 0) {
assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
if (LogMinObjAlignmentInBytes == Address::times_8) { if (LogMinObjAlignmentInBytes == Address::times_8) {
movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset()));
} else { } else {
// OK to use shift since we don't need to preserve flags. // OK to use shift since we don't need to preserve flags.
shlq(dst, LogMinObjAlignmentInBytes); shlq(dst, LogMinObjAlignmentInBytes);
movq(dst, Address(r12_heapbase, dst, Address::times_1, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); movq(dst, Address(r12_heapbase, dst, Address::times_1, Klass::prototype_header_offset()));
} }
} else { } else {
movq(dst, Address(dst, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); movq(dst, Address(dst, Klass::prototype_header_offset()));
} }
} else } else
#endif #endif
{ {
movptr(dst, Address(src, oopDesc::klass_offset_in_bytes())); movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
movptr(dst, Address(dst, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); movptr(dst, Address(dst, Klass::prototype_header_offset()));
} }
} }

View file

@@ -320,7 +320,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
// begin_initialized_entry_offset has to fit in a byte. Also, we know it's not null. // begin_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
__ load_heap_oop_not_null(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes())); __ load_heap_oop_not_null(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
__ get_thread(tmp); __ get_thread(tmp);
__ cmpptr(tmp, Address(tmp2, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc))); __ cmpptr(tmp, Address(tmp2, instanceKlass::init_thread_offset()));
__ pop(tmp2); __ pop(tmp2);
__ pop(tmp); __ pop(tmp);
__ jcc(Assembler::notEqual, call_patch); __ jcc(Assembler::notEqual, call_patch);
@@ -519,7 +519,7 @@ void G1UnsafeGetObjSATBBarrierStub::emit_code(LIR_Assembler* ce) {
__ load_klass(tmp_reg, src_reg); __ load_klass(tmp_reg, src_reg);
Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset_in_bytes() + sizeof(oopDesc)); Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset());
__ cmpl(ref_type_adr, REF_NONE); __ cmpl(ref_type_adr, REF_NONE);
__ jcc(Assembler::equal, _continuation); __ jcc(Assembler::equal, _continuation);

View file

@@ -1558,7 +1558,7 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) { void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
if (op->init_check()) { if (op->init_check()) {
__ cmpl(Address(op->klass()->as_register(), __ cmpl(Address(op->klass()->as_register(),
instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::init_state_offset()),
instanceKlass::fully_initialized); instanceKlass::fully_initialized);
add_debug_info_for_null_check_here(op->stub()->info()); add_debug_info_for_null_check_here(op->stub()->info());
__ jcc(Assembler::notEqual, *op->stub()->entry()); __ jcc(Assembler::notEqual, *op->stub()->entry());
@@ -1730,7 +1730,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
#else #else
__ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding()); __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
#endif // _LP64 #endif // _LP64
if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() != k->super_check_offset()) { if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
__ jcc(Assembler::notEqual, *failure_target); __ jcc(Assembler::notEqual, *failure_target);
// successful cast, fall through to profile or jump // successful cast, fall through to profile or jump
} else { } else {
@@ -1842,7 +1842,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
__ load_klass(klass_RInfo, value); __ load_klass(klass_RInfo, value);
// get instance klass (it's already uncompressed) // get instance klass (it's already uncompressed)
__ movptr(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc))); __ movptr(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset()));
// perform the fast part of the checking logic // perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL); __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
// call out-of-line instance of __ check_klass_subtype_slow_path(...): // call out-of-line instance of __ check_klass_subtype_slow_path(...):
@ -3289,8 +3289,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
} else if (!(flags & LIR_OpArrayCopy::dst_objarray)) { } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
__ load_klass(tmp, dst); __ load_klass(tmp, dst);
} }
int lh_offset = klassOopDesc::header_size() * HeapWordSize + int lh_offset = in_bytes(Klass::layout_helper_offset());
Klass::layout_helper_offset_in_bytes();
Address klass_lh_addr(tmp, lh_offset); Address klass_lh_addr(tmp, lh_offset);
jint objArray_lh = Klass::array_layout_helper(T_OBJECT); jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
__ cmpl(klass_lh_addr, objArray_lh); __ cmpl(klass_lh_addr, objArray_lh);
@ -3307,9 +3306,9 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
#ifndef _LP64 #ifndef _LP64
__ movptr(tmp, dst_klass_addr); __ movptr(tmp, dst_klass_addr);
__ movptr(tmp, Address(tmp, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc))); __ movptr(tmp, Address(tmp, objArrayKlass::element_klass_offset()));
__ push(tmp); __ push(tmp);
__ movl(tmp, Address(tmp, Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc))); __ movl(tmp, Address(tmp, Klass::super_check_offset_offset()));
__ push(tmp); __ push(tmp);
__ push(length); __ push(length);
__ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
@ -3333,15 +3332,15 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
// Allocate abi space for args but be sure to keep stack aligned // Allocate abi space for args but be sure to keep stack aligned
__ subptr(rsp, 6*wordSize); __ subptr(rsp, 6*wordSize);
__ load_klass(c_rarg3, dst); __ load_klass(c_rarg3, dst);
__ movptr(c_rarg3, Address(c_rarg3, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc))); __ movptr(c_rarg3, Address(c_rarg3, objArrayKlass::element_klass_offset()));
store_parameter(c_rarg3, 4); store_parameter(c_rarg3, 4);
__ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc))); __ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset()));
__ call(RuntimeAddress(copyfunc_addr)); __ call(RuntimeAddress(copyfunc_addr));
__ addptr(rsp, 6*wordSize); __ addptr(rsp, 6*wordSize);
#else #else
__ load_klass(c_rarg4, dst); __ load_klass(c_rarg4, dst);
__ movptr(c_rarg4, Address(c_rarg4, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc))); __ movptr(c_rarg4, Address(c_rarg4, objArrayKlass::element_klass_offset()));
__ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc))); __ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
__ call(RuntimeAddress(copyfunc_addr)); __ call(RuntimeAddress(copyfunc_addr));
#endif #endif

View file

@ -150,7 +150,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
assert_different_registers(obj, klass, len); assert_different_registers(obj, klass, len);
if (UseBiasedLocking && !len->is_valid()) { if (UseBiasedLocking && !len->is_valid()) {
assert_different_registers(obj, klass, len, t1, t2); assert_different_registers(obj, klass, len, t1, t2);
movptr(t1, Address(klass, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); movptr(t1, Address(klass, Klass::prototype_header_offset()));
movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1); movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
} else { } else {
// This assumes that all prototype bits fit in an int32_t // This assumes that all prototype bits fit in an int32_t

View file

@ -1011,7 +1011,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
if (id == fast_new_instance_init_check_id) { if (id == fast_new_instance_init_check_id) {
// make sure the klass is initialized // make sure the klass is initialized
__ cmpl(Address(klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized); __ cmpl(Address(klass, instanceKlass::init_state_offset()), instanceKlass::fully_initialized);
__ jcc(Assembler::notEqual, slow_path); __ jcc(Assembler::notEqual, slow_path);
} }
@ -1019,7 +1019,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
// assert object can be fast path allocated // assert object can be fast path allocated
{ {
Label ok, not_ok; Label ok, not_ok;
__ movl(obj_size, Address(klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc))); __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
__ cmpl(obj_size, 0); // make sure it's an instance (LH > 0) __ cmpl(obj_size, 0); // make sure it's an instance (LH > 0)
__ jcc(Assembler::lessEqual, not_ok); __ jcc(Assembler::lessEqual, not_ok);
__ testl(obj_size, Klass::_lh_instance_slow_path_bit); __ testl(obj_size, Klass::_lh_instance_slow_path_bit);
@ -1040,7 +1040,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ bind(retry_tlab); __ bind(retry_tlab);
// get the instance size (size is postive so movl is fine for 64bit) // get the instance size (size is postive so movl is fine for 64bit)
__ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes())); __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
__ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path); __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
@ -1052,7 +1052,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ bind(try_eden); __ bind(try_eden);
// get the instance size (size is postive so movl is fine for 64bit) // get the instance size (size is postive so movl is fine for 64bit)
__ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes())); __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
__ eden_allocate(obj, obj_size, 0, t1, slow_path); __ eden_allocate(obj, obj_size, 0, t1, slow_path);
__ incr_allocated_bytes(thread, obj_size, 0); __ incr_allocated_bytes(thread, obj_size, 0);
@ -1119,7 +1119,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
{ {
Label ok; Label ok;
Register t0 = obj; Register t0 = obj;
__ movl(t0, Address(klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc))); __ movl(t0, Address(klass, Klass::layout_helper_offset()));
__ sarl(t0, Klass::_lh_array_tag_shift); __ sarl(t0, Klass::_lh_array_tag_shift);
int tag = ((id == new_type_array_id) int tag = ((id == new_type_array_id)
? Klass::_lh_array_tag_type_value ? Klass::_lh_array_tag_type_value
@ -1153,7 +1153,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
// get the allocation size: round_up(hdr + length << (layout_helper & 0x1F)) // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
// since size is positive movl does right thing on 64bit // since size is positive movl does right thing on 64bit
__ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes())); __ movl(t1, Address(klass, Klass::layout_helper_offset()));
// since size is postive movl does right thing on 64bit // since size is postive movl does right thing on 64bit
__ movl(arr_size, length); __ movl(arr_size, length);
assert(t1 == rcx, "fixed register usage"); assert(t1 == rcx, "fixed register usage");
@ -1167,7 +1167,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size
__ initialize_header(obj, klass, length, t1, t2); __ initialize_header(obj, klass, length, t1, t2);
__ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte))); __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise"); assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise"); assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
__ andptr(t1, Klass::_lh_header_size_mask); __ andptr(t1, Klass::_lh_header_size_mask);
@ -1180,7 +1180,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ bind(try_eden); __ bind(try_eden);
// get the allocation size: round_up(hdr + length << (layout_helper & 0x1F)) // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
// since size is positive movl does right thing on 64bit // since size is positive movl does right thing on 64bit
__ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes())); __ movl(t1, Address(klass, Klass::layout_helper_offset()));
// since size is postive movl does right thing on 64bit // since size is postive movl does right thing on 64bit
__ movl(arr_size, length); __ movl(arr_size, length);
assert(t1 == rcx, "fixed register usage"); assert(t1 == rcx, "fixed register usage");
@ -1195,7 +1195,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ incr_allocated_bytes(thread, arr_size, 0); __ incr_allocated_bytes(thread, arr_size, 0);
__ initialize_header(obj, klass, length, t1, t2); __ initialize_header(obj, klass, length, t1, t2);
__ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte))); __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise"); assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise"); assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
__ andptr(t1, Klass::_lh_header_size_mask); __ andptr(t1, Klass::_lh_header_size_mask);
@ -1267,7 +1267,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
Label register_finalizer; Label register_finalizer;
Register t = rsi; Register t = rsi;
__ load_klass(t, rax); __ load_klass(t, rax);
__ movl(t, Address(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc))); __ movl(t, Address(t, Klass::access_flags_offset()));
__ testl(t, JVM_ACC_HAS_FINALIZER); __ testl(t, JVM_ACC_HAS_FINALIZER);
__ jcc(Assembler::notZero, register_finalizer); __ jcc(Assembler::notZero, register_finalizer);
__ ret(0); __ ret(0);

View file

@ -511,7 +511,7 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
// get synchronization object // get synchronization object
Label done; Label done;
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes(); const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ movl(rax, access_flags); __ movl(rax, access_flags);
__ testl(rax, JVM_ACC_STATIC); __ testl(rax, JVM_ACC_STATIC);
__ movptr(rax, Address(locals, 0)); // get receiver (assume this is frequent case) __ movptr(rax, Address(locals, 0)); // get receiver (assume this is frequent case)
@ -763,7 +763,7 @@ void InterpreterGenerator::lock_method(void) {
#endif // ASSERT #endif // ASSERT
// get synchronization object // get synchronization object
{ Label done; { Label done;
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes(); const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ movl(rax, access_flags); __ movl(rax, access_flags);
__ movptr(rdi, STATE(_locals)); // prepare to get receiver (assume common case) __ movptr(rdi, STATE(_locals)); // prepare to get receiver (assume common case)
__ testl(rax, JVM_ACC_STATIC); __ testl(rax, JVM_ACC_STATIC);
@ -1180,7 +1180,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// pass mirror handle if static call // pass mirror handle if static call
{ Label L; { Label L;
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes(); const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ movl(t, Address(method, methodOopDesc::access_flags_offset())); __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
__ testl(t, JVM_ACC_STATIC); __ testl(t, JVM_ACC_STATIC);
__ jcc(Assembler::zero, L); __ jcc(Assembler::zero, L);

View file

@ -1160,7 +1160,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
Address rcx_amh_conversion( rcx_recv, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes() ); Address rcx_amh_conversion( rcx_recv, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes() );
Address vmarg; // __ argument_address(vmargslot) Address vmarg; // __ argument_address(vmargslot)
const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes(); const int java_mirror_offset = in_bytes(Klass::java_mirror_offset());
if (have_entry(ek)) { if (have_entry(ek)) {
__ nop(); // empty stubs make SG sick __ nop(); // empty stubs make SG sick

View file

@ -1374,8 +1374,7 @@ class StubGenerator: public StubCodeGenerator {
// L_success, L_failure, NULL); // L_success, L_failure, NULL);
assert_different_registers(sub_klass, temp); assert_different_registers(sub_klass, temp);
int sc_offset = (klassOopDesc::header_size() * HeapWordSize + int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
Klass::secondary_super_cache_offset_in_bytes());
// if the pointers are equal, we are done (e.g., String[] elements) // if the pointers are equal, we are done (e.g., String[] elements)
__ cmpptr(sub_klass, super_klass_addr); __ cmpptr(sub_klass, super_klass_addr);
@ -1787,8 +1786,7 @@ class StubGenerator: public StubCodeGenerator {
// array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
// //
int lh_offset = klassOopDesc::header_size() * HeapWordSize + int lh_offset = in_bytes(Klass::layout_helper_offset());
Klass::layout_helper_offset_in_bytes();
Address src_klass_lh_addr(rcx_src_klass, lh_offset); Address src_klass_lh_addr(rcx_src_klass, lh_offset);
// Handle objArrays completely differently... // Handle objArrays completely differently...
@ -1914,10 +1912,8 @@ class StubGenerator: public StubCodeGenerator {
// live at this point: rcx_src_klass, dst[_pos], src[_pos] // live at this point: rcx_src_klass, dst[_pos], src[_pos]
{ {
// Handy offsets: // Handy offsets:
int ek_offset = (klassOopDesc::header_size() * HeapWordSize + int ek_offset = in_bytes(objArrayKlass::element_klass_offset());
objArrayKlass::element_klass_offset_in_bytes()); int sco_offset = in_bytes(Klass::super_check_offset_offset());
int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::super_check_offset_offset_in_bytes());
Register rsi_dst_klass = rsi; Register rsi_dst_klass = rsi;
Register rdi_temp = rdi; Register rdi_temp = rdi;

View file

@ -2261,8 +2261,7 @@ class StubGenerator: public StubCodeGenerator {
// The ckoff and ckval must be mutually consistent, // The ckoff and ckval must be mutually consistent,
// even though caller generates both. // even though caller generates both.
{ Label L; { Label L;
int sco_offset = (klassOopDesc::header_size() * HeapWordSize + int sco_offset = in_bytes(Klass::super_check_offset_offset());
Klass::super_check_offset_offset_in_bytes());
__ cmpl(ckoff, Address(ckval, sco_offset)); __ cmpl(ckoff, Address(ckval, sco_offset));
__ jcc(Assembler::equal, L); __ jcc(Assembler::equal, L);
__ stop("super_check_offset inconsistent"); __ stop("super_check_offset inconsistent");
@ -2572,8 +2571,7 @@ class StubGenerator: public StubCodeGenerator {
// array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
// //
const int lh_offset = klassOopDesc::header_size() * HeapWordSize + const int lh_offset = in_bytes(Klass::layout_helper_offset());
Klass::layout_helper_offset_in_bytes();
// Handle objArrays completely differently... // Handle objArrays completely differently...
const jint objArray_lh = Klass::array_layout_helper(T_OBJECT); const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
@ -2722,15 +2720,13 @@ class StubGenerator: public StubCodeGenerator {
assert_clean_int(count, sco_temp); assert_clean_int(count, sco_temp);
// Generate the type check. // Generate the type check.
const int sco_offset = (klassOopDesc::header_size() * HeapWordSize + const int sco_offset = in_bytes(Klass::super_check_offset_offset());
Klass::super_check_offset_offset_in_bytes());
__ movl(sco_temp, Address(r11_dst_klass, sco_offset)); __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
assert_clean_int(sco_temp, rax); assert_clean_int(sco_temp, rax);
generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy); generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy);
// Fetch destination element klass from the objArrayKlass header. // Fetch destination element klass from the objArrayKlass header.
int ek_offset = (klassOopDesc::header_size() * HeapWordSize + int ek_offset = in_bytes(objArrayKlass::element_klass_offset());
objArrayKlass::element_klass_offset_in_bytes());
__ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset)); __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset));
__ movl( sco_temp, Address(r11_dst_klass, sco_offset)); __ movl( sco_temp, Address(r11_dst_klass, sco_offset));
assert_clean_int(sco_temp, rax); assert_clean_int(sco_temp, rax);

View file

@ -552,7 +552,7 @@ void InterpreterGenerator::lock_method(void) {
#endif // ASSERT #endif // ASSERT
// get synchronization object // get synchronization object
{ Label done; { Label done;
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes(); const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ movl(rax, access_flags); __ movl(rax, access_flags);
__ testl(rax, JVM_ACC_STATIC); __ testl(rax, JVM_ACC_STATIC);
__ movptr(rax, Address(rdi, Interpreter::local_offset_in_bytes(0))); // get receiver (assume this is frequent case) __ movptr(rax, Address(rdi, Interpreter::local_offset_in_bytes(0))); // get receiver (assume this is frequent case)
@ -1012,7 +1012,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// pass mirror handle if static call // pass mirror handle if static call
{ Label L; { Label L;
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes(); const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ movl(t, Address(method, methodOopDesc::access_flags_offset())); __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
__ testl(t, JVM_ACC_STATIC); __ testl(t, JVM_ACC_STATIC);
__ jcc(Assembler::zero, L); __ jcc(Assembler::zero, L);

View file

@ -505,8 +505,7 @@ void InterpreterGenerator::lock_method(void) {
// get synchronization object // get synchronization object
{ {
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + const int mirror_offset = in_bytes(Klass::java_mirror_offset());
Klass::java_mirror_offset_in_bytes();
Label done; Label done;
__ movl(rax, access_flags); __ movl(rax, access_flags);
__ testl(rax, JVM_ACC_STATIC); __ testl(rax, JVM_ACC_STATIC);
@ -1006,8 +1005,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// pass mirror handle if static call // pass mirror handle if static call
{ {
Label L; Label L;
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + const int mirror_offset = in_bytes(Klass::java_mirror_offset());
Klass::java_mirror_offset_in_bytes();
__ movl(t, Address(method, methodOopDesc::access_flags_offset())); __ movl(t, Address(method, methodOopDesc::access_flags_offset()));
__ testl(t, JVM_ACC_STATIC); __ testl(t, JVM_ACC_STATIC);
__ jcc(Assembler::zero, L); __ jcc(Assembler::zero, L);

View file

@ -980,7 +980,7 @@ void TemplateTable::aastore() {
__ load_klass(rbx, rax); __ load_klass(rbx, rax);
// Move superklass into EAX // Move superklass into EAX
__ load_klass(rax, rdx); __ load_klass(rax, rdx);
__ movptr(rax, Address(rax, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes())); __ movptr(rax, Address(rax, objArrayKlass::element_klass_offset()));
// Compress array+index*wordSize+12 into a single register. Frees ECX. // Compress array+index*wordSize+12 into a single register. Frees ECX.
__ lea(rdx, element_address); __ lea(rdx, element_address);
@ -2033,7 +2033,7 @@ void TemplateTable::_return(TosState state) {
assert(state == vtos, "only valid state"); assert(state == vtos, "only valid state");
__ movptr(rax, aaddress(0)); __ movptr(rax, aaddress(0));
__ load_klass(rdi, rax); __ load_klass(rdi, rax);
__ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc))); __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
__ testl(rdi, JVM_ACC_HAS_FINALIZER); __ testl(rdi, JVM_ACC_HAS_FINALIZER);
Label skip_register_finalizer; Label skip_register_finalizer;
__ jcc(Assembler::zero, skip_register_finalizer); __ jcc(Assembler::zero, skip_register_finalizer);
@ -3188,11 +3188,11 @@ void TemplateTable::_new() {
// make sure klass is initialized & doesn't have finalizer // make sure klass is initialized & doesn't have finalizer
// make sure klass is fully initialized // make sure klass is fully initialized
__ cmpl(Address(rcx, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized); __ cmpl(Address(rcx, instanceKlass::init_state_offset()), instanceKlass::fully_initialized);
__ jcc(Assembler::notEqual, slow_case); __ jcc(Assembler::notEqual, slow_case);
// get instance_size in instanceKlass (scaled to a count of bytes) // get instance_size in instanceKlass (scaled to a count of bytes)
__ movl(rdx, Address(rcx, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc))); __ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
// test to see if it has a finalizer or is malformed in some way // test to see if it has a finalizer or is malformed in some way
__ testl(rdx, Klass::_lh_instance_slow_path_bit); __ testl(rdx, Klass::_lh_instance_slow_path_bit);
__ jcc(Assembler::notZero, slow_case); __ jcc(Assembler::notZero, slow_case);
@ -3293,7 +3293,7 @@ void TemplateTable::_new() {
__ bind(initialize_header); __ bind(initialize_header);
if (UseBiasedLocking) { if (UseBiasedLocking) {
__ pop(rcx); // get saved klass back in the register. __ pop(rcx); // get saved klass back in the register.
__ movptr(rbx, Address(rcx, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); __ movptr(rbx, Address(rcx, Klass::prototype_header_offset()));
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx); __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
} else { } else {
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),

View file

@ -1004,8 +1004,7 @@ void TemplateTable::aastore() {
// Move superklass into rax // Move superklass into rax
__ load_klass(rax, rdx); __ load_klass(rax, rdx);
__ movptr(rax, Address(rax, __ movptr(rax, Address(rax,
sizeof(oopDesc) + objArrayKlass::element_klass_offset()));
objArrayKlass::element_klass_offset_in_bytes()));
// Compress array + index*oopSize + 12 into a single register. Frees rcx. // Compress array + index*oopSize + 12 into a single register. Frees rcx.
__ lea(rdx, element_address); __ lea(rdx, element_address);
@ -2067,7 +2066,7 @@ void TemplateTable::_return(TosState state) {
assert(state == vtos, "only valid state"); assert(state == vtos, "only valid state");
__ movptr(c_rarg1, aaddress(0)); __ movptr(c_rarg1, aaddress(0));
__ load_klass(rdi, c_rarg1); __ load_klass(rdi, c_rarg1);
__ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc))); __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
__ testl(rdi, JVM_ACC_HAS_FINALIZER); __ testl(rdi, JVM_ACC_HAS_FINALIZER);
Label skip_register_finalizer; Label skip_register_finalizer;
__ jcc(Assembler::zero, skip_register_finalizer); __ jcc(Assembler::zero, skip_register_finalizer);
@ -3236,15 +3235,14 @@ void TemplateTable::_new() {
// make sure klass is initialized & doesn't have finalizer // make sure klass is initialized & doesn't have finalizer
// make sure klass is fully initialized // make sure klass is fully initialized
__ cmpl(Address(rsi, __ cmpl(Address(rsi,
instanceKlass::init_state_offset_in_bytes() + instanceKlass::init_state_offset()),
sizeof(oopDesc)),
instanceKlass::fully_initialized); instanceKlass::fully_initialized);
__ jcc(Assembler::notEqual, slow_case); __ jcc(Assembler::notEqual, slow_case);
// get instance_size in instanceKlass (scaled to a count of bytes) // get instance_size in instanceKlass (scaled to a count of bytes)
__ movl(rdx, __ movl(rdx,
Address(rsi, Address(rsi,
Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc))); Klass::layout_helper_offset()));
// test to see if it has a finalizer or is malformed in some way // test to see if it has a finalizer or is malformed in some way
__ testl(rdx, Klass::_lh_instance_slow_path_bit); __ testl(rdx, Klass::_lh_instance_slow_path_bit);
__ jcc(Assembler::notZero, slow_case); __ jcc(Assembler::notZero, slow_case);
@ -3337,7 +3335,7 @@ void TemplateTable::_new() {
// initialize object header only. // initialize object header only.
__ bind(initialize_header); __ bind(initialize_header);
if (UseBiasedLocking) { if (UseBiasedLocking) {
__ movptr(rscratch1, Address(rsi, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); __ movptr(rscratch1, Address(rsi, Klass::prototype_header_offset()));
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1); __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1);
} else { } else {
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),

View file

@ -11305,12 +11305,12 @@ instruct partialSubtypeCheck(rdi_RegP result,
effect(KILL rcx, KILL cr); effect(KILL rcx, KILL cr);
ins_cost(1100); // slightly larger than the next version ins_cost(1100); // slightly larger than the next version
format %{ "movq rdi, [$sub + (sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes())]\n\t" format %{ "movq rdi, [$sub + in_bytes(Klass::secondary_supers_offset())]\n\t"
"movl rcx, [rdi + arrayOopDesc::length_offset_in_bytes()]\t# length to scan\n\t" "movl rcx, [rdi + arrayOopDesc::length_offset_in_bytes()]\t# length to scan\n\t"
"addq rdi, arrayOopDex::base_offset_in_bytes(T_OBJECT)\t# Skip to start of data; set NZ in case count is zero\n\t" "addq rdi, arrayOopDex::base_offset_in_bytes(T_OBJECT)\t# Skip to start of data; set NZ in case count is zero\n\t"
"repne scasq\t# Scan *rdi++ for a match with rax while rcx--\n\t" "repne scasq\t# Scan *rdi++ for a match with rax while rcx--\n\t"
"jne,s miss\t\t# Missed: rdi not-zero\n\t" "jne,s miss\t\t# Missed: rdi not-zero\n\t"
"movq [$sub + (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())], $super\t# Hit: update cache\n\t" "movq [$sub + in_bytes(Klass::secondary_super_cache_offset())], $super\t# Hit: update cache\n\t"
"xorq $result, $result\t\t Hit: rdi zero\n\t" "xorq $result, $result\t\t Hit: rdi zero\n\t"
"miss:\t" %} "miss:\t" %}
@ -11328,12 +11328,12 @@ instruct partialSubtypeCheck_vs_Zero(rFlagsReg cr,
effect(KILL rcx, KILL result); effect(KILL rcx, KILL result);
ins_cost(1000); ins_cost(1000);
format %{ "movq rdi, [$sub + (sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes())]\n\t" format %{ "movq rdi, [$sub + in_bytes(Klass::secondary_supers_offset())]\n\t"
"movl rcx, [rdi + arrayOopDesc::length_offset_in_bytes()]\t# length to scan\n\t" "movl rcx, [rdi + arrayOopDesc::length_offset_in_bytes()]\t# length to scan\n\t"
"addq rdi, arrayOopDex::base_offset_in_bytes(T_OBJECT)\t# Skip to start of data; set NZ in case count is zero\n\t" "addq rdi, arrayOopDex::base_offset_in_bytes(T_OBJECT)\t# Skip to start of data; set NZ in case count is zero\n\t"
"repne scasq\t# Scan *rdi++ for a match with rax while cx-- != 0\n\t" "repne scasq\t# Scan *rdi++ for a match with rax while cx-- != 0\n\t"
"jne,s miss\t\t# Missed: flags nz\n\t" "jne,s miss\t\t# Missed: flags nz\n\t"
"movq [$sub + (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())], $super\t# Hit: update cache\n\t" "movq [$sub + in_bytes(Klass::secondary_super_cache_offset())], $super\t# Hit: update cache\n\t"
"miss:\t" %} "miss:\t" %}
opcode(0x0); // No need to XOR RDI opcode(0x0); // No need to XOR RDI

View file

@ -1256,8 +1256,7 @@ void LIRGenerator::do_getClass(Intrinsic* x) {
info = state_for(x); info = state_for(x);
} }
__ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_OBJECT), result, info); __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_OBJECT), result, info);
__ move_wide(new LIR_Address(result, Klass::java_mirror_offset_in_bytes() + __ move_wide(new LIR_Address(result, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
klassOopDesc::klass_part_offset_in_bytes(), T_OBJECT), result);
} }

View file

@ -73,7 +73,7 @@ class arrayKlass: public Klass {
oop* adr_component_mirror() { return (oop*)&this->_component_mirror;} oop* adr_component_mirror() { return (oop*)&this->_component_mirror;}
// Compiler/Interpreter offset // Compiler/Interpreter offset
static ByteSize component_mirror_offset() { return byte_offset_of(arrayKlass, _component_mirror); } static ByteSize component_mirror_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(arrayKlass, _component_mirror)); }
virtual klassOop java_super() const;//{ return SystemDictionary::Object_klass(); } virtual klassOop java_super() const;//{ return SystemDictionary::Object_klass(); }

View file

@ -405,7 +405,7 @@ class instanceKlass: public Klass {
ReferenceType reference_type() const { return _reference_type; } ReferenceType reference_type() const { return _reference_type; }
void set_reference_type(ReferenceType t) { _reference_type = t; } void set_reference_type(ReferenceType t) { _reference_type = t; }
static int reference_type_offset_in_bytes() { return offset_of(instanceKlass, _reference_type); } static ByteSize reference_type_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(instanceKlass, _reference_type)); }
// find local field, returns true if found // find local field, returns true if found
bool find_local_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const; bool find_local_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const;
@ -616,8 +616,8 @@ class instanceKlass: public Klass {
void set_breakpoints(BreakpointInfo* bps) { _breakpoints = bps; }; void set_breakpoints(BreakpointInfo* bps) { _breakpoints = bps; };
// support for stub routines // support for stub routines
static int init_state_offset_in_bytes() { return offset_of(instanceKlass, _init_state); } static ByteSize init_state_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(instanceKlass, _init_state)); }
static int init_thread_offset_in_bytes() { return offset_of(instanceKlass, _init_thread); } static ByteSize init_thread_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(instanceKlass, _init_thread)); }
// subclass/subinterface checks // subclass/subinterface checks
bool implements_interface(klassOop k) const; bool implements_interface(klassOop k) const;

View file

@ -144,7 +144,7 @@ klassOop Klass::base_create_klass_oop(KlassHandle& klass, int size,
} }
kl->set_secondary_supers(NULL); kl->set_secondary_supers(NULL);
oop_store_without_check((oop*) &kl->_primary_supers[0], k); oop_store_without_check((oop*) &kl->_primary_supers[0], k);
kl->set_super_check_offset(primary_supers_offset_in_bytes() + sizeof(oopDesc)); kl->set_super_check_offset(in_bytes(primary_supers_offset()));
} }
kl->set_java_mirror(NULL); kl->set_java_mirror(NULL);

View file

@ -313,7 +313,7 @@ class Klass : public Klass_vtbl {
// Can this klass be a primary super? False for interfaces and arrays of // Can this klass be a primary super? False for interfaces and arrays of
// interfaces. False also for arrays or classes with long super chains. // interfaces. False also for arrays or classes with long super chains.
bool can_be_primary_super() const { bool can_be_primary_super() const {
const juint secondary_offset = secondary_super_cache_offset_in_bytes() + sizeof(oopDesc); const juint secondary_offset = in_bytes(secondary_super_cache_offset());
return super_check_offset() != secondary_offset; return super_check_offset() != secondary_offset;
} }
virtual bool can_be_primary_super_slow() const; virtual bool can_be_primary_super_slow() const;
@ -323,7 +323,7 @@ class Klass : public Klass_vtbl {
if (!can_be_primary_super()) { if (!can_be_primary_super()) {
return primary_super_limit(); return primary_super_limit();
} else { } else {
juint d = (super_check_offset() - (primary_supers_offset_in_bytes() + sizeof(oopDesc))) / sizeof(klassOop); juint d = (super_check_offset() - in_bytes(primary_supers_offset())) / sizeof(klassOop);
assert(d < primary_super_limit(), "oob"); assert(d < primary_super_limit(), "oob");
assert(_primary_supers[d] == as_klassOop(), "proper init"); assert(_primary_supers[d] == as_klassOop(), "proper init");
return d; return d;
@ -373,15 +373,15 @@ class Klass : public Klass_vtbl {
virtual void set_alloc_size(juint n) = 0; virtual void set_alloc_size(juint n) = 0;
// Compiler support // Compiler support
static int super_offset_in_bytes() { return offset_of(Klass, _super); } static ByteSize super_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(Klass, _super)); }
static int super_check_offset_offset_in_bytes() { return offset_of(Klass, _super_check_offset); } static ByteSize super_check_offset_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(Klass, _super_check_offset)); }
static int primary_supers_offset_in_bytes(){ return offset_of(Klass, _primary_supers); } static ByteSize primary_supers_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(Klass, _primary_supers)); }
static int secondary_super_cache_offset_in_bytes() { return offset_of(Klass, _secondary_super_cache); } static ByteSize secondary_super_cache_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(Klass, _secondary_super_cache)); }
static int secondary_supers_offset_in_bytes() { return offset_of(Klass, _secondary_supers); } static ByteSize secondary_supers_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(Klass, _secondary_supers)); }
static int java_mirror_offset_in_bytes() { return offset_of(Klass, _java_mirror); } static ByteSize java_mirror_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(Klass, _java_mirror)); }
static int modifier_flags_offset_in_bytes(){ return offset_of(Klass, _modifier_flags); } static ByteSize modifier_flags_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(Klass, _modifier_flags)); }
static int layout_helper_offset_in_bytes() { return offset_of(Klass, _layout_helper); } static ByteSize layout_helper_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(Klass, _layout_helper)); }
static int access_flags_offset_in_bytes() { return offset_of(Klass, _access_flags); } static ByteSize access_flags_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(Klass, _access_flags)); }
// Unpacking layout_helper: // Unpacking layout_helper:
enum { enum {
@ -478,7 +478,7 @@ class Klass : public Klass_vtbl {
bool is_subtype_of(klassOop k) const { bool is_subtype_of(klassOop k) const {
juint off = k->klass_part()->super_check_offset(); juint off = k->klass_part()->super_check_offset();
klassOop sup = *(klassOop*)( (address)as_klassOop() + off ); klassOop sup = *(klassOop*)( (address)as_klassOop() + off );
const juint secondary_offset = secondary_super_cache_offset_in_bytes() + sizeof(oopDesc); const juint secondary_offset = in_bytes(secondary_super_cache_offset());
if (sup == k) { if (sup == k) {
return true; return true;
} else if (off != secondary_offset) { } else if (off != secondary_offset) {
@ -674,7 +674,7 @@ class Klass : public Klass_vtbl {
// are potential problems in setting the bias pattern for // are potential problems in setting the bias pattern for
// JVM-internal oops. // JVM-internal oops.
inline void set_prototype_header(markOop header); inline void set_prototype_header(markOop header);
static int prototype_header_offset_in_bytes() { return offset_of(Klass, _prototype_header); } static ByteSize prototype_header_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(Klass, _prototype_header)); }
int biased_lock_revocation_count() const { return (int) _biased_lock_revocation_count; } int biased_lock_revocation_count() const { return (int) _biased_lock_revocation_count; }
// Atomically increments biased_lock_revocation_count and returns updated value // Atomically increments biased_lock_revocation_count and returns updated value

View file

@ -38,14 +38,8 @@
class klassOopDesc : public oopDesc { class klassOopDesc : public oopDesc {
public: public:
// size operation
static int header_size() { return sizeof(klassOopDesc)/HeapWordSize; }
// support for code generation
static int klass_part_offset_in_bytes() { return sizeof(klassOopDesc); }
// returns the Klass part containing dispatching behavior // returns the Klass part containing dispatching behavior
Klass* klass_part() const { return (Klass*)((address)this + klass_part_offset_in_bytes()); } Klass* klass_part() const { return (Klass*)((address)this + sizeof(klassOopDesc)); }
// Convenience wrapper // Convenience wrapper
inline oop java_mirror() const; inline oop java_mirror() const;

View file

@ -47,7 +47,7 @@ class objArrayKlass : public arrayKlass {
oop* bottom_klass_addr() { return (oop*)&_bottom_klass; } oop* bottom_klass_addr() { return (oop*)&_bottom_klass; }
// Compiler/Interpreter offset // Compiler/Interpreter offset
static int element_klass_offset_in_bytes() { return offset_of(objArrayKlass, _element_klass); } static ByteSize element_klass_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(objArrayKlass, _element_klass)); }
// Dispatched operation // Dispatched operation
bool can_be_primary_super_slow() const; bool can_be_primary_super_slow() const;

View file

@ -1306,12 +1306,12 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
// these 2 disparate memories into the same alias class. Since the // these 2 disparate memories into the same alias class. Since the
// primary supertype array is read-only, there's no chance of confusion // primary supertype array is read-only, there's no chance of confusion
// where we bypass an array load and an array store. // where we bypass an array load and an array store.
int primary_supers_offset = sizeof(klassOopDesc) + Klass::primary_supers_offset_in_bytes(); int primary_supers_offset = in_bytes(Klass::primary_supers_offset());
if (offset == Type::OffsetBot || if (offset == Type::OffsetBot ||
(offset >= primary_supers_offset && (offset >= primary_supers_offset &&
offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) || offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
offset == (int)(sizeof(klassOopDesc) + Klass::secondary_super_cache_offset_in_bytes())) { offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
offset = sizeof(klassOopDesc) + Klass::secondary_super_cache_offset_in_bytes(); offset = in_bytes(Klass::secondary_super_cache_offset());
tj = tk = TypeKlassPtr::make( TypePtr::NotNull, tk->klass(), offset ); tj = tk = TypeKlassPtr::make( TypePtr::NotNull, tk->klass(), offset );
} }
} }
@ -1490,13 +1490,13 @@ Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_cr
alias_type(idx)->set_rewritable(false); alias_type(idx)->set_rewritable(false);
} }
if (flat->isa_klassptr()) { if (flat->isa_klassptr()) {
if (flat->offset() == Klass::super_check_offset_offset_in_bytes() + (int)sizeof(oopDesc)) if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
alias_type(idx)->set_rewritable(false); alias_type(idx)->set_rewritable(false);
if (flat->offset() == Klass::modifier_flags_offset_in_bytes() + (int)sizeof(oopDesc)) if (flat->offset() == in_bytes(Klass::modifier_flags_offset()))
alias_type(idx)->set_rewritable(false); alias_type(idx)->set_rewritable(false);
if (flat->offset() == Klass::access_flags_offset_in_bytes() + (int)sizeof(oopDesc)) if (flat->offset() == in_bytes(Klass::access_flags_offset()))
alias_type(idx)->set_rewritable(false); alias_type(idx)->set_rewritable(false);
if (flat->offset() == Klass::java_mirror_offset_in_bytes() + (int)sizeof(oopDesc)) if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
alias_type(idx)->set_rewritable(false); alias_type(idx)->set_rewritable(false);
} }
// %%% (We would like to finalize JavaThread::threadObj_offset(), // %%% (We would like to finalize JavaThread::threadObj_offset(),

View file

@ -2304,9 +2304,9 @@ Node* GraphKit::gen_subtype_check(Node* subklass, Node* superklass) {
// will always succeed. We could leave a dependency behind to ensure this. // will always succeed. We could leave a dependency behind to ensure this.
// First load the super-klass's check-offset // First load the super-klass's check-offset
Node *p1 = basic_plus_adr( superklass, superklass, sizeof(oopDesc) + Klass::super_check_offset_offset_in_bytes() ); Node *p1 = basic_plus_adr( superklass, superklass, in_bytes(Klass::super_check_offset_offset()) );
Node *chk_off = _gvn.transform( new (C, 3) LoadINode( NULL, memory(p1), p1, _gvn.type(p1)->is_ptr() ) ); Node *chk_off = _gvn.transform( new (C, 3) LoadINode( NULL, memory(p1), p1, _gvn.type(p1)->is_ptr() ) );
int cacheoff_con = sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes(); int cacheoff_con = in_bytes(Klass::secondary_super_cache_offset());
bool might_be_cache = (find_int_con(chk_off, cacheoff_con) == cacheoff_con); bool might_be_cache = (find_int_con(chk_off, cacheoff_con) == cacheoff_con);
// Load from the sub-klass's super-class display list, or a 1-word cache of // Load from the sub-klass's super-class display list, or a 1-word cache of
@ -2934,7 +2934,7 @@ Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
} }
} }
constant_value = Klass::_lh_neutral_value; // put in a known value constant_value = Klass::_lh_neutral_value; // put in a known value
Node* lhp = basic_plus_adr(klass_node, klass_node, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)); Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
return make_load(NULL, lhp, TypeInt::INT, T_INT); return make_load(NULL, lhp, TypeInt::INT, T_INT);
} }

View file

@ -2165,8 +2165,7 @@ void LibraryCallKit::insert_g1_pre_barrier(Node* base_oop, Node* offset, Node* p
IdealKit ideal(this); IdealKit ideal(this);
#define __ ideal. #define __ ideal.
const int reference_type_offset = instanceKlass::reference_type_offset_in_bytes() + const int reference_type_offset = in_bytes(instanceKlass::reference_type_offset());
sizeof(oopDesc);
Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset); Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset);
@ -2806,7 +2805,7 @@ bool LibraryCallKit::inline_unsafe_allocate() {
// Note: The argument might still be an illegal value like // Note: The argument might still be an illegal value like
// Serializable.class or Object[].class. The runtime will handle it. // Serializable.class or Object[].class. The runtime will handle it.
// But we must make an explicit check for initialization. // But we must make an explicit check for initialization.
Node* insp = basic_plus_adr(kls, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)); Node* insp = basic_plus_adr(kls, in_bytes(instanceKlass::init_state_offset()));
Node* inst = make_load(NULL, insp, TypeInt::INT, T_INT); Node* inst = make_load(NULL, insp, TypeInt::INT, T_INT);
Node* bits = intcon(instanceKlass::fully_initialized); Node* bits = intcon(instanceKlass::fully_initialized);
Node* test = _gvn.transform( new (C, 3) SubINode(inst, bits) ); Node* test = _gvn.transform( new (C, 3) SubINode(inst, bits) );
@ -2954,7 +2953,7 @@ bool LibraryCallKit::inline_native_isInterrupted() {
//---------------------------load_mirror_from_klass---------------------------- //---------------------------load_mirror_from_klass----------------------------
// Given a klass oop, load its java mirror (a java.lang.Class oop). // Given a klass oop, load its java mirror (a java.lang.Class oop).
Node* LibraryCallKit::load_mirror_from_klass(Node* klass) { Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
Node* p = basic_plus_adr(klass, Klass::java_mirror_offset_in_bytes() + sizeof(oopDesc)); Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
return make_load(NULL, p, TypeInstPtr::MIRROR, T_OBJECT); return make_load(NULL, p, TypeInstPtr::MIRROR, T_OBJECT);
} }
@ -2994,7 +2993,7 @@ Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) { Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
// Branch around if the given klass has the given modifier bit set. // Branch around if the given klass has the given modifier bit set.
// Like generate_guard, adds a new path onto the region. // Like generate_guard, adds a new path onto the region.
Node* modp = basic_plus_adr(kls, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)); Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT); Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT);
Node* mask = intcon(modifier_mask); Node* mask = intcon(modifier_mask);
Node* bits = intcon(modifier_bits); Node* bits = intcon(modifier_bits);
@ -3115,7 +3114,7 @@ bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
break; break;
case vmIntrinsics::_getModifiers: case vmIntrinsics::_getModifiers:
p = basic_plus_adr(kls, Klass::modifier_flags_offset_in_bytes() + sizeof(oopDesc)); p = basic_plus_adr(kls, in_bytes(Klass::modifier_flags_offset()));
query_value = make_load(NULL, p, TypeInt::INT, T_INT); query_value = make_load(NULL, p, TypeInt::INT, T_INT);
break; break;
@ -3155,7 +3154,7 @@ bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
// A guard was added. If the guard is taken, it was an array. // A guard was added. If the guard is taken, it was an array.
phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror()))); phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror())));
// If we fall through, it's a plain class. Get its _super. // If we fall through, it's a plain class. Get its _super.
p = basic_plus_adr(kls, Klass::super_offset_in_bytes() + sizeof(oopDesc)); p = basic_plus_adr(kls, in_bytes(Klass::super_offset()));
kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL) ); kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL) );
null_ctl = top(); null_ctl = top();
kls = null_check_oop(kls, &null_ctl); kls = null_check_oop(kls, &null_ctl);
@ -3173,7 +3172,7 @@ bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
if (generate_array_guard(kls, region) != NULL) { if (generate_array_guard(kls, region) != NULL) {
// Be sure to pin the oop load to the guard edge just created: // Be sure to pin the oop load to the guard edge just created:
Node* is_array_ctrl = region->in(region->req()-1); Node* is_array_ctrl = region->in(region->req()-1);
Node* cma = basic_plus_adr(kls, in_bytes(arrayKlass::component_mirror_offset()) + sizeof(oopDesc)); Node* cma = basic_plus_adr(kls, in_bytes(arrayKlass::component_mirror_offset()));
Node* cmo = make_load(is_array_ctrl, cma, TypeInstPtr::MIRROR, T_OBJECT); Node* cmo = make_load(is_array_ctrl, cma, TypeInstPtr::MIRROR, T_OBJECT);
phi->add_req(cmo); phi->add_req(cmo);
} }
@ -3181,7 +3180,7 @@ bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
break; break;
case vmIntrinsics::_getClassAccessFlags: case vmIntrinsics::_getClassAccessFlags:
p = basic_plus_adr(kls, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)); p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
query_value = make_load(NULL, p, TypeInt::INT, T_INT); query_value = make_load(NULL, p, TypeInt::INT, T_INT);
break; break;
@ -4857,7 +4856,7 @@ LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
PreserveJVMState pjvms(this); PreserveJVMState pjvms(this);
set_control(not_subtype_ctrl); set_control(not_subtype_ctrl);
// (At this point we can assume disjoint_bases, since types differ.) // (At this point we can assume disjoint_bases, since types differ.)
int ek_offset = objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc); int ek_offset = in_bytes(objArrayKlass::element_klass_offset());
Node* p1 = basic_plus_adr(dest_klass, ek_offset); Node* p1 = basic_plus_adr(dest_klass, ek_offset);
Node* n1 = LoadKlassNode::make(_gvn, immutable_memory(), p1, TypeRawPtr::BOTTOM); Node* n1 = LoadKlassNode::make(_gvn, immutable_memory(), p1, TypeRawPtr::BOTTOM);
Node* dest_elem_klass = _gvn.transform(n1); Node* dest_elem_klass = _gvn.transform(n1);
@ -5308,7 +5307,7 @@ LibraryCallKit::generate_checkcast_arraycopy(const TypePtr* adr_type,
// for the target array. This is an optimistic check. It will // for the target array. This is an optimistic check. It will
// look in each non-null element's class, at the desired klass's // look in each non-null element's class, at the desired klass's
// super_check_offset, for the desired klass. // super_check_offset, for the desired klass.
int sco_offset = Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc); int sco_offset = in_bytes(Klass::super_check_offset_offset());
Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset); Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
Node* n3 = new(C, 3) LoadINode(NULL, memory(p3), p3, _gvn.type(p3)->is_ptr()); Node* n3 = new(C, 3) LoadINode(NULL, memory(p3), p3, _gvn.type(p3)->is_ptr());
Node* check_offset = ConvI2X(_gvn.transform(n3)); Node* check_offset = ConvI2X(_gvn.transform(n3));

View file

@ -1470,7 +1470,7 @@ PhaseMacroExpand::initialize_object(AllocateNode* alloc,
Node* mark_node = NULL; Node* mark_node = NULL;
// For now only enable fast locking for non-array types // For now only enable fast locking for non-array types
if (UseBiasedLocking && (length == NULL)) { if (UseBiasedLocking && (length == NULL)) {
mark_node = make_load(control, rawmem, klass_node, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), TypeRawPtr::BOTTOM, T_ADDRESS); mark_node = make_load(control, rawmem, klass_node, in_bytes(Klass::prototype_header_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
} else { } else {
mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype())); mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype()));
} }
@ -1958,7 +1958,7 @@ void PhaseMacroExpand::expand_lock_node(LockNode *lock) {
#endif #endif
klass_node->init_req(0, ctrl); klass_node->init_req(0, ctrl);
} }
Node *proto_node = make_load(ctrl, mem, klass_node, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), TypeX_X, TypeX_X->basic_type()); Node *proto_node = make_load(ctrl, mem, klass_node, in_bytes(Klass::prototype_header_offset()), TypeX_X, TypeX_X->basic_type());
Node* thread = transform_later(new (C, 1) ThreadLocalNode()); Node* thread = transform_later(new (C, 1) ThreadLocalNode());
Node* cast_thread = transform_later(new (C, 2) CastP2XNode(ctrl, thread)); Node* cast_thread = transform_later(new (C, 2) CastP2XNode(ctrl, thread));

View file

@ -1473,19 +1473,19 @@ Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) {
const Type* const Type*
LoadNode::load_array_final_field(const TypeKlassPtr *tkls, LoadNode::load_array_final_field(const TypeKlassPtr *tkls,
ciKlass* klass) const { ciKlass* klass) const {
if (tkls->offset() == Klass::modifier_flags_offset_in_bytes() + (int)sizeof(oopDesc)) { if (tkls->offset() == in_bytes(Klass::modifier_flags_offset())) {
// The field is Klass::_modifier_flags. Return its (constant) value. // The field is Klass::_modifier_flags. Return its (constant) value.
// (Folds up the 2nd indirection in aClassConstant.getModifiers().) // (Folds up the 2nd indirection in aClassConstant.getModifiers().)
assert(this->Opcode() == Op_LoadI, "must load an int from _modifier_flags"); assert(this->Opcode() == Op_LoadI, "must load an int from _modifier_flags");
return TypeInt::make(klass->modifier_flags()); return TypeInt::make(klass->modifier_flags());
} }
if (tkls->offset() == Klass::access_flags_offset_in_bytes() + (int)sizeof(oopDesc)) { if (tkls->offset() == in_bytes(Klass::access_flags_offset())) {
// The field is Klass::_access_flags. Return its (constant) value. // The field is Klass::_access_flags. Return its (constant) value.
// (Folds up the 2nd indirection in Reflection.getClassAccessFlags(aClassConstant).) // (Folds up the 2nd indirection in Reflection.getClassAccessFlags(aClassConstant).)
assert(this->Opcode() == Op_LoadI, "must load an int from _access_flags"); assert(this->Opcode() == Op_LoadI, "must load an int from _access_flags");
return TypeInt::make(klass->access_flags()); return TypeInt::make(klass->access_flags());
} }
if (tkls->offset() == Klass::layout_helper_offset_in_bytes() + (int)sizeof(oopDesc)) { if (tkls->offset() == in_bytes(Klass::layout_helper_offset())) {
// The field is Klass::_layout_helper. Return its constant value if known. // The field is Klass::_layout_helper. Return its constant value if known.
assert(this->Opcode() == Op_LoadI, "must load an int from _layout_helper"); assert(this->Opcode() == Op_LoadI, "must load an int from _layout_helper");
return TypeInt::make(klass->layout_helper()); return TypeInt::make(klass->layout_helper());
@ -1636,14 +1636,14 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
// We are loading a field from a Klass metaobject whose identity // We are loading a field from a Klass metaobject whose identity
// is known at compile time (the type is "exact" or "precise"). // is known at compile time (the type is "exact" or "precise").
// Check for fields we know are maintained as constants by the VM. // Check for fields we know are maintained as constants by the VM.
if (tkls->offset() == Klass::super_check_offset_offset_in_bytes() + (int)sizeof(oopDesc)) { if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
// The field is Klass::_super_check_offset. Return its (constant) value. // The field is Klass::_super_check_offset. Return its (constant) value.
// (Folds up type checking code.) // (Folds up type checking code.)
assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset"); assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
return TypeInt::make(klass->super_check_offset()); return TypeInt::make(klass->super_check_offset());
} }
// Compute index into primary_supers array // Compute index into primary_supers array
juint depth = (tkls->offset() - (Klass::primary_supers_offset_in_bytes() + (int)sizeof(oopDesc))) / sizeof(klassOop); juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(klassOop);
// Check for overflowing; use unsigned compare to handle the negative case. // Check for overflowing; use unsigned compare to handle the negative case.
if( depth < ciKlass::primary_super_limit() ) { if( depth < ciKlass::primary_super_limit() ) {
// The field is an element of Klass::_primary_supers. Return its (constant) value. // The field is an element of Klass::_primary_supers. Return its (constant) value.
@ -1654,14 +1654,14 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
} }
const Type* aift = load_array_final_field(tkls, klass); const Type* aift = load_array_final_field(tkls, klass);
if (aift != NULL) return aift; if (aift != NULL) return aift;
if (tkls->offset() == in_bytes(arrayKlass::component_mirror_offset()) + (int)sizeof(oopDesc) if (tkls->offset() == in_bytes(arrayKlass::component_mirror_offset())
&& klass->is_array_klass()) { && klass->is_array_klass()) {
// The field is arrayKlass::_component_mirror. Return its (constant) value. // The field is arrayKlass::_component_mirror. Return its (constant) value.
// (Folds up aClassConstant.getComponentType, common in Arrays.copyOf.) // (Folds up aClassConstant.getComponentType, common in Arrays.copyOf.)
assert(Opcode() == Op_LoadP, "must load an oop from _component_mirror"); assert(Opcode() == Op_LoadP, "must load an oop from _component_mirror");
return TypeInstPtr::make(klass->as_array_klass()->component_mirror()); return TypeInstPtr::make(klass->as_array_klass()->component_mirror());
} }
if (tkls->offset() == Klass::java_mirror_offset_in_bytes() + (int)sizeof(oopDesc)) { if (tkls->offset() == in_bytes(Klass::java_mirror_offset())) {
// The field is Klass::_java_mirror. Return its (constant) value. // The field is Klass::_java_mirror. Return its (constant) value.
// (Folds up the 2nd indirection in anObjConstant.getClass().) // (Folds up the 2nd indirection in anObjConstant.getClass().)
assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror"); assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
@ -1679,7 +1679,7 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
if( inner->is_instance_klass() && if( inner->is_instance_klass() &&
!inner->as_instance_klass()->flags().is_interface() ) { !inner->as_instance_klass()->flags().is_interface() ) {
// Compute index into primary_supers array // Compute index into primary_supers array
juint depth = (tkls->offset() - (Klass::primary_supers_offset_in_bytes() + (int)sizeof(oopDesc))) / sizeof(klassOop); juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(klassOop);
// Check for overflowing; use unsigned compare to handle the negative case. // Check for overflowing; use unsigned compare to handle the negative case.
if( depth < ciKlass::primary_super_limit() && if( depth < ciKlass::primary_super_limit() &&
depth <= klass->super_depth() ) { // allow self-depth checks to handle self-check case depth <= klass->super_depth() ) { // allow self-depth checks to handle self-check case
@ -1695,7 +1695,7 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
// If the type is enough to determine that the thing is not an array, // If the type is enough to determine that the thing is not an array,
// we can give the layout_helper a positive interval type. // we can give the layout_helper a positive interval type.
// This will help short-circuit some reflective code. // This will help short-circuit some reflective code.
if (tkls->offset() == Klass::layout_helper_offset_in_bytes() + (int)sizeof(oopDesc) if (tkls->offset() == in_bytes(Klass::layout_helper_offset())
&& !klass->is_array_klass() // not directly typed as an array && !klass->is_array_klass() // not directly typed as an array
&& !klass->is_interface() // specifically not Serializable & Cloneable && !klass->is_interface() // specifically not Serializable & Cloneable
&& !klass->is_java_lang_Object() // not the supertype of all T[] && !klass->is_java_lang_Object() // not the supertype of all T[]
@ -1938,7 +1938,7 @@ const Type *LoadNode::klass_value_common( PhaseTransform *phase ) const {
if( !klass->is_loaded() ) if( !klass->is_loaded() )
return _type; // Bail out if not loaded return _type; // Bail out if not loaded
if( klass->is_obj_array_klass() && if( klass->is_obj_array_klass() &&
(uint)tkls->offset() == objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)) { tkls->offset() == in_bytes(objArrayKlass::element_klass_offset())) {
ciKlass* elem = klass->as_obj_array_klass()->element_klass(); ciKlass* elem = klass->as_obj_array_klass()->element_klass();
// // Always returning precise element type is incorrect, // // Always returning precise element type is incorrect,
// // e.g., element type could be object and array may contain strings // // e.g., element type could be object and array may contain strings
@ -1949,7 +1949,7 @@ const Type *LoadNode::klass_value_common( PhaseTransform *phase ) const {
return TypeKlassPtr::make(tkls->ptr(), elem, 0/*offset*/); return TypeKlassPtr::make(tkls->ptr(), elem, 0/*offset*/);
} }
if( klass->is_instance_klass() && tkls->klass_is_exact() && if( klass->is_instance_klass() && tkls->klass_is_exact() &&
(uint)tkls->offset() == Klass::super_offset_in_bytes() + sizeof(oopDesc)) { tkls->offset() == in_bytes(Klass::super_offset())) {
ciKlass* sup = klass->as_instance_klass()->super(); ciKlass* sup = klass->as_instance_klass()->super();
// The field is Klass::_super. Return its (constant) value. // The field is Klass::_super. Return its (constant) value.
// (Folds up the 2nd indirection in aClassConstant.getSuperClass().) // (Folds up the 2nd indirection in aClassConstant.getSuperClass().)
@ -2013,11 +2013,11 @@ Node* LoadNode::klass_identity_common(PhaseTransform *phase ) {
tkls->klass()->is_array_klass()) tkls->klass()->is_array_klass())
&& adr2->is_AddP() && adr2->is_AddP()
) { ) {
int mirror_field = Klass::java_mirror_offset_in_bytes(); int mirror_field = in_bytes(Klass::java_mirror_offset());
if (offset == java_lang_Class::array_klass_offset_in_bytes()) { if (offset == java_lang_Class::array_klass_offset_in_bytes()) {
mirror_field = in_bytes(arrayKlass::component_mirror_offset()); mirror_field = in_bytes(arrayKlass::component_mirror_offset());
} }
if (tkls->offset() == mirror_field + (int)sizeof(oopDesc)) { if (tkls->offset() == mirror_field) {
return adr2->in(AddPNode::Base); return adr2->in(AddPNode::Base);
} }
} }

View file

@ -1911,7 +1911,7 @@ void Parse::call_register_finalizer() {
Node* klass_addr = basic_plus_adr( receiver, receiver, oopDesc::klass_offset_in_bytes() ); Node* klass_addr = basic_plus_adr( receiver, receiver, oopDesc::klass_offset_in_bytes() );
Node* klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), klass_addr, TypeInstPtr::KLASS) ); Node* klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), klass_addr, TypeInstPtr::KLASS) );
Node* access_flags_addr = basic_plus_adr(klass, klass, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)); Node* access_flags_addr = basic_plus_adr(klass, klass, in_bytes(Klass::access_flags_offset()));
Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT); Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT);
Node* mask = _gvn.transform(new (C, 3) AndINode(access_flags, intcon(JVM_ACC_HAS_FINALIZER))); Node* mask = _gvn.transform(new (C, 3) AndINode(access_flags, intcon(JVM_ACC_HAS_FINALIZER)));

View file

@ -200,7 +200,7 @@ void Parse::array_store_check() {
// Come here for polymorphic array klasses // Come here for polymorphic array klasses
// Extract the array element class // Extract the array element class
int element_klass_offset = objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc); int element_klass_offset = in_bytes(objArrayKlass::element_klass_offset());
Node *p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset); Node *p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset);
Node *a_e_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p2, tak) ); Node *a_e_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p2, tak) );
@ -220,7 +220,7 @@ void Parse::emit_guard_for_new(ciInstanceKlass* klass) {
_gvn.set_type(merge, Type::CONTROL); _gvn.set_type(merge, Type::CONTROL);
Node* kls = makecon(TypeKlassPtr::make(klass)); Node* kls = makecon(TypeKlassPtr::make(klass));
Node* init_thread_offset = _gvn.MakeConX(instanceKlass::init_thread_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()); Node* init_thread_offset = _gvn.MakeConX(in_bytes(instanceKlass::init_thread_offset()));
Node* adr_node = basic_plus_adr(kls, kls, init_thread_offset); Node* adr_node = basic_plus_adr(kls, kls, init_thread_offset);
Node* init_thread = make_load(NULL, adr_node, TypeRawPtr::BOTTOM, T_ADDRESS); Node* init_thread = make_load(NULL, adr_node, TypeRawPtr::BOTTOM, T_ADDRESS);
Node *tst = Bool( CmpP( init_thread, cur_thread), BoolTest::eq); Node *tst = Bool( CmpP( init_thread, cur_thread), BoolTest::eq);
@ -228,7 +228,7 @@ void Parse::emit_guard_for_new(ciInstanceKlass* klass) {
set_control(IfTrue(iff)); set_control(IfTrue(iff));
merge->set_req(1, IfFalse(iff)); merge->set_req(1, IfFalse(iff));
Node* init_state_offset = _gvn.MakeConX(instanceKlass::init_state_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()); Node* init_state_offset = _gvn.MakeConX(in_bytes(instanceKlass::init_state_offset()));
adr_node = basic_plus_adr(kls, kls, init_state_offset); adr_node = basic_plus_adr(kls, kls, init_state_offset);
Node* init_state = make_load(NULL, adr_node, TypeInt::INT, T_INT); Node* init_state = make_load(NULL, adr_node, TypeInt::INT, T_INT);
Node* being_init = _gvn.intcon(instanceKlass::being_initialized); Node* being_init = _gvn.intcon(instanceKlass::being_initialized);

View file

@ -213,17 +213,11 @@ void SharkIntrinsics::do_Object_getClass() {
SharkType::oop_type(), SharkType::oop_type(),
"klass"); "klass");
Value *klass_part = builder()->CreateAddressOfStructEntry(
klass,
in_ByteSize(klassOopDesc::klass_part_offset_in_bytes()),
SharkType::klass_type(),
"klass_part");
state()->push( state()->push(
SharkValue::create_jobject( SharkValue::create_jobject(
builder()->CreateValueOfStructEntry( builder()->CreateValueOfStructEntry(
klass_part, klass,
in_ByteSize(Klass::java_mirror_offset_in_bytes()), Klass::java_mirror_offset(),
SharkType::oop_type(), SharkType::oop_type(),
"java_mirror"), "java_mirror"),
true)); true));

View file

@ -745,15 +745,9 @@ void SharkTopLevelBlock::call_register_finalizer(Value *receiver) {
SharkType::oop_type(), SharkType::oop_type(),
"klass"); "klass");
Value *klass_part = builder()->CreateAddressOfStructEntry(
klass,
in_ByteSize(klassOopDesc::klass_part_offset_in_bytes()),
SharkType::klass_type(),
"klass_part");
Value *access_flags = builder()->CreateValueOfStructEntry( Value *access_flags = builder()->CreateValueOfStructEntry(
klass_part, klass,
in_ByteSize(Klass::access_flags_offset_in_bytes()), Klass::access_flags_offset(),
SharkType::jint_type(), SharkType::jint_type(),
"access_flags"); "access_flags");