Mirror of https://github.com/openjdk/jdk.git, synced 2025-08-26 14:24:46 +02:00

6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb

Added new product ObjectAlignmentInBytes flag to control object alignment.

Reviewed-by: twisti, ysr, iveresov

parent 05b4f2e796, commit cc18a50e59

29 changed files with 222 additions and 136 deletions
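The arithmetic behind the synopsis: a compressed oop is a 32-bit offset that the VM scales by the object alignment, so the encodable heap grows from 4Gb by a factor of the alignment. With the default 8-byte alignment the limit is 32Gb; with -XX:ObjectAlignmentInBytes=16 it doubles to 64Gb, paid for by extra padding per object. A minimal Java sketch of that relationship, mirroring the OopEncodingHeapMax expression introduced in set_object_alignment() later in this diff (the class and method names are illustrative only):

    // Sketch: how the encodable heap limit scales with object alignment.
    // Mirrors OopEncodingHeapMax = (uint64_t(max_juint) + 1) << LogMinObjAlignmentInBytes.
    public class OopEncodingMath {
        static long oopEncodingHeapMax(int objectAlignmentInBytes) {
            long narrowOopHeapMax = 1L << 32; // 4Gb: range of a 32-bit narrow oop
            int shift = Integer.numberOfTrailingZeros(objectAlignmentInBytes); // valid because alignment is a power of 2
            return narrowOopHeapMax << shift;
        }

        public static void main(String[] args) {
            System.out.println(oopEncodingHeapMax(8));  // 34359738368 (32Gb, the old fixed limit)
            System.out.println(oopEncodingHeapMax(16)); // 68719476736 (64Gb)
        }
    }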
@@ -73,18 +73,11 @@ public class CompactibleFreeListSpace extends CompactibleSpace {
 
   public CompactibleFreeListSpace(Address addr) {
     super(addr);
-    if ( VM.getVM().isLP64() ) {
-      heapWordSize = 8;
-      IndexSetStart = 1;
-      IndexSetStride = 1;
-    }
-    else {
-      heapWordSize = 4;
-      IndexSetStart = 2;
-      IndexSetStride = 2;
-    }
-
-    IndexSetSize = 257;
+    VM vm = VM.getVM();
+    heapWordSize = vm.getHeapWordSize();
+    IndexSetStart = vm.getMinObjAlignmentInBytes() / heapWordSize;
+    IndexSetStride = IndexSetStart;
+    IndexSetSize = 257;
   }
 
   // Accessing block offset table

@@ -128,7 +128,7 @@ public class Oop {
 
   // Align the object size.
   public static long alignObjectSize(long size) {
-    return VM.getVM().alignUp(size, VM.getVM().getMinObjAlignment());
+    return VM.getVM().alignUp(size, VM.getVM().getMinObjAlignmentInBytes());
   }
 
   // All vm's align longs, so pad out certain offsets.

@@ -93,6 +93,7 @@ public class VM {
   /** alignment constants */
   private boolean isLP64;
   private int bytesPerLong;
+  private int objectAlignmentInBytes;
   private int minObjAlignmentInBytes;
   private int logMinObjAlignmentInBytes;
   private int heapWordSize;

@@ -313,9 +314,15 @@ public class VM {
       isLP64 = debugger.getMachineDescription().isLP64();
     }
     bytesPerLong = db.lookupIntConstant("BytesPerLong").intValue();
-    minObjAlignmentInBytes = db.lookupIntConstant("MinObjAlignmentInBytes").intValue();
-    // minObjAlignment = db.lookupIntConstant("MinObjAlignment").intValue();
-    logMinObjAlignmentInBytes = db.lookupIntConstant("LogMinObjAlignmentInBytes").intValue();
+    minObjAlignmentInBytes = getObjectAlignmentInBytes();
+    if (minObjAlignmentInBytes == 8) {
+      logMinObjAlignmentInBytes = 3;
+    } else if (minObjAlignmentInBytes == 16) {
+      logMinObjAlignmentInBytes = 4;
+    } else {
+      throw new RuntimeException("Object alignment " + minObjAlignmentInBytes + " not yet supported");
+    }
+
     heapWordSize = db.lookupIntConstant("HeapWordSize").intValue();
     oopSize = db.lookupIntConstant("oopSize").intValue();
 

@@ -492,10 +499,6 @@ public class VM {
   }
 
   /** Get minimum object alignment in bytes. */
-  public int getMinObjAlignment() {
-    return minObjAlignmentInBytes;
-  }
-
   public int getMinObjAlignmentInBytes() {
     return minObjAlignmentInBytes;
   }

@@ -754,6 +757,14 @@ public class VM {
     return compressedOopsEnabled.booleanValue();
   }
 
+  public int getObjectAlignmentInBytes() {
+    if (objectAlignmentInBytes == 0) {
+      Flag flag = getCommandLineFlag("ObjectAlignmentInBytes");
+      objectAlignmentInBytes = (flag == null) ? 8 : (int)flag.getIntx();
+    }
+    return objectAlignmentInBytes;
+  }
+
   // returns null, if not available.
   public Flag[] getCommandLineFlags() {
     if (commandLineFlags == null) {

@@ -154,7 +154,7 @@ static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
 }
 
 static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value) {
-  assert(MinObjAlignmentInBytes == BytesPerLong, "need alternate implementation");
+  assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
 
   julong* to = (julong*)tohw;
   julong v = ((julong)value << 32) | value;

@@ -162,7 +162,7 @@ static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value)
   // and be equal to 0 on 64-bit platform.
   size_t odd = count % (BytesPerLong / HeapWordSize) ;
 
-  size_t aligned_count = align_object_size(count - odd) / HeapWordsPerLong;
+  size_t aligned_count = align_object_offset(count - odd) / HeapWordsPerLong;
   julong* end = ((julong*)tohw) + aligned_count - 1;
   while (to <= end) {
     DEBUG_ONLY(count -= BytesPerLong / HeapWordSize ;)

@@ -821,6 +821,10 @@ void emit_form3_mem_reg(CodeBuffer &cbuf, const MachNode* n, int primary, int te
       !(n->ideal_Opcode()==Op_ConvI2D && ld_op==Op_LoadF) &&
       !(n->ideal_Opcode()==Op_PrefetchRead && ld_op==Op_LoadI) &&
       !(n->ideal_Opcode()==Op_PrefetchWrite && ld_op==Op_LoadI) &&
+      !(n->ideal_Opcode()==Op_Load2I && ld_op==Op_LoadD) &&
+      !(n->ideal_Opcode()==Op_Load4C && ld_op==Op_LoadD) &&
+      !(n->ideal_Opcode()==Op_Load4S && ld_op==Op_LoadD) &&
+      !(n->ideal_Opcode()==Op_Load8B && ld_op==Op_LoadD) &&
       !(n->rule() == loadUB_rule)) {
     verify_oops_warning(n, n->ideal_Opcode(), ld_op);
   }

@@ -832,6 +836,9 @@ void emit_form3_mem_reg(CodeBuffer &cbuf, const MachNode* n, int primary, int te
       !(n->ideal_Opcode()==Op_StoreI && st_op==Op_StoreF) &&
       !(n->ideal_Opcode()==Op_StoreF && st_op==Op_StoreI) &&
       !(n->ideal_Opcode()==Op_StoreL && st_op==Op_StoreI) &&
+      !(n->ideal_Opcode()==Op_Store2I && st_op==Op_StoreD) &&
+      !(n->ideal_Opcode()==Op_Store4C && st_op==Op_StoreD) &&
+      !(n->ideal_Opcode()==Op_Store8B && st_op==Op_StoreD) &&
       !(n->ideal_Opcode()==Op_StoreD && st_op==Op_StoreI && n->rule() == storeD0_rule)) {
     verify_oops_warning(n, n->ideal_Opcode(), st_op);
   }

@@ -8185,9 +8185,14 @@ void MacroAssembler::load_prototype_header(Register dst, Register src) {
     assert (Universe::heap() != NULL, "java heap should be initialized");
     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
     if (Universe::narrow_oop_shift() != 0) {
-      assert(Address::times_8 == LogMinObjAlignmentInBytes &&
-             Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong");
+      assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+      if (LogMinObjAlignmentInBytes == Address::times_8) {
       movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
+      } else {
+        // OK to use shift since we don't need to preserve flags.
+        shlq(dst, LogMinObjAlignmentInBytes);
+        movq(dst, Address(r12_heapbase, dst, Address::times_1, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
+      }
     } else {
       movq(dst, Address(dst, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
     }

@@ -8361,31 +8366,43 @@ void MacroAssembler::decode_heap_oop(Register r) {
 }
 
 void MacroAssembler::decode_heap_oop_not_null(Register r) {
+  // Note: it will change flags
   assert (UseCompressedOops, "should only be used for compressed headers");
   assert (Universe::heap() != NULL, "java heap should be initialized");
   // Cannot assert, unverified entry point counts instructions (see .ad file)
   // vtableStubs also counts instructions in pd_code_size_limit.
   // Also do not verify_oop as this is called by verify_oop.
   if (Universe::narrow_oop_shift() != 0) {
-    assert (Address::times_8 == LogMinObjAlignmentInBytes &&
-            Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong");
-    // Don't use Shift since it modifies flags.
-    leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
+    assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+    shlq(r, LogMinObjAlignmentInBytes);
+    if (Universe::narrow_oop_base() != NULL) {
+      addq(r, r12_heapbase);
+    }
   } else {
     assert (Universe::narrow_oop_base() == NULL, "sanity");
   }
 }
 
 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
+  // Note: it will change flags
   assert (UseCompressedOops, "should only be used for compressed headers");
   assert (Universe::heap() != NULL, "java heap should be initialized");
   // Cannot assert, unverified entry point counts instructions (see .ad file)
   // vtableStubs also counts instructions in pd_code_size_limit.
   // Also do not verify_oop as this is called by verify_oop.
   if (Universe::narrow_oop_shift() != 0) {
-    assert (Address::times_8 == LogMinObjAlignmentInBytes &&
-            Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong");
+    assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+    if (LogMinObjAlignmentInBytes == Address::times_8) {
       leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
+    } else {
+      if (dst != src) {
+        movq(dst, src);
+      }
+      shlq(dst, LogMinObjAlignmentInBytes);
+      if (Universe::narrow_oop_base() != NULL) {
+        addq(dst, r12_heapbase);
+      }
+    }
   } else if (dst != src) {
     assert (Universe::narrow_oop_base() == NULL, "sanity");
     movq(dst, src);
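All of the rewritten decode paths above compute the same thing: the 32-bit narrow oop is zero-extended, shifted left by LogMinObjAlignmentInBytes, and added to the heap base when one is set. The old code folded the whole decode into one leaq because an x86 addressing mode can scale by at most 8 (Address::times_8, i.e. a shift of 3); a 16-byte alignment needs a shift of 4, hence the new explicit shlq/addq sequence, which is also why the decodeHeapOop_not_null instruct below now kills the flags register. A hedged sketch of the decode in plain code (illustrative names):

    // Sketch of narrow-oop decoding as performed by decode_heap_oop_not_null.
    // heapBase is 0 when Universe::narrow_oop_base() is NULL (zero-based mode).
    static long decodeHeapOopNotNull(int narrowOop, long heapBase, int logMinObjAlignmentInBytes) {
        long offset = Integer.toUnsignedLong(narrowOop) << logMinObjAlignmentInBytes;
        return heapBase + offset;
    }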
@@ -1851,29 +1851,24 @@ uint reloc_java_to_interp()
 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 {
   if (UseCompressedOops) {
-    st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t", oopDesc::klass_offset_in_bytes());
+    st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
     if (Universe::narrow_oop_shift() != 0) {
-      st->print_cr("leaq rscratch1, [r12_heapbase, r, Address::times_8, 0]");
+      st->print_cr("\tdecode_heap_oop_not_null rscratch1, rscratch1");
     }
-    st->print_cr("cmpq rax, rscratch1\t # Inline cache check");
+    st->print_cr("\tcmpq rax, rscratch1\t # Inline cache check");
   } else {
-    st->print_cr("cmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t"
-                 "# Inline cache check", oopDesc::klass_offset_in_bytes());
+    st->print_cr("\tcmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t"
+                 "# Inline cache check");
   }
   st->print_cr("\tjne SharedRuntime::_ic_miss_stub");
-  st->print_cr("\tnop");
-  if (!OptoBreakpoint) {
-    st->print_cr("\tnop");
-  }
+  st->print_cr("\tnop\t# nops to align entry point");
 }
 #endif
 
 void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
 {
   MacroAssembler masm(&cbuf);
-#ifdef ASSERT
   uint code_size = cbuf.code_size();
-#endif
   if (UseCompressedOops) {
     masm.load_klass(rscratch1, j_rarg0);
     masm.cmpptr(rax, rscratch1);

@@ -1884,33 +1879,21 @@ void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
     masm.jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 
   /* WARNING these NOPs are critical so that verified entry point is properly
-     aligned for patching by NativeJump::patch_verified_entry() */
-  int nops_cnt = 1;
-  if (!OptoBreakpoint) {
+     4 bytes aligned for patching by NativeJump::patch_verified_entry() */
+  int nops_cnt = 4 - ((cbuf.code_size() - code_size) & 0x3);
+  if (OptoBreakpoint) {
     // Leave space for int3
-    nops_cnt += 1;
+    nops_cnt -= 1;
   }
-  if (UseCompressedOops) {
-    // ??? divisible by 4 is aligned?
-    nops_cnt += 1;
-  }
-  masm.nop(nops_cnt);
-
-  assert(cbuf.code_size() - code_size == size(ra_),
-         "checking code size of inline cache node");
+  nops_cnt &= 0x3; // Do not add nops if code is aligned.
+  if (nops_cnt > 0)
+    masm.nop(nops_cnt);
 }
 
 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
 {
-  if (UseCompressedOops) {
-    if (Universe::narrow_oop_shift() == 0) {
-      return OptoBreakpoint ? 15 : 16;
-    } else {
-      return OptoBreakpoint ? 19 : 20;
-    }
-  } else {
-    return OptoBreakpoint ? 11 : 12;
-  }
+  return MachNode::size(ra_); // too many variables; just compute it
+                              // the hard way
 }
 
 

@@ -5127,7 +5110,7 @@ operand indPosIndexScaleOffset(any_RegP reg, immL32 off, rRegI idx, immI2 scale)
 // Note: x86 architecture doesn't support "scale * index + offset" without a base
 // we can't free r12 even with Universe::narrow_oop_base() == NULL.
 operand indCompressedOopOffset(rRegN reg, immL32 off) %{
-  predicate(UseCompressedOops && (Universe::narrow_oop_shift() != 0));
+  predicate(UseCompressedOops && (Universe::narrow_oop_shift() == Address::times_8));
   constraint(ALLOC_IN_RC(ptr_reg));
   match(AddP (DecodeN reg) off);
 

@@ -7742,10 +7725,11 @@ instruct decodeHeapOop(rRegP dst, rRegN src, rFlagsReg cr) %{
   ins_pipe(ialu_reg_long);
 %}
 
-instruct decodeHeapOop_not_null(rRegP dst, rRegN src) %{
+instruct decodeHeapOop_not_null(rRegP dst, rRegN src, rFlagsReg cr) %{
   predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
             n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant);
   match(Set dst (DecodeN src));
+  effect(KILL cr);
   format %{ "decode_heap_oop_not_null $dst,$src" %}
   ins_encode %{
     Register s = $src$$Register;

@@ -32,6 +32,23 @@
 // highest ranked free list lock rank
 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
 
+// Defaults are 0 so things will break badly if incorrectly initialized.
+int CompactibleFreeListSpace::IndexSetStart = 0;
+int CompactibleFreeListSpace::IndexSetStride = 0;
+
+size_t MinChunkSize = 0;
+
+void CompactibleFreeListSpace::set_cms_values() {
+  // Set CMS global values
+  assert(MinChunkSize == 0, "already set");
+  #define numQuanta(x,y) ((x+y-1)/y)
+  MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;
+
+  assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
+  IndexSetStart = MinObjAlignment;
+  IndexSetStride = MinObjAlignment;
+}
+
 // Constructor
 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
   MemRegion mr, bool use_adaptive_freelists,
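MinChunkSize was previously a compile-time enum constant in the FreeChunk header; with MinObjAlignment now chosen at startup, set_cms_values() has to compute it at VM initialization instead. numQuanta(x, y) is a ceiling division, so the result is sizeof(FreeChunk) rounded up to whole alignment units, expressed in heap words. A hypothetical walk-through, assuming sizeof(FreeChunk) is 24 bytes and HeapWordSize is 8 (both values are assumptions for illustration, not taken from this commit):

    // Hypothetical trace of the MinChunkSize computation in set_cms_values().
    static long minChunkSizeInWords(long sizeofFreeChunk, int minObjAlignmentInBytes, int heapWordSize) {
        long quanta = (sizeofFreeChunk + minObjAlignmentInBytes - 1) / minObjAlignmentInBytes; // numQuanta: ceiling division
        long minObjAlignment = minObjAlignmentInBytes / heapWordSize; // alignment in heap words
        return quanta * minObjAlignment;
    }
    // minChunkSizeInWords(24, 8, 8)  == 3  (3 quanta * 1 word, 24 bytes)
    // minChunkSizeInWords(24, 16, 8) == 4  (2 quanta * 2 words, 32 bytes)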
@@ -302,7 +319,7 @@ size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 
 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
   size_t count = 0;
-  for (int i = MinChunkSize; i < IndexSetSize; i++) {
+  for (int i = (int)MinChunkSize; i < IndexSetSize; i++) {
     debug_only(
       ssize_t total_list_count = 0;
       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;

@@ -91,10 +91,10 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   enum SomeConstants {
     SmallForLinearAlloc = 16, // size < this then use _sLAB
     SmallForDictionary = 257, // size < this then use _indexedFreeList
-    IndexSetSize = SmallForDictionary, // keep this odd-sized
-    IndexSetStart = MinObjAlignment,
-    IndexSetStride = MinObjAlignment
+    IndexSetSize = SmallForDictionary // keep this odd-sized
   };
+  static int IndexSetStart;
+  static int IndexSetStride;
 
  private:
   enum FitStrategyOptions {

@@ -278,6 +278,9 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
   void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }
 
+  // Set CMS global values
+  static void set_cms_values();
+
   // Return the free chunk at the end of the space. If no such
   // chunk exists, return NULL.
   FreeChunk* find_chunk_at_end();

@@ -159,7 +159,7 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
      CardTableRS* ct, bool use_adaptive_freelists,
      FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
   CardGeneration(rs, initial_byte_size, level, ct),
-  _dilatation_factor(((double)MinChunkSize)/((double)(oopDesc::header_size()))),
+  _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
   _debug_collection_type(Concurrent_collection_type)
 {
   HeapWord* bottom = (HeapWord*) _virtual_space.low();

@@ -222,7 +222,7 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
   // promoting generation, we'll instead just use the mimimum
   // object size (which today is a header's worth of space);
   // note that all arithmetic is in units of HeapWords.
-  assert(MinChunkSize >= oopDesc::header_size(), "just checking");
+  assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
   assert(_dilatation_factor >= 1.0, "from previous assert");
 }
 

@@ -133,9 +133,5 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
   void print_on(outputStream* st);
 };
 
-// Alignment helpers etc.
-#define numQuanta(x,y) ((x+y-1)/y)
-enum AlignmentConstants {
-  MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment
-};
+extern size_t MinChunkSize;
 

@@ -3644,7 +3644,7 @@ void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) {
   do {
     free_words = r->free()/HeapWordSize;
     // If there's too little space, no one can allocate, so we're done.
-    if (free_words < (size_t)oopDesc::header_size()) return;
+    if (free_words < CollectedHeap::min_fill_size()) return;
     // Otherwise, try to claim it.
     block = r->par_allocate(free_words);
   } while (block == NULL);

@@ -2523,14 +2523,14 @@ record_concurrent_mark_cleanup_end(size_t freed_bytes,
   }
   if (ParallelGCThreads > 0) {
     const size_t OverpartitionFactor = 4;
-    const size_t MinChunkSize = 8;
-    const size_t ChunkSize =
+    const size_t MinWorkUnit = 8;
+    const size_t WorkUnit =
       MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
-           MinChunkSize);
+           MinWorkUnit);
     _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
-                                                             ChunkSize);
+                                                             WorkUnit);
     ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
-                                            (int) ChunkSize);
+                                            (int) WorkUnit);
     _g1->workers()->run_task(&parKnownGarbageTask);
 
     assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),

@@ -711,6 +711,7 @@ HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
   // object in the region.
   if (region_ptr->data_size() == RegionSize) {
     result += pointer_delta(addr, region_addr);
+    DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result);)
     return result;
   }
 

@@ -1487,13 +1488,14 @@ PSParallelCompact::provoke_split_fill_survivor(SpaceId id)
     space->set_top_for_allocations();
   }
 
-  size_t obj_len = 8;
+  size_t min_size = CollectedHeap::min_fill_size();
+  size_t obj_len = min_size;
   while (b + obj_len <= t) {
     CollectedHeap::fill_with_object(b, obj_len);
     mark_bitmap()->mark_obj(b, obj_len);
     summary_data().add_obj(b, obj_len);
     b += obj_len;
-    obj_len = (obj_len & 0x18) + 8; // 8 16 24 32 8 16 24 32 ...
+    obj_len = (obj_len & (min_size*3)) + min_size; // 8 16 24 32 8 16 24 32 ...
   }
   if (b < t) {
     // The loop didn't completely fill to t (top); adjust top downward.

@@ -1680,11 +1682,13 @@ void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
   // +-------+
 
   // Initially assume case a, c or e will apply.
-  size_t obj_len = (size_t)oopDesc::header_size();
+  size_t obj_len = CollectedHeap::min_fill_size();
   HeapWord* obj_beg = dense_prefix_end - obj_len;
 
 #ifdef _LP64
-  if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
+  if (MinObjAlignment > 1) { // object alignment > heap word size
+    // Cases a, c or e.
+  } else if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
     // Case b above.
     obj_beg = dense_prefix_end - 1;
   } else if (!_mark_bitmap.is_obj_end(dense_prefix_bit - 3) &&

@@ -1414,6 +1414,8 @@ PSParallelCompact::check_new_location(HeapWord* old_addr, HeapWord* new_addr)
 {
   assert(old_addr >= new_addr || space_id(old_addr) != space_id(new_addr),
          "must move left or to a different space");
+  assert(is_object_aligned((intptr_t)old_addr) && is_object_aligned((intptr_t)new_addr),
+         "checking alignment");
 }
 #endif // ASSERT
 

@@ -761,7 +761,7 @@ HeapWord* MutableNUMASpace::allocate(size_t size) {
 
   if (p != NULL) {
     size_t remainder = s->free_in_words();
-    if (remainder < (size_t)oopDesc::header_size() && remainder > 0) {
+    if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
       s->set_top(s->top() - size);
       p = NULL;
     }

@@ -803,7 +803,7 @@ HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
   HeapWord *p = s->cas_allocate(size);
   if (p != NULL) {
     size_t remainder = pointer_delta(s->end(), p + size);
-    if (remainder < (size_t)oopDesc::header_size() && remainder > 0) {
+    if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
       if (s->cas_deallocate(p, size)) {
         // We were the last to allocate and created a fragment less than
        // a minimal object.

@@ -239,11 +239,11 @@ oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
 }
 
 size_t CollectedHeap::filler_array_hdr_size() {
-  return size_t(arrayOopDesc::header_size(T_INT));
+  return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
 }
 
 size_t CollectedHeap::filler_array_min_size() {
-  return align_object_size(filler_array_hdr_size());
+  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
 }
 
 size_t CollectedHeap::filler_array_max_size() {

@@ -861,9 +861,9 @@ void ContiguousSpace::allocate_temporary_filler(int factor) {
   }
   size = align_object_size(size);
 
-  const size_t min_int_array_size = typeArrayOopDesc::header_size(T_INT);
-  if (size >= min_int_array_size) {
-    size_t length = (size - min_int_array_size) * (HeapWordSize / sizeof(jint));
+  const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
+  if (size >= (size_t)align_object_size(array_header_size)) {
+    size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
     // allocate uninitialized int array
     typeArrayOop t = (typeArrayOop) allocate(size);
     assert(t != NULL, "allocation should succeed");

@@ -871,7 +871,7 @@ void ContiguousSpace::allocate_temporary_filler(int factor) {
     t->set_klass(Universe::intArrayKlassObj());
     t->set_length((int)length);
   } else {
-    assert((int) size == instanceOopDesc::header_size(),
+    assert(size == CollectedHeap::min_fill_size(),
            "size for smallest fake object doesn't match");
     instanceOop obj = (instanceOop) allocate(size);
     obj->set_mark(markOopDesc::prototype());

@@ -31,7 +31,7 @@ inline HeapWord* ThreadLocalAllocBuffer::allocate(size_t size) {
       // Skip mangling the space corresponding to the object header to
       // ensure that the returned space is not considered parsable by
       // any concurrent GC thread.
-      size_t hdr_size = CollectedHeap::min_fill_size();
+      size_t hdr_size = oopDesc::header_size();
       Copy::fill_to_words(obj + hdr_size, size - hdr_size, badHeapWordVal);
 #endif // ASSERT
     // This addition is safe because we know that top is

@@ -748,7 +748,7 @@ jint universe_init() {
 // 4Gb
 static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1);
 // 32Gb
-static const uint64_t OopEncodingHeapMax = NarrowOopHeapMax << LogMinObjAlignmentInBytes;
+// OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes;
 
 char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
   size_t base = 0;

@@ -1261,7 +1261,7 @@ static void calculate_verify_data(uintptr_t verify_data[2],
 
   // decide which low-order bits we require to be clear:
   size_t alignSize = MinObjAlignmentInBytes;
-  size_t min_object_size = oopDesc::header_size();
+  size_t min_object_size = CollectedHeap::min_fill_size();
 
   // make an inclusive limit:
   uintptr_t max = (uintptr_t)high_boundary - min_object_size*wordSize;

@@ -92,7 +92,7 @@ class arrayOopDesc : public oopDesc {
   static int header_size(BasicType type) {
     size_t typesize_in_bytes = header_size_in_bytes();
     return (int)(Universe::element_type_should_be_aligned(type)
-                 ? align_object_size(typesize_in_bytes/HeapWordSize)
+                 ? align_object_offset(typesize_in_bytes/HeapWordSize)
                  : typesize_in_bytes/HeapWordSize);
   }
 

@@ -149,10 +149,6 @@ class oopDesc {
   // Need this as public for garbage collection.
   template <class T> T* obj_field_addr(int offset) const;
 
-  // Oop encoding heap max
-  static const uint64_t OopEncodingHeapMax =
-      (uint64_t(max_juint) + 1) << LogMinObjAlignmentInBytes;
-
   static bool is_null(oop obj);
   static bool is_null(narrowOop obj);
 

@@ -146,8 +146,13 @@ inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }
 // offset from the heap base. Saving the check for null can save instructions
 // in inner GC loops so these are separated.
 
+inline bool check_obj_alignment(oop obj) {
+  return (intptr_t)obj % MinObjAlignmentInBytes == 0;
+}
+
 inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
   assert(!is_null(v), "oop value can never be zero");
+  assert(check_obj_alignment(v), "Address not aligned");
   assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
   address base = Universe::narrow_oop_base();
   int shift = Universe::narrow_oop_shift();

@@ -167,7 +172,9 @@ inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
   assert(!is_null(v), "narrow oop value can never be zero");
   address base = Universe::narrow_oop_base();
   int shift = Universe::narrow_oop_shift();
-  return (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
+  oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
+  assert(check_obj_alignment(result), "Address not aligned");
+  return result;
 }
 
 inline oop oopDesc::decode_heap_oop(narrowOop v) {
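The check_obj_alignment predicate moved up in the oop inline header because both directions of the mapping now assert with it: encoding discards the low LogMinObjAlignmentInBytes bits of the offset, so decode(encode(v)) == v only holds when every oop address is a multiple of the alignment. A hedged sketch of the invariant the new asserts protect (illustrative names; assumes the heap base itself is aligned, as a reserved heap is):

    // Sketch: encoding shifts away the low bits, so the added asserts
    // guarantee no address information is lost in the round trip.
    static int encodeHeapOopNotNull(long oop, long heapBase, int shift) {
        long offset = oop - heapBase;
        assert (offset & ((1L << shift) - 1)) == 0 : "Address not aligned";
        return (int) (offset >>> shift); // the 32-bit narrow oop
    }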
@@ -522,10 +529,6 @@ inline bool oopDesc::has_bias_pattern() const {
   return mark()->has_bias_pattern();
 }
 
-inline bool check_obj_alignment(oop obj) {
-  return (intptr_t)obj % MinObjAlignmentInBytes == 0;
-}
-
 
 // used only for asserts
 inline bool oopDesc::is_oop(bool ignore_mark_word) const {

@@ -600,6 +603,8 @@ inline bool oopDesc::is_forwarded() const {
 
 // Used by scavengers
 inline void oopDesc::forward_to(oop p) {
+  assert(check_obj_alignment(p),
+         "forwarding to something not aligned");
   assert(Universe::heap()->is_in_reserved(p),
          "forwarding to something not in heap");
   markOop m = markOopDesc::encode_pointer_as_mark(p);

@@ -609,6 +614,8 @@ inline void oopDesc::forward_to(oop p) {
 
 // Used by parallel scavengers
 inline bool oopDesc::cas_forward_to(oop p, markOop compare) {
+  assert(check_obj_alignment(p),
+         "forwarding to something not aligned");
   assert(Universe::heap()->is_in_reserved(p),
          "forwarding to something not in heap");
   markOop m = markOopDesc::encode_pointer_as_mark(p);

@@ -1211,8 +1211,44 @@ void Arguments::set_cms_and_parnew_gc_flags() {
 }
 #endif // KERNEL
 
+void set_object_alignment() {
+  // Object alignment.
+  assert(is_power_of_2(ObjectAlignmentInBytes), "ObjectAlignmentInBytes must be power of 2");
+  MinObjAlignmentInBytes = ObjectAlignmentInBytes;
+  assert(MinObjAlignmentInBytes >= HeapWordsPerLong * HeapWordSize, "ObjectAlignmentInBytes value is too small");
+  MinObjAlignment = MinObjAlignmentInBytes / HeapWordSize;
+  assert(MinObjAlignmentInBytes == MinObjAlignment * HeapWordSize, "ObjectAlignmentInBytes value is incorrect");
+  MinObjAlignmentInBytesMask = MinObjAlignmentInBytes - 1;
+
+  LogMinObjAlignmentInBytes = exact_log2(ObjectAlignmentInBytes);
+  LogMinObjAlignment = LogMinObjAlignmentInBytes - LogHeapWordSize;
+
+  // Oop encoding heap max
+  OopEncodingHeapMax = (uint64_t(max_juint) + 1) << LogMinObjAlignmentInBytes;
+
+#ifndef KERNEL
+  // Set CMS global values
+  CompactibleFreeListSpace::set_cms_values();
+#endif // KERNEL
+}
+
+bool verify_object_alignment() {
+  // Object alignment.
+  if (!is_power_of_2(ObjectAlignmentInBytes)) {
+    jio_fprintf(defaultStream::error_stream(),
+                "error: ObjectAlignmentInBytes=%d must be power of 2", (int)ObjectAlignmentInBytes);
+    return false;
+  }
+  if ((int)ObjectAlignmentInBytes < BytesPerLong) {
+    jio_fprintf(defaultStream::error_stream(),
+                "error: ObjectAlignmentInBytes=%d must be greater or equal %d", (int)ObjectAlignmentInBytes, BytesPerLong);
+    return false;
+  }
+  return true;
+}
+
 inline uintx max_heap_for_compressed_oops() {
-  LP64_ONLY(return oopDesc::OopEncodingHeapMax - MaxPermSize - os::vm_page_size());
+  LP64_ONLY(return OopEncodingHeapMax - MaxPermSize - os::vm_page_size());
   NOT_LP64(ShouldNotReachHere(); return 0);
 }
 
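set_object_alignment() fans the validated flag out to every derived global (MinObjAlignment, the byte mask, the log values, OopEncodingHeapMax, and the CMS values), while verify_object_alignment() rejects unusable settings so startup fails fast instead of corrupting alignment math later; the two hunks that follow wire these into check_vm_args_consistency() and Arguments::parse(). A condensed model of the two acceptance rules, a power of two that is at least BytesPerLong (8):

    // Condensed model of verify_object_alignment().
    static boolean verifyObjectAlignment(int objectAlignmentInBytes) {
        boolean powerOfTwo = Integer.bitCount(objectAlignmentInBytes) == 1;
        boolean bigEnough = objectAlignmentInBytes >= 8; // BytesPerLong
        return powerOfTwo && bigEnough; // 8, 16, 32 pass; 4 and 12 fail
    }

With both checks passing, -XX:+UseCompressedOops -XX:ObjectAlignmentInBytes=16 gives the 64Gb limit named in the synopsis.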
@@ -1776,6 +1812,8 @@ bool Arguments::check_vm_args_consistency() {
   status = status && verify_interval(TLABWasteTargetPercent,
                                      1, 100, "TLABWasteTargetPercent");
 
+  status = status && verify_object_alignment();
+
   return status;
 }
 

@@ -2848,6 +2886,9 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
     UseCompressedOops = false;
 #endif
 
+  // Set object alignment values.
+  set_object_alignment();
+
 #ifdef SERIALGC
   force_serial_gc();
 #endif // SERIALGC

@@ -321,6 +321,9 @@ class CommandLineFlags {
   diagnostic(bool, PrintCompressedOopsMode, false,                         \
           "Print compressed oops base address and encoding mode")          \
                                                                            \
+  lp64_product(intx, ObjectAlignmentInBytes, 8,                            \
+          "Default object alignment in bytes, 8 is minimum")               \
+                                                                           \
   /* UseMembar is theoretically a temp flag used for memory barrier       \
    * removal testing. It was supposed to be removed before FCS but has    \
    * been re-added (see 6401008) */                                       \

@@ -1328,14 +1328,6 @@ static inline uint64_t cast_uint64_t(size_t x)
   declare_constant(LogBytesPerWord)                                       \
   declare_constant(BytesPerLong)                                          \
                                                                           \
-  /********************/                                                  \
-  /* Object alignment */                                                  \
-  /********************/                                                  \
-                                                                          \
-  declare_constant(MinObjAlignment)                                       \
-  declare_constant(MinObjAlignmentInBytes)                                \
-  declare_constant(LogMinObjAlignmentInBytes)                             \
-                                                                          \
   /********************************************/                          \
   /* Generation and Space Hierarchy Constants */                          \
   /********************************************/                          \

@@ -51,7 +51,7 @@ extern "C" {
 class Copy : AllStatic {
  public:
   // Block copy methods have four attributes. We don't define all possibilities.
-  // alignment: aligned according to minimum Java object alignment (MinObjAlignment)
+  // alignment: aligned to BytesPerLong
   // arrayof: arraycopy operation with both operands aligned on the same
   //          boundary as the first element of an array of the copy unit.
   //          This is currently a HeapWord boundary on all platforms, except

@@ -70,7 +70,7 @@ class Copy : AllStatic {
 // [ '_atomic' ]
 //
 // Except in the arrayof case, whatever the alignment is, we assume we can copy
-// whole alignment units. E.g., if MinObjAlignment is 2x word alignment, an odd
+// whole alignment units. E.g., if BytesPerLong is 2x word alignment, an odd
 // count may copy an extra word. In the arrayof case, we are allowed to copy
 // only the number of copy units specified.
 

@@ -305,17 +305,17 @@ class Copy : AllStatic {
   }
   static void assert_params_aligned(HeapWord* from, HeapWord* to) {
 #ifdef ASSERT
-    if (mask_bits((uintptr_t)from, MinObjAlignmentInBytes-1) != 0)
-      basic_fatal("not object aligned");
-    if (mask_bits((uintptr_t)to, MinObjAlignmentInBytes-1) != 0)
-      basic_fatal("not object aligned");
+    if (mask_bits((uintptr_t)from, BytesPerLong-1) != 0)
+      basic_fatal("not long aligned");
+    if (mask_bits((uintptr_t)to, BytesPerLong-1) != 0)
+      basic_fatal("not long aligned");
 #endif
   }
 
   static void assert_params_aligned(HeapWord* to) {
 #ifdef ASSERT
-    if (mask_bits((uintptr_t)to, MinObjAlignmentInBytes-1) != 0)
-      basic_fatal("not object aligned");
+    if (mask_bits((uintptr_t)to, BytesPerLong-1) != 0)
+      basic_fatal("not long aligned");
 #endif
   }
 

@@ -34,6 +34,18 @@ int LogBitsPerHeapOop = 0;
 int BytesPerHeapOop = 0;
 int BitsPerHeapOop = 0;
 
+// Object alignment, in units of HeapWords.
+// Defaults are -1 so things will break badly if incorrectly initialized.
+int MinObjAlignment = -1;
+int MinObjAlignmentInBytes = -1;
+int MinObjAlignmentInBytesMask = 0;
+
+int LogMinObjAlignment = -1;
+int LogMinObjAlignmentInBytes = -1;
+
+// Oop encoding heap max
+uint64_t OopEncodingHeapMax = 0;
+
 void basic_fatal(const char* msg) {
   fatal(msg);
 }

@@ -73,6 +73,9 @@ extern int LogBitsPerHeapOop;
 extern int BytesPerHeapOop;
 extern int BitsPerHeapOop;
 
+// Oop encoding heap max
+extern uint64_t OopEncodingHeapMax;
+
 const int BitsPerJavaInteger = 32;
 const int BitsPerJavaLong = 64;
 const int BitsPerSize_t = size_tSize * BitsPerByte;

@@ -292,12 +295,12 @@ const int max_method_code_size = 64*K - 1; // JVM spec, 2nd ed. section 4.8.1 (
 // Minimum is max(BytesPerLong, BytesPerDouble, BytesPerOop) / HeapWordSize, so jlong, jdouble and
 // reference fields can be naturally aligned.
 
-const int MinObjAlignment = HeapWordsPerLong;
-const int MinObjAlignmentInBytes = MinObjAlignment * HeapWordSize;
-const int MinObjAlignmentInBytesMask = MinObjAlignmentInBytes - 1;
+extern int MinObjAlignment;
+extern int MinObjAlignmentInBytes;
+extern int MinObjAlignmentInBytesMask;
 
-const int LogMinObjAlignment = LogHeapWordsPerLong;
-const int LogMinObjAlignmentInBytes = LogMinObjAlignment + LogHeapWordSize;
+extern int LogMinObjAlignment;
+extern int LogMinObjAlignmentInBytes;
 
 // Machine dependent stuff
 

@@ -332,18 +335,16 @@ inline intptr_t align_object_size(intptr_t size) {
   return align_size_up(size, MinObjAlignment);
 }
 
-// Pad out certain offsets to jlong alignment, in HeapWord units.
-#define align_object_offset_(offset) align_size_up_(offset, HeapWordsPerLong)
+inline bool is_object_aligned(intptr_t addr) {
+  return addr == align_object_size(addr);
+}
+
+// Pad out certain offsets to jlong alignment, in HeapWord units.
 
 inline intptr_t align_object_offset(intptr_t offset) {
   return align_size_up(offset, HeapWordsPerLong);
 }
 
-inline bool is_object_aligned(intptr_t offset) {
-  return offset == align_object_offset(offset);
-}
-
 
 //----------------------------------------------------------------------------------------------------
 // Utility macros for compilers