Mirror of https://github.com/openjdk/jdk.git

commit 059e448264 (Merge)
791 changed files with 57067 additions and 26704 deletions
@@ -779,9 +779,9 @@ class StubGenerator: public StubCodeGenerator {
__ shrl(end, CardTableModRefBS::card_shift);
__ subl(end, start); // end --> count
__ BIND(L_loop);
ExternalAddress base((address)ct->byte_map_base);
Address index(start, count, Address::times_1, 0);
__ movbyte(ArrayAddress(base, index), 0);
intptr_t disp = (intptr_t) ct->byte_map_base;
Address cardtable(start, count, Address::times_1, disp);
__ movb(cardtable, 0);
__ decrement(count);
__ jcc(Assembler::greaterEqual, L_loop);
}

@@ -1222,8 +1222,16 @@ class StubGenerator: public StubCodeGenerator {
__ shrq(end, CardTableModRefBS::card_shift);
__ subq(end, start); // number of bytes to copy

intptr_t disp = (intptr_t) ct->byte_map_base;
if (__ is_simm32(disp)) {
Address cardtable(noreg, noreg, Address::no_scale, disp);
__ lea(scratch, cardtable);
} else {
ExternalAddress cardtable((address)disp);
__ lea(scratch, cardtable);
}

const Register count = end; // 'end' register contains bytes count now
__ lea(scratch, ExternalAddress((address)ct->byte_map_base));
__ addq(start, scratch);
__ BIND(L_loop);
__ movb(Address(start, count, Address::times_1), 0);
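The two stub hunks above generate the card-table post-barrier for a copied range: the start and end addresses are shifted right by card_shift and the loop stores one zero byte per card relative to byte_map_base. A minimal C++ sketch of the equivalent logic, assuming the usual byte-per-card layout and that 0 is the dirty value; this is an illustration, not code from the patch:

#include <cstdint>

// Sketch: dirty every card covering [first_addr, last_addr].
static void dirty_cards(volatile uint8_t* byte_map_base, int card_shift,
                        uintptr_t first_addr, uintptr_t last_addr) {
  uintptr_t first_card = first_addr >> card_shift;   // __ shr(start, card_shift)
  uintptr_t last_card  = last_addr  >> card_shift;   // __ shr(end, card_shift)
  // count = last_card - first_card; the stub walks this range with
  // decrement/jcc(greaterEqual), i.e. an inclusive loop.
  for (uintptr_t card = first_card; card <= last_card; ++card) {
    byte_map_base[card] = 0;                          // assumed dirty-card value
  }
}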
@@ -2309,7 +2309,7 @@ void os::Linux::libnuma_init() {
dlsym(RTLD_DEFAULT, "sched_getcpu")));

if (sched_getcpu() != -1) { // Does it work?
void *handle = dlopen("libnuma.so", RTLD_LAZY);
void *handle = dlopen("libnuma.so.1", RTLD_LAZY);
if (handle != NULL) {
set_numa_node_to_cpus(CAST_TO_FN_PTR(numa_node_to_cpus_func_t,
dlsym(handle, "numa_node_to_cpus")));

@@ -2679,6 +2679,12 @@ size_t os::numa_get_leaf_groups(int *ids, size_t size) {
top += r;
cur++;
}
if (bottom == 0) {
// Handle a situation, when the OS reports no memory available.
// Assume UMA architecture.
ids[0] = 0;
return 1;
}
return bottom;
}

@@ -4602,7 +4608,7 @@ void os::Solaris::synchronization_init() {
}

void os::Solaris::liblgrp_init() {
void *handle = dlopen("liblgrp.so", RTLD_LAZY);
void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
if (handle != NULL) {
os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
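Both OS hunks make the same fix: dlopen the versioned library name (libnuma.so.1, liblgrp.so.1), since the unversioned .so symlink is typically only present when the development package is installed. A hedged sketch of the pattern, using a generic function-pointer type rather than the HotSpot typedefs; the signature shown is the old libnuma v1 interface and is an assumption here:

#include <dlfcn.h>
#include <cstddef>

typedef int (*numa_node_to_cpus_fn)(int node, unsigned long* buffer, int bufferlen);

static numa_node_to_cpus_fn load_numa_node_to_cpus() {
  // Prefer the runtime name "libnuma.so.1"; "libnuma.so" usually exists
  // only when the -devel package is installed.
  void* handle = dlopen("libnuma.so.1", RTLD_LAZY);
  if (handle == NULL) {
    return NULL;                 // the feature simply stays disabled
  }
  return (numa_node_to_cpus_fn) dlsym(handle, "numa_node_to_cpus");
}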
@@ -3825,6 +3825,8 @@ int MatchRule::is_expensive() const {
strcmp(opType,"ConvL2D")==0 ||
strcmp(opType,"ConvL2F")==0 ||
strcmp(opType,"ConvL2I")==0 ||
strcmp(opType,"DecodeN")==0 ||
strcmp(opType,"EncodeP")==0 ||
strcmp(opType,"RoundDouble")==0 ||
strcmp(opType,"RoundFloat")==0 ||
strcmp(opType,"ReverseBytesI")==0 ||
@@ -351,7 +351,7 @@ void ciBlock::set_exception_range(int start_bci, int limit_bci) {
}

#ifndef PRODUCT
static char *flagnames[] = {
static const char *flagnames[] = {
"Processed",
"Handler",
"MayThrow",
@@ -188,10 +188,6 @@ void OopMap::set_derived_oop(VMReg reg, VMReg derived_from_local_register ) {
}
}

void OopMap::set_stack_obj(VMReg reg) {
set_xxx(reg, OopMapValue::stack_obj, VMRegImpl::Bad());
}

// OopMapSet

OopMapSet::OopMapSet() {

@@ -399,8 +395,7 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
if ( loc != NULL ) {
if ( omv.type() == OopMapValue::oop_value ) {
#ifdef ASSERT
if (COMPILER2_PRESENT(!DoEscapeAnalysis &&)
(((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
if ((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
!Universe::heap()->is_in_or_null(*loc)) {
tty->print_cr("# Found non oop pointer. Dumping state at failure");
// try to dump out some helpful debugging information

@@ -431,17 +426,6 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
}
}
}

#ifdef COMPILER2
if (DoEscapeAnalysis) {
for (OopMapStream oms(map, OopMapValue::stack_obj); !oms.is_done(); oms.next()) {
omv = oms.current();
assert(omv.is_stack_loc(), "should refer to stack location");
oop loc = (oop) fr->oopmapreg_to_location(omv.reg(),reg_map);
oop_fn->do_oop(&loc);
}
}
#endif // COMPILER2
}

@@ -540,9 +524,6 @@ void print_register_type(OopMapValue::oop_types x, VMReg optional,
st->print("Derived_oop_" );
optional->print_on(st);
break;
case OopMapValue::stack_obj:
st->print("Stack");
break;
default:
ShouldNotReachHere();
}
@@ -46,7 +46,7 @@ private:

public:
// Constants
enum { type_bits = 6,
enum { type_bits = 5,
register_bits = BitsPerShort - type_bits };

enum { type_shift = 0,

@@ -63,8 +63,7 @@ public:
value_value = 2,
narrowoop_value = 4,
callee_saved_value = 8,
derived_oop_value= 16,
stack_obj = 32 };
derived_oop_value= 16 };

// Constructors
OopMapValue () { set_value(0); set_content_reg(VMRegImpl::Bad()); }

@@ -93,14 +92,12 @@ public:
bool is_narrowoop() { return mask_bits(value(), type_mask_in_place) == narrowoop_value; }
bool is_callee_saved() { return mask_bits(value(), type_mask_in_place) == callee_saved_value; }
bool is_derived_oop() { return mask_bits(value(), type_mask_in_place) == derived_oop_value; }
bool is_stack_obj() { return mask_bits(value(), type_mask_in_place) == stack_obj; }

void set_oop() { set_value((value() & register_mask_in_place) | oop_value); }
void set_value() { set_value((value() & register_mask_in_place) | value_value); }
void set_narrowoop() { set_value((value() & register_mask_in_place) | narrowoop_value); }
void set_callee_saved() { set_value((value() & register_mask_in_place) | callee_saved_value); }
void set_derived_oop() { set_value((value() & register_mask_in_place) | derived_oop_value); }
void set_stack_obj() { set_value((value() & register_mask_in_place) | stack_obj); }

VMReg reg() const { return VMRegImpl::as_VMReg(mask_bits(value(), register_mask_in_place) >> register_shift); }
oop_types type() const { return (oop_types)mask_bits(value(), type_mask_in_place); }

@@ -180,7 +177,6 @@ class OopMap: public ResourceObj {
void set_dead ( VMReg local);
void set_callee_saved( VMReg local, VMReg caller_machine_register );
void set_derived_oop ( VMReg local, VMReg derived_from_local_register );
void set_stack_obj( VMReg local);
void set_xxx(VMReg reg, OopMapValue::oop_types x, VMReg optional);

int heap_size() const;
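For context on the type_bits change above: an OopMapValue packs a small type tag and a register index into one short, and removing the stack_obj flag (value 32) apparently lets the tag field shrink from 6 to 5 bits, giving the register field one extra bit. A self-contained sketch of that packing scheme with hypothetical names, not the HotSpot constants:

#include <cstdint>

enum { kTypeBits = 5, kRegisterBits = 16 - kTypeBits };   // cf. BitsPerShort - type_bits
enum { kTypeMask = (1 << kTypeBits) - 1 };

static inline uint16_t pack(int reg, int type) {
  // The type lives in the low bits (type_shift == 0), the register above it.
  return (uint16_t)(((unsigned)reg << kTypeBits) | ((unsigned)type & kTypeMask));
}
static inline int unpack_reg(uint16_t v)  { return v >> kTypeBits; }
static inline int unpack_type(uint16_t v) { return v & kTypeMask; }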
@@ -71,8 +71,15 @@ TreeList* TreeList::as_TreeList(TreeChunk* tc) {
TreeList* TreeList::as_TreeList(HeapWord* addr, size_t size) {
TreeChunk* tc = (TreeChunk*) addr;
assert(size >= sizeof(TreeChunk), "Chunk is too small for a TreeChunk");
assert(tc->size() == 0 && tc->prev() == NULL && tc->next() == NULL,
"Space should be clear");
// The space in the heap will have been mangled initially but
// is not remangled when a free chunk is returned to the free list
// (since it is used to maintain the chunk on the free list).
assert((ZapUnusedHeapArea &&
SpaceMangler::is_mangled((HeapWord*) tc->size_addr()) &&
SpaceMangler::is_mangled((HeapWord*) tc->prev_addr()) &&
SpaceMangler::is_mangled((HeapWord*) tc->next_addr())) ||
(tc->size() == 0 && tc->prev() == NULL && tc->next() == NULL),
"Space should be clear or mangled");
tc->setSize(size);
tc->linkPrev(NULL);
tc->linkNext(NULL);
@@ -54,7 +54,7 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
_collector(NULL)
{
_bt.set_space(this);
initialize(mr, true);
initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
// We have all of "mr", all of which we place in the dictionary
// as one big chunk. We'll need to decide here which of several
// possible alternative dictionary implementations to use. For
@@ -3195,31 +3195,16 @@ ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
// YSR: All of this generation expansion/shrinking stuff is an exact copy of
// OneContigSpaceCardGeneration, which makes me wonder if we should move this
// to CardGeneration and share it...
bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
return CardGeneration::expand(bytes, expand_bytes);
}

void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
CMSExpansionCause::Cause cause)
{
assert_locked_or_safepoint(Heap_lock);

size_t aligned_bytes = ReservedSpace::page_align_size_up(bytes);
size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
bool success = false;
if (aligned_expand_bytes > aligned_bytes) {
success = grow_by(aligned_expand_bytes);
}
if (!success) {
success = grow_by(aligned_bytes);
}
if (!success) {
size_t remaining_bytes = _virtual_space.uncommitted_size();
if (remaining_bytes > 0) {
success = grow_by(remaining_bytes);
}
}
if (GC_locker::is_active()) {
if (PrintGC && Verbose) {
gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
}
}
bool success = expand(bytes, expand_bytes);

// remember why we expanded; this information is used
// by shouldConcurrentCollect() when making decisions on whether to start
// a new CMS cycle.
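The hunk above folds the CMS expansion policy into a boolean expand(bytes, expand_bytes). The retry order it shows — try the (larger) aligned expansion hint, then the requested size, then whatever is still uncommitted — can be sketched roughly as follows; the helpers are passed in here because the real ones live on the generation and its virtual space:

#include <cstddef>
#include <functional>

// Rough sketch of the fallback order, assuming grow_by() reports whether the
// commit succeeded.
static bool expand_with_fallback(std::size_t aligned_bytes,
                                 std::size_t aligned_expand_bytes,
                                 std::size_t uncommitted_bytes,
                                 const std::function<bool(std::size_t)>& grow_by) {
  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = grow_by(aligned_expand_bytes);   // preferred, larger step
  }
  if (!success) {
    success = grow_by(aligned_bytes);          // at least what was asked for
  }
  if (!success && uncommitted_bytes > 0) {
    success = grow_by(uncommitted_bytes);      // last resort: take what is left
  }
  return success;
}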
@@ -1048,10 +1048,6 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
double _initiating_occupancy;

protected:
// Grow generation by specified size (returns false if unable to grow)
bool grow_by(size_t bytes);
// Grow generation to reserved size.
bool grow_to_reserved();
// Shrink generation by specified size (returns false if unable to shrink)
virtual void shrink_by(size_t bytes);

@@ -1103,6 +1099,11 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
// Override
virtual void ref_processor_init();

// Grow generation by specified size (returns false if unable to grow)
bool grow_by(size_t bytes);
// Grow generation to reserved size.
bool grow_to_reserved();

void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }

// Space enquiries

@@ -1193,6 +1194,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
// Allocation failure
void expand(size_t bytes, size_t expand_bytes,
CMSExpansionCause::Cause cause);
virtual bool expand(size_t bytes, size_t expand_bytes);
void shrink(size_t bytes);
HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
bool expand_and_ensure_spooling_space(PromotionInfo* promo);
@@ -22,7 +22,6 @@
*
*/

// A FreeBlockDictionary is an abstract superclass that will allow
// a number of alternative implementations in the future.
class FreeBlockDictionary: public CHeapObj {
@@ -85,6 +85,8 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
}

debug_only(void* prev_addr() const { return (void*)&_prev; })
debug_only(void* next_addr() const { return (void*)&_next; })
debug_only(void* size_addr() const { return (void*)&_size; })

size_t size() const volatile {
LP64_ONLY(if (UseCompressedOops) return mark()->get_size(); else )
@@ -28,6 +28,7 @@ binaryTreeDictionary.cpp allocationStats.hpp
binaryTreeDictionary.cpp binaryTreeDictionary.hpp
binaryTreeDictionary.cpp globals.hpp
binaryTreeDictionary.cpp ostream.hpp
binaryTreeDictionary.cpp spaceDecorator.hpp

binaryTreeDictionary.hpp freeBlockDictionary.hpp
binaryTreeDictionary.hpp freeList.hpp

@@ -114,6 +115,7 @@ compactibleFreeListSpace.cpp java.hpp
compactibleFreeListSpace.cpp liveRange.hpp
compactibleFreeListSpace.cpp oop.inline.hpp
compactibleFreeListSpace.cpp resourceArea.hpp
compactibleFreeListSpace.cpp spaceDecorator.hpp
compactibleFreeListSpace.cpp universe.inline.hpp
compactibleFreeListSpace.cpp vmThread.hpp
|
@ -22,16 +22,17 @@
|
|||
//
|
||||
//
|
||||
|
||||
asParNewGeneration.hpp adaptiveSizePolicy.hpp
|
||||
asParNewGeneration.hpp parNewGeneration.hpp
|
||||
asParNewGeneration.hpp adaptiveSizePolicy.hpp
|
||||
asParNewGeneration.hpp parNewGeneration.hpp
|
||||
|
||||
asParNewGeneration.cpp asParNewGeneration.hpp
|
||||
asParNewGeneration.cpp cmsAdaptiveSizePolicy.hpp
|
||||
asParNewGeneration.cpp asParNewGeneration.hpp
|
||||
asParNewGeneration.cpp cmsAdaptiveSizePolicy.hpp
|
||||
asParNewGeneration.cpp cmsGCAdaptivePolicyCounters.hpp
|
||||
asParNewGeneration.cpp defNewGeneration.inline.hpp
|
||||
asParNewGeneration.cpp oop.pcgc.inline.hpp
|
||||
asParNewGeneration.cpp parNewGeneration.hpp
|
||||
asParNewGeneration.cpp defNewGeneration.inline.hpp
|
||||
asParNewGeneration.cpp oop.pcgc.inline.hpp
|
||||
asParNewGeneration.cpp parNewGeneration.hpp
|
||||
asParNewGeneration.cpp referencePolicy.hpp
|
||||
asParNewGeneration.cpp spaceDecorator.hpp
|
||||
|
||||
parCardTableModRefBS.cpp allocation.inline.hpp
|
||||
parCardTableModRefBS.cpp cardTableModRefBS.hpp
|
||||
|
@ -75,6 +76,7 @@ parNewGeneration.cpp referencePolicy.hpp
|
|||
parNewGeneration.cpp resourceArea.hpp
|
||||
parNewGeneration.cpp sharedHeap.hpp
|
||||
parNewGeneration.cpp space.hpp
|
||||
parNewGeneration.cpp spaceDecorator.hpp
|
||||
parNewGeneration.cpp workgroup.hpp
|
||||
|
||||
parNewGeneration.hpp defNewGeneration.hpp
|
||||
|
|
|
@@ -53,14 +53,15 @@ asPSOldGen.cpp java.hpp
asPSOldGen.cpp oop.inline.hpp
asPSOldGen.cpp parallelScavengeHeap.hpp
asPSOldGen.cpp psMarkSweepDecorator.hpp
asPSOldGen.cpp asPSOldGen.hpp
asPSOldGen.cpp asPSOldGen.hpp

asPSYoungGen.hpp generationCounters.hpp
asPSYoungGen.hpp mutableSpace.hpp
asPSYoungGen.hpp objectStartArray.hpp
asPSYoungGen.hpp spaceCounters.hpp
asPSYoungGen.hpp psVirtualspace.hpp
asPSYoungGen.hpp psYoungGen.hpp
asPSYoungGen.hpp psYoungGen.hpp
asPSYoungGen.hpp spaceDecorator.hpp

asPSYoungGen.cpp gcUtil.hpp
asPSYoungGen.cpp java.hpp

@@ -68,8 +69,9 @@ asPSYoungGen.cpp oop.inline.hpp
asPSYoungGen.cpp parallelScavengeHeap.hpp
asPSYoungGen.cpp psMarkSweepDecorator.hpp
asPSYoungGen.cpp psScavenge.hpp
asPSYoungGen.cpp asPSYoungGen.hpp
asPSYoungGen.cpp psYoungGen.hpp
asPSYoungGen.cpp asPSYoungGen.hpp
asPSYoungGen.cpp psYoungGen.hpp
asPSYoungGen.cpp spaceDecorator.hpp

cardTableExtension.cpp cardTableExtension.hpp
cardTableExtension.cpp gcTaskManager.hpp

@@ -225,6 +227,7 @@ psMarkSweep.cpp psYoungGen.hpp
psMarkSweep.cpp referencePolicy.hpp
psMarkSweep.cpp referenceProcessor.hpp
psMarkSweep.cpp safepoint.hpp
psMarkSweep.cpp spaceDecorator.hpp
psMarkSweep.cpp symbolTable.hpp
psMarkSweep.cpp systemDictionary.hpp
psMarkSweep.cpp vmThread.hpp

@@ -239,6 +242,7 @@ psMarkSweepDecorator.cpp oop.inline.hpp
psMarkSweepDecorator.cpp parallelScavengeHeap.hpp
psMarkSweepDecorator.cpp psMarkSweep.hpp
psMarkSweepDecorator.cpp psMarkSweepDecorator.hpp
psMarkSweepDecorator.cpp spaceDecorator.hpp
psMarkSweepDecorator.cpp systemDictionary.hpp

psMarkSweepDecorator.hpp mutableSpace.hpp

@@ -290,6 +294,7 @@ psOldGen.cpp oop.inline.hpp
psOldGen.cpp parallelScavengeHeap.hpp
psOldGen.cpp psMarkSweepDecorator.hpp
psOldGen.cpp psOldGen.hpp
psOldGen.cpp spaceDecorator.hpp

psOldGen.hpp psGenerationCounters.hpp
psOldGen.hpp mutableSpace.hpp

@@ -351,6 +356,7 @@ psScavenge.cpp psTasks.hpp
psScavenge.cpp referencePolicy.hpp
psScavenge.cpp referenceProcessor.hpp
psScavenge.cpp resourceArea.hpp
psScavenge.cpp spaceDecorator.hpp
psScavenge.cpp threadCritical.hpp
psScavenge.cpp vmThread.hpp
psScavenge.cpp vm_operations.hpp

@@ -409,8 +415,8 @@ psVirtualspace.hpp virtualspace.hpp

psVirtualspace.cpp os.hpp
psVirtualspace.cpp os_<os_family>.inline.hpp
psVirtualspace.cpp psVirtualspace.hpp
psVirtualspace.cpp virtualspace.hpp
psVirtualspace.cpp psVirtualspace.hpp
psVirtualspace.cpp virtualspace.hpp

psYoungGen.cpp gcUtil.hpp
psYoungGen.cpp java.hpp

@@ -419,7 +425,8 @@ psYoungGen.cpp parallelScavengeHeap.hpp
psYoungGen.cpp psMarkSweepDecorator.hpp
psYoungGen.cpp psScavenge.hpp
psYoungGen.cpp psYoungGen.hpp
psYoungGen.cpp mutableNUMASpace.hpp
psYoungGen.cpp mutableNUMASpace.hpp
psYoungGen.cpp spaceDecorator.hpp

psYoungGen.hpp psGenerationCounters.hpp
psYoungGen.hpp mutableSpace.hpp
@@ -56,6 +56,7 @@ markSweep.inline.hpp psParallelCompact.hpp
mutableNUMASpace.cpp mutableNUMASpace.hpp
mutableNUMASpace.cpp oop.inline.hpp
mutableNUMASpace.cpp sharedHeap.hpp
mutableNUMASpace.cpp spaceDecorator.hpp
mutableNUMASpace.cpp thread_<os_family>.inline.hpp

mutableNUMASpace.hpp mutableSpace.hpp

@@ -64,6 +65,7 @@ mutableNUMASpace.hpp gcUtil.hpp
mutableSpace.cpp mutableSpace.hpp
mutableSpace.cpp oop.inline.hpp
mutableSpace.cpp safepoint.hpp
mutableSpace.cpp spaceDecorator.hpp
mutableSpace.cpp thread.hpp

spaceCounters.cpp resourceArea.hpp
@@ -162,10 +162,9 @@ bool ASParNewGeneration::resize_generation(size_t eden_size,
// Grow the generation
size_t change = desired_size - orig_size;
assert(change % alignment == 0, "just checking");
if (!virtual_space()->expand_by(change)) {
if (expand(change)) {
return false; // Error if we fail to resize!
}

size_changed = true;
} else if (desired_size < orig_size) {
size_t desired_change = orig_size - desired_size;

@@ -222,7 +221,9 @@ void ASParNewGeneration::reset_survivors_after_shrink() {
// Was there a shrink of the survivor space?
if (new_end < to()->end()) {
MemRegion mr(to()->bottom(), new_end);
to()->initialize(mr, false /* clear */);
to()->initialize(mr,
SpaceDecorator::DontClear,
SpaceDecorator::DontMangle);
}
}
}

@@ -322,9 +323,7 @@ void ASParNewGeneration::resize_spaces(size_t requested_eden_size,
pointer_delta(from_start, eden_start, sizeof(char)));
}

// tty->print_cr("eden_size before: " SIZE_FORMAT, eden_size);
eden_size = align_size_down(eden_size, alignment);
// tty->print_cr("eden_size after: " SIZE_FORMAT, eden_size);
eden_end = eden_start + eden_size;
assert(eden_end >= eden_start, "addition overflowed")

@@ -501,11 +500,31 @@ void ASParNewGeneration::resize_spaces(size_t requested_eden_size,
size_t old_from = from()->capacity();
size_t old_to = to()->capacity();

// If not clearing the spaces, do some checking to verify that
// the spaces are already mangled.

// Must check mangling before the spaces are reshaped. Otherwise,
// the bottom or end of one space may have moved into another
// a failure of the check may not correctly indicate which space
// is not properly mangled.
if (ZapUnusedHeapArea) {
HeapWord* limit = (HeapWord*) virtual_space()->high();
eden()->check_mangled_unused_area(limit);
from()->check_mangled_unused_area(limit);
to()->check_mangled_unused_area(limit);
}

// The call to initialize NULL's the next compaction space
eden()->initialize(edenMR, true);
eden()->initialize(edenMR,
SpaceDecorator::Clear,
SpaceDecorator::DontMangle);
eden()->set_next_compaction_space(from());
to()->initialize(toMR , true);
from()->initialize(fromMR, false); // Note, not cleared!
to()->initialize(toMR ,
SpaceDecorator::Clear,
SpaceDecorator::DontMangle);
from()->initialize(fromMR,
SpaceDecorator::DontClear,
SpaceDecorator::DontMangle);

assert(from()->top() == old_from_top, "from top changed!");
|
|||
SpecializationStats::clear();
|
||||
|
||||
age_table()->clear();
|
||||
to()->clear();
|
||||
to()->clear(SpaceDecorator::Mangle);
|
||||
|
||||
gch->save_marks();
|
||||
assert(workers != NULL, "Need parallel worker threads.");
|
||||
|
@ -793,8 +793,18 @@ void ParNewGeneration::collect(bool full,
|
|||
}
|
||||
if (!promotion_failed()) {
|
||||
// Swap the survivor spaces.
|
||||
eden()->clear();
|
||||
from()->clear();
|
||||
eden()->clear(SpaceDecorator::Mangle);
|
||||
from()->clear(SpaceDecorator::Mangle);
|
||||
if (ZapUnusedHeapArea) {
|
||||
// This is now done here because of the piece-meal mangling which
|
||||
// can check for valid mangling at intermediate points in the
|
||||
// collection(s). When a minor collection fails to collect
|
||||
// sufficient space resizing of the young generation can occur
|
||||
// an redistribute the spaces in the young generation. Mangle
|
||||
// here so that unzapped regions don't get distributed to
|
||||
// other spaces.
|
||||
to()->mangle_unused_area();
|
||||
}
|
||||
swap_spaces();
|
||||
|
||||
assert(to()->is_empty(), "to space should be empty now");
|
||||
|
|
|
@@ -170,9 +170,20 @@ bool ASPSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
if (desired_size > orig_size) {
// Grow the generation
size_t change = desired_size - orig_size;
HeapWord* prev_low = (HeapWord*) virtual_space()->low();
if (!virtual_space()->expand_by(change)) {
return false;
}
if (ZapUnusedHeapArea) {
// Mangle newly committed space immediately because it
// can be done here more simply that after the new
// spaces have been computed.
HeapWord* new_low = (HeapWord*) virtual_space()->low();
assert(new_low < prev_low, "Did not grow");

MemRegion mangle_region(new_low, prev_low);
SpaceMangler::mangle_region(mangle_region);
}
size_changed = true;
} else if (desired_size < orig_size) {
size_t desired_change = orig_size - desired_size;

@@ -215,8 +226,10 @@ bool ASPSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
// current implementation does not allow holes between the spaces
// _young_generation_boundary has to be reset because it changes.
// so additional verification

void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
size_t requested_survivor_size) {
assert(UseAdaptiveSizePolicy, "sanity check");
assert(requested_eden_size > 0 && requested_survivor_size > 0,
"just checking");

@@ -276,22 +289,42 @@ void ASPSYoungGen::resize_spaces(size_t requested_eden_size,

ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
const size_t alignment = heap->intra_heap_alignment();
const bool maintain_minimum =
(requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();

bool eden_from_to_order = from_start < to_start;
// Check whether from space is below to space
if (from_start < to_start) {
if (eden_from_to_order) {
// Eden, from, to

if (PrintAdaptiveSizePolicy && Verbose) {
gclog_or_tty->print_cr(" Eden, from, to:");
}

// Set eden
// Compute how big eden can be, then adjust end.
// See comment in PSYoungGen::resize_spaces() on
// calculating eden_end.
const size_t eden_size = MIN2(requested_eden_size,
pointer_delta(from_start,
eden_start,
sizeof(char)));
// "requested_eden_size" is a goal for the size of eden
// and may not be attainable. "eden_size" below is
// calculated based on the location of from-space and
// the goal for the size of eden. from-space is
// fixed in place because it contains live data.
// The calculation is done this way to avoid 32bit
// overflow (i.e., eden_start + requested_eden_size
// may too large for representation in 32bits).
size_t eden_size;
if (maintain_minimum) {
// Only make eden larger than the requested size if
// the minimum size of the generation has to be maintained.
// This could be done in general but policy at a higher
// level is determining a requested size for eden and that
// should be honored unless there is a fundamental reason.
eden_size = pointer_delta(from_start,
eden_start,
sizeof(char));
} else {
eden_size = MIN2(requested_eden_size,
pointer_delta(from_start, eden_start, sizeof(char)));
}

eden_end = eden_start + eden_size;
assert(eden_end >= eden_start, "addition overflowed")

@@ -371,12 +404,14 @@ void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
to_start = MAX2(to_start, eden_start + alignment);

// Compute how big eden can be, then adjust end.
// See comment in PSYoungGen::resize_spaces() on
// calculating eden_end.
const size_t eden_size = MIN2(requested_eden_size,
pointer_delta(to_start,
eden_start,
sizeof(char)));
// See comments above on calculating eden_end.
size_t eden_size;
if (maintain_minimum) {
eden_size = pointer_delta(to_start, eden_start, sizeof(char));
} else {
eden_size = MIN2(requested_eden_size,
pointer_delta(to_start, eden_start, sizeof(char)));
}
eden_end = eden_start + eden_size;
assert(eden_end >= eden_start, "addition overflowed")

@@ -423,9 +458,47 @@ void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
size_t old_from = from_space()->capacity_in_bytes();
size_t old_to = to_space()->capacity_in_bytes();

eden_space()->initialize(edenMR, true);
to_space()->initialize(toMR , true);
from_space()->initialize(fromMR, false); // Note, not cleared!
if (ZapUnusedHeapArea) {
// NUMA is a special case because a numa space is not mangled
// in order to not prematurely bind its address to memory to
// the wrong memory (i.e., don't want the GC thread to first
// touch the memory). The survivor spaces are not numa
// spaces and are mangled.
if (UseNUMA) {
if (eden_from_to_order) {
mangle_survivors(from_space(), fromMR, to_space(), toMR);
} else {
mangle_survivors(to_space(), toMR, from_space(), fromMR);
}
}

// If not mangling the spaces, do some checking to verify that
// the spaces are already mangled.
// The spaces should be correctly mangled at this point so
// do some checking here. Note that they are not being mangled
// in the calls to initialize().
// Must check mangling before the spaces are reshaped. Otherwise,
// the bottom or end of one space may have moved into an area
// covered by another space and a failure of the check may
// not correctly indicate which space is not properly mangled.

HeapWord* limit = (HeapWord*) virtual_space()->high();
eden_space()->check_mangled_unused_area(limit);
from_space()->check_mangled_unused_area(limit);
to_space()->check_mangled_unused_area(limit);
}
// When an existing space is being initialized, it is not
// mangled because the space has been previously mangled.
eden_space()->initialize(edenMR,
SpaceDecorator::Clear,
SpaceDecorator::DontMangle);
to_space()->initialize(toMR,
SpaceDecorator::Clear,
SpaceDecorator::DontMangle);
from_space()->initialize(fromMR,
SpaceDecorator::DontClear,
SpaceDecorator::DontMangle);

PSScavenge::set_young_generation_boundary(eden_space()->bottom());

assert(from_space()->top() == old_from_top, "from top changed!");

@@ -446,7 +519,6 @@ void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
}
space_invariants();
}

void ASPSYoungGen::reset_after_change() {
assert_locked_or_safepoint(Heap_lock);

@@ -458,7 +530,9 @@ void ASPSYoungGen::reset_after_change() {
HeapWord* eden_bottom = eden_space()->bottom();
if (new_eden_bottom != eden_bottom) {
MemRegion eden_mr(new_eden_bottom, eden_space()->end());
eden_space()->initialize(eden_mr, true);
eden_space()->initialize(eden_mr,
SpaceDecorator::Clear,
SpaceDecorator::Mangle);
PSScavenge::set_young_generation_boundary(eden_space()->bottom());
}
MemRegion cmr((HeapWord*)virtual_space()->low(),
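The resize_spaces() hunks above replace the "eden_start + requested_eden_size" style of arithmetic with a size computed from the fixed from-space boundary, because the addition can overflow a 32-bit address. A hedged sketch of that overflow-safe pattern (the names here are illustrative, not the HotSpot ones):

#include <algorithm>
#include <cstddef>

// Clamp the eden goal against the room actually available below from-space,
// without ever forming eden_start + requested (which could wrap on 32-bit).
static std::size_t safe_eden_size(const char* eden_start, const char* from_start,
                                  std::size_t requested, bool maintain_minimum) {
  std::size_t room = (std::size_t)(from_start - eden_start);  // pointer_delta in bytes
  return maintain_minimum ? room                              // take all the room
                          : std::min(requested, room);
}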
@@ -666,9 +666,9 @@ void CardTableExtension::resize_commit_uncommit(int changed_region,

HeapWord* new_end_for_commit =
MIN2(cur_committed.end(), _guard_region.start());
MemRegion new_committed =
MemRegion(new_start_aligned, new_end_for_commit);
if(!new_committed.is_empty()) {
if(new_start_aligned < new_end_for_commit) {
MemRegion new_committed =
MemRegion(new_start_aligned, new_end_for_commit);
if (!os::commit_memory((char*)new_committed.start(),
new_committed.byte_size())) {
vm_exit_out_of_memory(new_committed.byte_size(),
@@ -938,3 +938,23 @@ void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
// Delegate the resize to the generation.
_old_gen->resize(desired_free_space);
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
if (ZapUnusedHeapArea) {
young_gen()->record_spaces_top();
old_gen()->record_spaces_top();
perm_gen()->record_spaces_top();
}
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
if (ZapUnusedHeapArea) {
young_gen()->eden_space()->mangle_unused_area();
young_gen()->to_space()->mangle_unused_area();
young_gen()->from_space()->mangle_unused_area();
old_gen()->object_space()->mangle_unused_area();
perm_gen()->object_space()->mangle_unused_area();
}
}
#endif
@@ -213,6 +213,12 @@ class ParallelScavengeHeap : public CollectedHeap {
// Resize the old generation. The reserved space for the
// generation may be expanded in preparation for the resize.
void resize_old_gen(size_t desired_free_space);

// Save the tops of the spaces in all generations
void record_gen_tops_before_GC() PRODUCT_RETURN;

// Mangle the unused parts of all spaces in the heap
void gen_mangle_unused_area() PRODUCT_RETURN;
};

inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val)
@@ -98,6 +98,9 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
// Increment the invocation count
heap->increment_total_collections(true /* full */);

// Save information needed to minimize mangling
heap->record_gen_tops_before_GC();

// We need to track unique mark sweep invocations as well.
_total_invocations++;

@@ -188,6 +191,12 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {

deallocate_stacks();

if (ZapUnusedHeapArea) {
// Do a complete mangle (top to end) because the usage for
// scratch does not maintain a top pointer.
young_gen->to_space()->mangle_unused_area_complete();
}

eden_empty = young_gen->eden_space()->is_empty();
if (!eden_empty) {
eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);

@@ -198,7 +207,7 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
Universe::update_heap_info_at_gc();

survivors_empty = young_gen->from_space()->is_empty() &&
young_gen->to_space()->is_empty();
young_gen->to_space()->is_empty();
young_gen_empty = eden_empty && survivors_empty;

BarrierSet* bs = heap->barrier_set();

@@ -344,6 +353,11 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
perm_gen->verify_object_start_array();
}

if (ZapUnusedHeapArea) {
old_gen->object_space()->check_mangled_unused_area_complete();
perm_gen->object_space()->check_mangled_unused_area_complete();
}

NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

if (PrintHeapAtGC) {
@@ -438,5 +438,7 @@ void PSMarkSweepDecorator::compact(bool mangle_free_space ) {
"should point inside space");
space()->set_top(compaction_top());

if (mangle_free_space) space()->mangle_unused_area();
if (mangle_free_space) {
space()->mangle_unused_area();
}
}
@@ -87,6 +87,15 @@ void PSOldGen::initialize_work(const char* perf_data_name, int level) {

MemRegion cmr((HeapWord*)virtual_space()->low(),
(HeapWord*)virtual_space()->high());
if (ZapUnusedHeapArea) {
// Mangle newly committed space immediately rather than
// waiting for the initialization of the space even though
// mangling is related to spaces. Doing it here eliminates
// the need to carry along information that a complete mangling
// (bottom to end) needs to be done.
SpaceMangler::mangle_region(cmr);
}

Universe::heap()->barrier_set()->resize_covered_region(cmr);

CardTableModRefBS* _ct = (CardTableModRefBS*)Universe::heap()->barrier_set();

@@ -112,7 +121,9 @@ void PSOldGen::initialize_work(const char* perf_data_name, int level) {
if (_object_space == NULL)
vm_exit_during_initialization("Could not allocate an old gen space");

object_space()->initialize(cmr, true);
object_space()->initialize(cmr,
SpaceDecorator::Clear,
SpaceDecorator::Mangle);

_object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);

@@ -204,10 +215,22 @@ HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
}

void PSOldGen::expand(size_t bytes) {
if (bytes == 0) {
return;
}
MutexLocker x(ExpandHeap_lock);
const size_t alignment = virtual_space()->alignment();
size_t aligned_bytes = align_size_up(bytes, alignment);
size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment);
if (aligned_bytes == 0){
// The alignment caused the number of bytes to wrap. An expand_by(0) will
// return true with the implication that and expansion was done when it
// was not. A call to expand implies a best effort to expand by "bytes"
// but not a guarantee. Align down to give a best effort. This is likely
// the most that the generation can expand since it has some capacity to
// start with.
aligned_bytes = align_size_down(bytes, alignment);
}

bool success = false;
if (aligned_expand_bytes > aligned_bytes) {

@@ -220,8 +243,8 @@ void PSOldGen::expand(size_t bytes) {
success = expand_to_reserved();
}

if (GC_locker::is_active()) {
if (PrintGC && Verbose) {
if (PrintGC && Verbose) {
if (success && GC_locker::is_active()) {
gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
}
}

@@ -230,8 +253,24 @@ void PSOldGen::expand(size_t bytes) {
bool PSOldGen::expand_by(size_t bytes) {
assert_lock_strong(ExpandHeap_lock);
assert_locked_or_safepoint(Heap_lock);
if (bytes == 0) {
return true; // That's what virtual_space()->expand_by(0) would return
}
bool result = virtual_space()->expand_by(bytes);
if (result) {
if (ZapUnusedHeapArea) {
// We need to mangle the newly expanded area. The memregion spans
// end -> new_end, we assume that top -> end is already mangled.
// Do the mangling before post_resize() is called because
// the space is available for allocation after post_resize();
HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
assert(object_space()->end() < virtual_space_high,
"Should be true before post_resize()");
MemRegion mangle_region(object_space()->end(), virtual_space_high);
// Note that the object space has not yet been updated to
// coincede with the new underlying virtual space.
SpaceMangler::mangle_region(mangle_region);
}
post_resize();
if (UsePerfData) {
_space_counters->update_capacity();

@@ -348,16 +387,7 @@ void PSOldGen::post_resize() {
start_array()->set_covered_region(new_memregion);
Universe::heap()->barrier_set()->resize_covered_region(new_memregion);

// Did we expand?
HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
if (object_space()->end() < virtual_space_high) {
// We need to mangle the newly expanded area. The memregion spans
// end -> new_end, we assume that top -> end is already mangled.
// This cannot be safely tested for, as allocation may be taking
// place.
MemRegion mangle_region(object_space()->end(), virtual_space_high);
object_space()->mangle_region(mangle_region);
}

// ALWAYS do this last!!
object_space()->set_end(virtual_space_high);

@@ -462,3 +492,10 @@ void PSOldGen::verify_object_start_array() {
VerifyObjectStartArrayClosure check( this, &_start_array );
object_iterate(&check);
}

#ifndef PRODUCT
void PSOldGen::record_spaces_top() {
assert(ZapUnusedHeapArea, "Not mangling unused space");
object_space()->set_top_for_allocations();
}
#endif
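PSOldGen::expand() above rounds the request with align_size_up and falls back to align_size_down when the round-up wraps to zero. For reference, power-of-two alignment helpers in that style (a sketch, not the HotSpot definitions):

#include <cstddef>

// "alignment" must be a power of two.
static inline std::size_t align_up(std::size_t n, std::size_t alignment) {
  return (n + alignment - 1) & ~(alignment - 1);   // wraps to 0 when n is near SIZE_MAX
}
static inline std::size_t align_down(std::size_t n, std::size_t alignment) {
  return n & ~(alignment - 1);
}
// The hunk's guard: if align_up wrapped to zero, use align_down instead, so the
// expansion stays a best effort rather than silently becoming a no-op that
// appears to succeed.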
@@ -185,4 +185,8 @@ class PSOldGen : public CHeapObj {

// Printing support
virtual const char* name() const { return _name; }

// Debugging support
// Save the tops of all spaces for later use during mangling.
void record_spaces_top() PRODUCT_RETURN;
};
@@ -200,8 +200,8 @@ void PSParallelCompact::print_chunk_ranges()
for (unsigned int id = 0; id < last_space_id; ++id) {
const MutableSpace* space = _space_info[id].space();
tty->print_cr("%u %s "
SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10") " "
SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10") " ",
SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " "
SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ",
id, space_names[id],
summary_data().addr_to_chunk_idx(space->bottom()),
summary_data().addr_to_chunk_idx(space->top()),

@@ -213,8 +213,8 @@ void PSParallelCompact::print_chunk_ranges()
void
print_generic_summary_chunk(size_t i, const ParallelCompactData::ChunkData* c)
{
#define CHUNK_IDX_FORMAT SIZE_FORMAT_W("7")
#define CHUNK_DATA_FORMAT SIZE_FORMAT_W("5")
#define CHUNK_IDX_FORMAT SIZE_FORMAT_W(7)
#define CHUNK_DATA_FORMAT SIZE_FORMAT_W(5)

ParallelCompactData& sd = PSParallelCompact::summary_data();
size_t dci = c->destination() ? sd.addr_to_chunk_idx(c->destination()) : 0;

@@ -269,9 +269,9 @@ print_initial_summary_chunk(size_t i,
const ParallelCompactData::ChunkData* c,
bool newline = true)
{
tty->print(SIZE_FORMAT_W("5") " " PTR_FORMAT " "
SIZE_FORMAT_W("5") " " SIZE_FORMAT_W("5") " "
SIZE_FORMAT_W("5") " " SIZE_FORMAT_W("5") " %d",
tty->print(SIZE_FORMAT_W(5) " " PTR_FORMAT " "
SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " "
SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
i, c->destination(),
c->partial_obj_size(), c->live_obj_size(),
c->data_size(), c->source_chunk(), c->destination_count());

@@ -326,7 +326,7 @@ print_initial_summary_data(ParallelCompactData& summary_data,
}

print_initial_summary_chunk(i, c, false);
tty->print_cr(" %12.10f " SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10"),
tty->print_cr(" %12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10),
reclaimed_ratio, dead_to_right, live_to_right);

live_to_right -= c->data_size();

@@ -338,8 +338,8 @@ print_initial_summary_data(ParallelCompactData& summary_data,
print_initial_summary_chunk(i, summary_data.chunk(i));
}

tty->print_cr("max: " SIZE_FORMAT_W("4") " d2r=" SIZE_FORMAT_W("10") " "
"l2r=" SIZE_FORMAT_W("10") " max_ratio=%14.12f",
tty->print_cr("max: " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " "
"l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f",
max_reclaimed_ratio_chunk, max_dead_to_right,
max_live_to_right, max_reclaimed_ratio);
}

@@ -1060,6 +1060,10 @@ void PSParallelCompact::post_compact()

ref_processor()->enqueue_discovered_references(NULL);

if (ZapUnusedHeapArea) {
heap->gen_mangle_unused_area();
}

// Update time of last GC
reset_millis_since_last_gc();
}

@@ -1119,8 +1123,8 @@ PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
HeapWord* chunk_destination = cp->destination();
const size_t cur_deadwood = pointer_delta(dense_prefix, chunk_destination);
if (TraceParallelOldGCDensePrefix && Verbose) {
tty->print_cr("c#=" SIZE_FORMAT_W("04") " dst=" PTR_FORMAT " "
"dp=" SIZE_FORMAT_W("08") " " "cdw=" SIZE_FORMAT_W("08"),
tty->print_cr("c#=" SIZE_FORMAT_W(4) " dst=" PTR_FORMAT " "
"dp=" SIZE_FORMAT_W(8) " " "cdw=" SIZE_FORMAT_W(8),
sd.chunk(cp), chunk_destination,
dense_prefix, cur_deadwood);
}

@@ -1145,7 +1149,7 @@ PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
return dense_prefix;
}
if (TraceParallelOldGCDensePrefix && Verbose) {
tty->print_cr("backing up from c=" SIZE_FORMAT_W("4") " d2r=%10.8f "
tty->print_cr("backing up from c=" SIZE_FORMAT_W(4) " d2r=%10.8f "
"pc_d2r=%10.8f", sd.chunk(cp), density_to_right,
prev_chunk_density_to_right);
}

@@ -1182,7 +1186,7 @@ void PSParallelCompact::print_dense_prefix_stats(const char* const algorithm,
const size_t live_to_right = new_top - cp->destination();
const size_t dead_to_right = space->top() - addr - live_to_right;

tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W("05") " "
tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W(5) " "
"spl=" SIZE_FORMAT " "
"d2l=" SIZE_FORMAT " d2l%%=%6.4f "
"d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT

@@ -1522,48 +1526,53 @@ void
PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
{
assert(id < last_space_id, "id out of range");
assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom(),
"should have been set in summarize_spaces_quick()");

const MutableSpace* space = _space_info[id].space();
HeapWord** new_top_addr = _space_info[id].new_top_addr();

HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction);
_space_info[id].set_dense_prefix(dense_prefix_end);
if (_space_info[id].new_top() != space->bottom()) {
HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction);
_space_info[id].set_dense_prefix(dense_prefix_end);

#ifndef PRODUCT
if (TraceParallelOldGCDensePrefix) {
print_dense_prefix_stats("ratio", id, maximum_compaction, dense_prefix_end);
HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction);
print_dense_prefix_stats("density", id, maximum_compaction, addr);
}
if (TraceParallelOldGCDensePrefix) {
print_dense_prefix_stats("ratio", id, maximum_compaction,
dense_prefix_end);
HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction);
print_dense_prefix_stats("density", id, maximum_compaction, addr);
}
#endif // #ifndef PRODUCT

// If dead space crosses the dense prefix boundary, it is (at least partially)
// filled with a dummy object, marked live and added to the summary data.
// This simplifies the copy/update phase and must be done before the final
// locations of objects are determined, to prevent leaving a fragment of dead
// space that is too small to fill with an object.
if (!maximum_compaction && dense_prefix_end != space->bottom()) {
fill_dense_prefix_end(id);
}
// If dead space crosses the dense prefix boundary, it is (at least
// partially) filled with a dummy object, marked live and added to the
// summary data. This simplifies the copy/update phase and must be done
// before the final locations of objects are determined, to prevent leaving
// a fragment of dead space that is too small to fill with an object.
if (!maximum_compaction && dense_prefix_end != space->bottom()) {
fill_dense_prefix_end(id);
}

// Compute the destination of each Chunk, and thus each object.
_summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
_summary_data.summarize(dense_prefix_end, space->end(),
dense_prefix_end, space->top(),
new_top_addr);
// Compute the destination of each Chunk, and thus each object.
_summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
_summary_data.summarize(dense_prefix_end, space->end(),
dense_prefix_end, space->top(),
_space_info[id].new_top_addr());
}

if (TraceParallelOldGCSummaryPhase) {
const size_t chunk_size = ParallelCompactData::ChunkSize;
HeapWord* const dense_prefix_end = _space_info[id].dense_prefix();
const size_t dp_chunk = _summary_data.addr_to_chunk_idx(dense_prefix_end);
const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom());
const HeapWord* nt_aligned_up = _summary_data.chunk_align_up(*new_top_addr);
HeapWord* const new_top = _space_info[id].new_top();
const HeapWord* nt_aligned_up = _summary_data.chunk_align_up(new_top);
const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
tty->print_cr("id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
"dp_chunk=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
"cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
id, space->capacity_in_words(), dense_prefix_end,
dp_chunk, dp_words / chunk_size,
cr_words / chunk_size, *new_top_addr);
cr_words / chunk_size, new_top);
}
}

@@ -1632,7 +1641,7 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
const size_t live = pointer_delta(_space_info[id].new_top(),
space->bottom());
const size_t available = pointer_delta(target_space_end, *new_top_addr);
if (live <= available) {
if (live > 0 && live <= available) {
// All the live data will fit.
if (TraceParallelOldGCSummaryPhase) {
tty->print_cr("summarizing %d into old_space @ " PTR_FORMAT,

@@ -1642,16 +1651,18 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
space->bottom(), space->top(),
new_top_addr);

// Reset the new_top value for the space.
_space_info[id].set_new_top(space->bottom());

// Clear the source_chunk field for each chunk in the space.
HeapWord* const new_top = _space_info[id].new_top();
HeapWord* const clear_end = _summary_data.chunk_align_up(new_top);
ChunkData* beg_chunk = _summary_data.addr_to_chunk_ptr(space->bottom());
ChunkData* end_chunk = _summary_data.addr_to_chunk_ptr(space->top() - 1);
while (beg_chunk <= end_chunk) {
ChunkData* end_chunk = _summary_data.addr_to_chunk_ptr(clear_end);
while (beg_chunk < end_chunk) {
beg_chunk->set_source_chunk(0);
++beg_chunk;
}

// Reset the new_top value for the space.
_space_info[id].set_new_top(space->bottom());
}
}

@@ -1961,6 +1972,11 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
PSPermGen* perm_gen = heap->perm_gen();
PSAdaptiveSizePolicy* size_policy = heap->size_policy();

if (ZapUnusedHeapArea) {
// Save information needed to minimize mangling
heap->record_gen_tops_before_GC();
}

_print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes;

// Make sure data structures are sane, make the heap parsable, and do other

@@ -2129,17 +2145,19 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
size_t max_eden_size = young_gen->max_size() -
young_gen->from_space()->capacity_in_bytes() -
young_gen->to_space()->capacity_in_bytes();
size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
young_gen->eden_space()->used_in_bytes(),
old_gen->used_in_bytes(),
perm_gen->used_in_bytes(),
young_gen->eden_space()->capacity_in_bytes(),
old_gen->max_gen_size(),
max_eden_size,
true /* full gc*/,
gc_cause);
size_policy->compute_generation_free_space(
young_gen->used_in_bytes(),
young_gen->eden_space()->used_in_bytes(),
old_gen->used_in_bytes(),
perm_gen->used_in_bytes(),
young_gen->eden_space()->capacity_in_bytes(),
old_gen->max_gen_size(),
max_eden_size,
true /* full gc*/,
gc_cause);

heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());
heap->resize_old_gen(
size_policy->calculated_old_free_size_in_bytes());

// Don't resize the young generation at an major collection. A
// desired young generation size may have been calculated but

@@ -2212,6 +2230,11 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
perm_gen->verify_object_start_array();
}

if (ZapUnusedHeapArea) {
old_gen->object_space()->check_mangled_unused_area_complete();
perm_gen->object_space()->check_mangled_unused_area_complete();
}

NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

collection_exit.update();

@@ -2499,7 +2522,7 @@ void PSParallelCompact::enqueue_chunk_draining_tasks(GCTaskQueue* q,
if (TraceParallelOldGCCompactionPhase && Verbose) {
const size_t count_mod_8 = fillable_chunks & 7;
if (count_mod_8 == 0) gclog_or_tty->print("fillable: ");
gclog_or_tty->print(" " SIZE_FORMAT_W("7"), cur);
gclog_or_tty->print(" " SIZE_FORMAT_W(7), cur);
if (count_mod_8 == 7) gclog_or_tty->cr();
}
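A recurring change in this file is SIZE_FORMAT_W("10") becoming SIZE_FORMAT_W(10): the width is now passed as a bare token, which suggests the macro stringizes it itself. A generic illustration of that technique (not the actual HotSpot macro):

#include <cstdio>
#include <cstddef>

// The # operator turns the bare width argument into a string literal, so the
// pieces concatenate into one format string at compile time.
#define FMT_SIZE_W(width) "%" #width "zu"

int main() {
  std::size_t n = 42;
  std::printf(FMT_SIZE_W(10) "\n", n);   // expands to "%10zu\n"
  return 0;
}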
@ -716,6 +716,99 @@ class BitBlockUpdateClosure: public ParMarkBitMapClosure {
|
|||
virtual IterationStatus do_addr(HeapWord* addr, size_t words);
|
||||
};
|
||||
|
||||
// The UseParallelOldGC collector is a stop-the-world garbage
|
||||
// collector that does parts of the collection using parallel threads.
|
||||
// The collection includes the tenured generation and the young
|
||||
// generation. The permanent generation is collected at the same
// time as the other two generations but the permanent generation
// is collected by a single GC thread. The permanent generation is
// collected serially because of the requirement that during the
// processing of a klass AAA, any objects referenced by AAA must
// already have been processed. This requirement is enforced by
// a left (lower address) to right (higher address) sliding compaction.
//
// There are four phases of the collection.
//
// - marking phase
// - summary phase
// - compacting phase
// - clean up phase
//
// Roughly speaking these phases correspond, respectively, to
// - mark all the live objects
// - calculate the destination of each object at the end of the collection
// - move the objects to their destination
// - update some references and reinitialize some variables
//
// These phases are invoked in PSParallelCompact::invoke_no_policy().
// The marking phase is implemented in PSParallelCompact::marking_phase()
// and does a complete marking of the heap.
// The summary phase is implemented in PSParallelCompact::summary_phase().
// The move and update phase is implemented in PSParallelCompact::compact().
//
// A space that is being collected is divided into chunks and with
// each chunk is associated an object of type ParallelCompactData.
// Each chunk is of a fixed size and typically will contain more than
// 1 object and may have parts of objects at the front and back of the
// chunk.
//
// chunk            -----+---------------------+----------
// objects covered  [ AAA )[ BBB )[ CCC )[ DDD )
//
// The marking phase does a complete marking of all live objects in the
// heap. The marking also compiles the size of the data for
// all live objects covered by the chunk. This size includes the
// part of any live object spanning onto the chunk (part of AAA
// if it is live) from the front, all live objects contained in the chunk
// (BBB and/or CCC if they are live), and the part of any live objects
// covered by the chunk that extends off the chunk (part of DDD if it is
// live). The marking phase uses multiple GC threads and marking is
// done in a bit array of type ParMarkBitMap. The marking of the
// bit map is done atomically as is the accumulation of the size of the
// live objects covered by a chunk.
//
// The summary phase calculates the total live data to the left of
// each chunk XXX. Based on that total and the bottom of the space,
// it can calculate the starting location of the live data in XXX.
// The summary phase calculates for each chunk XXX quantities such as
//
// - the amount of live data at the beginning of a chunk from an object
//   entering the chunk.
// - the location of the first live data on the chunk
// - a count of the number of chunks receiving live data from XXX.
//
// See ParallelCompactData for precise details. The summary phase also
// calculates the dense prefix for the compaction. The dense prefix
// is a portion at the beginning of the space that is not moved. The
// objects in the dense prefix do need to have their object references
// updated. See method summarize_dense_prefix().
//
// The summary phase is done using 1 GC thread.
//
// The compaction phase moves objects to their new location and updates
// all references in the object.
//
// A current exception is that objects that cross a chunk boundary
// are moved but do not have their references updated. References are
// not updated because it cannot easily be determined if the klass
// pointer KKK for the object AAA has been updated. KKK likely resides
// in a chunk to the left of the chunk containing AAA. These AAA's
// have their references updated at the end in a clean up phase.
// See the method PSParallelCompact::update_deferred_objects(). An
// alternate strategy is being investigated for this deferral of updating.
//
// Compaction is done on a chunk basis. A chunk that is ready to be
// filled is put on a ready list and GC threads take chunks off the list
// and fill them. A chunk is ready to be filled if it is empty of live
// objects. Such a chunk may have been initially empty (only contained
// dead objects) or may have had all its live objects copied out already.
// A chunk that compacts into itself is also ready for filling. The
// ready list is initially filled with empty chunks and chunks compacting
// into themselves. There is always at least 1 chunk that can be put on
// the ready list. The chunks are atomically added and removed from
// the ready list.
//
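To make the summary-phase computation described above concrete, here is a minimal stand-alone C++ sketch. It is not the HotSpot ParallelCompactData code; every name in it is hypothetical. Each chunk's destination offset is simply the total live data in all chunks to its left, accumulated in one pass.

#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical sketch only: given the live-data size recorded per chunk
// during marking, compute where each chunk's live data lands after
// compaction (offsets are from the space bottom, taken as 0 here).
struct ChunkSummary {
  size_t live_words;   // live data attributed to this chunk by the marking phase
  size_t dest_offset;  // where this chunk's live data will start after compaction
};

static void summarize(std::vector<ChunkSummary>& chunks) {
  size_t live_to_left = 0;
  for (ChunkSummary& c : chunks) {
    c.dest_offset = live_to_left;   // destination of this chunk's first live word
    live_to_left += c.live_words;   // running total of live data to the left
  }
}

int main() {
  std::vector<ChunkSummary> chunks = { {10, 0}, {0, 0}, {7, 0}, {3, 0} };
  summarize(chunks);
  for (size_t i = 0; i < chunks.size(); i++) {
    std::printf("chunk %zu: live=%zu dest=%zu\n",
                i, chunks[i].live_words, chunks[i].dest_offset);
  }
  return 0;
}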
class PSParallelCompact : AllStatic {
|
||||
public:
|
||||
// Convenient access to type names.
|
||||
|
|
|
@ -265,6 +265,11 @@ bool PSScavenge::invoke_no_policy() {
|
|||
young_gen->eden_space()->accumulate_statistics();
|
||||
}
|
||||
|
||||
if (ZapUnusedHeapArea) {
|
||||
// Save information needed to minimize mangling
|
||||
heap->record_gen_tops_before_GC();
|
||||
}
|
||||
|
||||
if (PrintHeapAtGC) {
|
||||
Universe::print_heap_before_gc();
|
||||
}
|
||||
|
@ -315,7 +320,7 @@ bool PSScavenge::invoke_no_policy() {
|
|||
if (!ScavengeWithObjectsInToSpace) {
|
||||
assert(young_gen->to_space()->is_empty(),
|
||||
"Attempt to scavenge with live objects in to_space");
|
||||
young_gen->to_space()->clear();
|
||||
young_gen->to_space()->clear(SpaceDecorator::Mangle);
|
||||
} else if (ZapUnusedHeapArea) {
|
||||
young_gen->to_space()->mangle_unused_area();
|
||||
}
|
||||
|
@ -437,8 +442,10 @@ bool PSScavenge::invoke_no_policy() {
|
|||
|
||||
if (!promotion_failure_occurred) {
|
||||
// Swap the survivor spaces.
|
||||
young_gen->eden_space()->clear();
|
||||
young_gen->from_space()->clear();
|
||||
|
||||
|
||||
young_gen->eden_space()->clear(SpaceDecorator::Mangle);
|
||||
young_gen->from_space()->clear(SpaceDecorator::Mangle);
|
||||
young_gen->swap_spaces();
|
||||
|
||||
size_t survived = young_gen->from_space()->used_in_bytes();
|
||||
|
@ -600,6 +607,12 @@ bool PSScavenge::invoke_no_policy() {
|
|||
Universe::print_heap_after_gc();
|
||||
}
|
||||
|
||||
if (ZapUnusedHeapArea) {
|
||||
young_gen->eden_space()->check_mangled_unused_area_complete();
|
||||
young_gen->from_space()->check_mangled_unused_area_complete();
|
||||
young_gen->to_space()->check_mangled_unused_area_complete();
|
||||
}
|
||||
|
||||
scavenge_exit.update();
|
||||
|
||||
if (PrintGCTaskTimeStamps) {
|
||||
|
|
|
@ -36,7 +36,7 @@ PSYoungGen::PSYoungGen(size_t initial_size,
|
|||
void PSYoungGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {
|
||||
assert(_init_gen_size != 0, "Should have a finite size");
|
||||
_virtual_space = new PSVirtualSpace(rs, alignment);
|
||||
if (!_virtual_space->expand_by(_init_gen_size)) {
|
||||
if (!virtual_space()->expand_by(_init_gen_size)) {
|
||||
vm_exit_during_initialization("Could not reserve enough space for "
|
||||
"object heap");
|
||||
}
|
||||
|
@ -49,13 +49,20 @@ void PSYoungGen::initialize(ReservedSpace rs, size_t alignment) {
|
|||
|
||||
void PSYoungGen::initialize_work() {
|
||||
|
||||
_reserved = MemRegion((HeapWord*)_virtual_space->low_boundary(),
|
||||
(HeapWord*)_virtual_space->high_boundary());
|
||||
_reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
|
||||
(HeapWord*)virtual_space()->high_boundary());
|
||||
|
||||
MemRegion cmr((HeapWord*)_virtual_space->low(),
|
||||
(HeapWord*)_virtual_space->high());
|
||||
MemRegion cmr((HeapWord*)virtual_space()->low(),
|
||||
(HeapWord*)virtual_space()->high());
|
||||
Universe::heap()->barrier_set()->resize_covered_region(cmr);
|
||||
|
||||
if (ZapUnusedHeapArea) {
|
||||
// Mangle newly committed space immediately because it
|
||||
// can be done here more simply than after the new
|
||||
// spaces have been computed.
|
||||
SpaceMangler::mangle_region(cmr);
|
||||
}
|
||||
|
||||
if (UseNUMA) {
|
||||
_eden_space = new MutableNUMASpace();
|
||||
} else {
|
||||
|
@ -89,7 +96,7 @@ void PSYoungGen::initialize_work() {
|
|||
// Compute maximum space sizes for performance counters
|
||||
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
|
||||
size_t alignment = heap->intra_heap_alignment();
|
||||
size_t size = _virtual_space->reserved_size();
|
||||
size_t size = virtual_space()->reserved_size();
|
||||
|
||||
size_t max_survivor_size;
|
||||
size_t max_eden_size;
|
||||
|
@ -142,7 +149,7 @@ void PSYoungGen::compute_initial_space_boundaries() {
|
|||
|
||||
// Compute sizes
|
||||
size_t alignment = heap->intra_heap_alignment();
|
||||
size_t size = _virtual_space->committed_size();
|
||||
size_t size = virtual_space()->committed_size();
|
||||
|
||||
size_t survivor_size = size / InitialSurvivorRatio;
|
||||
survivor_size = align_size_down(survivor_size, alignment);
|
||||
|
@ -164,18 +171,18 @@ void PSYoungGen::compute_initial_space_boundaries() {
|
|||
}
|
||||
|
||||
void PSYoungGen::set_space_boundaries(size_t eden_size, size_t survivor_size) {
|
||||
assert(eden_size < _virtual_space->committed_size(), "just checking");
|
||||
assert(eden_size < virtual_space()->committed_size(), "just checking");
|
||||
assert(eden_size > 0 && survivor_size > 0, "just checking");
|
||||
|
||||
// Initial layout is Eden, to, from. After swapping survivor spaces,
|
||||
// that leaves us with Eden, from, to, which is step one in our two
|
||||
// step resize-with-live-data procedure.
|
||||
char *eden_start = _virtual_space->low();
|
||||
char *eden_start = virtual_space()->low();
|
||||
char *to_start = eden_start + eden_size;
|
||||
char *from_start = to_start + survivor_size;
|
||||
char *from_end = from_start + survivor_size;
|
||||
|
||||
assert(from_end == _virtual_space->high(), "just checking");
|
||||
assert(from_end == virtual_space()->high(), "just checking");
|
||||
assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
|
||||
assert(is_object_aligned((intptr_t)to_start), "checking alignment");
|
||||
assert(is_object_aligned((intptr_t)from_start), "checking alignment");
|
||||
|
@ -184,9 +191,9 @@ void PSYoungGen::set_space_boundaries(size_t eden_size, size_t survivor_size) {
|
|||
MemRegion to_mr ((HeapWord*)to_start, (HeapWord*)from_start);
|
||||
MemRegion from_mr((HeapWord*)from_start, (HeapWord*)from_end);
|
||||
|
||||
eden_space()->initialize(eden_mr, true);
|
||||
to_space()->initialize(to_mr , true);
|
||||
from_space()->initialize(from_mr, true);
|
||||
eden_space()->initialize(eden_mr, true, ZapUnusedHeapArea);
|
||||
to_space()->initialize(to_mr , true, ZapUnusedHeapArea);
|
||||
from_space()->initialize(from_mr, true, ZapUnusedHeapArea);
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
|
@ -207,7 +214,7 @@ void PSYoungGen::space_invariants() {
|
|||
char* to_start = (char*)to_space()->bottom();
|
||||
char* to_end = (char*)to_space()->end();
|
||||
|
||||
guarantee(eden_start >= _virtual_space->low(), "eden bottom");
|
||||
guarantee(eden_start >= virtual_space()->low(), "eden bottom");
|
||||
guarantee(eden_start < eden_end, "eden space consistency");
|
||||
guarantee(from_start < from_end, "from space consistency");
|
||||
guarantee(to_start < to_end, "to space consistency");
|
||||
|
@ -217,29 +224,29 @@ void PSYoungGen::space_invariants() {
|
|||
// Eden, from, to
|
||||
guarantee(eden_end <= from_start, "eden/from boundary");
|
||||
guarantee(from_end <= to_start, "from/to boundary");
|
||||
guarantee(to_end <= _virtual_space->high(), "to end");
|
||||
guarantee(to_end <= virtual_space()->high(), "to end");
|
||||
} else {
|
||||
// Eden, to, from
|
||||
guarantee(eden_end <= to_start, "eden/to boundary");
|
||||
guarantee(to_end <= from_start, "to/from boundary");
|
||||
guarantee(from_end <= _virtual_space->high(), "from end");
|
||||
guarantee(from_end <= virtual_space()->high(), "from end");
|
||||
}
|
||||
|
||||
// More checks that the virtual space is consistent with the spaces
|
||||
assert(_virtual_space->committed_size() >=
|
||||
assert(virtual_space()->committed_size() >=
|
||||
(eden_space()->capacity_in_bytes() +
|
||||
to_space()->capacity_in_bytes() +
|
||||
from_space()->capacity_in_bytes()), "Committed size is inconsistent");
|
||||
assert(_virtual_space->committed_size() <= _virtual_space->reserved_size(),
|
||||
assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
|
||||
"Space invariant");
|
||||
char* eden_top = (char*)eden_space()->top();
|
||||
char* from_top = (char*)from_space()->top();
|
||||
char* to_top = (char*)to_space()->top();
|
||||
assert(eden_top <= _virtual_space->high(), "eden top");
|
||||
assert(from_top <= _virtual_space->high(), "from top");
|
||||
assert(to_top <= _virtual_space->high(), "to top");
|
||||
assert(eden_top <= virtual_space()->high(), "eden top");
|
||||
assert(from_top <= virtual_space()->high(), "from top");
|
||||
assert(to_top <= virtual_space()->high(), "to top");
|
||||
|
||||
_virtual_space->verify();
|
||||
virtual_space()->verify();
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -265,8 +272,8 @@ void PSYoungGen::resize(size_t eden_size, size_t survivor_size) {
|
|||
|
||||
|
||||
bool PSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
|
||||
const size_t alignment = _virtual_space->alignment();
|
||||
size_t orig_size = _virtual_space->committed_size();
|
||||
const size_t alignment = virtual_space()->alignment();
|
||||
size_t orig_size = virtual_space()->committed_size();
|
||||
bool size_changed = false;
|
||||
|
||||
// There used to be this guarantee there.
|
||||
|
@ -288,10 +295,18 @@ bool PSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
|
|||
// Grow the generation
|
||||
size_t change = desired_size - orig_size;
|
||||
assert(change % alignment == 0, "just checking");
|
||||
if (!_virtual_space->expand_by(change)) {
|
||||
HeapWord* prev_high = (HeapWord*) virtual_space()->high();
|
||||
if (!virtual_space()->expand_by(change)) {
|
||||
return false; // Error if we fail to resize!
|
||||
}
|
||||
|
||||
if (ZapUnusedHeapArea) {
|
||||
// Mangle newly committed space immediately because it
|
||||
// can be done here more simply than after the new
|
||||
// spaces have been computed.
|
||||
HeapWord* new_high = (HeapWord*) virtual_space()->high();
|
||||
MemRegion mangle_region(prev_high, new_high);
|
||||
SpaceMangler::mangle_region(mangle_region);
|
||||
}
|
||||
size_changed = true;
|
||||
} else if (desired_size < orig_size) {
|
||||
size_t desired_change = orig_size - desired_size;
|
||||
|
@ -321,19 +336,95 @@ bool PSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
|
|||
post_resize();
|
||||
|
||||
if (Verbose && PrintGC) {
|
||||
size_t current_size = _virtual_space->committed_size();
|
||||
size_t current_size = virtual_space()->committed_size();
|
||||
gclog_or_tty->print_cr("PSYoung generation size changed: "
|
||||
SIZE_FORMAT "K->" SIZE_FORMAT "K",
|
||||
orig_size/K, current_size/K);
|
||||
}
|
||||
}
|
||||
|
||||
guarantee(eden_plus_survivors <= _virtual_space->committed_size() ||
|
||||
_virtual_space->committed_size() == max_size(), "Sanity");
|
||||
guarantee(eden_plus_survivors <= virtual_space()->committed_size() ||
|
||||
virtual_space()->committed_size() == max_size(), "Sanity");
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
// In the numa case eden is not mangled so a survivor space
|
||||
// moving into a region previously occupied by a survivor
|
||||
// may find an unmangled region. Also in the PS case eden
|
||||
// to-space and from-space may not touch (i.e., there may be
|
||||
// gaps between them due to movement while resizing the
|
||||
// spaces). Those gaps must be mangled.
|
||||
void PSYoungGen::mangle_survivors(MutableSpace* s1,
|
||||
MemRegion s1MR,
|
||||
MutableSpace* s2,
|
||||
MemRegion s2MR) {
|
||||
// Check eden and gap between eden and from-space, in deciding
|
||||
// what to mangle in from-space. Check the gap between from-space
|
||||
// and to-space when deciding what to mangle.
|
||||
//
|
||||
// +--------+ +----+ +---+
|
||||
// | eden | |s1 | |s2 |
|
||||
// +--------+ +----+ +---+
|
||||
// +-------+ +-----+
|
||||
// |s1MR | |s2MR |
|
||||
// +-------+ +-----+
|
||||
// All of survivor-space is properly mangled so find the
|
||||
// upper bound on the mangling for any portion above current s1.
|
||||
HeapWord* delta_end = MIN2(s1->bottom(), s1MR.end());
|
||||
MemRegion delta1_left;
|
||||
if (s1MR.start() < delta_end) {
|
||||
delta1_left = MemRegion(s1MR.start(), delta_end);
|
||||
s1->mangle_region(delta1_left);
|
||||
}
|
||||
// Find any portion to the right of the current s1.
|
||||
HeapWord* delta_start = MAX2(s1->end(), s1MR.start());
|
||||
MemRegion delta1_right;
|
||||
if (delta_start < s1MR.end()) {
|
||||
delta1_right = MemRegion(delta_start, s1MR.end());
|
||||
s1->mangle_region(delta1_right);
|
||||
}
|
||||
|
||||
// Similarly for the second survivor space except that
|
||||
// any of the new region that overlaps with the current
|
||||
// region of the first survivor space has already been
|
||||
// mangled.
|
||||
delta_end = MIN2(s2->bottom(), s2MR.end());
|
||||
delta_start = MAX2(s2MR.start(), s1->end());
|
||||
MemRegion delta2_left;
|
||||
if (s2MR.start() < delta_end) {
|
||||
delta2_left = MemRegion(s2MR.start(), delta_end);
|
||||
s2->mangle_region(delta2_left);
|
||||
}
|
||||
delta_start = MAX2(s2->end(), s2MR.start());
|
||||
MemRegion delta2_right;
|
||||
if (delta_start < s2MR.end()) {
delta2_right = MemRegion(delta_start, s2MR.end());
s2->mangle_region(delta2_right);
|
||||
}
|
||||
|
||||
if (TraceZapUnusedHeapArea) {
|
||||
// s1
|
||||
gclog_or_tty->print_cr("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") "
|
||||
"New region: [" PTR_FORMAT ", " PTR_FORMAT ")",
|
||||
s1->bottom(), s1->end(), s1MR.start(), s1MR.end());
|
||||
gclog_or_tty->print_cr(" Mangle before: [" PTR_FORMAT ", "
|
||||
PTR_FORMAT ") Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")",
|
||||
delta1_left.start(), delta1_left.end(), delta1_right.start(),
|
||||
delta1_right.end());
|
||||
|
||||
// s2
|
||||
gclog_or_tty->print_cr("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") "
|
||||
"New region: [" PTR_FORMAT ", " PTR_FORMAT ")",
|
||||
s2->bottom(), s2->end(), s2MR.start(), s2MR.end());
|
||||
gclog_or_tty->print_cr(" Mangle before: [" PTR_FORMAT ", "
|
||||
PTR_FORMAT ") Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")",
|
||||
delta2_left.start(), delta2_left.end(), delta2_right.start(),
|
||||
delta2_right.end());
|
||||
}
|
||||
|
||||
}
|
||||
#endif // NOT PRODUCT
|
||||
|
||||
void PSYoungGen::resize_spaces(size_t requested_eden_size,
|
||||
size_t requested_survivor_size) {
|
||||
|
@ -396,9 +487,11 @@ void PSYoungGen::resize_spaces(size_t requested_eden_size,
|
|||
const bool maintain_minimum =
|
||||
(requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
|
||||
|
||||
bool eden_from_to_order = from_start < to_start;
|
||||
// Check whether from space is below to space
|
||||
if (from_start < to_start) {
|
||||
if (eden_from_to_order) {
|
||||
// Eden, from, to
|
||||
eden_from_to_order = true;
|
||||
if (PrintAdaptiveSizePolicy && Verbose) {
|
||||
gclog_or_tty->print_cr(" Eden, from, to:");
|
||||
}
|
||||
|
@ -435,7 +528,7 @@ void PSYoungGen::resize_spaces(size_t requested_eden_size,
|
|||
// extra calculations.
|
||||
|
||||
// First calculate an optimal to-space
|
||||
to_end = (char*)_virtual_space->high();
|
||||
to_end = (char*)virtual_space()->high();
|
||||
to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
|
||||
sizeof(char));
|
||||
|
||||
|
@ -491,7 +584,7 @@ void PSYoungGen::resize_spaces(size_t requested_eden_size,
|
|||
// to space as if we were able to resize from space, even though from
|
||||
// space is not modified.
|
||||
// Giving eden priority was tried and gave poorer performance.
|
||||
to_end = (char*)pointer_delta(_virtual_space->high(),
|
||||
to_end = (char*)pointer_delta(virtual_space()->high(),
|
||||
(char*)requested_survivor_size,
|
||||
sizeof(char));
|
||||
to_end = MIN2(to_end, from_start);
|
||||
|
@ -560,9 +653,45 @@ void PSYoungGen::resize_spaces(size_t requested_eden_size,
|
|||
size_t old_from = from_space()->capacity_in_bytes();
|
||||
size_t old_to = to_space()->capacity_in_bytes();
|
||||
|
||||
eden_space()->initialize(edenMR, true);
|
||||
to_space()->initialize(toMR , true);
|
||||
from_space()->initialize(fromMR, false); // Note, not cleared!
|
||||
if (ZapUnusedHeapArea) {
|
||||
// NUMA is a special case because a numa space is not mangled
|
||||
// in order to not prematurely bind its address to memory to
|
||||
// the wrong memory (i.e., don't want the GC thread to first
|
||||
// touch the memory). The survivor spaces are not numa
|
||||
// spaces and are mangled.
|
||||
if (UseNUMA) {
|
||||
if (eden_from_to_order) {
|
||||
mangle_survivors(from_space(), fromMR, to_space(), toMR);
|
||||
} else {
|
||||
mangle_survivors(to_space(), toMR, from_space(), fromMR);
|
||||
}
|
||||
}
|
||||
|
||||
// If not mangling the spaces, do some checking to verify that
|
||||
// the spaces are already mangled.
|
||||
// The spaces should be correctly mangled at this point so
|
||||
// do some checking here. Note that they are not being mangled
|
||||
// in the calls to initialize().
|
||||
// Must check mangling before the spaces are reshaped. Otherwise,
|
||||
// the bottom or end of one space may have moved into an area
|
||||
// covered by another space and a failure of the check may
|
||||
// not correctly indicate which space is not properly mangled.
|
||||
HeapWord* limit = (HeapWord*) virtual_space()->high();
|
||||
eden_space()->check_mangled_unused_area(limit);
|
||||
from_space()->check_mangled_unused_area(limit);
|
||||
to_space()->check_mangled_unused_area(limit);
|
||||
}
|
||||
// When an existing space is being initialized, it is not
|
||||
// mangled because the space has been previously mangled.
|
||||
eden_space()->initialize(edenMR,
|
||||
SpaceDecorator::Clear,
|
||||
SpaceDecorator::DontMangle);
|
||||
to_space()->initialize(toMR,
|
||||
SpaceDecorator::Clear,
|
||||
SpaceDecorator::DontMangle);
|
||||
from_space()->initialize(fromMR,
|
||||
SpaceDecorator::DontClear,
|
||||
SpaceDecorator::DontMangle);
|
||||
|
||||
assert(from_space()->top() == old_from_top, "from top changed!");
|
||||
|
||||
|
@ -671,7 +800,7 @@ void PSYoungGen::print_on(outputStream* st) const {
|
|||
st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
|
||||
capacity_in_bytes()/K, used_in_bytes()/K);
|
||||
}
|
||||
_virtual_space->print_space_boundaries_on(st);
|
||||
virtual_space()->print_space_boundaries_on(st);
|
||||
st->print(" eden"); eden_space()->print_on(st);
|
||||
st->print(" from"); from_space()->print_on(st);
|
||||
st->print(" to "); to_space()->print_on(st);
|
||||
|
@ -774,7 +903,9 @@ void PSYoungGen::reset_survivors_after_shrink() {
|
|||
// Was there a shrink of the survivor space?
|
||||
if (new_end < space_shrinking->end()) {
|
||||
MemRegion mr(space_shrinking->bottom(), new_end);
|
||||
space_shrinking->initialize(mr, false /* clear */);
|
||||
space_shrinking->initialize(mr,
|
||||
SpaceDecorator::DontClear,
|
||||
SpaceDecorator::Mangle);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -809,3 +940,12 @@ void PSYoungGen::verify(bool allow_dirty) {
|
|||
from_space()->verify(allow_dirty);
|
||||
to_space()->verify(allow_dirty);
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
void PSYoungGen::record_spaces_top() {
|
||||
assert(ZapUnusedHeapArea, "Not mangling unused space");
|
||||
eden_space()->set_top_for_allocations();
|
||||
from_space()->set_top_for_allocations();
|
||||
to_space()->set_top_for_allocations();
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -179,4 +179,12 @@ class PSYoungGen : public CHeapObj {
|
|||
|
||||
// Space boundary invariant checker
|
||||
void space_invariants() PRODUCT_RETURN;
|
||||
|
||||
// Helper for mangling survivor spaces.
|
||||
void mangle_survivors(MutableSpace* s1,
|
||||
MemRegion s1MR,
|
||||
MutableSpace* s2,
|
||||
MemRegion s2MR) PRODUCT_RETURN;
|
||||
|
||||
void record_spaces_top() PRODUCT_RETURN;
|
||||
};
|
||||
|
|
|
@ -58,6 +58,12 @@ class AdaptiveWeightedAverage : public CHeapObj {
|
|||
_average(0.0), _sample_count(0), _weight(weight), _last_sample(0.0) {
|
||||
}
|
||||
|
||||
void clear() {
|
||||
_average = 0;
|
||||
_sample_count = 0;
|
||||
_last_sample = 0;
|
||||
}
|
||||
|
||||
// Accessors
|
||||
float average() const { return _average; }
|
||||
unsigned weight() const { return _weight; }
|
||||
|
@ -115,6 +121,12 @@ class AdaptivePaddedAverage : public AdaptiveWeightedAverage {
|
|||
float deviation() const { return _deviation; }
|
||||
unsigned padding() const { return _padding; }
|
||||
|
||||
void clear() {
|
||||
AdaptiveWeightedAverage::clear();
|
||||
_padded_avg = 0;
|
||||
_deviation = 0;
|
||||
}
|
||||
|
||||
// Override
|
||||
void sample(float new_sample);
|
||||
};
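The clear() methods added above reset the statistics kept by these averages, for example when the NUMA topology changes and old allocation-rate samples become meaningless. A minimal stand-alone sketch of the idea, assuming the usual exponentially weighted update; this is an illustration, not the HotSpot class:

#include <cstdio>

// Hypothetical sketch of an exponentially weighted average with a clear()
// reset. The weight is the percentage of influence given to a new sample.
class WeightedAverage {
  float    _average;
  unsigned _weight;        // percent weight of each new sample
  unsigned _sample_count;
 public:
  explicit WeightedAverage(unsigned weight)
    : _average(0.0f), _weight(weight), _sample_count(0) {}

  void sample(float new_sample) {
    _sample_count++;
    // Standard exponentially weighted update.
    _average = (_weight * new_sample + (100 - _weight) * _average) / 100.0f;
  }

  void clear() {           // forget history, e.g. after a topology change
    _average = 0.0f;
    _sample_count = 0;
  }

  float average() const { return _average; }
};

int main() {
  WeightedAverage rate(25);
  rate.sample(100.0f);
  rate.sample(200.0f);
  std::printf("avg = %f\n", rate.average());
  rate.clear();            // statistics reset, as in clear_alloc_rate()
  return 0;
}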
|
||||
|
|
|
@ -42,19 +42,31 @@ MutableNUMASpace::~MutableNUMASpace() {
|
|||
delete lgrp_spaces();
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
void MutableNUMASpace::mangle_unused_area() {
|
||||
for (int i = 0; i < lgrp_spaces()->length(); i++) {
|
||||
LGRPSpace *ls = lgrp_spaces()->at(i);
|
||||
MutableSpace *s = ls->space();
|
||||
if (!os::numa_has_static_binding()) {
|
||||
HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
|
||||
if (top < s->end()) {
|
||||
ls->add_invalid_region(MemRegion(top, s->end()));
|
||||
}
|
||||
}
|
||||
s->mangle_unused_area();
|
||||
}
|
||||
// This method should do nothing.
|
||||
// It can be called on a numa space during a full compaction.
|
||||
}
|
||||
void MutableNUMASpace::mangle_unused_area_complete() {
|
||||
// This method should do nothing.
|
||||
// It can be called on a numa space during a full compaction.
|
||||
}
|
||||
void MutableNUMASpace::mangle_region(MemRegion mr) {
|
||||
// This method should do nothing because numa spaces are not mangled.
|
||||
}
|
||||
void MutableNUMASpace::set_top_for_allocations(HeapWord* v) {
|
||||
assert(false, "Do not mangle MutableNUMASpace's");
|
||||
}
|
||||
void MutableNUMASpace::set_top_for_allocations() {
|
||||
// This method should do nothing.
|
||||
}
|
||||
void MutableNUMASpace::check_mangled_unused_area(HeapWord* limit) {
|
||||
// This method should do nothing.
|
||||
}
|
||||
void MutableNUMASpace::check_mangled_unused_area_complete() {
|
||||
// This method should do nothing.
|
||||
}
|
||||
#endif // NOT_PRODUCT
|
||||
|
||||
// There may be unallocated holes in the middle chunks
|
||||
// that should be filled with dead objects to ensure parseability.
|
||||
|
@ -129,7 +141,20 @@ size_t MutableNUMASpace::free_in_words() const {
|
|||
size_t MutableNUMASpace::tlab_capacity(Thread *thr) const {
|
||||
guarantee(thr != NULL, "No thread");
|
||||
int lgrp_id = thr->lgrp_id();
|
||||
assert(lgrp_id != -1, "No lgrp_id set");
|
||||
if (lgrp_id == -1) {
|
||||
// This case can occur after the topology of the system has
|
||||
// changed. Threads can change their location; the new home
|
||||
// group will be determined during the first allocation
|
||||
// attempt. For now we can safely assume that all spaces
|
||||
// have equal size because the whole space will be reinitialized.
|
||||
if (lgrp_spaces()->length() > 0) {
|
||||
return capacity_in_bytes() / lgrp_spaces()->length();
|
||||
} else {
|
||||
assert(false, "There should be at least one locality group");
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
// That's the normal case, where we know the locality group of the thread.
|
||||
int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
|
||||
if (i == -1) {
|
||||
return 0;
|
||||
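The lgrp_id == -1 branch above (and the matching one in unsafe_max_tlab_alloc() in the next hunk) falls back to an even split across locality groups when a thread has not yet been assigned a home group. A stand-alone sketch of that fallback, with hypothetical names and a plain vector standing in for the LGRPSpace list:

#include <cassert>
#include <cstddef>
#include <numeric>
#include <vector>

// Hypothetical sketch: per-group capacities stand in for the real spaces.
static size_t tlab_capacity_estimate(const std::vector<size_t>& group_capacity,
                                     int thread_lgrp_id) {
  assert(!group_capacity.empty() && "There should be at least one locality group");
  if (group_capacity.empty()) return 0;
  if (thread_lgrp_id < 0 || thread_lgrp_id >= (int)group_capacity.size()) {
    // Topology changed and the thread has no home group yet: assume all
    // groups are equally sized and report an even share of the total.
    size_t total = std::accumulate(group_capacity.begin(), group_capacity.end(),
                                   size_t(0));
    return total / group_capacity.size();
  }
  return group_capacity[thread_lgrp_id];  // normal case: the thread's own group
}

int main() {
  std::vector<size_t> groups = {16, 16, 16, 16};  // e.g. megabytes per group
  return tlab_capacity_estimate(groups, -1) == 16 ? 0 : 1;
}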
|
@ -138,9 +163,17 @@ size_t MutableNUMASpace::tlab_capacity(Thread *thr) const {
|
|||
}
|
||||
|
||||
size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
|
||||
// Please see the comments for tlab_capacity().
|
||||
guarantee(thr != NULL, "No thread");
|
||||
int lgrp_id = thr->lgrp_id();
|
||||
assert(lgrp_id != -1, "No lgrp_id set");
|
||||
if (lgrp_id == -1) {
|
||||
if (lgrp_spaces()->length() > 0) {
|
||||
return free_in_bytes() / lgrp_spaces()->length();
|
||||
} else {
|
||||
assert(false, "There should be at least one locality group");
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
|
||||
if (i == -1) {
|
||||
return 0;
|
||||
|
@ -238,12 +271,20 @@ void MutableNUMASpace::free_region(MemRegion mr) {
|
|||
void MutableNUMASpace::update() {
|
||||
if (update_layout(false)) {
|
||||
// If the topology has changed, make all chunks zero-sized.
|
||||
// And clear the alloc-rate statistics.
|
||||
// In future we may want to handle this more gracefully in order
|
||||
// to avoid the reallocation of the pages as much as possible.
|
||||
for (int i = 0; i < lgrp_spaces()->length(); i++) {
|
||||
MutableSpace *s = lgrp_spaces()->at(i)->space();
|
||||
LGRPSpace *ls = lgrp_spaces()->at(i);
|
||||
MutableSpace *s = ls->space();
|
||||
s->set_end(s->bottom());
|
||||
s->set_top(s->bottom());
|
||||
ls->clear_alloc_rate();
|
||||
}
|
||||
initialize(region(), true);
|
||||
// A NUMA space is never mangled
|
||||
initialize(region(),
|
||||
SpaceDecorator::Clear,
|
||||
SpaceDecorator::DontMangle);
|
||||
} else {
|
||||
bool should_initialize = false;
|
||||
if (!os::numa_has_static_binding()) {
|
||||
|
@ -257,7 +298,10 @@ void MutableNUMASpace::update() {
|
|||
|
||||
if (should_initialize ||
|
||||
(UseAdaptiveNUMAChunkSizing && adaptation_cycles() < samples_count())) {
|
||||
initialize(region(), true);
|
||||
// A NUMA space is never mangled
|
||||
initialize(region(),
|
||||
SpaceDecorator::Clear,
|
||||
SpaceDecorator::DontMangle);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -448,14 +492,17 @@ void MutableNUMASpace::merge_regions(MemRegion new_region, MemRegion* intersecti
|
|||
}
|
||||
}
|
||||
|
||||
void MutableNUMASpace::initialize(MemRegion mr, bool clear_space) {
|
||||
void MutableNUMASpace::initialize(MemRegion mr,
|
||||
bool clear_space,
|
||||
bool mangle_space) {
|
||||
assert(clear_space, "Reallocation will destroy data!");
|
||||
assert(lgrp_spaces()->length() > 0, "There should be at least one space");
|
||||
|
||||
MemRegion old_region = region(), new_region;
|
||||
set_bottom(mr.start());
|
||||
set_end(mr.end());
|
||||
MutableSpace::set_top(bottom());
|
||||
// Must always clear the space
|
||||
clear(SpaceDecorator::DontMangle);
|
||||
|
||||
// Compute chunk sizes
|
||||
size_t prev_page_size = page_size();
|
||||
|
@ -586,10 +633,8 @@ void MutableNUMASpace::initialize(MemRegion mr, bool clear_space) {
|
|||
bias_region(top_region, ls->lgrp_id());
|
||||
}
|
||||
|
||||
// If we clear the region, we would mangle it in debug. That would cause page
|
||||
// allocation in a different place. Hence setting the top directly.
|
||||
s->initialize(new_region, false);
|
||||
s->set_top(s->bottom());
|
||||
// Clear space (set top = bottom) but never mangle.
|
||||
s->initialize(new_region, SpaceDecorator::Clear, SpaceDecorator::DontMangle);
|
||||
|
||||
set_adaptation_cycles(samples_count());
|
||||
}
|
||||
|
@ -641,10 +686,12 @@ void MutableNUMASpace::set_top(HeapWord* value) {
|
|||
MutableSpace::set_top(value);
|
||||
}
|
||||
|
||||
void MutableNUMASpace::clear() {
|
||||
void MutableNUMASpace::clear(bool mangle_space) {
|
||||
MutableSpace::set_top(bottom());
|
||||
for (int i = 0; i < lgrp_spaces()->length(); i++) {
|
||||
lgrp_spaces()->at(i)->space()->clear();
|
||||
// Never mangle NUMA spaces because the mangling will
|
||||
// bind the memory to a possibly unwanted lgroup.
|
||||
lgrp_spaces()->at(i)->space()->clear(SpaceDecorator::DontMangle);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -112,6 +112,7 @@ class MutableNUMASpace : public MutableSpace {
|
|||
int lgrp_id() const { return _lgrp_id; }
|
||||
MutableSpace* space() const { return _space; }
|
||||
AdaptiveWeightedAverage* alloc_rate() const { return _alloc_rate; }
|
||||
void clear_alloc_rate() { _alloc_rate->clear(); }
|
||||
SpaceStats* space_stats() { return &_space_stats; }
|
||||
void clear_space_stats() { _space_stats = SpaceStats(); }
|
||||
|
||||
|
@ -171,14 +172,21 @@ class MutableNUMASpace : public MutableSpace {
|
|||
MutableNUMASpace();
|
||||
virtual ~MutableNUMASpace();
|
||||
// Space initialization.
|
||||
virtual void initialize(MemRegion mr, bool clear_space);
|
||||
virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
|
||||
// Update space layout if necessary. Do all adaptive resizing job.
|
||||
virtual void update();
|
||||
// Update allocation rate averages.
|
||||
virtual void accumulate_statistics();
|
||||
|
||||
virtual void clear();
|
||||
virtual void mangle_unused_area();
|
||||
virtual void clear(bool mangle_space);
|
||||
virtual void mangle_unused_area() PRODUCT_RETURN;
|
||||
virtual void mangle_unused_area_complete() PRODUCT_RETURN;
|
||||
virtual void mangle_region(MemRegion mr) PRODUCT_RETURN;
|
||||
virtual void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
|
||||
virtual void check_mangled_unused_area_complete() PRODUCT_RETURN;
|
||||
virtual void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
|
||||
virtual void set_top_for_allocations() PRODUCT_RETURN;
|
||||
|
||||
virtual void ensure_parsability();
|
||||
virtual size_t used_in_words() const;
|
||||
virtual size_t free_in_words() const;
|
||||
|
|
|
@ -25,7 +25,17 @@
|
|||
# include "incls/_precompiled.incl"
|
||||
# include "incls/_mutableSpace.cpp.incl"
|
||||
|
||||
void MutableSpace::initialize(MemRegion mr, bool clear_space) {
|
||||
MutableSpace::MutableSpace(): ImmutableSpace(), _top(NULL) {
|
||||
_mangler = new MutableSpaceMangler(this);
|
||||
}
|
||||
|
||||
MutableSpace::~MutableSpace() {
|
||||
delete _mangler;
|
||||
}
|
||||
|
||||
void MutableSpace::initialize(MemRegion mr,
|
||||
bool clear_space,
|
||||
bool mangle_space) {
|
||||
HeapWord* bottom = mr.start();
|
||||
HeapWord* end = mr.end();
|
||||
|
||||
|
@ -34,14 +44,51 @@ void MutableSpace::initialize(MemRegion mr, bool clear_space) {
|
|||
set_bottom(bottom);
|
||||
set_end(end);
|
||||
|
||||
if (clear_space) clear();
|
||||
if (clear_space) {
|
||||
clear(mangle_space);
|
||||
}
|
||||
}
|
||||
|
||||
void MutableSpace::clear() {
|
||||
void MutableSpace::clear(bool mangle_space) {
|
||||
set_top(bottom());
|
||||
if (ZapUnusedHeapArea) mangle_unused_area();
|
||||
if (ZapUnusedHeapArea && mangle_space) {
|
||||
mangle_unused_area();
|
||||
}
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
void MutableSpace::check_mangled_unused_area(HeapWord* limit) {
|
||||
mangler()->check_mangled_unused_area(limit);
|
||||
}
|
||||
|
||||
void MutableSpace::check_mangled_unused_area_complete() {
|
||||
mangler()->check_mangled_unused_area_complete();
|
||||
}
|
||||
|
||||
// Mangle only the unused space that has not previously
|
||||
// been mangled and that has not been allocated since being
|
||||
// mangled.
|
||||
void MutableSpace::mangle_unused_area() {
|
||||
mangler()->mangle_unused_area();
|
||||
}
|
||||
|
||||
void MutableSpace::mangle_unused_area_complete() {
|
||||
mangler()->mangle_unused_area_complete();
|
||||
}
|
||||
|
||||
void MutableSpace::mangle_region(MemRegion mr) {
|
||||
SpaceMangler::mangle_region(mr);
|
||||
}
|
||||
|
||||
void MutableSpace::set_top_for_allocations(HeapWord* v) {
|
||||
mangler()->set_top_for_allocations(v);
|
||||
}
|
||||
|
||||
void MutableSpace::set_top_for_allocations() {
|
||||
mangler()->set_top_for_allocations(top());
|
||||
}
|
||||
#endif
|
||||
|
||||
// This version requires locking. */
|
||||
HeapWord* MutableSpace::allocate(size_t size) {
|
||||
assert(Heap_lock->owned_by_self() ||
|
||||
|
|
|
@ -30,14 +30,23 @@
|
|||
// Invariant: (ImmutableSpace +) bottom() <= top() <= end()
|
||||
// top() is inclusive and end() is exclusive.
|
||||
|
||||
class MutableSpaceMangler;
|
||||
|
||||
class MutableSpace: public ImmutableSpace {
|
||||
friend class VMStructs;
|
||||
|
||||
// Helper for mangling unused space in debug builds
|
||||
MutableSpaceMangler* _mangler;
|
||||
|
||||
protected:
|
||||
HeapWord* _top;
|
||||
|
||||
MutableSpaceMangler* mangler() { return _mangler; }
|
||||
|
||||
public:
|
||||
virtual ~MutableSpace() {}
|
||||
MutableSpace() { _top = NULL; }
|
||||
virtual ~MutableSpace();
|
||||
MutableSpace();
|
||||
|
||||
// Accessors
|
||||
HeapWord* top() const { return _top; }
|
||||
virtual void set_top(HeapWord* value) { _top = value; }
|
||||
|
@ -52,21 +61,30 @@ class MutableSpace: public ImmutableSpace {
|
|||
MemRegion used_region() { return MemRegion(bottom(), top()); }
|
||||
|
||||
// Initialization
|
||||
virtual void initialize(MemRegion mr, bool clear_space);
|
||||
virtual void clear();
|
||||
virtual void initialize(MemRegion mr,
|
||||
bool clear_space,
|
||||
bool mangle_space);
|
||||
virtual void clear(bool mangle_space);
|
||||
// Does the usual initialization but optionally resets top to bottom.
|
||||
#if 0 // MANGLE_SPACE
|
||||
void initialize(MemRegion mr, bool clear_space, bool reset_top);
|
||||
#endif
|
||||
virtual void update() { }
|
||||
virtual void accumulate_statistics() { }
|
||||
|
||||
// Overwrites the unused portion of this space. Note that some collectors
|
||||
// may use this "scratch" space during collections.
|
||||
virtual void mangle_unused_area() {
|
||||
mangle_region(MemRegion(_top, _end));
|
||||
}
|
||||
// Methods used in mangling. See descriptions under SpaceMangler.
|
||||
virtual void mangle_unused_area() PRODUCT_RETURN;
|
||||
virtual void mangle_unused_area_complete() PRODUCT_RETURN;
|
||||
virtual void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
|
||||
virtual void check_mangled_unused_area_complete() PRODUCT_RETURN;
|
||||
virtual void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
|
||||
|
||||
// Used to save the space's current top for later use during mangling.
|
||||
virtual void set_top_for_allocations() PRODUCT_RETURN;
|
||||
|
||||
virtual void ensure_parsability() { }
|
||||
|
||||
void mangle_region(MemRegion mr) {
|
||||
debug_only(Copy::fill_to_words(mr.start(), mr.word_size(), badHeapWord));
|
||||
}
|
||||
virtual void mangle_region(MemRegion mr) PRODUCT_RETURN;
|
||||
|
||||
// Boolean queries.
|
||||
bool is_empty() const { return used_in_words() == 0; }
|
||||
|
|
141
hotspot/src/share/vm/gc_implementation/shared/spaceDecorator.cpp
Normal file
|
@ -0,0 +1,141 @@
|
|||
/*
|
||||
* Copyright 2002-2005 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
# include "incls/_precompiled.incl"
|
||||
# include "incls/_spaceDecorator.cpp.incl"
|
||||
|
||||
// Catch-all file for utility classes
|
||||
|
||||
#ifndef PRODUCT
|
||||
|
||||
// Returns true if the location q matches the mangling
|
||||
// pattern.
|
||||
bool SpaceMangler::is_mangled(HeapWord* q) {
|
||||
// This test loses precision but is good enough
|
||||
return badHeapWord == (max_juint & (uintptr_t) q->value());
|
||||
}
|
||||
|
||||
|
||||
void SpaceMangler::set_top_for_allocations(HeapWord* v) {
|
||||
if (v < end()) {
|
||||
assert(!CheckZapUnusedHeapArea || is_mangled(v),
|
||||
"The high water mark is not mangled");
|
||||
}
|
||||
_top_for_allocations = v;
|
||||
}
|
||||
|
||||
// Mangle only the unused space that has not previously
|
||||
// been mangled and that has not been allocated since being
|
||||
// mangled.
|
||||
void SpaceMangler::mangle_unused_area() {
|
||||
assert(ZapUnusedHeapArea, "Mangling should not be in use");
|
||||
// Mangle between top and the high water mark. Safeguard
|
||||
// against the space changing since top_for_allocations was
|
||||
// set.
|
||||
HeapWord* mangled_end = MIN2(top_for_allocations(), end());
|
||||
if (top() < mangled_end) {
|
||||
MemRegion mangle_mr(top(), mangled_end);
|
||||
SpaceMangler::mangle_region(mangle_mr);
|
||||
// Light weight check of mangling.
|
||||
check_mangled_unused_area(end());
|
||||
}
|
||||
// Complete check of unused area which is functional when
|
||||
// DEBUG_MANGLING is defined.
|
||||
check_mangled_unused_area_complete();
|
||||
}
|
||||
|
||||
// A complete mangle is expected in the
|
||||
// exceptional case where top_for_allocations is not
|
||||
// properly tracking the high water mark for mangling.
|
||||
// This can be the case when to-space is being used for
|
||||
// scratch space during a mark-sweep-compact. See
|
||||
// contribute_scratch() and PSMarkSweep::allocate_stacks().
|
||||
void SpaceMangler::mangle_unused_area_complete() {
|
||||
assert(ZapUnusedHeapArea, "Mangling should not be in use");
|
||||
MemRegion mangle_mr(top(), end());
|
||||
SpaceMangler::mangle_region(mangle_mr);
|
||||
}
|
||||
|
||||
// Simply mangle the MemRegion mr.
|
||||
void SpaceMangler::mangle_region(MemRegion mr) {
|
||||
assert(ZapUnusedHeapArea, "Mangling should not be in use");
|
||||
#ifdef ASSERT
|
||||
if(TraceZapUnusedHeapArea) {
|
||||
gclog_or_tty->print("Mangling [0x%x to 0x%x)", mr.start(), mr.end());
|
||||
}
|
||||
Copy::fill_to_words(mr.start(), mr.word_size(), badHeapWord);
|
||||
if(TraceZapUnusedHeapArea) {
|
||||
gclog_or_tty->print_cr(" done");
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
// Check that top, top_for_allocations and the last
|
||||
// word of the space are mangled. In a tight memory
|
||||
// situation even this light weight mangling could
|
||||
// cause paging by touching the end of the space.
|
||||
void SpaceMangler::check_mangled_unused_area(HeapWord* limit) {
|
||||
if (CheckZapUnusedHeapArea) {
|
||||
// This method can be called while the spaces are
|
||||
// being reshaped so skip the test if the end of the
|
||||
// space is beyond the specified limit;
|
||||
if (end() > limit) return;
|
||||
|
||||
assert(top() == end() ||
|
||||
(is_mangled(top())), "Top not mangled");
|
||||
assert((top_for_allocations() < top()) ||
|
||||
(top_for_allocations() >= end()) ||
|
||||
(is_mangled(top_for_allocations())),
|
||||
"Older unused not mangled");
|
||||
assert(top() == end() ||
|
||||
(is_mangled(end() - 1)), "End not properly mangled");
|
||||
// Only does checking when DEBUG_MANGLING is defined.
|
||||
check_mangled_unused_area_complete();
|
||||
}
|
||||
}
|
||||
|
||||
#undef DEBUG_MANGLING
|
||||
// This should only be used while debugging the mangling
|
||||
// because of the high cost of checking the completeness.
|
||||
void SpaceMangler::check_mangled_unused_area_complete() {
|
||||
if (CheckZapUnusedHeapArea) {
|
||||
assert(ZapUnusedHeapArea, "Not mangling unused area");
|
||||
#ifdef DEBUG_MANGLING
|
||||
HeapWord* q = top();
|
||||
HeapWord* limit = end();
|
||||
|
||||
bool passed = true;
|
||||
while (q < limit) {
|
||||
if (!is_mangled(q)) {
|
||||
passed = false;
|
||||
break;
|
||||
}
|
||||
q++;
|
||||
}
|
||||
assert(passed, "Mangling is not complete");
|
||||
#endif
|
||||
}
|
||||
}
|
||||
#undef DEBUG_MANGLING
|
||||
#endif // not PRODUCT
|
141
hotspot/src/share/vm/gc_implementation/shared/spaceDecorator.hpp
Normal file
|
@ -0,0 +1,141 @@
|
|||
/*
|
||||
* Copyright 2002-2005 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
class SpaceDecorator: public AllStatic {
|
||||
public:
|
||||
// Initialization flags.
|
||||
static const bool Clear = true;
|
||||
static const bool DontClear = false;
|
||||
static const bool Mangle = true;
|
||||
static const bool DontMangle = false;
|
||||
};
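A small stand-alone sketch of the pattern these constants enable: call sites spell out SpaceDecorator::Clear / SpaceDecorator::DontMangle instead of bare true/false, which is how the initialize() and clear() calls elsewhere in this change read. Hypothetical toy code, not the HotSpot classes:

#include <cstdio>

struct SpaceFlags {                 // toy stand-in for SpaceDecorator
  static const bool Clear      = true;
  static const bool DontClear  = false;
  static const bool Mangle     = true;
  static const bool DontMangle = false;
};

static void initialize(bool clear_space, bool mangle_space) {
  std::printf("clear=%d mangle=%d\n", clear_space, mangle_space);
}

int main() {
  // Self-documenting call site, mirroring e.g.
  //   eden_space()->initialize(edenMR, SpaceDecorator::Clear, SpaceDecorator::DontMangle);
  initialize(SpaceFlags::Clear, SpaceFlags::DontMangle);
  return 0;
}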
|
||||
|
||||
// Functionality for use with class Space and class MutableSpace.
// The approach taken with the mangling is to mangle all
// the space initially and then to mangle areas that have
// been allocated since the last collection. Mangling is
// done in the context of a generation and in the context
// of a space.
// The space in a generation is mangled when it is first
// initialized and when the generation grows. The spaces
// are not necessarily up-to-date when this mangling occurs
// and the method mangle_region() is used.
// After allocations have been done in a space, the space generally
// needs to be remangled. Remangling is only done on the
// recently allocated regions in the space. Typically, that is
// the region between the new top and the top just before a
// garbage collection.
// An exception to the usual mangling in a space is done when the
// space is used for an extraordinary purpose. Specifically, when
// to-space is used as scratch space for a mark-sweep-compact
// collection.
// Spaces are mangled after a collection. If the generation
// grows after a collection, the added space is mangled as part of
// the growth of the generation. No additional mangling is needed when the
// spaces are resized after an expansion.
// The class SpaceMangler keeps a pointer to the top of the allocated
// area and provides the methods for doing the piecemeal mangling.
// Methods for doing sparse and full checking of the mangling are
// included. The full checking is done if DEBUG_MANGLING is defined.
// GenSpaceMangler is used with the GenCollectedHeap collectors and
// MutableSpaceMangler is used with the ParallelScavengeHeap collectors.
// These subclasses abstract the differences in the types of spaces used
// by each heap.
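To make the mangle-then-verify scheme above concrete, here is a minimal stand-alone sketch; it is hypothetical toy code, not SpaceMangler itself. Unused words are filled with a recognizable bad pattern, and a later check confirms the unused area still carries that pattern.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

static const std::uintptr_t kBadHeapWord = 0xBAADBABE;  // hypothetical fill pattern

// Overwrite everything from 'top' (first unused slot) to the end of the space.
static void mangle(std::vector<std::uintptr_t>& space, size_t top) {
  for (size_t i = top; i < space.size(); i++) {
    space[i] = kBadHeapWord;
  }
}

// Verify that the unused area still carries the pattern, i.e. nothing has
// written there since it was mangled.
static bool check_mangled(const std::vector<std::uintptr_t>& space, size_t top) {
  for (size_t i = top; i < space.size(); i++) {
    if (space[i] != kBadHeapWord) return false;
  }
  return true;
}

int main() {
  std::vector<std::uintptr_t> space(16, 0);
  size_t top = 4;                 // slots [0, 4) are "allocated"
  mangle(space, top);
  assert(check_mangled(space, top));
  return 0;
}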
|
||||
class SpaceMangler: public CHeapObj {
|
||||
friend class VMStructs;
|
||||
|
||||
// High water mark for allocations. Typically, the space above
|
||||
// this point has been mangled previously and doesn't need to be
|
||||
// touched again. Space below this point has been allocated
|
||||
// and remangling is needed between the current top and this
|
||||
// high water mark.
|
||||
HeapWord* _top_for_allocations;
|
||||
HeapWord* top_for_allocations() { return _top_for_allocations; }
|
||||
|
||||
public:
|
||||
|
||||
// Setting _top_for_allocations to NULL at initialization
|
||||
// makes it always below top so that mangling done as part
|
||||
// of the initialize() call of a space does nothing (as it
|
||||
// should since the mangling is done as part of the constructor
|
||||
// for the space).
|
||||
SpaceMangler() : _top_for_allocations(NULL) {}
|
||||
|
||||
// Methods for top and end that delegate to the specific
|
||||
// space type.
|
||||
virtual HeapWord* top() const = 0;
|
||||
virtual HeapWord* end() const = 0;
|
||||
|
||||
// Return true if q matches the mangled pattern.
|
||||
static bool is_mangled(HeapWord* q) PRODUCT_RETURN0;
|
||||
|
||||
// Used to save an address in a space for later use during mangling.
|
||||
void set_top_for_allocations(HeapWord* v);
|
||||
|
||||
// Overwrites the unused portion of this space.
|
||||
// Mangle only the region not previously mangled [top, top_previously_mangled)
|
||||
void mangle_unused_area();
|
||||
// Mangle all the unused region [top, end)
|
||||
void mangle_unused_area_complete();
|
||||
// Do some sparse checking on the area that should have been mangled.
|
||||
void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
|
||||
// Do a complete check of the area that should be mangled.
|
||||
void check_mangled_unused_area_complete() PRODUCT_RETURN;
|
||||
|
||||
// Mangle the MemRegion. This is a non-space specific mangler. It
|
||||
// is used during the initial mangling of a space before the space
|
||||
// is fully constructed. Also is used when a generation is expanded
|
||||
// and possibly before the spaces have been reshaped to the new
|
||||
// size of the generation.
|
||||
static void mangle_region(MemRegion mr);
|
||||
};
|
||||
|
||||
class ContiguousSpace;
|
||||
|
||||
// For use with GenCollectedHeap's
|
||||
class GenSpaceMangler: public SpaceMangler {
|
||||
ContiguousSpace* _sp;
|
||||
|
||||
ContiguousSpace* sp() { return _sp; }
|
||||
|
||||
HeapWord* top() const { return _sp->top(); }
|
||||
HeapWord* end() const { return _sp->end(); }
|
||||
|
||||
public:
|
||||
GenSpaceMangler(ContiguousSpace* sp) : SpaceMangler(), _sp(sp) {}
|
||||
};
|
||||
|
||||
// For use with ParallelScavengeHeap's.
|
||||
class MutableSpaceMangler: public SpaceMangler {
|
||||
MutableSpace* _sp;
|
||||
|
||||
MutableSpace* sp() { return _sp; }
|
||||
|
||||
HeapWord* top() const { return _sp->top(); }
|
||||
HeapWord* end() const { return _sp->end(); }
|
||||
|
||||
public:
|
||||
MutableSpaceMangler(MutableSpace* sp) : SpaceMangler(), _sp(sp) {}
|
||||
};
|
|
@ -1406,6 +1406,7 @@ defNewGeneration.cpp java.hpp
|
|||
defNewGeneration.cpp oop.inline.hpp
|
||||
defNewGeneration.cpp referencePolicy.hpp
|
||||
defNewGeneration.cpp space.inline.hpp
|
||||
defNewGeneration.cpp spaceDecorator.hpp
|
||||
defNewGeneration.cpp thread_<os_family>.inline.hpp
|
||||
|
||||
defNewGeneration.hpp ageTable.hpp
|
||||
|
@ -1790,6 +1791,7 @@ generation.cpp generation.inline.hpp
|
|||
generation.cpp java.hpp
|
||||
generation.cpp oop.hpp
|
||||
generation.cpp oop.inline.hpp
|
||||
generation.cpp spaceDecorator.hpp
|
||||
generation.cpp space.inline.hpp
|
||||
|
||||
generation.hpp allocation.hpp
|
||||
|
@ -3723,6 +3725,7 @@ space.cpp oop.inline2.hpp
|
|||
space.cpp safepoint.hpp
|
||||
space.cpp space.hpp
|
||||
space.cpp space.inline.hpp
|
||||
space.cpp spaceDecorator.hpp
|
||||
space.cpp systemDictionary.hpp
|
||||
space.cpp universe.inline.hpp
|
||||
space.cpp vmSymbols.hpp
|
||||
|
@ -3745,6 +3748,13 @@ space.inline.hpp safepoint.hpp
|
|||
space.inline.hpp space.hpp
|
||||
space.inline.hpp universe.hpp
|
||||
|
||||
spaceDecorator.hpp globalDefinitions.hpp
|
||||
spaceDecorator.hpp mutableSpace.hpp
|
||||
spaceDecorator.hpp space.hpp
|
||||
|
||||
spaceDecorator.cpp copy.hpp
|
||||
spaceDecorator.cpp spaceDecorator.hpp
|
||||
|
||||
specialized_oop_closures.cpp ostream.hpp
|
||||
specialized_oop_closures.cpp specialized_oop_closures.hpp
|
||||
|
||||
|
|
|
@ -51,6 +51,7 @@ dump.cpp oop.hpp
|
|||
dump.cpp oopFactory.hpp
|
||||
dump.cpp resourceArea.hpp
|
||||
dump.cpp signature.hpp
|
||||
dump.cpp spaceDecorator.hpp
|
||||
dump.cpp symbolTable.hpp
|
||||
dump.cpp systemDictionary.hpp
|
||||
dump.cpp vmThread.hpp
|
||||
|
|
|
@ -432,14 +432,16 @@ bool CompactingPermGenGen::grow_by(size_t bytes) {
|
|||
}
|
||||
|
||||
|
||||
void CompactingPermGenGen::grow_to_reserved() {
|
||||
bool CompactingPermGenGen::grow_to_reserved() {
|
||||
// Don't allow _virtual_size to expand into shared spaces.
|
||||
bool success = false;
|
||||
if (_virtual_space.uncommitted_size() > _shared_space_size) {
|
||||
size_t remaining_bytes =
|
||||
_virtual_space.uncommitted_size() - _shared_space_size;
|
||||
bool success = OneContigSpaceCardGeneration::grow_by(remaining_bytes);
|
||||
success = OneContigSpaceCardGeneration::grow_by(remaining_bytes);
|
||||
DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
|
||||
}
|
||||
return success;
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -184,7 +184,7 @@ public:
|
|||
void post_compact();
|
||||
size_t contiguous_available() const;
|
||||
bool grow_by(size_t bytes);
|
||||
void grow_to_reserved();
|
||||
virtual bool grow_to_reserved();
|
||||
|
||||
void clear_remembered_set();
|
||||
void invalidate_remembered_set();
|
||||
|
|
|
@ -172,15 +172,25 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs,
|
|||
_to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
|
||||
_gen_counters);
|
||||
|
||||
compute_space_boundaries(0);
|
||||
compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
|
||||
update_counters();
|
||||
_next_gen = NULL;
|
||||
_tenuring_threshold = MaxTenuringThreshold;
|
||||
_pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
|
||||
}
|
||||
|
||||
void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size) {
|
||||
uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
|
||||
void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
|
||||
bool clear_space,
|
||||
bool mangle_space) {
|
||||
uintx alignment =
|
||||
GenCollectedHeap::heap()->collector_policy()->min_alignment();
|
||||
|
||||
// If the spaces are being cleared (only done at heap initialization
|
||||
// currently), the survivor spaces need not be empty.
|
||||
// Otherwise, no care is taken for used areas in the survivor spaces
|
||||
// so check.
|
||||
assert(clear_space || (to()->is_empty() && from()->is_empty()),
|
||||
"Initialization of the survivor spaces assumes these are empty");
|
||||
|
||||
// Compute sizes
|
||||
uintx size = _virtual_space.committed_size();
|
||||
|
@ -214,16 +224,41 @@ void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size) {
|
|||
MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
|
||||
MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end);
|
||||
|
||||
eden()->initialize(edenMR, (minimum_eden_size == 0));
|
||||
// If minumum_eden_size != 0, we will not have cleared any
|
||||
// A minimum eden size implies that there is a part of eden that
|
||||
// is being used and that affects the initialization of any
|
||||
// newly formed eden.
|
||||
bool live_in_eden = minimum_eden_size > 0;
|
||||
|
||||
// If not clearing the spaces, do some checking to verify that
|
||||
// the spaces are already mangled.
|
||||
if (!clear_space) {
|
||||
// Must check mangling before the spaces are reshaped. Otherwise,
|
||||
// the bottom or end of one space may have moved into another
|
||||
// space and a failure of the check may not correctly indicate which space
|
||||
// is not properly mangled.
|
||||
if (ZapUnusedHeapArea) {
|
||||
HeapWord* limit = (HeapWord*) _virtual_space.high();
|
||||
eden()->check_mangled_unused_area(limit);
|
||||
from()->check_mangled_unused_area(limit);
|
||||
to()->check_mangled_unused_area(limit);
|
||||
}
|
||||
}
|
||||
|
||||
// Reset the spaces for their new regions.
|
||||
eden()->initialize(edenMR,
|
||||
clear_space && !live_in_eden,
|
||||
SpaceDecorator::Mangle);
|
||||
// If clear_space and live_in_eden, we will not have cleared any
|
||||
// portion of eden above its top. This can cause newly
|
||||
// expanded space not to be mangled if using ZapUnusedHeapArea.
|
||||
// We explicitly do such mangling here.
|
||||
if (ZapUnusedHeapArea && (minimum_eden_size != 0)) {
|
||||
if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
|
||||
eden()->mangle_unused_area();
|
||||
}
|
||||
from()->initialize(fromMR, true);
|
||||
to()->initialize(toMR , true);
|
||||
from()->initialize(fromMR, clear_space, mangle_space);
|
||||
to()->initialize(toMR, clear_space, mangle_space);
|
||||
|
||||
// Set next compaction spaces.
|
||||
eden()->set_next_compaction_space(from());
|
||||
// The to-space is normally empty before a compaction so need
|
||||
// not be considered. The exception is during promotion
|
||||
|
@ -250,7 +285,16 @@ void DefNewGeneration::swap_spaces() {
|
|||
|
||||
bool DefNewGeneration::expand(size_t bytes) {
|
||||
MutexLocker x(ExpandHeap_lock);
|
||||
HeapWord* prev_high = (HeapWord*) _virtual_space.high();
|
||||
bool success = _virtual_space.expand_by(bytes);
|
||||
if (success && ZapUnusedHeapArea) {
|
||||
// Mangle newly committed space immediately because it
|
||||
// can be done here more simply than after the new
|
||||
// spaces have been computed.
|
||||
HeapWord* new_high = (HeapWord*) _virtual_space.high();
|
||||
MemRegion mangle_region(prev_high, new_high);
|
||||
SpaceMangler::mangle_region(mangle_region);
|
||||
}
|
||||
|
||||
// Do not attempt an expand-to-the reserve size. The
|
||||
// request should properly observe the maximum size of
|
||||
|
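The pattern used in DefNewGeneration::expand() above (and in PSYoungGen::resize_generation() earlier in this change) is: remember the committed high boundary, expand, then mangle only the newly committed delta. A stand-alone sketch with a toy space, assuming nothing about the real PSVirtualSpace API:

#include <cstddef>
#include <cstdint>
#include <vector>

static const std::uintptr_t kBadHeapWord = 0xBAADBABE;   // hypothetical fill pattern

struct ToySpace {                        // toy stand-in for a virtual space
  std::vector<std::uintptr_t> words;     // committed words
  size_t high() const { return words.size(); }
  bool expand_by(size_t n) { words.resize(words.size() + n, 0); return true; }
  void mangle(size_t from, size_t to) {
    for (size_t i = from; i < to; i++) words[i] = kBadHeapWord;
  }
};

static bool expand_and_mangle(ToySpace& sp, size_t n, bool zap_unused_heap_area) {
  size_t prev_high = sp.high();          // boundary before growing
  bool success = sp.expand_by(n);
  if (success && zap_unused_heap_area) {
    sp.mangle(prev_high, sp.high());     // mangle only the newly committed region
  }
  return success;
}

int main() {
  ToySpace sp;
  return expand_and_mangle(sp, 8, true) ? 0 : 1;
}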
@ -262,7 +306,8 @@ bool DefNewGeneration::expand(size_t bytes) {
|
|||
// value.
|
||||
if (GC_locker::is_active()) {
|
||||
if (PrintGC && Verbose) {
|
||||
gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
|
||||
gclog_or_tty->print_cr("Garbage collection disabled, "
|
||||
"expanded heap instead");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -326,16 +371,24 @@ void DefNewGeneration::compute_new_size() {
|
|||
changed = true;
|
||||
}
|
||||
if (changed) {
|
||||
compute_space_boundaries(eden()->used());
|
||||
MemRegion cmr((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high());
|
||||
// The spaces have already been mangled at this point but
|
||||
// may not have been cleared (set top = bottom) and should be.
|
||||
// Mangling was done when the heap was being expanded.
|
||||
compute_space_boundaries(eden()->used(),
|
||||
SpaceDecorator::Clear,
|
||||
SpaceDecorator::DontMangle);
|
||||
MemRegion cmr((HeapWord*)_virtual_space.low(),
|
||||
(HeapWord*)_virtual_space.high());
|
||||
Universe::heap()->barrier_set()->resize_covered_region(cmr);
|
||||
if (Verbose && PrintGC) {
|
||||
size_t new_size_after = _virtual_space.committed_size();
|
||||
size_t eden_size_after = eden()->capacity();
|
||||
size_t survivor_size_after = from()->capacity();
|
||||
gclog_or_tty->print("New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden="
|
||||
gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
|
||||
SIZE_FORMAT "K [eden="
|
||||
SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
|
||||
new_size_before/K, new_size_after/K, eden_size_after/K, survivor_size_after/K);
|
||||
new_size_before/K, new_size_after/K,
|
||||
eden_size_after/K, survivor_size_after/K);
|
||||
if (WizardMode) {
|
||||
gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
|
||||
thread_increase_size/K, threads_count);
|
||||
|
@ -480,7 +533,7 @@ void DefNewGeneration::collect(bool full,
|
|||
ScanWeakRefClosure scan_weak_ref(this);
|
||||
|
||||
age_table()->clear();
|
||||
to()->clear();
|
||||
to()->clear(SpaceDecorator::Mangle);
|
||||
|
||||
gch->rem_set()->prepare_for_younger_refs_iterate(false);
|
||||
|
||||
|
@ -525,8 +578,18 @@ void DefNewGeneration::collect(bool full,
|
|||
soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers, NULL);
|
||||
if (!promotion_failed()) {
|
||||
// Swap the survivor spaces.
|
||||
eden()->clear();
|
||||
from()->clear();
|
||||
eden()->clear(SpaceDecorator::Mangle);
|
||||
from()->clear(SpaceDecorator::Mangle);
|
||||
if (ZapUnusedHeapArea) {
|
||||
// This is now done here because of the piece-meal mangling which
|
||||
// can check for valid mangling at intermediate points in the
|
||||
// collection(s). When a minor collection fails to collect
|
||||
// sufficient space, resizing of the young generation can occur
|
||||
// and redistribute the spaces in the young generation. Mangle
|
||||
// here so that unzapped regions don't get distributed to
|
||||
// other spaces.
|
||||
to()->mangle_unused_area();
|
||||
}
|
||||
swap_spaces();
|
||||
|
||||
assert(to()->is_empty(), "to space should be empty now");
|
||||
|
@ -753,6 +816,15 @@ void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* reque
|
|||
}
|
||||
}
|
||||
|
||||
void DefNewGeneration::reset_scratch() {
|
||||
// If contributing scratch in to_space, mangle all of
|
||||
// to_space if ZapUnusedHeapArea. This is needed because
|
||||
// top is not maintained while using to-space as scratch.
|
||||
if (ZapUnusedHeapArea) {
|
||||
to()->mangle_unused_area_complete();
|
||||
}
|
||||
}
|
||||
|
||||
bool DefNewGeneration::collection_attempt_is_safe() {
|
||||
if (!to()->is_empty()) {
|
||||
return false;
|
||||
|
@ -806,11 +878,25 @@ void DefNewGeneration::gc_epilogue(bool full) {
|
|||
}
|
||||
}
|
||||
|
||||
if (ZapUnusedHeapArea) {
|
||||
eden()->check_mangled_unused_area_complete();
|
||||
from()->check_mangled_unused_area_complete();
|
||||
to()->check_mangled_unused_area_complete();
|
||||
}
|
||||
|
||||
// update the generation and space performance counters
|
||||
update_counters();
|
||||
gch->collector_policy()->counters()->update_counters();
|
||||
}
|
||||
|
||||
void DefNewGeneration::record_spaces_top() {
|
||||
assert(ZapUnusedHeapArea, "Not mangling unused space");
|
||||
eden()->set_top_for_allocations();
|
||||
to()->set_top_for_allocations();
|
||||
from()->set_top_for_allocations();
|
||||
}
|
||||
|
||||
|
||||
void DefNewGeneration::update_counters() {
|
||||
if (UsePerfData) {
|
||||
_eden_counters->update_all();
|
||||
|
|
|
@ -279,6 +279,9 @@ protected:
|
|||
virtual void gc_prologue(bool full);
|
||||
virtual void gc_epilogue(bool full);
|
||||
|
||||
// Save the tops for eden, from, and to
|
||||
virtual void record_spaces_top();
|
||||
|
||||
// Doesn't require additional work during GC prologue and epilogue
|
||||
virtual bool performs_in_place_marking() const { return false; }
|
||||
|
||||
|
@ -299,9 +302,12 @@ protected:
|
|||
|
||||
// For non-youngest collection, the DefNewGeneration can contribute
|
||||
// "to-space".
|
||||
void contribute_scratch(ScratchBlock*& list, Generation* requestor,
|
||||
virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
|
||||
size_t max_alloc_words);
|
||||
|
||||
// Reset for contribution of "to-space".
|
||||
virtual void reset_scratch();
|
||||
|
||||
// GC support
|
||||
virtual void compute_new_size();
|
||||
virtual void collect(bool full,
|
||||
|
@ -331,7 +337,12 @@ protected:
|
|||
void verify(bool allow_dirty);
|
||||
|
||||
protected:
|
||||
void compute_space_boundaries(uintx minimum_eden_size);
|
||||
// If clear_space is true, clear the survivor spaces. Eden is
|
||||
// cleared if the minimum size of eden is 0. If mangle_space
|
||||
// is true, also mangle the space in debug mode.
|
||||
void compute_space_boundaries(uintx minimum_eden_size,
|
||||
bool clear_space,
|
||||
bool mangle_space);
|
||||
// Scavenge support
|
||||
void swap_spaces();
|
||||
};
|
||||
|
|
|
@ -645,7 +645,7 @@ public:
|
|||
class ClearSpaceClosure : public SpaceClosure {
|
||||
public:
|
||||
void do_space(Space* s) {
|
||||
s->clear();
|
||||
s->clear(SpaceDecorator::Mangle);
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -465,6 +465,11 @@ void GenCollectedHeap::do_collection(bool full,
|
|||
_gens[i]->stat_record()->invocations++;
|
||||
_gens[i]->stat_record()->accumulated_time.start();
|
||||
|
||||
// Must be done anew before each collection because
|
||||
// a previous collection will do mangling and will
|
||||
// change top of some spaces.
|
||||
record_gen_tops_before_GC();
|
||||
|
||||
if (PrintGC && Verbose) {
|
||||
gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
|
||||
i,
|
||||
|
@ -1058,6 +1063,12 @@ ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
|
|||
return res;
|
||||
}
|
||||
|
||||
void GenCollectedHeap::release_scratch() {
|
||||
for (int i = 0; i < _n_gens; i++) {
|
||||
_gens[i]->reset_scratch();
|
||||
}
|
||||
}
|
||||
|
||||
size_t GenCollectedHeap::large_typearray_limit() {
|
||||
return gen_policy()->large_typearray_limit();
|
||||
}
|
||||
|
@ -1285,6 +1296,24 @@ void GenCollectedHeap::gc_epilogue(bool full) {
|
|||
always_do_update_barrier = UseConcMarkSweepGC;
|
||||
};
|
||||
|
||||
#ifndef PRODUCT
|
||||
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
|
||||
private:
|
||||
public:
|
||||
void do_generation(Generation* gen) {
|
||||
gen->record_spaces_top();
|
||||
}
|
||||
};
|
||||
|
||||
void GenCollectedHeap::record_gen_tops_before_GC() {
|
||||
if (ZapUnusedHeapArea) {
|
||||
GenGCSaveTopsBeforeGCClosure blk;
|
||||
generation_iterate(&blk, false); // not old-to-young.
|
||||
perm_gen()->record_spaces_top();
|
||||
}
|
||||
}
|
||||
#endif // not PRODUCT
|
||||
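GenGCSaveTopsBeforeGCClosure above uses the heap's closure pattern: a small object exposing do_generation() is handed to generation_iterate(), which invokes it on every generation. A simplified standalone sketch of that pattern, with hypothetical Generation and Heap stand-ins rather than the real HotSpot classes:

#include <cstdio>
#include <vector>

// Hypothetical, simplified stand-ins for the real HotSpot classes.
struct Generation {
  const char* name;
  void record_spaces_top() { std::printf("recording top of %s\n", name); }
};

struct GenClosure {
  virtual void do_generation(Generation* gen) = 0;
  virtual ~GenClosure() {}
};

struct Heap {
  std::vector<Generation> gens;
  // Apply the closure to each generation, youngest first.
  void generation_iterate(GenClosure* cl) {
    for (Generation& g : gens) cl->do_generation(&g);
  }
};

// Mirrors GenGCSaveTopsBeforeGCClosure: save each generation's top.
struct SaveTopsClosure : GenClosure {
  void do_generation(Generation* gen) override { gen->record_spaces_top(); }
};

int main() {
  Heap heap;
  heap.gens = { {"young"}, {"old"} };
  SaveTopsClosure blk;
  heap.generation_iterate(&blk);
  return 0;
}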
|
||||
class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
|
||||
public:
|
||||
void do_generation(Generation* gen) {
|
||||
|
|
|
@ -259,6 +259,9 @@ public:
|
|||
// be provided are returned as a list of ScratchBlocks, sorted by
|
||||
// decreasing size.
|
||||
ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words);
|
||||
// Allow each generation to reset any scratch space that it has
|
||||
// contributed as it needs.
|
||||
void release_scratch();
|
||||
|
||||
size_t large_typearray_limit();
|
||||
|
||||
|
@ -482,6 +485,9 @@ private:
|
|||
bool should_do_concurrent_full_gc(GCCause::Cause cause);
|
||||
void collect_mostly_concurrent(GCCause::Cause cause);
|
||||
|
||||
// Save the tops of the spaces in all generations
|
||||
void record_gen_tops_before_GC() PRODUCT_RETURN;
|
||||
|
||||
protected:
|
||||
virtual void gc_prologue(bool full);
|
||||
virtual void gc_epilogue(bool full);
|
||||
|
|
|
@ -190,6 +190,10 @@ void GenMarkSweep::allocate_stacks() {
|
|||
|
||||
|
||||
void GenMarkSweep::deallocate_stacks() {
|
||||
|
||||
GenCollectedHeap* gch = GenCollectedHeap::heap();
|
||||
gch->release_scratch();
|
||||
|
||||
if (_preserved_oop_stack) {
|
||||
delete _preserved_mark_stack;
|
||||
_preserved_mark_stack = NULL;
|
||||
|
|
|
@ -32,6 +32,12 @@ Generation::Generation(ReservedSpace rs, size_t initial_size, int level) :
|
|||
vm_exit_during_initialization("Could not reserve enough space for "
|
||||
"object heap");
|
||||
}
|
||||
// Mangle all of the initial generation.
|
||||
if (ZapUnusedHeapArea) {
|
||||
MemRegion mangle_region((HeapWord*)_virtual_space.low(),
|
||||
(HeapWord*)_virtual_space.high());
|
||||
SpaceMangler::mangle_region(mangle_region);
|
||||
}
|
||||
_reserved = MemRegion((HeapWord*)_virtual_space.low_boundary(),
|
||||
(HeapWord*)_virtual_space.high_boundary());
|
||||
}
|
||||
|
@ -373,6 +379,41 @@ CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
|
|||
}
|
||||
}
|
||||
|
||||
bool CardGeneration::expand(size_t bytes, size_t expand_bytes) {
|
||||
assert_locked_or_safepoint(Heap_lock);
|
||||
if (bytes == 0) {
|
||||
return true; // That's what grow_by(0) would return
|
||||
}
|
||||
size_t aligned_bytes = ReservedSpace::page_align_size_up(bytes);
|
||||
if (aligned_bytes == 0){
|
||||
// The alignment caused the number of bytes to wrap. An expand_by(0) will
|
||||
// return true with the implication that an expansion was done when it
|
||||
// was not. A call to expand implies a best effort to expand by "bytes"
|
||||
// but not a guarantee. Align down to give a best effort. This is likely
|
||||
// the most that the generation can expand since it has some capacity to
|
||||
// start with.
|
||||
aligned_bytes = ReservedSpace::page_align_size_down(bytes);
|
||||
}
|
||||
size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
|
||||
bool success = false;
|
||||
if (aligned_expand_bytes > aligned_bytes) {
|
||||
success = grow_by(aligned_expand_bytes);
|
||||
}
|
||||
if (!success) {
|
||||
success = grow_by(aligned_bytes);
|
||||
}
|
||||
if (!success) {
|
||||
success = grow_to_reserved();
|
||||
}
|
||||
if (PrintGC && Verbose) {
|
||||
if (success && GC_locker::is_active()) {
|
||||
gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
|
||||
}
|
||||
}
|
||||
|
||||
return success;
|
||||
}
|
||||
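The align-up/align-down handling above guards against a request that wraps to zero when rounded up, which would otherwise report a successful expansion that never happened. A hedged standalone sketch of the best-effort logic; the page size and the grow_by() stand-in are illustrative, not the real HotSpot primitives.

#include <cstddef>
#include <cstdio>

static const size_t kPageSize = 4096;  // illustrative; the real value comes from the OS

static size_t align_size_up(size_t bytes)   { return (bytes + kPageSize - 1) & ~(kPageSize - 1); }
static size_t align_size_down(size_t bytes) { return bytes & ~(kPageSize - 1); }

// Hypothetical growth primitive standing in for grow_by(); always succeeds here.
static bool grow_by(size_t bytes) { return bytes > 0; }

// Best-effort expansion: if aligning up wrapped to zero, fall back to aligning
// down so we still expand by as much of the request as possible.
static bool expand(size_t bytes, size_t expand_bytes) {
  if (bytes == 0) return true;
  size_t aligned_bytes = align_size_up(bytes);
  if (aligned_bytes == 0) {                  // align-up overflowed
    aligned_bytes = align_size_down(bytes);
  }
  size_t aligned_expand_bytes = align_size_up(expand_bytes);
  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) success = grow_by(aligned_expand_bytes);
  if (!success)                             success = grow_by(aligned_bytes);
  return success;
}

int main() {
  std::printf("%d\n", expand(10 * 1024, 64 * 1024));  // grow by at least 64K if possible
  return 0;
}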
|
||||
|
||||
// No young generation references, clear this generation's cards.
|
||||
void CardGeneration::clear_remembered_set() {
|
||||
|
@ -435,25 +476,9 @@ OneContigSpaceCardGeneration::expand_and_allocate(size_t word_size,
|
|||
}
|
||||
}
|
||||
|
||||
void OneContigSpaceCardGeneration::expand(size_t bytes, size_t expand_bytes) {
|
||||
bool OneContigSpaceCardGeneration::expand(size_t bytes, size_t expand_bytes) {
|
||||
GCMutexLocker x(ExpandHeap_lock);
|
||||
size_t aligned_bytes = ReservedSpace::page_align_size_up(bytes);
|
||||
size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
|
||||
bool success = false;
|
||||
if (aligned_expand_bytes > aligned_bytes) {
|
||||
success = grow_by(aligned_expand_bytes);
|
||||
}
|
||||
if (!success) {
|
||||
success = grow_by(aligned_bytes);
|
||||
}
|
||||
if (!success) {
|
||||
grow_to_reserved();
|
||||
}
|
||||
if (GC_locker::is_active()) {
|
||||
if (PrintGC && Verbose) {
|
||||
gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
|
||||
}
|
||||
}
|
||||
return CardGeneration::expand(bytes, expand_bytes);
|
||||
}
|
||||
|
||||
|
||||
|
@ -505,8 +530,11 @@ bool OneContigSpaceCardGeneration::grow_by(size_t bytes) {
|
|||
_bts->resize(new_word_size);
|
||||
|
||||
// Fix for bug #4668531
|
||||
MemRegion mangle_region(_the_space->end(), (HeapWord*)_virtual_space.high());
|
||||
_the_space->mangle_region(mangle_region);
|
||||
if (ZapUnusedHeapArea) {
|
||||
MemRegion mangle_region(_the_space->end(),
|
||||
(HeapWord*)_virtual_space.high());
|
||||
SpaceMangler::mangle_region(mangle_region);
|
||||
}
|
||||
|
||||
// Expand space -- also expands space's BOT
|
||||
// (which uses (part of) shared array above)
|
||||
|
@ -622,6 +650,14 @@ void OneContigSpaceCardGeneration::gc_epilogue(bool full) {
|
|||
|
||||
// update the generation and space performance counters
|
||||
update_counters();
|
||||
if (ZapUnusedHeapArea) {
|
||||
the_space()->check_mangled_unused_area_complete();
|
||||
}
|
||||
}
|
||||
|
||||
void OneContigSpaceCardGeneration::record_spaces_top() {
|
||||
assert(ZapUnusedHeapArea, "Not mangling unused space");
|
||||
the_space()->set_top_for_allocations();
|
||||
}
|
||||
|
||||
void OneContigSpaceCardGeneration::verify(bool allow_dirty) {
|
||||
|
|
|
@ -376,6 +376,9 @@ class Generation: public CHeapObj {
|
|||
// The default is to do nothing.
|
||||
virtual void gc_epilogue(bool full) {};
|
||||
|
||||
// Save the high water marks for the used space in a generation.
|
||||
virtual void record_spaces_top() {};
|
||||
|
||||
// Some generations may need to be "fixed-up" after some allocation
|
||||
// activity to make them parsable again. The default is to do nothing.
|
||||
virtual void ensure_parsability() {};
|
||||
|
@ -476,6 +479,10 @@ class Generation: public CHeapObj {
|
|||
virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
|
||||
size_t max_alloc_words) {}
|
||||
|
||||
// Give each generation an opportunity to do clean up for any
|
||||
// contributed scratch.
|
||||
virtual void reset_scratch() {};
|
||||
|
||||
// When an older generation has been collected, and perhaps resized,
|
||||
// this method will be invoked on all younger generations (from older to
|
||||
// younger), allowing them to resize themselves as appropriate.
|
||||
|
@ -599,11 +606,21 @@ class CardGeneration: public Generation {
|
|||
|
||||
public:
|
||||
|
||||
// Attempt to expand the generation by "bytes". Expand by at a
|
||||
// minimum "expand_bytes". Return true if some amount (not
|
||||
// necessarily the full "bytes") was done.
|
||||
virtual bool expand(size_t bytes, size_t expand_bytes);
|
||||
|
||||
virtual void clear_remembered_set();
|
||||
|
||||
virtual void invalidate_remembered_set();
|
||||
|
||||
virtual void prepare_for_verify();
|
||||
|
||||
// Grow generation with specified size (returns false if unable to grow)
|
||||
virtual bool grow_by(size_t bytes) = 0;
|
||||
// Grow generation to reserved size.
|
||||
virtual bool grow_to_reserved() = 0;
|
||||
};
|
||||
|
||||
// OneContigSpaceCardGeneration models a heap of old objects contained in a single
|
||||
|
@ -624,14 +641,14 @@ class OneContigSpaceCardGeneration: public CardGeneration {
|
|||
// and after last GC.
|
||||
|
||||
// Grow generation with specified size (returns false if unable to grow)
|
||||
bool grow_by(size_t bytes);
|
||||
virtual bool grow_by(size_t bytes);
|
||||
// Grow generation to reserved size.
|
||||
bool grow_to_reserved();
|
||||
virtual bool grow_to_reserved();
|
||||
// Shrink generation with specified size (returns false if unable to shrink)
|
||||
void shrink_by(size_t bytes);
|
||||
|
||||
// Allocation failure
|
||||
void expand(size_t bytes, size_t expand_bytes);
|
||||
virtual bool expand(size_t bytes, size_t expand_bytes);
|
||||
void shrink(size_t bytes);
|
||||
|
||||
// Accessing spaces
|
||||
|
@ -699,6 +716,8 @@ class OneContigSpaceCardGeneration: public CardGeneration {
|
|||
|
||||
virtual void gc_epilogue(bool full);
|
||||
|
||||
virtual void record_spaces_top();
|
||||
|
||||
virtual void verify(bool allow_dirty);
|
||||
virtual void print_on(outputStream* st) const;
|
||||
};
|
||||
|
|
|
@ -232,30 +232,44 @@ ContiguousSpace::new_dcto_cl(OopClosure* cl,
|
|||
return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
|
||||
}
|
||||
|
||||
void Space::initialize(MemRegion mr, bool clear_space) {
|
||||
void Space::initialize(MemRegion mr,
|
||||
bool clear_space,
|
||||
bool mangle_space) {
|
||||
HeapWord* bottom = mr.start();
|
||||
HeapWord* end = mr.end();
|
||||
assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
|
||||
"invalid space boundaries");
|
||||
set_bottom(bottom);
|
||||
set_end(end);
|
||||
if (clear_space) clear();
|
||||
if (clear_space) clear(mangle_space);
|
||||
}
|
||||
|
||||
void Space::clear() {
|
||||
if (ZapUnusedHeapArea) mangle_unused_area();
|
||||
void Space::clear(bool mangle_space) {
|
||||
if (ZapUnusedHeapArea && mangle_space) {
|
||||
mangle_unused_area();
|
||||
}
|
||||
}
|
||||
|
||||
void ContiguousSpace::initialize(MemRegion mr, bool clear_space)
|
||||
ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL) {
|
||||
_mangler = new GenSpaceMangler(this);
|
||||
}
|
||||
|
||||
ContiguousSpace::~ContiguousSpace() {
|
||||
delete _mangler;
|
||||
}
|
||||
|
||||
void ContiguousSpace::initialize(MemRegion mr,
|
||||
bool clear_space,
|
||||
bool mangle_space)
|
||||
{
|
||||
CompactibleSpace::initialize(mr, clear_space);
|
||||
CompactibleSpace::initialize(mr, clear_space, mangle_space);
|
||||
_concurrent_iteration_safe_limit = top();
|
||||
}
|
||||
|
||||
void ContiguousSpace::clear() {
|
||||
void ContiguousSpace::clear(bool mangle_space) {
|
||||
set_top(bottom());
|
||||
set_saved_mark();
|
||||
Space::clear();
|
||||
Space::clear(mangle_space);
|
||||
}
|
||||
|
||||
bool Space::is_in(const void* p) const {
|
||||
|
@ -271,8 +285,8 @@ bool ContiguousSpace::is_free_block(const HeapWord* p) const {
|
|||
return p >= _top;
|
||||
}
|
||||
|
||||
void OffsetTableContigSpace::clear() {
|
||||
ContiguousSpace::clear();
|
||||
void OffsetTableContigSpace::clear(bool mangle_space) {
|
||||
ContiguousSpace::clear(mangle_space);
|
||||
_offsets.initialize_threshold();
|
||||
}
|
||||
|
||||
|
@ -288,17 +302,46 @@ void OffsetTableContigSpace::set_end(HeapWord* new_end) {
|
|||
Space::set_end(new_end);
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
|
||||
void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
|
||||
mangler()->set_top_for_allocations(v);
|
||||
}
|
||||
void ContiguousSpace::set_top_for_allocations() {
|
||||
mangler()->set_top_for_allocations(top());
|
||||
}
|
||||
void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
|
||||
mangler()->check_mangled_unused_area(limit);
|
||||
}
|
||||
|
||||
void ContiguousSpace::check_mangled_unused_area_complete() {
|
||||
mangler()->check_mangled_unused_area_complete();
|
||||
}
|
||||
|
||||
// Mangle only the unused space that has not previously
|
||||
// been mangled and that has not been allocated since being
|
||||
// mangled.
|
||||
void ContiguousSpace::mangle_unused_area() {
|
||||
// to-space is used for storing marks during mark-sweep
|
||||
mangle_region(MemRegion(top(), end()));
|
||||
mangler()->mangle_unused_area();
|
||||
}
|
||||
void ContiguousSpace::mangle_unused_area_complete() {
|
||||
mangler()->mangle_unused_area_complete();
|
||||
}
|
||||
|
||||
void ContiguousSpace::mangle_region(MemRegion mr) {
|
||||
debug_only(Copy::fill_to_words(mr.start(), mr.word_size(), badHeapWord));
|
||||
// Although this method uses SpaceMangler::mangle_region() which
|
||||
// is not specific to a space, when the ContiguousSpace version
|
||||
// is called, it is always with regard to a space and this
|
||||
// bounds checking is appropriate.
|
||||
MemRegion space_mr(bottom(), end());
|
||||
assert(space_mr.contains(mr), "Mangling outside space");
|
||||
SpaceMangler::mangle_region(mr);
|
||||
}
|
||||
#endif // NOT_PRODUCT
|
||||
|
||||
void CompactibleSpace::initialize(MemRegion mr, bool clear_space) {
|
||||
Space::initialize(mr, clear_space);
|
||||
void CompactibleSpace::initialize(MemRegion mr,
|
||||
bool clear_space,
|
||||
bool mangle_space) {
|
||||
Space::initialize(mr, clear_space, mangle_space);
|
||||
_compaction_top = bottom();
|
||||
_next_compaction_space = NULL;
|
||||
}
|
||||
|
@ -820,8 +863,8 @@ void ContiguousSpace::allocate_temporary_filler(int factor) {
|
|||
}
|
||||
}
|
||||
|
||||
void EdenSpace::clear() {
|
||||
ContiguousSpace::clear();
|
||||
void EdenSpace::clear(bool mangle_space) {
|
||||
ContiguousSpace::clear(mangle_space);
|
||||
set_soft_end(end());
|
||||
}
|
||||
|
||||
|
@ -878,7 +921,7 @@ OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOff
|
|||
_par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
|
||||
{
|
||||
_offsets.set_contig_space(this);
|
||||
initialize(mr, true);
|
||||
initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -131,15 +131,17 @@ class Space: public CHeapObj {
|
|||
return MemRegion(bottom(), saved_mark_word());
|
||||
}
|
||||
|
||||
// Initialization
|
||||
virtual void initialize(MemRegion mr, bool clear_space);
|
||||
virtual void clear();
|
||||
// Initialization. These may be run to reset an existing
|
||||
// Space.
|
||||
virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
|
||||
virtual void clear(bool mangle_space);
|
||||
|
||||
// For detecting GC bugs. Should only be called at GC boundaries, since
|
||||
// some unused space may be used as scratch space during GC's.
|
||||
// Default implementation does nothing. We also call this when expanding
|
||||
// a space to satisfy an allocation request. See bug #4668531
|
||||
virtual void mangle_unused_area() {}
|
||||
virtual void mangle_unused_area_complete() {}
|
||||
virtual void mangle_region(MemRegion mr) {}
|
||||
|
||||
// Testers
|
||||
|
@ -354,7 +356,7 @@ private:
|
|||
CompactibleSpace* _next_compaction_space;
|
||||
|
||||
public:
|
||||
virtual void initialize(MemRegion mr, bool clear_space);
|
||||
virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
|
||||
|
||||
// Used temporarily during a compaction phase to hold the value
|
||||
// top should have when compaction is complete.
|
||||
|
@ -724,12 +726,14 @@ protected:
|
|||
/* continuously, but those that weren't need to have their thresholds */ \
|
||||
/* re-initialized. Also mangles unused area for debugging. */ \
|
||||
if (is_empty()) { \
|
||||
clear(); \
|
||||
clear(SpaceDecorator::Mangle); \
|
||||
} else { \
|
||||
if (ZapUnusedHeapArea) mangle_unused_area(); \
|
||||
} \
|
||||
}
|
||||
|
||||
class GenSpaceMangler;
|
||||
|
||||
// A space in which the free area is contiguous. It therefore supports
|
||||
// faster allocation, and compaction.
|
||||
class ContiguousSpace: public CompactibleSpace {
|
||||
|
@ -738,13 +742,21 @@ class ContiguousSpace: public CompactibleSpace {
|
|||
protected:
|
||||
HeapWord* _top;
|
||||
HeapWord* _concurrent_iteration_safe_limit;
|
||||
// A helper for mangling the unused area of the space in debug builds.
|
||||
GenSpaceMangler* _mangler;
|
||||
|
||||
GenSpaceMangler* mangler() { return _mangler; }
|
||||
|
||||
// Allocation helpers (return NULL if full).
|
||||
inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
|
||||
inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
|
||||
|
||||
public:
|
||||
virtual void initialize(MemRegion mr, bool clear_space);
|
||||
|
||||
ContiguousSpace();
|
||||
~ContiguousSpace();
|
||||
|
||||
virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
|
||||
|
||||
// Accessors
|
||||
HeapWord* top() const { return _top; }
|
||||
|
@ -753,15 +765,34 @@ class ContiguousSpace: public CompactibleSpace {
|
|||
void set_saved_mark() { _saved_mark_word = top(); }
|
||||
void reset_saved_mark() { _saved_mark_word = bottom(); }
|
||||
|
||||
virtual void clear();
|
||||
virtual void clear(bool mangle_space);
|
||||
|
||||
WaterMark bottom_mark() { return WaterMark(this, bottom()); }
|
||||
WaterMark top_mark() { return WaterMark(this, top()); }
|
||||
WaterMark saved_mark() { return WaterMark(this, saved_mark_word()); }
|
||||
bool saved_mark_at_top() const { return saved_mark_word() == top(); }
|
||||
|
||||
void mangle_unused_area();
|
||||
void mangle_region(MemRegion mr);
|
||||
// In debug mode mangle (write it with a particular bit
|
||||
// pattern) the unused part of a space.
|
||||
|
||||
// Used to save an address in a space for later use during mangling.
|
||||
void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
|
||||
// Used to save the space's current top for later use during mangling.
|
||||
void set_top_for_allocations() PRODUCT_RETURN;
|
||||
|
||||
// Mangle regions in the space from the current top up to the
|
||||
// previously mangled part of the space.
|
||||
void mangle_unused_area() PRODUCT_RETURN;
|
||||
// Mangle [top, end)
|
||||
void mangle_unused_area_complete() PRODUCT_RETURN;
|
||||
// Mangle the given MemRegion.
|
||||
void mangle_region(MemRegion mr) PRODUCT_RETURN;
|
||||
|
||||
// Do some sparse checking on the area that should have been mangled.
|
||||
void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
|
||||
// Check the complete area that should have been mangled.
|
||||
// This code may be NULL depending on the macro DEBUG_MANGLING.
|
||||
void check_mangled_unused_area_complete() PRODUCT_RETURN;
|
||||
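PRODUCT_RETURN on the mangling checks above is the idiom HotSpot uses to compile debug-only methods out of product builds. Assuming the usual definition (an empty inline body when PRODUCT is defined, a plain declaration otherwise), a minimal sketch looks like this:

#include <cstdio>

// Assumed definition of the idiom: empty body in product builds, out-of-line
// definition in debug builds. The real macro lives in HotSpot's shared headers.
#ifdef PRODUCT
#define PRODUCT_RETURN {}
#else
#define PRODUCT_RETURN
#endif

struct SpaceSketch {
  // Debug-only consistency check; compiles to an empty body when PRODUCT is defined.
  void check_mangled_unused_area_complete() PRODUCT_RETURN;
};

#ifndef PRODUCT
void SpaceSketch::check_mangled_unused_area_complete() {
  std::printf("verifying the mangled area (debug build only)\n");
}
#endif

int main() {
  SpaceSketch s;
  s.check_mangled_unused_area_complete();
  return 0;
}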
|
||||
// Size computations: sizes in bytes.
|
||||
size_t capacity() const { return byte_size(bottom(), end()); }
|
||||
|
@ -956,7 +987,7 @@ class EdenSpace : public ContiguousSpace {
|
|||
void set_soft_end(HeapWord* value) { _soft_end = value; }
|
||||
|
||||
// Override.
|
||||
void clear();
|
||||
void clear(bool mangle_space);
|
||||
|
||||
// Set both the 'hard' and 'soft' limits (_end and _soft_end).
|
||||
void set_end(HeapWord* value) {
|
||||
|
@ -1000,7 +1031,7 @@ class OffsetTableContigSpace: public ContiguousSpace {
|
|||
void set_bottom(HeapWord* value);
|
||||
void set_end(HeapWord* value);
|
||||
|
||||
void clear();
|
||||
void clear(bool mangle_space);
|
||||
|
||||
inline HeapWord* block_start(const void* p) const;
|
||||
|
||||
|
|
|
@ -83,7 +83,7 @@ static bool is_init_with_ea(ciMethod* callee_method,
|
|||
ciMethod* caller_method, Compile* C) {
|
||||
// True when EA is ON and a java constructor is called or
|
||||
// a super constructor is called from an inlined java constructor.
|
||||
return DoEscapeAnalysis && EliminateAllocations &&
|
||||
return C->do_escape_analysis() && EliminateAllocations &&
|
||||
( callee_method->is_initializer() ||
|
||||
(caller_method->is_initializer() &&
|
||||
caller_method != C->method() &&
|
||||
|
|
|
@ -388,6 +388,9 @@
|
|||
product(intx, EliminateAllocationArraySizeLimit, 64, \
|
||||
"Array size (number of elements) limit for scalar replacement") \
|
||||
\
|
||||
product(intx, ValueSearchLimit, 1000, \
|
||||
"Recursion limit in PhaseMacroExpand::value_from_mem_phi") \
|
||||
\
|
||||
product(intx, MaxLabelRootDepth, 1100, \
|
||||
"Maximum times call Label_Root to prevent stack overflow") \
|
||||
\
|
||||
|
|
|
@ -631,61 +631,13 @@ uint CallNode::match_edge(uint idx) const {
|
|||
bool CallNode::may_modify(const TypePtr *addr_t, PhaseTransform *phase) {
|
||||
const TypeOopPtr *adrInst_t = addr_t->isa_oopptr();
|
||||
|
||||
// if not an InstPtr or not an instance type, assume the worst
|
||||
if (adrInst_t == NULL || !adrInst_t->is_known_instance_field()) {
|
||||
// If not an OopPtr or not an instance type, assume the worst.
|
||||
// Note: currently this method is called only for instance types.
|
||||
if (adrInst_t == NULL || !adrInst_t->is_known_instance()) {
|
||||
return true;
|
||||
}
|
||||
Compile *C = phase->C;
|
||||
int offset = adrInst_t->offset();
|
||||
assert(adrInst_t->klass_is_exact() && offset >= 0, "should be valid offset");
|
||||
ciKlass* adr_k = adrInst_t->klass();
|
||||
assert(adr_k->is_loaded() &&
|
||||
adr_k->is_java_klass() &&
|
||||
!adr_k->is_interface(),
|
||||
"only non-abstract classes are expected");
|
||||
|
||||
int base_idx = C->get_alias_index(adrInst_t);
|
||||
int size = BytesPerLong; // If we don't know the size, assume largest.
|
||||
if (adrInst_t->isa_instptr()) {
|
||||
ciField* field = C->alias_type(base_idx)->field();
|
||||
if (field != NULL) {
|
||||
size = field->size_in_bytes();
|
||||
}
|
||||
} else {
|
||||
assert(adrInst_t->isa_aryptr(), "only arrays are expected");
|
||||
size = type2aelembytes(adr_k->as_array_klass()->element_type()->basic_type());
|
||||
}
|
||||
|
||||
ciMethod * meth = is_CallStaticJava() ? as_CallStaticJava()->method() : NULL;
|
||||
BCEscapeAnalyzer *bcea = (meth != NULL) ? meth->get_bcea() : NULL;
|
||||
|
||||
const TypeTuple * d = tf()->domain();
|
||||
for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
|
||||
const Type* t = d->field_at(i);
|
||||
Node *arg = in(i);
|
||||
const Type *at = phase->type(arg);
|
||||
if (at == TypePtr::NULL_PTR || at == Type::TOP)
|
||||
continue; // null can't affect anything
|
||||
|
||||
const TypeOopPtr *at_ptr = at->isa_oopptr();
|
||||
if (!arg->is_top() && (t->isa_oopptr() != NULL ||
|
||||
t->isa_ptr() && at_ptr != NULL)) {
|
||||
assert(at_ptr != NULL, "expecting an OopPtr");
|
||||
ciKlass* at_k = at_ptr->klass();
|
||||
if ((adrInst_t->base() == at_ptr->base()) &&
|
||||
at_k->is_loaded() &&
|
||||
at_k->is_java_klass()) {
|
||||
// If we have found an argument matching addr_t, check if the field
|
||||
// at the specified offset is modified.
|
||||
if ((at_k->is_interface() || adr_k == at_k ||
|
||||
adr_k->is_subclass_of(at_k) && !at_ptr->klass_is_exact()) &&
|
||||
(bcea == NULL ||
|
||||
bcea->is_arg_modified(i - TypeFunc::Parms, offset, size))) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// The instance_id is set only for scalar-replaceable allocations which
|
||||
// are not passed as arguments according to Escape Analysis.
|
||||
return false;
|
||||
}
|
||||
|
||||
|
|
|
@ -713,7 +713,9 @@ PhiNode* PhiNode::split_out_instance(const TypePtr* at, PhaseIterGVN *igvn) cons
|
|||
assert(type() == Type::MEMORY &&
|
||||
(t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ||
|
||||
t->isa_oopptr() && !t->is_oopptr()->is_known_instance() &&
|
||||
t->is_oopptr()->cast_to_instance_id(t_oop->instance_id()) == t_oop),
|
||||
t->is_oopptr()->cast_to_exactness(true)
|
||||
->is_oopptr()->cast_to_ptr_type(t_oop->ptr())
|
||||
->is_oopptr()->cast_to_instance_id(t_oop->instance_id()) == t_oop),
|
||||
"bottom or raw memory required");
|
||||
|
||||
// Check if an appropriate node already exists.
|
||||
|
@ -1089,6 +1091,8 @@ Node* PhiNode::unique_input(PhaseTransform* phase) {
|
|||
if (rc == NULL || phase->type(rc) == Type::TOP)
|
||||
continue; // ignore unreachable control path
|
||||
Node* n = in(i);
|
||||
if (n == NULL)
|
||||
continue;
|
||||
Node* un = n->uncast();
|
||||
if (un == NULL || un == this || phase->type(un) == Type::TOP) {
|
||||
continue; // ignore if top, or in(i) and "this" are in a data cycle
|
||||
|
|
|
@ -583,18 +583,32 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
|
|||
NOT_PRODUCT( verify_graph_edges(); )
|
||||
|
||||
// Perform escape analysis
|
||||
if (_do_escape_analysis)
|
||||
_congraph = new ConnectionGraph(this);
|
||||
if (_congraph != NULL) {
|
||||
NOT_PRODUCT( TracePhase t2("escapeAnalysis", &_t_escapeAnalysis, TimeCompiler); )
|
||||
_congraph->compute_escape();
|
||||
if (failing()) return;
|
||||
if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
|
||||
TracePhase t2("escapeAnalysis", &_t_escapeAnalysis, true);
|
||||
// Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction.
|
||||
PhaseGVN* igvn = initial_gvn();
|
||||
Node* oop_null = igvn->zerocon(T_OBJECT);
|
||||
Node* noop_null = igvn->zerocon(T_NARROWOOP);
|
||||
|
||||
_congraph = new(comp_arena()) ConnectionGraph(this);
|
||||
bool has_non_escaping_obj = _congraph->compute_escape();
|
||||
|
||||
#ifndef PRODUCT
|
||||
if (PrintEscapeAnalysis) {
|
||||
_congraph->dump();
|
||||
}
|
||||
#endif
|
||||
// Cleanup.
|
||||
if (oop_null->outcnt() == 0)
|
||||
igvn->hash_delete(oop_null);
|
||||
if (noop_null->outcnt() == 0)
|
||||
igvn->hash_delete(noop_null);
|
||||
|
||||
if (!has_non_escaping_obj) {
|
||||
_congraph = NULL;
|
||||
}
|
||||
|
||||
if (failing()) return;
|
||||
}
|
||||
// Now optimize
|
||||
Optimize();
|
||||
|
@ -995,9 +1009,14 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
|
|||
int offset = tj->offset();
|
||||
TypePtr::PTR ptr = tj->ptr();
|
||||
|
||||
// Known instance (scalarizable allocation) aliases only with itself.
|
||||
bool is_known_inst = tj->isa_oopptr() != NULL &&
|
||||
tj->is_oopptr()->is_known_instance();
|
||||
|
||||
// Process weird unsafe references.
|
||||
if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
|
||||
assert(InlineUnsafeOps, "indeterminate pointers come only from unsafe ops");
|
||||
assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
|
||||
tj = TypeOopPtr::BOTTOM;
|
||||
ptr = tj->ptr();
|
||||
offset = tj->offset();
|
||||
|
@ -1005,14 +1024,20 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
|
|||
|
||||
// Array pointers need some flattening
|
||||
const TypeAryPtr *ta = tj->isa_aryptr();
|
||||
if( ta && _AliasLevel >= 2 ) {
|
||||
if( ta && is_known_inst ) {
|
||||
if ( offset != Type::OffsetBot &&
|
||||
offset > arrayOopDesc::length_offset_in_bytes() ) {
|
||||
offset = Type::OffsetBot; // Flatten constant access into array body only
|
||||
tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, offset, ta->instance_id());
|
||||
}
|
||||
} else if( ta && _AliasLevel >= 2 ) {
|
||||
// For arrays indexed by constant indices, we flatten the alias
|
||||
// space to include all of the array body. Only the header, klass
|
||||
// and array length can be accessed un-aliased.
|
||||
if( offset != Type::OffsetBot ) {
|
||||
if( ta->const_oop() ) { // methodDataOop or methodOop
|
||||
offset = Type::OffsetBot; // Flatten constant access into array body
|
||||
tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,Type::OffsetBot, ta->instance_id());
|
||||
tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,offset);
|
||||
} else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
|
||||
// range is OK as-is.
|
||||
tj = ta = TypeAryPtr::RANGE;
|
||||
|
@ -1026,29 +1051,29 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
|
|||
ptr = TypePtr::BotPTR;
|
||||
} else { // Random constant offset into array body
|
||||
offset = Type::OffsetBot; // Flatten constant access into array body
|
||||
tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,Type::OffsetBot, ta->instance_id());
|
||||
tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,offset);
|
||||
}
|
||||
}
|
||||
// Arrays of fixed size alias with arrays of unknown size.
|
||||
if (ta->size() != TypeInt::POS) {
|
||||
const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
|
||||
tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,offset, ta->instance_id());
|
||||
tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,offset);
|
||||
}
|
||||
// Arrays of known objects become arrays of unknown objects.
|
||||
if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
|
||||
const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
|
||||
tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset, ta->instance_id());
|
||||
tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
|
||||
}
|
||||
if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
|
||||
const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
|
||||
tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset, ta->instance_id());
|
||||
tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
|
||||
}
|
||||
// Arrays of bytes and of booleans both use 'bastore' and 'baload' so
|
||||
// cannot be distinguished by bytecode alone.
|
||||
if (ta->elem() == TypeInt::BOOL) {
|
||||
const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
|
||||
ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
|
||||
tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset, ta->instance_id());
|
||||
tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset);
|
||||
}
|
||||
// During the 2nd round of IterGVN, NotNull castings are removed.
|
||||
// Make sure the Bottom and NotNull variants alias the same.
|
||||
|
@ -1068,21 +1093,24 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
|
|||
if( ptr == TypePtr::Constant ) {
|
||||
// No constant oop pointers (such as Strings); they alias with
|
||||
// unknown strings.
|
||||
assert(!is_known_inst, "not scalarizable allocation");
|
||||
tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
|
||||
} else if( to->is_known_instance_field() ) {
|
||||
} else if( is_known_inst ) {
|
||||
tj = to; // Keep NotNull and klass_is_exact for instance type
|
||||
} else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
|
||||
// During the 2nd round of IterGVN, NotNull castings are removed.
|
||||
// Make sure the Bottom and NotNull variants alias the same.
|
||||
// Also, make sure exact and non-exact variants alias the same.
|
||||
tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset, to->instance_id());
|
||||
tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
|
||||
}
|
||||
// Canonicalize the holder of this field
|
||||
ciInstanceKlass *k = to->klass()->as_instance_klass();
|
||||
if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
|
||||
// First handle header references such as a LoadKlassNode, even if the
|
||||
// object's klass is unloaded at compile time (4965979).
|
||||
tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset, to->instance_id());
|
||||
if (!is_known_inst) { // Do it only for non-instance types
|
||||
tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset);
|
||||
}
|
||||
} else if (offset < 0 || offset >= k->size_helper() * wordSize) {
|
||||
to = NULL;
|
||||
tj = TypeOopPtr::BOTTOM;
|
||||
|
@ -1090,7 +1118,11 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
|
|||
} else {
|
||||
ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset);
|
||||
if (!k->equals(canonical_holder) || tj->offset() != offset) {
|
||||
tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset, to->instance_id());
|
||||
if( is_known_inst ) {
|
||||
tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, offset, to->instance_id());
|
||||
} else {
|
||||
tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1276,7 +1308,9 @@ Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_cr
|
|||
assert(flat != TypePtr::BOTTOM, "cannot alias-analyze an untyped ptr");
|
||||
if (flat->isa_oopptr() && !flat->isa_klassptr()) {
|
||||
const TypeOopPtr* foop = flat->is_oopptr();
|
||||
const TypePtr* xoop = foop->cast_to_exactness(!foop->klass_is_exact())->is_ptr();
|
||||
// Scalarizable allocations have exact klass always.
|
||||
bool exact = !foop->klass_is_exact() || foop->is_known_instance();
|
||||
const TypePtr* xoop = foop->cast_to_exactness(exact)->is_ptr();
|
||||
assert(foop == flatten_alias_type(xoop), "exactness must not affect alias type");
|
||||
}
|
||||
assert(flat == flatten_alias_type(flat), "exact bit doesn't matter");
|
||||
|
|
File diff suppressed because it is too large
|
@ -178,23 +178,33 @@ public:
|
|||
|
||||
// count of outgoing edges
|
||||
uint edge_count() const { return (_edges == NULL) ? 0 : _edges->length(); }
|
||||
|
||||
// node index of target of outgoing edge "e"
|
||||
uint edge_target(uint e) const;
|
||||
uint edge_target(uint e) const {
|
||||
assert(_edges != NULL, "valid edge index");
|
||||
return (_edges->at(e) >> EdgeShift);
|
||||
}
|
||||
// type of outgoing edge "e"
|
||||
EdgeType edge_type(uint e) const;
|
||||
EdgeType edge_type(uint e) const {
|
||||
assert(_edges != NULL, "valid edge index");
|
||||
return (EdgeType) (_edges->at(e) & EdgeMask);
|
||||
}
|
||||
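The accessors above assume each edge is stored as a single integer that packs the target node index in the high bits and the edge type in the low bits via EdgeShift and EdgeMask. A standalone sketch of that encoding with illustrative constants (the real widths and enumerator names may differ):

#include <cassert>
#include <cstdint>
#include <vector>

// Illustrative packing: 2 low bits for the edge type, the rest for the target index.
enum EdgeType { PointsTo = 0, Deferred = 1, Field = 2 };
static const uint32_t EdgeShift = 2;
static const uint32_t EdgeMask  = (1u << EdgeShift) - 1;

struct EdgeList {
  std::vector<uint32_t> edges;

  // Pack target index and type into one word and append it.
  void add_edge(uint32_t target, EdgeType et) {
    edges.push_back((target << EdgeShift) | et);
  }
  uint32_t edge_target(size_t e) const { return edges[e] >> EdgeShift; }
  EdgeType edge_type(size_t e)  const { return EdgeType(edges[e] & EdgeMask); }
};

int main() {
  EdgeList l;
  l.add_edge(42, Field);
  assert(l.edge_target(0) == 42 && l.edge_type(0) == Field);
  return 0;
}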
|
||||
// add an edge of the specified type pointing to the specified target
|
||||
void add_edge(uint targIdx, EdgeType et);
|
||||
|
||||
// remove an edge of the specified type pointing to the specified target
|
||||
void remove_edge(uint targIdx, EdgeType et);
|
||||
|
||||
#ifndef PRODUCT
|
||||
void dump() const;
|
||||
void dump(bool print_state=true) const;
|
||||
#endif
|
||||
|
||||
};
|
||||
|
||||
class ConnectionGraph: public ResourceObj {
|
||||
private:
|
||||
GrowableArray<PointsToNode>* _nodes; // Connection graph nodes indexed
|
||||
GrowableArray<PointsToNode> _nodes; // Connection graph nodes indexed
|
||||
// by ideal node index.
|
||||
|
||||
Unique_Node_List _delayed_worklist; // Nodes to be processed before
|
||||
|
@ -207,24 +217,22 @@ private:
|
|||
// is still being collected. If false,
|
||||
// no new nodes will be processed.
|
||||
|
||||
bool _has_allocations; // Indicates whether method has any
|
||||
// non-escaping allocations.
|
||||
|
||||
uint _phantom_object; // Index of globally escaping object
|
||||
// that pointer values loaded from
|
||||
// a field which has not been set
|
||||
// are assumed to point to.
|
||||
uint _oop_null; // ConP(#NULL)
|
||||
uint _noop_null; // ConN(#NULL)
|
||||
|
||||
Compile * _compile; // Compile object for current compilation
|
||||
|
||||
// address of an element in _nodes. Used when the element is to be modified
|
||||
PointsToNode *ptnode_adr(uint idx) {
|
||||
if ((uint)_nodes->length() <= idx) {
|
||||
// expand _nodes array
|
||||
PointsToNode dummy = _nodes->at_grow(idx);
|
||||
}
|
||||
return _nodes->adr_at(idx);
|
||||
// Address of an element in _nodes. Used when the element is to be modified
|
||||
PointsToNode *ptnode_adr(uint idx) const {
|
||||
// There should be no new ideal nodes during ConnectionGraph build,
|
||||
// growableArray::adr_at() will assert otherwise.
|
||||
return _nodes.adr_at(idx);
|
||||
}
|
||||
uint nodes_size() const { return _nodes.length(); }
|
||||
|
||||
// Add node to ConnectionGraph.
|
||||
void add_node(Node *n, PointsToNode::NodeType nt, PointsToNode::EscapeState es, bool done);
|
||||
|
@ -307,30 +315,30 @@ private:
|
|||
// Set the escape state of a node
|
||||
void set_escape_state(uint ni, PointsToNode::EscapeState es);
|
||||
|
||||
// Get Compile object for current compilation.
|
||||
Compile *C() const { return _compile; }
|
||||
|
||||
public:
|
||||
ConnectionGraph(Compile *C);
|
||||
|
||||
// Check for non-escaping candidates
|
||||
static bool has_candidates(Compile *C);
|
||||
|
||||
// Compute the escape information
|
||||
void compute_escape();
|
||||
bool compute_escape();
|
||||
|
||||
// escape state of a node
|
||||
PointsToNode::EscapeState escape_state(Node *n, PhaseTransform *phase);
|
||||
// other information we have collected
|
||||
bool is_scalar_replaceable(Node *n) {
|
||||
if (_collecting)
|
||||
if (_collecting || (n->_idx >= nodes_size()))
|
||||
return false;
|
||||
PointsToNode ptn = _nodes->at_grow(n->_idx);
|
||||
return ptn.escape_state() == PointsToNode::NoEscape && ptn._scalar_replaceable;
|
||||
PointsToNode* ptn = ptnode_adr(n->_idx);
|
||||
return ptn->escape_state() == PointsToNode::NoEscape && ptn->_scalar_replaceable;
|
||||
}
|
||||
|
||||
bool hidden_alias(Node *n) {
|
||||
if (_collecting)
|
||||
if (_collecting || (n->_idx >= nodes_size()))
|
||||
return true;
|
||||
PointsToNode ptn = _nodes->at_grow(n->_idx);
|
||||
return (ptn.escape_state() != PointsToNode::NoEscape) || ptn._hidden_alias;
|
||||
PointsToNode* ptn = ptnode_adr(n->_idx);
|
||||
return (ptn->escape_state() != PointsToNode::NoEscape) || ptn->_hidden_alias;
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
|
|
|
@ -473,10 +473,12 @@ void IdealGraphPrinter::visit_node(Node *n, void *param) {
|
|||
print_prop("is_dontcare", "false");
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
Node* old = C->matcher()->find_old_node(node);
|
||||
if (old != NULL) {
|
||||
print_prop("old_node_idx", old->_idx);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
if (node->is_Proj()) {
|
||||
|
|
|
@ -725,6 +725,11 @@ static Node *remove_useless_bool(IfNode *iff, PhaseGVN *phase) {
|
|||
int true_path = phi->is_diamond_phi();
|
||||
if( true_path == 0 ) return NULL;
|
||||
|
||||
// Make sure that iff and the control of the phi are different. This
|
||||
// should really only happen for dead control flow since it requires
|
||||
// an illegal cycle.
|
||||
if (phi->in(0)->in(1)->in(0) == iff) return NULL;
|
||||
|
||||
// phi->region->if_proj->ifnode->bool->cmp
|
||||
BoolNode *bol2 = phi->in(0)->in(1)->in(0)->in(1)->as_Bool();
|
||||
|
||||
|
@ -751,6 +756,7 @@ static Node *remove_useless_bool(IfNode *iff, PhaseGVN *phase) {
|
|||
}
|
||||
|
||||
Node* new_bol = (flip ? phase->transform( bol2->negate(phase) ) : bol2);
|
||||
assert(new_bol != iff->in(1), "must make progress");
|
||||
iff->set_req(1, new_bol);
|
||||
// Intervening diamond probably goes dead
|
||||
phase->C->set_major_progress();
|
||||
|
|
|
@ -322,7 +322,7 @@ Node *Block::select(PhaseCFG *cfg, Node_List &worklist, int *ready_cnt, VectorSe
|
|||
uint choice = 0; // Bigger is most important
|
||||
uint latency = 0; // Bigger is scheduled first
|
||||
uint score = 0; // Bigger is better
|
||||
uint idx; // Index in worklist
|
||||
int idx = -1; // Index in worklist
|
||||
|
||||
for( uint i=0; i<cnt; i++ ) { // Inspect entire worklist
|
||||
// Order in worklist is used to break ties.
|
||||
|
@ -412,9 +412,10 @@ Node *Block::select(PhaseCFG *cfg, Node_List &worklist, int *ready_cnt, VectorSe
|
|||
}
|
||||
} // End of for all ready nodes in worklist
|
||||
|
||||
Node *n = worklist[idx]; // Get the winner
|
||||
assert(idx >= 0, "index should be set");
|
||||
Node *n = worklist[(uint)idx]; // Get the winner
|
||||
|
||||
worklist.map(idx,worklist.pop()); // Compress worklist
|
||||
worklist.map((uint)idx, worklist.pop()); // Compress worklist
|
||||
return n;
|
||||
}
|
||||
|
||||
|
@ -599,7 +600,14 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, Vect
|
|||
assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
|
||||
}
|
||||
}
|
||||
if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire ) {
|
||||
if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire &&
|
||||
n->req() > TypeFunc::Parms ) {
|
||||
// MemBarAcquire could be created without Precedent edge.
|
||||
// del_req() replaces the specified edge with the last input edge
|
||||
// and then removes the last edge. If the specified edge > number of
|
||||
// edges, the last edge will be moved outside of the input edges array
|
||||
// and the edge will be lost. This is why this code should be
|
||||
// executed only when Precedent (== TypeFunc::Parms) edge is present.
|
||||
Node *x = n->in(TypeFunc::Parms);
|
||||
n->del_req(TypeFunc::Parms);
|
||||
n->add_prec(x);
|
||||
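The req() > TypeFunc::Parms guard matters because del_req() is a swap-remove: the last input is copied over the removed slot and the array shrinks by one, so an out-of-range index would silently drop the wrong edge. A small sketch of a swap-remove with the corresponding bounds check, using std::vector as a stand-in for the node's input array:

#include <cassert>
#include <cstddef>
#include <vector>

// Swap-remove: overwrite slot idx with the last element, then drop the last
// element. The assert mirrors the req() > TypeFunc::Parms check above; without
// it, an out-of-range idx would silently lose the final element instead.
template <typename T>
void del_req(std::vector<T>& inputs, size_t idx) {
  assert(idx < inputs.size() && "index must refer to an existing input");
  inputs[idx] = inputs.back();
  inputs.pop_back();
}

int main() {
  std::vector<int> inputs = {10, 20, 30, 40};
  del_req(inputs, 1);                 // removes 20, moves 40 into its slot
  assert(inputs.size() == 3 && inputs[1] == 40);
  return 0;
}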
|
|
|
@ -578,7 +578,8 @@ Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
|
|||
Node *cmov = conditional_move( n );
|
||||
if( cmov ) return cmov;
|
||||
}
|
||||
if( n->is_CFG() || n_op == Op_StorePConditional || n_op == Op_StoreLConditional || n_op == Op_CompareAndSwapI || n_op == Op_CompareAndSwapL ||n_op == Op_CompareAndSwapP) return n;
|
||||
if( n->is_CFG() || n->is_LoadStore() )
|
||||
return n;
|
||||
if( n_op == Op_Opaque1 || // Opaque nodes cannot be mod'd
|
||||
n_op == Op_Opaque2 ) {
|
||||
if( !C->major_progress() ) // If chance of no more loop opts...
|
||||
|
@ -1891,18 +1892,19 @@ void PhaseIdealLoop::clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, N
|
|||
_igvn.hash_delete(use);
|
||||
use->set_req(j, n_clone);
|
||||
_igvn._worklist.push(use);
|
||||
Node* use_c;
|
||||
if (!use->is_Phi()) {
|
||||
Node* use_c = has_ctrl(use) ? get_ctrl(use) : use->in(0);
|
||||
set_ctrl(n_clone, use_c);
|
||||
assert(!loop->is_member(get_loop(use_c)), "should be outside loop");
|
||||
get_loop(use_c)->_body.push(n_clone);
|
||||
use_c = has_ctrl(use) ? get_ctrl(use) : use->in(0);
|
||||
} else {
|
||||
// Use in a phi is considered a use in the associated predecessor block
|
||||
Node *prevbb = use->in(0)->in(j);
|
||||
set_ctrl(n_clone, prevbb);
|
||||
assert(!loop->is_member(get_loop(prevbb)), "should be outside loop");
|
||||
get_loop(prevbb)->_body.push(n_clone);
|
||||
use_c = use->in(0)->in(j);
|
||||
}
|
||||
if (use_c->is_CountedLoop()) {
|
||||
use_c = use_c->in(LoopNode::EntryControl);
|
||||
}
|
||||
set_ctrl(n_clone, use_c);
|
||||
assert(!loop->is_member(get_loop(use_c)), "should be outside loop");
|
||||
get_loop(use_c)->_body.push(n_clone);
|
||||
_igvn.register_new_node_with_optimizer(n_clone);
|
||||
#if !defined(PRODUCT)
|
||||
if (TracePartialPeeling) {
|
||||
|
|
|
@ -194,9 +194,10 @@ void PhaseMacroExpand::eliminate_card_mark(Node *p2x) {
|
|||
}
|
||||
|
||||
// Search for a memory operation for the specified memory slice.
|
||||
static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_mem, Node *alloc) {
|
||||
static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_mem, Node *alloc, PhaseGVN *phase) {
|
||||
Node *orig_mem = mem;
|
||||
Node *alloc_mem = alloc->in(TypeFunc::Memory);
|
||||
const TypeOopPtr *tinst = phase->C->get_adr_type(alias_idx)->isa_oopptr();
|
||||
while (true) {
|
||||
if (mem == alloc_mem || mem == start_mem ) {
|
||||
return mem; // hit one of our sentinals
|
||||
|
@ -208,7 +209,13 @@ static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_me
|
|||
// already know that the object is safe to eliminate.
|
||||
if (in->is_Initialize() && in->as_Initialize()->allocation() == alloc) {
|
||||
return in;
|
||||
} else if (in->is_Call() || in->is_MemBar()) {
|
||||
} else if (in->is_Call()) {
|
||||
CallNode *call = in->as_Call();
|
||||
if (!call->may_modify(tinst, phase)) {
|
||||
mem = call->in(TypeFunc::Memory);
|
||||
}
|
||||
mem = in->in(TypeFunc::Memory);
|
||||
} else if (in->is_MemBar()) {
|
||||
mem = in->in(TypeFunc::Memory);
|
||||
} else {
|
||||
assert(false, "unexpected projection");
|
||||
|
@ -231,8 +238,7 @@ static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_me
|
|||
} else {
|
||||
return mem;
|
||||
}
|
||||
if (mem == orig_mem)
|
||||
return mem;
|
||||
assert(mem != orig_mem, "dead memory loop");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -241,27 +247,50 @@ static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_me
|
|||
// on the input paths.
|
||||
// Note: this function is recursive, its depth is limited by the "level" argument
|
||||
// Returns the computed Phi, or NULL if it cannot compute it.
|
||||
Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *phi_type, const TypeOopPtr *adr_t, Node *alloc, int level) {
|
||||
|
||||
if (level <= 0) {
|
||||
return NULL;
|
||||
}
|
||||
Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *phi_type, const TypeOopPtr *adr_t, Node *alloc, Node_Stack *value_phis, int level) {
|
||||
assert(mem->is_Phi(), "sanity");
|
||||
int alias_idx = C->get_alias_index(adr_t);
|
||||
int offset = adr_t->offset();
|
||||
int instance_id = adr_t->instance_id();
|
||||
|
||||
// Check if an appropriate value phi already exists.
|
||||
Node* region = mem->in(0);
|
||||
for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) {
|
||||
Node* phi = region->fast_out(k);
|
||||
if (phi->is_Phi() && phi != mem &&
|
||||
phi->as_Phi()->is_same_inst_field(phi_type, instance_id, alias_idx, offset)) {
|
||||
return phi;
|
||||
}
|
||||
}
|
||||
// Check if an appropriate new value phi already exists.
|
||||
Node* new_phi = NULL;
|
||||
uint size = value_phis->size();
|
||||
for (uint i=0; i < size; i++) {
|
||||
if ( mem->_idx == value_phis->index_at(i) ) {
|
||||
return value_phis->node_at(i);
|
||||
}
|
||||
}
|
||||
|
||||
if (level <= 0) {
|
||||
return NULL; // Give up: phi tree too deep
|
||||
}
|
||||
Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
|
||||
Node *alloc_mem = alloc->in(TypeFunc::Memory);
|
||||
|
||||
uint length = mem->req();
|
||||
GrowableArray <Node *> values(length, length, NULL);
|
||||
|
||||
// create a new Phi for the value
|
||||
PhiNode *phi = new (C, length) PhiNode(mem->in(0), phi_type, NULL, instance_id, alias_idx, offset);
|
||||
transform_later(phi);
|
||||
value_phis->push(phi, mem->_idx);
|
||||
|
||||
for (uint j = 1; j < length; j++) {
|
||||
Node *in = mem->in(j);
|
||||
if (in == NULL || in->is_top()) {
|
||||
values.at_put(j, in);
|
||||
} else {
|
||||
Node *val = scan_mem_chain(in, alias_idx, offset, start_mem, alloc);
|
||||
Node *val = scan_mem_chain(in, alias_idx, offset, start_mem, alloc, &_igvn);
|
||||
if (val == start_mem || val == alloc_mem) {
|
||||
// hit a sentinel, return appropriate 0 value
|
||||
values.at_put(j, _igvn.zerocon(ft));
|
||||
|
@ -280,33 +309,18 @@ Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *
|
|||
} else if(val->is_Proj() && val->in(0) == alloc) {
|
||||
values.at_put(j, _igvn.zerocon(ft));
|
||||
} else if (val->is_Phi()) {
|
||||
// Check if an appropriate node already exists.
|
||||
Node* region = val->in(0);
|
||||
Node* old_phi = NULL;
|
||||
for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) {
|
||||
Node* phi = region->fast_out(k);
|
||||
if (phi->is_Phi() && phi != val &&
|
||||
phi->as_Phi()->is_same_inst_field(phi_type, instance_id, alias_idx, offset)) {
|
||||
old_phi = phi;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (old_phi == NULL) {
|
||||
val = value_from_mem_phi(val, ft, phi_type, adr_t, alloc, level-1);
|
||||
if (val == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
values.at_put(j, val);
|
||||
} else {
|
||||
values.at_put(j, old_phi);
|
||||
val = value_from_mem_phi(val, ft, phi_type, adr_t, alloc, value_phis, level-1);
|
||||
if (val == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
values.at_put(j, val);
|
||||
} else {
|
||||
return NULL; // unknown node on this path
|
||||
assert(false, "unknown node on this path");
|
||||
return NULL; // unknown node on this path
|
||||
}
|
||||
}
|
||||
}
|
||||
// create a new Phi for the value
|
||||
PhiNode *phi = new (C, length) PhiNode(mem->in(0), phi_type, NULL, instance_id, alias_idx, offset);
|
||||
// Set Phi's inputs
|
||||
for (uint j = 1; j < length; j++) {
|
||||
if (values.at(j) == mem) {
|
||||
phi->init_req(j, phi);
|
||||
|
@ -314,7 +328,6 @@ Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *
|
|||
phi->init_req(j, values.at(j));
|
||||
}
|
||||
}
|
||||
transform_later(phi);
|
||||
return phi;
|
||||
}
|
||||
|
||||
|
@ -329,7 +342,8 @@ Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, BasicType ft, const Type
|
|||
Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
|
||||
Node *alloc_ctrl = alloc->in(TypeFunc::Control);
|
||||
Node *alloc_mem = alloc->in(TypeFunc::Memory);
|
||||
VectorSet visited(Thread::current()->resource_area());
|
||||
Arena *a = Thread::current()->resource_area();
|
||||
VectorSet visited(a);
|
||||
|
||||
|
||||
bool done = sfpt_mem == alloc_mem;
|
||||
|
@ -338,7 +352,7 @@ Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, BasicType ft, const Type
|
|||
if (visited.test_set(mem->_idx)) {
|
||||
return NULL; // found a loop, give up
|
||||
}
|
||||
mem = scan_mem_chain(mem, alias_idx, offset, start_mem, alloc);
|
||||
mem = scan_mem_chain(mem, alias_idx, offset, start_mem, alloc, &_igvn);
|
||||
if (mem == start_mem || mem == alloc_mem) {
|
||||
done = true; // hit a sentinel, return appropriate 0 value
|
||||
} else if (mem->is_Initialize()) {
|
||||
|
@ -362,7 +376,7 @@ Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, BasicType ft, const Type
|
|||
Node *unique_input = NULL;
|
||||
Node *top = C->top();
|
||||
for (uint i = 1; i < mem->req(); i++) {
|
||||
Node *n = scan_mem_chain(mem->in(i), alias_idx, offset, start_mem, alloc);
|
||||
Node *n = scan_mem_chain(mem->in(i), alias_idx, offset, start_mem, alloc, &_igvn);
|
||||
if (n == NULL || n == top || n == mem) {
|
||||
continue;
|
||||
} else if (unique_input == NULL) {
|
||||
|
@ -389,9 +403,18 @@ Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, BasicType ft, const Type
|
|||
return mem->in(MemNode::ValueIn);
|
||||
} else if (mem->is_Phi()) {
|
||||
// attempt to produce a Phi reflecting the values on the input paths of the Phi
|
||||
Node * phi = value_from_mem_phi(mem, ft, ftype, adr_t, alloc, 8);
|
||||
Node_Stack value_phis(a, 8);
|
||||
Node * phi = value_from_mem_phi(mem, ft, ftype, adr_t, alloc, &value_phis, ValueSearchLimit);
|
||||
if (phi != NULL) {
|
||||
return phi;
|
||||
} else {
|
||||
// Kill all new Phis
|
||||
while(value_phis.is_nonempty()) {
|
||||
Node* n = value_phis.node();
|
||||
_igvn.hash_delete(n);
|
||||
_igvn.subsume_node(n, C->top());
|
||||
value_phis.pop();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
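The Node_Stack of value phis above both memoizes phis already created for a given memory node (so the bounded recursion does not rebuild them) and records every speculatively created phi so all of them can be discarded if the walk ultimately fails. A simplified, self-contained sketch of that create-memoize-rollback pattern, using hypothetical types rather than the real IR classes:

#include <cstddef>
#include <string>
#include <utility>
#include <vector>

// Hypothetical placeholder for a node created speculatively during the search.
struct ValueNode { std::string label; };

struct ValuePhiBuilder {
  // (memory-node id, created value) pairs, in creation order.
  std::vector<std::pair<int, ValueNode*> > created;

  ValueNode* find(int mem_id) {
    for (size_t i = 0; i < created.size(); ++i)
      if (created[i].first == mem_id) return created[i].second;   // memoized
    return NULL;
  }

  ValueNode* create(int mem_id, const std::string& label) {
    ValueNode* n = new ValueNode();
    n->label = label;
    created.push_back(std::make_pair(mem_id, n));                 // remember for rollback
    return n;
  }

  // If the overall search fails, throw away everything built speculatively.
  void rollback() {
    for (size_t i = 0; i < created.size(); ++i) delete created[i].second;
    created.clear();
  }
};

int main() {
  ValuePhiBuilder b;
  ValueNode* phi = b.create(7, "phi-for-mem-7");
  bool reused = (b.find(7) == phi);    // a second visit reuses the memoized phi
  b.rollback();                        // pretend the walk failed: kill all new phis
  return reused ? 0 : 1;
}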
|
@ -448,7 +471,7 @@ bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArr
|
|||
Node* n = use->fast_out(k);
|
||||
if (!n->is_Store() && n->Opcode() != Op_CastP2X) {
|
||||
DEBUG_ONLY(disq_node = n;)
|
||||
if (n->is_Load()) {
|
||||
if (n->is_Load() || n->is_LoadStore()) {
|
||||
NOT_PRODUCT(fail_eliminate = "Field load";)
|
||||
} else {
|
||||
NOT_PRODUCT(fail_eliminate = "Not store field referrence";)
|
||||
|
|
|
@ -79,7 +79,7 @@ private:
const TypeFunc* slow_call_type,
address slow_call_address);
Node *value_from_mem(Node *mem, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, Node *alloc);
Node *value_from_mem_phi(Node *mem, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, Node *alloc, int level);
Node *value_from_mem_phi(Node *mem, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, Node *alloc, Node_Stack *value_phis, int level);

bool eliminate_allocate_node(AllocateNode *alloc);
bool can_eliminate_allocation(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints);

@ -94,14 +94,19 @@ Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr,
if (tinst == NULL || !tinst->is_known_instance_field())
return mchain; // don't try to optimize non-instance types
uint instance_id = tinst->instance_id();
Node *start_mem = phase->C->start()->proj_out(TypeFunc::Memory);
Node *prev = NULL;
Node *result = mchain;
while (prev != result) {
prev = result;
if (result == start_mem)
break; // hit one of our sentinals
// skip over a call which does not affect this memory slice
if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
Node *proj_in = result->in(0);
if (proj_in->is_Call()) {
if (proj_in->is_Allocate() && proj_in->_idx == instance_id) {
break; // hit one of our sentinals
} else if (proj_in->is_Call()) {
CallNode *call = proj_in->as_Call();
if (!call->may_modify(t_adr, phase)) {
result = call->in(TypeFunc::Memory);

@ -115,6 +120,8 @@ Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr,
}
} else if (proj_in->is_MemBar()) {
result = proj_in->in(TypeFunc::Memory);
} else {
assert(false, "unexpected projection");
}
} else if (result->is_MergeMem()) {
result = step_through_mergemem(phase, result->as_MergeMem(), t_adr, NULL, tty);

@ -135,7 +142,9 @@ Node *MemNode::optimize_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGV
const TypePtr *t = mphi->adr_type();
if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ||
t->isa_oopptr() && !t->is_oopptr()->is_known_instance() &&
t->is_oopptr()->cast_to_instance_id(t_oop->instance_id()) == t_oop) {
t->is_oopptr()->cast_to_exactness(true)
->is_oopptr()->cast_to_ptr_type(t_oop->ptr())
->is_oopptr()->cast_to_instance_id(t_oop->instance_id()) == t_oop) {
// clone the Phi with our address type
result = mphi->split_out_instance(t_adr, igvn);
} else {

@ -607,6 +607,7 @@ public:
};

//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
public:
enum {

@ -1399,6 +1399,10 @@ public:
uint index() const {
return _inode_top->indx;
}
uint index_at(uint i) const {
assert(_inodes + i <= _inode_top, "in range");
return _inodes[i].indx;
}
void set_node(Node *n) {
_inode_top->node = n;
}

@ -479,7 +479,7 @@ class Parse : public GraphKit {
float branch_prediction(float &cnt, BoolTest::mask btest, int target_bci);
bool seems_never_taken(float prob);

void do_ifnull(BoolTest::mask btest);
void do_ifnull(BoolTest::mask btest, Node* c);
void do_if(BoolTest::mask btest, Node* c);
void repush_if_args();
void adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,

@ -875,6 +875,8 @@ bool Parse::seems_never_taken(float prob) {
return prob < PROB_MIN;
}

//-------------------------------repush_if_args--------------------------------
// Push arguments of an "if" bytecode back onto the stack by adjusting _sp.
inline void Parse::repush_if_args() {
#ifndef PRODUCT
if (PrintOpto && WizardMode) {

@ -892,7 +894,7 @@ inline void Parse::repush_if_args() {
}

//----------------------------------do_ifnull----------------------------------
void Parse::do_ifnull(BoolTest::mask btest) {
void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
int target_bci = iter().get_dest();

Block* branch_block = successor_for_bci(target_bci);

@ -904,7 +906,7 @@ void Parse::do_ifnull(BoolTest::mask btest) {
// (An earlier version of do_ifnull omitted this trap for OSR methods.)
#ifndef PRODUCT
if (PrintOpto && Verbose)
tty->print_cr("Never-taken backedge stops compilation at bci %d",bci());
tty->print_cr("Never-taken edge stops compilation at bci %d",bci());
#endif
repush_if_args(); // to gather stats on loop
// We need to mark this branch as taken so that if we recompile we will

@ -923,18 +925,7 @@ void Parse::do_ifnull(BoolTest::mask btest) {
return;
}

// If this is a backwards branch in the bytecodes, add Safepoint
maybe_add_safepoint(target_bci);

explicit_null_checks_inserted++;
Node* a = null();
Node* b = pop();
Node* c = _gvn.transform( new (C, 3) CmpPNode(b, a) );

// Make a cast-away-nullness that is control dependent on the test
const Type *t = _gvn.type(b);
const Type *t_not_null = t->join(TypePtr::NOTNULL);
Node *cast = new (C, 2) CastPPNode(b,t_not_null);

// Generate real control flow
Node *tst = _gvn.transform( new (C, 2) BoolNode( c, btest ) );

@ -996,7 +987,7 @@ void Parse::do_if(BoolTest::mask btest, Node* c) {
if (prob == PROB_UNKNOWN) {
#ifndef PRODUCT
if (PrintOpto && Verbose)
tty->print_cr("Never-taken backedge stops compilation at bci %d",bci());
tty->print_cr("Never-taken edge stops compilation at bci %d",bci());
#endif
repush_if_args(); // to gather stats on loop
// We need to mark this branch as taken so that if we recompile we will

@ -2100,11 +2091,15 @@ void Parse::do_one_bytecode() {
break;
}

case Bytecodes::_ifnull:
do_ifnull(BoolTest::eq);
break;
case Bytecodes::_ifnonnull:
do_ifnull(BoolTest::ne);
case Bytecodes::_ifnull: btest = BoolTest::eq; goto handle_if_null;
case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
handle_if_null:
// If this is a backwards branch in the bytecodes, add Safepoint
maybe_add_safepoint(iter().get_dest());
a = null();
b = pop();
c = _gvn.transform( new (C, 3) CmpPNode(b, a) );
do_ifnull(btest, c);
break;

case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;

@ -1196,8 +1196,10 @@ void SuperWord::construct_bb() {
Node *n = lp()->fast_out(i);
if (in_bb(n) && (n->is_Phi() && n->bottom_type() == Type::MEMORY)) {
Node* n_tail = n->in(LoopNode::LoopBackControl);
_mem_slice_head.push(n);
_mem_slice_tail.push(n_tail);
if (n_tail != n->in(LoopNode::EntryControl)) {
_mem_slice_head.push(n);
_mem_slice_tail.push(n_tail);
}
}
}

@ -2218,7 +2218,7 @@ const Type *TypeOopPtr::cast_to_ptr_type(PTR ptr) const {
return make(ptr, _offset);
}

//-----------------------------cast_to_instance-------------------------------
//-----------------------------cast_to_instance_id----------------------------
const TypeOopPtr *TypeOopPtr::cast_to_instance_id(int instance_id) const {
// There are no instances of a general oop.
// Return self unchanged.

@ -2610,8 +2610,7 @@ const TypeInstPtr *TypeInstPtr::make(PTR ptr,
// Ptr is never Null
assert( ptr != Null, "NULL pointers are not typed" );

if ( instance_id > 0 )
xk = true; // instances are always exactly typed
assert(instance_id <= 0 || xk || !UseExactTypes, "instances are always exactly typed");
if (!UseExactTypes) xk = false;
if (ptr == Constant) {
// Note: This case includes meta-object constants, such as methods.

@ -2650,16 +2649,10 @@ const Type *TypeInstPtr::cast_to_exactness(bool klass_is_exact) const {
return make(ptr(), klass(), klass_is_exact, const_oop(), _offset, _instance_id);
}

//-----------------------------cast_to_instance-------------------------------
//-----------------------------cast_to_instance_id----------------------------
const TypeOopPtr *TypeInstPtr::cast_to_instance_id(int instance_id) const {
if( instance_id == _instance_id ) return this;
bool exact = _klass_is_exact;
PTR ptr_t = _ptr;
if ( instance_id > 0 ) { // instances are always exactly typed
if (UseExactTypes) exact = true;
ptr_t = NotNull;
}
return make(ptr_t, klass(), exact, const_oop(), _offset, instance_id);
return make(_ptr, klass(), _klass_is_exact, const_oop(), _offset, instance_id);
}

//------------------------------xmeet_unloaded---------------------------------

@ -2899,6 +2892,7 @@ const Type *TypeInstPtr::xmeet( const Type *t ) const {
xk = above_centerline(ptr) ? tinst_xk : false;
// Watch out for Constant vs. AnyNull interface.
if (ptr == Constant) ptr = NotNull; // forget it was a constant
instance_id = InstanceBot;
}
ciObject* o = NULL; // the Constant value, if any
if (ptr == Constant) {

@ -2989,6 +2983,7 @@ const Type *TypeInstPtr::xmeet( const Type *t ) const {
// class hierarchy - which means we have to fall to at least NotNull.
if( ptr == TopPTR || ptr == AnyNull || ptr == Constant )
ptr = NotNull;
instance_id = InstanceBot;

// Now we find the LCA of Java classes
ciKlass* k = this_klass->least_common_ancestor(tinst_klass);

@ -3101,8 +3096,7 @@ const TypeAryPtr *TypeAryPtr::make( PTR ptr, const TypeAry *ary, ciKlass* k, boo
assert(!(k == NULL && ary->_elem->isa_int()),
"integral arrays must be pre-equipped with a class");
if (!xk) xk = ary->ary_must_be_exact();
if ( instance_id > 0 )
xk = true; // instances are always exactly typed
assert(instance_id <= 0 || xk || !UseExactTypes, "instances are always exactly typed");
if (!UseExactTypes) xk = (ptr == Constant);
return (TypeAryPtr*)(new TypeAryPtr(ptr, NULL, ary, k, xk, offset, instance_id))->hashcons();
}

@ -3113,8 +3107,7 @@ const TypeAryPtr *TypeAryPtr::make( PTR ptr, ciObject* o, const TypeAry *ary, ci
"integral arrays must be pre-equipped with a class");
assert( (ptr==Constant && o) || (ptr!=Constant && !o), "" );
if (!xk) xk = (o != NULL) || ary->ary_must_be_exact();
if ( instance_id > 0 )
xk = true; // instances are always exactly typed
assert(instance_id <= 0 || xk || !UseExactTypes, "instances are always exactly typed");
if (!UseExactTypes) xk = (ptr == Constant);
return (TypeAryPtr*)(new TypeAryPtr(ptr, o, ary, k, xk, offset, instance_id))->hashcons();
}

@ -3134,16 +3127,10 @@ const Type *TypeAryPtr::cast_to_exactness(bool klass_is_exact) const {
return make(ptr(), const_oop(), _ary, klass(), klass_is_exact, _offset, _instance_id);
}

//-----------------------------cast_to_instance-------------------------------
//-----------------------------cast_to_instance_id----------------------------
const TypeOopPtr *TypeAryPtr::cast_to_instance_id(int instance_id) const {
if( instance_id == _instance_id ) return this;
bool exact = _klass_is_exact;
PTR ptr_t = _ptr;
if ( instance_id > 0 ) { // instances are always exactly typed
if (UseExactTypes) exact = true;
ptr_t = NotNull;
}
return make(ptr_t, const_oop(), _ary, klass(), exact, _offset, instance_id);
return make(_ptr, const_oop(), _ary, klass(), _klass_is_exact, _offset, instance_id);
}

//-----------------------------narrow_size_type-------------------------------

@ -3300,6 +3287,7 @@ const Type *TypeAryPtr::xmeet( const Type *t ) const {
} else {
// Something like byte[int+] meets char[int+].
// This must fall to bottom, not (int[-128..65535])[int+].
instance_id = InstanceBot;
tary = TypeAry::make(Type::BOTTOM, tary->_size);
}
}

@ -3316,6 +3304,7 @@ const Type *TypeAryPtr::xmeet( const Type *t ) const {
if( tap->const_oop() != NULL && !o->equals(tap->const_oop()) ) {
ptr = NotNull;
o = NULL;
instance_id = InstanceBot;
}
} else if( above_centerline(_ptr) ) {
o = tap->const_oop();

@ -1201,7 +1201,8 @@ void Arguments::set_ergonomics_flags() {
// by ergonomics.
if (MaxHeapSize <= max_heap_for_compressed_oops()) {
if (FLAG_IS_DEFAULT(UseCompressedOops)) {
FLAG_SET_ERGO(bool, UseCompressedOops, true);
// Turn off until bug is fixed.
// FLAG_SET_ERGO(bool, UseCompressedOops, true);
}
} else {
if (UseCompressedOops && !FLAG_IS_DEFAULT(UseCompressedOops)) {

@ -2495,6 +2496,9 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
if (match_option(option, "-XX:+PrintVMOptions", &tail)) {
PrintVMOptions = true;
}
if (match_option(option, "-XX:-PrintVMOptions", &tail)) {
PrintVMOptions = false;
}
}

// Parse default .hotspotrc settings file

@ -589,9 +589,15 @@ class CommandLineFlags {
develop(bool, ZapJNIHandleArea, trueInDebug, \
"Zap freed JNI handle space with 0xFEFEFEFE") \
\
develop(bool, ZapUnusedHeapArea, false, \
develop(bool, ZapUnusedHeapArea, trueInDebug, \
"Zap unused heap space with 0xBAADBABE") \
\
develop(bool, TraceZapUnusedHeapArea, false, \
"Trace zapping of unused heap space") \
\
develop(bool, CheckZapUnusedHeapArea, false, \
"Check zapping of unused heap space") \
\
develop(bool, PrintVMMessages, true, \
"Print vm messages on console") \
\

@ -217,6 +217,7 @@ static const char* property_counters_ss[] = {
"java.class.path",
"java.endorsed.dirs",
"java.ext.dirs",
"java.version",
"java.home",
NULL
};

@ -97,8 +97,12 @@ const int SerializePageShiftCount = 3;
// object size.
class HeapWord {
friend class VMStructs;
private:
private:
char* i;
#ifdef ASSERT
public:
char* value() { return i; }
#endif
};

// HeapWordSize must be 2^LogHeapWordSize.