Jesper Wilhelmsson 2014-11-13 12:29:09 +01:00
commit c72d5c48e2
53 changed files with 565 additions and 1378 deletions

View file

@@ -64,8 +64,8 @@ public class DefNewGeneration extends Generation {
}
// Accessing spaces
- public EdenSpace eden() {
-   return (EdenSpace) VMObjectFactory.newObject(EdenSpace.class, edenSpaceField.getValue(addr));
+ public ContiguousSpace eden() {
+   return (ContiguousSpace) VMObjectFactory.newObject(ContiguousSpace.class, edenSpaceField.getValue(addr));
}
public ContiguousSpace from() {

View file

@@ -3513,7 +3513,7 @@ void TemplateTable::_new() {
Rtags = R3_ARG1,
Rindex = R5_ARG3;
- const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
+ const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc();
// --------------------------------------------------------------------------
// Check if fast case is possible.

View file

@@ -3196,7 +3196,7 @@ void MacroAssembler::eden_allocate(
assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size");
assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");
- if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+ if (!Universe::heap()->supports_inline_contig_alloc()) {
// No allocation in the shared eden.
ba(slow_case);
delayed()->nop();
@@ -3331,7 +3331,7 @@ void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case
assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */);
Label do_refill, discard_tlab;
- if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+ if (!Universe::heap()->supports_inline_contig_alloc()) {
// No allocation in the shared eden.
ba(slow_case);
delayed()->nop();

View file

@@ -3309,7 +3309,7 @@ void TemplateTable::_new() {
// (creates a new TLAB, etc.)
const bool allow_shared_alloc =
- Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
+ Universe::heap()->supports_inline_contig_alloc();
if(UseTLAB) {
Register RoldTopValue = RallocatedObject;

View file

@@ -2946,7 +2946,7 @@ void MacroAssembler::eden_allocate(Register obj,
Label& slow_case) {
assert(obj == rax, "obj must be in rax, for cmpxchg");
assert_different_registers(obj, var_size_in_bytes, t1);
- if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+ if (!Universe::heap()->supports_inline_contig_alloc()) {
jmp(slow_case);
} else {
Register end = t1;
@@ -4419,7 +4419,7 @@ Register MacroAssembler::tlab_refill(Label& retry,
assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx);
Label do_refill, discard_tlab;
- if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+ if (!Universe::heap()->supports_inline_contig_alloc()) {
// No allocation in the shared eden.
jmp(slow_case);
}

View file

@@ -3214,7 +3214,7 @@ void TemplateTable::_new() {
// (creates a new TLAB, etc.)
const bool allow_shared_alloc =
- Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
+ Universe::heap()->supports_inline_contig_alloc();
const Register thread = rcx;
if (UseTLAB || allow_shared_alloc) {

View file

@@ -3269,7 +3269,7 @@ void TemplateTable::_new() {
// (creates a new TLAB, etc.)
const bool allow_shared_alloc =
- Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
+ Universe::heap()->supports_inline_contig_alloc();
if (UseTLAB) {
__ movptr(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));

View file

@@ -2608,7 +2608,10 @@ void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
if (UseLargePages) {
- Solaris::setup_large_pages(addr, bytes, alignment_hint);
+ size_t page_size = Solaris::page_size_for_alignment(alignment_hint);
+ if (page_size > (size_t) vm_page_size()) {
+   Solaris::setup_large_pages(addr, bytes, page_size);
+ }
}
}
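A note on the new logic above: realignment now switches to large pages only when the alignment hint corresponds to a page size bigger than the default page size. The standalone sketch below (C++11) illustrates that selection step; the hard-coded page-size list and the helper find_page_size_for_alignment are invented stand-ins, not the Solaris::page_size_for_alignment() implementation.

    #include <cstddef>
    #include <cstdio>

    // Invented stand-ins for the page sizes a platform might support.
    static const size_t k_page_sizes[] = { 4 * 1024, 2 * 1024 * 1024, 256 * 1024 * 1024 };
    static const size_t k_default_page_size = 4 * 1024;

    // Pick the largest supported page size that evenly divides the alignment hint.
    static size_t find_page_size_for_alignment(size_t alignment) {
      size_t best = 0;
      for (size_t ps : k_page_sizes) {
        if (alignment % ps == 0 && ps > best) best = ps;
      }
      return best;
    }

    int main() {
      size_t alignment_hint = 2 * 1024 * 1024;
      size_t page_size = find_page_size_for_alignment(alignment_hint);
      if (page_size > k_default_page_size) {
        // Only in this case would the patched code call setup_large_pages(addr, bytes, page_size).
        std::printf("would realign with %zu-byte pages\n", page_size);
      } else {
        std::printf("no large-page realignment for hint %zu\n", alignment_hint);
      }
      return 0;
    }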

View file

@@ -31,6 +31,11 @@
// Implementation of class OrderAccess.
// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
static inline void compiler_barrier() {
__asm__ volatile ("" : : : "memory");
}
inline void OrderAccess::loadload() { acquire(); }
inline void OrderAccess::storestore() { release(); }
inline void OrderAccess::loadstore() { acquire(); }
@@ -46,9 +51,7 @@ inline void OrderAccess::acquire() {
}
inline void OrderAccess::release() {
- // Avoid hitting the same cache-line from
- // different threads.
- volatile jint local_dummy = 0;
+ compiler_barrier();
}
inline void OrderAccess::fence() {
@@ -62,34 +65,34 @@ inline void OrderAccess::fence() {
}
}
- inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { return *p; }
+ inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { jbyte v = *p; compiler_barrier(); return v; }
- inline jshort OrderAccess::load_acquire(volatile jshort* p) { return *p; }
+ inline jshort OrderAccess::load_acquire(volatile jshort* p) { jshort v = *p; compiler_barrier(); return v; }
- inline jint OrderAccess::load_acquire(volatile jint* p) { return *p; }
+ inline jint OrderAccess::load_acquire(volatile jint* p) { jint v = *p; compiler_barrier(); return v; }
- inline jlong OrderAccess::load_acquire(volatile jlong* p) { return Atomic::load(p); }
+ inline jlong OrderAccess::load_acquire(volatile jlong* p) { jlong v = Atomic::load(p); compiler_barrier(); return v; }
- inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { return *p; }
+ inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { jubyte v = *p; compiler_barrier(); return v; }
- inline jushort OrderAccess::load_acquire(volatile jushort* p) { return *p; }
+ inline jushort OrderAccess::load_acquire(volatile jushort* p) { jushort v = *p; compiler_barrier(); return v; }
- inline juint OrderAccess::load_acquire(volatile juint* p) { return *p; }
+ inline juint OrderAccess::load_acquire(volatile juint* p) { juint v = *p; compiler_barrier(); return v; }
- inline julong OrderAccess::load_acquire(volatile julong* p) { return Atomic::load((volatile jlong*)p); }
+ inline julong OrderAccess::load_acquire(volatile julong* p) { julong v = Atomic::load((volatile jlong*)p); compiler_barrier(); return v; }
- inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { return *p; }
+ inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { jfloat v = *p; compiler_barrier(); return v; }
- inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { return jdouble_cast(Atomic::load((volatile jlong*)p)); }
+ inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { jdouble v = jdouble_cast(Atomic::load((volatile jlong*)p)); compiler_barrier(); return v; }
- inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) { return *p; }
+ inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) { intptr_t v = *p; compiler_barrier(); return v; }
- inline void* OrderAccess::load_ptr_acquire(volatile void* p) { return *(void* volatile *)p; }
+ inline void* OrderAccess::load_ptr_acquire(volatile void* p) { void* v = *(void* volatile *)p; compiler_barrier(); return v; }
- inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; }
+ inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { void* v = *(void* const volatile *)p; compiler_barrier(); return v; }
- inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { *p = v; }
+ inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { compiler_barrier(); *p = v; }
- inline void OrderAccess::release_store(volatile jshort* p, jshort v) { *p = v; }
+ inline void OrderAccess::release_store(volatile jshort* p, jshort v) { compiler_barrier(); *p = v; }
- inline void OrderAccess::release_store(volatile jint* p, jint v) { *p = v; }
+ inline void OrderAccess::release_store(volatile jint* p, jint v) { compiler_barrier(); *p = v; }
- inline void OrderAccess::release_store(volatile jlong* p, jlong v) { Atomic::store(v, p); }
+ inline void OrderAccess::release_store(volatile jlong* p, jlong v) { compiler_barrier(); Atomic::store(v, p); }
- inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { *p = v; }
+ inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { compiler_barrier(); *p = v; }
- inline void OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
+ inline void OrderAccess::release_store(volatile jushort* p, jushort v) { compiler_barrier(); *p = v; }
- inline void OrderAccess::release_store(volatile juint* p, juint v) { *p = v; }
+ inline void OrderAccess::release_store(volatile juint* p, juint v) { compiler_barrier(); *p = v; }
- inline void OrderAccess::release_store(volatile julong* p, julong v) { Atomic::store((jlong)v, (volatile jlong*)p); }
+ inline void OrderAccess::release_store(volatile julong* p, julong v) { compiler_barrier(); Atomic::store((jlong)v, (volatile jlong*)p); }
- inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { *p = v; }
+ inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { compiler_barrier(); *p = v; }
inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { release_store((volatile jlong *)p, jlong_cast(v)); }
- inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; }
+ inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { compiler_barrier(); *p = v; }
- inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { *(void* volatile *)p = v; }
+ inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { compiler_barrier(); *(void* volatile *)p = v; }
inline void OrderAccess::store_fence(jbyte* p, jbyte v) {
__asm__ volatile ( "xchgb (%2),%0"
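A note on the construct added above: an empty asm statement with a "memory" clobber is a pure compiler barrier; it emits no instruction but keeps the compiler from reordering or caching memory accesses across it, which is enough for acquire/release ordering on x86's strong hardware memory model. The sketch below (compile with -pthread) shows the same publish/consume idea outside HotSpot; the ready/payload variables are invented for illustration, and portable C++ would normally use std::atomic rather than relying on this x86-specific reasoning.

    #include <pthread.h>
    #include <cstdio>

    // Same construct the patch introduces: no instruction, only a compiler fence.
    static inline void compiler_barrier() {
      __asm__ volatile ("" : : : "memory");
    }

    static int payload = 0;            // data being published
    static volatile int ready = 0;     // publication flag

    static void* producer(void*) {
      payload = 42;
      compiler_barrier();              // "release": keep the payload store before the flag store
      ready = 1;
      return nullptr;
    }

    static void* consumer(void*) {
      while (ready == 0) { /* spin until the flag is observed */ }
      compiler_barrier();              // "acquire": keep the payload load after the flag load
      std::printf("payload = %d\n", payload);
      return nullptr;
    }

    int main() {
      pthread_t p, c;
      pthread_create(&c, nullptr, consumer, nullptr);
      pthread_create(&p, nullptr, producer, nullptr);
      pthread_join(p, nullptr);
      pthread_join(c, nullptr);
      return 0;
    }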

View file

@@ -7,7 +7,6 @@
-Xbootclasspath/p:<directories and zip/jar files separated by ;>
                  prepend in front of bootstrap class path
-Xnoclassgc disable class garbage collection
-Xincgc enable incremental garbage collection
-Xloggc:<file> log GC status to a file with time stamps
-Xbatch disable background compilation
-Xms<size> set initial Java heap size

View file

@@ -89,9 +89,3 @@ void ConcurrentMarkSweepPolicy::initialize_gc_policy_counters() {
_gc_policy_counters = new GCPolicyCounters("Copy:CMS", 2, 3);
}
}
// Returns true if the incremental mode is enabled.
bool ConcurrentMarkSweepPolicy::has_soft_ended_eden()
{
return CMSIncrementalMode;
}

View file

@@ -42,9 +42,6 @@ class ConcurrentMarkSweepPolicy : public GenCollectorPolicy {
virtual void initialize_size_policy(size_t init_eden_size,
size_t init_promo_size,
size_t init_survivor_size);
// Returns true if the incremental mode is enabled.
virtual bool has_soft_ended_eden();
};
#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSCOLLECTORPOLICY_HPP

View file

@@ -2083,17 +2083,13 @@ bool CompactibleFreeListSpace::should_concurrent_collect() const {
}
// Support for compaction
void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
- SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
+ scan_and_forward(this, cp);
// Prepare_for_compaction() uses the space between live objects
// so that later phase can skip dead space quickly. So verification
// of the free lists doesn't work after.
}
#define obj_size(q) adjustObjectSize(oop(q)->size())
#define adjust_obj_size(s) adjustObjectSize(s)
void CompactibleFreeListSpace::adjust_pointers() {
// In other versions of adjust_pointers(), a bail out
// based on the amount of live data in the generation
@@ -2101,12 +2097,12 @@ void CompactibleFreeListSpace::adjust_pointers() {
// Cannot test used() == 0 here because the free lists have already
// been mangled by the compaction.
- SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
+ scan_and_adjust_pointers(this);
// See note about verification in prepare_for_compaction().
}
void CompactibleFreeListSpace::compact() {
- SCAN_AND_COMPACT(obj_size);
+ scan_and_compact(this);
}
// Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
@@ -2629,7 +2625,7 @@ void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>*
// Get the #blocks we want to claim
size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
assert(n_blks > 0, "Error");
- assert(ResizePLAB || n_blks == OldPLABSize, "Error");
+ assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
// In some cases, when the application has a phase change,
// there may be a sudden and sharp shift in the object survival
// profile, and updating the counts at the end of a scavenge

View file

@@ -73,6 +73,13 @@ class CompactibleFreeListSpace: public CompactibleSpace {
friend class CMSCollector;
// Local alloc buffer for promotion into this space.
friend class CFLS_LAB;
// Allow scan_and_* functions to call (private) overrides of the auxiliary functions on this class
template <typename SpaceType>
friend void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space);
template <typename SpaceType>
friend void CompactibleSpace::scan_and_compact(SpaceType* space);
template <typename SpaceType>
friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
// "Size" of chunks of work (executed during parallel remark phases // "Size" of chunks of work (executed during parallel remark phases
// of CMS collection); this probably belongs in CMSCollector, although // of CMS collection); this probably belongs in CMSCollector, although
@ -288,6 +295,28 @@ class CompactibleFreeListSpace: public CompactibleSpace {
_bt.freed(start, size); _bt.freed(start, size);
} }
// Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
// See comments for CompactibleSpace for more information.
inline HeapWord* scan_limit() const {
return end();
}
inline bool scanned_block_is_obj(const HeapWord* addr) const {
return CompactibleFreeListSpace::block_is_obj(addr); // Avoid virtual call
}
inline size_t scanned_block_size(const HeapWord* addr) const {
return CompactibleFreeListSpace::block_size(addr); // Avoid virtual call
}
inline size_t adjust_obj_size(size_t size) const {
return adjustObjectSize(size);
}
inline size_t obj_size(const HeapWord* addr) const {
return adjustObjectSize(oop(addr)->size());
}
protected:
// Reset the indexed free list to its initial empty condition.
void resetIndexedFreeListArray();
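The friend declarations and the private scan_limit()/scanned_block_is_obj()/scanned_block_size() helpers added above exist so that the shared scan_and_* templates can call the concrete space's block functions without virtual dispatch in the hot loop. The toy program below only mimics the shape of that pattern; BaseSpace, ToySpace, and scan_and_count are invented names.

    #include <cstdio>

    class BaseSpace {
     public:
      // Generic scan loop shared by all concrete spaces. Because it is a template,
      // the calls below bind directly to the concrete type's helpers (no virtual calls).
      template <typename SpaceType>
      static void scan_and_count(SpaceType* space) {
        int live = 0;
        for (int addr = 0; addr < space->scan_limit(); addr += space->scanned_block_size(addr)) {
          if (space->scanned_block_is_obj(addr)) live++;
        }
        std::printf("live blocks: %d\n", live);
      }
    };

    class ToySpace : public BaseSpace {
      // Grant the shared template access to the private helpers below,
      // mirroring the friend declarations in the patch.
      template <typename SpaceType>
      friend void BaseSpace::scan_and_count(SpaceType* space);

      int scan_limit() const { return 100; }                         // pretend the space spans [0, 100)
      bool scanned_block_is_obj(int addr) const { return addr % 20 != 0; }
      int scanned_block_size(int) const { return 10; }               // fixed-size "blocks"
    };

    int main() {
      ToySpace s;
      BaseSpace::scan_and_count(&s);   // prints "live blocks: 5"
      return 0;
    }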

View file

@@ -167,16 +167,6 @@ class CMSTokenSyncWithLocks: public CMSTokenSync {
};
// Wrapper class to temporarily disable icms during a foreground cms collection.
class ICMSDisabler: public StackObj {
public:
// The ctor disables icms and wakes up the thread so it notices the change;
// the dtor re-enables icms. Note that the CMSCollector methods will check
// CMSIncrementalMode.
ICMSDisabler() { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
~ICMSDisabler() { CMSCollector::enable_icms(); }
};
//////////////////////////////////////////////////////////////////
// Concurrent Mark-Sweep Generation /////////////////////////////
//////////////////////////////////////////////////////////////////
@@ -363,7 +353,6 @@ CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
_cms_used_at_gc0_end = 0;
_allow_duty_cycle_reduction = false;
_valid_bits = 0;
_icms_duty_cycle = CMSIncrementalDutyCycle;
}
double CMSStats::cms_free_adjustment_factor(size_t free) const {
@@ -442,86 +431,17 @@ double CMSStats::time_until_cms_start() const {
return work - deadline;
}
// Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
// amount of change to prevent wild oscillation.
unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
unsigned int new_duty_cycle) {
assert(old_duty_cycle <= 100, "bad input value");
assert(new_duty_cycle <= 100, "bad input value");
// Note: use subtraction with caution since it may underflow (values are
// unsigned). Addition is safe since we're in the range 0-100.
unsigned int damped_duty_cycle = new_duty_cycle;
if (new_duty_cycle < old_duty_cycle) {
const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
if (new_duty_cycle + largest_delta < old_duty_cycle) {
damped_duty_cycle = old_duty_cycle - largest_delta;
}
} else if (new_duty_cycle > old_duty_cycle) {
const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
if (new_duty_cycle > old_duty_cycle + largest_delta) {
damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
}
}
assert(damped_duty_cycle <= 100, "invalid duty cycle computed");
if (CMSTraceIncrementalPacing) {
gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
old_duty_cycle, new_duty_cycle, damped_duty_cycle);
}
return damped_duty_cycle;
}
unsigned int CMSStats::icms_update_duty_cycle_impl() {
assert(CMSIncrementalPacing && valid(),
"should be handled in icms_update_duty_cycle()");
double cms_time_so_far = cms_timer().seconds();
double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);
// Avoid division by 0.
double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;
unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
if (new_duty_cycle > _icms_duty_cycle) {
// Avoid very small duty cycles (1 or 2); 0 is allowed.
if (new_duty_cycle > 2) {
_icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
new_duty_cycle);
}
} else if (_allow_duty_cycle_reduction) {
// The duty cycle is reduced only once per cms cycle (see record_cms_end()).
new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
// Respect the minimum duty cycle.
unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
_icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
}
if (PrintGCDetails || CMSTraceIncrementalPacing) {
gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
}
_allow_duty_cycle_reduction = false;
return _icms_duty_cycle;
}
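For context on the pacing code deleted above: icms_damped_duty_cycle() let a falling duty cycle drop by at most max(old/4, 5) points per update and a rising one climb by at most max(old/4, 15) points, capped at 100. The standalone program below restates that arithmetic with example inputs; it reproduces the removed function in isolation and is not part of the remaining sources.

    #include <algorithm>
    #include <cstdio>

    // Re-statement of the removed icms_damped_duty_cycle() logic.
    static unsigned damped_duty_cycle(unsigned old_dc, unsigned new_dc) {
      unsigned damped = new_dc;
      if (new_dc < old_dc) {
        const unsigned largest_delta = std::max(old_dc / 4, 5u);
        if (new_dc + largest_delta < old_dc) damped = old_dc - largest_delta;
      } else if (new_dc > old_dc) {
        const unsigned largest_delta = std::max(old_dc / 4, 15u);
        if (new_dc > old_dc + largest_delta) damped = std::min(old_dc + largest_delta, 100u);
      }
      return damped;
    }

    int main() {
      std::printf("%u\n", damped_duty_cycle(80, 10));  // requested drop 80 -> 10 is clipped to 60
      std::printf("%u\n", damped_duty_cycle(20, 90));  // requested jump 20 -> 90 is clipped to 35
      std::printf("%u\n", damped_duty_cycle(40, 45));  // small changes pass through unchanged: 45
      return 0;
    }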
#ifndef PRODUCT
void CMSStats::print_on(outputStream *st) const {
st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
gc0_duration(), gc0_period(), gc0_promoted());
- st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
- cms_duration(), cms_duration_per_mb(),
- cms_period(), cms_allocated());
+ st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
+ cms_duration(), cms_period(), cms_allocated());
st->print(",cms_since_beg=%g,cms_since_end=%g",
cms_time_since_begin(), cms_time_since_end());
st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
_cms_used_at_gc0_begin, _cms_used_at_gc0_end);
if (CMSIncrementalMode) {
st->print(",dc=%d", icms_duty_cycle());
}
if (valid()) {
st->print(",promo_rate=%g,cms_alloc_rate=%g",
@@ -579,8 +499,6 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
#endif
_collection_count_start(0),
_verifying(false),
_icms_start_limit(NULL),
_icms_stop_limit(NULL),
_verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
_completed_initialization(false),
_collector_policy(cp),
@@ -1116,137 +1034,6 @@ void CMSCollector::promoted(bool par, HeapWord* start,
}
}
static inline size_t percent_of_space(Space* space, HeapWord* addr)
{
size_t delta = pointer_delta(addr, space->bottom());
return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
}
void CMSCollector::icms_update_allocation_limits()
{
Generation* young = GenCollectedHeap::heap()->get_gen(0);
EdenSpace* eden = young->as_DefNewGeneration()->eden();
const unsigned int duty_cycle = stats().icms_update_duty_cycle();
if (CMSTraceIncrementalPacing) {
stats().print();
}
assert(duty_cycle <= 100, "invalid duty cycle");
if (duty_cycle != 0) {
// The duty_cycle is a percentage between 0 and 100; convert to words and
// then compute the offset from the endpoints of the space.
size_t free_words = eden->free() / HeapWordSize;
double free_words_dbl = (double)free_words;
size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
size_t offset_words = (free_words - duty_cycle_words) / 2;
_icms_start_limit = eden->top() + offset_words;
_icms_stop_limit = eden->end() - offset_words;
// The limits may be adjusted (shifted to the right) by
// CMSIncrementalOffset, to allow the application more mutator time after a
// young gen gc (when all mutators were stopped) and before CMS starts and
// takes away one or more cpus.
if (CMSIncrementalOffset != 0) {
double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
size_t adjustment = (size_t)adjustment_dbl;
HeapWord* tmp_stop = _icms_stop_limit + adjustment;
if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
_icms_start_limit += adjustment;
_icms_stop_limit = tmp_stop;
}
}
}
if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
_icms_start_limit = _icms_stop_limit = eden->end();
}
// Install the new start limit.
eden->set_soft_end(_icms_start_limit);
if (CMSTraceIncrementalMode) {
gclog_or_tty->print(" icms alloc limits: "
PTR_FORMAT "," PTR_FORMAT
" (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
p2i(_icms_start_limit), p2i(_icms_stop_limit),
percent_of_space(eden, _icms_start_limit),
percent_of_space(eden, _icms_stop_limit));
if (Verbose) {
gclog_or_tty->print("eden: ");
eden->print_on(gclog_or_tty);
}
}
}
// Any changes here should try to maintain the invariant
// that if this method is called with _icms_start_limit
// and _icms_stop_limit both NULL, then it should return NULL
// and not notify the icms thread.
HeapWord*
CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
size_t word_size)
{
// A start_limit equal to end() means the duty cycle is 0, so treat that as a
// nop.
if (CMSIncrementalMode && _icms_start_limit != space->end()) {
if (top <= _icms_start_limit) {
if (CMSTraceIncrementalMode) {
space->print_on(gclog_or_tty);
gclog_or_tty->stamp();
gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
", new limit=" PTR_FORMAT
" (" SIZE_FORMAT "%%)",
p2i(top), p2i(_icms_stop_limit),
percent_of_space(space, _icms_stop_limit));
}
ConcurrentMarkSweepThread::start_icms();
assert(top < _icms_stop_limit, "Tautology");
if (word_size < pointer_delta(_icms_stop_limit, top)) {
return _icms_stop_limit;
}
// The allocation will cross both the _start and _stop limits, so do the
// stop notification also and return end().
if (CMSTraceIncrementalMode) {
space->print_on(gclog_or_tty);
gclog_or_tty->stamp();
gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
", new limit=" PTR_FORMAT
" (" SIZE_FORMAT "%%)",
p2i(top), p2i(space->end()),
percent_of_space(space, space->end()));
}
ConcurrentMarkSweepThread::stop_icms();
return space->end();
}
if (top <= _icms_stop_limit) {
if (CMSTraceIncrementalMode) {
space->print_on(gclog_or_tty);
gclog_or_tty->stamp();
gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
", new limit=" PTR_FORMAT
" (" SIZE_FORMAT "%%)",
top, space->end(),
percent_of_space(space, space->end()));
}
ConcurrentMarkSweepThread::stop_icms();
return space->end();
}
if (CMSTraceIncrementalMode) {
space->print_on(gclog_or_tty);
gclog_or_tty->stamp();
gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
", new limit=" PTR_FORMAT,
top, NULL);
}
}
return NULL;
}
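As a worked example of the limit arithmetic in the icms_update_allocation_limits() code removed above: a duty cycle of D percent reserves D percent of eden's free words in the middle of the free region, so each limit sits (free_words - duty_cycle_words) / 2 words in from top() and end(). The numbers below are invented example values.

    #include <cstddef>
    #include <cstdio>

    int main() {
      // Example only: eden has 1000 free words and the duty cycle is 20%.
      const size_t free_words       = 1000;
      const unsigned duty_cycle     = 20;
      const size_t duty_cycle_words = free_words * duty_cycle / 100;       // 200
      const size_t offset_words     = (free_words - duty_cycle_words) / 2; // 400

      // start limit = top() + 400 words, stop limit = end() - 400 words:
      // iCMS would be asked to run while allocation moves through the middle
      // 200 words of eden's free space.
      std::printf("duty_cycle_words=%zu offset_words=%zu\n", duty_cycle_words, offset_words);
      return 0;
    }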
oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
// allocate, copy and if necessary update promoinfo --
@@ -1289,14 +1076,6 @@ oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
}
HeapWord*
ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
HeapWord* top,
size_t word_sz)
{
return collector()->allocation_limit_reached(space, top, word_sz);
}
// IMPORTANT: Notes on object size recognition in CMS.
// ---------------------------------------------------
// A block of storage in the CMS generation is always in
@@ -1809,9 +1588,6 @@ void CMSCollector::acquire_control_and_collect(bool full,
// we want to do a foreground collection.
_foregroundGCIsActive = true;
// Disable incremental mode during a foreground collection.
ICMSDisabler icms_disabler;
// release locks and wait for a notify from the background collector
// releasing the locks in only necessary for phases which
// do yields to improve the granularity of the collection.
@@ -2135,7 +1911,7 @@ void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
void CMSCollector::print_eden_and_survivor_chunk_arrays() {
DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
- EdenSpace* eden_space = dng->eden();
+ ContiguousSpace* eden_space = dng->eden();
ContiguousSpace* from_space = dng->from();
ContiguousSpace* to_space = dng->to();
// Eden
@@ -2783,10 +2559,6 @@ void CMSCollector::gc_epilogue(bool full) {
//
_cmsGen->update_counters(cms_used);
if (CMSIncrementalMode) {
icms_update_allocation_limits();
}
bitMapLock()->unlock();
releaseFreelistLocks();
@@ -4272,12 +4044,10 @@ void CMSConcMarkingTask::coordinator_yield() {
assert_lock_strong(_bit_map_lock);
_bit_map_lock->unlock();
ConcurrentMarkSweepThread::desynchronize(true);
ConcurrentMarkSweepThread::acknowledge_yield_request();
_collector->stopTimer();
if (PrintCMSStatistics != 0) {
_collector->incrementYields();
}
_collector->icms_wait();
// It is possible for whichever thread initiated the yield request
// not to get a chance to wake up and take the bitmap lock between
@@ -4307,7 +4077,6 @@ void CMSConcMarkingTask::coordinator_yield() {
ConcurrentMarkSweepThread::should_yield() &&
!CMSCollector::foregroundGCIsActive(); ++i) {
os::sleep(Thread::current(), 1, false);
ConcurrentMarkSweepThread::acknowledge_yield_request();
}
ConcurrentMarkSweepThread::synchronize(true);
@@ -5238,7 +5007,7 @@ class RemarkKlassClosure : public KlassClosure {
void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
- EdenSpace* eden_space = dng->eden();
+ ContiguousSpace* eden_space = dng->eden();
ContiguousSpace* from_space = dng->from();
ContiguousSpace* to_space = dng->to();
@@ -5410,7 +5179,7 @@ CMSParMarkTask::do_young_space_rescan(uint worker_id,
while (!pst->is_task_claimed(/* reference */ nth_task)) {
// We claimed task # nth_task; compute its boundaries.
if (chunk_top == 0) { // no samples were taken
- assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
+ assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task");
start = space->bottom();
end = space->top();
} else if (nth_task == 0) {
@@ -5788,7 +5557,7 @@ void CMSCollector::do_remark_parallel() {
// process_roots (which currently doesn't know how to
// parallelize such a scan), but rather will be broken up into
// a set of parallel tasks (via the sampling that the [abortable]
- // preclean phase did of EdenSpace, plus the [two] tasks of
+ // preclean phase did of eden, plus the [two] tasks of
// scanning the [two] survivor spaces. Further fine-grain
// parallelization of the scanning of the survivor spaces
// themselves, and of precleaning of the younger gen itself
@@ -6474,19 +6243,16 @@ void CMSCollector::reset(bool asynch) {
assert_lock_strong(bitMapLock());
bitMapLock()->unlock();
ConcurrentMarkSweepThread::desynchronize(true);
ConcurrentMarkSweepThread::acknowledge_yield_request();
stopTimer();
if (PrintCMSStatistics != 0) {
incrementYields();
}
icms_wait();
// See the comment in coordinator_yield()
for (unsigned i = 0; i < CMSYieldSleepCount &&
ConcurrentMarkSweepThread::should_yield() &&
!CMSCollector::foregroundGCIsActive(); ++i) {
os::sleep(Thread::current(), 1, false);
ConcurrentMarkSweepThread::acknowledge_yield_request();
}
ConcurrentMarkSweepThread::synchronize(true);
@@ -6509,10 +6275,6 @@ void CMSCollector::reset(bool asynch) {
_collectorState = Idling;
}
// Stop incremental mode after a cycle completes, so that any future cycles
// are triggered by allocation.
stop_icms();
NOT_PRODUCT(
if (RotateCMSCollectionTypes) {
_cmsGen->rotate_debug_collection_type();
@@ -6964,12 +6726,10 @@ void MarkRefsIntoAndScanClosure::do_yield_work() {
_bit_map->lock()->unlock();
_freelistLock->unlock();
ConcurrentMarkSweepThread::desynchronize(true);
ConcurrentMarkSweepThread::acknowledge_yield_request();
_collector->stopTimer();
if (PrintCMSStatistics != 0) {
_collector->incrementYields();
}
_collector->icms_wait();
// See the comment in coordinator_yield()
for (unsigned i = 0;
@@ -6978,7 +6738,6 @@ void MarkRefsIntoAndScanClosure::do_yield_work() {
!CMSCollector::foregroundGCIsActive();
++i) {
os::sleep(Thread::current(), 1, false);
ConcurrentMarkSweepThread::acknowledge_yield_request();
}
ConcurrentMarkSweepThread::synchronize(true);
@@ -7124,19 +6883,16 @@ void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
_bitMap->lock()->unlock();
_freelistLock->unlock();
ConcurrentMarkSweepThread::desynchronize(true);
ConcurrentMarkSweepThread::acknowledge_yield_request();
_collector->stopTimer();
if (PrintCMSStatistics != 0) {
_collector->incrementYields();
}
_collector->icms_wait();
// See the comment in coordinator_yield()
for (unsigned i = 0; i < CMSYieldSleepCount &&
ConcurrentMarkSweepThread::should_yield() &&
!CMSCollector::foregroundGCIsActive(); ++i) {
os::sleep(Thread::current(), 1, false);
ConcurrentMarkSweepThread::acknowledge_yield_request();
}
ConcurrentMarkSweepThread::synchronize(true);
@@ -7196,19 +6952,16 @@ void SurvivorSpacePrecleanClosure::do_yield_work() {
// Relinquish the bit map lock
_bit_map->lock()->unlock();
ConcurrentMarkSweepThread::desynchronize(true);
ConcurrentMarkSweepThread::acknowledge_yield_request();
_collector->stopTimer();
if (PrintCMSStatistics != 0) {
_collector->incrementYields();
}
_collector->icms_wait();
// See the comment in coordinator_yield()
for (unsigned i = 0; i < CMSYieldSleepCount &&
ConcurrentMarkSweepThread::should_yield() &&
!CMSCollector::foregroundGCIsActive(); ++i) {
os::sleep(Thread::current(), 1, false);
ConcurrentMarkSweepThread::acknowledge_yield_request();
}
ConcurrentMarkSweepThread::synchronize(true);
@@ -7354,19 +7107,16 @@ void MarkFromRootsClosure::do_yield_work() {
assert_lock_strong(_bitMap->lock());
_bitMap->lock()->unlock();
ConcurrentMarkSweepThread::desynchronize(true);
ConcurrentMarkSweepThread::acknowledge_yield_request();
_collector->stopTimer();
if (PrintCMSStatistics != 0) {
_collector->incrementYields();
}
_collector->icms_wait();
// See the comment in coordinator_yield()
for (unsigned i = 0; i < CMSYieldSleepCount &&
ConcurrentMarkSweepThread::should_yield() &&
!CMSCollector::foregroundGCIsActive(); ++i) {
os::sleep(Thread::current(), 1, false);
ConcurrentMarkSweepThread::acknowledge_yield_request();
}
ConcurrentMarkSweepThread::synchronize(true);
@@ -7388,7 +7138,7 @@ void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
_finger = ptr + obj->size();
assert(_finger > ptr, "we just incremented it above");
// On large heaps, it may take us some time to get through
- // the marking phase (especially if running iCMS). During
+ // the marking phase. During
// this time it's possible that a lot of mutations have
// accumulated in the card table and the mod union table --
// these mutation records are redundant until we have
@@ -7505,7 +7255,7 @@ void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
_finger = ptr + obj->size();
assert(_finger > ptr, "we just incremented it above");
// On large heaps, it may take us some time to get through
- // the marking phase (especially if running iCMS). During
+ // the marking phase. During
// this time it's possible that a lot of mutations have
// accumulated in the card table and the mod union table --
// these mutation records are redundant until we have
@@ -7994,20 +7744,16 @@ void CMSPrecleanRefsYieldClosure::do_yield_work() {
bml->unlock();
ConcurrentMarkSweepThread::desynchronize(true);
ConcurrentMarkSweepThread::acknowledge_yield_request();
_collector->stopTimer();
if (PrintCMSStatistics != 0) {
_collector->incrementYields();
}
_collector->icms_wait();
// See the comment in coordinator_yield()
for (unsigned i = 0; i < CMSYieldSleepCount &&
ConcurrentMarkSweepThread::should_yield() &&
!CMSCollector::foregroundGCIsActive(); ++i) {
os::sleep(Thread::current(), 1, false);
ConcurrentMarkSweepThread::acknowledge_yield_request();
}
ConcurrentMarkSweepThread::synchronize(true);
@@ -8675,19 +8421,16 @@ void SweepClosure::do_yield_work(HeapWord* addr) {
_bitMap->lock()->unlock();
_freelistLock->unlock();
ConcurrentMarkSweepThread::desynchronize(true);
ConcurrentMarkSweepThread::acknowledge_yield_request();
_collector->stopTimer();
if (PrintCMSStatistics != 0) {
_collector->incrementYields();
}
_collector->icms_wait();
// See the comment in coordinator_yield()
for (unsigned i = 0; i < CMSYieldSleepCount &&
ConcurrentMarkSweepThread::should_yield() &&
!CMSCollector::foregroundGCIsActive(); ++i) {
os::sleep(Thread::current(), 1, false);
ConcurrentMarkSweepThread::acknowledge_yield_request();
}
ConcurrentMarkSweepThread::synchronize(true);

View file

@@ -356,7 +356,6 @@ class CMSStats VALUE_OBJ_CLASS_SPEC {
size_t _gc0_promoted; // bytes promoted per gc0
double _cms_duration;
double _cms_duration_pre_sweep; // time from initiation to start of sweep
double _cms_duration_per_mb;
double _cms_period;
size_t _cms_allocated; // bytes of direct allocation per gc0 period
@@ -383,17 +382,7 @@ class CMSStats VALUE_OBJ_CLASS_SPEC {
unsigned int _valid_bits;
unsigned int _icms_duty_cycle; // icms duty cycle (0-100).
protected:
// Return a duty cycle that avoids wild oscillations, by limiting the amount
// of change between old_duty_cycle and new_duty_cycle (the latter is treated
// as a recommended value).
static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
unsigned int new_duty_cycle);
unsigned int icms_update_duty_cycle_impl();
// In support of adjusting of cms trigger ratios based on history
// of concurrent mode failure.
double cms_free_adjustment_factor(size_t free) const;
@@ -426,7 +415,6 @@ class CMSStats VALUE_OBJ_CLASS_SPEC {
size_t gc0_promoted() const { return _gc0_promoted; }
double cms_period() const { return _cms_period; }
double cms_duration() const { return _cms_duration; }
double cms_duration_per_mb() const { return _cms_duration_per_mb; }
size_t cms_allocated() const { return _cms_allocated; }
size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;}
@@ -458,12 +446,6 @@ class CMSStats VALUE_OBJ_CLASS_SPEC {
// End of higher level statistics.
// Returns the cms incremental mode duty cycle, as a percentage (0-100).
unsigned int icms_duty_cycle() const { return _icms_duty_cycle; }
// Update the duty cycle and return the new value.
unsigned int icms_update_duty_cycle();
// Debugging.
void print_on(outputStream* st) const PRODUCT_RETURN;
void print() const { print_on(gclog_or_tty); }
@@ -725,13 +707,6 @@ class CMSCollector: public CHeapObj<mtGC> {
// Timing, allocation and promotion statistics, used for scheduling.
CMSStats _stats;
// Allocation limits installed in the young gen, used only in
// CMSIncrementalMode. When an allocation in the young gen would cross one of
// these limits, the cms generation is notified and the cms thread is started
// or stopped, respectively.
HeapWord* _icms_start_limit;
HeapWord* _icms_stop_limit;
enum CMS_op_type {
CMS_op_checkpointRootsInitial,
CMS_op_checkpointRootsFinal
@@ -867,10 +842,6 @@ class CMSCollector: public CHeapObj<mtGC> {
// collector.
bool waitForForegroundGC();
// Incremental mode triggering: recompute the icms duty cycle and set the
// allocation limits in the young gen.
void icms_update_allocation_limits();
size_t block_size_using_printezis_bits(HeapWord* addr) const;
size_t block_size_if_printezis_bits(HeapWord* addr) const;
HeapWord* next_card_start_after_block(HeapWord* addr) const;
@@ -928,9 +899,6 @@ class CMSCollector: public CHeapObj<mtGC> {
void promoted(bool par, HeapWord* start,
bool is_obj_array, size_t obj_size);
HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
size_t word_size);
void getFreelistLocks() const;
void releaseFreelistLocks() const;
bool haveFreelistLocks() const;
@@ -1001,14 +969,6 @@ class CMSCollector: public CHeapObj<mtGC> {
// Timers/stats for gc scheduling and incremental mode pacing.
CMSStats& stats() { return _stats; }
// Convenience methods that check whether CMSIncrementalMode is enabled and
// forward to the corresponding methods in ConcurrentMarkSweepThread.
static void start_icms();
static void stop_icms(); // Called at the end of the cms cycle.
static void disable_icms(); // Called before a foreground collection.
static void enable_icms(); // Called after a foreground collection.
void icms_wait(); // Called at yield points.
// Adaptive size policy
AdaptiveSizePolicy* size_policy();
@@ -1211,9 +1171,6 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
return allocate(size, tlab);
}
// Incremental mode triggering.
HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
size_t word_size);
// Used by CMSStats to track direct allocation. The value is sampled and
// reset after each young gen collection.

View file

@@ -234,36 +234,6 @@ inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
}
}
inline void CMSCollector::start_icms() {
if (CMSIncrementalMode) {
ConcurrentMarkSweepThread::start_icms();
}
}
inline void CMSCollector::stop_icms() {
if (CMSIncrementalMode) {
ConcurrentMarkSweepThread::stop_icms();
}
}
inline void CMSCollector::disable_icms() {
if (CMSIncrementalMode) {
ConcurrentMarkSweepThread::disable_icms();
}
}
inline void CMSCollector::enable_icms() {
if (CMSIncrementalMode) {
ConcurrentMarkSweepThread::enable_icms();
}
}
inline void CMSCollector::icms_wait() {
if (CMSIncrementalMode) {
cmsThread()->icms_wait();
}
}
inline void CMSCollector::save_sweep_limits() {
_cmsGen->save_sweep_limit();
}
@@ -363,12 +333,6 @@ inline void CMSStats::record_cms_end() {
_cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
cur_duration, _cms_alpha);
// Avoid division by 0.
const size_t cms_used_mb = MAX2(_cms_used_at_cms_begin / M, (size_t)1);
_cms_duration_per_mb = AdaptiveWeightedAverage::exp_avg(_cms_duration_per_mb,
cur_duration / cms_used_mb,
_cms_alpha);
_cms_end_time.update();
_cms_alpha = _saved_alpha;
_allow_duty_cycle_reduction = true;
@@ -400,15 +364,6 @@ inline double CMSStats::cms_consumption_rate() const {
return (gc0_promoted() + cms_allocated()) / gc0_period();
}
inline unsigned int CMSStats::icms_update_duty_cycle() {
// Update the duty cycle only if pacing is enabled and the stats are valid
// (after at least one young gen gc and one cms cycle have completed).
if (CMSIncrementalPacing && valid()) {
return icms_update_duty_cycle_impl();
}
return _icms_duty_cycle;
}
inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
cmsSpace()->save_sweep_limit();
}

View file

@@ -49,13 +49,6 @@ bool ConcurrentMarkSweepThread::_should_terminate = false;
int ConcurrentMarkSweepThread::_CMS_flag = CMS_nil;
volatile jint ConcurrentMarkSweepThread::_pending_yields = 0;
volatile jint ConcurrentMarkSweepThread::_pending_decrements = 0;
volatile jint ConcurrentMarkSweepThread::_icms_disabled = 0;
volatile bool ConcurrentMarkSweepThread::_should_run = false;
// When icms is enabled, the icms thread is stopped until explicitly
// started.
volatile bool ConcurrentMarkSweepThread::_should_stop = true;
SurrogateLockerThread*
ConcurrentMarkSweepThread::_slt = NULL;
@@ -99,7 +92,6 @@ ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector)
}
}
_sltMonitor = SLT_lock;
assert(!CMSIncrementalMode || icms_is_enabled(), "Error");
}
void ConcurrentMarkSweepThread::run() {
@@ -184,11 +176,6 @@ ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::start(CMSCollector* collec
}
void ConcurrentMarkSweepThread::stop() {
if (CMSIncrementalMode) {
// Disable incremental mode and wake up the thread so it notices the change.
disable_icms();
start_icms();
}
// it is ok to take late safepoints here, if needed
{
MutexLockerEx x(Terminator_lock);
@@ -387,23 +374,13 @@ void ConcurrentMarkSweepThread::wait_on_cms_lock_for_scavenge(long t_millis) {
void ConcurrentMarkSweepThread::sleepBeforeNextCycle() {
while (!_should_terminate) {
- if (CMSIncrementalMode) {
-   icms_wait();
-   if(CMSWaitDuration >= 0) {
-     // Wait until the next synchronous GC, a concurrent full gc
-     // request or a timeout, whichever is earlier.
-     wait_on_cms_lock_for_scavenge(CMSWaitDuration);
-   }
-   return;
+ if(CMSWaitDuration >= 0) {
+   // Wait until the next synchronous GC, a concurrent full gc
+   // request or a timeout, whichever is earlier.
+   wait_on_cms_lock_for_scavenge(CMSWaitDuration);
} else {
-   if(CMSWaitDuration >= 0) {
-     // Wait until the next synchronous GC, a concurrent full gc
-     // request or a timeout, whichever is earlier.
-     wait_on_cms_lock_for_scavenge(CMSWaitDuration);
-   } else {
-     // Wait until any cms_lock event or check interval not to call shouldConcurrentCollect permanently
-     wait_on_cms_lock(CMSCheckInterval);
-   }
+   // Wait until any cms_lock event or check interval not to call shouldConcurrentCollect permanently
+   wait_on_cms_lock(CMSCheckInterval);
}
// Check if we should start a CMS collection cycle
if (_collector->shouldConcurrentCollect()) {
@@ -414,42 +391,6 @@ void ConcurrentMarkSweepThread::sleepBeforeNextCycle() {
}
}
// Incremental CMS
void ConcurrentMarkSweepThread::start_icms() {
assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking");
MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag);
trace_state("start_icms");
_should_run = true;
iCMS_lock->notify_all();
}
void ConcurrentMarkSweepThread::stop_icms() {
assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking");
MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag);
if (!_should_stop) {
trace_state("stop_icms");
_should_stop = true;
_should_run = false;
asynchronous_yield_request();
iCMS_lock->notify_all();
}
}
void ConcurrentMarkSweepThread::icms_wait() {
assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking");
if (_should_stop && icms_is_enabled()) {
MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag);
trace_state("pause_icms");
_collector->stats().stop_cms_timer();
while(!_should_run && icms_is_enabled()) {
iCMS_lock->wait(Mutex::_no_safepoint_check_flag);
}
_collector->stats().start_cms_timer();
_should_stop = false;
trace_state("pause_icms end");
}
}
// Note: this method, although exported by the ConcurrentMarkSweepThread,
// which is a non-JavaThread, can only be called by a JavaThread.
// Currently this is done at vm creation time (post-vm-init) by the

View file

@@ -64,20 +64,11 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
static bool clear_CMS_flag(int b) { return (_CMS_flag &= ~b) != 0; }
void sleepBeforeNextCycle();
- // CMS thread should yield for a young gen collection, direct allocation,
- // and iCMS activity.
+ // CMS thread should yield for a young gen collection and direct allocations
static char _pad_1[64 - sizeof(jint)]; // prevent cache-line sharing
static volatile jint _pending_yields;
static volatile jint _pending_decrements; // decrements to _pending_yields
static char _pad_2[64 - sizeof(jint)]; // prevent cache-line sharing
// Tracing messages, enabled by CMSTraceThreadState.
static inline void trace_state(const char* desc);
static volatile int _icms_disabled; // a counter to track #iCMS disable & enable
static volatile bool _should_run; // iCMS may run
static volatile bool _should_stop; // iCMS should stop
// debugging // debugging
void verify_ok_to_terminate() const PRODUCT_RETURN; void verify_ok_to_terminate() const PRODUCT_RETURN;
@ -135,44 +126,13 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
void wait_on_cms_lock_for_scavenge(long t_millis); void wait_on_cms_lock_for_scavenge(long t_millis);
// The CMS thread will yield during the work portion of its cycle // The CMS thread will yield during the work portion of its cycle
// only when requested to. Both synchronous and asychronous requests // only when requested to.
// are provided: // A synchronous request is used for young gen collections and
// (1) A synchronous request is used for young gen collections and // for direct allocations. The requesting thread increments
// for direct allocations. The requesting thread increments // _pending_yields at the beginning of an operation, and decrements
// _pending_yields at the beginning of an operation, and decrements // _pending_yields when that operation is completed.
// _pending_yields when that operation is completed. // In turn, the CMS thread yields when _pending_yields is positive,
// In turn, the CMS thread yields when _pending_yields is positive, // and continues to yield until the value reverts to 0.
// and continues to yield until the value reverts to 0.
// (2) An asynchronous request, on the other hand, is used by iCMS
// for the stop_icms() operation. A single yield satisfies all of
// the outstanding asynch yield requests, of which there may
// occasionally be several in close succession. To accomplish
// this, an asynch-requesting thread atomically increments both
// _pending_yields and _pending_decrements. An asynchr requesting
// thread does not wait and "acknowledge" completion of an operation
// and deregister the request, like the synchronous version described
// above does. In turn, after yielding, the CMS thread decrements both
// _pending_yields and _pending_decrements by the value seen in
// _pending_decrements before the decrement.
// NOTE: The above scheme is isomorphic to having two request counters,
// one for async requests and one for sync requests, and for the CMS thread
// to check the sum of the two counters to decide whether it should yield
// and to clear only the async counter when it yields. However, it turns out
// to be more efficient for CMS code to just check a single counter
// _pending_yields that holds the sum (of both sync and async requests), and
// a second counter _pending_decrements that only holds the async requests,
// for greater efficiency, since in a typical CMS run, there are many more
// potential (i.e. static) yield points than there are actual
// (i.e. dynamic) yields because of requests, which are few and far between.
//
// Note that, while "_pending_yields >= _pending_decrements" is an invariant,
// we cannot easily test that invariant, since the counters are manipulated via
// atomic instructions without explicit locking and we cannot read
// the two counters atomically together: one suggestion is to
// use (for example) 16-bit counters so as to be able to read the
// two counters atomically even on 32-bit platforms. Notice that
// the second assert in acknowledge_yield_request() below does indeed
// check a form of the above invariant, albeit indirectly.
static void increment_pending_yields() { static void increment_pending_yields() {
Atomic::inc(&_pending_yields); Atomic::inc(&_pending_yields);
@ -182,67 +142,9 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
Atomic::dec(&_pending_yields); Atomic::dec(&_pending_yields);
assert(_pending_yields >= 0, "can't be negative"); assert(_pending_yields >= 0, "can't be negative");
} }
static void asynchronous_yield_request() {
assert(CMSIncrementalMode, "Currently only used w/iCMS");
increment_pending_yields();
Atomic::inc(&_pending_decrements);
assert(_pending_decrements >= 0, "can't be negative");
}
static void acknowledge_yield_request() {
jint decrement = _pending_decrements;
if (decrement > 0) {
assert(CMSIncrementalMode, "Currently only used w/iCMS");
// Order important to preserve: _pending_yields >= _pending_decrements
Atomic::add(-decrement, &_pending_decrements);
Atomic::add(-decrement, &_pending_yields);
assert(_pending_decrements >= 0, "can't be negative");
assert(_pending_yields >= 0, "can't be negative");
}
}
static bool should_yield() { return _pending_yields > 0; } static bool should_yield() { return _pending_yields > 0; }
// CMS incremental mode.
static void start_icms(); // notify thread to start a quantum of work
static void stop_icms(); // request thread to stop working
void icms_wait(); // if asked to stop, wait until notified to start
// Incremental mode is enabled globally by the flag CMSIncrementalMode. It
// must also be enabled/disabled dynamically to allow foreground collections.
#define ICMS_ENABLING_ASSERT \
assert((CMSIncrementalMode && _icms_disabled >= 0) || \
(!CMSIncrementalMode && _icms_disabled <= 0), "Error")
static inline void enable_icms() {
ICMS_ENABLING_ASSERT;
Atomic::dec(&_icms_disabled);
}
static inline void disable_icms() {
ICMS_ENABLING_ASSERT;
Atomic::inc(&_icms_disabled);
}
static inline bool icms_is_disabled() {
ICMS_ENABLING_ASSERT;
return _icms_disabled > 0;
}
static inline bool icms_is_enabled() {
return !icms_is_disabled();
}
}; };
inline void ConcurrentMarkSweepThread::trace_state(const char* desc) {
if (CMSTraceThreadState) {
char buf[128];
TimeStamp& ts = gclog_or_tty->time_stamp();
if (!ts.is_updated()) {
ts.update();
}
jio_snprintf(buf, sizeof(buf), " [%.3f: CMSThread %s] ",
ts.seconds(), desc);
buf[sizeof(buf) - 1] = '\0';
gclog_or_tty->print("%s", buf);
}
}
// For scoped increment/decrement of (synchronous) yield requests // For scoped increment/decrement of (synchronous) yield requests
class CMSSynchronousYieldRequest: public StackObj { class CMSSynchronousYieldRequest: public StackObj {
public: public:
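To make the synchronous yield protocol described above concrete, here is a sketch of how the pieces are meant to fit together. The constructor/destructor bodies are an assumption (the class declaration is truncated at this point in the diff), and example_young_gen_operation is an invented name:
class CMSSynchronousYieldRequest: public StackObj {
 public:
  CMSSynchronousYieldRequest()  { ConcurrentMarkSweepThread::increment_pending_yields(); }
  ~CMSSynchronousYieldRequest() { ConcurrentMarkSweepThread::decrement_pending_yields(); }
};
// A requesting thread brackets a young gen collection or a direct
// allocation with a request object; the CMS thread calls should_yield()
// at its yield points and backs off while the counter is positive.
void example_young_gen_operation() {
  CMSSynchronousYieldRequest request;   // _pending_yields++
  // ... perform the operation; the CMS thread yields while it runs ...
}                                       // _pending_yields-- on scope exit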

View file

@ -207,12 +207,6 @@ void VM_GenCollectFullConcurrent::doit() {
MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag); MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
assert(_full_gc_count_before <= gch->total_full_collections(), "Error"); assert(_full_gc_count_before <= gch->total_full_collections(), "Error");
if (gch->total_full_collections() == _full_gc_count_before) { if (gch->total_full_collections() == _full_gc_count_before) {
// Disable iCMS until the full collection is done, and
// remember that we did so.
CMSCollector::disable_icms();
_disabled_icms = true;
// In case CMS thread was in icms_wait(), wake it up.
CMSCollector::start_icms();
// Nudge the CMS thread to start a concurrent collection. // Nudge the CMS thread to start a concurrent collection.
CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause); CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause);
} else { } else {
@ -276,8 +270,4 @@ void VM_GenCollectFullConcurrent::doit_epilogue() {
FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag); FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
} }
} }
// Enable iCMS back if we disabled it earlier.
if (_disabled_icms) {
CMSCollector::enable_icms();
}
} }

View file

@ -128,13 +128,11 @@ class VM_CMS_Final_Remark: public VM_CMS_Operation {
// VM operation to invoke a concurrent collection of the heap as a // VM operation to invoke a concurrent collection of the heap as a
// GenCollectedHeap heap. // GenCollectedHeap heap.
class VM_GenCollectFullConcurrent: public VM_GC_Operation { class VM_GenCollectFullConcurrent: public VM_GC_Operation {
bool _disabled_icms;
public: public:
VM_GenCollectFullConcurrent(unsigned int gc_count_before, VM_GenCollectFullConcurrent(unsigned int gc_count_before,
unsigned int full_gc_count_before, unsigned int full_gc_count_before,
GCCause::Cause gc_cause) GCCause::Cause gc_cause)
: VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */), : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */)
_disabled_icms(false)
{ {
assert(FullGCCount_lock != NULL, "Error"); assert(FullGCCount_lock != NULL, "Error");
assert(UseAsyncConcMarkSweepGC, "Else will hang caller"); assert(UseAsyncConcMarkSweepGC, "Else will hang caller");

View file

@ -1248,7 +1248,7 @@ public:
// The same as above but assume that the caller holds the Heap_lock. // The same as above but assume that the caller holds the Heap_lock.
void collect_locked(GCCause::Cause cause); void collect_locked(GCCause::Cause cause);
virtual void copy_allocation_context_stats(const jint* contexts, virtual bool copy_allocation_context_stats(const jint* contexts,
jlong* totals, jlong* totals,
jbyte* accuracy, jbyte* accuracy,
jint len); jint len);

View file

@ -25,8 +25,9 @@
#include "precompiled.hpp" #include "precompiled.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp" #include "gc_implementation/g1/g1CollectedHeap.hpp"
void G1CollectedHeap::copy_allocation_context_stats(const jint* contexts, bool G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
jlong* totals, jlong* totals,
jbyte* accuracy, jbyte* accuracy,
jint len) { jint len) {
return false;
} }
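The signature change gives callers a way to poll for further data. A hypothetical consumer (not part of this commit; the function name is invented) might drive it like this, relying only on the documented "returns true if there are more stats available" contract:
void drain_allocation_context_stats(CollectedHeap* heap,
                                    const jint* contexts, jlong* totals,
                                    jbyte* accuracy, jint len) {
  bool more;
  do {
    // Copy the current values for each context into totals/accuracy,
    // then consume this batch before asking again.
    more = heap->copy_allocation_context_stats(contexts, totals, accuracy, len);
  } while (more);
}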

View file

@ -1585,34 +1585,22 @@ public:
} }
}; };
uint G1CollectorPolicy::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) {
assert(n_workers > 0, "Active gc workers should be greater than 0");
const uint overpartition_factor = 4;
const uint min_chunk_size = MAX2(n_regions / n_workers, 1U);
return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
}
void void
G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) { G1CollectorPolicy::record_concurrent_mark_cleanup_end(uint n_workers) {
_collectionSetChooser->clear(); _collectionSetChooser->clear();
uint region_num = _g1->num_regions(); uint n_regions = _g1->num_regions();
const uint OverpartitionFactor = 4; uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
uint WorkUnit; _collectionSetChooser->prepare_for_par_region_addition(n_regions, chunk_size);
// The use of MinChunkSize = 8 in the original code ParKnownGarbageTask par_known_garbage_task(_collectionSetChooser, chunk_size, n_workers);
// causes some assertion failures when the total number of _g1->workers()->run_task(&par_known_garbage_task);
// region is less than 8. The code here tries to fix that.
// Should the original code also be fixed?
if (no_of_gc_threads > 0) {
const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U);
WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor),
MinWorkUnit);
} else {
assert(no_of_gc_threads > 0,
"The active gc workers should be greater than 0");
// In a product build do something reasonable to avoid a crash.
const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U);
WorkUnit =
MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
MinWorkUnit);
}
_collectionSetChooser->prepare_for_par_region_addition(_g1->num_regions(),
WorkUnit);
ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser, WorkUnit, (uint) no_of_gc_threads);
_g1->workers()->run_task(&parKnownGarbageTask);
_collectionSetChooser->sort_regions(); _collectionSetChooser->sort_regions();
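Worked example of the chunk-size arithmetic above (values invented): with n_regions = 2048 and n_workers = 8, min_chunk_size = MAX2(2048 / 8, 1U) = 256, while 2048 / (8 * overpartition_factor) = 2048 / 32 = 64, so calculate_parallel_work_chunk_size() returns MAX2(64, 256) = 256; the result is never allowed to drop below the per-worker minimum.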

View file

@ -612,6 +612,10 @@ private:
uint desired_min_length, uint desired_min_length,
uint desired_max_length); uint desired_max_length);
// Calculate and return chunk size (in number of regions) for parallel
// concurrent mark cleanup.
uint calculate_parallel_work_chunk_size(uint n_workers, uint n_regions);
// Check whether a given young length (young_length) fits into the // Check whether a given young length (young_length) fits into the
// given target pause time and whether the prediction for the amount // given target pause time and whether the prediction for the amount
// of objects to be copied for the given length will fit into the // of objects to be copied for the given length will fit into the
@ -687,7 +691,7 @@ public:
// Record start, end, and completion of cleanup. // Record start, end, and completion of cleanup.
void record_concurrent_mark_cleanup_start(); void record_concurrent_mark_cleanup_start();
void record_concurrent_mark_cleanup_end(int no_of_gc_threads); void record_concurrent_mark_cleanup_end(uint n_workers);
void record_concurrent_mark_cleanup_completed(); void record_concurrent_mark_cleanup_completed();
// Records the information about the heap size for reporting in // Records the information about the heap size for reporting in

View file

@ -97,13 +97,6 @@ G1RemSet::~G1RemSet() {
FREE_C_HEAP_ARRAY(OopsInHeapRegionClosure*, _cset_rs_update_cl, mtGC); FREE_C_HEAP_ARRAY(OopsInHeapRegionClosure*, _cset_rs_update_cl, mtGC);
} }
void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
if (_g1->is_in_g1_reserved(mr.start())) {
_n += (int) ((mr.byte_size() / CardTableModRefBS::card_size));
if (_start_first == NULL) _start_first = mr.start();
}
}
class ScanRSClosure : public HeapRegionClosure { class ScanRSClosure : public HeapRegionClosure {
size_t _cards_done, _cards; size_t _cards_done, _cards;
G1CollectedHeap* _g1h; G1CollectedHeap* _g1h;
@ -303,15 +296,6 @@ void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i) {
_g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i); _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i);
// Now there should be no dirty cards.
if (G1RSLogCheckCardTable) {
CountNonCleanMemRegionClosure cl(_g1);
_ct_bs->mod_card_iterate(&cl);
// XXX This isn't true any more: keeping cards of young regions
// marked dirty broke it. Need some reasonable fix.
guarantee(cl.n() == 0, "Card table should be clean.");
}
_g1p->phase_times()->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0); _g1p->phase_times()->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0);
} }

View file

@ -151,19 +151,6 @@ public:
} }
}; };
class CountNonCleanMemRegionClosure: public MemRegionClosure {
G1CollectedHeap* _g1;
int _n;
HeapWord* _start_first;
public:
CountNonCleanMemRegionClosure(G1CollectedHeap* g1) :
_g1(g1), _n(0), _start_first(NULL)
{}
void do_MemRegion(MemRegion mr);
int n() { return _n; };
HeapWord* start_first() { return _start_first; }
};
class UpdateRSOopClosure: public ExtendedOopClosure { class UpdateRSOopClosure: public ExtendedOopClosure {
HeapRegion* _from; HeapRegion* _from;
G1RemSet* _rs; G1RemSet* _rs;

View file

@ -108,10 +108,6 @@
develop(bool, G1RSBarrierRegionFilter, true, \ develop(bool, G1RSBarrierRegionFilter, true, \
"If true, generate region filtering code in RS barrier") \ "If true, generate region filtering code in RS barrier") \
\ \
develop(bool, G1RSLogCheckCardTable, false, \
"If true, verify that no dirty cards remain after RS log " \
"processing.") \
\
diagnostic(bool, G1PrintRegionLivenessInfo, false, \ diagnostic(bool, G1PrintRegionLivenessInfo, false, \
"Prints the liveness information for all regions in the heap " \ "Prints the liveness information for all regions in the heap " \
"at the end of a marking cycle.") \ "at the end of a marking cycle.") \

View file

@ -960,6 +960,10 @@ void HeapRegion::verify() const {
verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy); verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
} }
void HeapRegion::prepare_for_compaction(CompactPoint* cp) {
scan_and_forward(this, cp);
}
// G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
// away eventually. // away eventually.
@ -1043,12 +1047,6 @@ void G1OffsetTableContigSpace::object_iterate(ObjectClosure* blk) {
} }
} }
#define block_is_always_obj(q) true
void G1OffsetTableContigSpace::prepare_for_compaction(CompactPoint* cp) {
SCAN_AND_FORWARD(cp, top, block_is_always_obj, block_size);
}
#undef block_is_always_obj
G1OffsetTableContigSpace:: G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray, G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr) : MemRegion mr) :

View file

@ -187,8 +187,6 @@ class G1OffsetTableContigSpace: public CompactibleSpace {
HeapWord* block_start(const void* p); HeapWord* block_start(const void* p);
HeapWord* block_start_const(const void* p) const; HeapWord* block_start_const(const void* p) const;
void prepare_for_compaction(CompactPoint* cp);
// Add offset table update. // Add offset table update.
virtual HeapWord* allocate(size_t word_size); virtual HeapWord* allocate(size_t word_size);
HeapWord* par_allocate(size_t word_size); HeapWord* par_allocate(size_t word_size);
@ -210,6 +208,9 @@ class G1OffsetTableContigSpace: public CompactibleSpace {
class HeapRegion: public G1OffsetTableContigSpace { class HeapRegion: public G1OffsetTableContigSpace {
friend class VMStructs; friend class VMStructs;
// Allow scan_and_forward to call (private) overrides for auxiliary functions on this class
template <typename SpaceType>
friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
private: private:
// The remembered set for this region. // The remembered set for this region.
@ -219,6 +220,20 @@ class HeapRegion: public G1OffsetTableContigSpace {
G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; } G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }
// Auxiliary functions for scan_and_forward support.
// See comments for CompactibleSpace for more information.
inline HeapWord* scan_limit() const {
return top();
}
inline bool scanned_block_is_obj(const HeapWord* addr) const {
return true; // Always true, since scan_limit is top
}
inline size_t scanned_block_size(const HeapWord* addr) const {
return HeapRegion::block_size(addr); // Avoid virtual call
}
protected: protected:
// The index of this region in the heap region sequence. // The index of this region in the heap region sequence.
uint _hrm_index; uint _hrm_index;
@ -340,6 +355,9 @@ class HeapRegion: public G1OffsetTableContigSpace {
// and the amount of unallocated words if called on top() // and the amount of unallocated words if called on top()
size_t block_size(const HeapWord* p) const; size_t block_size(const HeapWord* p) const;
// Override for scan_and_forward support.
void prepare_for_compaction(CompactPoint* cp);
inline HeapWord* par_allocate_no_bot_updates(size_t word_size); inline HeapWord* par_allocate_no_bot_updates(size_t word_size);
inline HeapWord* allocate_no_bot_updates(size_t word_size); inline HeapWord* allocate_no_bot_updates(size_t word_size);

View file

@ -426,11 +426,19 @@ void FreeRegionList_test() {
mtGC); mtGC);
G1BlockOffsetSharedArray oa(heap, bot_storage); G1BlockOffsetSharedArray oa(heap, bot_storage);
bot_storage->commit_regions(0, num_regions_in_test); bot_storage->commit_regions(0, num_regions_in_test);
HeapRegion hr0(0, &oa, heap);
HeapRegion hr1(1, &oa, heap); // Set up memory regions for the heap regions.
HeapRegion hr2(2, &oa, heap); MemRegion mr0(heap.start(), HeapRegion::GrainWords);
HeapRegion hr3(3, &oa, heap); MemRegion mr1(mr0.end(), HeapRegion::GrainWords);
HeapRegion hr4(4, &oa, heap); MemRegion mr2(mr1.end(), HeapRegion::GrainWords);
MemRegion mr3(mr2.end(), HeapRegion::GrainWords);
MemRegion mr4(mr3.end(), HeapRegion::GrainWords);
HeapRegion hr0(0, &oa, mr0);
HeapRegion hr1(1, &oa, mr1);
HeapRegion hr2(2, &oa, mr2);
HeapRegion hr3(3, &oa, mr3);
HeapRegion hr4(4, &oa, mr4);
l.add_ordered(&hr1); l.add_ordered(&hr1);
l.add_ordered(&hr0); l.add_ordered(&hr0);
l.add_ordered(&hr3); l.add_ordered(&hr3);

View file

@ -27,6 +27,7 @@
#include "gc_implementation/shared/markSweep.hpp" #include "gc_implementation/shared/markSweep.hpp"
#include "gc_interface/collectedHeap.hpp" #include "gc_interface/collectedHeap.hpp"
#include "oops/markOop.inline.hpp"
#include "utilities/stack.inline.hpp" #include "utilities/stack.inline.hpp"
#include "utilities/macros.hpp" #include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS #if INCLUDE_ALL_GCS

View file

@ -644,10 +644,13 @@ class CollectedHeap : public CHeapObj<mtInternal> {
// For each context in contexts, set the corresponding entries in the totals // For each context in contexts, set the corresponding entries in the totals
// and accuracy arrays to the current values held by the statistics. Each // and accuracy arrays to the current values held by the statistics. Each
// array should be of length len. // array should be of length len.
virtual void copy_allocation_context_stats(const jint* contexts, // Returns true if there are more stats available.
virtual bool copy_allocation_context_stats(const jint* contexts,
jlong* totals, jlong* totals,
jbyte* accuracy, jbyte* accuracy,
jint len) { } jint len) {
return false;
}
/////////////// Unit tests /////////////// /////////////// Unit tests ///////////////

View file

@ -189,11 +189,6 @@ class CollectorPolicy : public CHeapObj<mtGC> {
return CollectorPolicy::CollectorPolicyKind; return CollectorPolicy::CollectorPolicyKind;
} }
// Returns true if a collector has eden space with soft end.
virtual bool has_soft_ended_eden() {
return false;
}
// Do any updates required to global flags that are due to heap initialization // Do any updates required to global flags that are due to heap initialization
// changes // changes
virtual void post_heap_initialize() = 0; virtual void post_heap_initialize() = 0;

View file

@ -194,11 +194,7 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs,
(HeapWord*)_virtual_space.high()); (HeapWord*)_virtual_space.high());
Universe::heap()->barrier_set()->resize_covered_region(cmr); Universe::heap()->barrier_set()->resize_covered_region(cmr);
if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) { _eden_space = new ContiguousSpace();
_eden_space = new ConcEdenSpace(this);
} else {
_eden_space = new EdenSpace(this);
}
_from_space = new ContiguousSpace(); _from_space = new ContiguousSpace();
_to_space = new ContiguousSpace(); _to_space = new ContiguousSpace();
@ -1038,38 +1034,12 @@ HeapWord* DefNewGeneration::allocate(size_t word_size,
if (CMSEdenChunksRecordAlways && _next_gen != NULL) { if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
_next_gen->sample_eden_chunk(); _next_gen->sample_eden_chunk();
} }
return result; } else {
} // If the eden is full and the last collection bailed out, we are running
do { // out of heap space, and we try to allocate the from-space, too.
HeapWord* old_limit = eden()->soft_end(); // allocate_from_space can't be inlined because that would introduce a
if (old_limit < eden()->end()) { // circular dependency at compile time.
// Tell the next generation we reached a limit.
HeapWord* new_limit =
next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
if (new_limit != NULL) {
Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
} else {
assert(eden()->soft_end() == eden()->end(),
"invalid state after allocation_limit_reached returned null");
}
} else {
// The allocation failed and the soft limit is equal to the hard limit,
// there are no reasons to do an attempt to allocate
assert(old_limit == eden()->end(), "sanity check");
break;
}
// Try to allocate until succeeded or the soft limit can't be adjusted
result = eden()->par_allocate(word_size);
} while (result == NULL);
// If the eden is full and the last collection bailed out, we are running
// out of heap space, and we try to allocate the from-space, too.
// allocate_from_space can't be inlined because that would introduce a
// circular dependency at compile time.
if (result == NULL) {
result = allocate_from_space(word_size); result = allocate_from_space(word_size);
} else if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
_next_gen->sample_eden_chunk();
} }
return result; return result;
} }
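Pieced together from the new side of this hunk, the slow-path allocation now reduces to the shape below; the par_allocate() call and the result != NULL test sit above the hunk and are inferred from context, so treat this as a sketch rather than the exact file contents:
HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
  // Try the eden first (lock-free); fall back to from-space only if that fails.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
      _next_gen->sample_eden_chunk();
    }
  } else {
    // If the eden is full and the last collection bailed out, we are running
    // out of heap space, and we try to allocate the from-space, too.
    result = allocate_from_space(word_size);
  }
  return result;
}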
@ -1083,11 +1053,6 @@ HeapWord* DefNewGeneration::par_allocate(size_t word_size,
return res; return res;
} }
void DefNewGeneration::gc_prologue(bool full) {
// Ensure that _end and _soft_end are the same in eden space.
eden()->set_soft_end(eden()->end());
}
size_t DefNewGeneration::tlab_capacity() const { size_t DefNewGeneration::tlab_capacity() const {
return eden()->capacity(); return eden()->capacity();
} }

View file

@ -32,7 +32,6 @@
#include "memory/generation.inline.hpp" #include "memory/generation.inline.hpp"
#include "utilities/stack.hpp" #include "utilities/stack.hpp"
class EdenSpace;
class ContiguousSpace; class ContiguousSpace;
class ScanClosure; class ScanClosure;
class STWGCTimer; class STWGCTimer;
@ -132,7 +131,7 @@ protected:
void adjust_desired_tenuring_threshold(); void adjust_desired_tenuring_threshold();
// Spaces // Spaces
EdenSpace* _eden_space; ContiguousSpace* _eden_space;
ContiguousSpace* _from_space; ContiguousSpace* _from_space;
ContiguousSpace* _to_space; ContiguousSpace* _to_space;
@ -214,9 +213,9 @@ protected:
virtual Generation::Name kind() { return Generation::DefNew; } virtual Generation::Name kind() { return Generation::DefNew; }
// Accessing spaces // Accessing spaces
EdenSpace* eden() const { return _eden_space; } ContiguousSpace* eden() const { return _eden_space; }
ContiguousSpace* from() const { return _from_space; } ContiguousSpace* from() const { return _from_space; }
ContiguousSpace* to() const { return _to_space; } ContiguousSpace* to() const { return _to_space; }
virtual CompactibleSpace* first_compaction_space() const; virtual CompactibleSpace* first_compaction_space() const;
@ -282,8 +281,6 @@ protected:
HeapWord* par_allocate(size_t word_size, bool is_tlab); HeapWord* par_allocate(size_t word_size, bool is_tlab);
// Prologue & Epilogue
virtual void gc_prologue(bool full);
virtual void gc_epilogue(bool full); virtual void gc_epilogue(bool full);
// Save the tops for eden, from, and to // Save the tops for eden, from, and to

View file

@ -265,14 +265,6 @@ class Generation: public CHeapObj<mtGC> {
// Like "allocate", but performs any necessary locking internally. // Like "allocate", but performs any necessary locking internally.
virtual HeapWord* par_allocate(size_t word_size, bool is_tlab) = 0; virtual HeapWord* par_allocate(size_t word_size, bool is_tlab) = 0;
// A 'younger' gen has reached an allocation limit, and uses this to notify
// the next older gen. The return value is a new limit, or NULL if none. The
// caller must do the necessary locking.
virtual HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
size_t word_size) {
return NULL;
}
// Some generation may offer a region for shared, contiguous allocation, // Some generation may offer a region for shared, contiguous allocation,
// via inlined code (by exporting the address of the top and end fields // via inlined code (by exporting the address of the top and end fields
// defining the extent of the contiguous allocation region.) // defining the extent of the contiguous allocation region.)

View file

@ -438,52 +438,8 @@ bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
} }
} }
#define block_is_always_obj(q) true
#define obj_size(q) oop(q)->size()
#define adjust_obj_size(s) s
void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) {
SCAN_AND_FORWARD(cp, end, block_is_obj, block_size);
}
// Faster object search.
void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) { void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size); scan_and_forward(this, cp);
}
void Space::adjust_pointers() {
// adjust all the interior pointers to point at the new locations of objects
// Used by MarkSweep::mark_sweep_phase3()
// First check to see if there is any work to be done.
if (used() == 0) {
return; // Nothing to do.
}
// Otherwise...
HeapWord* q = bottom();
HeapWord* t = end();
debug_only(HeapWord* prev_q = NULL);
while (q < t) {
if (oop(q)->is_gc_marked()) {
// q is alive
// point all the oops to the new location
size_t size = oop(q)->adjust_pointers();
debug_only(prev_q = q);
q += size;
} else {
// q is not a live object. But we're not in a compactible space,
// So we don't have live ranges.
debug_only(prev_q = q);
q += block_size(q);
assert(q > prev_q, "we should be moving forward through memory");
}
}
assert(q == t, "just checking");
} }
void CompactibleSpace::adjust_pointers() { void CompactibleSpace::adjust_pointers() {
@ -492,11 +448,11 @@ void CompactibleSpace::adjust_pointers() {
return; // Nothing to do. return; // Nothing to do.
} }
SCAN_AND_ADJUST_POINTERS(adjust_obj_size); scan_and_adjust_pointers(this);
} }
void CompactibleSpace::compact() { void CompactibleSpace::compact() {
SCAN_AND_COMPACT(obj_size); scan_and_compact(this);
} }
void Space::print_short() const { print_short_on(tty); } void Space::print_short() const { print_short_on(tty); }
@ -684,13 +640,12 @@ size_t ContiguousSpace::block_size(const HeapWord* p) const {
} }
// This version requires locking. // This version requires locking.
inline HeapWord* ContiguousSpace::allocate_impl(size_t size, inline HeapWord* ContiguousSpace::allocate_impl(size_t size) {
HeapWord* const end_value) {
assert(Heap_lock->owned_by_self() || assert(Heap_lock->owned_by_self() ||
(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()), (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
"not locked"); "not locked");
HeapWord* obj = top(); HeapWord* obj = top();
if (pointer_delta(end_value, obj) >= size) { if (pointer_delta(end(), obj) >= size) {
HeapWord* new_top = obj + size; HeapWord* new_top = obj + size;
set_top(new_top); set_top(new_top);
assert(is_aligned(obj) && is_aligned(new_top), "checking alignment"); assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
@ -701,11 +656,10 @@ inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
} }
// This version is lock-free. // This version is lock-free.
inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size, inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size) {
HeapWord* const end_value) {
do { do {
HeapWord* obj = top(); HeapWord* obj = top();
if (pointer_delta(end_value, obj) >= size) { if (pointer_delta(end(), obj) >= size) {
HeapWord* new_top = obj + size; HeapWord* new_top = obj + size;
HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj); HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
// result can be one of two: // result can be one of two:
@ -744,12 +698,12 @@ HeapWord* ContiguousSpace::allocate_aligned(size_t size) {
// Requires locking. // Requires locking.
HeapWord* ContiguousSpace::allocate(size_t size) { HeapWord* ContiguousSpace::allocate(size_t size) {
return allocate_impl(size, end()); return allocate_impl(size);
} }
// Lock-free. // Lock-free.
HeapWord* ContiguousSpace::par_allocate(size_t size) { HeapWord* ContiguousSpace::par_allocate(size_t size) {
return par_allocate_impl(size, end()); return par_allocate_impl(size);
} }
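The par_allocate_impl() loop above is the classic lock-free bump-pointer pattern. A self-contained illustration of the same idea, with std::atomic standing in for Atomic::cmpxchg_ptr (all names here are illustrative, not HotSpot API):
#include <atomic>
#include <cstddef>

struct BumpArena {
  std::atomic<char*> top;   // current allocation pointer
  char*              end;   // hard limit of the arena

  void* par_allocate(std::size_t size) {
    char* obj = top.load(std::memory_order_relaxed);
    do {
      if (static_cast<std::size_t>(end - obj) < size) {
        return NULL;  // exhausted: a real collector would take a slow path here
      }
      // On failure obj is refreshed with the current top and we retry;
      // on success obj is the start of the newly claimed block.
    } while (!top.compare_exchange_weak(obj, obj + size));
    return obj;
  }
};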
void ContiguousSpace::allocate_temporary_filler(int factor) { void ContiguousSpace::allocate_temporary_filler(int factor) {
@ -784,49 +738,6 @@ void ContiguousSpace::allocate_temporary_filler(int factor) {
} }
} }
void EdenSpace::clear(bool mangle_space) {
ContiguousSpace::clear(mangle_space);
set_soft_end(end());
}
// Requires locking.
HeapWord* EdenSpace::allocate(size_t size) {
return allocate_impl(size, soft_end());
}
// Lock-free.
HeapWord* EdenSpace::par_allocate(size_t size) {
return par_allocate_impl(size, soft_end());
}
HeapWord* ConcEdenSpace::par_allocate(size_t size)
{
do {
// The invariant is top() should be read before end() because
// top() can't be greater than end(), so if an update of _soft_end
// occurs between 'end_val = end();' and 'top_val = top();' top()
// also can grow up to the new end() and the condition
// 'top_val > end_val' is true. To ensure the loading order
// OrderAccess::loadload() is required after top() read.
HeapWord* obj = top();
OrderAccess::loadload();
if (pointer_delta(*soft_end_addr(), obj) >= size) {
HeapWord* new_top = obj + size;
HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
// result can be one of two:
// the old top value: the exchange succeeded
// otherwise: the new value of the top is returned.
if (result == obj) {
assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
return obj;
}
} else {
return NULL;
}
} while (true);
}
HeapWord* OffsetTableContigSpace::initialize_threshold() { HeapWord* OffsetTableContigSpace::initialize_threshold() {
return _offsets.initialize_threshold(); return _offsets.initialize_threshold();
} }

View file

@ -41,19 +41,6 @@
// implementations for keeping track of free and used space, // implementations for keeping track of free and used space,
// for iterating over objects and free blocks, etc. // for iterating over objects and free blocks, etc.
// Here's the Space hierarchy:
//
// - Space -- an abstract base class describing a heap area
// - CompactibleSpace -- a space supporting compaction
// - CompactibleFreeListSpace -- (used for CMS generation)
// - ContiguousSpace -- a compactible space in which all free space
// is contiguous
// - EdenSpace -- contiguous space used as nursery
// - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
// - OffsetTableContigSpace -- contiguous space with a block offset array
// that allows "fast" block_start calls
// - TenuredSpace -- (used for TenuredGeneration)
// Forward decls. // Forward decls.
class Space; class Space;
class BlockOffsetArray; class BlockOffsetArray;
@ -238,7 +225,7 @@ class Space: public CHeapObj<mtGC> {
// Mark-sweep-compact support: all spaces can update pointers to objects // Mark-sweep-compact support: all spaces can update pointers to objects
// moving as a part of compaction. // moving as a part of compaction.
virtual void adjust_pointers(); virtual void adjust_pointers() = 0;
// PrintHeapAtGC support // PrintHeapAtGC support
virtual void print() const; virtual void print() const;
@ -339,7 +326,36 @@ public:
// necessarily, a space that is normally contiguous. But, for example, a // necessarily, a space that is normally contiguous. But, for example, a
// free-list-based space whose normal collection is a mark-sweep without // free-list-based space whose normal collection is a mark-sweep without
// compaction could still support compaction in full GC's. // compaction could still support compaction in full GC's.
//
// The compaction operations are implemented by the
// scan_and_{adjust_pointers,compact,forward} function templates.
// The following non-virtual auxiliary functions are used by these function templates:
// - scan_limit()
// - scanned_block_is_obj()
// - scanned_block_size()
// - adjust_obj_size()
// - obj_size()
// These functions are to be used exclusively by the scan_and_* function templates,
// and must be defined for all (non-abstract) subclasses of CompactibleSpace.
//
// NOTE: Any subclasses of CompactibleSpace wanting to change/define the behavior
// in any of the auxiliary functions must also override the corresponding
// prepare_for_compaction/adjust_pointers/compact functions using them.
// If not, such changes will not be used by, and will have no effect on, the compaction operations.
//
// This translates to the following dependencies:
// Overrides/definitions of
// - scan_limit
// - scanned_block_is_obj
// - scanned_block_size
// require override/definition of prepare_for_compaction().
// Similar dependencies exist between
// - adjust_obj_size and adjust_pointers()
// - obj_size and compact().
//
// Additionally, this also means that changes to block_size() or block_is_obj() that
// should be effective during the compaction operations must provide a corresponding
// definition of scanned_block_size/scanned_block_is_obj respectively.
class CompactibleSpace: public Space { class CompactibleSpace: public Space {
friend class VMStructs; friend class VMStructs;
friend class CompactibleFreeListSpace; friend class CompactibleFreeListSpace;
@ -347,6 +363,15 @@ private:
HeapWord* _compaction_top; HeapWord* _compaction_top;
CompactibleSpace* _next_compaction_space; CompactibleSpace* _next_compaction_space;
// Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
inline size_t adjust_obj_size(size_t size) const {
return size;
}
inline size_t obj_size(const HeapWord* addr) const {
return oop(addr)->size();
}
public: public:
CompactibleSpace() : CompactibleSpace() :
_compaction_top(NULL), _next_compaction_space(NULL) {} _compaction_top(NULL), _next_compaction_space(NULL) {}
@ -390,7 +415,7 @@ public:
// "cp->compaction_space" up-to-date. Offset tables may be updated in // "cp->compaction_space" up-to-date. Offset tables may be updated in
// this phase as if the final copy had occurred; if so, "cp->threshold" // this phase as if the final copy had occurred; if so, "cp->threshold"
// indicates when the next such action should be taken. // indicates when the next such action should be taken.
virtual void prepare_for_compaction(CompactPoint* cp); virtual void prepare_for_compaction(CompactPoint* cp) = 0;
// MarkSweep support phase3 // MarkSweep support phase3
virtual void adjust_pointers(); virtual void adjust_pointers();
// MarkSweep support phase4 // MarkSweep support phase4
@ -449,6 +474,25 @@ protected:
// words remaining after this operation. // words remaining after this operation.
bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q, bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
size_t word_len); size_t word_len);
// Below are template functions for scan_and_* algorithms (avoiding virtual calls).
// The space argument should be a subclass of CompactibleSpace, implementing
// scan_limit(), scanned_block_is_obj(), and scanned_block_size(),
// and possibly also overriding obj_size() and adjust_obj_size().
// These functions should avoid virtual calls whenever possible.
// Frequently calls adjust_obj_size().
template <class SpaceType>
static inline void scan_and_adjust_pointers(SpaceType* space);
// Frequently calls obj_size().
template <class SpaceType>
static inline void scan_and_compact(SpaceType* space);
// Frequently calls scanned_block_is_obj() and scanned_block_size().
// Requires the scan_limit() function.
template <class SpaceType>
static inline void scan_and_forward(SpaceType* space, CompactPoint* cp);
}; };
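As a minimal sketch of the recipe in the comment above (the class name is invented; ContiguousSpace and HeapRegion elsewhere in this change follow exactly this pattern, typically forwarding to non-virtual helpers so the templates stay free of virtual calls):
class MyCompactibleSpace : public CompactibleSpace {
  // Let the template reach the private auxiliary functions.
  template <typename SpaceType>
  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);

  // Non-virtual auxiliary functions consumed by scan_and_forward().
  // (A real subclass would forward to non-virtual implementations,
  // as HeapRegion does with HeapRegion::block_size().)
  HeapWord* scan_limit() const                          { return end(); }
  bool scanned_block_is_obj(const HeapWord* addr) const { return block_is_obj(addr); }
  size_t scanned_block_size(const HeapWord* addr) const { return block_size(addr); }

 public:
  // Defining the auxiliary functions here obliges the class to override the
  // corresponding entry point, so the template is instantiated with this type.
  virtual void prepare_for_compaction(CompactPoint* cp) {
    scan_and_forward(this, cp);
  }
  // ... the remaining Space/CompactibleSpace virtuals (block_size(), etc.) ...
};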
class GenSpaceMangler; class GenSpaceMangler;
@ -458,6 +502,25 @@ class GenSpaceMangler;
class ContiguousSpace: public CompactibleSpace { class ContiguousSpace: public CompactibleSpace {
friend class OneContigSpaceCardGeneration; friend class OneContigSpaceCardGeneration;
friend class VMStructs; friend class VMStructs;
// Allow scan_and_forward to call (private) overrides for auxiliary functions on this class
template <typename SpaceType>
friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
private:
// Auxiliary functions for scan_and_forward support.
// See comments for CompactibleSpace for more information.
inline HeapWord* scan_limit() const {
return top();
}
inline bool scanned_block_is_obj(const HeapWord* addr) const {
return true; // Always true, since scan_limit is top
}
inline size_t scanned_block_size(const HeapWord* addr) const {
return oop(addr)->size();
}
protected: protected:
HeapWord* _top; HeapWord* _top;
HeapWord* _concurrent_iteration_safe_limit; HeapWord* _concurrent_iteration_safe_limit;
@ -467,8 +530,8 @@ class ContiguousSpace: public CompactibleSpace {
GenSpaceMangler* mangler() { return _mangler; } GenSpaceMangler* mangler() { return _mangler; }
// Allocation helpers (return NULL if full). // Allocation helpers (return NULL if full).
inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value); inline HeapWord* allocate_impl(size_t word_size);
inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value); inline HeapWord* par_allocate_impl(size_t word_size);
public: public:
ContiguousSpace(); ContiguousSpace();
@ -622,7 +685,6 @@ class ContiguousSpace: public CompactibleSpace {
// Used to increase collection frequency. "factor" of 0 means entire // Used to increase collection frequency. "factor" of 0 means entire
// space. // space.
void allocate_temporary_filler(int factor); void allocate_temporary_filler(int factor);
}; };
@ -685,56 +747,6 @@ public:
{} {}
}; };
// Class EdenSpace describes eden-space in new generation.
class DefNewGeneration;
class EdenSpace : public ContiguousSpace {
friend class VMStructs;
private:
DefNewGeneration* _gen;
// _soft_end is used as a soft limit on allocation. As soft limits are
// reached, the slow-path allocation code can invoke other actions and then
// adjust _soft_end up to a new soft limit or to end().
HeapWord* _soft_end;
public:
EdenSpace(DefNewGeneration* gen) :
_gen(gen), _soft_end(NULL) {}
// Get/set just the 'soft' limit.
HeapWord* soft_end() { return _soft_end; }
HeapWord** soft_end_addr() { return &_soft_end; }
void set_soft_end(HeapWord* value) { _soft_end = value; }
// Override.
void clear(bool mangle_space);
// Set both the 'hard' and 'soft' limits (_end and _soft_end).
void set_end(HeapWord* value) {
set_soft_end(value);
ContiguousSpace::set_end(value);
}
// Allocation (return NULL if full)
HeapWord* allocate(size_t word_size);
HeapWord* par_allocate(size_t word_size);
};
// Class ConcEdenSpace extends EdenSpace for the sake of safe
// allocation while soft-end is being modified concurrently
class ConcEdenSpace : public EdenSpace {
public:
ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { }
// Allocation (return NULL if full)
HeapWord* par_allocate(size_t word_size);
};
// A ContigSpace that Supports an efficient "block_start" operation via // A ContigSpace that Supports an efficient "block_start" operation via
// a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with // a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
// other spaces.) This is the abstract base class for old generation // other spaces.) This is the abstract base class for old generation

View file

@ -25,6 +25,9 @@
#ifndef SHARE_VM_MEMORY_SPACE_INLINE_HPP #ifndef SHARE_VM_MEMORY_SPACE_INLINE_HPP
#define SHARE_VM_MEMORY_SPACE_INLINE_HPP #define SHARE_VM_MEMORY_SPACE_INLINE_HPP
#include "gc_implementation/shared/liveRange.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/collectedHeap.hpp" #include "gc_interface/collectedHeap.hpp"
#include "memory/space.hpp" #include "memory/space.hpp"
#include "memory/universe.hpp" #include "memory/universe.hpp"
@ -35,272 +38,6 @@ inline HeapWord* Space::block_start(const void* p) {
return block_start_const(p); return block_start_const(p);
} }
#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \
/* Compute the new addresses for the live objects and store it in the mark \
* Used by universe::mark_sweep_phase2() \
*/ \
HeapWord* compact_top; /* This is where we are currently compacting to. */ \
\
/* We're sure to be here before any objects are compacted into this \
* space, so this is a good time to initialize this: \
*/ \
set_compaction_top(bottom()); \
\
if (cp->space == NULL) { \
assert(cp->gen != NULL, "need a generation"); \
assert(cp->threshold == NULL, "just checking"); \
assert(cp->gen->first_compaction_space() == this, "just checking"); \
cp->space = cp->gen->first_compaction_space(); \
compact_top = cp->space->bottom(); \
cp->space->set_compaction_top(compact_top); \
cp->threshold = cp->space->initialize_threshold(); \
} else { \
compact_top = cp->space->compaction_top(); \
} \
\
/* We allow some amount of garbage towards the bottom of the space, so \
* we don't start compacting before there is a significant gain to be made.\
* Occasionally, we want to ensure a full compaction, which is determined \
* by the MarkSweepAlwaysCompactCount parameter. \
*/ \
uint invocations = MarkSweep::total_invocations(); \
bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0); \
\
size_t allowed_deadspace = 0; \
if (skip_dead) { \
const size_t ratio = allowed_dead_ratio(); \
allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize; \
} \
\
HeapWord* q = bottom(); \
HeapWord* t = scan_limit(); \
\
HeapWord* end_of_live= q; /* One byte beyond the last byte of the last \
live object. */ \
HeapWord* first_dead = end();/* The first dead object. */ \
LiveRange* liveRange = NULL; /* The current live range, recorded in the \
first header of preceding free area. */ \
_first_dead = first_dead; \
\
const intx interval = PrefetchScanIntervalInBytes; \
\
while (q < t) { \
assert(!block_is_obj(q) || \
oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() || \
oop(q)->mark()->has_bias_pattern(), \
"these are the only valid states during a mark sweep"); \
if (block_is_obj(q) && oop(q)->is_gc_marked()) { \
/* prefetch beyond q */ \
Prefetch::write(q, interval); \
size_t size = block_size(q); \
compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
q += size; \
end_of_live = q; \
} else { \
/* run over all the contiguous dead objects */ \
HeapWord* end = q; \
do { \
/* prefetch beyond end */ \
Prefetch::write(end, interval); \
end += block_size(end); \
} while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\
\
/* see if we might want to pretend this object is alive so that \
* we don't have to compact quite as often. \
*/ \
if (allowed_deadspace > 0 && q == compact_top) { \
size_t sz = pointer_delta(end, q); \
if (insert_deadspace(allowed_deadspace, q, sz)) { \
compact_top = cp->space->forward(oop(q), sz, cp, compact_top); \
q = end; \
end_of_live = end; \
continue; \
} \
} \
\
/* otherwise, it really is a free region. */ \
\
/* for the previous LiveRange, record the end of the live objects. */ \
if (liveRange) { \
liveRange->set_end(q); \
} \
\
/* record the current LiveRange object. \
* liveRange->start() is overlaid on the mark word. \
*/ \
liveRange = (LiveRange*)q; \
liveRange->set_start(end); \
liveRange->set_end(end); \
\
/* see if this is the first dead region. */ \
if (q < first_dead) { \
first_dead = q; \
} \
\
/* move on to the next object */ \
q = end; \
} \
} \
\
assert(q == t, "just checking"); \
if (liveRange != NULL) { \
liveRange->set_end(q); \
} \
_end_of_live = end_of_live; \
if (end_of_live < first_dead) { \
first_dead = end_of_live; \
} \
_first_dead = first_dead; \
\
/* save the compaction_top of the compaction space. */ \
cp->space->set_compaction_top(compact_top); \
}
#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \
/* adjust all the interior pointers to point at the new locations of objects \
* Used by MarkSweep::mark_sweep_phase3() */ \
\
HeapWord* q = bottom(); \
HeapWord* t = _end_of_live; /* Established by "prepare_for_compaction". */ \
\
assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \
\
if (q < t && _first_dead > q && \
!oop(q)->is_gc_marked()) { \
/* we have a chunk of the space which hasn't moved and we've \
* reinitialized the mark word during the previous pass, so we can't \
* use is_gc_marked for the traversal. */ \
HeapWord* end = _first_dead; \
\
while (q < end) { \
/* I originally tried to conjoin "block_start(q) == q" to the \
* assertion below, but that doesn't work, because you can't \
* accurately traverse previous objects to get to the current one \
* after their pointers have been \
* updated, until the actual compaction is done. dld, 4/00 */ \
assert(block_is_obj(q), \
"should be at block boundaries, and should be looking at objs"); \
\
/* point all the oops to the new location */ \
size_t size = oop(q)->adjust_pointers(); \
size = adjust_obj_size(size); \
\
q += size; \
} \
\
if (_first_dead == t) { \
q = t; \
} else { \
/* $$$ This is funky. Using this to read the previously written \
* LiveRange. See also use below. */ \
q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \
} \
} \
\
const intx interval = PrefetchScanIntervalInBytes; \
\
debug_only(HeapWord* prev_q = NULL); \
while (q < t) { \
/* prefetch beyond q */ \
Prefetch::write(q, interval); \
if (oop(q)->is_gc_marked()) { \
/* q is alive */ \
/* point all the oops to the new location */ \
size_t size = oop(q)->adjust_pointers(); \
size = adjust_obj_size(size); \
debug_only(prev_q = q); \
q += size; \
} else { \
/* q is not a live object, so its mark should point at the next \
* live object */ \
debug_only(prev_q = q); \
q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
assert(q > prev_q, "we should be moving forward through memory"); \
} \
} \
\
assert(q == t, "just checking"); \
}
#define SCAN_AND_COMPACT(obj_size) { \
/* Copy all live objects to their new location \
* Used by MarkSweep::mark_sweep_phase4() */ \
\
HeapWord* q = bottom(); \
HeapWord* const t = _end_of_live; \
debug_only(HeapWord* prev_q = NULL); \
\
if (q < t && _first_dead > q && \
!oop(q)->is_gc_marked()) { \
debug_only( \
/* we have a chunk of the space which hasn't moved and we've reinitialized \
* the mark word during the previous pass, so we can't use is_gc_marked for \
* the traversal. */ \
HeapWord* const end = _first_dead; \
\
while (q < end) { \
size_t size = obj_size(q); \
assert(!oop(q)->is_gc_marked(), \
"should be unmarked (special dense prefix handling)"); \
debug_only(prev_q = q); \
q += size; \
} \
) /* debug_only */ \
\
if (_first_dead == t) { \
q = t; \
} else { \
/* $$$ Funky */ \
q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \
} \
} \
\
const intx scan_interval = PrefetchScanIntervalInBytes; \
const intx copy_interval = PrefetchCopyIntervalInBytes; \
while (q < t) { \
if (!oop(q)->is_gc_marked()) { \
/* mark is pointer to next marked oop */ \
debug_only(prev_q = q); \
q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
assert(q > prev_q, "we should be moving forward through memory"); \
} else { \
/* prefetch beyond q */ \
Prefetch::read(q, scan_interval); \
\
/* size and destination */ \
size_t size = obj_size(q); \
HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \
\
/* prefetch beyond compaction_top */ \
Prefetch::write(compaction_top, copy_interval); \
\
/* copy object and reinit its mark */ \
assert(q != compaction_top, "everything in this pass should be moving"); \
Copy::aligned_conjoint_words(q, compaction_top, size); \
oop(compaction_top)->init_mark(); \
assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
\
debug_only(prev_q = q); \
q += size; \
} \
} \
\
/* Let's remember if we were empty before we did the compaction. */ \
bool was_empty = used_region().is_empty(); \
/* Reset space after compaction is complete */ \
reset_after_compaction(); \
/* We do this clear, below, since it has overloaded meanings for some */ \
/* space subtypes. For example, OffsetTableContigSpace's that were */ \
/* compacted into will have had their offset table thresholds updated */ \
/* continuously, but those that weren't need to have their thresholds */ \
/* re-initialized. Also mangles unused area for debugging. */ \
if (used_region().is_empty()) { \
if (!was_empty) clear(SpaceDecorator::Mangle); \
} else { \
if (ZapUnusedHeapArea) mangle_unused_area(); \
} \
}
inline HeapWord* OffsetTableContigSpace::allocate(size_t size) { inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
HeapWord* res = ContiguousSpace::allocate(size); HeapWord* res = ContiguousSpace::allocate(size);
if (res != NULL) { if (res != NULL) {
@ -334,4 +71,263 @@ OffsetTableContigSpace::block_start_const(const void* p) const {
return _offsets.block_start(p); return _offsets.block_start(p);
} }
template <class SpaceType>
inline void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp) {
// Compute the new addresses for the live objects and store it in the mark
// Used by universe::mark_sweep_phase2()
HeapWord* compact_top; // This is where we are currently compacting to.
// We're sure to be here before any objects are compacted into this
// space, so this is a good time to initialize this:
space->set_compaction_top(space->bottom());
if (cp->space == NULL) {
assert(cp->gen != NULL, "need a generation");
assert(cp->threshold == NULL, "just checking");
assert(cp->gen->first_compaction_space() == space, "just checking");
cp->space = cp->gen->first_compaction_space();
compact_top = cp->space->bottom();
cp->space->set_compaction_top(compact_top);
cp->threshold = cp->space->initialize_threshold();
} else {
compact_top = cp->space->compaction_top();
}
// We allow some amount of garbage towards the bottom of the space, so
// we don't start compacting before there is a significant gain to be made.
// Occasionally, we want to ensure a full compaction, which is determined
// by the MarkSweepAlwaysCompactCount parameter.
uint invocations = MarkSweep::total_invocations();
bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0);
size_t allowed_deadspace = 0;
if (skip_dead) {
const size_t ratio = space->allowed_dead_ratio();
allowed_deadspace = (space->capacity() * ratio / 100) / HeapWordSize;
}
HeapWord* q = space->bottom();
HeapWord* t = space->scan_limit();
HeapWord* end_of_live = q; // One byte beyond the last byte of the last
// live object.
HeapWord* first_dead = space->end(); // The first dead object.
LiveRange* liveRange = NULL; // The current live range, recorded in the
// first header of preceding free area.
space->_first_dead = first_dead;
const intx interval = PrefetchScanIntervalInBytes;
while (q < t) {
assert(!space->scanned_block_is_obj(q) ||
oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||
oop(q)->mark()->has_bias_pattern(),
"these are the only valid states during a mark sweep");
if (space->scanned_block_is_obj(q) && oop(q)->is_gc_marked()) {
// prefetch beyond q
Prefetch::write(q, interval);
size_t size = space->scanned_block_size(q);
compact_top = cp->space->forward(oop(q), size, cp, compact_top);
q += size;
end_of_live = q;
} else {
// run over all the contiguous dead objects
HeapWord* end = q;
do {
// prefetch beyond end
Prefetch::write(end, interval);
end += space->scanned_block_size(end);
} while (end < t && (!space->scanned_block_is_obj(end) || !oop(end)->is_gc_marked()));
// see if we might want to pretend this object is alive so that
// we don't have to compact quite as often.
if (allowed_deadspace > 0 && q == compact_top) {
size_t sz = pointer_delta(end, q);
if (space->insert_deadspace(allowed_deadspace, q, sz)) {
compact_top = cp->space->forward(oop(q), sz, cp, compact_top);
q = end;
end_of_live = end;
continue;
}
}
// otherwise, it really is a free region.
// for the previous LiveRange, record the end of the live objects.
if (liveRange) {
liveRange->set_end(q);
}
// record the current LiveRange object.
// liveRange->start() is overlaid on the mark word.
liveRange = (LiveRange*)q;
liveRange->set_start(end);
liveRange->set_end(end);
// see if this is the first dead region.
if (q < first_dead) {
first_dead = q;
}
// move on to the next object
q = end;
}
}
assert(q == t, "just checking");
if (liveRange != NULL) {
liveRange->set_end(q);
}
space->_end_of_live = end_of_live;
if (end_of_live < first_dead) {
first_dead = end_of_live;
}
space->_first_dead = first_dead;
// save the compaction_top of the compaction space.
cp->space->set_compaction_top(compact_top);
}
template <class SpaceType>
inline void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space) {
// adjust all the interior pointers to point at the new locations of objects
// Used by MarkSweep::mark_sweep_phase3()
HeapWord* q = space->bottom();
HeapWord* t = space->_end_of_live; // Established by "prepare_for_compaction".
assert(space->_first_dead <= space->_end_of_live, "Stands to reason, no?");
if (q < t && space->_first_dead > q && !oop(q)->is_gc_marked()) {
// we have a chunk of the space which hasn't moved and we've
// reinitialized the mark word during the previous pass, so we can't
// use is_gc_marked for the traversal.
HeapWord* end = space->_first_dead;
while (q < end) {
// I originally tried to conjoin "block_start(q) == q" to the
// assertion below, but that doesn't work, because you can't
// accurately traverse previous objects to get to the current one
// after their pointers have been
// updated, until the actual compaction is done. dld, 4/00
assert(space->block_is_obj(q), "should be at block boundaries, and should be looking at objs");
// point all the oops to the new location
size_t size = oop(q)->adjust_pointers();
size = space->adjust_obj_size(size);
q += size;
}
if (space->_first_dead == t) {
q = t;
} else {
// $$$ This is funky. Using this to read the previously written
// LiveRange. See also use below.
q = (HeapWord*)oop(space->_first_dead)->mark()->decode_pointer();
}
}
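// If the in-place prefix above was walked, q now points either at t or at
// the first live object after the first dead region (read from the LiveRange
// recorded during the forwarding pass); otherwise q is still bottom().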
const intx interval = PrefetchScanIntervalInBytes;
debug_only(HeapWord* prev_q = NULL);
while (q < t) {
// prefetch beyond q
Prefetch::write(q, interval);
if (oop(q)->is_gc_marked()) {
// q is alive
// point all the oops to the new location
size_t size = oop(q)->adjust_pointers();
size = space->adjust_obj_size(size);
debug_only(prev_q = q);
q += size;
} else {
// q is not a live object, so its mark should point at the next
// live object
debug_only(prev_q = q);
q = (HeapWord*) oop(q)->mark()->decode_pointer();
assert(q > prev_q, "we should be moving forward through memory");
}
}
assert(q == t, "just checking");
}
template <class SpaceType>
inline void CompactibleSpace::scan_and_compact(SpaceType* space) {
// Copy all live objects to their new location
// Used by MarkSweep::mark_sweep_phase4()
HeapWord* q = space->bottom();
HeapWord* const t = space->_end_of_live;
debug_only(HeapWord* prev_q = NULL);
if (q < t && space->_first_dead > q && !oop(q)->is_gc_marked()) {
#ifdef ASSERT // Debug only
// we have a chunk of the space which hasn't moved and we've reinitialized
// the mark word during the previous pass, so we can't use is_gc_marked for
// the traversal.
HeapWord* const end = space->_first_dead;
while (q < end) {
size_t size = space->obj_size(q);
assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
prev_q = q;
q += size;
}
#endif
if (space->_first_dead == t) {
q = t;
} else {
// $$$ Funky
q = (HeapWord*) oop(space->_first_dead)->mark()->decode_pointer();
}
}
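// When the space starts with an in-place (already-compacted) prefix, the
// copy loop below resumes after the first dead region; otherwise it starts
// from bottom().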
const intx scan_interval = PrefetchScanIntervalInBytes;
const intx copy_interval = PrefetchCopyIntervalInBytes;
while (q < t) {
if (!oop(q)->is_gc_marked()) {
// mark is pointer to next marked oop
debug_only(prev_q = q);
q = (HeapWord*) oop(q)->mark()->decode_pointer();
assert(q > prev_q, "we should be moving forward through memory");
} else {
// prefetch beyond q
Prefetch::read(q, scan_interval);
// size and destination
size_t size = space->obj_size(q);
HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();
// prefetch beyond compaction_top
Prefetch::write(compaction_top, copy_interval);
// copy object and reinit its mark
assert(q != compaction_top, "everything in this pass should be moving");
Copy::aligned_conjoint_words(q, compaction_top, size);
oop(compaction_top)->init_mark();
assert(oop(compaction_top)->klass() != NULL, "should have a class");
debug_only(prev_q = q);
q += size;
}
}
// Let's remember if we were empty before we did the compaction.
bool was_empty = space->used_region().is_empty();
// Reset space after compaction is complete
space->reset_after_compaction();
// We do the clear, below, because clear() has overloaded meanings for some
// space subtypes. For example, OffsetTableContigSpaces that were
// compacted into will have had their offset table thresholds updated
// continuously, but those that weren't need to have their thresholds
// re-initialized. Clearing also mangles the unused area for debugging.
if (space->used_region().is_empty()) {
if (!was_empty) space->clear(SpaceDecorator::Mangle);
} else {
if (ZapUnusedHeapArea) space->mangle_unused_area();
}
}
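// Note: these templates rely on the SpaceType argument supplying the hooks
// used above: scan_limit(), scanned_block_is_obj(), scanned_block_size(),
// adjust_obj_size(), obj_size(), allowed_dead_ratio() and insert_deadspace().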
#endif // SHARE_VM_MEMORY_SPACE_INLINE_HPP

View file

@@ -1197,8 +1197,7 @@ void PhaseMacroExpand::expand_allocate_common(
} }
if (C->env()->dtrace_alloc_probes() || if (C->env()->dtrace_alloc_probes() ||
!UseTLAB && (!Universe::heap()->supports_inline_contig_alloc() || !UseTLAB && (!Universe::heap()->supports_inline_contig_alloc())) {
(UseConcMarkSweepGC && CMSIncrementalMode))) {
// Force slow-path allocation // Force slow-path allocation
always_slow = true; always_slow = true;
initial_slow_test = NULL; initial_slow_test = NULL;

View file

@@ -1777,7 +1777,7 @@ void Arguments::set_g1_gc_flags() {
#ifdef ASSERT #ifdef ASSERT
static bool verify_serial_gc_flags() { static bool verify_serial_gc_flags() {
return (UseSerialGC && return (UseSerialGC &&
!(UseParNewGC || (UseConcMarkSweepGC || CMSIncrementalMode) || UseG1GC || !(UseParNewGC || (UseConcMarkSweepGC) || UseG1GC ||
UseParallelGC || UseParallelOldGC)); UseParallelGC || UseParallelOldGC));
} }
#endif // ASSERT #endif // ASSERT
@@ -2191,10 +2191,6 @@ void Arguments::check_deprecated_gcs() {
warning("Using the ParNew young collector with the Serial old collector is deprecated " warning("Using the ParNew young collector with the Serial old collector is deprecated "
"and will likely be removed in a future release"); "and will likely be removed in a future release");
} }
if (CMSIncrementalMode) {
warning("Using incremental CMS is deprecated and will likely be removed in a future release");
}
} }
void Arguments::check_deprecated_gc_flags() { void Arguments::check_deprecated_gc_flags() {
@@ -2316,31 +2312,8 @@ bool Arguments::check_vm_args_consistency() {
status = status && ArgumentsExt::check_gc_consistency_user(); status = status && ArgumentsExt::check_gc_consistency_user();
status = status && check_stack_pages(); status = status && check_stack_pages();
if (CMSIncrementalMode) { status = status && verify_percentage(CMSIncrementalSafetyFactor,
if (!UseConcMarkSweepGC) { "CMSIncrementalSafetyFactor");
jio_fprintf(defaultStream::error_stream(),
"error: invalid argument combination.\n"
"The CMS collector (-XX:+UseConcMarkSweepGC) must be "
"selected in order\nto use CMSIncrementalMode.\n");
status = false;
} else {
status = status && verify_percentage(CMSIncrementalDutyCycle,
"CMSIncrementalDutyCycle");
status = status && verify_percentage(CMSIncrementalDutyCycleMin,
"CMSIncrementalDutyCycleMin");
status = status && verify_percentage(CMSIncrementalSafetyFactor,
"CMSIncrementalSafetyFactor");
status = status && verify_percentage(CMSIncrementalOffset,
"CMSIncrementalOffset");
status = status && verify_percentage(CMSExpAvgFactor,
"CMSExpAvgFactor");
// If it was not set on the command line, set
// CMSInitiatingOccupancyFraction to 1 so icms can initiate cycles early.
if (CMSInitiatingOccupancyFraction < 0) {
FLAG_SET_DEFAULT(CMSInitiatingOccupancyFraction, 1);
}
}
}
// CMS space iteration, which FLSVerifyAllHeapreferences entails, // CMS space iteration, which FLSVerifyAllHeapreferences entails,
// insists that we hold the requisite locks so that the iteration is // insists that we hold the requisite locks so that the iteration is
@@ -2874,14 +2847,6 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
// -Xnoclassgc // -Xnoclassgc
} else if (match_option(option, "-Xnoclassgc", &tail)) { } else if (match_option(option, "-Xnoclassgc", &tail)) {
FLAG_SET_CMDLINE(bool, ClassUnloading, false); FLAG_SET_CMDLINE(bool, ClassUnloading, false);
// -Xincgc: i-CMS
} else if (match_option(option, "-Xincgc", &tail)) {
FLAG_SET_CMDLINE(bool, UseConcMarkSweepGC, true);
FLAG_SET_CMDLINE(bool, CMSIncrementalMode, true);
// -Xnoincgc: no i-CMS
} else if (match_option(option, "-Xnoincgc", &tail)) {
FLAG_SET_CMDLINE(bool, UseConcMarkSweepGC, false);
FLAG_SET_CMDLINE(bool, CMSIncrementalMode, false);
// -Xconcgc // -Xconcgc
} else if (match_option(option, "-Xconcgc", &tail)) { } else if (match_option(option, "-Xconcgc", &tail)) {
FLAG_SET_CMDLINE(bool, UseConcMarkSweepGC, true); FLAG_SET_CMDLINE(bool, UseConcMarkSweepGC, true);
@@ -3711,7 +3676,6 @@ void Arguments::set_shared_spaces_flags() {
#if !INCLUDE_ALL_GCS #if !INCLUDE_ALL_GCS
static void force_serial_gc() { static void force_serial_gc() {
FLAG_SET_DEFAULT(UseSerialGC, true); FLAG_SET_DEFAULT(UseSerialGC, true);
FLAG_SET_DEFAULT(CMSIncrementalMode, false); // special CMS suboption
UNSUPPORTED_GC_OPTION(UseG1GC); UNSUPPORTED_GC_OPTION(UseG1GC);
UNSUPPORTED_GC_OPTION(UseParallelGC); UNSUPPORTED_GC_OPTION(UseParallelGC);
UNSUPPORTED_GC_OPTION(UseParallelOldGC); UNSUPPORTED_GC_OPTION(UseParallelOldGC);

View file

@@ -1638,30 +1638,10 @@ class CommandLineFlags {
"The maximum size of young gen chosen by default per GC worker " \ "The maximum size of young gen chosen by default per GC worker " \
"thread available") \ "thread available") \
\ \
product(bool, CMSIncrementalMode, false, \
"Whether CMS GC should operate in \"incremental\" mode") \
\
product(uintx, CMSIncrementalDutyCycle, 10, \
"Percentage (0-100) of CMS incremental mode duty cycle. If " \
"CMSIncrementalPacing is enabled, then this is just the initial " \
"value.") \
\
product(bool, CMSIncrementalPacing, true, \
"Whether the CMS incremental mode duty cycle should be " \
"automatically adjusted") \
\
product(uintx, CMSIncrementalDutyCycleMin, 0, \
"Minimum percentage (0-100) of the CMS incremental duty cycle " \
"used when CMSIncrementalPacing is enabled") \
\
product(uintx, CMSIncrementalSafetyFactor, 10, \ product(uintx, CMSIncrementalSafetyFactor, 10, \
"Percentage (0-100) used to add conservatism when computing the " \ "Percentage (0-100) used to add conservatism when computing the " \
"duty cycle") \ "duty cycle") \
\ \
product(uintx, CMSIncrementalOffset, 0, \
"Percentage (0-100) by which the CMS incremental mode duty cycle "\
"is shifted to the right within the period between young GCs") \
\
product(uintx, CMSExpAvgFactor, 50, \ product(uintx, CMSExpAvgFactor, 50, \
"Percentage (0-100) used to weight the current sample when " \ "Percentage (0-100) used to weight the current sample when " \
"computing exponential averages for CMS statistics") \ "computing exponential averages for CMS statistics") \
@@ -1720,15 +1700,6 @@ class CommandLineFlags {
"Skip block flux-rate sampling for an epoch unless inter-sweep " \ "Skip block flux-rate sampling for an epoch unless inter-sweep " \
"duration exceeds this threshold in milliseconds") \ "duration exceeds this threshold in milliseconds") \
\ \
develop(bool, CMSTraceIncrementalMode, false, \
"Trace CMS incremental mode") \
\
develop(bool, CMSTraceIncrementalPacing, false, \
"Trace CMS incremental mode pacing computation") \
\
develop(bool, CMSTraceThreadState, false, \
"Trace the CMS thread state (enable the trace_state() method)") \
\
product(bool, CMSClassUnloadingEnabled, true, \ product(bool, CMSClassUnloadingEnabled, true, \
"Whether class unloading enabled when using CMS GC") \ "Whether class unloading enabled when using CMS GC") \
\ \

View file

@@ -72,7 +72,6 @@ Monitor* Threads_lock = NULL;
Monitor* CGC_lock = NULL; Monitor* CGC_lock = NULL;
Monitor* STS_lock = NULL; Monitor* STS_lock = NULL;
Monitor* SLT_lock = NULL; Monitor* SLT_lock = NULL;
Monitor* iCMS_lock = NULL;
Monitor* FullGCCount_lock = NULL; Monitor* FullGCCount_lock = NULL;
Monitor* CMark_lock = NULL; Monitor* CMark_lock = NULL;
Mutex* CMRegionStack_lock = NULL; Mutex* CMRegionStack_lock = NULL;
@@ -175,9 +174,6 @@ void mutex_init() {
def(CGC_lock , Monitor, special, true ); // coordinate between fore- and background GC def(CGC_lock , Monitor, special, true ); // coordinate between fore- and background GC
def(STS_lock , Monitor, leaf, true ); def(STS_lock , Monitor, leaf, true );
if (UseConcMarkSweepGC) {
def(iCMS_lock , Monitor, special, true ); // CMS incremental mode start/stop notification
}
if (UseConcMarkSweepGC || UseG1GC) { if (UseConcMarkSweepGC || UseG1GC) {
def(FullGCCount_lock , Monitor, leaf, true ); // in support of ExplicitGCInvokesConcurrent def(FullGCCount_lock , Monitor, leaf, true ); // in support of ExplicitGCInvokesConcurrent
} }

View file

@@ -66,7 +66,6 @@ extern Monitor* CGC_lock; // used for coordination betwee
// fore- & background GC threads. // fore- & background GC threads.
extern Monitor* STS_lock; // used for joining/leaving SuspendibleThreadSet. extern Monitor* STS_lock; // used for joining/leaving SuspendibleThreadSet.
extern Monitor* SLT_lock; // used in CMS GC for acquiring PLL extern Monitor* SLT_lock; // used in CMS GC for acquiring PLL
extern Monitor* iCMS_lock; // CMS incremental mode start/stop notification
extern Monitor* FullGCCount_lock; // in support of "concurrent" full gc extern Monitor* FullGCCount_lock; // in support of "concurrent" full gc
extern Monitor* CMark_lock; // used for concurrent mark thread coordination extern Monitor* CMark_lock; // used for concurrent mark thread coordination
extern Mutex* CMRegionStack_lock; // used for protecting accesses to the CM region stack extern Mutex* CMRegionStack_lock; // used for protecting accesses to the CM region stack

View file

@@ -527,12 +527,10 @@ typedef TwoOopHashtable<Symbol*, mtClass> SymbolTwoOopHashtable;
nonstatic_field(DefNewGeneration, _next_gen, Generation*) \ nonstatic_field(DefNewGeneration, _next_gen, Generation*) \
nonstatic_field(DefNewGeneration, _tenuring_threshold, uint) \ nonstatic_field(DefNewGeneration, _tenuring_threshold, uint) \
nonstatic_field(DefNewGeneration, _age_table, ageTable) \ nonstatic_field(DefNewGeneration, _age_table, ageTable) \
nonstatic_field(DefNewGeneration, _eden_space, EdenSpace*) \ nonstatic_field(DefNewGeneration, _eden_space, ContiguousSpace*) \
nonstatic_field(DefNewGeneration, _from_space, ContiguousSpace*) \ nonstatic_field(DefNewGeneration, _from_space, ContiguousSpace*) \
nonstatic_field(DefNewGeneration, _to_space, ContiguousSpace*) \ nonstatic_field(DefNewGeneration, _to_space, ContiguousSpace*) \
\ \
nonstatic_field(EdenSpace, _gen, DefNewGeneration*) \
\
nonstatic_field(Generation, _reserved, MemRegion) \ nonstatic_field(Generation, _reserved, MemRegion) \
nonstatic_field(Generation, _virtual_space, VirtualSpace) \ nonstatic_field(Generation, _virtual_space, VirtualSpace) \
nonstatic_field(Generation, _level, int) \ nonstatic_field(Generation, _level, int) \
@@ -1490,7 +1488,6 @@ typedef TwoOopHashtable<Symbol*, mtClass> SymbolTwoOopHashtable;
declare_toplevel_type(BitMap) \ declare_toplevel_type(BitMap) \
declare_type(CompactibleSpace, Space) \ declare_type(CompactibleSpace, Space) \
declare_type(ContiguousSpace, CompactibleSpace) \ declare_type(ContiguousSpace, CompactibleSpace) \
declare_type(EdenSpace, ContiguousSpace) \
declare_type(OffsetTableContigSpace, ContiguousSpace) \ declare_type(OffsetTableContigSpace, ContiguousSpace) \
declare_type(TenuredSpace, OffsetTableContigSpace) \ declare_type(TenuredSpace, OffsetTableContigSpace) \
declare_toplevel_type(BarrierSet) \ declare_toplevel_type(BarrierSet) \
@@ -1532,7 +1529,6 @@ typedef TwoOopHashtable<Symbol*, mtClass> SymbolTwoOopHashtable;
declare_toplevel_type(CollectedHeap*) \ declare_toplevel_type(CollectedHeap*) \
declare_toplevel_type(ContiguousSpace*) \ declare_toplevel_type(ContiguousSpace*) \
declare_toplevel_type(DefNewGeneration*) \ declare_toplevel_type(DefNewGeneration*) \
declare_toplevel_type(EdenSpace*) \
declare_toplevel_type(GenCollectedHeap*) \ declare_toplevel_type(GenCollectedHeap*) \
declare_toplevel_type(Generation*) \ declare_toplevel_type(Generation*) \
declare_toplevel_type(GenerationSpec**) \ declare_toplevel_type(GenerationSpec**) \

View file

@@ -1,6 +1,6 @@
<?xml version="1.0" encoding="utf-8"?> <?xml version="1.0" encoding="utf-8"?>
<!-- <!--
Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
This code is free software; you can redistribute it and/or modify it This code is free software; you can redistribute it and/or modify it
@@ -59,6 +59,7 @@ public:
void set_starttime(const Ticks&amp; time) {} void set_starttime(const Ticks&amp; time) {}
void set_endtime(const Ticks&amp; time) {} void set_endtime(const Ticks&amp; time) {}
bool should_commit() const { return false; } bool should_commit() const { return false; }
static bool is_enabled() { return false; }
void commit() const {} void commit() const {}
}; };

View file

@@ -174,11 +174,8 @@ needs_full_vm_compact1 = \
gc/g1/TestShrinkToOneRegion.java \ gc/g1/TestShrinkToOneRegion.java \
gc/metaspace/G1AddMetaspaceDependency.java \ gc/metaspace/G1AddMetaspaceDependency.java \
gc/startup_warnings/TestCMS.java \ gc/startup_warnings/TestCMS.java \
gc/startup_warnings/TestCMSIncrementalMode.java \
gc/startup_warnings/TestCMSNoIncrementalMode.java \
gc/startup_warnings/TestDefaultMaxRAMFraction.java \ gc/startup_warnings/TestDefaultMaxRAMFraction.java \
gc/startup_warnings/TestDefNewCMS.java \ gc/startup_warnings/TestDefNewCMS.java \
gc/startup_warnings/TestIncGC.java \
gc/startup_warnings/TestParallelGC.java \ gc/startup_warnings/TestParallelGC.java \
gc/startup_warnings/TestParallelScavengeSerialOld.java \ gc/startup_warnings/TestParallelScavengeSerialOld.java \
gc/startup_warnings/TestParNewCMS.java \ gc/startup_warnings/TestParNewCMS.java \
@@ -273,8 +270,6 @@ needs_cmsgc = \
gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java \ gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java \
gc/concurrentMarkSweep/ \ gc/concurrentMarkSweep/ \
gc/startup_warnings/TestCMS.java \ gc/startup_warnings/TestCMS.java \
gc/startup_warnings/TestCMSIncrementalMode.java \
gc/startup_warnings/TestCMSNoIncrementalMode.java \
gc/startup_warnings/TestDefNewCMS.java \ gc/startup_warnings/TestDefNewCMS.java \
gc/startup_warnings/TestParNewCMS.java gc/startup_warnings/TestParNewCMS.java

View file

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@@ -19,22 +19,19 @@
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any * or visit www.oracle.com if you need additional information or have any
* questions. * questions.
*
*/ */
package sun.jvm.hotspot.memory; /**
* @test TestNUMAPageSize
* @summary Make sure that start up with NUMA support does not cause problems.
* @bug 8061467
* @key gc
* @key regression
* @run main/othervm -Xmx8M -XX:+UseNUMA TestNUMAPageSize
*/
import java.util.*; public class TestNUMAPageSize {
import sun.jvm.hotspot.debugger.*; public static void main(String args[]) throws Exception {
import sun.jvm.hotspot.runtime.*; // nothing to do
import sun.jvm.hotspot.types.*;
/** <P> Class EdenSpace describes eden-space in new
generation. (Currently it does not add any significant
functionality beyond ContiguousSpace.) */
public class EdenSpace extends ContiguousSpace {
public EdenSpace(Address addr) {
super(addr);
} }
} }

View file

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@@ -22,24 +22,23 @@
*/ */
/* /*
* @test TestCMSNoIncrementalMode * @test DisableResizePLAB
* @key gc * @key gc
* @bug 8006398 * @bug 8060467
* @summary Test that CMS with incremental mode turned off does not print a warning message * @author filipp.zhinkin@oracle.com, john.coomes@oracle.com
* @library /testlibrary * @summary Run CMS with PLAB resizing disabled and a small OldPLABSize
*/ * @run main/othervm -XX:+UseConcMarkSweepGC -XX:-ResizePLAB -XX:OldPLABSize=1k -Xmx256m -XX:+PrintGCDetails DisableResizePLAB
*/
import com.oracle.java.testlibrary.OutputAnalyzer;
import com.oracle.java.testlibrary.ProcessTools;
public class TestCMSNoIncrementalMode {
public static void main(String args[]) throws Exception {
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseConcMarkSweepGC", "-XX:-CMSIncrementalMode", "-version");
OutputAnalyzer output = new OutputAnalyzer(pb.start());
output.shouldNotContain("deprecated");
output.shouldNotContain("error");
output.shouldHaveExitValue(0);
}
public class DisableResizePLAB {
public static void main(String args[]) throws Exception {
Object garbage[] = new Object[1_000];
for (int i = 0; i < garbage.length; i++) {
garbage[i] = new byte[0];
}
long startTime = System.currentTimeMillis();
while (System.currentTimeMillis() - startTime < 10_000) {
Object o = new byte[1024];
}
}
} }

View file

@@ -279,8 +279,7 @@ public class TestShrinkAuxiliaryData {
"-XX:\\+UseConcMarkSweepGC", "-XX:\\+UseConcMarkSweepGC",
"-XX:\\+UseParallelOldGC", "-XX:\\+UseParallelOldGC",
"-XX:\\+UseParNewGC", "-XX:\\+UseParNewGC",
"-Xconcgc", "-Xconcgc"
"-Xincgc"
}; };
} }
} }

View file

@@ -1,46 +0,0 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestCMSIncrementalMode
* @key gc
* @bug 8006398
* @summary Test that the deprecated CMSIncrementalMode print a warning message
* @library /testlibrary
*/
import com.oracle.java.testlibrary.OutputAnalyzer;
import com.oracle.java.testlibrary.ProcessTools;
public class TestCMSIncrementalMode {
public static void main(String args[]) throws Exception {
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseConcMarkSweepGC", "-XX:+CMSIncrementalMode", "-version");
OutputAnalyzer output = new OutputAnalyzer(pb.start());
output.shouldContain("warning: Using incremental CMS is deprecated and will likely be removed in a future release");
output.shouldNotContain("error");
output.shouldHaveExitValue(0);
}
}

View file

@@ -1,46 +0,0 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestIncGC
* @key gc
* @bug 8006398
* @summary Test that the deprecated -Xincgc print a warning message
* @library /testlibrary
*/
import com.oracle.java.testlibrary.OutputAnalyzer;
import com.oracle.java.testlibrary.ProcessTools;
public class TestIncGC {
public static void main(String args[]) throws Exception {
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xincgc", "-version");
OutputAnalyzer output = new OutputAnalyzer(pb.start());
output.shouldContain("warning: Using incremental CMS is deprecated and will likely be removed in a future release");
output.shouldNotContain("error");
output.shouldHaveExitValue(0);
}
}