mirror of
https://github.com/openjdk/jdk.git
synced 2025-08-26 22:34:27 +02:00
Merge
This commit is contained in:
commit
4fafece403
70 changed files with 1570 additions and 892 deletions
|
@ -52,21 +52,9 @@ void ConcurrentMarkSweepPolicy::initialize_alignments() {
|
||||||
}
|
}
|
||||||
|
|
||||||
void ConcurrentMarkSweepPolicy::initialize_generations() {
|
void ConcurrentMarkSweepPolicy::initialize_generations() {
|
||||||
_generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC,
|
_generations = NEW_C_HEAP_ARRAY(GenerationSpecPtr, number_of_generations(), mtGC);
|
||||||
CURRENT_PC, AllocFailStrategy::RETURN_NULL);
|
_generations[0] = new GenerationSpec(Generation::ParNew, _initial_young_size, _max_young_size);
|
||||||
if (_generations == NULL)
|
_generations[1] = new GenerationSpec(Generation::ConcurrentMarkSweep, _initial_old_size, _max_old_size);
|
||||||
vm_exit_during_initialization("Unable to allocate gen spec");
|
|
||||||
|
|
||||||
Generation::Name yg_name =
|
|
||||||
UseParNewGC ? Generation::ParNew : Generation::DefNew;
|
|
||||||
_generations[0] = new GenerationSpec(yg_name, _initial_young_size,
|
|
||||||
_max_young_size);
|
|
||||||
_generations[1] = new GenerationSpec(Generation::ConcurrentMarkSweep,
|
|
||||||
_initial_old_size, _max_old_size);
|
|
||||||
|
|
||||||
if (_generations[0] == NULL || _generations[1] == NULL) {
|
|
||||||
vm_exit_during_initialization("Unable to allocate gen spec");
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void ConcurrentMarkSweepPolicy::initialize_size_policy(size_t init_eden_size,
|
void ConcurrentMarkSweepPolicy::initialize_size_policy(size_t init_eden_size,
|
||||||
|
@ -82,10 +70,5 @@ void ConcurrentMarkSweepPolicy::initialize_size_policy(size_t init_eden_size,
|
||||||
|
|
||||||
void ConcurrentMarkSweepPolicy::initialize_gc_policy_counters() {
|
void ConcurrentMarkSweepPolicy::initialize_gc_policy_counters() {
|
||||||
// initialize the policy counters - 2 collectors, 3 generations
|
// initialize the policy counters - 2 collectors, 3 generations
|
||||||
if (UseParNewGC) {
|
|
||||||
_gc_policy_counters = new GCPolicyCounters("ParNew:CMS", 2, 3);
|
_gc_policy_counters = new GCPolicyCounters("ParNew:CMS", 2, 3);
|
||||||
}
|
}
|
||||||
else {
|
|
||||||
_gc_policy_counters = new GCPolicyCounters("Copy:CMS", 2, 3);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
|
@ -90,7 +90,8 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
|
||||||
CMSRescanMultiple),
|
CMSRescanMultiple),
|
||||||
_marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
|
_marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
|
||||||
CMSConcMarkMultiple),
|
CMSConcMarkMultiple),
|
||||||
_collector(NULL)
|
_collector(NULL),
|
||||||
|
_preconsumptionDirtyCardClosure(NULL)
|
||||||
{
|
{
|
||||||
assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
|
assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
|
||||||
"FreeChunk is larger than expected");
|
"FreeChunk is larger than expected");
|
||||||
|
|
|
@ -155,6 +155,9 @@ class CompactibleFreeListSpace: public CompactibleSpace {
|
||||||
// Used to keep track of limit of sweep for the space
|
// Used to keep track of limit of sweep for the space
|
||||||
HeapWord* _sweep_limit;
|
HeapWord* _sweep_limit;
|
||||||
|
|
||||||
|
// Used to make the young collector update the mod union table
|
||||||
|
MemRegionClosure* _preconsumptionDirtyCardClosure;
|
||||||
|
|
||||||
// Support for compacting cms
|
// Support for compacting cms
|
||||||
HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
|
HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
|
||||||
HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);
|
HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);
|
||||||
|
@ -356,6 +359,14 @@ class CompactibleFreeListSpace: public CompactibleSpace {
|
||||||
void initialize_sequential_subtasks_for_marking(int n_threads,
|
void initialize_sequential_subtasks_for_marking(int n_threads,
|
||||||
HeapWord* low = NULL);
|
HeapWord* low = NULL);
|
||||||
|
|
||||||
|
virtual MemRegionClosure* preconsumptionDirtyCardClosure() const {
|
||||||
|
return _preconsumptionDirtyCardClosure;
|
||||||
|
}
|
||||||
|
|
||||||
|
void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
|
||||||
|
_preconsumptionDirtyCardClosure = cl;
|
||||||
|
}
|
||||||
|
|
||||||
// Space enquiries
|
// Space enquiries
|
||||||
size_t used() const;
|
size_t used() const;
|
||||||
size_t free() const;
|
size_t free() const;
|
||||||
|
|
|
@ -1201,14 +1201,6 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
|
||||||
return obj;
|
return obj;
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
|
||||||
ConcurrentMarkSweepGeneration::
|
|
||||||
par_promote_alloc_undo(int thread_num,
|
|
||||||
HeapWord* obj, size_t word_sz) {
|
|
||||||
// CMS does not support promotion undo.
|
|
||||||
ShouldNotReachHere();
|
|
||||||
}
|
|
||||||
|
|
||||||
void
|
void
|
||||||
ConcurrentMarkSweepGeneration::
|
ConcurrentMarkSweepGeneration::
|
||||||
par_promote_alloc_done(int thread_num) {
|
par_promote_alloc_done(int thread_num) {
|
||||||
|
@ -4094,10 +4086,7 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if (clean_survivor) { // preclean the active survivor space(s)
|
if (clean_survivor) { // preclean the active survivor space(s)
|
||||||
assert(_young_gen->kind() == Generation::DefNew ||
|
DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
|
||||||
_young_gen->kind() == Generation::ParNew,
|
|
||||||
"incorrect type for cast");
|
|
||||||
DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
|
|
||||||
PushAndMarkClosure pam_cl(this, _span, ref_processor(),
|
PushAndMarkClosure pam_cl(this, _span, ref_processor(),
|
||||||
&_markBitMap, &_modUnionTable,
|
&_markBitMap, &_modUnionTable,
|
||||||
&_markStack, true /* precleaning phase */);
|
&_markStack, true /* precleaning phase */);
|
||||||
|
@ -5168,7 +5157,7 @@ void
|
||||||
CMSCollector::
|
CMSCollector::
|
||||||
initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
|
initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
|
||||||
assert(n_threads > 0, "Unexpected n_threads argument");
|
assert(n_threads > 0, "Unexpected n_threads argument");
|
||||||
DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
|
DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
|
||||||
|
|
||||||
// Eden space
|
// Eden space
|
||||||
if (!dng->eden()->is_empty()) {
|
if (!dng->eden()->is_empty()) {
|
||||||
|
@ -5945,7 +5934,6 @@ void CMSCollector::reset(bool concurrent) {
|
||||||
}
|
}
|
||||||
|
|
||||||
void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
|
void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
|
||||||
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
|
|
||||||
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
|
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
|
||||||
GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
|
GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
|
||||||
TraceCollectorStats tcs(counters());
|
TraceCollectorStats tcs(counters());
|
||||||
|
|
|
@ -1151,9 +1151,6 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
|
||||||
// Overrides for parallel promotion.
|
// Overrides for parallel promotion.
|
||||||
virtual oop par_promote(int thread_num,
|
virtual oop par_promote(int thread_num,
|
||||||
oop obj, markOop m, size_t word_sz);
|
oop obj, markOop m, size_t word_sz);
|
||||||
// This one should not be called for CMS.
|
|
||||||
virtual void par_promote_alloc_undo(int thread_num,
|
|
||||||
HeapWord* obj, size_t word_sz);
|
|
||||||
virtual void par_promote_alloc_done(int thread_num);
|
virtual void par_promote_alloc_done(int thread_num);
|
||||||
virtual void par_oop_since_save_marks_iterate_done(int thread_num);
|
virtual void par_oop_since_save_marks_iterate_done(int thread_num);
|
||||||
|
|
||||||
|
@ -1256,8 +1253,6 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
|
||||||
virtual const char* short_name() const { return "CMS"; }
|
virtual const char* short_name() const { return "CMS"; }
|
||||||
void print() const;
|
void print() const;
|
||||||
void printOccupancy(const char* s);
|
void printOccupancy(const char* s);
|
||||||
bool must_be_youngest() const { return false; }
|
|
||||||
bool must_be_oldest() const { return true; }
|
|
||||||
|
|
||||||
// Resize the generation after a compacting GC. The
|
// Resize the generation after a compacting GC. The
|
||||||
// generation can be treated as a contiguous space
|
// generation can be treated as a contiguous space
|
||||||
|
|
|
@ -180,9 +180,32 @@ class ClearBitmapHRClosure : public HeapRegionClosure {
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
class ParClearNextMarkBitmapTask : public AbstractGangTask {
|
||||||
|
ClearBitmapHRClosure* _cl;
|
||||||
|
HeapRegionClaimer _hrclaimer;
|
||||||
|
bool _suspendible; // If the task is suspendible, workers must join the STS.
|
||||||
|
|
||||||
|
public:
|
||||||
|
ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
|
||||||
|
_cl(cl), _suspendible(suspendible), AbstractGangTask("Parallel Clear Bitmap Task"), _hrclaimer(n_workers) {}
|
||||||
|
|
||||||
|
void work(uint worker_id) {
|
||||||
|
if (_suspendible) {
|
||||||
|
SuspendibleThreadSet::join();
|
||||||
|
}
|
||||||
|
G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
|
||||||
|
if (_suspendible) {
|
||||||
|
SuspendibleThreadSet::leave();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
void CMBitMap::clearAll() {
|
void CMBitMap::clearAll() {
|
||||||
|
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||||
ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
|
ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
|
||||||
G1CollectedHeap::heap()->heap_region_iterate(&cl);
|
uint n_workers = g1h->workers()->active_workers();
|
||||||
|
ParClearNextMarkBitmapTask task(&cl, n_workers, false);
|
||||||
|
g1h->workers()->run_task(&task);
|
||||||
guarantee(cl.complete(), "Must have completed iteration.");
|
guarantee(cl.complete(), "Must have completed iteration.");
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
@ -861,7 +884,8 @@ void ConcurrentMark::clearNextBitmap() {
|
||||||
guarantee(!g1h->mark_in_progress(), "invariant");
|
guarantee(!g1h->mark_in_progress(), "invariant");
|
||||||
|
|
||||||
ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
|
ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
|
||||||
g1h->heap_region_iterate(&cl);
|
ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
|
||||||
|
_parallel_workers->run_task(&task);
|
||||||
|
|
||||||
// Clear the liveness counting data. If the marking has been aborted, the abort()
|
// Clear the liveness counting data. If the marking has been aborted, the abort()
|
||||||
// call already did that.
|
// call already did that.
|
||||||
|
@ -2099,6 +2123,7 @@ void ConcurrentMark::cleanup() {
|
||||||
// We reclaimed old regions so we should calculate the sizes to make
|
// We reclaimed old regions so we should calculate the sizes to make
|
||||||
// sure we update the old gen/space data.
|
// sure we update the old gen/space data.
|
||||||
g1h->g1mm()->update_sizes();
|
g1h->g1mm()->update_sizes();
|
||||||
|
g1h->allocation_context_stats().update_after_mark();
|
||||||
|
|
||||||
g1h->trace_heap_after_concurrent_cycle();
|
g1h->trace_heap_after_concurrent_cycle();
|
||||||
}
|
}
|
||||||
|
@ -3219,7 +3244,6 @@ void ConcurrentMark::aggregate_count_data() {
|
||||||
_g1h->set_par_threads(n_workers);
|
_g1h->set_par_threads(n_workers);
|
||||||
_g1h->workers()->run_task(&g1_par_agg_task);
|
_g1h->workers()->run_task(&g1_par_agg_task);
|
||||||
_g1h->set_par_threads(0);
|
_g1h->set_par_threads(0);
|
||||||
_g1h->allocation_context_stats().update_at_remark();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Clear the per-worker arrays used to store the per-region counting data
|
// Clear the per-worker arrays used to store the per-region counting data
|
||||||
|
|
|
@ -280,7 +280,6 @@ void ConcurrentMarkThread::run() {
|
||||||
// We may have aborted just before the remark. Do not bother clearing the
|
// We may have aborted just before the remark. Do not bother clearing the
|
||||||
// bitmap then, as it has been done during mark abort.
|
// bitmap then, as it has been done during mark abort.
|
||||||
if (!cm()->has_aborted()) {
|
if (!cm()->has_aborted()) {
|
||||||
SuspendibleThreadSetJoiner sts;
|
|
||||||
_cm->clearNextBitmap();
|
_cm->clearNextBitmap();
|
||||||
} else {
|
} else {
|
||||||
assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
|
assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
|
||||||
|
|
|
@ -45,7 +45,7 @@ class AllocationContextStats: public StackObj {
|
||||||
public:
|
public:
|
||||||
inline void clear() { }
|
inline void clear() { }
|
||||||
inline void update(bool full_gc) { }
|
inline void update(bool full_gc) { }
|
||||||
inline void update_at_remark() { }
|
inline void update_after_mark() { }
|
||||||
inline bool available() { return false; }
|
inline bool available() { return false; }
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -59,7 +59,7 @@ void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
|
||||||
!(retained_region->top() == retained_region->end()) &&
|
!(retained_region->top() == retained_region->end()) &&
|
||||||
!retained_region->is_empty() &&
|
!retained_region->is_empty() &&
|
||||||
!retained_region->is_humongous()) {
|
!retained_region->is_humongous()) {
|
||||||
retained_region->record_top_and_timestamp();
|
retained_region->record_timestamp();
|
||||||
// The retained region was added to the old region set when it was
|
// The retained region was added to the old region set when it was
|
||||||
// retired. We have to remove it now, since we don't allow regions
|
// retired. We have to remove it now, since we don't allow regions
|
||||||
// we allocate to in the region sets. We'll re-add it later, when
|
// we allocate to in the region sets. We'll re-add it later, when
|
||||||
|
@ -94,6 +94,9 @@ void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, Evacuat
|
||||||
// want either way so no reason to check explicitly for either
|
// want either way so no reason to check explicitly for either
|
||||||
// condition.
|
// condition.
|
||||||
_retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
|
_retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
|
||||||
|
if (_retained_old_gc_alloc_region != NULL) {
|
||||||
|
_retained_old_gc_alloc_region->record_retained_region();
|
||||||
|
}
|
||||||
|
|
||||||
if (ResizePLAB) {
|
if (ResizePLAB) {
|
||||||
_g1h->_survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
|
_g1h->_survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
|
||||||
|
|
|
@ -1222,7 +1222,6 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
|
||||||
|
|
||||||
// Timing
|
// Timing
|
||||||
assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
|
assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
|
||||||
gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
|
|
||||||
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
|
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
|
||||||
|
|
||||||
{
|
{
|
||||||
|
@ -2258,6 +2257,7 @@ bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
|
||||||
case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent;
|
case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent;
|
||||||
case GCCause::_g1_humongous_allocation: return true;
|
case GCCause::_g1_humongous_allocation: return true;
|
||||||
case GCCause::_update_allocation_context_stats_inc: return true;
|
case GCCause::_update_allocation_context_stats_inc: return true;
|
||||||
|
case GCCause::_wb_conc_mark: return true;
|
||||||
default: return false;
|
default: return false;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -2552,8 +2552,9 @@ void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
|
||||||
void
|
void
|
||||||
G1CollectedHeap::heap_region_par_iterate(HeapRegionClosure* cl,
|
G1CollectedHeap::heap_region_par_iterate(HeapRegionClosure* cl,
|
||||||
uint worker_id,
|
uint worker_id,
|
||||||
HeapRegionClaimer *hrclaimer) const {
|
HeapRegionClaimer *hrclaimer,
|
||||||
_hrm.par_iterate(cl, worker_id, hrclaimer);
|
bool concurrent) const {
|
||||||
|
_hrm.par_iterate(cl, worker_id, hrclaimer, concurrent);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Clear the cached CSet starting regions and (more importantly)
|
// Clear the cached CSet starting regions and (more importantly)
|
||||||
|
@ -6530,7 +6531,7 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
|
||||||
// We really only need to do this for old regions given that we
|
// We really only need to do this for old regions given that we
|
||||||
// should never scan survivors. But it doesn't hurt to do it
|
// should never scan survivors. But it doesn't hurt to do it
|
||||||
// for survivors too.
|
// for survivors too.
|
||||||
new_alloc_region->record_top_and_timestamp();
|
new_alloc_region->record_timestamp();
|
||||||
if (survivor) {
|
if (survivor) {
|
||||||
new_alloc_region->set_survivor();
|
new_alloc_region->set_survivor();
|
||||||
_hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
|
_hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
|
||||||
|
|
|
@ -1380,10 +1380,13 @@ public:
|
||||||
// in the range [0..max(ParallelGCThreads-1, 1)]. Applies "blk->doHeapRegion"
|
// in the range [0..max(ParallelGCThreads-1, 1)]. Applies "blk->doHeapRegion"
|
||||||
// to each of the regions, by attempting to claim the region using the
|
// to each of the regions, by attempting to claim the region using the
|
||||||
// HeapRegionClaimer and, if successful, applying the closure to the claimed
|
// HeapRegionClaimer and, if successful, applying the closure to the claimed
|
||||||
// region.
|
// region. The concurrent argument should be set to true if iteration is
|
||||||
|
// performed concurrently, during which no assumptions are made for consistent
|
||||||
|
// attributes of the heap regions (as they might be modified while iterating).
|
||||||
void heap_region_par_iterate(HeapRegionClosure* cl,
|
void heap_region_par_iterate(HeapRegionClosure* cl,
|
||||||
uint worker_id,
|
uint worker_id,
|
||||||
HeapRegionClaimer* hrclaimer) const;
|
HeapRegionClaimer* hrclaimer,
|
||||||
|
bool concurrent = false) const;
|
||||||
|
|
||||||
// Clear the cached cset start regions and (more importantly)
|
// Clear the cached cset start regions and (more importantly)
|
||||||
// the time stamps. Called when we reset the GC time stamp.
|
// the time stamps. Called when we reset the GC time stamp.
|
||||||
|
|
|
@ -1425,6 +1425,18 @@ void G1CollectorPolicy::print_yg_surv_rate_info() const {
|
||||||
#endif // PRODUCT
|
#endif // PRODUCT
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool G1CollectorPolicy::is_young_list_full() {
|
||||||
|
uint young_list_length = _g1->young_list()->length();
|
||||||
|
uint young_list_target_length = _young_list_target_length;
|
||||||
|
return young_list_length >= young_list_target_length;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool G1CollectorPolicy::can_expand_young_list() {
|
||||||
|
uint young_list_length = _g1->young_list()->length();
|
||||||
|
uint young_list_max_length = _young_list_max_length;
|
||||||
|
return young_list_length < young_list_max_length;
|
||||||
|
}
|
||||||
|
|
||||||
uint G1CollectorPolicy::max_regions(int purpose) {
|
uint G1CollectorPolicy::max_regions(int purpose) {
|
||||||
switch (purpose) {
|
switch (purpose) {
|
||||||
case GCAllocForSurvived:
|
case GCAllocForSurvived:
|
||||||
|
|
|
@ -26,6 +26,7 @@
|
||||||
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
|
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
|
||||||
|
|
||||||
#include "gc_implementation/g1/collectionSetChooser.hpp"
|
#include "gc_implementation/g1/collectionSetChooser.hpp"
|
||||||
|
#include "gc_implementation/g1/g1Allocator.hpp"
|
||||||
#include "gc_implementation/g1/g1MMUTracker.hpp"
|
#include "gc_implementation/g1/g1MMUTracker.hpp"
|
||||||
#include "memory/collectorPolicy.hpp"
|
#include "memory/collectorPolicy.hpp"
|
||||||
|
|
||||||
|
@ -807,7 +808,7 @@ public:
|
||||||
|
|
||||||
// If an expansion would be appropriate, because recent GC overhead had
|
// If an expansion would be appropriate, because recent GC overhead had
|
||||||
// exceeded the desired limit, return an amount to expand by.
|
// exceeded the desired limit, return an amount to expand by.
|
||||||
size_t expansion_amount();
|
virtual size_t expansion_amount();
|
||||||
|
|
||||||
// Print tracing information.
|
// Print tracing information.
|
||||||
void print_tracing_info() const;
|
void print_tracing_info() const;
|
||||||
|
@ -826,17 +827,9 @@ public:
|
||||||
|
|
||||||
size_t young_list_target_length() const { return _young_list_target_length; }
|
size_t young_list_target_length() const { return _young_list_target_length; }
|
||||||
|
|
||||||
bool is_young_list_full() {
|
bool is_young_list_full();
|
||||||
uint young_list_length = _g1->young_list()->length();
|
|
||||||
uint young_list_target_length = _young_list_target_length;
|
|
||||||
return young_list_length >= young_list_target_length;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool can_expand_young_list() {
|
bool can_expand_young_list();
|
||||||
uint young_list_length = _g1->young_list()->length();
|
|
||||||
uint young_list_max_length = _young_list_max_length;
|
|
||||||
return young_list_length < young_list_max_length;
|
|
||||||
}
|
|
||||||
|
|
||||||
uint young_list_max_length() {
|
uint young_list_max_length() {
|
||||||
return _young_list_max_length;
|
return _young_list_max_length;
|
||||||
|
|
|
@ -0,0 +1,32 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||||
|
* version 2 for more details (a copy is included in the LICENSE file that
|
||||||
|
* accompanied this code).
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License version
|
||||||
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||||
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
*
|
||||||
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||||
|
* or visit www.oracle.com if you need additional information or have any
|
||||||
|
* questions.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_EXT_HPP
|
||||||
|
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_EXT_HPP
|
||||||
|
|
||||||
|
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
|
||||||
|
|
||||||
|
class G1CollectorPolicyExt : public G1CollectorPolicy { };
|
||||||
|
|
||||||
|
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_EXT_HPP
|
|
@ -140,11 +140,9 @@ public:
|
||||||
|
|
||||||
// Set the "from" region in the closure.
|
// Set the "from" region in the closure.
|
||||||
_oc->set_region(r);
|
_oc->set_region(r);
|
||||||
HeapWord* card_start = _bot_shared->address_for_index(index);
|
MemRegion card_region(_bot_shared->address_for_index(index), G1BlockOffsetSharedArray::N_words);
|
||||||
HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
|
MemRegion pre_gc_allocated(r->bottom(), r->scan_top());
|
||||||
Space *sp = SharedHeap::heap()->space_containing(card_start);
|
MemRegion mr = pre_gc_allocated.intersection(card_region);
|
||||||
MemRegion sm_region = sp->used_region_at_save_marks();
|
|
||||||
MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end));
|
|
||||||
if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) {
|
if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) {
|
||||||
// We make the card as "claimed" lazily (so races are possible
|
// We make the card as "claimed" lazily (so races are possible
|
||||||
// but they're benign), which reduces the number of duplicate
|
// but they're benign), which reduces the number of duplicate
|
||||||
|
|
|
@ -326,7 +326,7 @@ void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
|
||||||
|
|
||||||
hr_clear(false /*par*/, false /*clear_space*/);
|
hr_clear(false /*par*/, false /*clear_space*/);
|
||||||
set_top(bottom());
|
set_top(bottom());
|
||||||
record_top_and_timestamp();
|
record_timestamp();
|
||||||
|
|
||||||
assert(mr.end() == orig_end(),
|
assert(mr.end() == orig_end(),
|
||||||
err_msg("Given region end address " PTR_FORMAT " should match exactly "
|
err_msg("Given region end address " PTR_FORMAT " should match exactly "
|
||||||
|
@ -416,9 +416,9 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
|
||||||
|
|
||||||
// If we're within a stop-world GC, then we might look at a card in a
|
// If we're within a stop-world GC, then we might look at a card in a
|
||||||
// GC alloc region that extends onto a GC LAB, which may not be
|
// GC alloc region that extends onto a GC LAB, which may not be
|
||||||
// parseable. Stop such at the "saved_mark" of the region.
|
// parseable. Stop such at the "scan_top" of the region.
|
||||||
if (g1h->is_gc_active()) {
|
if (g1h->is_gc_active()) {
|
||||||
mr = mr.intersection(used_region_at_save_marks());
|
mr = mr.intersection(MemRegion(bottom(), scan_top()));
|
||||||
} else {
|
} else {
|
||||||
mr = mr.intersection(used_region());
|
mr = mr.intersection(used_region());
|
||||||
}
|
}
|
||||||
|
@ -969,7 +969,7 @@ void HeapRegion::prepare_for_compaction(CompactPoint* cp) {
|
||||||
|
|
||||||
void G1OffsetTableContigSpace::clear(bool mangle_space) {
|
void G1OffsetTableContigSpace::clear(bool mangle_space) {
|
||||||
set_top(bottom());
|
set_top(bottom());
|
||||||
set_saved_mark_word(bottom());
|
_scan_top = bottom();
|
||||||
CompactibleSpace::clear(mangle_space);
|
CompactibleSpace::clear(mangle_space);
|
||||||
reset_bot();
|
reset_bot();
|
||||||
}
|
}
|
||||||
|
@ -1001,41 +1001,42 @@ HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
|
||||||
return _offsets.threshold();
|
return _offsets.threshold();
|
||||||
}
|
}
|
||||||
|
|
||||||
HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
|
HeapWord* G1OffsetTableContigSpace::scan_top() const {
|
||||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||||
assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
|
|
||||||
HeapWord* local_top = top();
|
HeapWord* local_top = top();
|
||||||
OrderAccess::loadload();
|
OrderAccess::loadload();
|
||||||
if (_gc_time_stamp < g1h->get_gc_time_stamp()) {
|
const unsigned local_time_stamp = _gc_time_stamp;
|
||||||
|
assert(local_time_stamp <= g1h->get_gc_time_stamp(), "invariant");
|
||||||
|
if (local_time_stamp < g1h->get_gc_time_stamp()) {
|
||||||
return local_top;
|
return local_top;
|
||||||
} else {
|
} else {
|
||||||
return Space::saved_mark_word();
|
return _scan_top;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void G1OffsetTableContigSpace::record_top_and_timestamp() {
|
void G1OffsetTableContigSpace::record_timestamp() {
|
||||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||||
unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();
|
unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();
|
||||||
|
|
||||||
if (_gc_time_stamp < curr_gc_time_stamp) {
|
if (_gc_time_stamp < curr_gc_time_stamp) {
|
||||||
// The order of these is important, as another thread might be
|
// Setting the time stamp here tells concurrent readers to look at
|
||||||
// about to start scanning this region. If it does so after
|
// scan_top to know the maximum allowed address to look at.
|
||||||
// set_saved_mark and before _gc_time_stamp = ..., then the latter
|
|
||||||
// will be false, and it will pick up top() as the high water mark
|
// scan_top should be bottom for all regions except for the
|
||||||
// of region. If it does so after _gc_time_stamp = ..., then it
|
// retained old alloc region which should have scan_top == top
|
||||||
// will pick up the right saved_mark_word() as the high water mark
|
HeapWord* st = _scan_top;
|
||||||
// of the region. Either way, the behavior will be correct.
|
guarantee(st == _bottom || st == _top, "invariant");
|
||||||
Space::set_saved_mark_word(top());
|
|
||||||
OrderAccess::storestore();
|
|
||||||
_gc_time_stamp = curr_gc_time_stamp;
|
_gc_time_stamp = curr_gc_time_stamp;
|
||||||
// No need to do another barrier to flush the writes above. If
|
|
||||||
// this is called in parallel with other threads trying to
|
|
||||||
// allocate into the region, the caller should call this while
|
|
||||||
// holding a lock and when the lock is released the writes will be
|
|
||||||
// flushed.
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void G1OffsetTableContigSpace::record_retained_region() {
|
||||||
|
// scan_top is the maximum address where it's safe for the next gc to
|
||||||
|
// scan this region.
|
||||||
|
_scan_top = top();
|
||||||
|
}
|
||||||
|
|
||||||
void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
|
void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
|
||||||
object_iterate(blk);
|
object_iterate(blk);
|
||||||
}
|
}
|
||||||
|
@ -1063,6 +1064,8 @@ G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
|
||||||
void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
|
void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
|
||||||
CompactibleSpace::initialize(mr, clear_space, mangle_space);
|
CompactibleSpace::initialize(mr, clear_space, mangle_space);
|
||||||
_top = bottom();
|
_top = bottom();
|
||||||
|
_scan_top = bottom();
|
||||||
|
set_saved_mark_word(NULL);
|
||||||
reset_bot();
|
reset_bot();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -101,28 +101,25 @@ public:
|
||||||
// OffsetTableContigSpace. If the two versions of BlockOffsetTable could
|
// OffsetTableContigSpace. If the two versions of BlockOffsetTable could
|
||||||
// be reconciled, then G1OffsetTableContigSpace could go away.
|
// be reconciled, then G1OffsetTableContigSpace could go away.
|
||||||
|
|
||||||
// The idea behind time stamps is the following. Doing a save_marks on
|
// The idea behind time stamps is the following. We want to keep track of
|
||||||
// all regions at every GC pause is time consuming (if I remember
|
// the highest address where it's safe to scan objects for each region.
|
||||||
// well, 10ms or so). So, we would like to do that only for regions
|
// This is only relevant for current GC alloc regions so we keep a time stamp
|
||||||
// that are GC alloc regions. To achieve this, we use time
|
// per region to determine if the region has been allocated during the current
|
||||||
// stamps. For every evacuation pause, G1CollectedHeap generates a
|
// GC or not. If the time stamp is current we report a scan_top value which
|
||||||
// unique time stamp (essentially a counter that gets
|
// was saved at the end of the previous GC for retained alloc regions and which is
|
||||||
// incremented). Every time we want to call save_marks on a region,
|
// equal to the bottom for all other regions.
|
||||||
// we set the saved_mark_word to top and also copy the current GC
|
// There is a race between card scanners and allocating gc workers where we must ensure
|
||||||
// time stamp to the time stamp field of the space. Reading the
|
// that card scanners do not read the memory allocated by the gc workers.
|
||||||
// saved_mark_word involves checking the time stamp of the
|
// In order to enforce that, we must not return a value of _top which is more recent than the
|
||||||
// region. If it is the same as the current GC time stamp, then we
|
// time stamp. This is due to the fact that a region may become a gc alloc region at
|
||||||
// can safely read the saved_mark_word field, as it is valid. If the
|
// some point after we've read the timestamp value as being < the current time stamp.
|
||||||
// time stamp of the region is not the same as the current GC time
|
// The time stamps are re-initialized to zero at cleanup and at Full GCs.
|
||||||
// stamp, then we instead read top, as the saved_mark_word field is
|
// The current scheme that uses sequential unsigned ints will fail only if we have 4b
|
||||||
// invalid. Time stamps (on the regions and also on the
|
|
||||||
// G1CollectedHeap) are reset at every cleanup (we iterate over
|
|
||||||
// the regions anyway) and at the end of a Full GC. The current scheme
|
|
||||||
// that uses sequential unsigned ints will fail only if we have 4b
|
|
||||||
// evacuation pauses between two cleanups, which is _highly_ unlikely.
|
// evacuation pauses between two cleanups, which is _highly_ unlikely.
|
||||||
class G1OffsetTableContigSpace: public CompactibleSpace {
|
class G1OffsetTableContigSpace: public CompactibleSpace {
|
||||||
friend class VMStructs;
|
friend class VMStructs;
|
||||||
HeapWord* _top;
|
HeapWord* _top;
|
||||||
|
HeapWord* volatile _scan_top;
|
||||||
protected:
|
protected:
|
||||||
G1BlockOffsetArrayContigSpace _offsets;
|
G1BlockOffsetArrayContigSpace _offsets;
|
||||||
Mutex _par_alloc_lock;
|
Mutex _par_alloc_lock;
|
||||||
|
@ -166,10 +163,11 @@ class G1OffsetTableContigSpace: public CompactibleSpace {
|
||||||
void set_bottom(HeapWord* value);
|
void set_bottom(HeapWord* value);
|
||||||
void set_end(HeapWord* value);
|
void set_end(HeapWord* value);
|
||||||
|
|
||||||
virtual HeapWord* saved_mark_word() const;
|
HeapWord* scan_top() const;
|
||||||
void record_top_and_timestamp();
|
void record_timestamp();
|
||||||
void reset_gc_time_stamp() { _gc_time_stamp = 0; }
|
void reset_gc_time_stamp() { _gc_time_stamp = 0; }
|
||||||
unsigned get_gc_time_stamp() { return _gc_time_stamp; }
|
unsigned get_gc_time_stamp() { return _gc_time_stamp; }
|
||||||
|
void record_retained_region();
|
||||||
|
|
||||||
// See the comment above in the declaration of _pre_dummy_top for an
|
// See the comment above in the declaration of _pre_dummy_top for an
|
||||||
// explanation of what it is.
|
// explanation of what it is.
|
||||||
|
@ -191,6 +189,8 @@ class G1OffsetTableContigSpace: public CompactibleSpace {
|
||||||
virtual HeapWord* allocate(size_t word_size);
|
virtual HeapWord* allocate(size_t word_size);
|
||||||
HeapWord* par_allocate(size_t word_size);
|
HeapWord* par_allocate(size_t word_size);
|
||||||
|
|
||||||
|
HeapWord* saved_mark_word() const { ShouldNotReachHere(); return NULL; }
|
||||||
|
|
||||||
// MarkSweep support phase3
|
// MarkSweep support phase3
|
||||||
virtual HeapWord* initialize_threshold();
|
virtual HeapWord* initialize_threshold();
|
||||||
virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
|
virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
|
||||||
|
|
|
@ -260,7 +260,7 @@ uint HeapRegionManager::find_unavailable_from_idx(uint start_idx, uint* res_idx)
|
||||||
return num_regions;
|
return num_regions;
|
||||||
}
|
}
|
||||||
|
|
||||||
void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer) const {
|
void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer, bool concurrent) const {
|
||||||
const uint start_index = hrclaimer->start_region_for_worker(worker_id);
|
const uint start_index = hrclaimer->start_region_for_worker(worker_id);
|
||||||
|
|
||||||
// Every worker will actually look at all regions, skipping over regions that
|
// Every worker will actually look at all regions, skipping over regions that
|
||||||
|
@ -279,7 +279,11 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, Heap
|
||||||
// We'll ignore "continues humongous" regions (we'll process them
|
// We'll ignore "continues humongous" regions (we'll process them
|
||||||
// when we come across their corresponding "start humongous"
|
// when we come across their corresponding "start humongous"
|
||||||
// region) and regions already claimed.
|
// region) and regions already claimed.
|
||||||
if (hrclaimer->is_region_claimed(index) || r->is_continues_humongous()) {
|
// However, if the iteration is specified as concurrent, the values for
|
||||||
|
// is_starts_humongous and is_continues_humongous can not be trusted,
|
||||||
|
// and we should just blindly iterate over regions regardless of their
|
||||||
|
// humongous status.
|
||||||
|
if (hrclaimer->is_region_claimed(index) || (!concurrent && r->is_continues_humongous())) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
// OK, try to claim it
|
// OK, try to claim it
|
||||||
|
@ -287,7 +291,9 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, Heap
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
// Success!
|
// Success!
|
||||||
if (r->is_starts_humongous()) {
|
// As mentioned above, special treatment of humongous regions can only be
|
||||||
|
// done if we are iterating non-concurrently.
|
||||||
|
if (!concurrent && r->is_starts_humongous()) {
|
||||||
// If the region is "starts humongous" we'll iterate over its
|
// If the region is "starts humongous" we'll iterate over its
|
||||||
// "continues humongous" first; in fact we'll do them
|
// "continues humongous" first; in fact we'll do them
|
||||||
// first. The order is important. In one case, calling the
|
// first. The order is important. In one case, calling the
|
||||||
|
|
|
@ -222,7 +222,7 @@ public:
|
||||||
// terminating the iteration early if doHeapRegion() returns true.
|
// terminating the iteration early if doHeapRegion() returns true.
|
||||||
void iterate(HeapRegionClosure* blk) const;
|
void iterate(HeapRegionClosure* blk) const;
|
||||||
|
|
||||||
void par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer) const;
|
void par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer, bool concurrent) const;
|
||||||
|
|
||||||
// Uncommit up to num_regions_to_remove regions that are completely free.
|
// Uncommit up to num_regions_to_remove regions that are completely free.
|
||||||
// Return the actual number of uncommitted regions.
|
// Return the actual number of uncommitted regions.
|
||||||
|
|
|
@ -92,12 +92,8 @@ bool VM_G1IncCollectionPause::doit_prologue() {
|
||||||
|
|
||||||
void VM_G1IncCollectionPause::doit() {
|
void VM_G1IncCollectionPause::doit() {
|
||||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||||
assert(!_should_initiate_conc_mark ||
|
assert(!_should_initiate_conc_mark || g1h->should_do_concurrent_full_gc(_gc_cause),
|
||||||
((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
|
"only a GC locker, a System.gc(), stats update, whitebox, or a hum allocation induced GC should start a cycle");
|
||||||
(_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
|
|
||||||
_gc_cause == GCCause::_g1_humongous_allocation ||
|
|
||||||
_gc_cause == GCCause::_update_allocation_context_stats_inc),
|
|
||||||
"only a GC locker, a System.gc(), stats update or a hum allocation induced GC should start a cycle");
|
|
||||||
|
|
||||||
if (_word_size > 0) {
|
if (_word_size > 0) {
|
||||||
// An allocation has been requested. So, try to do that first.
|
// An allocation has been requested. So, try to do that first.
|
||||||
|
@ -230,7 +226,6 @@ void VM_CGC_Operation::release_and_notify_pending_list_lock() {
|
||||||
}
|
}
|
||||||
|
|
||||||
void VM_CGC_Operation::doit() {
|
void VM_CGC_Operation::doit() {
|
||||||
gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
|
|
||||||
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
|
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
|
||||||
GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm(), G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id());
|
GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm(), G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id());
|
||||||
SharedHeap* sh = SharedHeap::heap();
|
SharedHeap* sh = SharedHeap::heap();
|
||||||
|
|
|
@ -884,8 +884,6 @@ void EvacuateFollowersClosureGeneral::do_void() {
|
||||||
|
|
||||||
// A Generation that does parallel young-gen collection.
|
// A Generation that does parallel young-gen collection.
|
||||||
|
|
||||||
bool ParNewGeneration::_avoid_promotion_undo = false;
|
|
||||||
|
|
||||||
void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer) {
|
void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer) {
|
||||||
assert(_promo_failure_scan_stack.is_empty(), "post condition");
|
assert(_promo_failure_scan_stack.is_empty(), "post condition");
|
||||||
_promo_failure_scan_stack.clear(true); // Clear cached segments.
|
_promo_failure_scan_stack.clear(true); // Clear cached segments.
|
||||||
|
@ -934,10 +932,6 @@ void ParNewGeneration::collect(bool full,
|
||||||
assert(gch->n_gens() == 2,
|
assert(gch->n_gens() == 2,
|
||||||
"Par collection currently only works with single older gen.");
|
"Par collection currently only works with single older gen.");
|
||||||
_next_gen = gch->next_gen(this);
|
_next_gen = gch->next_gen(this);
|
||||||
// Do we have to avoid promotion_undo?
|
|
||||||
if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
|
|
||||||
set_avoid_promotion_undo(true);
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the next generation is too full to accommodate worst-case promotion
|
// If the next generation is too full to accommodate worst-case promotion
|
||||||
// from this generation, pass on collection; let the next generation
|
// from this generation, pass on collection; let the next generation
|
||||||
|
@ -999,6 +993,11 @@ void ParNewGeneration::collect(bool full,
|
||||||
thread_state_set.reset(0 /* Bad value in debug if not reset */,
|
thread_state_set.reset(0 /* Bad value in debug if not reset */,
|
||||||
promotion_failed());
|
promotion_failed());
|
||||||
|
|
||||||
|
// Trace and reset failed promotion info.
|
||||||
|
if (promotion_failed()) {
|
||||||
|
thread_state_set.trace_promotion_failed(gc_tracer);
|
||||||
|
}
|
||||||
|
|
||||||
// Process (weak) reference objects found during scavenge.
|
// Process (weak) reference objects found during scavenge.
|
||||||
ReferenceProcessor* rp = ref_processor();
|
ReferenceProcessor* rp = ref_processor();
|
||||||
IsAliveClosure is_alive(this);
|
IsAliveClosure is_alive(this);
|
||||||
|
@ -1136,7 +1135,7 @@ oop ParNewGeneration::real_forwardee_slow(oop obj) {
|
||||||
#ifdef ASSERT
|
#ifdef ASSERT
|
||||||
bool ParNewGeneration::is_legal_forward_ptr(oop p) {
|
bool ParNewGeneration::is_legal_forward_ptr(oop p) {
|
||||||
return
|
return
|
||||||
(_avoid_promotion_undo && p == ClaimedForwardPtr)
|
(p == ClaimedForwardPtr)
|
||||||
|| Universe::heap()->is_in_reserved(p);
|
|| Universe::heap()->is_in_reserved(p);
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
@ -1157,7 +1156,7 @@ void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
|
||||||
// thus avoiding the need to undo the copy as in
|
// thus avoiding the need to undo the copy as in
|
||||||
// copy_to_survivor_space_avoiding_with_undo.
|
// copy_to_survivor_space_avoiding_with_undo.
|
||||||
|
|
||||||
oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
|
oop ParNewGeneration::copy_to_survivor_space(
|
||||||
ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
|
ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
|
||||||
// In the sequential version, this assert also says that the object is
|
// In the sequential version, this assert also says that the object is
|
||||||
// not forwarded. That might not be the case here. It is the case that
|
// not forwarded. That might not be the case here. It is the case that
|
||||||
|
@ -1277,131 +1276,6 @@ oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
|
||||||
return forward_ptr;
|
return forward_ptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// Multiple GC threads may try to promote the same object. If two
|
|
||||||
// or more GC threads copy the object, only one wins the race to install
|
|
||||||
// the forwarding pointer. The other threads have to undo their copy.
|
|
||||||
|
|
||||||
oop ParNewGeneration::copy_to_survivor_space_with_undo(
|
|
||||||
ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
|
|
||||||
|
|
||||||
// In the sequential version, this assert also says that the object is
|
|
||||||
// not forwarded. That might not be the case here. It is the case that
|
|
||||||
// the caller observed it to be not forwarded at some time in the past.
|
|
||||||
assert(is_in_reserved(old), "shouldn't be scavenging this oop");
|
|
||||||
|
|
||||||
// The sequential code read "old->age()" below. That doesn't work here,
|
|
||||||
// since the age is in the mark word, and that might be overwritten with
|
|
||||||
// a forwarding pointer by a parallel thread. So we must save the mark
|
|
||||||
// word here, install it in a local oopDesc, and then analyze it.
|
|
||||||
oopDesc dummyOld;
|
|
||||||
dummyOld.set_mark(m);
|
|
||||||
assert(!dummyOld.is_forwarded(),
|
|
||||||
"should not be called with forwarding pointer mark word.");
|
|
||||||
|
|
||||||
bool failed_to_promote = false;
|
|
||||||
oop new_obj = NULL;
|
|
||||||
oop forward_ptr;
|
|
||||||
|
|
||||||
// Try allocating obj in to-space (unless too old)
|
|
||||||
if (dummyOld.age() < tenuring_threshold()) {
|
|
||||||
new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
|
|
||||||
if (new_obj == NULL) {
|
|
||||||
set_survivor_overflow(true);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (new_obj == NULL) {
|
|
||||||
// Either to-space is full or we decided to promote
|
|
||||||
// try allocating obj tenured
|
|
||||||
new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
|
|
||||||
old, m, sz);
|
|
||||||
|
|
||||||
if (new_obj == NULL) {
|
|
||||||
// promotion failed, forward to self
|
|
||||||
forward_ptr = old->forward_to_atomic(old);
|
|
||||||
new_obj = old;
|
|
||||||
|
|
||||||
if (forward_ptr != NULL) {
|
|
||||||
return forward_ptr; // someone else succeeded
|
|
||||||
}
|
|
||||||
|
|
||||||
_promotion_failed = true;
|
|
||||||
failed_to_promote = true;
|
|
||||||
|
|
||||||
preserve_mark_if_necessary(old, m);
|
|
||||||
par_scan_state->register_promotion_failure(sz);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Is in to-space; do copying ourselves.
|
|
||||||
Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
|
|
||||||
// Restore the mark word copied above.
|
|
||||||
new_obj->set_mark(m);
|
|
||||||
// Increment age if new_obj still in new generation
|
|
||||||
new_obj->incr_age();
|
|
||||||
par_scan_state->age_table()->add(new_obj, sz);
|
|
||||||
}
|
|
||||||
assert(new_obj != NULL, "just checking");
|
|
||||||
|
|
||||||
#ifndef PRODUCT
|
|
||||||
// This code must come after the CAS test, or it will print incorrect
|
|
||||||
// information.
|
|
||||||
if (TraceScavenge) {
|
|
||||||
gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
|
|
||||||
is_in_reserved(new_obj) ? "copying" : "tenuring",
|
|
||||||
new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size());
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
// Now attempt to install the forwarding pointer (atomically).
|
|
||||||
// We have to copy the mark word before overwriting with forwarding
|
|
||||||
// ptr, so we can restore it below in the copy.
|
|
||||||
if (!failed_to_promote) {
|
|
||||||
forward_ptr = old->forward_to_atomic(new_obj);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (forward_ptr == NULL) {
|
|
||||||
oop obj_to_push = new_obj;
|
|
||||||
if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
|
|
||||||
// Length field used as index of next element to be scanned.
|
|
||||||
// Real length can be obtained from real_forwardee()
|
|
||||||
arrayOop(old)->set_length(0);
|
|
||||||
obj_to_push = old;
|
|
||||||
assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
|
|
||||||
"push forwarded object");
|
|
||||||
}
|
|
||||||
// Push it on one of the queues of to-be-scanned objects.
|
|
||||||
bool simulate_overflow = false;
|
|
||||||
NOT_PRODUCT(
|
|
||||||
if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
|
|
||||||
// simulate a stack overflow
|
|
||||||
simulate_overflow = true;
|
|
||||||
}
|
|
||||||
)
|
|
||||||
if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
|
|
||||||
// Add stats for overflow pushes.
|
|
||||||
push_on_overflow_list(old, par_scan_state);
|
|
||||||
TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
|
|
||||||
}
|
|
||||||
|
|
||||||
return new_obj;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Oops. Someone beat us to it. Undo the allocation. Where did we
|
|
||||||
// allocate it?
|
|
||||||
if (is_in_reserved(new_obj)) {
|
|
||||||
// Must be in to_space.
|
|
||||||
assert(to()->is_in_reserved(new_obj), "Checking");
|
|
||||||
par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
|
|
||||||
} else {
|
|
||||||
assert(!_avoid_promotion_undo, "Should not be here if avoiding.");
|
|
||||||
_next_gen->par_promote_alloc_undo(par_scan_state->thread_num(),
|
|
||||||
(HeapWord*)new_obj, sz);
|
|
||||||
}
|
|
||||||
|
|
||||||
return forward_ptr;
|
|
||||||
}
|
|
||||||
|
|
||||||
#ifndef PRODUCT
|
#ifndef PRODUCT
|
||||||
// It's OK to call this multi-threaded; the worst thing
|
// It's OK to call this multi-threaded; the worst thing
|
||||||
// that can happen is that we'll get a bunch of closely
|
// that can happen is that we'll get a bunch of closely
|
||||||
|
|
|
@ -329,9 +329,6 @@ class ParNewGeneration: public DefNewGeneration {
|
||||||
oop _overflow_list;
|
oop _overflow_list;
|
||||||
NOT_PRODUCT(ssize_t _num_par_pushes;)
|
NOT_PRODUCT(ssize_t _num_par_pushes;)
|
||||||
|
|
||||||
// If true, older generation does not support promotion undo, so avoid.
|
|
||||||
static bool _avoid_promotion_undo;
|
|
||||||
|
|
||||||
// This closure is used by the reference processor to filter out
|
// This closure is used by the reference processor to filter out
|
||||||
// references to live referent.
|
// references to live referent.
|
||||||
DefNewGeneration::IsAliveClosure _is_alive_closure;
|
DefNewGeneration::IsAliveClosure _is_alive_closure;
|
||||||
|
@ -349,9 +346,6 @@ class ParNewGeneration: public DefNewGeneration {
|
||||||
|
|
||||||
bool _survivor_overflow;
|
bool _survivor_overflow;
|
||||||
|
|
||||||
bool avoid_promotion_undo() { return _avoid_promotion_undo; }
|
|
||||||
void set_avoid_promotion_undo(bool v) { _avoid_promotion_undo = v; }
|
|
||||||
|
|
||||||
bool survivor_overflow() { return _survivor_overflow; }
|
bool survivor_overflow() { return _survivor_overflow; }
|
||||||
void set_survivor_overflow(bool v) { _survivor_overflow = v; }
|
void set_survivor_overflow(bool v) { _survivor_overflow = v; }
|
||||||
|
|
||||||
|
@ -372,7 +366,6 @@ class ParNewGeneration: public DefNewGeneration {
|
||||||
|
|
||||||
// override
|
// override
|
||||||
virtual bool refs_discovery_is_mt() const {
|
virtual bool refs_discovery_is_mt() const {
|
||||||
assert(UseParNewGC, "ParNewGeneration only when UseParNewGC");
|
|
||||||
return ParallelGCThreads > 1;
|
return ParallelGCThreads > 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -386,20 +379,7 @@ class ParNewGeneration: public DefNewGeneration {
|
||||||
// "obj" is the object to be copied, "m" is a recent value of its mark
|
// "obj" is the object to be copied, "m" is a recent value of its mark
|
||||||
// that must not contain a forwarding pointer (though one might be
|
// that must not contain a forwarding pointer (though one might be
|
||||||
// inserted in "obj"s mark word by a parallel thread).
|
// inserted in "obj"s mark word by a parallel thread).
|
||||||
inline oop copy_to_survivor_space(ParScanThreadState* par_scan_state,
|
oop copy_to_survivor_space(ParScanThreadState* par_scan_state,
|
||||||
oop obj, size_t obj_sz, markOop m) {
|
|
||||||
if (_avoid_promotion_undo) {
|
|
||||||
return copy_to_survivor_space_avoiding_promotion_undo(par_scan_state,
|
|
||||||
obj, obj_sz, m);
|
|
||||||
}
|
|
||||||
|
|
||||||
return copy_to_survivor_space_with_undo(par_scan_state, obj, obj_sz, m);
|
|
||||||
}
|
|
||||||
|
|
||||||
oop copy_to_survivor_space_avoiding_promotion_undo(ParScanThreadState* par_scan_state,
|
|
||||||
oop obj, size_t obj_sz, markOop m);
|
|
||||||
|
|
||||||
oop copy_to_survivor_space_with_undo(ParScanThreadState* par_scan_state,
|
|
||||||
oop obj, size_t obj_sz, markOop m);
|
oop obj, size_t obj_sz, markOop m);
|
||||||
|
|
||||||
// in support of testing overflow code
|
// in support of testing overflow code
|
||||||
|
|
|
@ -168,7 +168,6 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
|
||||||
{
|
{
|
||||||
HandleMark hm;
|
HandleMark hm;
|
||||||
|
|
||||||
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
|
|
||||||
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
|
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
|
||||||
GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer->gc_id());
|
GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer->gc_id());
|
||||||
TraceCollectorStats tcs(counters());
|
TraceCollectorStats tcs(counters());
|
||||||
|
|
|
@ -2055,7 +2055,6 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
|
||||||
gc_task_manager()->task_idle_workers();
|
gc_task_manager()->task_idle_workers();
|
||||||
heap->set_par_threads(gc_task_manager()->active_workers());
|
heap->set_par_threads(gc_task_manager()->active_workers());
|
||||||
|
|
||||||
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
|
|
||||||
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
|
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
|
||||||
GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
|
GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
|
||||||
TraceCollectorStats tcs(counters());
|
TraceCollectorStats tcs(counters());
|
||||||
|
|
|
@ -330,7 +330,6 @@ bool PSScavenge::invoke_no_policy() {
|
||||||
ResourceMark rm;
|
ResourceMark rm;
|
||||||
HandleMark hm;
|
HandleMark hm;
|
||||||
|
|
||||||
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
|
|
||||||
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
|
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
|
||||||
GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
|
GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
|
||||||
TraceCollectorStats tcs(counters());
|
TraceCollectorStats tcs(counters());
|
||||||
|
|
|
@ -49,10 +49,8 @@ GCTraceTime::GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* t
|
||||||
}
|
}
|
||||||
|
|
||||||
if (_doit) {
|
if (_doit) {
|
||||||
if (PrintGCTimeStamps) {
|
gclog_or_tty->date_stamp(PrintGCDateStamps);
|
||||||
gclog_or_tty->stamp();
|
gclog_or_tty->stamp(PrintGCTimeStamps);
|
||||||
gclog_or_tty->print(": ");
|
|
||||||
}
|
|
||||||
if (PrintGCID) {
|
if (PrintGCID) {
|
||||||
gclog_or_tty->print("#%u: ", gc_id.id());
|
gclog_or_tty->print("#%u: ", gc_id.id());
|
||||||
}
|
}
|
||||||
|
|
|
@ -142,216 +142,3 @@ void ParGCAllocBuffer::print() {
|
||||||
"FT"[_retained], _retained_filler.start(), _retained_filler.end());
|
"FT"[_retained], _retained_filler.start(), _retained_filler.end());
|
||||||
}
|
}
|
||||||
#endif // !PRODUCT
|
#endif // !PRODUCT
|
||||||
|
|
||||||
const size_t ParGCAllocBufferWithBOT::ChunkSizeInWords =
|
|
||||||
MIN2(CardTableModRefBS::par_chunk_heapword_alignment(),
|
|
||||||
((size_t)Generation::GenGrain)/HeapWordSize);
|
|
||||||
const size_t ParGCAllocBufferWithBOT::ChunkSizeInBytes =
|
|
||||||
MIN2(CardTableModRefBS::par_chunk_heapword_alignment() * HeapWordSize,
|
|
||||||
(size_t)Generation::GenGrain);
|
|
||||||
|
|
||||||
ParGCAllocBufferWithBOT::ParGCAllocBufferWithBOT(size_t word_sz,
|
|
||||||
BlockOffsetSharedArray* bsa) :
|
|
||||||
ParGCAllocBuffer(word_sz),
|
|
||||||
_bsa(bsa),
|
|
||||||
_bt(bsa, MemRegion(_bottom, _hard_end)),
|
|
||||||
_true_end(_hard_end)
|
|
||||||
{}
|
|
||||||
|
|
||||||
// The buffer comes with its own BOT, with a shared (obviously) underlying
|
|
||||||
// BlockOffsetSharedArray. We manipulate this BOT in the normal way
|
|
||||||
// as we would for any contiguous space. However, on occasion we
|
|
||||||
// need to do some buffer surgery at the extremities before we
|
|
||||||
// start using the body of the buffer for allocations. Such surgery
|
|
||||||
// (as explained elsewhere) is to prevent allocation on a card that
|
|
||||||
// is in the process of being walked concurrently by another GC thread.
|
|
||||||
// When such surgery happens at a point that is far removed (to the
|
|
||||||
// right of the current allocation point, top), we use the "contig"
|
|
||||||
// parameter below to directly manipulate the shared array without
|
|
||||||
// modifying the _next_threshold state in the BOT.
|
|
||||||
void ParGCAllocBufferWithBOT::fill_region_with_block(MemRegion mr,
|
|
||||||
bool contig) {
|
|
||||||
CollectedHeap::fill_with_object(mr);
|
|
||||||
if (contig) {
|
|
||||||
_bt.alloc_block(mr.start(), mr.end());
|
|
||||||
} else {
|
|
||||||
_bt.BlockOffsetArray::alloc_block(mr.start(), mr.end());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
HeapWord* ParGCAllocBufferWithBOT::allocate_slow(size_t word_sz) {
|
|
||||||
HeapWord* res = NULL;
|
|
||||||
if (_true_end > _hard_end) {
|
|
||||||
assert((HeapWord*)align_size_down(intptr_t(_hard_end),
|
|
||||||
ChunkSizeInBytes) == _hard_end,
|
|
||||||
"or else _true_end should be equal to _hard_end");
|
|
||||||
assert(_retained, "or else _true_end should be equal to _hard_end");
|
|
||||||
assert(_retained_filler.end() <= _top, "INVARIANT");
|
|
||||||
CollectedHeap::fill_with_object(_retained_filler);
|
|
||||||
if (_top < _hard_end) {
|
|
||||||
fill_region_with_block(MemRegion(_top, _hard_end), true);
|
|
||||||
}
|
|
||||||
HeapWord* next_hard_end = MIN2(_true_end, _hard_end + ChunkSizeInWords);
|
|
||||||
_retained_filler = MemRegion(_hard_end, FillerHeaderSize);
|
|
||||||
_bt.alloc_block(_retained_filler.start(), _retained_filler.word_size());
|
|
||||||
_top = _retained_filler.end();
|
|
||||||
_hard_end = next_hard_end;
|
|
||||||
_end = _hard_end - AlignmentReserve;
|
|
||||||
res = ParGCAllocBuffer::allocate(word_sz);
|
|
||||||
if (res != NULL) {
|
|
||||||
_bt.alloc_block(res, word_sz);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return res;
|
|
||||||
}
|
|
||||||
|
|
||||||
void
|
|
||||||
ParGCAllocBufferWithBOT::undo_allocation(HeapWord* obj, size_t word_sz) {
|
|
||||||
ParGCAllocBuffer::undo_allocation(obj, word_sz);
|
|
||||||
// This may back us up beyond the previous threshold, so reset.
|
|
||||||
_bt.set_region(MemRegion(_top, _hard_end));
|
|
||||||
_bt.initialize_threshold();
|
|
||||||
}
|
|
||||||
|
|
||||||
void ParGCAllocBufferWithBOT::retire(bool end_of_gc, bool retain) {
|
|
||||||
assert(!retain || end_of_gc, "Can only retain at GC end.");
|
|
||||||
if (_retained) {
|
|
||||||
// We're about to make the retained_filler into a block.
|
|
||||||
_bt.BlockOffsetArray::alloc_block(_retained_filler.start(),
|
|
||||||
_retained_filler.end());
|
|
||||||
}
|
|
||||||
// Reset _hard_end to _true_end (and update _end)
|
|
||||||
if (retain && _hard_end != NULL) {
|
|
||||||
assert(_hard_end <= _true_end, "Invariant.");
|
|
||||||
_hard_end = _true_end;
|
|
||||||
_end = MAX2(_top, _hard_end - AlignmentReserve);
|
|
||||||
assert(_end <= _hard_end, "Invariant.");
|
|
||||||
}
|
|
||||||
_true_end = _hard_end;
|
|
||||||
HeapWord* pre_top = _top;
|
|
||||||
|
|
||||||
ParGCAllocBuffer::retire(end_of_gc, retain);
|
|
||||||
// Now any old _retained_filler is cut back to size, the free part is
|
|
||||||
// filled with a filler object, and top is past the header of that
|
|
||||||
// object.
|
|
||||||
|
|
||||||
if (retain && _top < _end) {
|
|
||||||
assert(end_of_gc && retain, "Or else retain should be false.");
|
|
||||||
// If the lab does not start on a card boundary, we don't want to
|
|
||||||
// allocate onto that card, since that might lead to concurrent
|
|
||||||
// allocation and card scanning, which we don't support. So we fill
|
|
||||||
// the first card with a garbage object.
|
|
||||||
size_t first_card_index = _bsa->index_for(pre_top);
|
|
||||||
HeapWord* first_card_start = _bsa->address_for_index(first_card_index);
|
|
||||||
if (first_card_start < pre_top) {
|
|
||||||
HeapWord* second_card_start =
|
|
||||||
_bsa->inc_by_region_size(first_card_start);
|
|
||||||
|
|
||||||
// Ensure enough room to fill with the smallest block
|
|
||||||
second_card_start = MAX2(second_card_start, pre_top + AlignmentReserve);
|
|
||||||
|
|
||||||
// If the end is already in the first card, don't go beyond it!
|
|
||||||
// Or if the remainder is too small for a filler object, gobble it up.
|
|
||||||
if (_hard_end < second_card_start ||
|
|
||||||
pointer_delta(_hard_end, second_card_start) < AlignmentReserve) {
|
|
||||||
second_card_start = _hard_end;
|
|
||||||
}
|
|
||||||
if (pre_top < second_card_start) {
|
|
||||||
MemRegion first_card_suffix(pre_top, second_card_start);
|
|
||||||
fill_region_with_block(first_card_suffix, true);
|
|
||||||
}
|
|
||||||
pre_top = second_card_start;
|
|
||||||
_top = pre_top;
|
|
||||||
_end = MAX2(_top, _hard_end - AlignmentReserve);
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the lab does not end on a card boundary, we don't want to
|
|
||||||
// allocate onto that card, since that might lead to concurrent
|
|
||||||
// allocation and card scanning, which we don't support. So we fill
|
|
||||||
// the last card with a garbage object.
|
|
||||||
size_t last_card_index = _bsa->index_for(_hard_end);
|
|
||||||
HeapWord* last_card_start = _bsa->address_for_index(last_card_index);
|
|
||||||
if (last_card_start < _hard_end) {
|
|
||||||
|
|
||||||
// Ensure enough room to fill with the smallest block
|
|
||||||
last_card_start = MIN2(last_card_start, _hard_end - AlignmentReserve);
|
|
||||||
|
|
||||||
// If the top is already in the last card, don't go back beyond it!
|
|
||||||
// Or if the remainder is too small for a filler object, gobble it up.
|
|
||||||
if (_top > last_card_start ||
|
|
||||||
pointer_delta(last_card_start, _top) < AlignmentReserve) {
|
|
||||||
last_card_start = _top;
|
|
||||||
}
|
|
||||||
if (last_card_start < _hard_end) {
|
|
||||||
MemRegion last_card_prefix(last_card_start, _hard_end);
|
|
||||||
fill_region_with_block(last_card_prefix, false);
|
|
||||||
}
|
|
||||||
_hard_end = last_card_start;
|
|
||||||
_end = MAX2(_top, _hard_end - AlignmentReserve);
|
|
||||||
_true_end = _hard_end;
|
|
||||||
assert(_end <= _hard_end, "Invariant.");
|
|
||||||
}
|
|
||||||
|
|
||||||
// At this point:
|
|
||||||
// 1) we had a filler object from the original top to hard_end.
|
|
||||||
// 2) We've filled in any partial cards at the front and back.
|
|
||||||
if (pre_top < _hard_end) {
|
|
||||||
// Now we can reset the _bt to do allocation in the given area.
|
|
||||||
MemRegion new_filler(pre_top, _hard_end);
|
|
||||||
fill_region_with_block(new_filler, false);
|
|
||||||
_top = pre_top + ParGCAllocBuffer::FillerHeaderSize;
|
|
||||||
// If there's no space left, don't retain.
|
|
||||||
if (_top >= _end) {
|
|
||||||
_retained = false;
|
|
||||||
invalidate();
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
_retained_filler = MemRegion(pre_top, _top);
|
|
||||||
_bt.set_region(MemRegion(_top, _hard_end));
|
|
||||||
_bt.initialize_threshold();
|
|
||||||
assert(_bt.threshold() > _top, "initialize_threshold failed!");
|
|
||||||
|
|
||||||
// There may be other reasons for queries into the middle of the
|
|
||||||
// filler object. When such queries are done in parallel with
|
|
||||||
// allocation, bad things can happen, if the query involves object
|
|
||||||
// iteration. So we ensure that such queries do not involve object
|
|
||||||
// iteration, by putting another filler object on the boundaries of
|
|
||||||
// such queries. One such is the object spanning a parallel card
|
|
||||||
// chunk boundary.
|
|
||||||
|
|
||||||
// "chunk_boundary" is the address of the first chunk boundary less
|
|
||||||
// than "hard_end".
|
|
||||||
HeapWord* chunk_boundary =
|
|
||||||
(HeapWord*)align_size_down(intptr_t(_hard_end-1), ChunkSizeInBytes);
|
|
||||||
assert(chunk_boundary < _hard_end, "Or else above did not work.");
|
|
||||||
assert(pointer_delta(_true_end, chunk_boundary) >= AlignmentReserve,
|
|
||||||
"Consequence of last card handling above.");
|
|
||||||
|
|
||||||
if (_top <= chunk_boundary) {
|
|
||||||
assert(_true_end == _hard_end, "Invariant.");
|
|
||||||
while (_top <= chunk_boundary) {
|
|
||||||
assert(pointer_delta(_hard_end, chunk_boundary) >= AlignmentReserve,
|
|
||||||
"Consequence of last card handling above.");
|
|
||||||
_bt.BlockOffsetArray::alloc_block(chunk_boundary, _hard_end);
|
|
||||||
CollectedHeap::fill_with_object(chunk_boundary, _hard_end);
|
|
||||||
_hard_end = chunk_boundary;
|
|
||||||
chunk_boundary -= ChunkSizeInWords;
|
|
||||||
}
|
|
||||||
_end = _hard_end - AlignmentReserve;
|
|
||||||
assert(_top <= _end, "Invariant.");
|
|
||||||
// Now reset the initial filler chunk so it doesn't overlap with
|
|
||||||
// the one(s) inserted above.
|
|
||||||
MemRegion new_filler(pre_top, _hard_end);
|
|
||||||
fill_region_with_block(new_filler, false);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
_retained = false;
|
|
||||||
invalidate();
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
assert(!end_of_gc ||
|
|
||||||
(!_retained && _true_end == _hard_end), "Checking.");
|
|
||||||
}
|
|
||||||
assert(_end <= _hard_end, "Invariant.");
|
|
||||||
assert(_top < _end || _top == _hard_end, "Invariant");
|
|
||||||
}
|
|
||||||
|
|
|
@ -216,44 +216,4 @@ class PLABStats VALUE_OBJ_CLASS_SPEC {
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
class ParGCAllocBufferWithBOT: public ParGCAllocBuffer {
|
|
||||||
BlockOffsetArrayContigSpace _bt;
|
|
||||||
BlockOffsetSharedArray* _bsa;
|
|
||||||
HeapWord* _true_end; // end of the whole ParGCAllocBuffer
|
|
||||||
|
|
||||||
static const size_t ChunkSizeInWords;
|
|
||||||
static const size_t ChunkSizeInBytes;
|
|
||||||
HeapWord* allocate_slow(size_t word_sz);
|
|
||||||
|
|
||||||
void fill_region_with_block(MemRegion mr, bool contig);
|
|
||||||
|
|
||||||
public:
|
|
||||||
ParGCAllocBufferWithBOT(size_t word_sz, BlockOffsetSharedArray* bsa);
|
|
||||||
|
|
||||||
HeapWord* allocate(size_t word_sz) {
|
|
||||||
HeapWord* res = ParGCAllocBuffer::allocate(word_sz);
|
|
||||||
if (res != NULL) {
|
|
||||||
_bt.alloc_block(res, word_sz);
|
|
||||||
} else {
|
|
||||||
res = allocate_slow(word_sz);
|
|
||||||
}
|
|
||||||
return res;
|
|
||||||
}
|
|
||||||
|
|
||||||
void undo_allocation(HeapWord* obj, size_t word_sz);
|
|
||||||
|
|
||||||
virtual void set_buf(HeapWord* buf_start) {
|
|
||||||
ParGCAllocBuffer::set_buf(buf_start);
|
|
||||||
_true_end = _hard_end;
|
|
||||||
_bt.set_region(MemRegion(buf_start, word_sz()));
|
|
||||||
_bt.initialize_threshold();
|
|
||||||
}
|
|
||||||
|
|
||||||
virtual void retire(bool end_of_gc, bool retain);
|
|
||||||
|
|
||||||
MemRegion range() {
|
|
||||||
return MemRegion(_top, _true_end);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
#endif // SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP
|
#endif // SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP
|
||||||
|
|
|
@ -54,6 +54,9 @@ const char* GCCause::to_string(GCCause::Cause cause) {
|
||||||
case _wb_young_gc:
|
case _wb_young_gc:
|
||||||
return "WhiteBox Initiated Young GC";
|
return "WhiteBox Initiated Young GC";
|
||||||
|
|
||||||
|
case _wb_conc_mark:
|
||||||
|
return "WhiteBox Initiated Concurrent Mark";
|
||||||
|
|
||||||
case _update_allocation_context_stats_inc:
|
case _update_allocation_context_stats_inc:
|
||||||
case _update_allocation_context_stats_full:
|
case _update_allocation_context_stats_full:
|
||||||
return "Update Allocation Context Stats";
|
return "Update Allocation Context Stats";
|
||||||
|
|
|
@ -47,6 +47,7 @@ class GCCause : public AllStatic {
|
||||||
_heap_inspection,
|
_heap_inspection,
|
||||||
_heap_dump,
|
_heap_dump,
|
||||||
_wb_young_gc,
|
_wb_young_gc,
|
||||||
|
_wb_conc_mark,
|
||||||
_update_allocation_context_stats_inc,
|
_update_allocation_context_stats_inc,
|
||||||
_update_allocation_context_stats_full,
|
_update_allocation_context_stats_full,
|
||||||
|
|
||||||
|
|
|
@ -251,12 +251,6 @@ public:
|
||||||
// Return the address indicating the start of the region corresponding to
|
// Return the address indicating the start of the region corresponding to
|
||||||
// "index" in "_offset_array".
|
// "index" in "_offset_array".
|
||||||
HeapWord* address_for_index(size_t index) const;
|
HeapWord* address_for_index(size_t index) const;
|
||||||
|
|
||||||
// Return the address "p" incremented by the size of
|
|
||||||
// a region. This method does not align the address
|
|
||||||
// returned to the start of a region. It is a simple
|
|
||||||
// primitive.
|
|
||||||
HeapWord* inc_by_region_size(HeapWord* p) const { return p + N_words; }
|
|
||||||
};
|
};
|
||||||
|
|
||||||
//////////////////////////////////////////////////////////////////////////
|
//////////////////////////////////////////////////////////////////////////
|
||||||
|
|
|
@ -466,11 +466,6 @@ public:
|
||||||
void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
|
void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
|
||||||
void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
|
void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
|
||||||
void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
|
void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
|
||||||
|
|
||||||
static size_t par_chunk_heapword_alignment() {
|
|
||||||
return ParGCCardsPerStrideChunk * card_size_in_words;
|
|
||||||
}
|
|
||||||
|
|
||||||
};
|
};
|
||||||
|
|
||||||
class CardTableRS;
|
class CardTableRS;
|
||||||
|
|
|
@ -283,14 +283,14 @@ void CardTableRS::younger_refs_in_space_iterate(Space* sp,
|
||||||
// Convert the assertion check to a warning if we are running
|
// Convert the assertion check to a warning if we are running
|
||||||
// CMS+ParNew until related bug is fixed.
|
// CMS+ParNew until related bug is fixed.
|
||||||
MemRegion ur = sp->used_region();
|
MemRegion ur = sp->used_region();
|
||||||
assert(ur.contains(urasm) || (UseConcMarkSweepGC && UseParNewGC),
|
assert(ur.contains(urasm) || (UseConcMarkSweepGC),
|
||||||
err_msg("Did you forget to call save_marks()? "
|
err_msg("Did you forget to call save_marks()? "
|
||||||
"[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
|
"[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
|
||||||
"[" PTR_FORMAT ", " PTR_FORMAT ")",
|
"[" PTR_FORMAT ", " PTR_FORMAT ")",
|
||||||
p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end())));
|
p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end())));
|
||||||
// In the case of CMS+ParNew, issue a warning
|
// In the case of CMS+ParNew, issue a warning
|
||||||
if (!ur.contains(urasm)) {
|
if (!ur.contains(urasm)) {
|
||||||
assert(UseConcMarkSweepGC && UseParNewGC, "Tautology: see assert above");
|
assert(UseConcMarkSweepGC, "Tautology: see assert above");
|
||||||
warning("CMS+ParNew: Did you forget to call save_marks()? "
|
warning("CMS+ParNew: Did you forget to call save_marks()? "
|
||||||
"[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
|
"[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
|
||||||
"[" PTR_FORMAT ", " PTR_FORMAT ")",
|
"[" PTR_FORMAT ", " PTR_FORMAT ")",
|
||||||
|
@ -609,21 +609,3 @@ void CardTableRS::verify() {
|
||||||
_ct_bs->verify();
|
_ct_bs->verify();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void CardTableRS::verify_aligned_region_empty(MemRegion mr) {
|
|
||||||
if (!mr.is_empty()) {
|
|
||||||
jbyte* cur_entry = byte_for(mr.start());
|
|
||||||
jbyte* limit = byte_after(mr.last());
|
|
||||||
// The region mr may not start on a card boundary so
|
|
||||||
// the first card may reflect a write to the space
|
|
||||||
// just prior to mr.
|
|
||||||
if (!is_aligned(mr.start())) {
|
|
||||||
cur_entry++;
|
|
||||||
}
|
|
||||||
for (;cur_entry < limit; cur_entry++) {
|
|
||||||
guarantee(*cur_entry == CardTableModRefBS::clean_card,
|
|
||||||
"Unexpected dirty card found");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
|
@ -138,7 +138,6 @@ public:
|
||||||
}
|
}
|
||||||
|
|
||||||
void verify();
|
void verify();
|
||||||
void verify_aligned_region_empty(MemRegion mr);
|
|
||||||
|
|
||||||
void clear(MemRegion mr) { _ct_bs->clear(mr); }
|
void clear(MemRegion mr) { _ct_bs->clear(mr); }
|
||||||
void clear_into_younger(Generation* old_gen);
|
void clear_into_younger(Generation* old_gen);
|
||||||
|
|
|
@ -908,32 +908,15 @@ void MarkSweepPolicy::initialize_alignments() {
|
||||||
}
|
}
|
||||||
|
|
||||||
void MarkSweepPolicy::initialize_generations() {
|
void MarkSweepPolicy::initialize_generations() {
|
||||||
_generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, CURRENT_PC,
|
_generations = NEW_C_HEAP_ARRAY(GenerationSpecPtr, number_of_generations(), mtGC);
|
||||||
AllocFailStrategy::RETURN_NULL);
|
|
||||||
if (_generations == NULL) {
|
|
||||||
vm_exit_during_initialization("Unable to allocate gen spec");
|
|
||||||
}
|
|
||||||
|
|
||||||
if (UseParNewGC) {
|
|
||||||
_generations[0] = new GenerationSpec(Generation::ParNew, _initial_young_size, _max_young_size);
|
|
||||||
} else {
|
|
||||||
_generations[0] = new GenerationSpec(Generation::DefNew, _initial_young_size, _max_young_size);
|
_generations[0] = new GenerationSpec(Generation::DefNew, _initial_young_size, _max_young_size);
|
||||||
}
|
|
||||||
_generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_old_size, _max_old_size);
|
_generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_old_size, _max_old_size);
|
||||||
|
|
||||||
if (_generations[0] == NULL || _generations[1] == NULL) {
|
|
||||||
vm_exit_during_initialization("Unable to allocate gen spec");
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void MarkSweepPolicy::initialize_gc_policy_counters() {
|
void MarkSweepPolicy::initialize_gc_policy_counters() {
|
||||||
// Initialize the policy counters - 2 collectors, 3 generations.
|
// Initialize the policy counters - 2 collectors, 3 generations.
|
||||||
if (UseParNewGC) {
|
|
||||||
_gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3);
|
|
||||||
} else {
|
|
||||||
_gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
|
_gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
/////////////// Unit tests ///////////////
|
/////////////// Unit tests ///////////////
|
||||||
|
|
||||||
|
|
|
@ -340,9 +340,6 @@ protected:
|
||||||
virtual const char* name() const;
|
virtual const char* name() const;
|
||||||
virtual const char* short_name() const { return "DefNew"; }
|
virtual const char* short_name() const { return "DefNew"; }
|
||||||
|
|
||||||
bool must_be_youngest() const { return true; }
|
|
||||||
bool must_be_oldest() const { return false; }
|
|
||||||
|
|
||||||
// PrintHeapAtGC support.
|
// PrintHeapAtGC support.
|
||||||
void print_on(outputStream* st) const;
|
void print_on(outputStream* st) const;
|
||||||
|
|
||||||
|
|
|
@ -182,10 +182,7 @@ void GenCollectedHeap::post_initialize() {
|
||||||
SharedHeap::post_initialize();
|
SharedHeap::post_initialize();
|
||||||
GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy();
|
GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy();
|
||||||
guarantee(policy->is_generation_policy(), "Illegal policy type");
|
guarantee(policy->is_generation_policy(), "Illegal policy type");
|
||||||
DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
|
DefNewGeneration* def_new_gen = get_gen(0)->as_DefNewGeneration();
|
||||||
assert(def_new_gen->kind() == Generation::DefNew ||
|
|
||||||
def_new_gen->kind() == Generation::ParNew,
|
|
||||||
"Wrong generation kind");
|
|
||||||
|
|
||||||
Generation* old_gen = get_gen(1);
|
Generation* old_gen = get_gen(1);
|
||||||
assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
|
assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
|
||||||
|
@ -363,7 +360,6 @@ void GenCollectedHeap::do_collection(bool full,
|
||||||
|
|
||||||
bool complete = full && (max_level == (n_gens()-1));
|
bool complete = full && (max_level == (n_gens()-1));
|
||||||
const char* gc_cause_prefix = complete ? "Full GC" : "GC";
|
const char* gc_cause_prefix = complete ? "Full GC" : "GC";
|
||||||
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
|
|
||||||
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
|
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
|
||||||
// The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
|
// The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
|
||||||
// so we can assume here that the next GC id is what we want.
|
// so we can assume here that the next GC id is what we want.
|
||||||
|
@ -1118,10 +1114,8 @@ void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
|
||||||
|
|
||||||
void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
|
void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
|
||||||
#if INCLUDE_ALL_GCS
|
#if INCLUDE_ALL_GCS
|
||||||
if (UseParNewGC) {
|
|
||||||
workers()->print_worker_threads_on(st);
|
|
||||||
}
|
|
||||||
if (UseConcMarkSweepGC) {
|
if (UseConcMarkSweepGC) {
|
||||||
|
workers()->print_worker_threads_on(st);
|
||||||
ConcurrentMarkSweepThread::print_all_on(st);
|
ConcurrentMarkSweepThread::print_all_on(st);
|
||||||
}
|
}
|
||||||
#endif // INCLUDE_ALL_GCS
|
#endif // INCLUDE_ALL_GCS
|
||||||
|
|
|
@ -262,12 +262,12 @@ public:
|
||||||
|
|
||||||
// We don't need barriers for stores to objects in the
|
// We don't need barriers for stores to objects in the
|
||||||
// young gen and, a fortiori, for initializing stores to
|
// young gen and, a fortiori, for initializing stores to
|
||||||
// objects therein. This applies to {DefNew,ParNew}+{Tenured,CMS}
|
// objects therein. This applies to DefNew+Tenured and ParNew+CMS
|
||||||
// only and may need to be re-examined in case other
|
// only and may need to be re-examined in case other
|
||||||
// kinds of collectors are implemented in the future.
|
// kinds of collectors are implemented in the future.
|
||||||
virtual bool can_elide_initializing_store_barrier(oop new_obj) {
|
virtual bool can_elide_initializing_store_barrier(oop new_obj) {
|
||||||
// We wanted to assert that:-
|
// We wanted to assert that:-
|
||||||
// assert(UseParNewGC || UseSerialGC || UseConcMarkSweepGC,
|
// assert(UseSerialGC || UseConcMarkSweepGC,
|
||||||
// "Check can_elide_initializing_store_barrier() for this collector");
|
// "Check can_elide_initializing_store_barrier() for this collector");
|
||||||
// but unfortunately the flag UseSerialGC need not necessarily always
|
// but unfortunately the flag UseSerialGC need not necessarily always
|
||||||
// be set when DefNew+Tenured are being used.
|
// be set when DefNew+Tenured are being used.
|
||||||
|
|
|
@ -105,17 +105,6 @@ public:
|
||||||
|
|
||||||
virtual void verify() = 0;
|
virtual void verify() = 0;
|
||||||
|
|
||||||
// Verify that the remembered set has no entries for
|
|
||||||
// the heap interval denoted by mr. If there are any
|
|
||||||
// alignment constraints on the remembered set, only the
|
|
||||||
// part of the region that is aligned is checked.
|
|
||||||
//
|
|
||||||
// alignment boundaries
|
|
||||||
// +--------+-------+--------+-------+
|
|
||||||
// [ region mr )
|
|
||||||
// [ part checked )
|
|
||||||
virtual void verify_aligned_region_empty(MemRegion mr) = 0;
|
|
||||||
|
|
||||||
// If appropriate, print some information about the remset on "tty".
|
// If appropriate, print some information about the remset on "tty".
|
||||||
virtual void print() {}
|
virtual void print() {}
|
||||||
|
|
||||||
|
|
|
@ -220,12 +220,6 @@ oop Generation::par_promote(int thread_num,
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
void Generation::par_promote_alloc_undo(int thread_num,
|
|
||||||
HeapWord* obj, size_t word_sz) {
|
|
||||||
// Could do a bad general impl here that gets a lock. But no.
|
|
||||||
guarantee(false, "No good general implementation.");
|
|
||||||
}
|
|
||||||
|
|
||||||
Space* Generation::space_containing(const void* p) const {
|
Space* Generation::space_containing(const void* p) const {
|
||||||
GenerationIsInReservedClosure blk(p);
|
GenerationIsInReservedClosure blk(p);
|
||||||
// Cast away const
|
// Cast away const
|
||||||
|
|
|
@ -317,11 +317,6 @@ class Generation: public CHeapObj<mtGC> {
|
||||||
virtual oop par_promote(int thread_num,
|
virtual oop par_promote(int thread_num,
|
||||||
oop obj, markOop m, size_t word_sz);
|
oop obj, markOop m, size_t word_sz);
|
||||||
|
|
||||||
// Undo, if possible, the most recent par_promote_alloc allocation by
|
|
||||||
// "thread_num" ("obj", of "word_sz").
|
|
||||||
virtual void par_promote_alloc_undo(int thread_num,
|
|
||||||
HeapWord* obj, size_t word_sz);
|
|
||||||
|
|
||||||
// Informs the current generation that all par_promote_alloc's in the
|
// Informs the current generation that all par_promote_alloc's in the
|
||||||
// collection have been completed; any supporting data structures can be
|
// collection have been completed; any supporting data structures can be
|
||||||
// reset. Default is to do nothing.
|
// reset. Default is to do nothing.
|
||||||
|
@ -517,13 +512,6 @@ class Generation: public CHeapObj<mtGC> {
|
||||||
|
|
||||||
int level() const { return _level; }
|
int level() const { return _level; }
|
||||||
|
|
||||||
// Attributes
|
|
||||||
|
|
||||||
// True iff the given generation may only be the youngest generation.
|
|
||||||
virtual bool must_be_youngest() const = 0;
|
|
||||||
// True iff the given generation may only be the oldest generation.
|
|
||||||
virtual bool must_be_oldest() const = 0;
|
|
||||||
|
|
||||||
// Reference Processing accessor
|
// Reference Processing accessor
|
||||||
ReferenceProcessor* const ref_processor() { return _ref_processor; }
|
ReferenceProcessor* const ref_processor() { return _ref_processor; }
|
||||||
|
|
||||||
|
|
|
@ -68,9 +68,7 @@ SharedHeap::SharedHeap(CollectorPolicy* policy_) :
|
||||||
vm_exit_during_initialization("Failed necessary allocation.");
|
vm_exit_during_initialization("Failed necessary allocation.");
|
||||||
}
|
}
|
||||||
_sh = this; // ch is static, should be set only once.
|
_sh = this; // ch is static, should be set only once.
|
||||||
if (UseParNewGC ||
|
if (UseConcMarkSweepGC || UseG1GC) {
|
||||||
UseG1GC ||
|
|
||||||
(UseConcMarkSweepGC && (CMSParallelInitialMarkEnabled || CMSParallelRemarkEnabled) && use_parallel_gc_threads())) {
|
|
||||||
_workers = new FlexibleWorkGang("Parallel GC Threads", ParallelGCThreads,
|
_workers = new FlexibleWorkGang("Parallel GC Threads", ParallelGCThreads,
|
||||||
/* are_GC_task_threads */true,
|
/* are_GC_task_threads */true,
|
||||||
/* are_ConcurrentGC_threads */false);
|
/* are_ConcurrentGC_threads */false);
|
||||||
|
|
|
@ -70,15 +70,13 @@ class Space: public CHeapObj<mtGC> {
|
||||||
// Used in support of save_marks()
|
// Used in support of save_marks()
|
||||||
HeapWord* _saved_mark_word;
|
HeapWord* _saved_mark_word;
|
||||||
|
|
||||||
MemRegionClosure* _preconsumptionDirtyCardClosure;
|
|
||||||
|
|
||||||
// A sequential tasks done structure. This supports
|
// A sequential tasks done structure. This supports
|
||||||
// parallel GC, where we have threads dynamically
|
// parallel GC, where we have threads dynamically
|
||||||
// claiming sub-tasks from a larger parallel task.
|
// claiming sub-tasks from a larger parallel task.
|
||||||
SequentialSubTasksDone _par_seq_tasks;
|
SequentialSubTasksDone _par_seq_tasks;
|
||||||
|
|
||||||
Space():
|
Space():
|
||||||
_bottom(NULL), _end(NULL), _preconsumptionDirtyCardClosure(NULL) { }
|
_bottom(NULL), _end(NULL) { }
|
||||||
|
|
||||||
public:
|
public:
|
||||||
// Accessors
|
// Accessors
|
||||||
|
@ -97,11 +95,8 @@ class Space: public CHeapObj<mtGC> {
|
||||||
return (HeapWord*)obj >= saved_mark_word();
|
return (HeapWord*)obj >= saved_mark_word();
|
||||||
}
|
}
|
||||||
|
|
||||||
MemRegionClosure* preconsumptionDirtyCardClosure() const {
|
virtual MemRegionClosure* preconsumptionDirtyCardClosure() const {
|
||||||
return _preconsumptionDirtyCardClosure;
|
return NULL;
|
||||||
}
|
|
||||||
void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
|
|
||||||
_preconsumptionDirtyCardClosure = cl;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Returns a subregion of the space containing only the allocated objects in
|
// Returns a subregion of the space containing only the allocated objects in
|
||||||
|
|
|
@ -64,45 +64,12 @@ TenuredGeneration::TenuredGeneration(ReservedSpace rs,
|
||||||
_space_counters = new CSpaceCounters(gen_name, 0,
|
_space_counters = new CSpaceCounters(gen_name, 0,
|
||||||
_virtual_space.reserved_size(),
|
_virtual_space.reserved_size(),
|
||||||
_the_space, _gen_counters);
|
_the_space, _gen_counters);
|
||||||
#if INCLUDE_ALL_GCS
|
|
||||||
if (UseParNewGC) {
|
|
||||||
typedef ParGCAllocBufferWithBOT* ParGCAllocBufferWithBOTPtr;
|
|
||||||
_alloc_buffers = NEW_C_HEAP_ARRAY(ParGCAllocBufferWithBOTPtr,
|
|
||||||
ParallelGCThreads, mtGC);
|
|
||||||
if (_alloc_buffers == NULL)
|
|
||||||
vm_exit_during_initialization("Could not allocate alloc_buffers");
|
|
||||||
for (uint i = 0; i < ParallelGCThreads; i++) {
|
|
||||||
_alloc_buffers[i] =
|
|
||||||
new ParGCAllocBufferWithBOT(OldPLABSize, _bts);
|
|
||||||
if (_alloc_buffers[i] == NULL)
|
|
||||||
vm_exit_during_initialization("Could not allocate alloc_buffers");
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
_alloc_buffers = NULL;
|
|
||||||
}
|
|
||||||
#endif // INCLUDE_ALL_GCS
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
const char* TenuredGeneration::name() const {
|
|
||||||
return "tenured generation";
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void TenuredGeneration::gc_prologue(bool full) {
|
void TenuredGeneration::gc_prologue(bool full) {
|
||||||
_capacity_at_prologue = capacity();
|
_capacity_at_prologue = capacity();
|
||||||
_used_at_prologue = used();
|
_used_at_prologue = used();
|
||||||
if (VerifyBeforeGC) {
|
|
||||||
verify_alloc_buffers_clean();
|
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
void TenuredGeneration::gc_epilogue(bool full) {
|
|
||||||
if (VerifyAfterGC) {
|
|
||||||
verify_alloc_buffers_clean();
|
|
||||||
}
|
|
||||||
OneContigSpaceCardGeneration::gc_epilogue(full);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
bool TenuredGeneration::should_collect(bool full,
|
bool TenuredGeneration::should_collect(bool full,
|
||||||
size_t size,
|
size_t size,
|
||||||
|
@ -149,15 +116,6 @@ bool TenuredGeneration::should_collect(bool full,
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
void TenuredGeneration::collect(bool full,
|
|
||||||
bool clear_all_soft_refs,
|
|
||||||
size_t size,
|
|
||||||
bool is_tlab) {
|
|
||||||
retire_alloc_buffers_before_full_gc();
|
|
||||||
OneContigSpaceCardGeneration::collect(full, clear_all_soft_refs,
|
|
||||||
size, is_tlab);
|
|
||||||
}
|
|
||||||
|
|
||||||
void TenuredGeneration::compute_new_size() {
|
void TenuredGeneration::compute_new_size() {
|
||||||
assert_locked_or_safepoint(Heap_lock);
|
assert_locked_or_safepoint(Heap_lock);
|
||||||
|
|
||||||
|
@ -171,6 +129,7 @@ void TenuredGeneration::compute_new_size() {
|
||||||
err_msg("used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
|
err_msg("used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
|
||||||
" capacity: " SIZE_FORMAT, used(), used_after_gc, capacity()));
|
" capacity: " SIZE_FORMAT, used(), used_after_gc, capacity()));
|
||||||
}
|
}
|
||||||
|
|
||||||
void TenuredGeneration::update_gc_stats(int current_level,
|
void TenuredGeneration::update_gc_stats(int current_level,
|
||||||
bool full) {
|
bool full) {
|
||||||
// If the next lower level(s) has been collected, gather any statistics
|
// If the next lower level(s) has been collected, gather any statistics
|
||||||
|
@ -198,96 +157,6 @@ void TenuredGeneration::update_counters() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
#if INCLUDE_ALL_GCS
|
|
||||||
oop TenuredGeneration::par_promote(int thread_num,
|
|
||||||
oop old, markOop m, size_t word_sz) {
|
|
||||||
|
|
||||||
ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
|
|
||||||
HeapWord* obj_ptr = buf->allocate(word_sz);
|
|
||||||
bool is_lab = true;
|
|
||||||
if (obj_ptr == NULL) {
|
|
||||||
#ifndef PRODUCT
|
|
||||||
if (Universe::heap()->promotion_should_fail()) {
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
#endif // #ifndef PRODUCT
|
|
||||||
|
|
||||||
// Slow path:
|
|
||||||
if (word_sz * 100 < ParallelGCBufferWastePct * buf->word_sz()) {
|
|
||||||
// Is small enough; abandon this buffer and start a new one.
|
|
||||||
size_t buf_size = buf->word_sz();
|
|
||||||
HeapWord* buf_space =
|
|
||||||
TenuredGeneration::par_allocate(buf_size, false);
|
|
||||||
if (buf_space == NULL) {
|
|
||||||
buf_space = expand_and_allocate(buf_size, false, true /* parallel*/);
|
|
||||||
}
|
|
||||||
if (buf_space != NULL) {
|
|
||||||
buf->retire(false, false);
|
|
||||||
buf->set_buf(buf_space);
|
|
||||||
obj_ptr = buf->allocate(word_sz);
|
|
||||||
assert(obj_ptr != NULL, "Buffer was definitely big enough...");
|
|
||||||
}
|
|
||||||
};
|
|
||||||
// Otherwise, buffer allocation failed; try allocating object
|
|
||||||
// individually.
|
|
||||||
if (obj_ptr == NULL) {
|
|
||||||
obj_ptr = TenuredGeneration::par_allocate(word_sz, false);
|
|
||||||
if (obj_ptr == NULL) {
|
|
||||||
obj_ptr = expand_and_allocate(word_sz, false, true /* parallel */);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (obj_ptr == NULL) return NULL;
|
|
||||||
}
|
|
||||||
assert(obj_ptr != NULL, "program logic");
|
|
||||||
Copy::aligned_disjoint_words((HeapWord*)old, obj_ptr, word_sz);
|
|
||||||
oop obj = oop(obj_ptr);
|
|
||||||
// Restore the mark word copied above.
|
|
||||||
obj->set_mark(m);
|
|
||||||
return obj;
|
|
||||||
}
|
|
||||||
|
|
||||||
void TenuredGeneration::par_promote_alloc_undo(int thread_num,
|
|
||||||
HeapWord* obj,
|
|
||||||
size_t word_sz) {
|
|
||||||
ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
|
|
||||||
if (buf->contains(obj)) {
|
|
||||||
guarantee(buf->contains(obj + word_sz - 1),
|
|
||||||
"should contain whole object");
|
|
||||||
buf->undo_allocation(obj, word_sz);
|
|
||||||
} else {
|
|
||||||
CollectedHeap::fill_with_object(obj, word_sz);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void TenuredGeneration::par_promote_alloc_done(int thread_num) {
|
|
||||||
ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
|
|
||||||
buf->retire(true, ParallelGCRetainPLAB);
|
|
||||||
}
|
|
||||||
|
|
||||||
void TenuredGeneration::retire_alloc_buffers_before_full_gc() {
|
|
||||||
if (UseParNewGC) {
|
|
||||||
for (uint i = 0; i < ParallelGCThreads; i++) {
|
|
||||||
_alloc_buffers[i]->retire(true /*end_of_gc*/, false /*retain*/);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify that any retained parallel allocation buffers do not
|
|
||||||
// intersect with dirty cards.
|
|
||||||
void TenuredGeneration::verify_alloc_buffers_clean() {
|
|
||||||
if (UseParNewGC) {
|
|
||||||
for (uint i = 0; i < ParallelGCThreads; i++) {
|
|
||||||
_rs->verify_aligned_region_empty(_alloc_buffers[i]->range());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#else // INCLUDE_ALL_GCS
|
|
||||||
void TenuredGeneration::retire_alloc_buffers_before_full_gc() {}
|
|
||||||
void TenuredGeneration::verify_alloc_buffers_clean() {}
|
|
||||||
#endif // INCLUDE_ALL_GCS
|
|
||||||
|
|
||||||
bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
|
bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
|
||||||
size_t available = max_contiguous_available();
|
size_t available = max_contiguous_available();
|
||||||
size_t av_promo = (size_t)gc_stats()->avg_promoted()->padded_average();
|
size_t av_promo = (size_t)gc_stats()->avg_promoted()->padded_average();
|
||||||
|
|
|
@ -33,22 +33,9 @@
|
||||||
|
|
||||||
// TenuredGeneration models the heap containing old (promoted/tenured) objects.
|
// TenuredGeneration models the heap containing old (promoted/tenured) objects.
|
||||||
|
|
||||||
class ParGCAllocBufferWithBOT;
|
|
||||||
|
|
||||||
class TenuredGeneration: public OneContigSpaceCardGeneration {
|
class TenuredGeneration: public OneContigSpaceCardGeneration {
|
||||||
friend class VMStructs;
|
friend class VMStructs;
|
||||||
protected:
|
protected:
|
||||||
|
|
||||||
#if INCLUDE_ALL_GCS
|
|
||||||
// To support parallel promotion: an array of parallel allocation
|
|
||||||
// buffers, one per thread, initially NULL.
|
|
||||||
ParGCAllocBufferWithBOT** _alloc_buffers;
|
|
||||||
#endif // INCLUDE_ALL_GCS
|
|
||||||
|
|
||||||
// Retire all alloc buffers before a full GC, so that they will be
|
|
||||||
// re-allocated at the start of the next young GC.
|
|
||||||
void retire_alloc_buffers_before_full_gc();
|
|
||||||
|
|
||||||
GenerationCounters* _gen_counters;
|
GenerationCounters* _gen_counters;
|
||||||
CSpaceCounters* _space_counters;
|
CSpaceCounters* _space_counters;
|
||||||
|
|
||||||
|
@ -59,10 +46,8 @@ class TenuredGeneration: public OneContigSpaceCardGeneration {
|
||||||
Generation::Name kind() { return Generation::MarkSweepCompact; }
|
Generation::Name kind() { return Generation::MarkSweepCompact; }
|
||||||
|
|
||||||
// Printing
|
// Printing
|
||||||
const char* name() const;
|
const char* name() const { return "tenured generation"; }
|
||||||
const char* short_name() const { return "Tenured"; }
|
const char* short_name() const { return "Tenured"; }
|
||||||
bool must_be_youngest() const { return false; }
|
|
||||||
bool must_be_oldest() const { return true; }
|
|
||||||
|
|
||||||
// Does a "full" (forced) collection invoked on this generation collect
|
// Does a "full" (forced) collection invoked on this generation collect
|
||||||
// all younger generations as well? Note that this is a
|
// all younger generations as well? Note that this is a
|
||||||
|
@ -73,26 +58,12 @@ class TenuredGeneration: public OneContigSpaceCardGeneration {
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual void gc_prologue(bool full);
|
virtual void gc_prologue(bool full);
|
||||||
virtual void gc_epilogue(bool full);
|
|
||||||
bool should_collect(bool full,
|
bool should_collect(bool full,
|
||||||
size_t word_size,
|
size_t word_size,
|
||||||
bool is_tlab);
|
bool is_tlab);
|
||||||
|
|
||||||
virtual void collect(bool full,
|
|
||||||
bool clear_all_soft_refs,
|
|
||||||
size_t size,
|
|
||||||
bool is_tlab);
|
|
||||||
virtual void compute_new_size();
|
virtual void compute_new_size();
|
||||||
|
|
||||||
#if INCLUDE_ALL_GCS
|
|
||||||
// Overrides.
|
|
||||||
virtual oop par_promote(int thread_num,
|
|
||||||
oop obj, markOop m, size_t word_sz);
|
|
||||||
virtual void par_promote_alloc_undo(int thread_num,
|
|
||||||
HeapWord* obj, size_t word_sz);
|
|
||||||
virtual void par_promote_alloc_done(int thread_num);
|
|
||||||
#endif // INCLUDE_ALL_GCS
|
|
||||||
|
|
||||||
// Performance Counter support
|
// Performance Counter support
|
||||||
void update_counters();
|
void update_counters();
|
||||||
|
|
||||||
|
@ -101,8 +72,6 @@ class TenuredGeneration: public OneContigSpaceCardGeneration {
|
||||||
virtual void update_gc_stats(int level, bool full);
|
virtual void update_gc_stats(int level, bool full);
|
||||||
|
|
||||||
virtual bool promotion_attempt_is_safe(size_t max_promoted_in_bytes) const;
|
virtual bool promotion_attempt_is_safe(size_t max_promoted_in_bytes) const;
|
||||||
|
|
||||||
void verify_alloc_buffers_clean();
|
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif // SHARE_VM_MEMORY_TENUREDGENERATION_HPP
|
#endif // SHARE_VM_MEMORY_TENUREDGENERATION_HPP
|
||||||
|
|
|
@ -76,7 +76,7 @@
|
||||||
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
|
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
|
||||||
#include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
|
#include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
|
||||||
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
||||||
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
|
#include "gc_implementation/g1/g1CollectorPolicy_ext.hpp"
|
||||||
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
|
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
|
||||||
#endif // INCLUDE_ALL_GCS
|
#endif // INCLUDE_ALL_GCS
|
||||||
#if INCLUDE_CDS
|
#if INCLUDE_CDS
|
||||||
|
@ -799,7 +799,7 @@ jint Universe::initialize_heap() {
|
||||||
|
|
||||||
} else if (UseG1GC) {
|
} else if (UseG1GC) {
|
||||||
#if INCLUDE_ALL_GCS
|
#if INCLUDE_ALL_GCS
|
||||||
G1CollectorPolicy* g1p = new G1CollectorPolicy();
|
G1CollectorPolicyExt* g1p = new G1CollectorPolicyExt();
|
||||||
g1p->initialize_all();
|
g1p->initialize_all();
|
||||||
G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
|
G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
|
||||||
Universe::_collectedHeap = g1h;
|
Universe::_collectedHeap = g1h;
|
||||||
|
|
|
@ -441,12 +441,12 @@ inline int oopDesc::size_given_klass(Klass* klass) {
|
||||||
s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) /
|
s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) /
|
||||||
HeapWordSize);
|
HeapWordSize);
|
||||||
|
|
||||||
// UseParNewGC, UseParallelGC and UseG1GC can change the length field
|
// ParNew (used by CMS), UseParallelGC and UseG1GC can change the length field
|
||||||
// of an "old copy" of an object array in the young gen so it indicates
|
// of an "old copy" of an object array in the young gen so it indicates
|
||||||
// the grey portion of an already copied array. This will cause the first
|
// the grey portion of an already copied array. This will cause the first
|
||||||
// disjunct below to fail if the two comparands are computed across such
|
// disjunct below to fail if the two comparands are computed across such
|
||||||
// a concurrent change.
|
// a concurrent change.
|
||||||
// UseParNewGC also runs with promotion labs (which look like int
|
// ParNew also runs with promotion labs (which look like int
|
||||||
// filler arrays) which are subject to changing their declared size
|
// filler arrays) which are subject to changing their declared size
|
||||||
// when finally retiring a PLAB; this also can cause the first disjunct
|
// when finally retiring a PLAB; this also can cause the first disjunct
|
||||||
// to fail for another worker thread that is concurrently walking the block
|
// to fail for another worker thread that is concurrently walking the block
|
||||||
|
@ -458,8 +458,8 @@ inline int oopDesc::size_given_klass(Klass* klass) {
|
||||||
// technique, we will need to suitably modify the assertion.
|
// technique, we will need to suitably modify the assertion.
|
||||||
assert((s == klass->oop_size(this)) ||
|
assert((s == klass->oop_size(this)) ||
|
||||||
(Universe::heap()->is_gc_active() &&
|
(Universe::heap()->is_gc_active() &&
|
||||||
((is_typeArray() && UseParNewGC) ||
|
((is_typeArray() && UseConcMarkSweepGC) ||
|
||||||
(is_objArray() && is_forwarded() && (UseParNewGC || UseParallelGC || UseG1GC)))),
|
(is_objArray() && is_forwarded() && (UseConcMarkSweepGC || UseParallelGC || UseG1GC)))),
|
||||||
"wrong array object size");
|
"wrong array object size");
|
||||||
} else {
|
} else {
|
||||||
// Must be zero, so bite the bullet and take the virtual call.
|
// Must be zero, so bite the bullet and take the virtual call.
|
||||||
|
|
|
@ -50,6 +50,7 @@
|
||||||
#if INCLUDE_ALL_GCS
|
#if INCLUDE_ALL_GCS
|
||||||
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
|
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
|
||||||
#include "gc_implementation/g1/concurrentMark.hpp"
|
#include "gc_implementation/g1/concurrentMark.hpp"
|
||||||
|
#include "gc_implementation/g1/concurrentMarkThread.hpp"
|
||||||
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
||||||
#include "gc_implementation/g1/heapRegionRemSet.hpp"
|
#include "gc_implementation/g1/heapRegionRemSet.hpp"
|
||||||
#endif // INCLUDE_ALL_GCS
|
#endif // INCLUDE_ALL_GCS
|
||||||
|
@ -290,8 +291,16 @@ WB_END
|
||||||
|
|
||||||
WB_ENTRY(jboolean, WB_G1InConcurrentMark(JNIEnv* env, jobject o))
|
WB_ENTRY(jboolean, WB_G1InConcurrentMark(JNIEnv* env, jobject o))
|
||||||
G1CollectedHeap* g1 = G1CollectedHeap::heap();
|
G1CollectedHeap* g1 = G1CollectedHeap::heap();
|
||||||
ConcurrentMark* cm = g1->concurrent_mark();
|
return g1->concurrent_mark()->cmThread()->during_cycle();
|
||||||
return cm->concurrent_marking_in_progress();
|
WB_END
|
||||||
|
|
||||||
|
WB_ENTRY(jboolean, WB_G1StartMarkCycle(JNIEnv* env, jobject o))
|
||||||
|
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||||
|
if (!g1h->concurrent_mark()->cmThread()->during_cycle()) {
|
||||||
|
g1h->collect(GCCause::_wb_conc_mark);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
WB_END
|
WB_END
|
||||||
|
|
||||||
WB_ENTRY(jint, WB_G1RegionSize(JNIEnv* env, jobject o))
|
WB_ENTRY(jint, WB_G1RegionSize(JNIEnv* env, jobject o))
|
||||||
|
@ -1144,6 +1153,7 @@ static JNINativeMethod methods[] = {
|
||||||
{CC"g1IsHumongous", CC"(Ljava/lang/Object;)Z", (void*)&WB_G1IsHumongous },
|
{CC"g1IsHumongous", CC"(Ljava/lang/Object;)Z", (void*)&WB_G1IsHumongous },
|
||||||
{CC"g1NumFreeRegions", CC"()J", (void*)&WB_G1NumFreeRegions },
|
{CC"g1NumFreeRegions", CC"()J", (void*)&WB_G1NumFreeRegions },
|
||||||
{CC"g1RegionSize", CC"()I", (void*)&WB_G1RegionSize },
|
{CC"g1RegionSize", CC"()I", (void*)&WB_G1RegionSize },
|
||||||
|
{CC"g1StartConcMarkCycle", CC"()Z", (void*)&WB_G1StartMarkCycle },
|
||||||
#endif // INCLUDE_ALL_GCS
|
#endif // INCLUDE_ALL_GCS
|
||||||
#if INCLUDE_NMT
|
#if INCLUDE_NMT
|
||||||
{CC"NMTMalloc", CC"(J)J", (void*)&WB_NMTMalloc },
|
{CC"NMTMalloc", CC"(J)J", (void*)&WB_NMTMalloc },
|
||||||
|
|
|
@ -1272,10 +1272,8 @@ static void disable_adaptive_size_policy(const char* collector_name) {
|
||||||
void Arguments::set_parnew_gc_flags() {
|
void Arguments::set_parnew_gc_flags() {
|
||||||
assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC && !UseG1GC,
|
assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC && !UseG1GC,
|
||||||
"control point invariant");
|
"control point invariant");
|
||||||
assert(UseParNewGC, "Error");
|
assert(UseConcMarkSweepGC, "CMS is expected to be on here");
|
||||||
|
assert(UseParNewGC, "ParNew should always be used with CMS");
|
||||||
// Turn off AdaptiveSizePolicy for parnew until it is complete.
|
|
||||||
disable_adaptive_size_policy("UseParNewGC");
|
|
||||||
|
|
||||||
if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
|
if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
|
||||||
FLAG_SET_DEFAULT(ParallelGCThreads, Abstract_VM_Version::parallel_worker_threads());
|
FLAG_SET_DEFAULT(ParallelGCThreads, Abstract_VM_Version::parallel_worker_threads());
|
||||||
|
@ -1316,21 +1314,12 @@ void Arguments::set_parnew_gc_flags() {
|
||||||
void Arguments::set_cms_and_parnew_gc_flags() {
|
void Arguments::set_cms_and_parnew_gc_flags() {
|
||||||
assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC, "Error");
|
assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC, "Error");
|
||||||
assert(UseConcMarkSweepGC, "CMS is expected to be on here");
|
assert(UseConcMarkSweepGC, "CMS is expected to be on here");
|
||||||
|
assert(UseParNewGC, "ParNew should always be used with CMS");
|
||||||
// If we are using CMS, we prefer to UseParNewGC,
|
|
||||||
// unless explicitly forbidden.
|
|
||||||
if (FLAG_IS_DEFAULT(UseParNewGC)) {
|
|
||||||
FLAG_SET_ERGO(bool, UseParNewGC, true);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Turn off AdaptiveSizePolicy by default for cms until it is complete.
|
// Turn off AdaptiveSizePolicy by default for cms until it is complete.
|
||||||
disable_adaptive_size_policy("UseConcMarkSweepGC");
|
disable_adaptive_size_policy("UseConcMarkSweepGC");
|
||||||
|
|
||||||
// In either case, adjust ParallelGCThreads and/or UseParNewGC
|
|
||||||
// as needed.
|
|
||||||
if (UseParNewGC) {
|
|
||||||
set_parnew_gc_flags();
|
set_parnew_gc_flags();
|
||||||
}
|
|
||||||
|
|
||||||
size_t max_heap = align_size_down(MaxHeapSize,
|
size_t max_heap = align_size_down(MaxHeapSize,
|
||||||
CardTableRS::ct_max_alignment_constraint());
|
CardTableRS::ct_max_alignment_constraint());
|
||||||
|
@ -1800,14 +1789,11 @@ void Arguments::set_gc_specific_flags() {
|
||||||
// Set per-collector flags
|
// Set per-collector flags
|
||||||
if (UseParallelGC || UseParallelOldGC) {
|
if (UseParallelGC || UseParallelOldGC) {
|
||||||
set_parallel_gc_flags();
|
set_parallel_gc_flags();
|
||||||
} else if (UseConcMarkSweepGC) { // Should be done before ParNew check below
|
} else if (UseConcMarkSweepGC) {
|
||||||
set_cms_and_parnew_gc_flags();
|
set_cms_and_parnew_gc_flags();
|
||||||
} else if (UseParNewGC) { // Skipped if CMS is set above
|
|
||||||
set_parnew_gc_flags();
|
|
||||||
} else if (UseG1GC) {
|
} else if (UseG1GC) {
|
||||||
set_g1_gc_flags();
|
set_g1_gc_flags();
|
||||||
}
|
}
|
||||||
check_deprecated_gcs();
|
|
||||||
check_deprecated_gc_flags();
|
check_deprecated_gc_flags();
|
||||||
if (AssumeMP && !UseSerialGC) {
|
if (AssumeMP && !UseSerialGC) {
|
||||||
if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) {
|
if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) {
|
||||||
|
@ -2168,17 +2154,11 @@ bool Arguments::verify_MaxHeapFreeRatio(FormatBuffer<80>& err_msg, uintx max_hea
|
||||||
// Check consistency of GC selection
|
// Check consistency of GC selection
|
||||||
bool Arguments::check_gc_consistency_user() {
|
bool Arguments::check_gc_consistency_user() {
|
||||||
check_gclog_consistency();
|
check_gclog_consistency();
|
||||||
bool status = true;
|
|
||||||
// Ensure that the user has not selected conflicting sets
|
// Ensure that the user has not selected conflicting sets
|
||||||
// of collectors. [Note: this check is merely a user convenience;
|
// of collectors.
|
||||||
// collectors over-ride each other so that only a non-conflicting
|
|
||||||
// set is selected; however what the user gets is not what they
|
|
||||||
// may have expected from the combination they asked for. It's
|
|
||||||
// better to reduce user confusion by not allowing them to
|
|
||||||
// select conflicting combinations.
|
|
||||||
uint i = 0;
|
uint i = 0;
|
||||||
if (UseSerialGC) i++;
|
if (UseSerialGC) i++;
|
||||||
if (UseConcMarkSweepGC || UseParNewGC) i++;
|
if (UseConcMarkSweepGC) i++;
|
||||||
if (UseParallelGC || UseParallelOldGC) i++;
|
if (UseParallelGC || UseParallelOldGC) i++;
|
||||||
if (UseG1GC) i++;
|
if (UseG1GC) i++;
|
||||||
if (i > 1) {
|
if (i > 1) {
|
||||||
|
@ -2186,26 +2166,30 @@ bool Arguments::check_gc_consistency_user() {
|
||||||
"Conflicting collector combinations in option list; "
|
"Conflicting collector combinations in option list; "
|
||||||
"please refer to the release notes for the combinations "
|
"please refer to the release notes for the combinations "
|
||||||
"allowed\n");
|
"allowed\n");
|
||||||
status = false;
|
return false;
|
||||||
}
|
|
||||||
return status;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void Arguments::check_deprecated_gcs() {
|
|
||||||
if (UseConcMarkSweepGC && !UseParNewGC) {
|
if (UseConcMarkSweepGC && !UseParNewGC) {
|
||||||
warning("Using the DefNew young collector with the CMS collector is deprecated "
|
jio_fprintf(defaultStream::error_stream(),
|
||||||
"and will likely be removed in a future release");
|
"It is not possible to combine the DefNew young collector with the CMS collector.\n");
|
||||||
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (UseParNewGC && !UseConcMarkSweepGC) {
|
if (UseParNewGC && !UseConcMarkSweepGC) {
|
||||||
// !UseConcMarkSweepGC means that we are using serial old gc. Unfortunately we don't
|
// !UseConcMarkSweepGC means that we are using serial old gc. Unfortunately we don't
|
||||||
// set up UseSerialGC properly, so that can't be used in the check here.
|
// set up UseSerialGC properly, so that can't be used in the check here.
|
||||||
warning("Using the ParNew young collector with the Serial old collector is deprecated "
|
jio_fprintf(defaultStream::error_stream(),
|
||||||
"and will likely be removed in a future release");
|
"It is not possible to combine the ParNew young collector with the Serial old collector.\n");
|
||||||
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
void Arguments::check_deprecated_gc_flags() {
|
void Arguments::check_deprecated_gc_flags() {
|
||||||
|
if (FLAG_IS_CMDLINE(UseParNewGC)) {
|
||||||
|
warning("The UseParNewGC flag is deprecated and will likely be removed in a future release");
|
||||||
|
}
|
||||||
if (FLAG_IS_CMDLINE(MaxGCMinorPauseMillis)) {
|
if (FLAG_IS_CMDLINE(MaxGCMinorPauseMillis)) {
|
||||||
warning("Using MaxGCMinorPauseMillis as minor pause goal is deprecated"
|
warning("Using MaxGCMinorPauseMillis as minor pause goal is deprecated"
|
||||||
"and will likely be removed in future release");
|
"and will likely be removed in future release");
|
||||||
|
@ -2312,7 +2296,7 @@ bool Arguments::check_vm_args_consistency() {
|
||||||
FLAG_SET_DEFAULT(UseGCOverheadLimit, false);
|
FLAG_SET_DEFAULT(UseGCOverheadLimit, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
status = status && ArgumentsExt::check_gc_consistency_user();
|
status = status && check_gc_consistency_user();
|
||||||
status = status && check_stack_pages();
|
status = status && check_stack_pages();
|
||||||
|
|
||||||
status = status && verify_percentage(CMSIncrementalSafetyFactor,
|
status = status && verify_percentage(CMSIncrementalSafetyFactor,
|
||||||
|
@ -3568,7 +3552,12 @@ jint Arguments::finalize_vm_init_args(SysClassPath* scp_p, bool scp_assembly_req
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!ArgumentsExt::check_vm_args_consistency()) {
|
if (UseConcMarkSweepGC && FLAG_IS_DEFAULT(UseParNewGC) && !UseParNewGC) {
|
||||||
|
// CMS can only be used with ParNew
|
||||||
|
FLAG_SET_ERGO(bool, UseParNewGC, true);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!check_vm_args_consistency()) {
|
||||||
return JNI_ERR;
|
return JNI_ERR;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -3966,7 +3955,7 @@ jint Arguments::apply_ergo() {
|
||||||
// Set heap size based on available physical memory
|
// Set heap size based on available physical memory
|
||||||
set_heap_size();
|
set_heap_size();
|
||||||
|
|
||||||
set_gc_specific_flags();
|
ArgumentsExt::set_gc_specific_flags();
|
||||||
|
|
||||||
// Initialize Metaspace flags and alignments
|
// Initialize Metaspace flags and alignments
|
||||||
Metaspace::ergo_initialize();
|
Metaspace::ergo_initialize();
|
||||||
|
|
|
@ -346,7 +346,6 @@ class Arguments : AllStatic {
|
||||||
static void select_gc();
|
static void select_gc();
|
||||||
static void set_ergonomics_flags();
|
static void set_ergonomics_flags();
|
||||||
static void set_shared_spaces_flags();
|
static void set_shared_spaces_flags();
|
||||||
static void set_gc_specific_flags();
|
|
||||||
// limits the given memory size by the maximum amount of memory this process is
|
// limits the given memory size by the maximum amount of memory this process is
|
||||||
// currently allowed to allocate or reserve.
|
// currently allowed to allocate or reserve.
|
||||||
static julong limit_by_allocatable_memory(julong size);
|
static julong limit_by_allocatable_memory(julong size);
|
||||||
|
@ -458,6 +457,7 @@ class Arguments : AllStatic {
|
||||||
// Adjusts the arguments after the OS have adjusted the arguments
|
// Adjusts the arguments after the OS have adjusted the arguments
|
||||||
static jint adjust_after_os();
|
static jint adjust_after_os();
|
||||||
|
|
||||||
|
static void set_gc_specific_flags();
|
||||||
static inline bool gc_selected(); // whether a gc has been selected
|
static inline bool gc_selected(); // whether a gc has been selected
|
||||||
static void select_gc_ergonomically();
|
static void select_gc_ergonomically();
|
||||||
|
|
||||||
|
@ -472,7 +472,6 @@ class Arguments : AllStatic {
|
||||||
// Check for consistency in the selection of the garbage collector.
|
// Check for consistency in the selection of the garbage collector.
|
||||||
static bool check_gc_consistency_user(); // Check user-selected gc
|
static bool check_gc_consistency_user(); // Check user-selected gc
|
||||||
static inline bool check_gc_consistency_ergo(); // Check ergonomic-selected gc
|
static inline bool check_gc_consistency_ergo(); // Check ergonomic-selected gc
|
||||||
static void check_deprecated_gcs();
|
|
||||||
static void check_deprecated_gc_flags();
|
static void check_deprecated_gc_flags();
|
||||||
// Check consistency or otherwise of VM argument settings
|
// Check consistency or otherwise of VM argument settings
|
||||||
static bool check_vm_args_consistency();
|
static bool check_vm_args_consistency();
|
||||||
|
@ -615,8 +614,7 @@ class Arguments : AllStatic {
|
||||||
};
|
};
|
||||||
|
|
||||||
bool Arguments::gc_selected() {
|
bool Arguments::gc_selected() {
|
||||||
return UseConcMarkSweepGC || UseG1GC || UseParallelGC || UseParallelOldGC ||
|
return UseConcMarkSweepGC || UseG1GC || UseParallelGC || UseParallelOldGC || UseSerialGC;
|
||||||
UseParNewGC || UseSerialGC;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
bool Arguments::check_gc_consistency_ergo() {
|
bool Arguments::check_gc_consistency_ergo() {
|
||||||
|
|
|
@ -31,9 +31,8 @@
|
||||||
class ArgumentsExt: AllStatic {
|
class ArgumentsExt: AllStatic {
|
||||||
public:
|
public:
|
||||||
static inline void select_gc_ergonomically();
|
static inline void select_gc_ergonomically();
|
||||||
static inline bool check_gc_consistency_user();
|
static inline void set_gc_specific_flags();
|
||||||
static inline bool check_gc_consistency_ergo();
|
static inline bool check_gc_consistency_ergo();
|
||||||
static inline bool check_vm_args_consistency();
|
|
||||||
// The argument processing extension. Returns true if there is
|
// The argument processing extension. Returns true if there is
|
||||||
// no additional parsing needed in Arguments::parse() for the option.
|
// no additional parsing needed in Arguments::parse() for the option.
|
||||||
// Otherwise returns false.
|
// Otherwise returns false.
|
||||||
|
@ -44,16 +43,12 @@ void ArgumentsExt::select_gc_ergonomically() {
|
||||||
Arguments::select_gc_ergonomically();
|
Arguments::select_gc_ergonomically();
|
||||||
}
|
}
|
||||||
|
|
||||||
bool ArgumentsExt::check_gc_consistency_user() {
|
void ArgumentsExt::set_gc_specific_flags() {
|
||||||
return Arguments::check_gc_consistency_user();
|
Arguments::set_gc_specific_flags();
|
||||||
}
|
}
|
||||||
|
|
||||||
bool ArgumentsExt::check_gc_consistency_ergo() {
|
bool ArgumentsExt::check_gc_consistency_ergo() {
|
||||||
return Arguments::check_gc_consistency_ergo();
|
return Arguments::check_gc_consistency_ergo();
|
||||||
}
|
}
|
||||||
|
|
||||||
bool ArgumentsExt::check_vm_args_consistency() {
|
|
||||||
return Arguments::check_vm_args_consistency();
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif // SHARE_VM_RUNTIME_ARGUMENTS_EXT_HPP
|
#endif // SHARE_VM_RUNTIME_ARGUMENTS_EXT_HPP
|
||||||
|
|
|
@ -139,6 +139,7 @@ needs_compact3 = \
|
||||||
gc/g1/TestShrinkAuxiliaryData20.java \
|
gc/g1/TestShrinkAuxiliaryData20.java \
|
||||||
gc/g1/TestShrinkAuxiliaryData25.java \
|
gc/g1/TestShrinkAuxiliaryData25.java \
|
||||||
gc/g1/TestShrinkAuxiliaryData30.java \
|
gc/g1/TestShrinkAuxiliaryData30.java \
|
||||||
|
gc/survivorAlignment \
|
||||||
runtime/InternalApi/ThreadCpuTimesDeadlock.java \
|
runtime/InternalApi/ThreadCpuTimesDeadlock.java \
|
||||||
serviceability/threads/TestFalseDeadLock.java \
|
serviceability/threads/TestFalseDeadLock.java \
|
||||||
|
|
||||||
|
|
|
@ -29,7 +29,7 @@
|
||||||
* @build Test8010927
|
* @build Test8010927
|
||||||
* @run main ClassFileInstaller sun.hotspot.WhiteBox
|
* @run main ClassFileInstaller sun.hotspot.WhiteBox
|
||||||
* sun.hotspot.WhiteBox$WhiteBoxPermission
|
* sun.hotspot.WhiteBox$WhiteBoxPermission
|
||||||
* @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:. -Xmx64m -XX:NewSize=20971520 -XX:MaxNewSize=32m -XX:-UseTLAB -XX:-UseParNewGC -XX:-UseAdaptiveSizePolicy Test8010927
|
* @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:. -Xmx64m -XX:NewSize=20971520 -XX:MaxNewSize=32m -XX:-UseTLAB -XX:-UseAdaptiveSizePolicy Test8010927
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import sun.hotspot.WhiteBox;
|
import sun.hotspot.WhiteBox;
|
||||||
|
|
|
@ -26,7 +26,7 @@
|
||||||
* @key gc
|
* @key gc
|
||||||
* @summary Tests that all SoftReferences has been cleared at time of OOM.
|
* @summary Tests that all SoftReferences has been cleared at time of OOM.
|
||||||
* @library /testlibrary
|
* @library /testlibrary
|
||||||
* @build TestSoftReference
|
* @build TestSoftReferencesBehaviorOnOOME
|
||||||
* @run main/othervm -Xmx128m TestSoftReferencesBehaviorOnOOME 512 2k
|
* @run main/othervm -Xmx128m TestSoftReferencesBehaviorOnOOME 512 2k
|
||||||
* @run main/othervm -Xmx128m TestSoftReferencesBehaviorOnOOME 128k 256k
|
* @run main/othervm -Xmx128m TestSoftReferencesBehaviorOnOOME 128k 256k
|
||||||
* @run main/othervm -Xmx128m TestSoftReferencesBehaviorOnOOME 2k 32k 10
|
* @run main/othervm -Xmx128m TestSoftReferencesBehaviorOnOOME 2k 32k 10
|
||||||
|
|
|
@ -28,12 +28,10 @@
|
||||||
* @summary Runs System.gc() with different flags.
|
* @summary Runs System.gc() with different flags.
|
||||||
* @run main/othervm TestSystemGC
|
* @run main/othervm TestSystemGC
|
||||||
* @run main/othervm -XX:+UseSerialGC TestSystemGC
|
* @run main/othervm -XX:+UseSerialGC TestSystemGC
|
||||||
* @run main/othervm -XX:+UseParNewGC TestSystemGC
|
|
||||||
* @run main/othervm -XX:+UseParallelGC TestSystemGC
|
* @run main/othervm -XX:+UseParallelGC TestSystemGC
|
||||||
* @run main/othervm -XX:+UseParallelGC -XX:-UseParallelOldGC TestSystemGC
|
* @run main/othervm -XX:+UseParallelGC -XX:-UseParallelOldGC TestSystemGC
|
||||||
* @run main/othervm -XX:+UseConcMarkSweepGC TestSystemGC
|
* @run main/othervm -XX:+UseConcMarkSweepGC TestSystemGC
|
||||||
* @run main/othervm -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent TestSystemGC
|
* @run main/othervm -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent TestSystemGC
|
||||||
* @run main/othervm -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent -XX:-UseParNewGC TestSystemGC
|
|
||||||
* @run main/othervm -XX:+UseG1GC TestSystemGC
|
* @run main/othervm -XX:+UseG1GC TestSystemGC
|
||||||
* @run main/othervm -XX:+UseG1GC -XX:+ExplicitGCInvokesConcurrent TestSystemGC
|
* @run main/othervm -XX:+UseG1GC -XX:+ExplicitGCInvokesConcurrent TestSystemGC
|
||||||
* @run main/othervm -XX:+UseLargePages TestSystemGC
|
* @run main/othervm -XX:+UseLargePages TestSystemGC
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
/*
|
/*
|
||||||
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
@ -25,12 +25,11 @@
|
||||||
* @test TestG1HeapRegionSize
|
* @test TestG1HeapRegionSize
|
||||||
* @key gc
|
* @key gc
|
||||||
* @bug 8021879
|
* @bug 8021879
|
||||||
* @requires vm.gc=="G1" | vm.gc=="null"
|
|
||||||
* @summary Verify that the flag G1HeapRegionSize is updated properly
|
* @summary Verify that the flag G1HeapRegionSize is updated properly
|
||||||
* @run main/othervm -Xmx64m TestG1HeapRegionSize 1048576
|
* @run main/othervm -Xmx64m TestG1HeapRegionSize 1048576
|
||||||
* @run main/othervm -XX:G1HeapRegionSize=2m -Xmx64m -XX:+UseG1GC TestG1HeapRegionSize 2097152
|
* @run main/othervm -XX:G1HeapRegionSize=2m -Xmx64m TestG1HeapRegionSize 2097152
|
||||||
* @run main/othervm -XX:G1HeapRegionSize=3m -Xmx64m -XX:+UseG1GC TestG1HeapRegionSize 2097152
|
* @run main/othervm -XX:G1HeapRegionSize=3m -Xmx64m TestG1HeapRegionSize 2097152
|
||||||
* @run main/othervm -XX:G1HeapRegionSize=64m -Xmx256m -XX:+UseG1GC TestG1HeapRegionSize 33554432
|
* @run main/othervm -XX:G1HeapRegionSize=64m -Xmx256m TestG1HeapRegionSize 33554432
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import sun.management.ManagementFactoryHelper;
|
import sun.management.ManagementFactoryHelper;
|
||||||
|
@ -43,7 +42,13 @@ public class TestG1HeapRegionSize {
|
||||||
HotSpotDiagnosticMXBean diagnostic = ManagementFactoryHelper.getDiagnosticMXBean();
|
HotSpotDiagnosticMXBean diagnostic = ManagementFactoryHelper.getDiagnosticMXBean();
|
||||||
|
|
||||||
String expectedValue = getExpectedValue(args);
|
String expectedValue = getExpectedValue(args);
|
||||||
VMOption option = diagnostic.getVMOption("G1HeapRegionSize");
|
VMOption option = diagnostic.getVMOption("UseG1GC");
|
||||||
|
if (option.getValue().equals("false")) {
|
||||||
|
System.out.println("Skipping this test. It is only a G1 test.");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
option = diagnostic.getVMOption("G1HeapRegionSize");
|
||||||
if (!expectedValue.equals(option.getValue())) {
|
if (!expectedValue.equals(option.getValue())) {
|
||||||
throw new RuntimeException("Wrong value for G1HeapRegionSize. Expected " + expectedValue + " but got " + option.getValue());
|
throw new RuntimeException("Wrong value for G1HeapRegionSize. Expected " + expectedValue + " but got " + option.getValue());
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,120 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||||
|
* version 2 for more details (a copy is included in the LICENSE file that
|
||||||
|
* accompanied this code).
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License version
|
||||||
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||||
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
*
|
||||||
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||||
|
* or visit www.oracle.com if you need additional information or have any
|
||||||
|
* questions.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import com.oracle.java.testlibrary.ExitCode;
|
||||||
|
import com.oracle.java.testlibrary.Utils;
|
||||||
|
import com.oracle.java.testlibrary.cli.CommandLineOptionTest;
|
||||||
|
|
||||||
|
import java.util.Collections;
|
||||||
|
import java.util.LinkedList;
|
||||||
|
import java.util.List;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @test
|
||||||
|
* @bug 8031323
|
||||||
|
* @summary Verify SurvivorAlignmentInBytes option processing.
|
||||||
|
* @library /testlibrary
|
||||||
|
* @run main TestSurvivorAlignmentInBytesOption
|
||||||
|
*/
|
||||||
|
public class TestSurvivorAlignmentInBytesOption {
|
||||||
|
private static final String[] FILTERED_VM_OPTIONS
|
||||||
|
= Utils.getFilteredTestJavaOpts(
|
||||||
|
"UnlockExperimentalVMOptions",
|
||||||
|
"SurvivorAlignmentInBytes",
|
||||||
|
"ObjectAlignmentInBytes");
|
||||||
|
|
||||||
|
public static void main(String args[]) throws Throwable {
|
||||||
|
String optionName = "SurvivorAlignmentInBytes";
|
||||||
|
String optionIsExperimental
|
||||||
|
= CommandLineOptionTest.getExperimentalOptionErrorMessage(
|
||||||
|
optionName);
|
||||||
|
String valueIsTooSmall= ".*SurvivorAlignmentInBytes=.*must be greater"
|
||||||
|
+ " than ObjectAlignmentInBytes.*";
|
||||||
|
String mustBePowerOf2 = ".*SurvivorAlignmentInBytes=.*must be "
|
||||||
|
+ "power of 2.*";
|
||||||
|
|
||||||
|
// Verify that without -XX:+UnlockExperimentalVMOptions usage of
|
||||||
|
// SurvivorAlignmentInBytes option will cause JVM startup failure
|
||||||
|
// with the warning message saying that that option is experimental.
|
||||||
|
CommandLineOptionTest.verifyJVMStartup(
|
||||||
|
new String[]{optionIsExperimental}, null, ExitCode.FAIL, false,
|
||||||
|
TestSurvivorAlignmentInBytesOption.prepareOptions(
|
||||||
|
"-XX:-UnlockExperimentalVMOptions",
|
||||||
|
CommandLineOptionTest.prepareNumericFlag(
|
||||||
|
optionName, 64)));
|
||||||
|
|
||||||
|
// Verify that with -XX:+UnlockExperimentalVMOptions passed to JVM
|
||||||
|
// usage of SurvivorAlignmentInBytes option won't cause JVM startup
|
||||||
|
// failure.
|
||||||
|
CommandLineOptionTest.verifyJVMStartup(
|
||||||
|
null, new String[]{optionIsExperimental}, ExitCode.OK, false,
|
||||||
|
TestSurvivorAlignmentInBytesOption.prepareOptions(
|
||||||
|
CommandLineOptionTest.prepareNumericFlag(
|
||||||
|
optionName, 64)));
|
||||||
|
|
||||||
|
// Verify that if specified SurvivorAlignmentInBytes is lower then
|
||||||
|
// ObjectAlignmentInBytes, then the JVM startup will fail with
|
||||||
|
// appropriate error message.
|
||||||
|
CommandLineOptionTest.verifyJVMStartup(
|
||||||
|
new String[]{valueIsTooSmall}, null, ExitCode.FAIL, false,
|
||||||
|
TestSurvivorAlignmentInBytesOption.prepareOptions(
|
||||||
|
CommandLineOptionTest.prepareNumericFlag(
|
||||||
|
optionName, 2)));
|
||||||
|
|
||||||
|
// Verify that if specified SurvivorAlignmentInBytes value is not
|
||||||
|
// a power of 2 then the JVM startup will fail with appropriate error
|
||||||
|
// message.
|
||||||
|
CommandLineOptionTest.verifyJVMStartup(
|
||||||
|
new String[]{mustBePowerOf2}, null, ExitCode.FAIL, false,
|
||||||
|
TestSurvivorAlignmentInBytesOption.prepareOptions(
|
||||||
|
CommandLineOptionTest.prepareNumericFlag(
|
||||||
|
optionName, 127)));
|
||||||
|
|
||||||
|
// Verify that if SurvivorAlignmentInBytes has correct value, then
|
||||||
|
// the JVM will be started without errors.
|
||||||
|
CommandLineOptionTest.verifyJVMStartup(
|
||||||
|
null, new String[]{".*SurvivorAlignmentInBytes.*"},
|
||||||
|
ExitCode.OK, false,
|
||||||
|
TestSurvivorAlignmentInBytesOption.prepareOptions(
|
||||||
|
CommandLineOptionTest.prepareNumericFlag(
|
||||||
|
optionName, 128)));
|
||||||
|
|
||||||
|
// Verify that we can setup different SurvivorAlignmentInBytes values.
|
||||||
|
for (int alignment = 32; alignment <= 128; alignment *= 2) {
|
||||||
|
CommandLineOptionTest.verifyOptionValue(optionName,
|
||||||
|
Integer.toString(alignment), false,
|
||||||
|
TestSurvivorAlignmentInBytesOption.prepareOptions(
|
||||||
|
CommandLineOptionTest.prepareNumericFlag(
|
||||||
|
optionName, alignment)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private static String[] prepareOptions(String... options) {
|
||||||
|
List<String> finalOptions = new LinkedList<>();
|
||||||
|
Collections.addAll(finalOptions,
|
||||||
|
TestSurvivorAlignmentInBytesOption.FILTERED_VM_OPTIONS);
|
||||||
|
finalOptions.add(CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS);
|
||||||
|
Collections.addAll(finalOptions, options);
|
||||||
|
return finalOptions.toArray(new String[finalOptions.size()]);
|
||||||
|
}
|
||||||
|
}
|
|
@ -1,5 +1,5 @@
|
||||||
/*
|
/*
|
||||||
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
@ -24,8 +24,8 @@
|
||||||
/*
|
/*
|
||||||
* @test TestDefNewCMS
|
* @test TestDefNewCMS
|
||||||
* @key gc
|
* @key gc
|
||||||
* @bug 8006398
|
* @bug 8065972
|
||||||
* @summary Test that the deprecated DefNew+CMS combination print a warning message
|
* @summary Test that the unsupported DefNew+CMS combination does not start
|
||||||
* @library /testlibrary
|
* @library /testlibrary
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
@ -37,9 +37,9 @@ public class TestDefNewCMS {
|
||||||
public static void main(String args[]) throws Exception {
|
public static void main(String args[]) throws Exception {
|
||||||
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:-UseParNewGC", "-XX:+UseConcMarkSweepGC", "-version");
|
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:-UseParNewGC", "-XX:+UseConcMarkSweepGC", "-version");
|
||||||
OutputAnalyzer output = new OutputAnalyzer(pb.start());
|
OutputAnalyzer output = new OutputAnalyzer(pb.start());
|
||||||
output.shouldContain("warning: Using the DefNew young collector with the CMS collector is deprecated and will likely be removed in a future release");
|
output.shouldContain("It is not possible to combine the DefNew young collector with the CMS collector.");
|
||||||
output.shouldNotContain("error");
|
output.shouldContain("Error");
|
||||||
output.shouldHaveExitValue(0);
|
output.shouldHaveExitValue(1);
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
46
hotspot/test/gc/startup_warnings/TestNoParNew.java
Normal file
46
hotspot/test/gc/startup_warnings/TestNoParNew.java
Normal file
|
@ -0,0 +1,46 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||||
|
* version 2 for more details (a copy is included in the LICENSE file that
|
||||||
|
* accompanied this code).
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License version
|
||||||
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||||
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
*
|
||||||
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||||
|
* or visit www.oracle.com if you need additional information or have any
|
||||||
|
* questions.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/*
|
||||||
|
* @test TestNoParNew
|
||||||
|
* @key gc
|
||||||
|
* @bug 8065972
|
||||||
|
* @summary Test that specifying -XX:-UseParNewGC on the command line logs a warning message
|
||||||
|
* @library /testlibrary
|
||||||
|
*/
|
||||||
|
|
||||||
|
import com.oracle.java.testlibrary.OutputAnalyzer;
|
||||||
|
import com.oracle.java.testlibrary.ProcessTools;
|
||||||
|
|
||||||
|
|
||||||
|
public class TestNoParNew {
|
||||||
|
|
||||||
|
public static void main(String args[]) throws Exception {
|
||||||
|
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:-UseParNewGC", "-version");
|
||||||
|
OutputAnalyzer output = new OutputAnalyzer(pb.start());
|
||||||
|
output.shouldContain("warning: The UseParNewGC flag is deprecated and will likely be removed in a future release");
|
||||||
|
output.shouldNotContain("error");
|
||||||
|
output.shouldHaveExitValue(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
|
@ -1,5 +1,5 @@
|
||||||
/*
|
/*
|
||||||
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
@ -24,8 +24,8 @@
|
||||||
/*
|
/*
|
||||||
* @test TestParNewCMS
|
* @test TestParNewCMS
|
||||||
* @key gc
|
* @key gc
|
||||||
* @bug 8006398
|
* @bug 8065972
|
||||||
* @summary Test that the combination ParNew+CMS does not print a warning message
|
* @summary Test that specifying -XX:+UseParNewGC on the command line logs a warning message
|
||||||
* @library /testlibrary
|
* @library /testlibrary
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
@ -38,7 +38,7 @@ public class TestParNewCMS {
|
||||||
public static void main(String args[]) throws Exception {
|
public static void main(String args[]) throws Exception {
|
||||||
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseParNewGC", "-XX:+UseConcMarkSweepGC", "-version");
|
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseParNewGC", "-XX:+UseConcMarkSweepGC", "-version");
|
||||||
OutputAnalyzer output = new OutputAnalyzer(pb.start());
|
OutputAnalyzer output = new OutputAnalyzer(pb.start());
|
||||||
output.shouldNotContain("deprecated");
|
output.shouldContain("warning: The UseParNewGC flag is deprecated and will likely be removed in a future release");
|
||||||
output.shouldNotContain("error");
|
output.shouldNotContain("error");
|
||||||
output.shouldHaveExitValue(0);
|
output.shouldHaveExitValue(0);
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
/*
|
/*
|
||||||
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
@ -24,8 +24,8 @@
|
||||||
/*
|
/*
|
||||||
* @test TestParNewSerialOld
|
* @test TestParNewSerialOld
|
||||||
* @key gc
|
* @key gc
|
||||||
* @bug 8006398
|
* @bug 8065972
|
||||||
* @summary Test that the deprecated ParNew+SerialOld combination print a warning message
|
* @summary Test that the unsupported ParNew+SerialOld combination does not start
|
||||||
* @library /testlibrary
|
* @library /testlibrary
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
@ -38,9 +38,9 @@ public class TestParNewSerialOld {
|
||||||
public static void main(String args[]) throws Exception {
|
public static void main(String args[]) throws Exception {
|
||||||
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseParNewGC", "-version");
|
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseParNewGC", "-version");
|
||||||
OutputAnalyzer output = new OutputAnalyzer(pb.start());
|
OutputAnalyzer output = new OutputAnalyzer(pb.start());
|
||||||
output.shouldContain("warning: Using the ParNew young collector with the Serial old collector is deprecated and will likely be removed in a future release");
|
output.shouldContain("It is not possible to combine the ParNew young collector with the Serial old collector.");
|
||||||
output.shouldNotContain("error");
|
output.shouldContain("Error");
|
||||||
output.shouldHaveExitValue(0);
|
output.shouldHaveExitValue(1);
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
174
hotspot/test/gc/survivorAlignment/AlignmentHelper.java
Normal file
174
hotspot/test/gc/survivorAlignment/AlignmentHelper.java
Normal file
|
@ -0,0 +1,174 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||||
|
* version 2 for more details (a copy is included in the LICENSE file that
|
||||||
|
* accompanied this code).
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License version
|
||||||
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||||
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
*
|
||||||
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||||
|
* or visit www.oracle.com if you need additional information or have any
|
||||||
|
* questions.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import java.lang.management.MemoryPoolMXBean;
|
||||||
|
import java.util.Optional;
|
||||||
|
|
||||||
|
import sun.hotspot.WhiteBox;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Helper class aimed to provide information about alignment of objects in
|
||||||
|
* particular heap space, expected memory usage after objects' allocation so on.
|
||||||
|
*/
|
||||||
|
public class AlignmentHelper {
|
||||||
|
private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
|
||||||
|
|
||||||
|
private static final long OBJECT_ALIGNMENT_IN_BYTES_FOR_32_VM = 8L;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Max relative allowed actual memory usage deviation from expected memory
|
||||||
|
* usage.
|
||||||
|
*/
|
||||||
|
private static final float MAX_RELATIVE_DEVIATION = 0.05f; // 5%
|
||||||
|
|
||||||
|
public static final long OBJECT_ALIGNMENT_IN_BYTES = Optional.ofNullable(
|
||||||
|
AlignmentHelper.WHITE_BOX.getIntxVMFlag("ObjectAlignmentInBytes"))
|
||||||
|
.orElse(AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES_FOR_32_VM);
|
||||||
|
|
||||||
|
public static final long SURVIVOR_ALIGNMENT_IN_BYTES = Optional.ofNullable(
|
||||||
|
AlignmentHelper.WHITE_BOX.getIntxVMFlag("SurvivorAlignmentInBytes"))
|
||||||
|
.orElseThrow(() ->new AssertionError(
|
||||||
|
"Unable to get SurvivorAlignmentInBytes value"));
|
||||||
|
/**
|
||||||
|
* Min amount of memory that will be occupied by an object.
|
||||||
|
*/
|
||||||
|
public static final long MIN_OBJECT_SIZE
|
||||||
|
= AlignmentHelper.WHITE_BOX.getObjectSize(new Object());
|
||||||
|
/**
|
||||||
|
* Min amount of memory that will be occupied by an empty byte array.
|
||||||
|
*/
|
||||||
|
public static final long MIN_ARRAY_SIZE
|
||||||
|
= AlignmentHelper.WHITE_BOX.getObjectSize(new byte[0]);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Precision at which actual memory usage in a heap space represented by
|
||||||
|
* this sizing helper could be measured.
|
||||||
|
*/
|
||||||
|
private final long memoryUsageMeasurementPrecision;
|
||||||
|
/**
|
||||||
|
* Min amount of memory that will be occupied by an object allocated in a
|
||||||
|
* heap space represented by this sizing helper.
|
||||||
|
*/
|
||||||
|
private final long minObjectSizeInThisSpace;
|
||||||
|
/**
|
||||||
|
* Object's alignment in a heap space represented by this sizing helper.
|
||||||
|
*/
|
||||||
|
private final long objectAlignmentInThisRegion;
|
||||||
|
/**
|
||||||
|
* MemoryPoolMXBean associated with a heap space represented by this sizing
|
||||||
|
* helper.
|
||||||
|
*/
|
||||||
|
private final MemoryPoolMXBean poolMXBean;
|
||||||
|
|
||||||
|
private static long alignUp(long value, long alignment) {
|
||||||
|
return ((value - 1) / alignment + 1) * alignment;
|
||||||
|
}
|
||||||
|
|
||||||
|
protected AlignmentHelper(long memoryUsageMeasurementPrecision,
|
||||||
|
long objectAlignmentInThisRegion, long minObjectSizeInThisSpace,
|
||||||
|
MemoryPoolMXBean poolMXBean) {
|
||||||
|
this.memoryUsageMeasurementPrecision = memoryUsageMeasurementPrecision;
|
||||||
|
this.minObjectSizeInThisSpace = minObjectSizeInThisSpace;
|
||||||
|
this.objectAlignmentInThisRegion = objectAlignmentInThisRegion;
|
||||||
|
this.poolMXBean = poolMXBean;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns how many objects have to be allocated to fill
|
||||||
|
* {@code memoryToFill} bytes in this heap space using objects of size
|
||||||
|
* {@code objectSize}.
|
||||||
|
*/
|
||||||
|
public int getObjectsCount(long memoryToFill, long objectSize) {
|
||||||
|
return (int) (memoryToFill / getObjectSizeInThisSpace(objectSize));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns amount of memory that {@code objectsCount} of objects with size
|
||||||
|
* {@code objectSize} will occupy this this space after allocation.
|
||||||
|
*/
|
||||||
|
public long getExpectedMemoryUsage(long objectSize, int objectsCount) {
|
||||||
|
long correctedObjectSize = getObjectSizeInThisSpace(objectSize);
|
||||||
|
return AlignmentHelper.alignUp(correctedObjectSize * objectsCount,
|
||||||
|
memoryUsageMeasurementPrecision);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns current memory usage in this heap space.
|
||||||
|
*/
|
||||||
|
public long getActualMemoryUsage() {
|
||||||
|
return poolMXBean.getUsage().getUsed();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns maximum memory usage deviation from {@code expectedMemoryUsage}
|
||||||
|
* given the max allowed relative deviation equal to
|
||||||
|
* {@code relativeDeviation}.
|
||||||
|
*
|
||||||
|
* Note that value returned by this method is aligned according to
|
||||||
|
* memory measurement precision for this heap space.
|
||||||
|
*/
|
||||||
|
public long getAllowedMemoryUsageDeviation(long expectedMemoryUsage) {
|
||||||
|
long unalignedDeviation = (long) (expectedMemoryUsage *
|
||||||
|
AlignmentHelper.MAX_RELATIVE_DEVIATION);
|
||||||
|
return AlignmentHelper.alignUp(unalignedDeviation,
|
||||||
|
memoryUsageMeasurementPrecision);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns amount of memory that will be occupied by an object with size
|
||||||
|
* {@code objectSize} in this heap space.
|
||||||
|
*/
|
||||||
|
public long getObjectSizeInThisSpace(long objectSize) {
|
||||||
|
objectSize = Math.max(objectSize, minObjectSizeInThisSpace);
|
||||||
|
|
||||||
|
long alignedObjectSize = AlignmentHelper.alignUp(objectSize,
|
||||||
|
objectAlignmentInThisRegion);
|
||||||
|
long sizeDiff = alignedObjectSize - objectSize;
|
||||||
|
|
||||||
|
// If there is not enough space to fit padding object, then object will
|
||||||
|
// be aligned to {@code 2 * objectAlignmentInThisRegion}.
|
||||||
|
if (sizeDiff >= AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES
|
||||||
|
&& sizeDiff < AlignmentHelper.MIN_OBJECT_SIZE) {
|
||||||
|
alignedObjectSize += AlignmentHelper.MIN_OBJECT_SIZE;
|
||||||
|
alignedObjectSize = AlignmentHelper.alignUp(alignedObjectSize,
|
||||||
|
objectAlignmentInThisRegion);
|
||||||
|
}
|
||||||
|
|
||||||
|
return alignedObjectSize;
|
||||||
|
}
|
||||||
|
@Override
|
||||||
|
public String toString() {
|
||||||
|
StringBuilder builder = new StringBuilder();
|
||||||
|
|
||||||
|
builder.append(String.format("AlignmentHelper for memory pool '%s':%n",
|
||||||
|
poolMXBean.getName()));
|
||||||
|
builder.append(String.format("Memory usage measurement precision: %d%n",
|
||||||
|
memoryUsageMeasurementPrecision));
|
||||||
|
builder.append(String.format("Min object size in this space: %d%n",
|
||||||
|
minObjectSizeInThisSpace));
|
||||||
|
builder.append(String.format("Object alignment in this space: %d%n",
|
||||||
|
objectAlignmentInThisRegion));
|
||||||
|
|
||||||
|
return builder.toString();
|
||||||
|
}
|
||||||
|
}
|
416
hotspot/test/gc/survivorAlignment/SurvivorAlignmentTestMain.java
Normal file
416
hotspot/test/gc/survivorAlignment/SurvivorAlignmentTestMain.java
Normal file
|
@ -0,0 +1,416 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||||
|
* version 2 for more details (a copy is included in the LICENSE file that
|
||||||
|
* accompanied this code).
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License version
|
||||||
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||||
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
*
|
||||||
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||||
|
* or visit www.oracle.com if you need additional information or have any
|
||||||
|
* questions.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import java.lang.management.ManagementFactory;
|
||||||
|
import java.lang.management.MemoryPoolMXBean;
|
||||||
|
import java.util.Objects;
|
||||||
|
import java.util.Optional;
|
||||||
|
import java.util.regex.Matcher;
|
||||||
|
import java.util.regex.Pattern;
|
||||||
|
|
||||||
|
import com.oracle.java.testlibrary.Asserts;
|
||||||
|
import com.sun.management.ThreadMXBean;
|
||||||
|
import sun.hotspot.WhiteBox;
|
||||||
|
import sun.misc.Unsafe;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Main class for tests on {@code SurvivorAlignmentInBytes} option.
|
||||||
|
*
|
||||||
|
* Typical usage is to obtain instance using fromArgs method, allocate objects
|
||||||
|
* and verify that actual memory usage in tested heap space is close to
|
||||||
|
* expected.
|
||||||
|
*/
|
||||||
|
public class SurvivorAlignmentTestMain {
|
||||||
|
enum HeapSpace {
|
||||||
|
EDEN,
|
||||||
|
SURVIVOR,
|
||||||
|
TENURED
|
||||||
|
}
|
||||||
|
|
||||||
|
public static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
|
||||||
|
|
||||||
|
public static final long MAX_TENURING_THRESHOLD = Optional.ofNullable(
|
||||||
|
SurvivorAlignmentTestMain.WHITE_BOX.getIntxVMFlag(
|
||||||
|
"MaxTenuringThreshold")).orElse(15L);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Regexp used to parse memory size params, like 2G, 34m or 15k.
|
||||||
|
*/
|
||||||
|
private static final Pattern SIZE_REGEX
|
||||||
|
= Pattern.compile("(?<size>[0-9]+)(?<multiplier>[GMKgmk])?");
|
||||||
|
|
||||||
|
// Names of different heap spaces.
|
||||||
|
private static final String DEF_NEW_EDEN = "Eden Space";
|
||||||
|
private static final String DEF_NEW_SURVIVOR = "Survivor Space";
|
||||||
|
private static final String PAR_NEW_EDEN = "Par Eden Space";
|
||||||
|
private static final String PAR_NEW_SURVIVOR = "Par Survivor Space";
|
||||||
|
private static final String PS_EDEN = "PS Eden Space";
|
||||||
|
private static final String PS_SURVIVOR = "PS Survivor Space";
|
||||||
|
private static final String G1_EDEN = "G1 Eden Space";
|
||||||
|
private static final String G1_SURVIVOR = "G1 Survivor Space";
|
||||||
|
private static final String SERIAL_TENURED = "Tenured Gen";
|
||||||
|
private static final String CMS_TENURED = "CMS Old Gen";
|
||||||
|
private static final String PS_TENURED = "PS Old Gen";
|
||||||
|
private static final String G1_TENURED = "G1 Old Gen";
|
||||||
|
|
||||||
|
private static final long G1_HEAP_REGION_SIZE = Optional.ofNullable(
|
||||||
|
SurvivorAlignmentTestMain.WHITE_BOX.getUintxVMFlag(
|
||||||
|
"G1HeapRegionSize")).orElse(-1L);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Min size of free chunk in CMS generation.
|
||||||
|
* An object allocated in CMS generation will at least occupy this amount
|
||||||
|
* of bytes.
|
||||||
|
*/
|
||||||
|
private static final long CMS_MIN_FREE_CHUNK_SIZE
|
||||||
|
= 3L * Unsafe.ADDRESS_SIZE;
|
||||||
|
|
||||||
|
private static final AlignmentHelper EDEN_SPACE_HELPER;
|
||||||
|
private static final AlignmentHelper SURVIVOR_SPACE_HELPER;
|
||||||
|
private static final AlignmentHelper TENURED_SPACE_HELPER;
|
||||||
|
/**
|
||||||
|
* Amount of memory that should be filled during a test run.
|
||||||
|
*/
|
||||||
|
private final long memoryToFill;
|
||||||
|
/**
|
||||||
|
* The size of an objects that will be allocated during a test run.
|
||||||
|
*/
|
||||||
|
private final long objectSize;
|
||||||
|
/**
|
||||||
|
* Amount of memory that will be actually occupied by an object in eden
|
||||||
|
* space.
|
||||||
|
*/
|
||||||
|
private final long actualObjectSize;
|
||||||
|
/**
|
||||||
|
* Storage for allocated objects.
|
||||||
|
*/
|
||||||
|
private final Object[] garbage;
|
||||||
|
/**
|
||||||
|
* Heap space whose memory usage is a subject of assertions during the test
|
||||||
|
* run.
|
||||||
|
*/
|
||||||
|
private final HeapSpace testedSpace;
|
||||||
|
|
||||||
|
private long[] baselinedThreadMemoryUsage = null;
|
||||||
|
private long[] threadIds = null;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Initialize {@code EDEN_SPACE_HELPER}, {@code SURVIVOR_SPACE_HELPER} and
|
||||||
|
* {@code TENURED_SPACE_HELPER} to represent heap spaces in use.
|
||||||
|
*
|
||||||
|
* Note that regardless to GC object's alignment in survivor space is
|
||||||
|
* expected to be equal to {@code SurvivorAlignmentInBytes} value and
|
||||||
|
* alignment in other spaces is expected to be equal to
|
||||||
|
* {@code ObjectAlignmentInBytes} value.
|
||||||
|
*
|
||||||
|
* In CMS generation we can't allocate less then {@code MinFreeChunk} value,
|
||||||
|
* for other CGs we expect that object of size {@code MIN_OBJECT_SIZE}
|
||||||
|
* could be allocated as it is (of course, its size could be aligned
|
||||||
|
* according to alignment value used in a particular space).
|
||||||
|
*
|
||||||
|
* For G1 GC MXBeans could report memory usage only with region size
|
||||||
|
* precision (if an object allocated in some G1 heap region, then all region
|
||||||
|
* will claimed as used), so for G1's spaces precision is equal to
|
||||||
|
* {@code G1HeapRegionSize} value.
|
||||||
|
*/
|
||||||
|
static {
|
||||||
|
AlignmentHelper edenHelper = null;
|
||||||
|
AlignmentHelper survivorHelper = null;
|
||||||
|
AlignmentHelper tenuredHelper = null;
|
||||||
|
for (MemoryPoolMXBean pool : ManagementFactory.getMemoryPoolMXBeans()) {
|
||||||
|
switch (pool.getName()) {
|
||||||
|
case SurvivorAlignmentTestMain.DEF_NEW_EDEN:
|
||||||
|
case SurvivorAlignmentTestMain.PAR_NEW_EDEN:
|
||||||
|
case SurvivorAlignmentTestMain.PS_EDEN:
|
||||||
|
Asserts.assertNull(edenHelper,
|
||||||
|
"Only one bean for eden space is expected.");
|
||||||
|
edenHelper = new AlignmentHelper(
|
||||||
|
AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
|
||||||
|
AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
|
||||||
|
AlignmentHelper.MIN_OBJECT_SIZE, pool);
|
||||||
|
break;
|
||||||
|
case SurvivorAlignmentTestMain.G1_EDEN:
|
||||||
|
Asserts.assertNull(edenHelper,
|
||||||
|
"Only one bean for eden space is expected.");
|
||||||
|
edenHelper = new AlignmentHelper(
|
||||||
|
SurvivorAlignmentTestMain.G1_HEAP_REGION_SIZE,
|
||||||
|
AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
|
||||||
|
AlignmentHelper.MIN_OBJECT_SIZE, pool);
|
||||||
|
break;
|
||||||
|
case SurvivorAlignmentTestMain.DEF_NEW_SURVIVOR:
|
||||||
|
case SurvivorAlignmentTestMain.PAR_NEW_SURVIVOR:
|
||||||
|
case SurvivorAlignmentTestMain.PS_SURVIVOR:
|
||||||
|
Asserts.assertNull(survivorHelper,
|
||||||
|
"Only one bean for survivor space is expected.");
|
||||||
|
survivorHelper = new AlignmentHelper(
|
||||||
|
AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
|
||||||
|
AlignmentHelper.SURVIVOR_ALIGNMENT_IN_BYTES,
|
||||||
|
AlignmentHelper.MIN_OBJECT_SIZE, pool);
|
||||||
|
break;
|
||||||
|
case SurvivorAlignmentTestMain.G1_SURVIVOR:
|
||||||
|
Asserts.assertNull(survivorHelper,
|
||||||
|
"Only one bean for survivor space is expected.");
|
||||||
|
survivorHelper = new AlignmentHelper(
|
||||||
|
SurvivorAlignmentTestMain.G1_HEAP_REGION_SIZE,
|
||||||
|
AlignmentHelper.SURVIVOR_ALIGNMENT_IN_BYTES,
|
||||||
|
AlignmentHelper.MIN_OBJECT_SIZE, pool);
|
||||||
|
break;
|
||||||
|
case SurvivorAlignmentTestMain.SERIAL_TENURED:
|
||||||
|
case SurvivorAlignmentTestMain.PS_TENURED:
|
||||||
|
case SurvivorAlignmentTestMain.G1_TENURED:
|
||||||
|
Asserts.assertNull(tenuredHelper,
|
||||||
|
"Only one bean for tenured space is expected.");
|
||||||
|
tenuredHelper = new AlignmentHelper(
|
||||||
|
AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
|
||||||
|
AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
|
||||||
|
AlignmentHelper.MIN_OBJECT_SIZE, pool);
|
||||||
|
break;
|
||||||
|
case SurvivorAlignmentTestMain.CMS_TENURED:
|
||||||
|
Asserts.assertNull(tenuredHelper,
|
||||||
|
"Only one bean for tenured space is expected.");
|
||||||
|
tenuredHelper = new AlignmentHelper(
|
||||||
|
AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
|
||||||
|
AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
|
||||||
|
SurvivorAlignmentTestMain.CMS_MIN_FREE_CHUNK_SIZE,
|
||||||
|
pool);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
EDEN_SPACE_HELPER = Objects.requireNonNull(edenHelper,
|
||||||
|
"AlignmentHelper for eden space should be initialized.");
|
||||||
|
SURVIVOR_SPACE_HELPER = Objects.requireNonNull(survivorHelper,
|
||||||
|
"AlignmentHelper for survivor space should be initialized.");
|
||||||
|
TENURED_SPACE_HELPER = Objects.requireNonNull(tenuredHelper,
|
||||||
|
"AlignmentHelper for tenured space should be initialized.");
|
||||||
|
}
|
||||||
|
/**
|
||||||
|
* Returns an SurvivorAlignmentTestMain instance constructed using CLI
|
||||||
|
* options.
|
||||||
|
*
|
||||||
|
* Following options are expected:
|
||||||
|
* <ul>
|
||||||
|
* <li>memoryToFill</li>
|
||||||
|
* <li>objectSize</li>
|
||||||
|
* </ul>
|
||||||
|
*
|
||||||
|
* Both argument may contain multiplier suffix k, m or g.
|
||||||
|
*/
|
||||||
|
public static SurvivorAlignmentTestMain fromArgs(String[] args) {
|
||||||
|
Asserts.assertEQ(args.length, 3, "Expected three arguments: "
|
||||||
|
+ "memory size, object size and tested heap space name.");
|
||||||
|
|
||||||
|
long memoryToFill = parseSize(args[0]);
|
||||||
|
long objectSize = Math.max(parseSize(args[1]),
|
||||||
|
AlignmentHelper.MIN_ARRAY_SIZE);
|
||||||
|
HeapSpace testedSpace = HeapSpace.valueOf(args[2]);
|
||||||
|
|
||||||
|
return new SurvivorAlignmentTestMain(memoryToFill, objectSize,
|
||||||
|
testedSpace);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns a value parsed from a string with format
|
||||||
|
* <integer><multiplier>.
|
||||||
|
*/
|
||||||
|
private static long parseSize(String sizeString) {
|
||||||
|
Matcher matcher = SIZE_REGEX.matcher(sizeString);
|
||||||
|
Asserts.assertTrue(matcher.matches(),
|
||||||
|
"sizeString should have following format \"[0-9]+([MBK])?\"");
|
||||||
|
long size = Long.valueOf(matcher.group("size"));
|
||||||
|
|
||||||
|
if (matcher.group("multiplier") != null) {
|
||||||
|
long K = 1024L;
|
||||||
|
// fall through multipliers
|
||||||
|
switch (matcher.group("multiplier").toLowerCase()) {
|
||||||
|
case "g":
|
||||||
|
size *= K;
|
||||||
|
case "m":
|
||||||
|
size *= K;
|
||||||
|
case "k":
|
||||||
|
size *= K;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return size;
|
||||||
|
}
|
||||||
|
|
||||||
|
private SurvivorAlignmentTestMain(long memoryToFill, long objectSize,
|
||||||
|
HeapSpace testedSpace) {
|
||||||
|
this.objectSize = objectSize;
|
||||||
|
this.memoryToFill = memoryToFill;
|
||||||
|
this.testedSpace = testedSpace;
|
||||||
|
|
||||||
|
AlignmentHelper helper = SurvivorAlignmentTestMain.EDEN_SPACE_HELPER;
|
||||||
|
|
||||||
|
this.actualObjectSize = helper.getObjectSizeInThisSpace(
|
||||||
|
this.objectSize);
|
||||||
|
int arrayLength = helper.getObjectsCount(memoryToFill, this.objectSize);
|
||||||
|
garbage = new Object[arrayLength];
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Allocate byte arrays to fill {@code memoryToFill} memory.
|
||||||
|
*/
|
||||||
|
public void allocate() {
|
||||||
|
int byteArrayLength = Math.max((int) (objectSize
|
||||||
|
- Unsafe.ARRAY_BYTE_BASE_OFFSET), 0);
|
||||||
|
|
||||||
|
for (int i = 0; i < garbage.length; i++) {
|
||||||
|
garbage[i] = new byte[byteArrayLength];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Release memory occupied after {@code allocate} call.
|
||||||
|
*/
|
||||||
|
public void release() {
|
||||||
|
for (int i = 0; i < garbage.length; i++) {
|
||||||
|
garbage[i] = null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns expected amount of memory occupied in a {@code heapSpace} by
|
||||||
|
* objects referenced from {@code garbage} array.
|
||||||
|
*/
|
||||||
|
public long getExpectedMemoryUsage() {
|
||||||
|
AlignmentHelper alignmentHelper = getAlignmentHelper(testedSpace);
|
||||||
|
return alignmentHelper.getExpectedMemoryUsage(objectSize,
|
||||||
|
garbage.length);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Verifies that memory usage in a {@code heapSpace} deviates from
|
||||||
|
* {@code expectedUsage} for no more than {@code MAX_RELATIVE_DEVIATION}.
|
||||||
|
*/
|
||||||
|
public void verifyMemoryUsage(long expectedUsage) {
|
||||||
|
AlignmentHelper alignmentHelper = getAlignmentHelper(testedSpace);
|
||||||
|
|
||||||
|
long actualMemoryUsage = alignmentHelper.getActualMemoryUsage();
|
||||||
|
boolean otherThreadsAllocatedMemory = areOtherThreadsAllocatedMemory();
|
||||||
|
|
||||||
|
long memoryUsageDiff = Math.abs(actualMemoryUsage - expectedUsage);
|
||||||
|
long maxAllowedUsageDiff
|
||||||
|
= alignmentHelper.getAllowedMemoryUsageDeviation(expectedUsage);
|
||||||
|
|
||||||
|
System.out.println("Verifying memory usage in space: " + testedSpace);
|
||||||
|
System.out.println("Allocated objects count: " + garbage.length);
|
||||||
|
System.out.println("Desired object size: " + objectSize);
|
||||||
|
System.out.println("Actual object size: " + actualObjectSize);
|
||||||
|
System.out.println("Expected object size in space: "
|
||||||
|
+ alignmentHelper.getObjectSizeInThisSpace(objectSize));
|
||||||
|
System.out.println("Expected memory usage: " + expectedUsage);
|
||||||
|
System.out.println("Actual memory usage: " + actualMemoryUsage);
|
||||||
|
System.out.println("Memory usage diff: " + memoryUsageDiff);
|
||||||
|
System.out.println("Max allowed usage diff: " + maxAllowedUsageDiff);
|
||||||
|
|
||||||
|
if (memoryUsageDiff > maxAllowedUsageDiff
|
||||||
|
&& otherThreadsAllocatedMemory) {
|
||||||
|
System.out.println("Memory usage diff is incorrect, but it seems "
|
||||||
|
+ "like someone else allocated objects");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
Asserts.assertLTE(memoryUsageDiff, maxAllowedUsageDiff,
|
||||||
|
"Actual memory usage should not deviate from expected for " +
|
||||||
|
"more then " + maxAllowedUsageDiff);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Baselines amount of memory allocated by each thread.
|
||||||
|
*/
|
||||||
|
public void baselineMemoryAllocation() {
|
||||||
|
ThreadMXBean bean = (ThreadMXBean) ManagementFactory.getThreadMXBean();
|
||||||
|
threadIds = bean.getAllThreadIds();
|
||||||
|
baselinedThreadMemoryUsage = bean.getThreadAllocatedBytes(threadIds);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
 * Checks if threads other than the current thread were allocating objects
|
||||||
|
* after baselinedThreadMemoryUsage call.
|
||||||
|
*
|
||||||
|
* If baselinedThreadMemoryUsage was not called, then this method will return
|
||||||
|
* {@code false}.
|
||||||
|
*/
|
||||||
|
    // Checks whether threads other than the current thread allocated memory
    // since baselineMemoryAllocation() was called. Returns false if no
    // baseline was taken. Also prints a per-thread allocation report for
    // diagnostics.
    public boolean areOtherThreadsAllocatedMemory() {
        // No baseline recorded yet -- nothing to compare against.
        if (baselinedThreadMemoryUsage == null) {
            return false;
        }

        ThreadMXBean bean = (ThreadMXBean) ManagementFactory.getThreadMXBean();
        long currentMemoryAllocation[]
                = bean.getThreadAllocatedBytes(threadIds);
        boolean otherThreadsAllocatedMemory = false;

        System.out.println("Verifying amount of memory allocated by threads:");
        for (int i = 0; i < threadIds.length; i++) {
            System.out.format("Thread %d%nbaseline allocation: %d"
                    + "%ncurrent allocation:%d%n", threadIds[i],
                    baselinedThreadMemoryUsage[i], currentMemoryAllocation[i]);
            System.out.println(bean.getThreadInfo(threadIds[i]));

            // abs() guards against counters that moved backwards (e.g. a
            // thread id reused after the baselined thread died) -- any
            // difference counts as activity.
            long bytesAllocated = Math.abs(currentMemoryAllocation[i]
                    - baselinedThreadMemoryUsage[i]);
            if (bytesAllocated > 0
                    && threadIds[i] != Thread.currentThread().getId()) {
                otherThreadsAllocatedMemory = true;
            }
        }

        return otherThreadsAllocatedMemory;
    }
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String toString() {
|
||||||
|
StringBuilder builder = new StringBuilder();
|
||||||
|
|
||||||
|
builder.append(String.format("SurvivorAlignmentTestMain info:%n"));
|
||||||
|
builder.append(String.format("Desired object size: %d%n", objectSize));
|
||||||
|
builder.append(String.format("Memory to fill: %d%n", memoryToFill));
|
||||||
|
builder.append(String.format("Objects to be allocated: %d%n",
|
||||||
|
garbage.length));
|
||||||
|
|
||||||
|
builder.append(String.format("Alignment helpers to be used: %n"));
|
||||||
|
for (HeapSpace heapSpace: HeapSpace.values()) {
|
||||||
|
builder.append(String.format("For space %s:%n%s%n", heapSpace,
|
||||||
|
getAlignmentHelper(heapSpace)));
|
||||||
|
}
|
||||||
|
|
||||||
|
return builder.toString();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns {@code AlignmentHelper} for a space {@code heapSpace}.
|
||||||
|
*/
|
||||||
|
public static AlignmentHelper getAlignmentHelper(HeapSpace heapSpace) {
|
||||||
|
switch (heapSpace) {
|
||||||
|
case EDEN:
|
||||||
|
return SurvivorAlignmentTestMain.EDEN_SPACE_HELPER;
|
||||||
|
case SURVIVOR:
|
||||||
|
return SurvivorAlignmentTestMain.SURVIVOR_SPACE_HELPER;
|
||||||
|
case TENURED:
|
||||||
|
return SurvivorAlignmentTestMain.TENURED_SPACE_HELPER;
|
||||||
|
default:
|
||||||
|
throw new Error("Unexpected heap space: " + heapSpace);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
84
hotspot/test/gc/survivorAlignment/TestAllocationInEden.java
Normal file
84
hotspot/test/gc/survivorAlignment/TestAllocationInEden.java
Normal file
|
@ -0,0 +1,84 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||||
|
* version 2 for more details (a copy is included in the LICENSE file that
|
||||||
|
* accompanied this code).
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License version
|
||||||
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||||
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
*
|
||||||
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||||
|
* or visit www.oracle.com if you need additional information or have any
|
||||||
|
* questions.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @test
|
||||||
|
* @bug 8031323
|
||||||
|
* @summary Verify that object's alignment in eden space is not affected by
|
||||||
|
* SurvivorAlignmentInBytes option.
|
||||||
|
* @library /testlibrary /testlibrary/whitebox
|
||||||
|
* @build TestAllocationInEden SurvivorAlignmentTestMain AlignmentHelper
|
||||||
|
* @run main ClassFileInstaller sun.hotspot.WhiteBox
|
||||||
|
* sun.hotspot.WhiteBox$WhiteBoxPermission
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
|
||||||
|
* -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=32 -XX:-UseTLAB
|
||||||
|
* -XX:OldSize=128m -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* TestAllocationInEden 10m 9 EDEN
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
|
||||||
|
* -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=32 -XX:-UseTLAB
|
||||||
|
* -XX:OldSize=128m -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* TestAllocationInEden 10m 47 EDEN
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
|
||||||
|
* -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=64 -XX:-UseTLAB
|
||||||
|
* -XX:OldSize=128m -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* TestAllocationInEden 10m 9 EDEN
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
|
||||||
|
* -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=64 -XX:-UseTLAB
|
||||||
|
* -XX:OldSize=128m -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* TestAllocationInEden 10m 87 EDEN
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
|
||||||
|
* -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=128 -XX:-UseTLAB
|
||||||
|
* -XX:OldSize=128m -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* TestAllocationInEden 10m 9 EDEN
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
|
||||||
|
* -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=128 -XX:-UseTLAB
|
||||||
|
* -XX:OldSize=128m -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* TestAllocationInEden 10m 147 EDEN
|
||||||
|
*/
|
||||||
|
public class TestAllocationInEden {
|
||||||
|
public static void main(String args[]) {
|
||||||
|
SurvivorAlignmentTestMain test
|
||||||
|
= SurvivorAlignmentTestMain.fromArgs(args);
|
||||||
|
System.out.println(test);
|
||||||
|
|
||||||
|
long expectedMemoryUsage = test.getExpectedMemoryUsage();
|
||||||
|
test.baselineMemoryAllocation();
|
||||||
|
System.gc();
|
||||||
|
|
||||||
|
test.allocate();
|
||||||
|
|
||||||
|
test.verifyMemoryUsage(expectedMemoryUsage);
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,96 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||||
|
* version 2 for more details (a copy is included in the LICENSE file that
|
||||||
|
* accompanied this code).
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License version
|
||||||
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||||
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
*
|
||||||
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||||
|
* or visit www.oracle.com if you need additional information or have any
|
||||||
|
* questions.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @test
|
||||||
|
* @bug 8031323
|
||||||
|
* @summary Verify that objects promoted from eden space to tenured space during
|
||||||
|
* full GC are not aligned to SurvivorAlignmentInBytes value.
|
||||||
|
* @library /testlibrary /testlibrary/whitebox
|
||||||
|
* @build TestPromotionFromEdenToTenured SurvivorAlignmentTestMain
|
||||||
|
* AlignmentHelper
|
||||||
|
* @run main ClassFileInstaller sun.hotspot.WhiteBox
|
||||||
|
* sun.hotspot.WhiteBox$WhiteBoxPermission
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
|
||||||
|
* -XX:OldSize=32m -XX:SurvivorRatio=1
|
||||||
|
* -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=32
|
||||||
|
* TestPromotionFromEdenToTenured 10m 9 TENURED
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
|
||||||
|
* -XX:OldSize=32m -XX:SurvivorRatio=1
|
||||||
|
* -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=32
|
||||||
|
* TestPromotionFromEdenToTenured 10m 47 TENURED
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
|
||||||
|
* -XX:OldSize=32m -XX:SurvivorRatio=1
|
||||||
|
* -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=64
|
||||||
|
* TestPromotionFromEdenToTenured 10m 9 TENURED
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
|
||||||
|
* -XX:OldSize=32m -XX:SurvivorRatio=1
|
||||||
|
* -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=64
|
||||||
|
* TestPromotionFromEdenToTenured 10m 87 TENURED
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
|
||||||
|
* -XX:OldSize=32M -XX:SurvivorRatio=1
|
||||||
|
* -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=128
|
||||||
|
* TestPromotionFromEdenToTenured 10m 9 TENURED
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
|
||||||
|
* -XX:OldSize=32m -XX:SurvivorRatio=1
|
||||||
|
* -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=128
|
||||||
|
* TestPromotionFromEdenToTenured 10m 147 TENURED
|
||||||
|
*/
|
||||||
|
public class TestPromotionFromEdenToTenured {
|
||||||
|
public static void main(String args[]) {
|
||||||
|
SurvivorAlignmentTestMain test
|
||||||
|
= SurvivorAlignmentTestMain.fromArgs(args);
|
||||||
|
System.out.println(test);
|
||||||
|
|
||||||
|
long expectedMemoryUsage = test.getExpectedMemoryUsage();
|
||||||
|
test.baselineMemoryAllocation();
|
||||||
|
System.gc();
|
||||||
|
// increase expected usage by current old gen usage
|
||||||
|
expectedMemoryUsage += SurvivorAlignmentTestMain.getAlignmentHelper(
|
||||||
|
SurvivorAlignmentTestMain.HeapSpace.TENURED)
|
||||||
|
.getActualMemoryUsage();
|
||||||
|
|
||||||
|
test.allocate();
|
||||||
|
System.gc();
|
||||||
|
|
||||||
|
test.verifyMemoryUsage(expectedMemoryUsage);
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,101 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||||
|
* version 2 for more details (a copy is included in the LICENSE file that
|
||||||
|
* accompanied this code).
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License version
|
||||||
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||||
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
*
|
||||||
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||||
|
* or visit www.oracle.com if you need additional information or have any
|
||||||
|
* questions.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @test
|
||||||
|
* @bug 8031323
|
||||||
|
* @summary Verify that objects promoted from survivor space to tenured space
|
||||||
|
* during full GC are not aligned to SurvivorAlignmentInBytes value.
|
||||||
|
* @library /testlibrary /testlibrary/whitebox
|
||||||
|
* @build TestPromotionFromSurvivorToTenuredAfterFullGC
|
||||||
|
* SurvivorAlignmentTestMain AlignmentHelper
|
||||||
|
* @run main ClassFileInstaller sun.hotspot.WhiteBox
|
||||||
|
* sun.hotspot.WhiteBox$WhiteBoxPermission
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
|
||||||
|
* -XX:OldSize=32m -XX:SurvivorRatio=1
|
||||||
|
* -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=32
|
||||||
|
* TestPromotionFromSurvivorToTenuredAfterFullGC 10m 9 TENURED
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
|
||||||
|
* -XX:OldSize=32m -XX:SurvivorRatio=1
|
||||||
|
* -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=32
|
||||||
|
* TestPromotionFromSurvivorToTenuredAfterFullGC 20m 47
|
||||||
|
* TENURED
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=200m -XX:MaxNewSize=200m
|
||||||
|
* -XX:OldSize=32m -XX:InitialHeapSize=232m
|
||||||
|
* -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=64
|
||||||
|
* TestPromotionFromSurvivorToTenuredAfterFullGC 10m 9 TENURED
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
|
||||||
|
* -XX:OldSize=32m -XX:SurvivorRatio=1
|
||||||
|
* -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=64
|
||||||
|
* TestPromotionFromSurvivorToTenuredAfterFullGC 20m 87
|
||||||
|
* TENURED
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=256m -XX:MaxNewSize=256m
|
||||||
|
* -XX:OldSize=32M -XX:InitialHeapSize=288m
|
||||||
|
* -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=128
|
||||||
|
* TestPromotionFromSurvivorToTenuredAfterFullGC 10m 9
|
||||||
|
* TENURED
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
|
||||||
|
* -XX:OldSize=32m -XX:SurvivorRatio=1
|
||||||
|
* -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=128
|
||||||
|
* TestPromotionFromSurvivorToTenuredAfterFullGC 20m 147
|
||||||
|
* TENURED
|
||||||
|
*/
|
||||||
|
public class TestPromotionFromSurvivorToTenuredAfterFullGC {
|
||||||
|
public static void main(String args[]) {
|
||||||
|
SurvivorAlignmentTestMain test
|
||||||
|
= SurvivorAlignmentTestMain.fromArgs(args);
|
||||||
|
System.out.println(test);
|
||||||
|
|
||||||
|
long expectedMemoryUsage = test.getExpectedMemoryUsage();
|
||||||
|
test.baselineMemoryAllocation();
|
||||||
|
System.gc();
|
||||||
|
// increase expected usage by current old gen usage
|
||||||
|
expectedMemoryUsage += SurvivorAlignmentTestMain.getAlignmentHelper(
|
||||||
|
SurvivorAlignmentTestMain.HeapSpace.TENURED)
|
||||||
|
.getActualMemoryUsage();
|
||||||
|
|
||||||
|
test.allocate();
|
||||||
|
SurvivorAlignmentTestMain.WHITE_BOX.youngGC();
|
||||||
|
System.gc();
|
||||||
|
|
||||||
|
test.verifyMemoryUsage(expectedMemoryUsage);
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,106 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||||
|
* version 2 for more details (a copy is included in the LICENSE file that
|
||||||
|
* accompanied this code).
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License version
|
||||||
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||||
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
*
|
||||||
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||||
|
* or visit www.oracle.com if you need additional information or have any
|
||||||
|
* questions.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @test
|
||||||
|
* @bug 8031323
|
||||||
|
* @summary Verify that objects promoted from survivor space to tenured space
|
||||||
|
* when their age exceeded tenuring threshold are not aligned to
|
||||||
|
* SurvivorAlignmentInBytes value.
|
||||||
|
* @library /testlibrary /testlibrary/whitebox
|
||||||
|
* @build TestPromotionFromSurvivorToTenuredAfterMinorGC
|
||||||
|
* SurvivorAlignmentTestMain AlignmentHelper
|
||||||
|
* @run main ClassFileInstaller sun.hotspot.WhiteBox
|
||||||
|
* sun.hotspot.WhiteBox$WhiteBoxPermission
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
|
||||||
|
* -XX:OldSize=32M -XX:SurvivorRatio=1
|
||||||
|
* -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=32
|
||||||
|
* TestPromotionFromSurvivorToTenuredAfterMinorGC 10m 9
|
||||||
|
* TENURED
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
|
||||||
|
* -XX:OldSize=32M -XX:SurvivorRatio=1
|
||||||
|
* -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=32
|
||||||
|
* TestPromotionFromSurvivorToTenuredAfterMinorGC 20m 47
|
||||||
|
* TENURED
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=200m -XX:MaxNewSize=200m
|
||||||
|
* -XX:OldSize=32M -XX:InitialHeapSize=232m
|
||||||
|
* -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=64
|
||||||
|
* TestPromotionFromSurvivorToTenuredAfterMinorGC 10m 9
|
||||||
|
* TENURED
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
|
||||||
|
* -XX:OldSize=32M -XX:SurvivorRatio=1
|
||||||
|
* -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=64
|
||||||
|
* TestPromotionFromSurvivorToTenuredAfterMinorGC 20m 87
|
||||||
|
* TENURED
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=256m -XX:MaxNewSize=256m
|
||||||
|
* -XX:OldSize=32M -XX:InitialHeapSize=288m
|
||||||
|
* -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=128
|
||||||
|
* TestPromotionFromSurvivorToTenuredAfterMinorGC 10m 9
|
||||||
|
* TENURED
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
|
||||||
|
* -XX:OldSize=32M -XX:SurvivorRatio=1
|
||||||
|
* -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=128
|
||||||
|
* TestPromotionFromSurvivorToTenuredAfterMinorGC 20m 147
|
||||||
|
* TENURED
|
||||||
|
*/
|
||||||
|
public class TestPromotionFromSurvivorToTenuredAfterMinorGC {
|
||||||
|
public static void main(String args[]) throws Exception {
|
||||||
|
SurvivorAlignmentTestMain test
|
||||||
|
= SurvivorAlignmentTestMain.fromArgs(args);
|
||||||
|
System.out.println(test);
|
||||||
|
|
||||||
|
long expectedMemoryUsage = test.getExpectedMemoryUsage();
|
||||||
|
test.baselineMemoryAllocation();
|
||||||
|
SurvivorAlignmentTestMain.WHITE_BOX.fullGC();
|
||||||
|
// increase expected usage by current old gen usage
|
||||||
|
expectedMemoryUsage += SurvivorAlignmentTestMain.getAlignmentHelper(
|
||||||
|
SurvivorAlignmentTestMain.HeapSpace.TENURED)
|
||||||
|
.getActualMemoryUsage();
|
||||||
|
|
||||||
|
test.allocate();
|
||||||
|
for (int i = 0; i <= SurvivorAlignmentTestMain.MAX_TENURING_THRESHOLD;
|
||||||
|
i++) {
|
||||||
|
SurvivorAlignmentTestMain.WHITE_BOX.youngGC();
|
||||||
|
}
|
||||||
|
|
||||||
|
test.verifyMemoryUsage(expectedMemoryUsage);
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,85 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||||
|
* version 2 for more details (a copy is included in the LICENSE file that
|
||||||
|
* accompanied this code).
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License version
|
||||||
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||||
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
*
|
||||||
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||||
|
* or visit www.oracle.com if you need additional information or have any
|
||||||
|
* questions.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @test
|
||||||
|
* @bug 8031323
|
||||||
|
* @summary Verify that objects promoted from eden space to survivor space after
|
||||||
|
* minor GC are aligned to SurvivorAlignmentInBytes.
|
||||||
|
* @library /testlibrary /testlibrary/whitebox
|
||||||
|
* @build TestPromotionToSurvivor
|
||||||
|
* SurvivorAlignmentTestMain AlignmentHelper
|
||||||
|
* @run main ClassFileInstaller sun.hotspot.WhiteBox
|
||||||
|
* sun.hotspot.WhiteBox$WhiteBoxPermission
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
|
||||||
|
* -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=32 -XX:OldSize=128m
|
||||||
|
* -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* TestPromotionToSurvivor 10m 9 SURVIVOR
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
|
||||||
|
* -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=32 -XX:OldSize=128m
|
||||||
|
* TestPromotionToSurvivor 20m 47 SURVIVOR
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
|
||||||
|
* -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=64 -XX:OldSize=128m
|
||||||
|
* -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* TestPromotionToSurvivor 8m 9 SURVIVOR
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
|
||||||
|
* -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=64 -XX:OldSize=128m
|
||||||
|
* -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* TestPromotionToSurvivor 20m 87 SURVIVOR
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=256m -XX:MaxNewSize=256m
|
||||||
|
* -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=128 -XX:OldSize=32m
|
||||||
|
* -XX:InitialHeapSize=288m -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* TestPromotionToSurvivor 10m 9 SURVIVOR
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
|
||||||
|
* -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
|
||||||
|
* -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
|
||||||
|
* -XX:SurvivorAlignmentInBytes=128 -XX:OldSize=128m
|
||||||
|
* -XX:-ExplicitGCInvokesConcurrent
|
||||||
|
* TestPromotionToSurvivor 20m 147 SURVIVOR
|
||||||
|
*/
|
||||||
|
public class TestPromotionToSurvivor {
|
||||||
|
public static void main(String args[]) {
|
||||||
|
SurvivorAlignmentTestMain test
|
||||||
|
= SurvivorAlignmentTestMain.fromArgs(args);
|
||||||
|
System.out.println(test);
|
||||||
|
|
||||||
|
long expectedUsage = test.getExpectedMemoryUsage();
|
||||||
|
test.baselineMemoryAllocation();
|
||||||
|
SurvivorAlignmentTestMain.WHITE_BOX.fullGC();
|
||||||
|
|
||||||
|
test.allocate();
|
||||||
|
SurvivorAlignmentTestMain.WHITE_BOX.youngGC();
|
||||||
|
|
||||||
|
test.verifyMemoryUsage(expectedUsage);
|
||||||
|
}
|
||||||
|
}
|
57
hotspot/test/gc/whitebox/TestConcMarkCycleWB.java
Normal file
57
hotspot/test/gc/whitebox/TestConcMarkCycleWB.java
Normal file
|
@ -0,0 +1,57 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||||
|
* version 2 for more details (a copy is included in the LICENSE file that
|
||||||
|
* accompanied this code).
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License version
|
||||||
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||||
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
*
|
||||||
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||||
|
* or visit www.oracle.com if you need additional information or have any
|
||||||
|
* questions.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/*
|
||||||
|
* @test TestConMarkCycleWB
|
||||||
|
* @bug 8065579
|
||||||
|
* @requires vm.gc=="null" | vm.gc=="G1"
|
||||||
|
* @library /testlibrary /testlibrary/whitebox
|
||||||
|
* @build ClassFileInstaller com.oracle.java.testlibrary.* sun.hotspot.WhiteBox TestConcMarkCycleWB
|
||||||
|
* @run main ClassFileInstaller sun.hotspot.WhiteBox
|
||||||
|
* sun.hotspot.WhiteBox$WhiteBoxPermission
|
||||||
|
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UseG1GC TestConcMarkCycleWB
|
||||||
|
* @summary Verifies that ConcurrentMarking-related WB works properly
|
||||||
|
*/
|
||||||
|
import static com.oracle.java.testlibrary.Asserts.assertFalse;
|
||||||
|
import static com.oracle.java.testlibrary.Asserts.assertTrue;
|
||||||
|
import sun.hotspot.WhiteBox;
|
||||||
|
|
||||||
|
public class TestConcMarkCycleWB {
|
||||||
|
|
||||||
|
public static void main(String[] args) throws Exception {
|
||||||
|
WhiteBox wb = WhiteBox.getWhiteBox();
|
||||||
|
|
||||||
|
wb.youngGC();
|
||||||
|
assertTrue(wb.g1StartConcMarkCycle());
|
||||||
|
while (wb.g1InConcurrentMark()) {
|
||||||
|
Thread.sleep(5);
|
||||||
|
}
|
||||||
|
|
||||||
|
wb.fullGC();
|
||||||
|
assertTrue(wb.g1StartConcMarkCycle());
|
||||||
|
while (wb.g1InConcurrentMark()) {
|
||||||
|
Thread.sleep(5);
|
||||||
|
}
|
||||||
|
assertTrue(wb.g1StartConcMarkCycle());
|
||||||
|
}
|
||||||
|
}
|
|
@ -168,12 +168,16 @@ public class WhiteBox {
|
||||||
public native long incMetaspaceCapacityUntilGC(long increment);
|
public native long incMetaspaceCapacityUntilGC(long increment);
|
||||||
public native long metaspaceCapacityUntilGC();
|
public native long metaspaceCapacityUntilGC();
|
||||||
|
|
||||||
// force Young GC
|
// Force Young GC
|
||||||
public native void youngGC();
|
public native void youngGC();
|
||||||
|
|
||||||
// force Full GC
|
// Force Full GC
|
||||||
public native void fullGC();
|
public native void fullGC();
|
||||||
|
|
||||||
|
// Method tries to start concurrent mark cycle.
|
||||||
|
// It returns false if CM Thread is always in concurrent cycle.
|
||||||
|
public native boolean g1StartConcMarkCycle();
|
||||||
|
|
||||||
// Tests on ReservedSpace/VirtualSpace classes
|
// Tests on ReservedSpace/VirtualSpace classes
|
||||||
public native int stressVirtualSpaceResize(long reservedSpaceSize, long magnitude, long iterations);
|
public native int stressVirtualSpaceResize(long reservedSpaceSize, long magnitude, long iterations);
|
||||||
public native void runMemoryUnitTests();
|
public native void runMemoryUnitTests();
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue