8079555: REDO - Determining the desired PLAB size adjusts to the number of threads at the wrong place
Calculate the desired PLAB value for a single thread and then return the desired PLAB size according to the current number of threads when needed.
Reviewed-by: jmasa, tschatzl
parent 513b3b2ac1
commit 3e36930486
9 changed files with 37 additions and 30 deletions
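For orientation, a minimal standalone sketch of the idea behind the change, using hypothetical names rather than the actual HotSpot classes: the end-of-GC adjustment latches one net PLAB size that is independent of the thread count, and a per-thread size is derived from the number of workers active at the moment a buffer is actually requested.

    #include <algorithm>
    #include <cstddef>

    // Hypothetical, simplified model of the split introduced by this change
    // (illustrative only, not HotSpot code).
    struct SimplePLABStats {
      std::size_t desired_net_plab_sz = 4096;  // latched once per GC, in heap words
      std::size_t min_sz              = 256;   // lower clamp, in heap words

      // End of a GC: depends only on allocation history, not on how many
      // workers the next GC will happen to run with.
      void adjust_desired_plab_sz(std::size_t used_words, std::size_t target_refills) {
        desired_net_plab_sz = used_words / target_refills;
      }

      // Allocation time: divide the net size by the workers active right now
      // (active_workers is assumed to be non-zero).
      std::size_t desired_plab_sz(unsigned active_workers) const {
        return std::max(min_sz, desired_net_plab_sz / active_workers);
      }
    };

The hunks below make the same split in PLABStats (renaming _desired_plab_sz to _desired_net_plab_sz) and update the ParNew and G1 call sites accordingly.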
@@ -1021,7 +1021,7 @@ void ParNewGeneration::collect(bool full,
   to()->set_concurrent_iteration_safe_limit(to()->top());
 
   if (ResizePLAB) {
-    plab_stats()->adjust_desired_plab_sz(active_workers);
+    plab_stats()->adjust_desired_plab_sz();
   }
 
   if (PrintGC && !PrintGCDetails) {
@@ -1059,6 +1059,10 @@ void ParNewGeneration::collect(bool full,
   _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
 }
 
+size_t ParNewGeneration::desired_plab_sz() {
+  return _plab_stats.desired_plab_sz(GenCollectedHeap::heap()->workers()->active_workers());
+}
+
 static int sum;
 void ParNewGeneration::waste_some_time() {
   for (int i = 0; i < 100; i++) {
@@ -411,9 +411,7 @@ class ParNewGeneration: public DefNewGeneration {
     return &_plab_stats;
   }
 
-  size_t desired_plab_sz() {
-    return _plab_stats.desired_plab_sz();
-  }
+  size_t desired_plab_sz();
 
   const ParNewTracer* gc_tracer() const {
     return &_gc_tracer;
@@ -86,7 +86,7 @@ void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info)
                             &_retained_old_gc_alloc_region);
 }
 
-void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
+void G1DefaultAllocator::release_gc_alloc_regions(EvacuationInfo& evacuation_info) {
   AllocationContext_t context = AllocationContext::current();
   evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
                                          old_gc_alloc_region(context)->count());
@@ -102,8 +102,8 @@ void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
   }
 
   if (ResizePLAB) {
-    _g1h->alloc_buffer_stats(InCSetState::Young)->adjust_desired_plab_sz(no_of_gc_workers);
-    _g1h->alloc_buffer_stats(InCSetState::Old)->adjust_desired_plab_sz(no_of_gc_workers);
+    _g1h->alloc_buffer_stats(InCSetState::Young)->adjust_desired_plab_sz();
+    _g1h->alloc_buffer_stats(InCSetState::Old)->adjust_desired_plab_sz();
   }
 }
 
@@ -53,7 +53,7 @@ public:
   virtual void release_mutator_alloc_region() = 0;
 
   virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
-  virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) = 0;
+  virtual void release_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
   virtual void abandon_gc_alloc_regions() = 0;
 
   virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) = 0;
@@ -114,7 +114,7 @@ public:
   virtual void release_mutator_alloc_region();
 
   virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
-  virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
+  virtual void release_gc_alloc_regions(EvacuationInfo& evacuation_info);
   virtual void abandon_gc_alloc_regions();
 
   virtual bool is_retained_old_region(HeapRegion* hr) {
@@ -5644,7 +5644,7 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
     phase_times->record_string_dedup_fixup_time(fixup_time_ms);
   }
 
-  _allocator->release_gc_alloc_regions(n_workers, evacuation_info);
+  _allocator->release_gc_alloc_regions(evacuation_info);
   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
 
   // Reset and re-enable the hot card cache.
@@ -281,7 +281,7 @@ private:
   void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
 
   // It releases the GC alloc regions at the end of a GC.
-  void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
+  void release_gc_alloc_regions(EvacuationInfo& evacuation_info);
 
   // It does any cleanup that needs to be done on the GC alloc regions
   // before a Full GC.
@@ -49,7 +49,7 @@ PLABStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) {
 }
 
 size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
-  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz();
+  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz(G1CollectedHeap::heap()->workers()->active_workers());
   // Prevent humongous PLAB sizes for two reasons:
   // * PLABs are allocated using a similar paths as oops, but should
   //   never be in a humongous region
@@ -109,10 +109,15 @@ void PLAB::undo_allocation(HeapWord* obj, size_t word_sz) {
   }
 }
 
-// Compute desired plab size and latch result for later
+// Calculates plab size for current number of gc worker threads.
+size_t PLABStats::desired_plab_sz(uint no_of_gc_workers) {
+  return MAX2(min_size(), (size_t)align_object_size(_desired_net_plab_sz / no_of_gc_workers));
+}
+
+// Compute desired plab size for one gc worker thread and latch result for later
 // use. This should be called once at the end of parallel
 // scavenge; it clears the sensor accumulators.
-void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) {
+void PLABStats::adjust_desired_plab_sz() {
   assert(ResizePLAB, "Not set");
 
   assert(is_object_aligned(max_size()) && min_size() <= max_size(),
@@ -135,7 +140,8 @@ void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) {
     target_refills = 1;
   }
   size_t used = _allocated - _wasted - _unused;
-  size_t recent_plab_sz = used / (target_refills * no_of_gc_workers);
+  // Assumed to have 1 gc worker thread
+  size_t recent_plab_sz = used / target_refills;
   // Take historical weighted average
   _filter.sample(recent_plab_sz);
   // Clip from above and below, and align to object boundary
@@ -144,9 +150,9 @@ void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) {
   new_plab_sz = align_object_size(new_plab_sz);
   // Latch the result
   if (PrintPLAB) {
-    gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT " desired_plab_sz = " SIZE_FORMAT ") ", recent_plab_sz, new_plab_sz);
+    gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT " desired_net_plab_sz = " SIZE_FORMAT ") ", recent_plab_sz, new_plab_sz);
   }
-  _desired_plab_sz = new_plab_sz;
+  _desired_net_plab_sz = new_plab_sz;
 
   reset();
 }
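A worked example with illustrative numbers only (ignoring the AdaptiveWeightedAverage filtering and the min/max clamping): suppose a scavenge ends with used = 80000 words and target_refills = 10. The old code running with no_of_gc_workers = 8 latched 80000 / (10 * 8) = 1000 words as the per-thread PLAB size, and a later GC running with only 4 workers would still get 1000-word PLABs. The new code latches the net value 80000 / 10 = 8000 words, so desired_plab_sz(8) yields 1000 words while desired_plab_sz(4) yields 2000 words, and the per-thread size follows the number of threads actually in use.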
@@ -154,7 +154,7 @@ class PLABStats VALUE_OBJ_CLASS_SPEC {
   size_t _wasted;       // of which wasted (internal fragmentation)
   size_t _undo_wasted;  // of which wasted on undo (is not used for calculation of PLAB size)
   size_t _unused;       // Unused in last buffer
-  size_t _desired_plab_sz;// Output of filter (below), suitably trimmed and quantized
+  size_t _desired_net_plab_sz;// Output of filter (below), suitably trimmed and quantized
   AdaptiveWeightedAverage
                         _filter;  // Integrator with decay
 
@@ -165,12 +165,12 @@ class PLABStats VALUE_OBJ_CLASS_SPEC {
     _unused = 0;
   }
  public:
-  PLABStats(size_t desired_plab_sz_, unsigned wt) :
+  PLABStats(size_t desired_net_plab_sz_, unsigned wt) :
     _allocated(0),
     _wasted(0),
     _undo_wasted(0),
     _unused(0),
-    _desired_plab_sz(desired_plab_sz_),
+    _desired_net_plab_sz(desired_net_plab_sz_),
     _filter(wt)
   { }
 
@@ -182,13 +182,12 @@ class PLABStats VALUE_OBJ_CLASS_SPEC {
     return PLAB::max_size();
   }
 
-  size_t desired_plab_sz() {
-    return _desired_plab_sz;
-  }
+  // Calculates plab size for current number of gc worker threads.
+  size_t desired_plab_sz(uint no_of_gc_workers);
 
-  // Updates the current desired PLAB size. Computes the new desired PLAB size,
+  // Updates the current desired PLAB size. Computes the new desired PLAB size with one gc worker thread,
   // updates _desired_plab_sz and clears sensor accumulators.
-  void adjust_desired_plab_sz(uint no_of_gc_workers);
+  void adjust_desired_plab_sz();
 
   void add_allocated(size_t v) {
     Atomic::add_ptr(v, &_allocated);