mirror of
https://github.com/openjdk/jdk.git
synced 2025-08-27 14:54:52 +02:00
8000311: G1: ParallelGCThreads==0 broken
Divide-by-zero error when adjusting the PLAB size if ParallelGCThreads is 0. Reviewed-by: jmasa, jcoomes
This commit is contained in:
parent
3506d44c57
commit
ed98ea0a88
5 changed files with 30 additions and 31 deletions
|
@ -4151,7 +4151,7 @@ void G1CollectedHeap::init_gc_alloc_regions() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void G1CollectedHeap::release_gc_alloc_regions() {
|
void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers) {
|
||||||
_survivor_gc_alloc_region.release();
|
_survivor_gc_alloc_region.release();
|
||||||
// If we have an old GC alloc region to release, we'll save it in
|
// If we have an old GC alloc region to release, we'll save it in
|
||||||
// _retained_old_gc_alloc_region. If we don't
|
// _retained_old_gc_alloc_region. If we don't
|
||||||
|
@ -4161,8 +4161,8 @@ void G1CollectedHeap::release_gc_alloc_regions() {
|
||||||
_retained_old_gc_alloc_region = _old_gc_alloc_region.release();
|
_retained_old_gc_alloc_region = _old_gc_alloc_region.release();
|
||||||
|
|
||||||
if (ResizePLAB) {
|
if (ResizePLAB) {
|
||||||
_survivor_plab_stats.adjust_desired_plab_sz();
|
_survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
|
||||||
_old_plab_stats.adjust_desired_plab_sz();
|
_old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -5427,7 +5427,7 @@ public:
|
||||||
};
|
};
|
||||||
|
|
||||||
// Weak Reference processing during an evacuation pause (part 1).
|
// Weak Reference processing during an evacuation pause (part 1).
|
||||||
void G1CollectedHeap::process_discovered_references() {
|
void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
|
||||||
double ref_proc_start = os::elapsedTime();
|
double ref_proc_start = os::elapsedTime();
|
||||||
|
|
||||||
ReferenceProcessor* rp = _ref_processor_stw;
|
ReferenceProcessor* rp = _ref_processor_stw;
|
||||||
|
@ -5454,15 +5454,14 @@ void G1CollectedHeap::process_discovered_references() {
|
||||||
// referents points to another object which is also referenced by an
|
// referents points to another object which is also referenced by an
|
||||||
// object discovered by the STW ref processor.
|
// object discovered by the STW ref processor.
|
||||||
|
|
||||||
uint active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
|
|
||||||
workers()->active_workers() : 1);
|
|
||||||
|
|
||||||
assert(!G1CollectedHeap::use_parallel_gc_threads() ||
|
assert(!G1CollectedHeap::use_parallel_gc_threads() ||
|
||||||
active_workers == workers()->active_workers(),
|
no_of_gc_workers == workers()->active_workers(),
|
||||||
"Need to reset active_workers");
|
"Need to reset active GC workers");
|
||||||
|
|
||||||
set_par_threads(active_workers);
|
set_par_threads(no_of_gc_workers);
|
||||||
G1ParPreserveCMReferentsTask keep_cm_referents(this, active_workers, _task_queues);
|
G1ParPreserveCMReferentsTask keep_cm_referents(this,
|
||||||
|
no_of_gc_workers,
|
||||||
|
_task_queues);
|
||||||
|
|
||||||
if (G1CollectedHeap::use_parallel_gc_threads()) {
|
if (G1CollectedHeap::use_parallel_gc_threads()) {
|
||||||
workers()->run_task(&keep_cm_referents);
|
workers()->run_task(&keep_cm_referents);
|
||||||
|
@ -5528,10 +5527,10 @@ void G1CollectedHeap::process_discovered_references() {
|
||||||
NULL);
|
NULL);
|
||||||
} else {
|
} else {
|
||||||
// Parallel reference processing
|
// Parallel reference processing
|
||||||
assert(rp->num_q() == active_workers, "sanity");
|
assert(rp->num_q() == no_of_gc_workers, "sanity");
|
||||||
assert(active_workers <= rp->max_num_q(), "sanity");
|
assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
|
||||||
|
|
||||||
G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
|
G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
|
||||||
rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor);
|
rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -5546,7 +5545,7 @@ void G1CollectedHeap::process_discovered_references() {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Weak Reference processing during an evacuation pause (part 2).
|
// Weak Reference processing during an evacuation pause (part 2).
|
||||||
void G1CollectedHeap::enqueue_discovered_references() {
|
void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
|
||||||
double ref_enq_start = os::elapsedTime();
|
double ref_enq_start = os::elapsedTime();
|
||||||
|
|
||||||
ReferenceProcessor* rp = _ref_processor_stw;
|
ReferenceProcessor* rp = _ref_processor_stw;
|
||||||
|
@ -5560,13 +5559,12 @@ void G1CollectedHeap::enqueue_discovered_references() {
|
||||||
} else {
|
} else {
|
||||||
// Parallel reference enqueuing
|
// Parallel reference enqueuing
|
||||||
|
|
||||||
uint active_workers = (ParallelGCThreads > 0 ? workers()->active_workers() : 1);
|
assert(no_of_gc_workers == workers()->active_workers(),
|
||||||
assert(active_workers == workers()->active_workers(),
|
"Need to reset active workers");
|
||||||
"Need to reset active_workers");
|
assert(rp->num_q() == no_of_gc_workers, "sanity");
|
||||||
assert(rp->num_q() == active_workers, "sanity");
|
assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
|
||||||
assert(active_workers <= rp->max_num_q(), "sanity");
|
|
||||||
|
|
||||||
G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
|
G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
|
||||||
rp->enqueue_discovered_references(&par_task_executor);
|
rp->enqueue_discovered_references(&par_task_executor);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -5658,7 +5656,7 @@ void G1CollectedHeap::evacuate_collection_set() {
|
||||||
// as we may have to copy some 'reachable' referent
|
// as we may have to copy some 'reachable' referent
|
||||||
// objects (and their reachable sub-graphs) that were
|
// objects (and their reachable sub-graphs) that were
|
||||||
// not copied during the pause.
|
// not copied during the pause.
|
||||||
process_discovered_references();
|
process_discovered_references(n_workers);
|
||||||
|
|
||||||
// Weak root processing.
|
// Weak root processing.
|
||||||
// Note: when JSR 292 is enabled and code blobs can contain
|
// Note: when JSR 292 is enabled and code blobs can contain
|
||||||
|
@ -5670,7 +5668,7 @@ void G1CollectedHeap::evacuate_collection_set() {
|
||||||
JNIHandles::weak_oops_do(&is_alive, &keep_alive);
|
JNIHandles::weak_oops_do(&is_alive, &keep_alive);
|
||||||
}
|
}
|
||||||
|
|
||||||
release_gc_alloc_regions();
|
release_gc_alloc_regions(n_workers);
|
||||||
g1_rem_set()->cleanup_after_oops_into_collection_set_do();
|
g1_rem_set()->cleanup_after_oops_into_collection_set_do();
|
||||||
|
|
||||||
concurrent_g1_refine()->clear_hot_cache();
|
concurrent_g1_refine()->clear_hot_cache();
|
||||||
|
@ -5694,7 +5692,7 @@ void G1CollectedHeap::evacuate_collection_set() {
|
||||||
// will log these updates (and dirty their associated
|
// will log these updates (and dirty their associated
|
||||||
// cards). We need these updates logged to update any
|
// cards). We need these updates logged to update any
|
||||||
// RSets.
|
// RSets.
|
||||||
enqueue_discovered_references();
|
enqueue_discovered_references(n_workers);
|
||||||
|
|
||||||
if (G1DeferredRSUpdate) {
|
if (G1DeferredRSUpdate) {
|
||||||
RedirtyLoggedCardTableEntryFastClosure redirty;
|
RedirtyLoggedCardTableEntryFastClosure redirty;
|
||||||
|
|
|
@ -326,7 +326,7 @@ private:
|
||||||
void init_gc_alloc_regions();
|
void init_gc_alloc_regions();
|
||||||
|
|
||||||
// It releases the GC alloc regions at the end of a GC.
|
// It releases the GC alloc regions at the end of a GC.
|
||||||
void release_gc_alloc_regions();
|
void release_gc_alloc_regions(uint no_of_gc_workers);
|
||||||
|
|
||||||
// It does any cleanup that needs to be done on the GC alloc regions
|
// It does any cleanup that needs to be done on the GC alloc regions
|
||||||
// before a Full GC.
|
// before a Full GC.
|
||||||
|
@ -652,11 +652,11 @@ protected:
|
||||||
|
|
||||||
// Process any reference objects discovered during
|
// Process any reference objects discovered during
|
||||||
// an incremental evacuation pause.
|
// an incremental evacuation pause.
|
||||||
void process_discovered_references();
|
void process_discovered_references(uint no_of_gc_workers);
|
||||||
|
|
||||||
// Enqueue any remaining discovered references
|
// Enqueue any remaining discovered references
|
||||||
// after processing.
|
// after processing.
|
||||||
void enqueue_discovered_references();
|
void enqueue_discovered_references(uint no_of_gc_workers);
|
||||||
|
|
||||||
public:
|
public:
|
||||||
|
|
||||||
|
|
|
@ -1037,7 +1037,7 @@ void ParNewGeneration::collect(bool full,
|
||||||
|
|
||||||
adjust_desired_tenuring_threshold();
|
adjust_desired_tenuring_threshold();
|
||||||
if (ResizePLAB) {
|
if (ResizePLAB) {
|
||||||
plab_stats()->adjust_desired_plab_sz();
|
plab_stats()->adjust_desired_plab_sz(n_workers);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (PrintGC && !PrintGCDetails) {
|
if (PrintGC && !PrintGCDetails) {
|
||||||
|
|
|
@ -87,7 +87,7 @@ void ParGCAllocBuffer::flush_stats(PLABStats* stats) {
|
||||||
// Compute desired plab size and latch result for later
|
// Compute desired plab size and latch result for later
|
||||||
// use. This should be called once at the end of parallel
|
// use. This should be called once at the end of parallel
|
||||||
// scavenge; it clears the sensor accumulators.
|
// scavenge; it clears the sensor accumulators.
|
||||||
void PLABStats::adjust_desired_plab_sz() {
|
void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) {
|
||||||
assert(ResizePLAB, "Not set");
|
assert(ResizePLAB, "Not set");
|
||||||
if (_allocated == 0) {
|
if (_allocated == 0) {
|
||||||
assert(_unused == 0,
|
assert(_unused == 0,
|
||||||
|
@ -107,7 +107,7 @@ void PLABStats::adjust_desired_plab_sz() {
|
||||||
target_refills = 1;
|
target_refills = 1;
|
||||||
}
|
}
|
||||||
_used = _allocated - _wasted - _unused;
|
_used = _allocated - _wasted - _unused;
|
||||||
size_t plab_sz = _used/(target_refills*ParallelGCThreads);
|
size_t plab_sz = _used/(target_refills*no_of_gc_workers);
|
||||||
if (PrintPLAB) gclog_or_tty->print(" (plab_sz = %d ", plab_sz);
|
if (PrintPLAB) gclog_or_tty->print(" (plab_sz = %d ", plab_sz);
|
||||||
// Take historical weighted average
|
// Take historical weighted average
|
||||||
_filter.sample(plab_sz);
|
_filter.sample(plab_sz);
|
||||||
|
|
|
@ -204,7 +204,8 @@ class PLABStats VALUE_OBJ_CLASS_SPEC {
|
||||||
return _desired_plab_sz;
|
return _desired_plab_sz;
|
||||||
}
|
}
|
||||||
|
|
||||||
void adjust_desired_plab_sz(); // filter computation, latches output to
|
void adjust_desired_plab_sz(uint no_of_gc_workers);
|
||||||
|
// filter computation, latches output to
|
||||||
// _desired_plab_sz, clears sensor accumulators
|
// _desired_plab_sz, clears sensor accumulators
|
||||||
|
|
||||||
void add_allocated(size_t v) {
|
void add_allocated(size_t v) {
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue