8080876: Replace unnecessary MAX2(ParallelGCThreads, 1) calls with ParallelGCThreads

Reviewed-by: kbarrett, mgerdin
Stefan Karlsson 2015-05-22 10:58:16 +02:00
parent 21bb8edbba
commit 23b343af68
11 changed files with 21 additions and 25 deletions
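For context: MAX2 is HotSpot's two-argument maximum template (utilities/globalDefinitions.hpp), so MAX2(ParallelGCThreads, 1) differs from ParallelGCThreads only when the flag is 0; the premise of this change is that the flag is at least 1 wherever these calls appear. A minimal standalone sketch of that identity, using a simplified stand-in for the real MAX2:

    #include <cassert>
    #include <cstddef>

    // Simplified stand-in for HotSpot's MAX2 template.
    template <class T>
    static T MAX2(T a, T b) { return (a > b) ? a : b; }

    int main() {
      size_t ParallelGCThreads = 4;  // stand-in for the uintx flag
      // For any value >= 1 the clamp is a no-op, so the call can be dropped.
      assert(MAX2(ParallelGCThreads, (size_t)1) == ParallelGCThreads);
      return 0;
    }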


@@ -1475,9 +1475,9 @@ void ParNewGeneration::ref_processor_init() {
     _ref_processor =
       new ReferenceProcessor(_reserved,                  // span
                              ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
-                             (int) ParallelGCThreads,    // mt processing degree
+                             (uint) ParallelGCThreads,   // mt processing degree
                              refs_discovery_is_mt(),     // mt discovery
-                             (int) ParallelGCThreads,    // mt discovery degree
+                             (uint) ParallelGCThreads,   // mt discovery degree
                              refs_discovery_is_atomic(), // atomic_discovery
                              NULL);                      // is_alive_non_header
   }


@@ -35,7 +35,7 @@ ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h, CardTableEntryClosu
 {
   // Ergonomically select initial concurrent refinement parameters
   if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
-    FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2<int>(ParallelGCThreads, 1));
+    FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, (intx)ParallelGCThreads);
   }
   set_green_zone(G1ConcRefinementGreenZone);


@@ -518,7 +518,7 @@ ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev
   _markStack(this),
   // _finger set in set_non_marking_state
-  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
+  _max_worker_id((uint)ParallelGCThreads),
   // _active_tasks set in set_non_marking_state
   // _tasks set inside the constructor
   _task_queues(new CMTaskQueueSet((int) _max_worker_id)),


@@ -1752,7 +1752,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
   _allocator = G1Allocator::create_allocator(this);
   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
-  int n_queues = MAX2((int)ParallelGCThreads, 1);
+  int n_queues = (int)ParallelGCThreads;
   _task_queues = new RefToScanQueueSet(n_queues);
   uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
@@ -2064,11 +2064,11 @@ void G1CollectedHeap::ref_processing_init() {
     new ReferenceProcessor(mr,    // span
                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
                                // mt processing
-                           (int) ParallelGCThreads,
+                           (uint) ParallelGCThreads,
                                // degree of mt processing
                            (ParallelGCThreads > 1) || (ConcGCThreads > 1),
                                // mt discovery
-                           (int) MAX2(ParallelGCThreads, ConcGCThreads),
+                           (uint) MAX2(ParallelGCThreads, ConcGCThreads),
                                // degree of mt discovery
                            false,
                                // Reference discovery is not atomic
@@ -2081,11 +2081,11 @@ void G1CollectedHeap::ref_processing_init() {
     new ReferenceProcessor(mr,    // span
                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
                                // mt processing
-                           MAX2((int)ParallelGCThreads, 1),
+                           (uint) ParallelGCThreads,
                                // degree of mt processing
                            (ParallelGCThreads > 1),
                                // mt discovery
-                           MAX2((int)ParallelGCThreads, 1),
+                           (uint) ParallelGCThreads,
                                // degree of mt discovery
                            true,
                                // Reference discovery is atomic
@@ -2485,8 +2485,7 @@ void G1CollectedHeap::clear_cset_start_regions() {
   assert(_worker_cset_start_region != NULL, "sanity");
   assert(_worker_cset_start_region_time_stamp != NULL, "sanity");
-  int n_queues = MAX2((int)ParallelGCThreads, 1);
-  for (int i = 0; i < n_queues; i++) {
+  for (uint i = 0; i < ParallelGCThreads; i++) {
     _worker_cset_start_region[i] = NULL;
     _worker_cset_start_region_time_stamp[i] = 0;
   }
@@ -3844,8 +3843,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
       if (evacuation_failed()) {
         _allocator->set_used(recalculate_used());
-        uint n_queues = MAX2((int)ParallelGCThreads, 1);
-        for (uint i = 0; i < n_queues; i++) {
+        for (uint i = 0; i < ParallelGCThreads; i++) {
           if (_evacuation_failed_info_array[i].has_failed()) {
             _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
           }


@@ -50,8 +50,8 @@ void G1ParClosureSuper::set_par_scan_thread_state(G1ParScanThreadState* par_scan
   _par_scan_state = par_scan_state;
   _worker_id = par_scan_state->queue_num();
-  assert(_worker_id < MAX2((uint)ParallelGCThreads, 1u),
-         err_msg("The given worker id %u must be less than the number of threads %u", _worker_id, MAX2((uint)ParallelGCThreads, 1u)));
+  assert(_worker_id < ParallelGCThreads,
+         err_msg("The given worker id %u must be less than the number of threads " UINTX_FORMAT, _worker_id, ParallelGCThreads));
 }
 // Generate G1 specialized oop_oop_iterate functions.


@@ -42,7 +42,7 @@ G1StringDedupQueue::G1StringDedupQueue() :
   _cancel(false),
   _empty(true),
   _dropped(0) {
-  _nqueues = MAX2(ParallelGCThreads, (size_t)1);
+  _nqueues = ParallelGCThreads;
   _queues = NEW_C_HEAP_ARRAY(G1StringDedupWorkerQueue, _nqueues, mtGC);
   for (size_t i = 0; i < _nqueues; i++) {
     new (_queues + i) G1StringDedupWorkerQueue(G1StringDedupWorkerQueue::default_segment_size(), _max_cache_size, _max_size);


@@ -112,7 +112,7 @@ public:
 };
 G1StringDedupEntryCache::G1StringDedupEntryCache() {
-  _nlists = MAX2(ParallelGCThreads, (size_t)1);
+  _nlists = ParallelGCThreads;
   _lists = PaddedArray<G1StringDedupEntryFreeList, mtGC>::create_unfreeable((uint)_nlists);
 }


@@ -832,9 +832,9 @@ void PSParallelCompact::post_initialize() {
   _ref_processor =
     new ReferenceProcessor(mr,            // span
                            ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
-                           (int) ParallelGCThreads, // mt processing degree
+                           (uint) ParallelGCThreads, // mt processing degree
                            true,          // mt discovery
-                           (int) ParallelGCThreads, // mt discovery degree
+                           (uint) ParallelGCThreads, // mt discovery degree
                            true,          // atomic_discovery
                            &_is_alive_closure); // non-header is alive closure
   _counters = new CollectorCounters("PSParallelCompact", 1);


@@ -845,9 +845,9 @@ void PSScavenge::initialize() {
   _ref_processor =
     new ReferenceProcessor(mr,                  // span
                            ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
-                           (int) ParallelGCThreads, // mt processing degree
+                           (uint) ParallelGCThreads, // mt processing degree
                            true,                // mt discovery
-                           (int) ParallelGCThreads, // mt discovery degree
+                           (uint) ParallelGCThreads, // mt discovery degree
                            true,                // atomic_discovery
                            NULL);               // header provides liveness info


@@ -161,7 +161,7 @@ uint AdaptiveSizePolicy::calc_default_active_workers(uintx total_workers,
     }
     _debug_perturbation = !_debug_perturbation;
   }
-  assert((new_active_workers <= (uintx) ParallelGCThreads) &&
+  assert((new_active_workers <= ParallelGCThreads) &&
          (new_active_workers >= min_workers),
          "Jiggled active workers too much");
 }


@@ -1278,10 +1278,8 @@ void Arguments::set_cms_and_parnew_gc_flags() {
     // Preferred young gen size for "short" pauses:
     // upper bound depends on # of threads and NewRatio.
-    const uintx parallel_gc_threads =
-      (ParallelGCThreads == 0 ? 1 : ParallelGCThreads);
     const size_t preferred_max_new_size_unaligned =
-      MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * parallel_gc_threads));
+      MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * ParallelGCThreads));
     size_t preferred_max_new_size =
       align_size_up(preferred_max_new_size_unaligned, os::vm_page_size());
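A rough worked example of the simplified expression (illustrative numbers; ScaleForWordSize applies an additional word-size adjustment that is ignored here): with max_heap = 4 GB, NewRatio = 2, young_gen_per_worker = 64 MB and ParallelGCThreads = 8, the first operand is 4 GB / 3, about 1365 MB, and the second is 64 MB * 8 = 512 MB, so the unaligned preferred size is 512 MB before being rounded up to the VM page size. The removed parallel_gc_threads local only mattered when ParallelGCThreads was 0, in which case it substituted 1.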