Mirror of https://github.com/openjdk/jdk.git, synced 2025-09-15 16:44:36 +02:00
8026853: Prepare GC code for collector policy regression fix

Cleanup related to the NewSize and MaxNewSize bugs

Reviewed-by: tschatzl, jcoomes, ehelin
parent 5976b6915a
commit 9705a6e3f9
12 changed files with 39 additions and 55 deletions
@@ -217,7 +217,6 @@ private:
     return _during_marking;
   }

-private:
   enum PredictionConstants {
     TruncatedSeqLength = 10
   };
@@ -377,11 +377,6 @@ void G1RemSet::prepare_for_oops_into_collection_set_do() {
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   dcqs.concatenate_logs();

-  if (G1CollectedHeap::use_parallel_gc_threads()) {
-    // Don't set the number of workers here. It will be set
-    // when the task is run
-    // _seq_task->set_n_termination((int)n_workers());
-  }
   guarantee( _cards_scanned == NULL, "invariant" );
   _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers(), mtGC);
   for (uint i = 0; i < n_workers(); ++i) {
@@ -54,7 +54,6 @@ ASPSOldGen::ASPSOldGen(size_t initial_size,
                        int level) :
   PSOldGen(initial_size, min_size, size_limit, gen_name, level),
   _gen_size_limit(size_limit)
-
 {}

 ASPSOldGen::ASPSOldGen(PSVirtualSpace* vs,
@@ -65,13 +64,11 @@ ASPSOldGen::ASPSOldGen(PSVirtualSpace* vs,
                        int level) :
   PSOldGen(initial_size, min_size, size_limit, gen_name, level),
   _gen_size_limit(size_limit)
-
 {
   _virtual_space = vs;
 }

 void ASPSOldGen::initialize_work(const char* perf_data_name, int level) {
-
   PSOldGen::initialize_work(perf_data_name, level);

   // The old gen can grow to gen_size_limit(). _reserve reflects only
@@ -70,7 +70,6 @@ void ASPSYoungGen::initialize(ReservedSpace rs, size_t alignment) {
 }

 size_t ASPSYoungGen::available_for_expansion() {
-
   size_t current_committed_size = virtual_space()->committed_size();
   assert((gen_size_limit() >= current_committed_size),
          "generation size limit is wrong");
@@ -85,7 +84,6 @@ size_t ASPSYoungGen::available_for_expansion() {
 // Future implementations could check the survivors and if to_space is in the
 // right place (below from_space), take a chunk from to_space.
 size_t ASPSYoungGen::available_for_contraction() {
-
   size_t uncommitted_bytes = virtual_space()->uncommitted_size();
   if (uncommitted_bytes != 0) {
     return uncommitted_bytes;
@@ -121,7 +119,6 @@ size_t ASPSYoungGen::available_for_contraction() {
       gclog_or_tty->print_cr(" gen_avail %d K", gen_avail/K);
     }
     return result_aligned;
-
   }

   return 0;
@@ -35,7 +35,6 @@
 #include "utilities/ostream.hpp"

 class AdjoiningGenerations;
-class CollectorPolicy;
 class GCHeapSummary;
 class GCTaskManager;
 class GenerationSizer;
@@ -50,8 +49,8 @@ class ParallelScavengeHeap : public CollectedHeap {
 (apparently a whitespace-only change; the difference is not visible with indentation stripped)
   static PSOldGen* _old_gen;

   // Sizing policy for entire heap
   static PSAdaptiveSizePolicy* _size_policy;
   static PSGCAdaptivePolicyCounters* _gc_policy_counters;

   static ParallelScavengeHeap* _psh;

@@ -67,7 +66,8 @@ class ParallelScavengeHeap : public CollectedHeap {
   AdjoiningGenerations* _gens;
   unsigned int _death_march_count;

-  static GCTaskManager* _gc_task_manager; // The task manager.
+  // The task manager
+  static GCTaskManager* _gc_task_manager;

   void trace_heap(GCWhen::Type when, GCTracer* tracer);

@@ -80,15 +80,14 @@ class ParallelScavengeHeap : public CollectedHeap {
   HeapWord* mem_allocate_old_gen(size_t size);

 public:
-  ParallelScavengeHeap() : CollectedHeap() {
-    _death_march_count = 0;
+  ParallelScavengeHeap() : CollectedHeap(), _death_march_count(0) {
     set_alignment(_young_gen_alignment, intra_heap_alignment());
     set_alignment(_old_gen_alignment, intra_heap_alignment());
   }

   // Return the (conservative) maximum heap alignment
   static size_t conservative_max_heap_alignment() {
-    return intra_heap_alignment();
+    return GenCollectorPolicy::intra_heap_alignment();
   }

   // For use by VM operations
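The constructor hunk above folds the zeroing of _death_march_count into the member initializer list instead of assigning it in the constructor body. A minimal, hypothetical illustration of the same idiom (the Counter class below is invented for illustration and is not part of the patch):

  // Hypothetical example of the cleanup pattern used above: prefer the
  // member initializer list over assignment inside the constructor body.
  class Counter {
    unsigned int _count;
   public:
    // Before: Counter() { _count = 0; }
    // After: the member is initialized directly, in one step.
    Counter() : _count(0) {}
    unsigned int count() const { return _count; }
  };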
@@ -103,8 +102,8 @@ class ParallelScavengeHeap : public CollectedHeap {
 (apparently a whitespace-only change; the difference is not visible with indentation stripped)

   virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; }

   static PSYoungGen* young_gen() { return _young_gen; }
   static PSOldGen* old_gen() { return _old_gen; }

   virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; }

@@ -127,7 +126,7 @@ class ParallelScavengeHeap : public CollectedHeap {

   // The alignment used for eden and survivors within the young gen
   // and for boundary between young gen and old gen.
-  static size_t intra_heap_alignment() { return 64 * K * HeapWordSize; }
+  size_t intra_heap_alignment() { return GenCollectorPolicy::intra_heap_alignment(); }

   size_t capacity() const;
   size_t used() const;
@@ -157,16 +156,15 @@ class ParallelScavengeHeap : public CollectedHeap {
   virtual bool is_in_partial_collection(const void *p);
 #endif

   bool is_in_young(oop p); // reserved part
   bool is_in_old(oop p); // reserved part

   // Memory allocation. "gc_time_limit_was_exceeded" will
   // be set to true if the adaptive size policy determine that
   // an excessive amount of time is being spent doing collections
   // and caused a NULL to be returned. If a NULL is not returned,
   // "gc_time_limit_was_exceeded" has an undefined meaning.
-  HeapWord* mem_allocate(size_t size,
-                         bool* gc_overhead_limit_was_exceeded);
+  HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);

   // Allocation attempt(s) during a safepoint. It should never be called
   // to allocate a new TLAB as this allocation might be satisfied out
@@ -257,7 +255,7 @@ class ParallelScavengeHeap : public CollectedHeap {
 (apparently a whitespace-only change; the difference is not visible with indentation stripped)

   // Call these in sequential code around the processing of strong roots.
   class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
    public:
     ParStrongRootsScope();
     ~ParStrongRootsScope();
   };
@@ -46,8 +46,7 @@ PSAdaptiveSizePolicy::PSAdaptiveSizePolicy(size_t init_eden_size,
                                            init_survivor_size,
                                            gc_pause_goal_sec,
                                            gc_cost_ratio),
-  _collection_cost_margin_fraction(AdaptiveSizePolicyCollectionCostMargin/
-                                   100.0),
+  _collection_cost_margin_fraction(AdaptiveSizePolicyCollectionCostMargin / 100.0),
   _intra_generation_alignment(intra_generation_alignment),
   _live_at_last_full_gc(init_promo_size),
   _gc_minor_pause_goal_sec(gc_minor_pause_goal_sec),
@@ -160,7 +160,7 @@ size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
 void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
                                                 size_t init_promo_size,
                                                 size_t init_survivor_size) {
-  const double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
+  const double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0;
   _size_policy = new AdaptiveSizePolicy(init_eden_size,
                                         init_promo_size,
                                         init_survivor_size,
@@ -192,6 +192,7 @@ void GenCollectorPolicy::initialize_flags() {
     // make sure there room for eden and two survivor spaces
     vm_exit_during_initialization("Too small new size specified");
   }
+
   if (SurvivorRatio < 1 || NewRatio < 1) {
     vm_exit_during_initialization("Invalid young gen ratio specified");
   }
@@ -465,7 +466,7 @@ void TwoGenerationCollectorPolicy::initialize_size_info() {
             "generation sizes: using minimum heap = " SIZE_FORMAT,
             _min_heap_byte_size);
   }
-  if ((OldSize > _max_gen1_size)) {
+  if (OldSize > _max_gen1_size) {
     warning("Inconsistency between maximum heap size and maximum "
             "generation sizes: using maximum heap = " SIZE_FORMAT
             " -XX:OldSize flag is being ignored",
@@ -596,9 +597,7 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
       gc_count_before = Universe::heap()->total_collections();
     }

-    VM_GenCollectForAllocation op(size,
-                                  is_tlab,
-                                  gc_count_before);
+    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
     VMThread::execute(&op);
     if (op.prologue_succeeded()) {
       result = op.result();
@@ -833,8 +832,9 @@ MarkSweepPolicy::MarkSweepPolicy() {

 void MarkSweepPolicy::initialize_generations() {
   _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL);
-  if (_generations == NULL)
+  if (_generations == NULL) {
     vm_exit_during_initialization("Unable to allocate gen spec");
+  }

   if (UseParNewGC) {
     _generations[0] = new GenerationSpec(Generation::ParNew, _initial_gen0_size, _max_gen0_size);
@@ -843,8 +843,9 @@ void MarkSweepPolicy::initialize_generations() {
   }
   _generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_gen1_size, _max_gen1_size);

-  if (_generations[0] == NULL || _generations[1] == NULL)
+  if (_generations[0] == NULL || _generations[1] == NULL) {
     vm_exit_during_initialization("Unable to allocate gen spec");
+  }
 }

 void MarkSweepPolicy::initialize_gc_policy_counters() {
@@ -79,6 +79,7 @@ class CollectorPolicy : public CHeapObj<mtGC> {
   // Set to true when policy wants soft refs cleared.
   // Reset to false by gc after it clears all soft refs.
   bool _should_clear_all_soft_refs;
+
   // Set to true by the GC if the just-completed gc cleared all
   // softrefs. This is set to true whenever a gc clears all softrefs, and
   // set to false each time gc returns to the mutator. For example, in the
@@ -101,8 +102,8 @@ class CollectorPolicy : public CHeapObj<mtGC> {
 (apparently a whitespace-only change; the difference is not visible with indentation stripped)
   // Return maximum heap alignment that may be imposed by the policy
   static size_t compute_max_alignment();

   size_t min_alignment() { return _min_alignment; }
   size_t max_alignment() { return _max_alignment; }

   size_t initial_heap_byte_size() { return _initial_heap_byte_size; }
   size_t max_heap_byte_size() { return _max_heap_byte_size; }
@@ -248,7 +249,7 @@ class GenCollectorPolicy : public CollectorPolicy {
 (apparently a whitespace-only change; the difference is not visible with indentation stripped)

   virtual int number_of_generations() = 0;

   virtual GenerationSpec **generations() {
     assert(_generations != NULL, "Sanity check");
     return _generations;
   }
@@ -273,6 +274,12 @@ class GenCollectorPolicy : public CollectorPolicy {
   virtual void initialize_size_policy(size_t init_eden_size,
                                       size_t init_promo_size,
                                       size_t init_survivor_size);
+
+  // The alignment used for eden and survivors within the young gen
+  // and for boundary between young gen and old gen.
+  static size_t intra_heap_alignment() {
+    return 64 * K * HeapWordSize;
+  }
 };

 // All of hotspot's current collectors are subtypes of this
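Taken together with the ParallelScavengeHeap hunks earlier in the commit, the effect is that the 64K-word intra-heap alignment now has a single definition in GenCollectorPolicy, and the parallel heap delegates to it. A minimal standalone sketch of the resulting shape (K and HeapWordSize below are stand-ins for the HotSpot globals, and all unrelated members are omitted):

  #include <stddef.h>

  // Stand-ins for the HotSpot globals referenced by the real code.
  static const size_t K = 1024;
  static const size_t HeapWordSize = sizeof(void*);

  class GenCollectorPolicy {
   public:
    // Single home for the alignment used for eden/survivors and for the
    // boundary between young gen and old gen.
    static size_t intra_heap_alignment() { return 64 * K * HeapWordSize; }
  };

  class ParallelScavengeHeap {
   public:
    // Both entry points now delegate instead of duplicating the constant.
    static size_t conservative_max_heap_alignment() {
      return GenCollectorPolicy::intra_heap_alignment();
    }
    size_t intra_heap_alignment() {
      return GenCollectorPolicy::intra_heap_alignment();
    }
  };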
@@ -300,8 +307,8 @@ class TwoGenerationCollectorPolicy : public GenCollectorPolicy {
 (apparently a whitespace-only change; the difference is not visible with indentation stripped)
   // Inherited methods
   TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; }

   int number_of_generations() { return 2; }
   BarrierSet::Name barrier_set_name() { return BarrierSet::CardTableModRef; }

   virtual CollectorPolicy::Name kind() {
     return CollectorPolicy::TwoGenerationCollectorPolicyKind;
@@ -1053,12 +1053,6 @@ void GenCollectedHeap::save_marks() {
   }
 }

-void GenCollectedHeap::compute_new_generation_sizes(int collectedGen) {
-  for (int i = 0; i <= collectedGen; i++) {
-    _gens[i]->compute_new_size();
-  }
-}
-
 GenCollectedHeap* GenCollectedHeap::heap() {
   assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
   assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
@@ -86,10 +86,6 @@ public:
   NOT_PRODUCT(static size_t _skip_header_HeapWords;)

 protected:
-  // Directs each generation up to and including "collectedGen" to recompute
-  // its desired size.
-  void compute_new_generation_sizes(int collectedGen);
-
   // Helper functions for allocation
   HeapWord* attempt_allocation(size_t size,
                                bool is_tlab,
@@ -1021,7 +1021,7 @@ bool universe_post_init() {
 (apparently a whitespace-only change; the difference is not visible with indentation stripped)
   Universe::_virtual_machine_error_instance =
     InstanceKlass::cast(k)->allocate_instance(CHECK_false);

   Universe::_vm_exception = InstanceKlass::cast(k)->allocate_instance(CHECK_false);

   if (!DumpSharedSpaces) {
     // These are the only Java fields that are currently set during shared space dumping.
@@ -1408,7 +1408,7 @@ uintx Arguments::max_heap_for_compressed_oops() {
   // NULL page is located before the heap, we pad the NULL page to the conservative
   // maximum alignment that the GC may ever impose upon the heap.
   size_t displacement_due_to_null_page = align_size_up_(os::vm_page_size(),
-                                                        Arguments::conservative_max_heap_alignment());
+                                                        _conservative_max_heap_alignment);

   LP64_ONLY(return OopEncodingHeapMax - displacement_due_to_null_page);
   NOT_LP64(ShouldNotReachHere(); return 0);
@@ -2681,9 +2681,10 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
         describe_range_error(errcode);
         return JNI_EINVAL;
       }
-      FLAG_SET_CMDLINE(uintx, InitialHeapSize, (uintx)long_initial_heap_size);
+      set_min_heap_size((uintx)long_initial_heap_size);
       // Currently the minimum size and the initial heap sizes are the same.
-      set_min_heap_size(InitialHeapSize);
+      // Can be overridden with -XX:InitialHeapSize.
+      FLAG_SET_CMDLINE(uintx, InitialHeapSize, (uintx)long_initial_heap_size);
       // -Xmx
     } else if (match_option(option, "-Xmx", &tail) || match_option(option, "-XX:MaxHeapSize=", &tail)) {
       julong long_max_heap_size = 0;
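The reordered -Xms handling in the hunk above records the requested bytes as the minimum heap size first and only then seeds InitialHeapSize, whose new comment notes that it can still be overridden with -XX:InitialHeapSize. A rough standalone model of that ordering (simplified; the helper names below are invented for illustration):

  #include <stdint.h>
  #include <stdio.h>

  // Simplified model of the flag handling shown in the hunk above.
  static uint64_t min_heap_size = 0;      // pinned by -Xms
  static uint64_t initial_heap_size = 0;  // seeded by -Xms, overridable later

  static void handle_xms(uint64_t bytes) {
    min_heap_size = bytes;      // the minimum follows -Xms directly
    initial_heap_size = bytes;  // currently the same as the minimum
  }

  static void handle_initial_heap_size(uint64_t bytes) {
    initial_heap_size = bytes;  // -XX:InitialHeapSize overrides only this
  }

  int main() {
    handle_xms(256ULL * 1024 * 1024);                // -Xms256m
    handle_initial_heap_size(512ULL * 1024 * 1024);  // -XX:InitialHeapSize=512m
    printf("min=%llu initial=%llu\n",
           (unsigned long long)min_heap_size,
           (unsigned long long)initial_heap_size);
    return 0;
  }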