8061802: REDO - Remove the generations array

The _gens array is removed and replaced by explicit _young_gen and _old_gen variables.

Reviewed-by: kbarrett, mgerdin
Jesper Wilhelmsson 2014-08-22 10:10:08 +02:00
parent 0241e477f1
commit e238141b5c
9 changed files with 337 additions and 340 deletions
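The bulk of the change is mechanical: loops over the _gens array become two explicit calls on the named generations. A representative before/after pair, taken from capacity() in genCollectedHeap.cpp below:

    // Before: sum over the generations array.
    size_t GenCollectedHeap::capacity() const {
      size_t res = 0;
      for (int i = 0; i < _n_gens; i++) {
        res += _gens[i]->capacity();
      }
      return res;
    }

    // After: the two generations are named explicitly.
    size_t GenCollectedHeap::capacity() const {
      return _young_gen->capacity() + _old_gen->capacity();
    }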

GenCollectedHeap.java (SA agent)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,8 @@ import sun.jvm.hotspot.utilities.*;
 public class GenCollectedHeap extends SharedHeap {
   private static CIntegerField nGensField;
-  private static long gensOffset;
+  private static AddressField youngGenField;
+  private static AddressField oldGenField;
   private static AddressField genSpecsField;

   private static GenerationFactory genFactory;
@@ -52,7 +53,8 @@ public class GenCollectedHeap extends SharedHeap {
     Type type = db.lookupType("GenCollectedHeap");

     nGensField = type.getCIntegerField("_n_gens");
-    gensOffset = type.getField("_gens").getOffset();
+    youngGenField = type.getAddressField("_young_gen");
+    oldGenField = type.getAddressField("_old_gen");
     genSpecsField = type.getAddressField("_gen_specs");

     genFactory = new GenerationFactory();
@@ -68,18 +70,19 @@ public class GenCollectedHeap extends SharedHeap {
   public Generation getGen(int i) {
     if (Assert.ASSERTS_ENABLED) {
-      Assert.that((i >= 0) && (i < nGens()), "Index " + i +
-                  " out of range (should be between 0 and " + nGens() + ")");
+      Assert.that((i == 0) || (i == 1), "Index " + i +
+                  " out of range (should be 0 or 1)");
     }

-    if ((i < 0) || (i >= nGens())) {
+    switch (i) {
+    case 0:
+      return genFactory.newObject(youngGenField.getAddress());
+    case 1:
+      return genFactory.newObject(oldGenField.getAddress());
+    default:
+      // no generation for i, and assertions disabled.
       return null;
     }
-
-    Address genAddr = addr.getAddressAt(gensOffset +
-                                        (i * VM.getVM().getAddressSize()));
-    return genFactory.newObject(addr.getAddressAt(gensOffset +
-                                                  (i * VM.getVM().getAddressSize())));
   }

   public boolean isIn(Address a) {

concurrentMarkSweepGeneration.cpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -869,7 +869,7 @@ void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
     if (prev_level >= 0) {
       size_t prev_size = 0;
       GenCollectedHeap* gch = GenCollectedHeap::heap();
-      Generation* prev_gen = gch->_gens[prev_level];
+      Generation* prev_gen = gch->get_gen(prev_level);
       prev_size = prev_gen->capacity();
       gclog_or_tty->print_cr(" Younger gen size "SIZE_FORMAT,
                              prev_size/1000);

defNewGeneration.cpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -383,7 +383,7 @@ void DefNewGeneration::compute_new_size() {
   assert(next_level < gch->_n_gens,
          "DefNewGeneration cannot be an oldest gen");
-  Generation* next_gen = gch->_gens[next_level];
+  Generation* next_gen = gch->get_gen(next_level);
   size_t old_size = next_gen->capacity();
   size_t new_size_before = _virtual_space.committed_size();
   size_t min_new_size = spec()->init_size();

genCollectedHeap.cpp

@@ -86,6 +86,7 @@ jint GenCollectedHeap::initialize() {
   int i;

   _n_gens = gen_policy()->number_of_generations();
+  assert(_n_gens == 2, "There is no support for more than two generations");

   // While there are no constraints in the GC code that HeapWordSize
   // be any particular value, there are multiple other areas in the
@@ -126,11 +127,12 @@ jint GenCollectedHeap::initialize() {
   _gch = this;

-  for (i = 0; i < _n_gens; i++) {
-    ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(), false, false);
-    _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
-    heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
-  }
+  ReservedSpace young_rs = heap_rs.first_part(_gen_specs[0]->max_size(), false, false);
+  _young_gen = _gen_specs[0]->init(young_rs, 0, rem_set());
+  heap_rs = heap_rs.last_part(_gen_specs[0]->max_size());
+
+  ReservedSpace old_rs = heap_rs.first_part(_gen_specs[1]->max_size(), false, false);
+  _old_gen = _gen_specs[1]->init(old_rs, 1, rem_set());
+
   clear_incremental_collection_failed();

 #if INCLUDE_ALL_GCS
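The reservation is carved in address order: the young generation takes the first part of heap_rs and the old generation the remainder, so the young generation always sits at the lower addresses. A schematic sketch of the resulting layout, with sizes taken from the generation specs above:

    // Heap layout after initialize(), lowest address on the left:
    //
    //   +-----------------------------+-----------------------------+
    //   |  young generation           |  old generation             |
    //   |  _gen_specs[0]->max_size()  |  _gen_specs[1]->max_size()  |
    //   +-----------------------------+-----------------------------+
    //
    // This fixed ordering is what the simplified is_in_young() test
    // further down relies on: p is young iff
    // (HeapWord*)p < _old_gen->reserved().start().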
@@ -145,7 +147,6 @@ jint GenCollectedHeap::initialize() {
   return JNI_OK;
 }

-
 char* GenCollectedHeap::allocate(size_t alignment,
                                  ReservedSpace* heap_rs){
   const char overflow_msg[] = "The size of the object heap + VM data exceeds "
@@ -172,7 +173,6 @@ char* GenCollectedHeap::allocate(size_t alignment,
   return heap_rs->base();
 }

-
 void GenCollectedHeap::post_initialize() {
   SharedHeap::post_initialize();
   GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy();
@@ -195,41 +195,30 @@ void GenCollectedHeap::post_initialize() {
 void GenCollectedHeap::ref_processing_init() {
   SharedHeap::ref_processing_init();
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->ref_processor_init();
-  }
+  _young_gen->ref_processor_init();
+  _old_gen->ref_processor_init();
 }

 size_t GenCollectedHeap::capacity() const {
-  size_t res = 0;
-  for (int i = 0; i < _n_gens; i++) {
-    res += _gens[i]->capacity();
-  }
-  return res;
+  return _young_gen->capacity() + _old_gen->capacity();
 }

 size_t GenCollectedHeap::used() const {
-  size_t res = 0;
-  for (int i = 0; i < _n_gens; i++) {
-    res += _gens[i]->used();
-  }
-  return res;
+  return _young_gen->used() + _old_gen->used();
 }

 // Save the "used_region" for generations level and lower.
 void GenCollectedHeap::save_used_regions(int level) {
+  assert(level >= 0, "Illegal level parameter");
   assert(level < _n_gens, "Illegal level parameter");
-  for (int i = level; i >= 0; i--) {
-    _gens[i]->save_used_region();
-  }
+  if (level == 1) {
+    _old_gen->save_used_region();
+  }
+  _young_gen->save_used_region();
 }

 size_t GenCollectedHeap::max_capacity() const {
-  size_t res = 0;
-  for (int i = 0; i < _n_gens; i++) {
-    res += _gens[i]->max_capacity();
-  }
-  return res;
+  return _young_gen->max_capacity() + _old_gen->max_capacity();
 }

 // Update the _full_collections_completed counter
@@ -293,16 +282,20 @@ void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
 HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                                bool is_tlab,
                                                bool first_only) {
-  HeapWord* res;
-  for (int i = 0; i < _n_gens; i++) {
-    if (_gens[i]->should_allocate(size, is_tlab)) {
-      res = _gens[i]->allocate(size, is_tlab);
-      if (res != NULL) return res;
-      else if (first_only) break;
+  HeapWord* res = NULL;
+
+  if (_young_gen->should_allocate(size, is_tlab)) {
+    res = _young_gen->allocate(size, is_tlab);
+    if (res != NULL || first_only) {
+      return res;
     }
   }
-  // Otherwise...
-  return NULL;
+
+  if (_old_gen->should_allocate(size, is_tlab)) {
+    res = _old_gen->allocate(size, is_tlab);
+  }
+
+  return res;
 }

 HeapWord* GenCollectedHeap::mem_allocate(size_t size,
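The rewrite of attempt_allocation above keeps the old loop's young-first semantics for the two-generation case. A minimal standalone sketch (toy types, not HotSpot code) of the policy it encodes:

    #include <cstddef>

    // Toy stand-ins for Generation::should_allocate()/allocate().
    struct Gen {
      bool  willing;  // would should_allocate(size, is_tlab) return true?
      void* block;    // what allocate(size, is_tlab) would return (may be null)
    };

    void* attempt_allocation(const Gen& young, const Gen& old_gen, bool first_only) {
      void* res = nullptr;
      if (young.willing) {
        res = young.block;
        if (res != nullptr || first_only) {
          return res;  // success, or the caller wanted the young gen only
        }
      }
      if (old_gen.willing) {
        res = old_gen.block;  // fall back to the old generation
      }
      return res;
    }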
@@ -322,12 +315,107 @@ bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
          (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
 }

+void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
+                                          bool is_tlab, bool run_verification, bool clear_soft_refs,
+                                          bool restore_marks_for_biased_locking) {
+  // Timer for individual generations. Last argument is false: no CR
+  // FIXME: We should try to start the timing earlier to cover more of the GC pause
+  // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
+  // so we can assume here that the next GC id is what we want.
+  GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL, GCId::peek());
+  TraceCollectorStats tcs(gen->counters());
+  TraceMemoryManagerStats tmms(gen->kind(),gc_cause());
+
+  size_t prev_used = gen->used();
+  gen->stat_record()->invocations++;
+  gen->stat_record()->accumulated_time.start();
+
+  // Must be done anew before each collection because
+  // a previous collection will do mangling and will
+  // change top of some spaces.
+  record_gen_tops_before_GC();
+
+  if (PrintGC && Verbose) {
+    gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
+                        gen->level(),
+                        gen->stat_record()->invocations,
+                        size * HeapWordSize);
+  }
+
+  if (run_verification && VerifyBeforeGC) {
+    HandleMark hm;  // Discard invalid handles created during verification
+    Universe::verify(" VerifyBeforeGC:");
+  }
+  COMPILER2_PRESENT(DerivedPointerTable::clear());
+
+  if (restore_marks_for_biased_locking) {
+    // We perform this mark word preservation work lazily
+    // because it's only at this point that we know whether we
+    // absolutely have to do it; we want to avoid doing it for
+    // scavenge-only collections where it's unnecessary
+    BiasedLocking::preserve_marks();
+  }
+
+  // Do collection work
+  {
+    // Note on ref discovery: For what appear to be historical reasons,
+    // GCH enables and disabled (by enqueing) refs discovery.
+    // In the future this should be moved into the generation's
+    // collect method so that ref discovery and enqueueing concerns
+    // are local to a generation. The collect method could return
+    // an appropriate indication in the case that notification on
+    // the ref lock was needed. This will make the treatment of
+    // weak refs more uniform (and indeed remove such concerns
+    // from GCH). XXX
+
+    HandleMark hm;  // Discard invalid handles created during gc
+    save_marks();   // save marks for all gens
+    // We want to discover references, but not process them yet.
+    // This mode is disabled in process_discovered_references if the
+    // generation does some collection work, or in
+    // enqueue_discovered_references if the generation returns
+    // without doing any work.
+    ReferenceProcessor* rp = gen->ref_processor();
+    // If the discovery of ("weak") refs in this generation is
+    // atomic wrt other collectors in this configuration, we
+    // are guaranteed to have empty discovered ref lists.
+    if (rp->discovery_is_atomic()) {
+      rp->enable_discovery();
+      rp->setup_policy(clear_soft_refs);
+    } else {
+      // collect() below will enable discovery as appropriate
+    }
+    gen->collect(full, clear_soft_refs, size, is_tlab);
+    if (!rp->enqueuing_is_done()) {
+      rp->enqueue_discovered_references();
+    } else {
+      rp->set_enqueuing_is_done(false);
+    }
+    rp->verify_no_references_recorded();
+  }
+
+  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
+
+  gen->stat_record()->accumulated_time.stop();
+
+  update_gc_stats(gen->level(), full);
+
+  if (run_verification && VerifyAfterGC) {
+    HandleMark hm;  // Discard invalid handles created during verification
+    Universe::verify(" VerifyAfterGC:");
+  }
+
+  if (PrintGCDetails) {
+    gclog_or_tty->print(":");
+    gen->print_heap_change(prev_used);
+  }
+}
+
 void GenCollectedHeap::do_collection(bool full,
                                      bool clear_all_soft_refs,
                                      size_t size,
                                      bool is_tlab,
                                      int max_level) {
-  bool prepared_for_verification = false;
   ResourceMark rm;
   DEBUG_ONLY(Thread* my_thread = Thread::current();)
@@ -367,141 +455,62 @@ void GenCollectedHeap::do_collection(bool full,
   increment_total_collections(complete);

   size_t gch_prev_used = used();
+  bool run_verification = total_collections() >= VerifyGCStartAt;

-  int starting_level = 0;
-  if (full) {
-    // Search for the oldest generation which will collect all younger
-    // generations, and start collection loop there.
-    for (int i = max_level; i >= 0; i--) {
-      if (_gens[i]->full_collects_younger_generations()) {
-        starting_level = i;
-        break;
-      }
+  bool prepared_for_verification = false;
+  int max_level_collected = 0;
+  bool old_collects_young = (max_level == 1) &&
+                            full &&
+                            _old_gen->full_collects_younger_generations();
+  if (!old_collects_young &&
+      _young_gen->should_collect(full, size, is_tlab)) {
+    if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
+      prepare_for_verify();
+      prepared_for_verification = true;
     }
+
+    assert(!_young_gen->performs_in_place_marking(), "No young generation do in place marking");
+    collect_generation(_young_gen,
+                       full,
+                       size,
+                       is_tlab,
+                       run_verification && VerifyGCLevel <= 0,
+                       do_clear_all_soft_refs,
+                       false);
+
+    if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
+        size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
+      // Allocation request was met by young GC.
+      size = 0;
+    }
   }

   bool must_restore_marks_for_biased_locking = false;

-  int max_level_collected = starting_level;
-  for (int i = starting_level; i <= max_level; i++) {
-    if (_gens[i]->should_collect(full, size, is_tlab)) {
-      if (i == n_gens() - 1) {  // a major collection is to happen
-        if (!complete) {
-          // The full_collections increment was missed above.
-          increment_total_full_collections();
-        }
-        pre_full_gc_dump(NULL);    // do any pre full gc dumps
-      }
-      // Timer for individual generations. Last argument is false: no CR
-      // FIXME: We should try to start the timing earlier to cover more of the GC pause
-      // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
-      // so we can assume here that the next GC id is what we want.
-      GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL, GCId::peek());
-      TraceCollectorStats tcs(_gens[i]->counters());
-      TraceMemoryManagerStats tmms(_gens[i]->kind(),gc_cause());
-
-      size_t prev_used = _gens[i]->used();
-      _gens[i]->stat_record()->invocations++;
-      _gens[i]->stat_record()->accumulated_time.start();
-
-      // Must be done anew before each collection because
-      // a previous collection will do mangling and will
-      // change top of some spaces.
-      record_gen_tops_before_GC();
-
-      if (PrintGC && Verbose) {
-        gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
-                    i,
-                    _gens[i]->stat_record()->invocations,
-                    size*HeapWordSize);
-      }
-
-      if (VerifyBeforeGC && i >= VerifyGCLevel &&
-          total_collections() >= VerifyGCStartAt) {
-        HandleMark hm;  // Discard invalid handles created during verification
-        if (!prepared_for_verification) {
-          prepare_for_verify();
-          prepared_for_verification = true;
-        }
-        Universe::verify(" VerifyBeforeGC:");
-      }
-      COMPILER2_PRESENT(DerivedPointerTable::clear());
-
-      if (!must_restore_marks_for_biased_locking &&
-          _gens[i]->performs_in_place_marking()) {
-        // We perform this mark word preservation work lazily
-        // because it's only at this point that we know whether we
-        // absolutely have to do it; we want to avoid doing it for
-        // scavenge-only collections where it's unnecessary
-        must_restore_marks_for_biased_locking = true;
-        BiasedLocking::preserve_marks();
-      }
-
-      // Do collection work
-      {
-        // Note on ref discovery: For what appear to be historical reasons,
-        // GCH enables and disabled (by enqueing) refs discovery.
-        // In the future this should be moved into the generation's
-        // collect method so that ref discovery and enqueueing concerns
-        // are local to a generation. The collect method could return
-        // an appropriate indication in the case that notification on
-        // the ref lock was needed. This will make the treatment of
-        // weak refs more uniform (and indeed remove such concerns
-        // from GCH). XXX
-
-        HandleMark hm;  // Discard invalid handles created during gc
-        save_marks();   // save marks for all gens
-        // We want to discover references, but not process them yet.
-        // This mode is disabled in process_discovered_references if the
-        // generation does some collection work, or in
-        // enqueue_discovered_references if the generation returns
-        // without doing any work.
-        ReferenceProcessor* rp = _gens[i]->ref_processor();
-        // If the discovery of ("weak") refs in this generation is
-        // atomic wrt other collectors in this configuration, we
-        // are guaranteed to have empty discovered ref lists.
-        if (rp->discovery_is_atomic()) {
-          rp->enable_discovery();
-          rp->setup_policy(do_clear_all_soft_refs);
-        } else {
-          // collect() below will enable discovery as appropriate
-        }
-        _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
-        if (!rp->enqueuing_is_done()) {
-          rp->enqueue_discovered_references();
-        } else {
-          rp->set_enqueuing_is_done(false);
-        }
-        rp->verify_no_references_recorded();
-      }
-      max_level_collected = i;
-
-      // Determine if allocation request was met.
-      if (size > 0) {
-        if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
-          if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
-            size = 0;
-          }
-        }
-      }
-
-      COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
-
-      _gens[i]->stat_record()->accumulated_time.stop();
-
-      update_gc_stats(i, full);
-
-      if (VerifyAfterGC && i >= VerifyGCLevel &&
-          total_collections() >= VerifyGCStartAt) {
-        HandleMark hm;  // Discard invalid handles created during verification
-        Universe::verify(" VerifyAfterGC:");
-      }
-
-      if (PrintGCDetails) {
-        gclog_or_tty->print(":");
-        _gens[i]->print_heap_change(prev_used);
-      }
+  if (max_level == 1 && _old_gen->should_collect(full, size, is_tlab)) {
+    if (!complete) {
+      // The full_collections increment was missed above.
+      increment_total_full_collections();
     }
+
+    pre_full_gc_dump(NULL);    // do any pre full gc dumps
+
+    if (!prepared_for_verification && run_verification &&
+        VerifyGCLevel <= 1 && VerifyBeforeGC) {
+      prepare_for_verify();
+    }
+
+    assert(_old_gen->performs_in_place_marking(), "All old generations do in place marking");
+    collect_generation(_old_gen,
+                       full,
+                       size,
+                       is_tlab,
+                       run_verification && VerifyGCLevel <= 1,
+                       do_clear_all_soft_refs,
+                       true);
+
+    must_restore_marks_for_biased_locking = true;
+    max_level_collected = 1;
   }

   // Update "complete" boolean wrt what actually transpired --
@@ -523,10 +532,11 @@ void GenCollectedHeap::do_collection(bool full,
     }
   }

-  for (int j = max_level_collected; j >= 0; j -= 1) {
-    // Adjust generation sizes.
-    _gens[j]->compute_new_size();
+  // Adjust generation sizes.
+  if (max_level_collected == 1) {
+    _old_gen->compute_new_size();
   }
+  _young_gen->compute_new_size();

   if (complete) {
     // Delete metaspaces for unloaded class loaders and clean up loader_data graph
@@ -583,18 +593,18 @@ gen_process_roots(int level,

   if (younger_gens_as_roots) {
     if (!_gen_process_roots_tasks->is_task_claimed(GCH_PS_younger_gens)) {
-      for (int i = 0; i < level; i++) {
-        not_older_gens->set_generation(_gens[i]);
-        _gens[i]->oop_iterate(not_older_gens);
+      if (level == 1) {
+        not_older_gens->set_generation(_young_gen);
+        _young_gen->oop_iterate(not_older_gens);
       }
       not_older_gens->reset_generation();
     }
   }
   // When collection is parallel, all threads get to cooperate to do
   // older-gen scanning.
-  for (int i = level+1; i < _n_gens; i++) {
-    older_gens->set_generation(_gens[i]);
-    rem_set()->younger_refs_iterate(_gens[i], older_gens);
+  if (level == 0) {
+    older_gens->set_generation(_old_gen);
+    rem_set()->younger_refs_iterate(_old_gen, older_gens);
     older_gens->reset_generation();
   }
@@ -635,9 +645,8 @@ gen_process_roots(int level,
 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
   SharedHeap::process_weak_roots(root_closure);
   // "Local" "weak" refs
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->ref_processor()->weak_oops_do(root_closure);
-  }
+  _young_gen->ref_processor()->weak_oops_do(root_closure);
+  _old_gen->ref_processor()->weak_oops_do(root_closure);
 }

 #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
@@ -645,9 +654,11 @@ void GenCollectedHeap:: \
 oop_since_save_marks_iterate(int level,                                 \
                              OopClosureType* cur,                       \
                              OopClosureType* older) {                   \
-  _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur);           \
-  for (int i = level+1; i < n_gens(); i++) {                            \
-    _gens[i]->oop_since_save_marks_iterate##nv_suffix(older);           \
+  if (level == 0) {                                                     \
+    _young_gen->oop_since_save_marks_iterate##nv_suffix(cur);           \
+    _old_gen->oop_since_save_marks_iterate##nv_suffix(older);           \
+  } else {                                                              \
+    _old_gen->oop_since_save_marks_iterate##nv_suffix(cur);             \
   }                                                                     \
 }
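Because the macro hides the control flow, here is roughly what one instantiation expands to; ScanClosure and the _nv suffix are illustrative stand-ins for whatever ALL_SINCE_SAVE_MARKS_CLOSURES supplies:

    // Approximate expansion for one closure type: level 0 applies "cur" to
    // the young generation and "older" to the old one; level 1 applies
    // "cur" to the old generation only.
    void GenCollectedHeap::oop_since_save_marks_iterate(int level,
                                                        ScanClosure* cur,
                                                        ScanClosure* older) {
      if (level == 0) {
        _young_gen->oop_since_save_marks_iterate_nv(cur);
        _old_gen->oop_since_save_marks_iterate_nv(older);
      } else {
        _old_gen->oop_since_save_marks_iterate_nv(cur);
      }
    }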
@@ -656,22 +667,22 @@ ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN

 bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
-  for (int i = level; i < _n_gens; i++) {
-    if (!_gens[i]->no_allocs_since_save_marks()) return false;
+  if (level == 0 && !_young_gen->no_allocs_since_save_marks()) {
+    return false;
   }
-  return true;
+  return _old_gen->no_allocs_since_save_marks();
 }

 bool GenCollectedHeap::supports_inline_contig_alloc() const {
-  return _gens[0]->supports_inline_contig_alloc();
+  return _young_gen->supports_inline_contig_alloc();
 }

 HeapWord** GenCollectedHeap::top_addr() const {
-  return _gens[0]->top_addr();
+  return _young_gen->top_addr();
 }

 HeapWord** GenCollectedHeap::end_addr() const {
-  return _gens[0]->end_addr();
+  return _young_gen->end_addr();
 }

 // public collection interfaces
@@ -734,12 +745,12 @@ void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
 #if INCLUDE_ALL_GCS
 bool GenCollectedHeap::create_cms_collector() {
-  assert(_gens[1]->kind() == Generation::ConcurrentMarkSweep,
+  assert(_old_gen->kind() == Generation::ConcurrentMarkSweep,
          "Unexpected generation kinds");
   // Skip two header words in the block content verification
   NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
   CMSCollector* collector = new CMSCollector(
-    (ConcurrentMarkSweepGeneration*)_gens[1],
+    (ConcurrentMarkSweepGeneration*)_old_gen,
     _rem_set->as_CardTableRS(),
     (ConcurrentMarkSweepPolicy*) collector_policy());
@@ -806,8 +817,8 @@ void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
 }

 bool GenCollectedHeap::is_in_young(oop p) {
-  bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start();
-  assert(result == _gens[0]->is_in_reserved(p),
+  bool result = ((HeapWord*)p) < _old_gen->reserved().start();
+  assert(result == _young_gen->is_in_reserved(p),
          err_msg("incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p)));
   return result;
 }
@@ -825,13 +836,7 @@ bool GenCollectedHeap::is_in(const void* p) const {
          VMError::fatal_error_in_progress(), "too expensive");
 #endif

-  // This might be sped up with a cache of the last generation that
-  // answered yes.
-  for (int i = 0; i < _n_gens; i++) {
-    if (_gens[i]->is_in(p)) return true;
-  }
-  // Otherwise...
-  return false;
+  return _young_gen->is_in(p) || _old_gen->is_in(p);
 }

 #ifdef ASSERT
@@ -840,114 +845,97 @@ bool GenCollectedHeap::is_in(const void* p) const {
 bool GenCollectedHeap::is_in_partial_collection(const void* p) {
   assert(is_in_reserved(p) || p == NULL,
          "Does not work if address is non-null and outside of the heap");
-  return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
+  return p < _young_gen->reserved().end() && p != NULL;
 }
 #endif

 void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->oop_iterate(cl);
-  }
+  _young_gen->oop_iterate(cl);
+  _old_gen->oop_iterate(cl);
 }

 void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->object_iterate(cl);
-  }
+  _young_gen->object_iterate(cl);
+  _old_gen->object_iterate(cl);
 }

 void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->safe_object_iterate(cl);
-  }
+  _young_gen->safe_object_iterate(cl);
+  _old_gen->safe_object_iterate(cl);
 }

 Space* GenCollectedHeap::space_containing(const void* addr) const {
-  for (int i = 0; i < _n_gens; i++) {
-    Space* res = _gens[i]->space_containing(addr);
-    if (res != NULL) return res;
+  Space* res = _young_gen->space_containing(addr);
+  if (res != NULL) {
+    return res;
   }
-  // Otherwise...
-  assert(false, "Could not find containing space");
-  return NULL;
+  res = _old_gen->space_containing(addr);
+  assert(res != NULL, "Could not find containing space");
+  return res;
 }

 HeapWord* GenCollectedHeap::block_start(const void* addr) const {
   assert(is_in_reserved(addr), "block_start of address outside of heap");
-  for (int i = 0; i < _n_gens; i++) {
-    if (_gens[i]->is_in_reserved(addr)) {
-      assert(_gens[i]->is_in(addr),
-             "addr should be in allocated part of generation");
-      return _gens[i]->block_start(addr);
-    }
+  if (_young_gen->is_in_reserved(addr)) {
+    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
+    return _young_gen->block_start(addr);
   }
-  assert(false, "Some generation should contain the address");
-  return NULL;
+
+  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
+  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
+  return _old_gen->block_start(addr);
 }

 size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
   assert(is_in_reserved(addr), "block_size of address outside of heap");
-  for (int i = 0; i < _n_gens; i++) {
-    if (_gens[i]->is_in_reserved(addr)) {
-      assert(_gens[i]->is_in(addr),
-             "addr should be in allocated part of generation");
-      return _gens[i]->block_size(addr);
-    }
+  if (_young_gen->is_in_reserved(addr)) {
+    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
+    return _young_gen->block_size(addr);
   }
-  assert(false, "Some generation should contain the address");
-  return 0;
+
+  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
+  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
+  return _old_gen->block_size(addr);
 }

 bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
   assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
   assert(block_start(addr) == addr, "addr must be a block start");
-  for (int i = 0; i < _n_gens; i++) {
-    if (_gens[i]->is_in_reserved(addr)) {
-      return _gens[i]->block_is_obj(addr);
-    }
+  if (_young_gen->is_in_reserved(addr)) {
+    return _young_gen->block_is_obj(addr);
   }
-  assert(false, "Some generation should contain the address");
-  return false;
+
+  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
+  return _old_gen->block_is_obj(addr);
 }

 bool GenCollectedHeap::supports_tlab_allocation() const {
-  for (int i = 0; i < _n_gens; i += 1) {
-    if (_gens[i]->supports_tlab_allocation()) {
-      return true;
-    }
-  }
-  return false;
+  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
+  return _young_gen->supports_tlab_allocation();
 }

 size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
-  size_t result = 0;
-  for (int i = 0; i < _n_gens; i += 1) {
-    if (_gens[i]->supports_tlab_allocation()) {
-      result += _gens[i]->tlab_capacity();
-    }
-  }
-  return result;
+  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
+  if (_young_gen->supports_tlab_allocation()) {
+    return _young_gen->tlab_capacity();
+  }
+  return 0;
 }

 size_t GenCollectedHeap::tlab_used(Thread* thr) const {
-  size_t result = 0;
-  for (int i = 0; i < _n_gens; i += 1) {
-    if (_gens[i]->supports_tlab_allocation()) {
-      result += _gens[i]->tlab_used();
-    }
-  }
-  return result;
+  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
+  if (_young_gen->supports_tlab_allocation()) {
+    return _young_gen->tlab_used();
+  }
+  return 0;
 }

 size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
-  size_t result = 0;
-  for (int i = 0; i < _n_gens; i += 1) {
-    if (_gens[i]->supports_tlab_allocation()) {
-      result += _gens[i]->unsafe_max_tlab_alloc();
-    }
-  }
-  return result;
+  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
+  if (_young_gen->supports_tlab_allocation()) {
+    return _young_gen->unsafe_max_tlab_alloc();
+  }
+  return 0;
 }

 HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
@@ -996,17 +984,15 @@ static void sort_scratch_list(ScratchBlock*& list) {
 ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                                size_t max_alloc_words) {
   ScratchBlock* res = NULL;
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
-  }
+  _young_gen->contribute_scratch(res, requestor, max_alloc_words);
+  _old_gen->contribute_scratch(res, requestor, max_alloc_words);
   sort_scratch_list(res);
   return res;
 }

 void GenCollectedHeap::release_scratch() {
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->reset_scratch();
-  }
+  _young_gen->reset_scratch();
+  _old_gen->reset_scratch();
 }

 class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
@@ -1021,39 +1007,29 @@ void GenCollectedHeap::prepare_for_verify() {
   generation_iterate(&blk, false);
 }

 void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                           bool old_to_young) {
   if (old_to_young) {
-    for (int i = _n_gens-1; i >= 0; i--) {
-      cl->do_generation(_gens[i]);
-    }
+    cl->do_generation(_old_gen);
+    cl->do_generation(_young_gen);
   } else {
-    for (int i = 0; i < _n_gens; i++) {
-      cl->do_generation(_gens[i]);
-    }
+    cl->do_generation(_young_gen);
+    cl->do_generation(_old_gen);
   }
 }

 void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->space_iterate(cl, true);
-  }
+  _young_gen->space_iterate(cl, true);
+  _old_gen->space_iterate(cl, true);
 }

 bool GenCollectedHeap::is_maximal_no_gc() const {
-  for (int i = 0; i < _n_gens; i++) {
-    if (!_gens[i]->is_maximal_no_gc()) {
-      return false;
-    }
-  }
-  return true;
+  return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
 }

 void GenCollectedHeap::save_marks() {
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->save_marks();
-  }
+  _young_gen->save_marks();
+  _old_gen->save_marks();
 }

 GenCollectedHeap* GenCollectedHeap::heap() {
@@ -1065,27 +1041,33 @@ GenCollectedHeap* GenCollectedHeap::heap() {
 void GenCollectedHeap::prepare_for_compaction() {
   guarantee(_n_gens = 2, "Wrong number of generations");
-  Generation* old_gen = _gens[1];
   // Start by compacting into same gen.
-  CompactPoint cp(old_gen);
-  old_gen->prepare_for_compaction(&cp);
-  Generation* young_gen = _gens[0];
-  young_gen->prepare_for_compaction(&cp);
+  CompactPoint cp(_old_gen);
+  _old_gen->prepare_for_compaction(&cp);
+  _young_gen->prepare_for_compaction(&cp);
 }

 GCStats* GenCollectedHeap::gc_stats(int level) const {
-  return _gens[level]->gc_stats();
+  if (level == 0) {
+    return _young_gen->gc_stats();
+  } else {
+    return _old_gen->gc_stats();
+  }
 }

 void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
-  for (int i = _n_gens-1; i >= 0; i--) {
-    Generation* g = _gens[i];
-    if (!silent) {
-      gclog_or_tty->print("%s", g->name());
-      gclog_or_tty->print(" ");
-    }
-    g->verify();
+  if (!silent) {
+    gclog_or_tty->print("%s", _old_gen->name());
+    gclog_or_tty->print(" ");
   }
+  _old_gen->verify();
+
+  if (!silent) {
+    gclog_or_tty->print("%s", _young_gen->name());
+    gclog_or_tty->print(" ");
+  }
+  _young_gen->verify();
+
   if (!silent) {
     gclog_or_tty->print("remset ");
   }
@@ -1093,9 +1075,8 @@ void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
 }

 void GenCollectedHeap::print_on(outputStream* st) const {
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->print_on(st);
-  }
+  _young_gen->print_on(st);
+  _old_gen->print_on(st);
   MetaspaceAux::print_on(st);
 }

genCollectedHeap.hpp

@@ -33,7 +33,7 @@
 class SubTasksDone;

 // A "GenCollectedHeap" is a SharedHeap that uses generational
-// collection.  It is represented with a sequence of Generation's.
+// collection.  It has two generations, young and old.
 class GenCollectedHeap : public SharedHeap {
   friend class GenCollectorPolicy;
   friend class Generation;
@ -63,7 +63,10 @@ public:
private: private:
int _n_gens; int _n_gens;
Generation* _gens[max_gens];
Generation* _young_gen;
Generation* _old_gen;
GenerationSpec** _gen_specs; GenerationSpec** _gen_specs;
// The singleton Gen Remembered Set. // The singleton Gen Remembered Set.
@@ -85,6 +88,11 @@ public:
   SubTasksDone* _gen_process_roots_tasks;
   SubTasksDone* gen_process_roots_tasks() { return _gen_process_roots_tasks; }

+  // Collects the given generation.
+  void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
+                          bool run_verification, bool clear_soft_refs,
+                          bool restore_marks_for_biased_locking);
+
   // In block contents verification, the number of header words to skip
   NOT_PRODUCT(static size_t _skip_header_HeapWords;)
@@ -138,8 +146,12 @@ public:
     return CollectedHeap::GenCollectedHeap;
   }

+  Generation* young_gen() { return _young_gen; }
+  Generation* old_gen()   { return _old_gen; }
+
   // The generational collector policy.
   GenCollectorPolicy* gen_policy() const { return _gen_policy; }
+
   virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) gen_policy(); }

   // Adaptive size policy
@@ -309,20 +321,17 @@ public:
   // Update above counter, as appropriate, at the end of a concurrent GC cycle
   unsigned int update_full_collections_completed(unsigned int count);

-  // Update "time of last gc" for all constituent generations
-  // to "now".
+  // Update "time of last gc" for all generations to "now".
   void update_time_of_last_gc(jlong now) {
-    for (int i = 0; i < _n_gens; i++) {
-      _gens[i]->update_time_of_last_gc(now);
-    }
+    _young_gen->update_time_of_last_gc(now);
+    _old_gen->update_time_of_last_gc(now);
   }

   // Update the gc statistics for each generation.
   // "level" is the level of the latest collection.
   void update_gc_stats(int current_level, bool full) {
-    for (int i = 0; i < _n_gens; i++) {
-      _gens[i]->update_gc_stats(current_level, full);
-    }
+    _young_gen->update_gc_stats(current_level, full);
+    _old_gen->update_gc_stats(current_level, full);
   }

   // Override.
@@ -366,21 +375,23 @@ public:
   // Return the generation before "gen".
   Generation* prev_gen(Generation* gen) const {
-    int l = gen->level();
-    guarantee(l > 0, "Out of bounds");
-    return _gens[l-1];
+    guarantee(gen->level() == 1, "Out of bounds");
+    return _young_gen;
   }

   // Return the generation after "gen".
   Generation* next_gen(Generation* gen) const {
-    int l = gen->level() + 1;
-    guarantee(l < _n_gens, "Out of bounds");
-    return _gens[l];
+    guarantee(gen->level() == 0, "Out of bounds");
+    return _old_gen;
   }

   Generation* get_gen(int i) const {
-    guarantee(i >= 0 && i < _n_gens, "Out of bounds");
-    return _gens[i];
+    guarantee(i == 0 || i == 1, "Out of bounds");
+    if (i == 0) {
+      return _young_gen;
+    } else {
+      return _old_gen;
+    }
   }

   int n_gens() const {

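With the hierarchy fixed at two levels, level 0 is always the young generation and level 1 the old one. A small usage sketch of the accessors above, assuming gch is an initialized GenCollectedHeap*:

    // Navigating the fixed two-generation hierarchy.
    Generation* young = gch->get_gen(0);   // same object as gch->young_gen()
    Generation* old_g = gch->get_gen(1);   // same object as gch->old_gen()
    assert(gch->next_gen(young) == old_g, "level 0 is followed by level 1");
    assert(gch->prev_gen(old_g) == young, "level 1 is preceded by level 0");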
genMarkSweep.cpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -159,7 +159,7 @@ void GenMarkSweep::allocate_stacks() {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   // Scratch request on behalf of oldest generation; will do no
   // allocation.
-  ScratchBlock* scratch = gch->gather_scratch(gch->_gens[gch->_n_gens-1], 0);
+  ScratchBlock* scratch = gch->gather_scratch(gch->get_gen(gch->_n_gens-1), 0);
   // $$$ To cut a corner, we'll only use the first scratch block, and then
   // revert to malloc.

generation.cpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -155,7 +155,7 @@ Generation* Generation::next_gen() const {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   int next = level() + 1;
   if (next < gch->_n_gens) {
-    return gch->_gens[next];
+    return gch->get_gen(next);
   } else {
     return NULL;
   }

vmStructs.cpp

@@ -552,8 +552,9 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
   nonstatic_field(GenerationSpec, _max_size, size_t) \
 \
   static_field(GenCollectedHeap, _gch, GenCollectedHeap*) \
+  nonstatic_field(GenCollectedHeap, _young_gen, Generation*) \
+  nonstatic_field(GenCollectedHeap, _old_gen, Generation*) \
   nonstatic_field(GenCollectedHeap, _n_gens, int) \
-  unchecked_nonstatic_field(GenCollectedHeap, _gens, sizeof(GenCollectedHeap::_gens)) /* NOTE: no type */ \
   nonstatic_field(GenCollectedHeap, _gen_specs, GenerationSpec**) \
 \
   nonstatic_field(HeapWord, i, char*) \

CompressedClassSpaceSizeInJmapHeap.java (test)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,7 @@ import java.nio.charset.Charset;
 import java.util.List;

 public class CompressedClassSpaceSizeInJmapHeap {
+    // Note that on some platforms it may require root privileges to run this test.
     public static void main(String[] args) throws Exception {
         if (!Platform.is64bit()) {
             // Compressed Class Space is only available on 64-bit JVMs