6964458: Reimplement class meta-data storage to use native memory

Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes

Co-authored-by: Stefan Karlsson <stefan.karlsson@oracle.com>
Co-authored-by: Mikael Gerdin <mikael.gerdin@oracle.com>
Co-authored-by: Tom Rodriguez <tom.rodriguez@oracle.com>
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Jon Masamitsu 2012-09-01 13:25:18 -04:00 committed by Coleen Phillimore
parent 36eee7c8c8
commit 5c58d27aac
853 changed files with 26124 additions and 82956 deletions
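
Note: the hunks below show the recurring pattern of this change. Class metadata used to live in the GC-managed permanent generation and was marked, moved, and compacted like any other Java object; after this commit it lives in native-memory metaspaces owned by class loaders and is freed wholesale when a loader is unloaded. A rough sketch of that ownership model, in plain C++ with invented names (not HotSpot code):

  #include <memory>
  #include <utility>
  #include <vector>

  struct Metadata {           // stands in for Klass, Method, ConstantPool, ...
    virtual ~Metadata() {}
  };

  struct ClassLoaderArena {   // stands in for a ClassLoaderData's metaspace
    std::vector<std::unique_ptr<Metadata>> chunks;

    template <typename T, typename... Args>
    T* allocate(Args&&... args) {
      auto obj = std::make_unique<T>(std::forward<Args>(args)...);
      T* raw = obj.get();
      chunks.push_back(std::move(obj));
      return raw;             // pointer stays valid until the arena dies
    }
  };
  // e.g. struct MyKlass : Metadata {}; arena.allocate<MyKlass>();
  // When the loader is unloaded, the whole arena is released at once.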

hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp

@@ -36,7 +36,6 @@
 #include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
-#include "gc_implementation/parallelScavenge/psPermGen.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
@@ -45,7 +44,7 @@
 #include "memory/gcLocker.inline.hpp"
 #include "memory/referencePolicy.hpp"
 #include "memory/referenceProcessor.hpp"
-#include "oops/methodDataOop.hpp"
+#include "oops/methodData.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/oop.pcgc.inline.hpp"
 #include "runtime/fprofiler.hpp"
@@ -90,7 +89,7 @@ SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
 bool PSParallelCompact::_print_phases = false;
 ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;
-klassOop PSParallelCompact::_updated_int_array_klass_obj = NULL;
+Klass* PSParallelCompact::_updated_int_array_klass_obj = NULL;
 double PSParallelCompact::_dwl_mean;
 double PSParallelCompact::_dwl_std_dev;
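
The klassOop-to-Klass* renames throughout this file are the mechanical face of the change: a klassOop was an ordinary heap oop wrapping the Klass, so the collector could move it; a Klass* is now a raw C++ pointer into metaspace that heap compaction never relocates. A minimal sketch of the resulting shape, with invented field names:

  struct Klass {};              // now a plain C++ class, allocated in metaspace

  struct oopDesc {              // every Java object header holds ...
    Klass* _klass;              // ... a raw pointer to its Klass
    Klass* klass() const { return _klass; }
  };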
@@ -106,7 +105,6 @@ GrowableArray<oop> * PSParallelCompact::_live_oops = NULL;
 GrowableArray<oop> * PSParallelCompact::_live_oops_moved_to = NULL;
 GrowableArray<size_t>* PSParallelCompact::_live_oops_size = NULL;
 size_t PSParallelCompact::_live_oops_index = 0;
-size_t PSParallelCompact::_live_oops_index_at_perm = 0;
 GrowableArray<void*>* PSParallelCompact::_other_refs_stack = NULL;
 GrowableArray<void*>* PSParallelCompact::_adjusted_pointers = NULL;
 bool PSParallelCompact::_pointer_tracking = false;
@@ -188,7 +186,7 @@ void SplitInfo::verify_clear()
 #ifndef PRODUCT
 const char* PSParallelCompact::space_names[] = {
-  "perm", "old ", "eden", "from", "to "
+  "old ", "eden", "from", "to "
 };

 void PSParallelCompact::print_region_ranges()
@@ -347,7 +345,7 @@ print_initial_summary_data(ParallelCompactData& summary_data,
 void
 print_initial_summary_data(ParallelCompactData& summary_data,
                            SpaceInfo* space_info) {
-  unsigned int id = PSParallelCompact::perm_space_id;
+  unsigned int id = PSParallelCompact::old_space_id;
   const MutableSpace* space;
   do {
     space = space_info[id].space();
@@ -480,7 +478,7 @@ void ParallelCompactData::add_obj(HeapWord* addr, size_t len)
   const size_t beg_ofs = region_offset(addr);
   _region_data[beg_region].add_live_obj(RegionSize - beg_ofs);

-  klassOop klass = ((oop)addr)->klass();
+  Klass* klass = ((oop)addr)->klass();

   // Middle regions--completely spanned by this object.
   for (size_t region = beg_region + 1; region < end_region; ++region) {
     _region_data[region].set_partial_obj_size(RegionSize);
@@ -765,17 +763,6 @@ HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
   return result;
 }

-klassOop ParallelCompactData::calc_new_klass(klassOop old_klass) {
-  klassOop updated_klass;
-  if (PSParallelCompact::should_update_klass(old_klass)) {
-    updated_klass = (klassOop) calc_new_pointer(old_klass);
-  } else {
-    updated_klass = old_klass;
-  }
-  return updated_klass;
-}
-
 #ifdef ASSERT
 void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
 {
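
calc_new_klass() can be deleted because compaction forwarding now only ever applies to Java-heap addresses; a klass pointer refers outside the heap and passes through unchanged. A toy illustration of that invariant (an assumed simplification, not HotSpot code):

  #include <cstdint>
  #include <unordered_map>

  using Addr = std::uintptr_t;

  // Only heap addresses appear in the forwarding table, so there is no
  // "calc_new_klass" step left: metadata pointers come back untouched.
  Addr adjust(const std::unordered_map<Addr, Addr>& forwarding, Addr p) {
    auto it = forwarding.find(p);
    return it != forwarding.end() ? it->second : p;
  }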
@@ -817,15 +804,25 @@ void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompa
 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true);
 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false);
+PSParallelCompact::AdjustKlassClosure PSParallelCompact::_adjust_klass_closure;

 void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p, _is_root); }
 void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }

 void PSParallelCompact::FollowStackClosure::do_void() { _compaction_manager->follow_marking_stacks(); }

-void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p) { mark_and_push(_compaction_manager, p); }
+void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p) {
+  mark_and_push(_compaction_manager, p);
+}
 void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); }

+void PSParallelCompact::FollowKlassClosure::do_klass(Klass* klass) {
+  klass->oops_do(_mark_and_push_closure);
+}
+void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) {
+  klass->oops_do(&PSParallelCompact::_adjust_root_pointer_closure);
+}
+
 void PSParallelCompact::post_initialize() {
   ParallelScavengeHeap* heap = gc_heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
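
The two new closures are the metadata counterpart of the familiar oop closures: a KlassClosure is handed each Klass reached from a class loader, and the Klass feeds its embedded heap references (its Java mirror, for instance) back into an OopClosure for the current phase. A stripped-down sketch of that shape, with invented names:

  struct OopClosure {
    virtual void do_oop(void** p) = 0;
    virtual ~OopClosure() {}
  };

  struct Klass {
    void* _java_mirror;                       // oop back into the Java heap
    void oops_do(OopClosure* cl) { cl->do_oop(&_java_mirror); }
  };

  struct KlassClosure {
    virtual void do_klass(Klass* k) = 0;
    virtual ~KlassClosure() {}
  };

  struct FollowKlass : KlassClosure {         // mirrors FollowKlassClosure above
    OopClosure* _mark;
    explicit FollowKlass(OopClosure* m) : _mark(m) {}
    void do_klass(Klass* k) override { k->oops_do(_mark); }
  };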
@@ -880,22 +877,13 @@ void PSParallelCompact::initialize_space_info()
   ParallelScavengeHeap* heap = gc_heap();
   PSYoungGen* young_gen = heap->young_gen();
-  MutableSpace* perm_space = heap->perm_gen()->object_space();

-  _space_info[perm_space_id].set_space(perm_space);
   _space_info[old_space_id].set_space(heap->old_gen()->object_space());
   _space_info[eden_space_id].set_space(young_gen->eden_space());
   _space_info[from_space_id].set_space(young_gen->from_space());
   _space_info[to_space_id].set_space(young_gen->to_space());

-  _space_info[perm_space_id].set_start_array(heap->perm_gen()->start_array());
   _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());

-  _space_info[perm_space_id].set_min_dense_prefix(perm_space->top());
-  if (TraceParallelOldGCDensePrefix) {
-    tty->print_cr("perm min_dense_prefix=" PTR_FORMAT,
-                  _space_info[perm_space_id].min_dense_prefix());
-  }
 }

 void PSParallelCompact::initialize_dead_wood_limiter()
@@ -919,19 +907,19 @@ public:
     _heap_used      = heap->used();
     _young_gen_used = heap->young_gen()->used_in_bytes();
     _old_gen_used   = heap->old_gen()->used_in_bytes();
-    _perm_gen_used  = heap->perm_gen()->used_in_bytes();
+    _metadata_used  = MetaspaceAux::used_in_bytes();
   };

   size_t heap_used() const      { return _heap_used; }
   size_t young_gen_used() const { return _young_gen_used; }
   size_t old_gen_used() const   { return _old_gen_used; }
-  size_t perm_gen_used() const  { return _perm_gen_used; }
+  size_t metadata_used() const  { return _metadata_used; }

 private:
   size_t _heap_used;
   size_t _young_gen_used;
   size_t _old_gen_used;
-  size_t _perm_gen_used;
+  size_t _metadata_used;
 };

 void
@@ -976,7 +964,6 @@ void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
   pre_gc_values->fill(heap);

   ParCompactionManager::reset();
   NOT_PRODUCT(_mark_bitmap.reset_counters());
   DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
   DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)
@@ -1003,7 +990,6 @@ void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
   if (VerifyObjectStartArray &&
       VerifyBeforeGC) {
     heap->old_gen()->verify_object_start_array();
-    heap->perm_gen()->verify_object_start_array();
   }

   DEBUG_ONLY(mark_bitmap()->verify_clear();)
@@ -1017,7 +1003,7 @@ void PSParallelCompact::post_compact()
 {
   TraceTime tm("post compact", print_phases(), true, gclog_or_tty);

-  for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
+  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
     // Clear the marking bitmap, summary data and split info.
     clear_data_covering_space(SpaceId(id));
     // Update top().  Must be done after clearing the bitmap and summary data.
@@ -1046,16 +1032,17 @@ void PSParallelCompact::post_compact()
   if (bs->is_a(BarrierSet::ModRef)) {
     ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
     MemRegion old_mr = heap->old_gen()->reserved();
-    MemRegion perm_mr = heap->perm_gen()->reserved();
-    assert(perm_mr.end() <= old_mr.start(), "Generations out of order");

     if (young_gen_empty) {
-      modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
+      modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
     } else {
-      modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
+      modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
     }
   }

+  // Delete metaspaces for unloaded class loaders and clean up loader_data graph
+  ClassLoaderDataGraph::purge();
+
   Threads::gc_epilogue();
   CodeCache::gc_epilogue();
   JvmtiExport::gc_epilogue();
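
ClassLoaderDataGraph::purge() is where the metaspace model pays off: once marking has decided which class loaders are dead, their metadata is released in bulk rather than being compacted away object by object. A hedged sketch of what such a purge pass amounts to (invented names, not the real data structure):

  #include <list>

  struct LoaderData {
    bool alive;
    // ... owns a metaspace arena; the destructor releases it ...
  };

  void purge(std::list<LoaderData*>& graph) {
    for (auto it = graph.begin(); it != graph.end(); ) {
      if (!(*it)->alive) {
        delete *it;              // frees the loader's metadata in one shot
        it = graph.erase(it);
      } else {
        ++it;
      }
    }
  }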
@@ -1409,8 +1396,7 @@ PSParallelCompact::compute_dense_prefix(const SpaceId id,
   const size_t space_capacity = space->capacity_in_words();

   const double density = double(space_live) / double(space_capacity);
-  const size_t min_percent_free =
-    id == perm_space_id ? PermMarkSweepDeadRatio : MarkSweepDeadRatio;
+  const size_t min_percent_free = MarkSweepDeadRatio;
   const double limiter = dead_wood_limiter(density, min_percent_free);
   const size_t dead_wood_max = space_used - space_live;
   const size_t dead_wood_limit = MIN2(size_t(space_capacity * limiter),
@@ -1868,7 +1854,6 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
   // The amount of live data that will end up in old space (assuming it fits).
   size_t old_space_total_live = 0;
-  assert(perm_space_id < old_space_id, "should not count perm data here");
   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
     old_space_total_live += pointer_delta(_space_info[id].new_top(),
                                           _space_info[id].space()->bottom());
@@ -1886,8 +1871,7 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
   }
 #endif // #ifndef PRODUCT

-  // Permanent and Old generations.
-  summarize_space(perm_space_id, maximum_compaction);
+  // Old generations.
   summarize_space(old_space_id, maximum_compaction);

   // Summarize the remaining spaces in the young gen.  The initial target space
@@ -2013,7 +1997,6 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
   GCCause::Cause gc_cause = heap->gc_cause();
   PSYoungGen* young_gen = heap->young_gen();
   PSOldGen* old_gen = heap->old_gen();
-  PSPermGen* perm_gen = heap->perm_gen();
   PSAdaptiveSizePolicy* size_policy = heap->size_policy();

   // The scope of casr should end after code that can change
@@ -2062,8 +2045,6 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
     // Let the size policy know we're starting
     size_policy->major_collection_begin();

-    // When collecting the permanent generation methodOops may be moving,
-    // so we either have to flush all bcp data or convert it into bci.
     CodeCache::gc_prologue();
     Threads::gc_prologue();
@@ -2098,10 +2079,6 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
     adjust_roots();

     compaction_start.update();
-    // Does the perm gen always have to be done serially because
-    // klasses are used in the update of an object?
-    compact_perm(vmthread_cm);
-
     compact();

     // Reset the mark bitmap, summary data, and do other bookkeeping.  Must be
@@ -2118,10 +2095,8 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
       gclog_or_tty->print_cr(" collection: %d ",
                              heap->total_collections());
       if (Verbose) {
-        gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
-                            " perm_gen_capacity: %d ",
-                            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
-                            perm_gen->capacity_in_bytes());
+        gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d",
+                            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
       }
     }
@@ -2142,7 +2117,6 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
         young_gen->used_in_bytes(),
         young_gen->eden_space()->used_in_bytes(),
         old_gen->used_in_bytes(),
-        perm_gen->used_in_bytes(),
         young_gen->eden_space()->capacity_in_bytes(),
         old_gen->max_gen_size(),
         max_eden_size,
@@ -2175,8 +2149,8 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
       heap->resize_all_tlabs();

-      // We collected the perm gen, so we'll resize it here.
-      perm_gen->compute_new_size(pre_gc_values.perm_gen_used());
+      // Resize the metaspace capacity after a collection
+      MetaspaceGC::compute_new_size();
     }

     if (TraceGen1Time) accumulated_time()->stop();
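
Metaspace is never compacted, so instead of perm_gen->compute_new_size() the collector adjusts the metaspace high-water mark that decides when the next GC is induced. Roughly, and with made-up parameter names (the real policy is driven by flags such as MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio):

  #include <cstddef>

  size_t compute_new_capacity(size_t used, size_t current_capacity,
                              size_t min_free_pct, size_t max_free_pct) {
    const size_t min_cap = used + used * min_free_pct / 100; // grow threshold
    const size_t max_cap = used + used * max_free_pct / 100; // shrink threshold
    if (current_capacity < min_cap) return min_cap;  // raise the high-water mark
    if (current_capacity > max_cap) return max_cap;  // lower it, return memory
    return current_capacity;                         // leave it alone
  }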
@@ -2186,8 +2160,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
       young_gen->print_used_change(pre_gc_values.young_gen_used());
       old_gen->print_used_change(pre_gc_values.old_gen_used());
       heap->print_heap_change(pre_gc_values.heap_used());
-      // Print perm gen last (print_heap_change() excludes the perm gen).
-      perm_gen->print_used_change(pre_gc_values.perm_gen_used());
+      MetaspaceAux::print_metaspace_change(pre_gc_values.metadata_used());
     } else {
       heap->print_heap_change(pre_gc_values.heap_used());
     }
@@ -2205,7 +2178,6 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
       ParCompactionManager::manager_array(int(i));
     assert(cm->marking_stack()->is_empty(),       "should be empty");
     assert(ParCompactionManager::region_list(int(i))->is_empty(), "should be empty");
-    assert(cm->revisit_klass_stack()->is_empty(), "should be empty");
   }
 #endif // ASSERT
@@ -2219,12 +2191,10 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
   if (VerifyObjectStartArray &&
       VerifyAfterGC) {
     old_gen->verify_object_start_array();
-    perm_gen->verify_object_start_array();
   }

   if (ZapUnusedHeapArea) {
     old_gen->object_space()->check_mangled_unused_area_complete();
-    perm_gen->object_space()->check_mangled_unused_area_complete();
   }

   NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
@@ -2357,6 +2327,9 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
   PSParallelCompact::FollowStackClosure follow_stack_closure(cm);

+  // Need new claim bits before marking starts.
+  ClassLoaderDataGraph::clear_claimed_marks();
+
   {
     TraceTime tm_m("par mark", print_phases(), true, gclog_or_tty);
     ParallelScavengeHeap::ParStrongRootsScope psrs;
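
The claim bits exist because several GC workers can reach the same ClassLoaderData from different roots; an atomic claim ensures each one is processed once per phase, and clear_claimed_marks() resets the bits before the next traversal. A minimal sketch of the protocol:

  #include <atomic>

  struct LoaderData {
    std::atomic<int> claimed{0};

    bool try_claim() {                  // first worker wins, the rest skip
      int expected = 0;
      return claimed.compare_exchange_strong(expected, 1);
    }
    void clear_claim() { claimed.store(0); }  // between GC phases
  };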
@@ -2407,11 +2380,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
   cm->follow_marking_stacks(); // Flush marking stack.

   // Update subklass/sibling/implementor links of live klasses
-  // revisit_klass_stack is used in follow_weak_klass_links().
-  follow_weak_klass_links();
-
-  // Revisit memoized MDO's and clear any unmarked weak refs
-  follow_mdo_weak_refs();
+  Klass::clean_weak_klass_links(is_alive_closure());

   // Visit interned string tables and delete unmarked oops
   StringTable::unlink(is_alive_closure());
@@ -2421,6 +2390,39 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
   assert(cm->marking_stacks_empty(), "marking stacks should be empty");
 }

+void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) {
+  ClassLoaderData* cld = klass->class_loader_data();
+  assert(cld->has_defined(klass), "inconsistency!");
+
+  // The actual processing of the klass is done when we
+  // traverse the list of Klasses in the class loader data.
+  PSParallelCompact::follow_class_loader(cm, cld);
+}
+
+void PSParallelCompact::adjust_klass(ParCompactionManager* cm, Klass* klass) {
+  ClassLoaderData* cld = klass->class_loader_data();
+  assert(cld->has_defined(klass), "inconsistency!");
+
+  // The actual processing of the klass is done when we
+  // traverse the list of Klasses in the class loader data.
+  PSParallelCompact::adjust_class_loader(cm, cld);
+}
+
+void PSParallelCompact::follow_class_loader(ParCompactionManager* cm,
+                                            ClassLoaderData* cld) {
+  PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
+  PSParallelCompact::FollowKlassClosure follow_klass_closure(&mark_and_push_closure);
+
+  cld->oops_do(&mark_and_push_closure, &follow_klass_closure, true);
+}
+
+void PSParallelCompact::adjust_class_loader(ParCompactionManager* cm,
+                                            ClassLoaderData* cld) {
+  cld->oops_do(PSParallelCompact::adjust_root_pointer_closure(),
+               PSParallelCompact::adjust_klass_closure(),
+               true);
+}
+
 // This should be moved to the shared markSweep code!
 class PSAlwaysTrueClosure: public BoolObjectClosure {
 public:
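
follow_klass() and adjust_klass() above deliberately coarsen the unit of work: touching any Klass redirects to its whole ClassLoaderData, so liveness is tracked per loader rather than per class. A small sketch of that idea with invented names (the claim bits keep it from being done twice):

  #include <vector>

  struct Loader;

  struct Class {
    Loader* owner;
  };

  struct Loader {
    bool marked = false;
    std::vector<Class*> classes;
  };

  void follow_class(Class* c) {
    Loader* l = c->owner;
    if (!l->marked) {
      l->marked = true;        // process each loader once (cf. claim bits)
      // ... visit the oops of every Class in l->classes here ...
    }
  }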
@@ -2433,6 +2435,9 @@ void PSParallelCompact::adjust_roots() {
   // Adjust the pointers to reflect the new locations
   TraceTime tm("adjust roots", print_phases(), true, gclog_or_tty);

+  // Need new claim bits when tracing through and adjusting pointers.
+  ClassLoaderDataGraph::clear_claimed_marks();
+
   // General strong roots.
   Universe::oops_do(adjust_root_pointer_closure());
   JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
@@ -2443,6 +2448,7 @@ void PSParallelCompact::adjust_roots() {
   JvmtiExport::oops_do(adjust_root_pointer_closure());
   // SO_AllClasses
   SystemDictionary::oops_do(adjust_root_pointer_closure());
+  ClassLoaderDataGraph::oops_do(adjust_root_pointer_closure(), adjust_klass_closure(), true);

   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
@@ -2460,14 +2466,6 @@ void PSParallelCompact::adjust_roots() {
                                adjust_root_pointer_closure());
 }

-void PSParallelCompact::compact_perm(ParCompactionManager* cm) {
-  TraceTime tm("compact perm gen", print_phases(), true, gclog_or_tty);
-  // trace("4");
-
-  gc_heap()->perm_gen()->start_array()->reset();
-  move_and_update(cm, perm_space_id);
-}
-
 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
                                                       uint parallel_gc_threads)
 {
@@ -2501,7 +2499,9 @@ void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
   // "which" must be 0 <= which < task_count
   which = 0;
-  for (unsigned int id = to_space_id; id > perm_space_id; --id) {
+  // id + 1 is used to test termination so unsigned can
+  // be used with an old_space_id == 0.
+  for (unsigned int id = to_space_id; id + 1 > old_space_id; --id) {
     SpaceInfo* const space_info = _space_info + id;
     MutableSpace* const space = space_info->space();
     HeapWord* const new_top = space_info->new_top();
@@ -2509,9 +2509,8 @@ void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
       const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
       const size_t end_region =
         sd.addr_to_region_idx(sd.region_align_up(new_top));
-      assert(end_region > 0, "perm gen cannot be empty");

-      for (size_t cur = end_region - 1; cur >= beg_region; --cur) {
+      for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
         if (sd.region(cur)->claim_unsafe()) {
           ParCompactionManager::region_list_push(which, cur);
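
Both rewritten loops count down over unsigned indices that can now legitimately reach 0 (old_space_id is 0 once perm_space_id is gone), so the usual id >= limit test would never become false: decrementing an unsigned 0 wraps around to the maximum value. The id + 1 > limit form terminates correctly. A self-contained demonstration:

  #include <iostream>

  int main() {
    // Visits 4, 3, 2, 1, 0 and stops: after --id wraps 0 to UINT_MAX,
    // id + 1 overflows back to 0 and the condition 0 > 0 is false.
    for (unsigned int id = 4; id + 1 > 0; --id) {
      std::cout << id << '\n';
    }
  }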
@@ -2662,8 +2661,6 @@ void PSParallelCompact::compact() {
 #ifdef ASSERT
   // Verify that all regions have been processed before the deferred updates.
-  // Note that perm_space_id is skipped; this type of verification is not
-  // valid until the perm gen is compacted by regions.
   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
     verify_complete(SpaceId(id));
   }
@@ -2722,66 +2719,6 @@ void PSParallelCompact::verify_complete(SpaceId space_id) {
 }
 #endif  // #ifdef ASSERT

-void
-PSParallelCompact::follow_weak_klass_links() {
-  // All klasses on the revisit stack are marked at this point.
-  // Update and follow all subklass, sibling and implementor links.
-  // Check all the stacks here even if not all the workers are active.
-  // There is no accounting which indicates which stacks might have
-  // contents to be followed.
-  if (PrintRevisitStats) {
-    gclog_or_tty->print_cr("#classes in system dictionary = %d",
-                           SystemDictionary::number_of_classes());
-  }
-  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
-    ParCompactionManager* cm = ParCompactionManager::manager_array(i);
-    KeepAliveClosure keep_alive_closure(cm);
-    Stack<Klass*, mtGC>* const rks = cm->revisit_klass_stack();
-    if (PrintRevisitStats) {
-      gclog_or_tty->print_cr("Revisit klass stack[%u] length = " SIZE_FORMAT,
-                             i, rks->size());
-    }
-    while (!rks->is_empty()) {
-      Klass* const k = rks->pop();
-      k->follow_weak_klass_links(is_alive_closure(), &keep_alive_closure);
-    }
-    cm->follow_marking_stacks();
-  }
-}
-
-void
-PSParallelCompact::revisit_weak_klass_link(ParCompactionManager* cm, Klass* k) {
-  cm->revisit_klass_stack()->push(k);
-}
-
-void PSParallelCompact::revisit_mdo(ParCompactionManager* cm, DataLayout* p) {
-  cm->revisit_mdo_stack()->push(p);
-}
-
-void PSParallelCompact::follow_mdo_weak_refs() {
-  // All strongly reachable oops have been marked at this point;
-  // we can visit and clear any weak references from MDO's which
-  // we memoized during the strong marking phase.
-  if (PrintRevisitStats) {
-    gclog_or_tty->print_cr("#classes in system dictionary = %d",
-                           SystemDictionary::number_of_classes());
-  }
-  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
-    ParCompactionManager* cm = ParCompactionManager::manager_array(i);
-    Stack<DataLayout*, mtGC>* rms = cm->revisit_mdo_stack();
-    if (PrintRevisitStats) {
-      gclog_or_tty->print_cr("Revisit MDO stack[%u] size = " SIZE_FORMAT,
-                             i, rms->size());
-    }
-    while (!rms->is_empty()) {
-      rms->pop()->follow_weak_refs(is_alive_closure());
-    }
-    cm->follow_marking_stacks();
-  }
-}
-
 #ifdef VALIDATE_MARK_SWEEP
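
The per-thread revisit stacks deleted here existed to defer weak subklass/sibling/implementor updates until after marking. With every live klass reachable through the class loader data graph, Klass::clean_weak_klass_links() can do a single walk instead. A rough sketch of that kind of pass (invented names, simplified to one weak edge):

  #include <vector>

  struct Klass {
    Klass* subklass = nullptr;   // weak link: must not keep its target alive
    bool   alive    = false;     // set by the marking phase
  };

  void clean_weak_links(std::vector<Klass*>& live_klasses) {
    for (Klass* k : live_klasses) {
      if (k->subklass && !k->subklass->alive) {
        k->subklass = nullptr;   // unlink the dead subclass
      }
    }
  }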
@@ -2829,7 +2766,7 @@ void PSParallelCompact::track_interior_pointers(oop obj) {
     _pointer_tracking = true;

     AdjusterTracker checker;
-    obj->oop_iterate(&checker);
+    obj->oop_iterate_no_header(&checker);
   }
 }
@@ -2842,10 +2779,10 @@ void PSParallelCompact::check_interior_pointers() {
 }

-void PSParallelCompact::reset_live_oop_tracking(bool at_perm) {
+void PSParallelCompact::reset_live_oop_tracking() {
   if (ValidateMarkSweep) {
     guarantee((size_t)_live_oops->length() == _live_oops_index, "should be at end of live oops");
-    _live_oops_index = at_perm ? _live_oops_index_at_perm : 0;
+    _live_oops_index = 0;
   }
 }
@@ -2995,7 +2932,7 @@ PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
   assert(Universe::heap()->is_in_reserved(addr), "addr not in the heap");

-  for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
+  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
     if (_space_info[id].space()->contains(addr)) {
       return SpaceId(id);
     }
@@ -3483,12 +3420,3 @@ UpdateOnlyClosure::do_addr(HeapWord* addr, size_t words) {
   do_addr(addr);
   return ParMarkBitMap::incomplete;
 }
-
-// Prepare for compaction.  This method is executed once
-// (i.e., by a single thread) before compaction.
-// Save the updated location of the intArrayKlassObj for
-// filling holes in the dense prefix.
-void PSParallelCompact::compact_prologue() {
-  _updated_int_array_klass_obj = (klassOop)
-    summary_data().calc_new_pointer(Universe::intArrayKlassObj());
-}
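
Note: compact_prologue() cached the post-compaction address of the int-array klass because, as its deleted comment says, holes in the dense prefix are plugged with dummy int arrays, and the klass itself used to move during a perm gen compaction. With Klass objects in metaspace that address is stable across collections, so the cached copy and its calc_new_pointer() call are no longer needed.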