8049421: G1 Class Unloading after completing a concurrent mark cycle

Co-authored-by: Mikael Gerdin <mikael.gerdin@oracle.com>
Reviewed-by: tschatzl, ehelin, brutisso, coleenp, roland, iveresov
Stefan Karlsson, 2014-07-07 10:12:40 +02:00
parent ef1e9b3c80
commit 1b001a2afd
75 changed files with 2169 additions and 874 deletions

@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
@@ -39,6 +40,7 @@
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "memory/allocation.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
@@ -58,8 +60,8 @@ CMBitMapRO::CMBitMapRO(int shifter) :
_bmWordSize = 0;
}
HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
HeapWord* limit) const {
HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
const HeapWord* limit) const {
// First we must round addr *up* to a possible object boundary.
addr = (HeapWord*)align_size_up((intptr_t)addr,
HeapWordSize << _shifter);
@@ -76,8 +78,8 @@ HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
return nextAddr;
}
HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
HeapWord* limit) const {
HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
const HeapWord* limit) const {
size_t addrOffset = heapWordToOffset(addr);
if (limit == NULL) {
limit = _bmStartWord + _bmWordSize;
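
Both accessors only read the bitmap, which is why the new signatures take the address parameters as pointers-to-const. A rough, self-contained sketch of the underlying idea, with hypothetical names (ToyMarkBitmap is not the HotSpot CMBitMap): one bit covers one heap word, and finding the next marked address is a forward scan for the next set bit.

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical sketch of a mark bitmap: one bit per heap word.
class ToyMarkBitmap {
  const uintptr_t _start;   // address of the first heap word covered
  std::vector<bool> _bits;  // one mark bit per heap word
public:
  ToyMarkBitmap(uintptr_t start, std::size_t words) : _start(start), _bits(words) {}

  std::size_t word_to_offset(const void* addr) const {
    return (reinterpret_cast<uintptr_t>(addr) - _start) / sizeof(void*);
  }

  void mark(const void* addr) { _bits[word_to_offset(addr)] = true; }

  // Like getNextMarkedWordAddress: the inputs are only read, hence const;
  // returns limit when no marked word exists in [addr, limit).
  const void* next_marked(const void* addr, const void* limit) const {
    std::size_t i = word_to_offset(addr);
    const std::size_t end = word_to_offset(limit);
    while (i < end && !_bits[i]) {
      ++i;
    }
    return reinterpret_cast<const void*>(_start + i * sizeof(void*));
  }
};
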
@@ -1223,6 +1225,9 @@ public:
};
void ConcurrentMark::scanRootRegions() {
// Start of concurrent marking.
ClassLoaderDataGraph::clear_claimed_marks();
// scan_in_progress() will have been set to true only if there was
// at least one root region to scan. So, if it's false, we
// should not attempt to do any further work.
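
The call added at the start of scanRootRegions() clears the claim flags on all class loader data, so that the marking workers can each claim a class loader exactly once per cycle. A minimal sketch of that claim protocol, under the assumption that a claim is a CAS on a per-CLD word (hypothetical ToyClassLoaderData, not the HotSpot API):

#include <atomic>

// Hypothetical sketch of the "claimed marks" protocol: workers race with
// a CAS so each class loader is processed by exactly one thread per cycle,
// and every cycle begins by clearing all claims, which is what
// clear_claimed_marks() does for the real class loader data graph.
struct ToyClassLoaderData {
  std::atomic<int> _claimed{0};

  // Returns true for exactly one caller per marking cycle.
  bool try_claim() {
    int expected = 0;
    return _claimed.compare_exchange_strong(expected, 1);
  }

  // Reset before concurrent marking starts.
  void clear_claim() { _claimed.store(0); }
};
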
@@ -1271,7 +1276,7 @@ void ConcurrentMark::markFromRoots() {
CMConcurrentMarkingTask markingTask(this, cmThread());
if (use_parallel_marking_threads()) {
_parallel_workers->set_active_workers((int)active_workers);
// Don't set _n_par_threads because it affects MT in process_strong_roots()
// Don't set _n_par_threads because it affects MT in process_roots()
// and the decisions on that MT processing are made elsewhere.
assert(_parallel_workers->active_workers() > 0, "Should have been set");
_parallel_workers->run_task(&markingTask);
@@ -2142,23 +2147,29 @@ void ConcurrentMark::cleanup() {
// Update the soft reference policy with the new heap occupancy.
Universe::update_heap_info_at_gc();
// We need to make this be a "collection" so any collection pause that
// races with it goes around and waits for completeCleanup to finish.
g1h->increment_total_collections();
// We reclaimed old regions so we should calculate the sizes to make
// sure we update the old gen/space data.
g1h->g1mm()->update_sizes();
if (VerifyDuringGC) {
HandleMark hm; // handle scope
Universe::heap()->prepare_for_verify();
Universe::verify(VerifyOption_G1UsePrevMarking,
" VerifyDuringGC:(after)");
}
g1h->check_bitmaps("Cleanup End");
g1h->verify_region_sets_optional();
// We need to make this be a "collection" so any collection pause that
// races with it goes around and waits for completeCleanup to finish.
g1h->increment_total_collections();
// Clean out dead classes and update Metaspace sizes.
ClassLoaderDataGraph::purge();
MetaspaceGC::compute_new_size();
// We reclaimed old regions so we should calculate the sizes to make
// sure we update the old gen/space data.
g1h->g1mm()->update_sizes();
g1h->trace_heap_after_concurrent_cycle();
}
@@ -2445,6 +2456,26 @@ void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
_g1h->set_par_threads(0);
}
void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
}
// Helper class to get rid of some boilerplate code.
class G1RemarkGCTraceTime : public GCTraceTime {
static bool doit_and_prepend(bool doit) {
if (doit) {
gclog_or_tty->put(' ');
}
return doit;
}
public:
G1RemarkGCTraceTime(const char* title, bool doit)
: GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
}
};
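
G1RemarkGCTraceTime is an RAII scope: construction records the start time (and, via doit_and_prepend, emits a leading space when logging is enabled), and destruction reports the timed phase. A rough, self-contained illustration of the same pattern outside HotSpot (hypothetical ScopedPhaseTimer, not a HotSpot class):

#include <chrono>
#include <cstdio>

// Hypothetical RAII phase timer in the spirit of GCTraceTime:
// construction records the start time, destruction prints the elapsed
// time, and both are no-ops when logging is off.
class ScopedPhaseTimer {
  const char* _title;
  bool _enabled;
  std::chrono::steady_clock::time_point _start;
public:
  ScopedPhaseTimer(const char* title, bool enabled)
      : _title(title), _enabled(enabled),
        _start(std::chrono::steady_clock::now()) {}
  ~ScopedPhaseTimer() {
    if (_enabled) {
      auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(
          std::chrono::steady_clock::now() - _start).count();
      std::printf(" [%s, %lld ms]\n", _title, static_cast<long long>(ms));
    }
  }
};

Because reporting happens in the destructor, nested scopes like the ones in weakRefsWork() below time each sub-phase without explicit stop calls.
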
void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
if (has_overflown()) {
// Skip processing the discovered references if we have
@@ -2557,9 +2588,28 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
return;
}
g1h->unlink_string_and_symbol_table(&g1_is_alive,
/* process_strings */ false, // currently strings are always roots
/* process_symbols */ true);
assert(_markStack.isEmpty(), "Marking should have completed");
// Unload Klasses, Strings, Symbols, Code Cache, etc.
G1RemarkGCTraceTime trace("Unloading", G1Log::finer());
bool purged_classes;
{
G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
}
{
G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
}
if (G1StringDedup::is_enabled()) {
G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest());
G1StringDedup::unlink(&g1_is_alive);
}
}
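
The remark pause thus performs unloading in individually timed phases: a serial system-dictionary pass that records whether any classes were purged, a parallel cleaning pass that consumes that result, and an optional string-deduplication unlink. A self-contained control-flow sketch, with hypothetical stand-ins for the HotSpot calls:

#include <cstdio>

// Hypothetical stand-ins for the HotSpot calls in the hunk above.
static bool toy_system_dictionary_unloading() { return true; }
static void toy_parallel_cleaning(bool purged_classes) {
  std::printf("parallel cleaning, purged_classes=%d\n", purged_classes);
}
static void toy_string_dedup_unlink() { std::puts("string dedup unlink"); }

// Control-flow sketch of the phased unloading in weakRefsWork(): the
// serial pass decides whether classes were purged, and the parallel
// pass uses that answer to decide how much metadata cleaning to do.
void remark_unloading_sketch(bool string_dedup_enabled) {
  bool purged_classes = toy_system_dictionary_unloading();
  toy_parallel_cleaning(purged_classes);
  if (string_dedup_enabled) {
    toy_string_dedup_unlink();
  }
}
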
void ConcurrentMark::swapMarkBitMaps() {
@@ -2568,6 +2618,57 @@ void ConcurrentMark::swapMarkBitMaps() {
_nextMarkBitMap = (CMBitMap*) temp;
}
class CMObjectClosure;
// Closure for iterating over objects, currently only used for
// processing SATB buffers.
class CMObjectClosure : public ObjectClosure {
private:
CMTask* _task;
public:
void do_object(oop obj) {
_task->deal_with_reference(obj);
}
CMObjectClosure(CMTask* task) : _task(task) { }
};
class G1RemarkThreadsClosure : public ThreadClosure {
CMObjectClosure _cm_obj;
G1CMOopClosure _cm_cl;
MarkingCodeBlobClosure _code_cl;
int _thread_parity;
bool _is_par;
public:
G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task, bool is_par) :
_cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
_thread_parity(SharedHeap::heap()->strong_roots_parity()), _is_par(is_par) {}
void do_thread(Thread* thread) {
if (thread->is_Java_thread()) {
if (thread->claim_oops_do(_is_par, _thread_parity)) {
JavaThread* jt = (JavaThread*)thread;
// In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
// however, oops reachable from nmethods have very complex lifecycles:
// * Alive if on the stack of an executing method
// * Weakly reachable otherwise
// Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
// live by the SATB invariant, but other oops recorded in nmethods may behave differently.
jt->nmethods_do(&_code_cl);
jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
}
} else if (thread->is_VM_thread()) {
if (thread->claim_oops_do(_is_par, _thread_parity)) {
JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
}
}
}
};
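
claim_oops_do() relies on a parity scheme rather than a flag that must be cleared between rounds: a global parity value is advanced once per strong-roots round, and the first worker to move a thread's local parity up to the current value wins the claim. A minimal sketch under that assumption (hypothetical ToyThread, not the HotSpot Thread API):

#include <atomic>

// Hypothetical sketch of claim-by-parity. The caller passes the current
// round's parity, taken from a global counter bumped once per
// root-processing round; the first worker to CAS a thread's local parity
// up to that value wins the claim, so no per-round clearing pass over
// all threads is needed.
struct ToyThread {
  std::atomic<int> _oops_do_parity{0};

  bool claim_oops_do(bool is_par, int parity) {
    if (!is_par) {               // serial round: the single caller owns it
      _oops_do_parity.store(parity);
      return true;
    }
    int seen = _oops_do_parity.load();
    // Succeeds for exactly one worker per round.
    return seen != parity &&
           _oops_do_parity.compare_exchange_strong(seen, parity);
  }
};

In CMRemarkTask below, G1RemarkThreadsClosure is applied via Threads::threads_do(), and this parity check is what lets the parallel marking workers divide the threads among themselves.
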
class CMRemarkTask: public AbstractGangTask {
private:
ConcurrentMark* _cm;
@@ -2579,6 +2680,14 @@ public:
if (worker_id < _cm->active_tasks()) {
CMTask* task = _cm->task(worker_id);
task->record_start_time();
{
ResourceMark rm;
HandleMark hm;
G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task, !_is_serial);
Threads::threads_do(&threads_f);
}
do {
task->do_marking_step(1000000000.0 /* something very large */,
true /* do_termination */,
@@ -2601,6 +2710,8 @@ void ConcurrentMark::checkpointRootsFinalWork() {
HandleMark hm;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1RemarkGCTraceTime trace("Finalize Marking", G1Log::finer());
g1h->ensure_parsability(false);
if (G1CollectedHeap::use_parallel_gc_threads()) {
@@ -3430,20 +3541,6 @@ public:
}
};
// Closure for iterating over objects, currently only used for
// processing SATB buffers.
class CMObjectClosure : public ObjectClosure {
private:
CMTask* _task;
public:
void do_object(oop obj) {
_task->deal_with_reference(obj);
}
CMObjectClosure(CMTask* task) : _task(task) { }
};
G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
ConcurrentMark* cm,
CMTask* task)
@@ -3908,15 +4005,6 @@ void CMTask::drain_satb_buffers() {
}
}
if (!concurrent() && !has_aborted()) {
// We should only do this during remark.
if (G1CollectedHeap::use_parallel_gc_threads()) {
satb_mq_set.par_iterate_closure_all_threads(_worker_id);
} else {
satb_mq_set.iterate_closure_all_threads();
}
}
_draining_satb_buffers = false;
assert(has_aborted() ||