8139200: Eliminate G1ParClosureSuper::_worker_id

Moved _worker_id from G1ParClosureSuper to G1ParCopyHelper.

Reviewed-by: mgerdin, tschatzl
Kim Barrett 2015-10-15 10:10:13 -04:00
parent e8e182c0e7
commit e7d0e95b08
6 changed files with 23 additions and 45 deletions
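
The net effect, shown in the diffs below, is that the worker id is no longer stored in the shared closure base class; G1ParCopyHelper caches it once from the G1ParScanThreadState passed to its constructor. A minimal standalone sketch of that pattern (hypothetical names, not the HotSpot classes themselves):

// Standalone sketch of the refactoring (hypothetical names): a field used by
// only one subclass is moved out of the shared base class and cached in that
// subclass from its constructor argument.
#include <cassert>

struct PerThreadState {
  unsigned _worker_id;
  explicit PerThreadState(unsigned worker_id) : _worker_id(worker_id) { }
  unsigned worker_id() const { return _worker_id; }
};

class ClosureBase {                        // plays the role of G1ParClosureSuper
protected:
  PerThreadState* _state;                  // no worker id cached here any more
  explicit ClosureBase(PerThreadState* state) : _state(state) { }
  ~ClosureBase() { }
};

class CopyClosure : public ClosureBase {   // plays the role of G1ParCopyHelper
protected:
  unsigned _worker_id;                     // cached once from the per-thread state
public:
  explicit CopyClosure(PerThreadState* state)
    : ClosureBase(state), _worker_id(state->worker_id()) { }
  unsigned worker_id() const { return _worker_id; }
};

int main() {
  PerThreadState state(3);
  CopyClosure cl(&state);
  assert(cl.worker_id() == 3);             // only the copy closure carries the id
  return 0;
}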


@@ -31,32 +31,15 @@
 #include "utilities/stack.inline.hpp"
 
 G1ParCopyHelper::G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
-  G1ParClosureSuper(g1, par_scan_state), _scanned_klass(NULL),
-  _cm(_g1->concurrent_mark()) { }
-
-G1ParCopyHelper::G1ParCopyHelper(G1CollectedHeap* g1) :
-  G1ParClosureSuper(g1), _scanned_klass(NULL),
-  _cm(_g1->concurrent_mark()) { }
-
-G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1) :
-  _g1(g1), _par_scan_state(NULL), _worker_id(UINT_MAX) { }
+  G1ParClosureSuper(g1, par_scan_state),
+  _worker_id(par_scan_state->worker_id()),
+  _scanned_klass(NULL),
+  _cm(_g1->concurrent_mark())
+{ }
 
 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
-  _g1(g1), _par_scan_state(NULL),
-  _worker_id(UINT_MAX) {
-  set_par_scan_thread_state(par_scan_state);
-}
-
-void G1ParClosureSuper::set_par_scan_thread_state(G1ParScanThreadState* par_scan_state) {
-  assert(_par_scan_state == NULL, "_par_scan_state must only be set once");
-  assert(par_scan_state != NULL, "Must set par_scan_state to non-NULL.");
-
-  _par_scan_state = par_scan_state;
-  _worker_id = par_scan_state->worker_id();
-
-  assert(_worker_id < ParallelGCThreads,
-         "The given worker id %u must be less than the number of threads %u", _worker_id, ParallelGCThreads);
-}
+  _g1(g1), _par_scan_state(par_scan_state)
+{ }
 
 void G1KlassScanClosure::do_klass(Klass* klass) {
   // If the klass has not been dirtied we know that there's


@@ -52,15 +52,12 @@ class G1ParClosureSuper : public OopsInHeapRegionClosure {
 protected:
   G1CollectedHeap* _g1;
   G1ParScanThreadState* _par_scan_state;
-  uint _worker_id;
-public:
-  // Initializes the instance, leaving _par_scan_state uninitialized. Must be done
-  // later using the set_par_scan_thread_state() method.
-  G1ParClosureSuper(G1CollectedHeap* g1);
-  G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
-  bool apply_to_weak_ref_discovered_field() { return true; }
-
-  void set_par_scan_thread_state(G1ParScanThreadState* par_scan_state);
+
+  G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
+  ~G1ParClosureSuper() { }
+
+public:
+  virtual bool apply_to_weak_ref_discovered_field() { return true; }
 };
 
 class G1ParPushHeapRSClosure : public G1ParClosureSuper {
@@ -76,7 +73,8 @@ public:
 
 class G1ParScanClosure : public G1ParClosureSuper {
 public:
-  G1ParScanClosure(G1CollectedHeap* g1) : G1ParClosureSuper(g1) { }
+  G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
+    G1ParClosureSuper(g1, par_scan_state) { }
 
   template <class T> void do_oop_nv(T* p);
   virtual void do_oop(oop* p) { do_oop_nv(p); }
@@ -88,6 +86,7 @@ public:
 // Add back base class for metadata
 class G1ParCopyHelper : public G1ParClosureSuper {
 protected:
+  uint _worker_id;              // Cache value from par_scan_state.
   Klass* _scanned_klass;
   ConcurrentMark* _cm;
 
@@ -100,10 +99,11 @@ protected:
   // objects pointed to by roots that have been forwarded during a
   // GC. It is MT-safe.
   inline void mark_forwarded_object(oop from_obj, oop to_obj);
-public:
-  G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
-  G1ParCopyHelper(G1CollectedHeap* g1);
-
+
+  G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
+  ~G1ParCopyHelper() { }
+
+public:
   void set_scanned_klass(Klass* k) { _scanned_klass = k; }
   template <class T> inline void do_klass_barrier(T* p, oop new_obj);
 };
@@ -130,10 +130,6 @@ public:
     assert(_ref_processor == NULL, "sanity");
   }
 
-  G1ParCopyClosure(G1CollectedHeap* g1) : G1ParCopyHelper(g1) {
-    assert(_ref_processor == NULL, "sanity");
-  }
-
   template <class T> void do_oop_nv(T* p) { do_oop_work(p); }
   virtual void do_oop(oop* p) { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
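
Both helper classes above now keep their constructor and non-virtual destructor in the protected section, the usual idiom for a class that exists only to be derived from: client code can neither instantiate it directly nor delete a derived object through a pointer to it. A standalone sketch of the idiom (hypothetical names, not HotSpot code):

// Standalone sketch (hypothetical names): a base class meant only for
// derivation declares its constructor and non-virtual destructor protected,
// so it cannot be created or deleted directly by client code.
class HelperBase {
protected:
  int _shared;
  explicit HelperBase(int shared) : _shared(shared) { }
  ~HelperBase() { }                 // non-virtual: never deleted via base pointer
};

class ConcreteClosure : public HelperBase {
public:
  explicit ConcreteClosure(int shared) : HelperBase(shared) { }
  int shared() const { return _shared; }
};

int main() {
  // HelperBase base(1);            // would not compile: protected constructor
  ConcreteClosure c(42);            // derived classes are used as before
  return c.shared() == 42 ? 0 : 1;
}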


@@ -91,7 +91,7 @@ inline void G1ParScanClosure::do_oop_nv(T* p) {
       if (state.is_humongous()) {
         _g1->set_humongous_is_live(obj);
       }
-      _par_scan_state->update_rs(_from, p, _worker_id);
+      _par_scan_state->update_rs(_from, p);
     }
   }
 }


@@ -43,10 +43,9 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id,
     _worker_id(worker_id),
     _tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
     _age_table(false),
-    _scanner(g1h),
+    _scanner(g1h, this),
     _old_gen_is_full(false)
 {
-  _scanner.set_par_scan_thread_state(this);
   // we allocate G1YoungSurvRateNumRegions plus one entries, since
   // we "sacrifice" entry 0 to keep track of surviving bytes for
   // non-young regions (where the age is -1)

@@ -98,7 +98,7 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {
 
   template <class T> void push_on_queue(T* ref);
 
-  template <class T> void update_rs(HeapRegion* from, T* p, uint tid) {
+  template <class T> void update_rs(HeapRegion* from, T* p) {
     // If the new value of the field points to the same region or
     // is the to-space, we don't need to include it in the Rset updates.
     if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {


@@ -56,7 +56,7 @@ template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from
   }
 
   assert(obj != NULL, "Must be");
-  update_rs(from, p, _worker_id);
+  update_rs(from, p);
 }
 
 template <class T> inline void G1ParScanThreadState::push_on_queue(T* ref) {