8201316: Move G1-related static members from JavaThread to G1BarrierSet

Reviewed-by: stefank, shade
Per Lidén 2018-04-12 08:25:30 +02:00
parent 397e628d12
commit 4c60e69b97
11 changed files with 71 additions and 67 deletions
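The change is mechanical: the two shared queue sets that were static members of JavaThread now live on G1BarrierSet, which exposes them through static accessors, and every call site switches from the JavaThread accessor to the G1BarrierSet one. A simplified sketch of the new shape (condensed from the g1BarrierSet.hpp hunk below, not the complete class):

    // Shared queue sets now owned by the barrier set (simplified extract).
    class G1BarrierSet : public CardTableBarrierSet {
     private:
      static SATBMarkQueueSet  _satb_mark_queue_set;   // one SATB queue set for the whole VM
      static DirtyCardQueueSet _dirty_card_queue_set;  // one dirty card queue set for the whole VM
     public:
      static SATBMarkQueueSet&  satb_mark_queue_set()  { return _satb_mark_queue_set; }
      static DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
    };

    // Typical call-site rewrite:
    //   before: JavaThread::dirty_card_queue_set().abandon_logs();
    //   after:  G1BarrierSet::dirty_card_queue_set().abandon_logs();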

File: g1BarrierSet.cpp

@@ -37,25 +37,26 @@
 #include "runtime/thread.inline.hpp"
 #include "utilities/macros.hpp"
+SATBMarkQueueSet G1BarrierSet::_satb_mark_queue_set;
+DirtyCardQueueSet G1BarrierSet::_dirty_card_queue_set;
 G1BarrierSet::G1BarrierSet(G1CardTable* card_table) :
   CardTableBarrierSet(make_barrier_set_assembler<G1BarrierSetAssembler>(),
                       card_table,
-                      BarrierSet::FakeRtti(BarrierSet::G1BarrierSet)),
-  _dcqs(JavaThread::dirty_card_queue_set())
-{ }
+                      BarrierSet::FakeRtti(BarrierSet::G1BarrierSet)) {}
 void G1BarrierSet::enqueue(oop pre_val) {
   // Nulls should have been already filtered.
   assert(oopDesc::is_oop(pre_val, true), "Error");
-  if (!JavaThread::satb_mark_queue_set().is_active()) return;
+  if (!_satb_mark_queue_set.is_active()) return;
   Thread* thr = Thread::current();
   if (thr->is_Java_thread()) {
     JavaThread* jt = (JavaThread*)thr;
     jt->satb_mark_queue().enqueue(pre_val);
   } else {
     MutexLockerEx x(Shared_SATB_Q_lock, Mutex::_no_safepoint_check_flag);
-    JavaThread::satb_mark_queue_set().shared_satb_queue()->enqueue(pre_val);
+    _satb_mark_queue_set.shared_satb_queue()->enqueue(pre_val);
   }
 }
@@ -76,7 +77,7 @@ void G1BarrierSet::write_ref_array_post_entry(HeapWord* dst, size_t length) {
 template <class T> void
 G1BarrierSet::write_ref_array_pre_work(T* dst, size_t count) {
-  if (!JavaThread::satb_mark_queue_set().is_active()) return;
+  if (!_satb_mark_queue_set.is_active()) return;
   T* elem_ptr = dst;
   for (size_t i = 0; i < count; i++, elem_ptr++) {
     T heap_oop = RawAccess<>::oop_load(elem_ptr);
@@ -111,7 +112,7 @@ void G1BarrierSet::write_ref_field_post_slow(volatile jbyte* byte) {
     } else {
       MutexLockerEx x(Shared_DirtyCardQ_lock,
                       Mutex::_no_safepoint_check_flag);
-      _dcqs.shared_dirty_card_queue()->enqueue(byte);
+      _dirty_card_queue_set.shared_dirty_card_queue()->enqueue(byte);
     }
   }
 }
@@ -149,7 +150,7 @@ void G1BarrierSet::invalidate(MemRegion mr) {
     }
     if (*byte != G1CardTable::dirty_card_val()) {
       *byte = G1CardTable::dirty_card_val();
-      _dcqs.shared_dirty_card_queue()->enqueue(byte);
+      _dirty_card_queue_set.shared_dirty_card_queue()->enqueue(byte);
     }
   }
 }
@@ -181,7 +182,7 @@ void G1BarrierSet::on_thread_attach(JavaThread* thread) {
   // If we are creating the thread during a marking cycle, we should
   // set the active field of the SATB queue to true.
-  if (thread->satb_mark_queue_set().is_active()) {
+  if (_satb_mark_queue_set.is_active()) {
     thread->satb_mark_queue().set_active(true);
   }
 }

File: g1BarrierSet.hpp

@@ -25,6 +25,8 @@
 #ifndef SHARE_VM_GC_G1_G1BARRIERSET_HPP
 #define SHARE_VM_GC_G1_G1BARRIERSET_HPP
+#include "gc/g1/dirtyCardQueue.hpp"
+#include "gc/g1/satbMarkQueue.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
 class DirtyCardQueueSet;
@@ -37,7 +39,8 @@ class G1CardTable;
 class G1BarrierSet: public CardTableBarrierSet {
   friend class VMStructs;
  private:
-  DirtyCardQueueSet& _dcqs;
+  static SATBMarkQueueSet _satb_mark_queue_set;
+  static DirtyCardQueueSet _dirty_card_queue_set;
  public:
   G1BarrierSet(G1CardTable* table);
@@ -74,6 +77,14 @@ class G1BarrierSet: public CardTableBarrierSet {
   virtual void on_thread_attach(JavaThread* thread);
   virtual void on_thread_detach(JavaThread* thread);
+  static SATBMarkQueueSet& satb_mark_queue_set() {
+    return _satb_mark_queue_set;
+  }
+  static DirtyCardQueueSet& dirty_card_queue_set() {
+    return _dirty_card_queue_set;
+  }
   // Callbacks for runtime accesses.
   template <DecoratorSet decorators, typename BarrierSetT = G1BarrierSet>
   class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {
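With the accessors in place on G1BarrierSet, the G1 sources below gain an #include of gc/g1/g1BarrierSet.hpp and reach the shared queue sets through it. A typical converted call site, as in the hunks that follow (the local variable names here are only illustrative):

    DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
    size_t pending = dcqs.completed_buffers_num();   // e.g. queried by concurrent refinement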

File: g1CollectedHeap.cpp

@@ -30,6 +30,7 @@
 #include "code/icBuffer.hpp"
 #include "gc/g1/bufferingOopClosure.hpp"
 #include "gc/g1/g1Allocator.inline.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectionSet.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
@@ -1036,7 +1037,7 @@ void G1CollectedHeap::abort_refinement() {
   }
   // Discard all remembered set updates.
-  JavaThread::dirty_card_queue_set().abandon_logs();
+  G1BarrierSet::dirty_card_queue_set().abandon_logs();
   assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
 }
@@ -1636,10 +1637,10 @@ jint G1CollectedHeap::initialize() {
   // Perform any initialization actions delegated to the policy.
   g1_policy()->init(this, &_collection_set);
-  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
-                                               SATB_Q_FL_lock,
-                                               G1SATBProcessCompletedThreshold,
-                                               Shared_SATB_Q_lock);
+  G1BarrierSet::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
+                                                 SATB_Q_FL_lock,
+                                                 G1SATBProcessCompletedThreshold,
+                                                 Shared_SATB_Q_lock);
   jint ecode = initialize_concurrent_refinement();
   if (ecode != JNI_OK) {
@@ -1651,20 +1652,20 @@ jint G1CollectedHeap::initialize() {
     return ecode;
   }
-  JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
-                                                DirtyCardQ_FL_lock,
-                                                (int)concurrent_refine()->yellow_zone(),
-                                                (int)concurrent_refine()->red_zone(),
-                                                Shared_DirtyCardQ_lock,
-                                                NULL,  // fl_owner
-                                                true); // init_free_ids
+  G1BarrierSet::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
+                                                  DirtyCardQ_FL_lock,
+                                                  (int)concurrent_refine()->yellow_zone(),
+                                                  (int)concurrent_refine()->red_zone(),
+                                                  Shared_DirtyCardQ_lock,
+                                                  NULL,  // fl_owner
+                                                  true); // init_free_ids
   dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
                                     DirtyCardQ_FL_lock,
                                     -1, // never trigger processing
                                     -1, // no limit on length
                                     Shared_DirtyCardQ_lock,
-                                    &JavaThread::dirty_card_queue_set());
+                                    &G1BarrierSet::dirty_card_queue_set());
   // Here we allocate the dummy HeapRegion that is required by the
   // G1AllocRegion class.
@@ -1833,7 +1834,7 @@ void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker
 }
 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i) {
-  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+  DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
   size_t n_completed_buffers = 0;
   while (dcqs.apply_closure_during_gc(cl, worker_i)) {
     n_completed_buffers++;
@@ -2467,7 +2468,7 @@ size_t G1CollectedHeap::pending_card_num() {
     DirtyCardQueue& dcq = curr->dirty_card_queue();
     extra_cards += dcq.size();
   }
-  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+  DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
   size_t buffer_size = dcqs.buffer_size();
   size_t buffer_num = dcqs.completed_buffers_num();
@@ -2551,7 +2552,7 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
   RegisterHumongousWithInCSetFastTestClosure()
   : _total_humongous(0),
     _candidate_humongous(0),
-    _dcq(&JavaThread::dirty_card_queue_set()) {
+    _dcq(&G1BarrierSet::dirty_card_queue_set()) {
   }
   virtual bool do_heap_region(HeapRegion* r) {
@@ -3623,7 +3624,7 @@ void G1CollectedHeap::redirty_logged_cards() {
   dirty_card_queue_set().reset_for_par_iteration();
   workers()->run_task(&redirty_task);
-  DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
+  DirtyCardQueueSet& dcq = G1BarrierSet::dirty_card_queue_set();
   dcq.merge_bufferlists(&dirty_card_queue_set());
   assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");

File: g1ConcurrentMark.cpp

@@ -26,6 +26,7 @@
 #include "classfile/metadataOnStackMark.hpp"
 #include "classfile/symbolTable.hpp"
 #include "code/codeCache.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1ConcurrentMark.inline.hpp"
@@ -405,7 +406,7 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
   assert(CGC_lock != NULL, "CGC_lock must be initialized");
-  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
+  SATBMarkQueueSet& satb_qs = G1BarrierSet::satb_mark_queue_set();
   satb_qs.set_buffer_size(G1SATBBufferSize);
   _root_regions.init(_g1h->survivor(), this);
@@ -762,7 +763,7 @@ void G1ConcurrentMark::post_initial_mark() {
   rp->enable_discovery();
   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
-  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
+  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
   // This is the start of the marking cycle, we're expected all
   // threads to have SATB queues with active set to false.
   satb_mq_set.set_active_all_threads(true, /* new active value */
@@ -1073,7 +1074,7 @@ void G1ConcurrentMark::remark() {
   if (mark_finished) {
     weak_refs_work(false /* clear_all_soft_refs */);
-    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
+    SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
     // We're done with marking.
     // This is the end of the marking cycle, we're expected all
     // threads to have SATB queues with active set to true.
@@ -1695,7 +1696,7 @@ class G1RemarkThreadsClosure : public ThreadClosure {
       }
     } else if (thread->is_VM_thread()) {
       if (thread->claim_oops_do(true, _thread_parity)) {
-        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
+        G1BarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
       }
     }
   }
@@ -1755,7 +1756,7 @@ void G1ConcurrentMark::finalize_marking() {
     _g1h->workers()->run_task(&remarkTask);
   }
-  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
+  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
   guarantee(has_overflown() ||
             satb_mq_set.completed_buffers_num() == 0,
             "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
@@ -1937,7 +1938,7 @@ void G1ConcurrentMark::concurrent_cycle_abort() {
   _second_overflow_barrier_sync.abort();
   _has_aborted = true;
-  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
+  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
   satb_mq_set.abandon_partial_marking();
   // This can be called either during or outside marking, we'll read
   // the expected_active value from the SATB queue set.
@@ -2147,7 +2148,7 @@ void G1CMTask::regular_clock_call() {
   // (6) Finally, we check whether there are enough completed STAB
   // buffers available for processing. If there are, we abort.
-  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
+  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
     // we do need to process SATB buffers, we'll abort and restart
     // the marking task to do so
@@ -2302,7 +2303,7 @@ void G1CMTask::drain_satb_buffers() {
   _draining_satb_buffers = true;
   G1CMSATBBufferClosure satb_cl(this, _g1h);
-  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
+  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
   // This keeps claiming and applying the closure to completed buffers
   // until we run out of buffers or we need to abort.

File: g1ConcurrentRefine.cpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
  */
 #include "precompiled.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1ConcurrentRefine.hpp"
 #include "gc/g1/g1ConcurrentRefineThread.hpp"
 #include "logging/log.hpp"
@@ -377,7 +378,7 @@ void G1ConcurrentRefine::update_zones(double update_rs_time,
 void G1ConcurrentRefine::adjust(double update_rs_time,
                                 size_t update_rs_processed_buffers,
                                 double goal_ms) {
-  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+  DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
   if (G1UseAdaptiveConcRefinement) {
     update_zones(update_rs_time, update_rs_processed_buffers, goal_ms);
@@ -425,7 +426,7 @@ void G1ConcurrentRefine::maybe_activate_more_threads(uint worker_id, size_t num_
 }
 bool G1ConcurrentRefine::do_refinement_step(uint worker_id) {
-  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+  DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
   size_t curr_buffer_num = dcqs.completed_buffers_num();
   // If the number of the buffers falls down into the yellow zone,

File: g1ConcurrentRefineThread.cpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
  */
 #include "precompiled.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1ConcurrentRefine.hpp"
 #include "gc/g1/g1ConcurrentRefineThread.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
@@ -63,7 +64,7 @@ void G1ConcurrentRefineThread::wait_for_completed_buffers() {
 }
 bool G1ConcurrentRefineThread::is_active() {
-  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+  DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
   return is_primary() ? dcqs.process_completed_buffers() : _active;
 }
@@ -72,7 +73,7 @@ void G1ConcurrentRefineThread::activate() {
   if (!is_primary()) {
     set_active(true);
   } else {
-    DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+    DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
     dcqs.set_process_completed(true);
   }
   _monitor->notify();
@@ -83,7 +84,7 @@ void G1ConcurrentRefineThread::deactivate() {
   if (!is_primary()) {
     set_active(false);
   } else {
-    DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+    DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
     dcqs.set_process_completed(false);
   }
 }
@@ -101,7 +102,7 @@ void G1ConcurrentRefineThread::run_service() {
   size_t buffers_processed = 0;
   log_debug(gc, refine)("Activated worker %d, on threshold: " SIZE_FORMAT ", current: " SIZE_FORMAT,
                         _worker_id, _cr->activation_threshold(_worker_id),
-                        JavaThread::dirty_card_queue_set().completed_buffers_num());
+                        G1BarrierSet::dirty_card_queue_set().completed_buffers_num());
   {
     SuspendibleThreadSetJoiner sts_join;
@@ -123,7 +124,7 @@ void G1ConcurrentRefineThread::run_service() {
   log_debug(gc, refine)("Deactivated worker %d, off threshold: " SIZE_FORMAT
                         ", current: " SIZE_FORMAT ", processed: " SIZE_FORMAT,
                         _worker_id, _cr->deactivation_threshold(_worker_id),
-                        JavaThread::dirty_card_queue_set().completed_buffers_num(),
+                        G1BarrierSet::dirty_card_queue_set().completed_buffers_num(),
                         buffers_processed);
   if (os::supports_vtime()) {

File: g1RemSet.cpp

@@ -24,6 +24,7 @@
 #include "precompiled.hpp"
 #include "gc/g1/dirtyCardQueue.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1BlockOffsetTable.inline.hpp"
 #include "gc/g1/g1CardTable.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
@@ -488,7 +489,7 @@ void G1RemSet::oops_into_collection_set_do(G1ParScanThreadState* pss,
 }
 void G1RemSet::prepare_for_oops_into_collection_set_do() {
-  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+  DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
   dcqs.concatenate_logs();
   _scan_state->reset();
@@ -641,7 +642,7 @@ void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
       MutexLockerEx x(Shared_DirtyCardQ_lock,
                       Mutex::_no_safepoint_check_flag);
       DirtyCardQueue* sdcq =
-        JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
+        G1BarrierSet::dirty_card_queue_set().shared_dirty_card_queue();
       sdcq->enqueue(card_ptr);
     }
   } else {

File: g1RemSetSummary.cpp

@@ -53,7 +53,7 @@ public:
 void G1RemSetSummary::update() {
   _num_conc_refined_cards = _rem_set->num_conc_refined_cards();
-  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+  DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
   _num_processed_buf_mutator = dcqs.processed_buffers_mut();
   _num_processed_buf_rs_threads = dcqs.processed_buffers_rs_thread();

File: g1RootProcessor.cpp

@@ -29,6 +29,7 @@
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
 #include "gc/g1/bufferingOopClosure.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1CodeBlobClosure.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorState.hpp"
@@ -134,7 +135,7 @@ void G1RootProcessor::evacuate_roots(G1EvacuationRootClosures* closures, uint wo
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SATBFiltering, worker_i);
     if (!_process_strong_tasks.is_task_claimed(G1RP_PS_filter_satb_buffers) && _g1h->collector_state()->mark_or_rebuild_in_progress()) {
-      JavaThread::satb_mark_queue_set().filter_thread_buffers();
+      G1BarrierSet::satb_mark_queue_set().filter_thread_buffers();
     }
   }

File: thread.cpp

@@ -115,6 +115,7 @@
 #include "utilities/vmError.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc/cms/concurrentMarkSweepThread.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
 #include "gc/parallel/pcTasks.hpp"
 #endif // INCLUDE_ALL_GCS
@@ -1589,16 +1590,11 @@ void JavaThread::initialize() {
   pd_initialize();
 }
-#if INCLUDE_ALL_GCS
-SATBMarkQueueSet JavaThread::_satb_mark_queue_set;
-DirtyCardQueueSet JavaThread::_dirty_card_queue_set;
-#endif // INCLUDE_ALL_GCS
 JavaThread::JavaThread(bool is_attaching_via_jni) :
   Thread()
 #if INCLUDE_ALL_GCS
-  , _satb_mark_queue(&_satb_mark_queue_set),
-  _dirty_card_queue(&_dirty_card_queue_set)
+  , _satb_mark_queue(&G1BarrierSet::satb_mark_queue_set()),
+  _dirty_card_queue(&G1BarrierSet::dirty_card_queue_set())
 #endif // INCLUDE_ALL_GCS
 {
   initialize();
@@ -1664,8 +1660,8 @@ static void sweeper_thread_entry(JavaThread* thread, TRAPS);
 JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz) :
   Thread()
 #if INCLUDE_ALL_GCS
-  , _satb_mark_queue(&_satb_mark_queue_set),
-  _dirty_card_queue(&_dirty_card_queue_set)
+  , _satb_mark_queue(&G1BarrierSet::satb_mark_queue_set()),
+  _dirty_card_queue(&G1BarrierSet::dirty_card_queue_set())
 #endif // INCLUDE_ALL_GCS
 {
   initialize();

File: thread.hpp

@@ -1063,12 +1063,8 @@ class JavaThread: public Thread {
   // Support for G1 barriers
   SATBMarkQueue _satb_mark_queue;    // Thread-local log for SATB barrier.
-  // Set of all such queues.
-  static SATBMarkQueueSet _satb_mark_queue_set;
   DirtyCardQueue _dirty_card_queue;  // Thread-local log for dirty cards.
-  // Set of all such queues.
-  static DirtyCardQueueSet _dirty_card_queue_set;
 #endif // INCLUDE_ALL_GCS
   friend class VMThread;
@@ -1948,15 +1944,9 @@ class JavaThread: public Thread {
 #if INCLUDE_ALL_GCS
   // SATB marking queue support
   SATBMarkQueue& satb_mark_queue() { return _satb_mark_queue; }
-  static SATBMarkQueueSet& satb_mark_queue_set() {
-    return _satb_mark_queue_set;
-  }
   // Dirty card queue support
   DirtyCardQueue& dirty_card_queue() { return _dirty_card_queue; }
-  static DirtyCardQueueSet& dirty_card_queue_set() {
-    return _dirty_card_queue_set;
-  }
 #endif // INCLUDE_ALL_GCS
   // Machine dependent stuff
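Note that only the shared queue sets move; each JavaThread keeps its thread-local _satb_mark_queue and _dirty_card_queue, which are now wired to the G1BarrierSet-owned sets in the constructor initializer lists shown in the thread.cpp hunks above:

    , _satb_mark_queue(&G1BarrierSet::satb_mark_queue_set()),   // thread-local SATB log -> shared set
      _dirty_card_queue(&G1BarrierSet::dirty_card_queue_set())  // thread-local dirty card log -> shared set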