8130931: Refactor CardTableModRefBS[ForCTRS]

Remove friends and push CTRS-specific code down from base to derived

Reviewed-by: tschatzl, mgerdin
Kim Barrett 2015-07-22 00:37:01 -04:00
parent d7f565d9eb
commit 717679c169
11 changed files with 302 additions and 251 deletions
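
In outline, the change deletes the base class's friend declarations and moves everything that only the CardTableRS-based remembered set needs (the _rs back-pointer, the "lowest non-clean" (LNC) arrays, and the parallel card-scanning helpers) from CardTableModRefBS down into CardTableModRefBSForCTRS, which now gets its own header and source file. A minimal sketch of the resulting class shapes follows; the member names are taken from the diff below, but the bodies and types are simplified stand-ins rather than the actual HotSpot declarations.

#include <cstddef>
#include <cstdint>

typedef int8_t jbyte;          // stand-in for HotSpot's jbyte
class CardTableRS;             // the generational remembered set, forward-declared

// Base class after the refactor: only the generic card-table barrier state
// remains; the CTRS friends, the LNC arrays, and the parallel-scan helpers
// no longer live here.
class CardTableModRefBS /* : public ModRefBarrierSet */ {
protected:
  enum CardValues { clean_card = -1, dirty_card = 0, precleaned_card = 1 };
  jbyte* _byte_map;            // the card table itself
public:
  CardTableModRefBS() : _byte_map(NULL) {}
};

// Derived class: everything specific to CardTableRS, pushed down from the base.
class CardTableModRefBSForCTRS : public CardTableModRefBS {
  friend class CardTableRS;    // the one friend declaration that remains
public:
  CardTableModRefBSForCTRS()
    : _rs(NULL),
      _lowest_non_clean(NULL),
      _lowest_non_clean_chunk_size(NULL),
      _lowest_non_clean_base_chunk_index(NULL),
      _last_LNC_resizing_collection(NULL)
  {}
  void set_CTRS(CardTableRS* rs) { _rs = rs; }
private:
  CardTableRS* _rs;
  // Dirty and precleaned are equivalent wrt younger_refs_iter; now private here.
  static bool card_is_dirty_wrt_gen_iter(jbyte cv) {
    return cv == dirty_card || cv == precleaned_card;
  }
  // LNC ("lowest non-clean") support for parallel card scanning.
  jbyte**    _lowest_non_clean;
  size_t*    _lowest_non_clean_chunk_size;
  uintptr_t* _lowest_non_clean_base_chunk_index;
  int*       _last_LNC_resizing_collection;
};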

View file

@@ -36,10 +36,11 @@
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vmThread.hpp"
void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
OopsInGenClosure* cl,
CardTableRS* ct,
uint n_threads) {
void CardTableModRefBSForCTRS::
non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
OopsInGenClosure* cl,
CardTableRS* ct,
uint n_threads) {
assert(n_threads > 0, "expected n_threads > 0");
assert(n_threads <= ParallelGCThreads,
err_msg("n_threads: %u > ParallelGCThreads: %u", n_threads, ParallelGCThreads));
@@ -81,7 +82,7 @@ void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegio
}
void
CardTableModRefBS::
CardTableModRefBSForCTRS::
process_stride(Space* sp,
MemRegion used,
jint stride, int n_strides,
@@ -170,7 +171,7 @@ process_stride(Space* sp,
#endif
void
CardTableModRefBS::
CardTableModRefBSForCTRS::
process_chunk_boundaries(Space* sp,
DirtyCardToOopClosure* dcto_cl,
MemRegion chunk_mr,
@@ -426,7 +427,7 @@ process_chunk_boundaries(Space* sp,
#undef NOISY
void
CardTableModRefBS::
CardTableModRefBSForCTRS::
get_LNC_array_for_space(Space* sp,
jbyte**& lowest_non_clean,
uintptr_t& lowest_non_clean_base_chunk_index,

View file

@@ -40,7 +40,6 @@ class CheckForUnmarkedOops : public OopClosure {
PSYoungGen* _young_gen;
CardTableExtension* _card_table;
HeapWord* _unmarked_addr;
jbyte* _unmarked_card;
protected:
template <class T> void do_oop_work(T* p) {
@@ -50,7 +49,6 @@ class CheckForUnmarkedOops : public OopClosure {
// Don't overwrite the first missing card mark
if (_unmarked_addr == NULL) {
_unmarked_addr = (HeapWord*)p;
_unmarked_card = _card_table->byte_for(p);
}
}
}

View file

@@ -623,7 +623,7 @@ void DefNewGeneration::collect(bool full,
{
// DefNew needs to run with n_threads == 0, to make sure the serial
// version of the card table scanning code is used.
// See: CardTableModRefBS::non_clean_card_iterate_possibly_parallel.
// See: CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel.
StrongRootsScope srs(0);
gch->gen_process_roots(&srs,

View file

@@ -24,22 +24,12 @@
#include "precompiled.hpp"
#include "gc/shared/cardTableModRefBS.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/universe.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#endif
// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
@@ -68,12 +58,7 @@ CardTableModRefBS::CardTableModRefBS(
_committed(NULL),
_cur_covered_regions(0),
_byte_map(NULL),
byte_map_base(NULL),
// LNC functionality
_lowest_non_clean(NULL),
_lowest_non_clean_chunk_size(NULL),
_lowest_non_clean_base_chunk_index(NULL),
_last_LNC_resizing_collection(NULL)
byte_map_base(NULL)
{
assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");
@@ -130,25 +115,6 @@ void CardTableModRefBS::initialize() {
!ExecMem, "card table last card");
*guard_card = last_card;
_lowest_non_clean =
NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC);
_lowest_non_clean_chunk_size =
NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC);
_lowest_non_clean_base_chunk_index =
NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC);
_last_LNC_resizing_collection =
NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC);
if (_lowest_non_clean == NULL
|| _lowest_non_clean_chunk_size == NULL
|| _lowest_non_clean_base_chunk_index == NULL
|| _last_LNC_resizing_collection == NULL)
vm_exit_during_initialization("couldn't allocate an LNC array.");
for (int i = 0; i < _max_covered_regions; i++) {
_lowest_non_clean[i] = NULL;
_lowest_non_clean_chunk_size[i] = 0;
_last_LNC_resizing_collection[i] = -1;
}
if (TraceCardTableModRefBS) {
gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
gclog_or_tty->print_cr(" "
@@ -171,22 +137,6 @@ CardTableModRefBS::~CardTableModRefBS() {
delete[] _committed;
_committed = NULL;
}
if (_lowest_non_clean) {
FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean);
_lowest_non_clean = NULL;
}
if (_lowest_non_clean_chunk_size) {
FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size);
_lowest_non_clean_chunk_size = NULL;
}
if (_lowest_non_clean_base_chunk_index) {
FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index);
_lowest_non_clean_base_chunk_index = NULL;
}
if (_last_LNC_resizing_collection) {
FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection);
_last_LNC_resizing_collection = NULL;
}
}
int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
@@ -437,32 +387,6 @@ void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool relea
}
void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
MemRegion mr,
OopsInGenClosure* cl,
CardTableRS* ct,
uint n_threads) {
if (!mr.is_empty()) {
if (n_threads > 0) {
#if INCLUDE_ALL_GCS
non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else // INCLUDE_ALL_GCS
fatal("Parallel gc not supported here.");
#endif // INCLUDE_ALL_GCS
} else {
// clear_cl finds contiguous dirty ranges of cards to process and clear.
// This is the single-threaded version used by DefNew.
const bool parallel = false;
DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary(), parallel);
ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);
clear_cl.do_MemRegion(mr);
}
}
}
void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
assert((HeapWord*)align_size_up ((uintptr_t)mr.end(), HeapWordSize) == mr.end(), "Unaligned end" );
@@ -623,15 +547,3 @@ void CardTableModRefBS::print_on(outputStream* st) const {
p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base));
}
bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
return
CardTableModRefBS::card_will_be_scanned(cv) ||
_rs->is_prev_nonclean_card_val(cv);
};
bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
return
cv != clean_card &&
(CardTableModRefBS::card_may_have_been_dirty(cv) ||
CardTableRS::youngergen_may_have_been_dirty(cv));
};

View file

@@ -40,23 +40,9 @@
// Closures used to scan dirty cards should take these
// considerations into account.
class Generation;
class OopsInGenClosure;
class DirtyCardToOopClosure;
class ClearNoncleanCardWrapper;
class CardTableRS;
class CardTableModRefBS: public ModRefBarrierSet {
// Some classes get to look at some private stuff.
friend class BytecodeInterpreter;
friend class VMStructs;
friend class CardTableRS;
friend class CheckForUnmarkedOops; // Needs access to raw card bytes.
friend class SharkBuilder;
#ifndef PRODUCT
// For debugging.
friend class GuaranteeNotModClosure;
#endif
protected:
enum CardValues {
@@ -75,24 +61,6 @@ class CardTableModRefBS: public ModRefBarrierSet {
// a word's worth (row) of clean card values
static const intptr_t clean_card_row = (intptr_t)(-1);
// dirty and precleaned are equivalent wrt younger_refs_iter.
static bool card_is_dirty_wrt_gen_iter(jbyte cv) {
return cv == dirty_card || cv == precleaned_card;
}
// Returns "true" iff the value "cv" will cause the card containing it
// to be scanned in the current traversal. May be overridden by
// subtypes.
virtual bool card_will_be_scanned(jbyte cv) {
return CardTableModRefBS::card_is_dirty_wrt_gen_iter(cv);
}
// Returns "true" iff the value "cv" may have represented a dirty card at
// some point.
virtual bool card_may_have_been_dirty(jbyte cv) {
return card_is_dirty_wrt_gen_iter(cv);
}
// The declaration order of these const fields is important; see the
// constructor before changing.
const MemRegion _whole_heap; // the region covered by the card table
@@ -174,20 +142,6 @@ class CardTableModRefBS: public ModRefBarrierSet {
return byte_for(p) + 1;
}
// Iterate over the portion of the card-table which covers the given
// region mr in the given space and apply cl to any dirty sub-regions
// of mr. Clears the dirty cards as they are processed.
void non_clean_card_iterate_possibly_parallel(Space* sp, MemRegion mr,
OopsInGenClosure* cl, CardTableRS* ct,
uint n_threads);
private:
// Work method used to implement non_clean_card_iterate_possibly_parallel()
// above in the parallel case.
void non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
OopsInGenClosure* cl, CardTableRS* ct,
uint n_threads);
protected:
// Dirty the bytes corresponding to "mr" (not all of which must be
// covered.)
@@ -197,65 +151,6 @@ class CardTableModRefBS: public ModRefBarrierSet {
// all of which must be covered.)
void clear_MemRegion(MemRegion mr);
// *** Support for parallel card scanning.
// This is an array, one element per covered region of the card table.
// Each entry is itself an array, with one element per chunk in the
// covered region. Each entry of these arrays is the lowest non-clean
// card of the corresponding chunk containing part of an object from the
// previous chunk, or else NULL.
typedef jbyte* CardPtr;
typedef CardPtr* CardArr;
CardArr* _lowest_non_clean;
size_t* _lowest_non_clean_chunk_size;
uintptr_t* _lowest_non_clean_base_chunk_index;
int* _last_LNC_resizing_collection;
// Initializes "lowest_non_clean" to point to the array for the region
// covering "sp", and "lowest_non_clean_base_chunk_index" to the chunk
// index of the corresponding to the first element of that array.
// Ensures that these arrays are of sufficient size, allocating if necessary.
// May be called by several threads concurrently.
void get_LNC_array_for_space(Space* sp,
jbyte**& lowest_non_clean,
uintptr_t& lowest_non_clean_base_chunk_index,
size_t& lowest_non_clean_chunk_size);
// Returns the number of chunks necessary to cover "mr".
size_t chunks_to_cover(MemRegion mr) {
return (size_t)(addr_to_chunk_index(mr.last()) -
addr_to_chunk_index(mr.start()) + 1);
}
// Returns the index of the chunk in a stride which
// covers the given address.
uintptr_t addr_to_chunk_index(const void* addr) {
uintptr_t card = (uintptr_t) byte_for(addr);
return card / ParGCCardsPerStrideChunk;
}
// Apply cl, which must either itself apply dcto_cl or be dcto_cl,
// to the cards in the stride (of n_strides) within the given space.
void process_stride(Space* sp,
MemRegion used,
jint stride, int n_strides,
OopsInGenClosure* cl,
CardTableRS* ct,
jbyte** lowest_non_clean,
uintptr_t lowest_non_clean_base_chunk_index,
size_t lowest_non_clean_chunk_size);
// Makes sure that chunk boundaries are handled appropriately, by
// adjusting the min_done of dcto_cl, and by using a special card-table
// value to indicate how min_done should be set.
void process_chunk_boundaries(Space* sp,
DirtyCardToOopClosure* dcto_cl,
MemRegion chunk_mr,
MemRegion used,
jbyte** lowest_non_clean,
uintptr_t lowest_non_clean_base_chunk_index,
size_t lowest_non_clean_chunk_size);
public:
// Constants
enum SomePublicConstants {
@@ -436,34 +331,5 @@ struct BarrierSet::GetName<CardTableModRefBS> {
static const BarrierSet::Name value = BarrierSet::CardTableModRef;
};
class CardTableRS;
// A specialization for the CardTableRS gen rem set.
class CardTableModRefBSForCTRS: public CardTableModRefBS {
CardTableRS* _rs;
protected:
bool card_will_be_scanned(jbyte cv);
bool card_may_have_been_dirty(jbyte cv);
public:
CardTableModRefBSForCTRS(MemRegion whole_heap) :
CardTableModRefBS(
whole_heap,
// Concrete tag should be BarrierSet::CardTableForRS.
// That will presently break things in a bunch of places though.
// The concrete tag is used as a dispatch key in many places, and
// CardTableForRS does not correctly dispatch in some of those
// uses. This will be addressed as part of a reorganization of the
// BarrierSet hierarchy.
BarrierSet::FakeRtti(BarrierSet::CardTableModRef, 0).add_tag(BarrierSet::CardTableForRS))
{}
void set_CTRS(CardTableRS* rs) { _rs = rs; }
};
template<>
struct BarrierSet::GetName<CardTableModRefBSForCTRS> {
static const BarrierSet::Name value = BarrierSet::CardTableForRS;
};
#endif // SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_HPP

View file

@@ -0,0 +1,129 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/shared/cardTableModRefBS.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "memory/allocation.inline.hpp"
#include "gc/shared/space.inline.hpp"
CardTableModRefBSForCTRS::CardTableModRefBSForCTRS(MemRegion whole_heap) :
CardTableModRefBS(
whole_heap,
// Concrete tag should be BarrierSet::CardTableForRS.
// That will presently break things in a bunch of places though.
// The concrete tag is used as a dispatch key in many places, and
// CardTableForRS does not correctly dispatch in some of those
// uses. This will be addressed as part of a reorganization of the
// BarrierSet hierarchy.
BarrierSet::FakeRtti(BarrierSet::CardTableModRef, 0).add_tag(BarrierSet::CardTableForRS)),
// LNC functionality
_lowest_non_clean(NULL),
_lowest_non_clean_chunk_size(NULL),
_lowest_non_clean_base_chunk_index(NULL),
_last_LNC_resizing_collection(NULL)
{ }
void CardTableModRefBSForCTRS::initialize() {
CardTableModRefBS::initialize();
_lowest_non_clean =
NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC);
_lowest_non_clean_chunk_size =
NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC);
_lowest_non_clean_base_chunk_index =
NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC);
_last_LNC_resizing_collection =
NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC);
if (_lowest_non_clean == NULL
|| _lowest_non_clean_chunk_size == NULL
|| _lowest_non_clean_base_chunk_index == NULL
|| _last_LNC_resizing_collection == NULL)
vm_exit_during_initialization("couldn't allocate an LNC array.");
for (int i = 0; i < _max_covered_regions; i++) {
_lowest_non_clean[i] = NULL;
_lowest_non_clean_chunk_size[i] = 0;
_last_LNC_resizing_collection[i] = -1;
}
}
CardTableModRefBSForCTRS::~CardTableModRefBSForCTRS() {
if (_lowest_non_clean) {
FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean);
_lowest_non_clean = NULL;
}
if (_lowest_non_clean_chunk_size) {
FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size);
_lowest_non_clean_chunk_size = NULL;
}
if (_lowest_non_clean_base_chunk_index) {
FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index);
_lowest_non_clean_base_chunk_index = NULL;
}
if (_last_LNC_resizing_collection) {
FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection);
_last_LNC_resizing_collection = NULL;
}
}
bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
return
card_is_dirty_wrt_gen_iter(cv) ||
_rs->is_prev_nonclean_card_val(cv);
}
bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
return
cv != clean_card &&
(card_is_dirty_wrt_gen_iter(cv) ||
CardTableRS::youngergen_may_have_been_dirty(cv));
}
void CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel(
Space* sp,
MemRegion mr,
OopsInGenClosure* cl,
CardTableRS* ct,
uint n_threads)
{
if (!mr.is_empty()) {
if (n_threads > 0) {
#if INCLUDE_ALL_GCS
non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else // INCLUDE_ALL_GCS
fatal("Parallel gc not supported here.");
#endif // INCLUDE_ALL_GCS
} else {
// clear_cl finds contiguous dirty ranges of cards to process and clear.
// This is the single-threaded version used by DefNew.
const bool parallel = false;
DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary(), parallel);
ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);
clear_cl.do_MemRegion(mr);
}
}
}

View file

@@ -0,0 +1,143 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_SHARED_CARDTABLEMODREFBSFORCTRS_HPP
#define SHARE_VM_GC_SHARED_CARDTABLEMODREFBSFORCTRS_HPP
#include "gc/shared/cardTableModRefBS.hpp"
class CardTableRS;
class DirtyCardToOopClosure;
class OopsInGenClosure;
// A specialization for the CardTableRS gen rem set.
class CardTableModRefBSForCTRS: public CardTableModRefBS {
friend class CardTableRS;
public:
CardTableModRefBSForCTRS(MemRegion whole_heap);
~CardTableModRefBSForCTRS();
virtual void initialize();
void set_CTRS(CardTableRS* rs) { _rs = rs; }
private:
CardTableRS* _rs;
// *** Support for parallel card scanning.
// dirty and precleaned are equivalent wrt younger_refs_iter.
static bool card_is_dirty_wrt_gen_iter(jbyte cv) {
return cv == dirty_card || cv == precleaned_card;
}
// Returns "true" iff the value "cv" will cause the card containing it
// to be scanned in the current traversal. May be overridden by
// subtypes.
bool card_will_be_scanned(jbyte cv);
// Returns "true" iff the value "cv" may have represented a dirty card at
// some point.
bool card_may_have_been_dirty(jbyte cv);
// Iterate over the portion of the card-table which covers the given
// region mr in the given space and apply cl to any dirty sub-regions
// of mr. Clears the dirty cards as they are processed.
void non_clean_card_iterate_possibly_parallel(Space* sp, MemRegion mr,
OopsInGenClosure* cl, CardTableRS* ct,
uint n_threads);
// Work method used to implement non_clean_card_iterate_possibly_parallel()
// above in the parallel case.
void non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
OopsInGenClosure* cl, CardTableRS* ct,
uint n_threads);
// This is an array, one element per covered region of the card table.
// Each entry is itself an array, with one element per chunk in the
// covered region. Each entry of these arrays is the lowest non-clean
// card of the corresponding chunk containing part of an object from the
// previous chunk, or else NULL.
typedef jbyte* CardPtr;
typedef CardPtr* CardArr;
CardArr* _lowest_non_clean;
size_t* _lowest_non_clean_chunk_size;
uintptr_t* _lowest_non_clean_base_chunk_index;
int* _last_LNC_resizing_collection;
// Initializes "lowest_non_clean" to point to the array for the region
// covering "sp", and "lowest_non_clean_base_chunk_index" to the chunk
// index of the corresponding to the first element of that array.
// Ensures that these arrays are of sufficient size, allocating if necessary.
// May be called by several threads concurrently.
void get_LNC_array_for_space(Space* sp,
jbyte**& lowest_non_clean,
uintptr_t& lowest_non_clean_base_chunk_index,
size_t& lowest_non_clean_chunk_size);
// Returns the number of chunks necessary to cover "mr".
size_t chunks_to_cover(MemRegion mr) {
return (size_t)(addr_to_chunk_index(mr.last()) -
addr_to_chunk_index(mr.start()) + 1);
}
// Returns the index of the chunk in a stride which
// covers the given address.
uintptr_t addr_to_chunk_index(const void* addr) {
uintptr_t card = (uintptr_t) byte_for(addr);
return card / ParGCCardsPerStrideChunk;
}
// Apply cl, which must either itself apply dcto_cl or be dcto_cl,
// to the cards in the stride (of n_strides) within the given space.
void process_stride(Space* sp,
MemRegion used,
jint stride, int n_strides,
OopsInGenClosure* cl,
CardTableRS* ct,
jbyte** lowest_non_clean,
uintptr_t lowest_non_clean_base_chunk_index,
size_t lowest_non_clean_chunk_size);
// Makes sure that chunk boundaries are handled appropriately, by
// adjusting the min_done of dcto_cl, and by using a special card-table
// value to indicate how min_done should be set.
void process_chunk_boundaries(Space* sp,
DirtyCardToOopClosure* dcto_cl,
MemRegion chunk_mr,
MemRegion used,
jbyte** lowest_non_clean,
uintptr_t lowest_non_clean_base_chunk_index,
size_t lowest_non_clean_chunk_size);
};
template<>
struct BarrierSet::GetName<CardTableModRefBSForCTRS> {
static const BarrierSet::Name value = BarrierSet::CardTableForRS;
};
#endif // include guard
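
The BarrierSet::GetName specialization kept at the end of this new header is what the tag-based pseudo-RTTI (the FakeRtti expression in the constructor, with its added CardTableForRS tag) is consulted through: callers check the tag before downcasting to the CTRS flavour. The following self-contained illustration mimics that pattern, including a checked-cast helper written in the spirit of HotSpot's barrier_set_cast; the types below are simplified stand-ins invented for the example, not the real HotSpot classes.

#include <cassert>

// Simplified stand-in: a bit-set of tags plays the role of FakeRttiSupport.
struct BarrierSet {
  enum Name { CardTableModRef, CardTableForRS };
  BarrierSet(Name concrete, unsigned extra_tags)
    : _tags(extra_tags | (1u << concrete)) {}
  virtual ~BarrierSet() {}
  bool is_a(Name n) const { return (_tags & (1u << n)) != 0; }
  template<typename T> struct GetName;   // specialized per barrier-set class
private:
  unsigned _tags;
};

struct CardTableModRefBSForCTRS : public BarrierSet {
  CardTableModRefBSForCTRS()
    // Mirrors the FakeRtti expression in the constructor above: the concrete
    // tag stays CardTableModRef for now, and CardTableForRS is added on top.
    : BarrierSet(CardTableModRef, 1u << CardTableForRS) {}
};

template<>
struct BarrierSet::GetName<CardTableModRefBSForCTRS> {
  static const BarrierSet::Name value = BarrierSet::CardTableForRS;
};

// A checked downcast in the spirit of HotSpot's barrier_set_cast helper.
template<typename T>
T* barrier_set_cast(BarrierSet* bs) {
  assert(bs->is_a(BarrierSet::GetName<T>::value) && "wrong barrier set type");
  return static_cast<T*>(bs);
}

int main() {
  CardTableModRefBSForCTRS ctrs_bs;
  BarrierSet* bs = &ctrs_bs;
  // Succeeds because the CardTableForRS tag was added in the constructor.
  CardTableModRefBSForCTRS* p = barrier_set_cast<CardTableModRefBSForCTRS>(bs);
  (void)p;
  return 0;
}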

View file

@@ -240,7 +240,7 @@ void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
// cur-younger-gen ==> cur_younger_gen
// cur_youngergen_and_prev_nonclean_card ==> no change.
void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
jbyte* entry = ct_bs()->byte_for(field);
jbyte* entry = _ct_bs->byte_for(field);
do {
jbyte entry_val = *entry;
// We put this first because it's probably the most common case.
@@ -398,10 +398,10 @@ void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
jbyte* cur_entry = byte_for(used.start());
jbyte* limit = byte_after(used.last());
while (cur_entry < limit) {
if (*cur_entry == CardTableModRefBS::clean_card) {
if (*cur_entry == clean_card_val()) {
jbyte* first_dirty = cur_entry+1;
while (first_dirty < limit &&
*first_dirty == CardTableModRefBS::clean_card) {
*first_dirty == clean_card_val()) {
first_dirty++;
}
// If the first object is a regular object, and it has a
@@ -418,7 +418,7 @@ void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
!boundary_obj->is_typeArray()) {
guarantee(cur_entry > byte_for(used.start()),
"else boundary would be boundary_block");
if (*byte_for(boundary_block) != CardTableModRefBS::clean_card) {
if (*byte_for(boundary_block) != clean_card_val()) {
begin = boundary_block + s->block_size(boundary_block);
start_block = begin;
}

View file

@@ -25,7 +25,7 @@
#ifndef SHARE_VM_GC_SHARED_CARDTABLERS_HPP
#define SHARE_VM_GC_SHARED_CARDTABLERS_HPP
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/cardTableModRefBSForCTRS.hpp"
#include "gc/shared/genRemSet.hpp"
#include "memory/memRegion.hpp"
@@ -42,16 +42,16 @@ class CardTableRS: public GenRemSet {
friend class ClearNoncleanCardWrapper;
static jbyte clean_card_val() {
return CardTableModRefBS::clean_card;
return CardTableModRefBSForCTRS::clean_card;
}
static intptr_t clean_card_row() {
return CardTableModRefBS::clean_card_row;
return CardTableModRefBSForCTRS::clean_card_row;
}
static bool
card_is_dirty_wrt_gen_iter(jbyte cv) {
return CardTableModRefBS::card_is_dirty_wrt_gen_iter(cv);
return CardTableModRefBSForCTRS::card_is_dirty_wrt_gen_iter(cv);
}
CardTableModRefBSForCTRS* _ct_bs;
@@ -61,17 +61,17 @@ class CardTableRS: public GenRemSet {
void verify_space(Space* s, HeapWord* gen_start);
enum ExtendedCardValue {
youngergen_card = CardTableModRefBS::CT_MR_BS_last_reserved + 1,
youngergen_card = CardTableModRefBSForCTRS::CT_MR_BS_last_reserved + 1,
// These are for parallel collection.
// There are three P (parallel) youngergen card values. In general, this
// needs to be more than the number of generations (including the perm
// gen) that might have younger_refs_do invoked on them separately. So
// if we add more gens, we have to add more values.
youngergenP1_card = CardTableModRefBS::CT_MR_BS_last_reserved + 2,
youngergenP2_card = CardTableModRefBS::CT_MR_BS_last_reserved + 3,
youngergenP3_card = CardTableModRefBS::CT_MR_BS_last_reserved + 4,
youngergenP1_card = CardTableModRefBSForCTRS::CT_MR_BS_last_reserved + 2,
youngergenP2_card = CardTableModRefBSForCTRS::CT_MR_BS_last_reserved + 3,
youngergenP3_card = CardTableModRefBSForCTRS::CT_MR_BS_last_reserved + 4,
cur_youngergen_and_prev_nonclean_card =
CardTableModRefBS::CT_MR_BS_last_reserved + 5
CardTableModRefBSForCTRS::CT_MR_BS_last_reserved + 5
};
// An array that contains, for each generation, the card table value last
@@ -107,7 +107,7 @@ public:
// *** GenRemSet functions.
CardTableRS* as_CardTableRS() { return this; }
CardTableModRefBS* ct_bs() { return _ct_bs; }
CardTableModRefBSForCTRS* ct_bs() { return _ct_bs; }
// Override.
void prepare_for_younger_refs_iterate(bool parallel);
@@ -147,7 +147,7 @@ public:
void invalidate_or_clear(Generation* old_gen);
static uintx ct_max_alignment_constraint() {
return CardTableModRefBS::ct_max_alignment_constraint();
return CardTableModRefBSForCTRS::ct_max_alignment_constraint();
}
jbyte* byte_for(void* p) { return _ct_bs->byte_for(p); }

View file

@@ -25,6 +25,8 @@
#include "precompiled.hpp"
#include "ci/ciMethod.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "runtime/os.hpp"
@@ -442,7 +444,7 @@ void SharkBuilder::CreateUpdateBarrierSet(BarrierSet* bs, Value* field) {
Unimplemented();
CreateStore(
LLVMValue::jbyte_constant(CardTableModRefBS::dirty_card),
LLVMValue::jbyte_constant(CardTableModRefBS::dirty_card_val()),
CreateIntToPtr(
CreateAdd(
LLVMValue::intptr_constant(

View file

@@ -27,8 +27,6 @@
#define SHARE_VM_SHARK_SHARKBUILDER_HPP
#include "ci/ciType.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "shark/llvmHeaders.hpp"
#include "shark/llvmValue.hpp"
#include "shark/sharkCodeBuffer.hpp"
@@ -38,6 +36,8 @@
#include "utilities/debug.hpp"
#include "utilities/sizes.hpp"
class BarrierSet;
class SharkBuilder : public llvm::IRBuilder<> {
friend class SharkCompileInvariants;