6819085: G1: use larger and/or user settable region size

Instead of the region size being hard-coded, allow the user to set it.

Reviewed-by: jmasa, johnc, apetrusenko

commit 243367ec16 (parent c6763b5bad)
11 changed files with 125 additions and 41 deletions
g1CollectedHeap.cpp:

@@ -25,6 +25,8 @@
 #include "incls/_precompiled.incl"
 #include "incls/_g1CollectedHeap.cpp.incl"

+size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
+
 // turn it on so that the contents of the young list (scan-only /
 // to-be-collected) are printed at "strategic" points before / during
 // / after the collection --- this is useful for debugging

@@ -1394,6 +1396,9 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
     vm_exit_during_initialization("Failed necessary allocation.");
   }
+
+  _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
+
   int n_queues = MAX2((int)ParallelGCThreads, 1);
   _task_queues = new RefToScanQueueSet(n_queues);

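With the region size now chosen at startup, the humongous threshold is derived
from it here rather than from a compile-time constant: any allocation of at
least half a region (GrainWords / 2) is classified as humongous. A minimal
standalone sketch of that relationship (illustrative only, not HotSpot code;
the 8-byte heap word is an assumption matching a 64-bit build):

    #include <cstddef>

    static const size_t kHeapWordSize = 8;            // assumption: 64-bit heap words
    static size_t grain_words = 0;                    // region size in words, set once
    static size_t humongous_threshold_in_words = 0;

    void setup(size_t region_size_bytes) {
      grain_words = region_size_bytes / kHeapWordSize;
      humongous_threshold_in_words = grain_words / 2; // half a region
    }

    bool is_humongous(size_t word_size) {
      // Mirrors the shape of G1CollectedHeap::isHumongous() after this change.
      return word_size >= humongous_threshold_in_words;
    }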
g1CollectedHeap.cpp (continued):

@@ -1546,9 +1551,10 @@ jint G1CollectedHeap::initialize() {
   const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");

-  const size_t cards_per_region = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift;
   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
-  guarantee(cards_per_region < max_cards_per_region, "too many cards per region");
+  guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
+  guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region,
+            "too many cards per region");

   _bot_shared = new G1BlockOffsetSharedArray(_reserved,
                                              heap_word_size(init_byte_size));
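The two guarantees above bound the runtime-chosen region size against the
widths of the RegionIdx_t and CardIdx_t typedefs. A rough worked example
(assuming 512-byte cards, i.e. card_shift == 9, and a 4-byte CardIdx_t; both
are assumptions here, not part of the patch):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t card_shift = 9;  // 512-byte cards (assumed)
      // Mirrors ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1 for a 4-byte type.
      const size_t max_cards = ((size_t)1 << (sizeof(int) * 8 - 1)) - 1;
      for (size_t region = (size_t)1 << 20; region <= (size_t)32 << 20; region <<= 1) {
        // 1 MB regions -> 2048 cards ... 32 MB regions -> 65536 cards,
        // all far below the ~2^31 bound that the guarantee checks.
        printf("%9zu bytes -> %6zu cards (bound %zu)\n",
               region, region >> card_shift, max_cards);
      }
      return 0;
    }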
g1CollectedHeap.hpp:

@@ -167,16 +167,11 @@ class G1CollectedHeap : public SharedHeap {
   friend class G1MarkSweep;

 private:
-  enum SomePrivateConstants {
-    VeryLargeInBytes = HeapRegion::GrainBytes/2,
-    VeryLargeInWords = VeryLargeInBytes/HeapWordSize,
-    MinHeapDeltaBytes = 10 * HeapRegion::GrainBytes, // FIXME
-    NumAPIs = HeapRegion::MaxAge
-  };
-
   // The one and only G1CollectedHeap, so static functions can find it.
   static G1CollectedHeap* _g1h;

+  static size_t _humongous_object_threshold_in_words;
+
   // Storage for the G1 heap (excludes the permanent generation).
   VirtualSpace _g1_storage;
   MemRegion _g1_reserved;

@@ -1021,7 +1016,7 @@ public:

   // Returns "true" iff the given word_size is "very large".
   static bool isHumongous(size_t word_size) {
-    return word_size >= VeryLargeInWords;
+    return word_size >= _humongous_object_threshold_in_words;
   }

   // Update mod union table with the set of dirty cards.
g1CollectorPolicy.cpp:

@@ -201,6 +201,11 @@ G1CollectorPolicy::G1CollectorPolicy() :
   _survivors_age_table(true)

 {
+  // Set up the region size and associated fields. Given that the
+  // policy is created before the heap, we have to set this up here,
+  // so it's done as soon as possible.
+  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
+
   _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
   _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

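The policy object is constructed before the heap itself, so this is the
earliest point at which the region-size globals can be filled in; after that
they are treated as constants for the rest of the run, enforced by the
guarantee(... == 0) checks in setup_heap_region_size() shown below under
heapRegion.cpp. A minimal sketch of that set-once idiom (illustrative, not
HotSpot code):

    #include <cassert>

    static int log_of_grain_bytes = 0;   // 0 doubles as "not yet initialized"

    void set_grain_bytes_log(int v) {
      // Set-once: initialization happens exactly one time, early in startup.
      assert(log_of_grain_bytes == 0 && "we should only set it once");
      log_of_grain_bytes = v;            // constant from here on
    }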
g1CollectorPolicy.hpp:

@@ -92,9 +92,7 @@ protected:
   int _parallel_gc_threads;

   enum SomePrivateConstants {
-    NumPrevPausesForHeuristics = 10,
-    NumPrevGCsForHeuristics = 10,
-    NumAPIs = HeapRegion::MaxAge
+    NumPrevPausesForHeuristics = 10
   };

   G1MMUTracker* _mmu_tracker;
g1_globals.hpp:

@@ -37,7 +37,7 @@
   develop(intx, G1MarkingOverheadPercent, 0,                                \
           "Overhead of concurrent marking")                                 \
                                                                             \
-  product(intx, G1YoungGenSize, 0,                                          \
+  product(uintx, G1YoungGenSize, 0,                                         \
           "Size of the G1 young generation, 0 is the adaptive policy")      \
                                                                             \
   develop(bool, G1Gen, true,                                                \

@@ -246,6 +246,9 @@
           "If non-0 is the size of the G1 survivor space, "                 \
           "otherwise SurvivorRatio is used to determine the size")          \
                                                                             \
+  product(uintx, G1HeapRegionSize, 0,                                       \
+          "Size of the G1 regions.")                                        \
+                                                                            \
   experimental(bool, G1ParallelRSetUpdatingEnabled, false,                  \
           "Enables the parallelization of remembered set updating "         \
           "during evacuation pauses")                                       \
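With the new G1HeapRegionSize flag, a user can pin the region size instead of
relying on the automatic choice; leaving it at its default of 0 keeps the
automatic calculation. A hypothetical invocation (the flag spelling comes from
this patch; whether G1 itself needs additional enabling flags depends on the
JDK build of this era):

    java -XX:+UseG1GC -XX:G1HeapRegionSize=4m -Xms1g -Xmx1g MyApp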
heapRegion.cpp:

@@ -25,6 +25,12 @@
 #include "incls/_precompiled.incl"
 #include "incls/_heapRegion.cpp.incl"

+int HeapRegion::LogOfHRGrainBytes = 0;
+int HeapRegion::LogOfHRGrainWords = 0;
+int HeapRegion::GrainBytes        = 0;
+int HeapRegion::GrainWords        = 0;
+int HeapRegion::CardsPerRegion    = 0;
+
 HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                  HeapRegion* hr, OopClosure* cl,
                                  CardTableModRefBS::PrecisionStyle precision,

@@ -231,6 +237,73 @@ void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
     }
   }

+// Minimum region size; we won't go lower than that.
+// We might want to decrease this in the future, to deal with small
+// heaps a bit more efficiently.
+#define MIN_REGION_SIZE     (      1024 * 1024 )
+
+// Maximum region size; we don't go higher than that. There's a good
+// reason for having an upper bound. We don't want regions to get too
+// large, otherwise cleanup's effectiveness would decrease as there
+// will be fewer opportunities to find totally empty regions after
+// marking.
+#define MAX_REGION_SIZE     ( 32 * 1024 * 1024 )
+
+// The automatic region size calculation will try to have around this
+// many regions in the heap (based on the min heap size).
+#define TARGET_REGION_NUMBER          2048
+
+void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
+  // region_size in bytes
+  uintx region_size = G1HeapRegionSize;
+  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
+    // We base the automatic calculation on the min heap size. This
+    // can be problematic if the spread between min and max is quite
+    // wide, imagine -Xms128m -Xmx32g. But, if we decided it based on
+    // the max size, the region size might be way too large for the
+    // min size. Either way, some users might have to set the region
+    // size manually for some -Xms / -Xmx combos.
+
+    region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
+                       (uintx) MIN_REGION_SIZE);
+  }
+
+  int region_size_log = log2_long((jlong) region_size);
+  // Recalculate the region size to make sure it's a power of
+  // 2. This means that region_size is the largest power of 2 that's
+  // <= what we've calculated so far.
+  region_size = 1 << region_size_log;
+
+  // Now make sure that we don't go over or under our limits.
+  if (region_size < MIN_REGION_SIZE) {
+    region_size = MIN_REGION_SIZE;
+  } else if (region_size > MAX_REGION_SIZE) {
+    region_size = MAX_REGION_SIZE;
+  }
+
+  // And recalculate the log.
+  region_size_log = log2_long((jlong) region_size);
+
+  // Now, set up the globals.
+  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
+  LogOfHRGrainBytes = region_size_log;
+
+  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
+  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;
+
+  guarantee(GrainBytes == 0, "we should only set it once");
+  // The cast to int is safe, given that we've bounded region_size by
+  // MIN_REGION_SIZE and MAX_REGION_SIZE.
+  GrainBytes = (int) region_size;
+
+  guarantee(GrainWords == 0, "we should only set it once");
+  GrainWords = GrainBytes >> LogHeapWordSize;
+  guarantee(1 << LogOfHRGrainWords == GrainWords, "sanity");
+
+  guarantee(CardsPerRegion == 0, "we should only set it once");
+  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
+}
+
 void HeapRegion::reset_after_compaction() {
   G1OffsetTableContigSpace::reset_after_compaction();
   // After a compaction the mark bitmap is invalid, so we must
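To see what the automatic calculation above produces, here is a standalone
sketch of the same arithmetic for a few sample heap sizes (illustrative C++
only: log2_long() is replaced by a simple loop, MAX2 is inlined, and the
constants mirror the #defines above):

    #include <cstdio>

    typedef unsigned long long uintx;     // stand-in for HotSpot's uintx

    static int log2_floor(uintx v) {      // replaces log2_long()
      int r = -1;
      while (v != 0) { v >>= 1; r++; }
      return r;
    }

    uintx pick_region_size(uintx min_heap_size, uintx explicit_size) {
      const uintx MIN_RS = 1024 * 1024;          // MIN_REGION_SIZE
      const uintx MAX_RS = 32 * 1024 * 1024;     // MAX_REGION_SIZE
      const uintx TARGET = 2048;                 // TARGET_REGION_NUMBER
      uintx rs = explicit_size;                  // G1HeapRegionSize; 0 = automatic
      if (rs == 0) {
        uintx auto_rs = min_heap_size / TARGET;
        rs = (auto_rs > MIN_RS) ? auto_rs : MIN_RS;
      }
      rs = (uintx)1 << log2_floor(rs);           // largest power of 2 <= rs
      if (rs < MIN_RS) rs = MIN_RS;
      if (rs > MAX_RS) rs = MAX_RS;
      return rs;
    }

    int main() {
      printf("%llu\n", pick_region_size(128ULL << 20, 0));  // -Xms128m  -> 1 MB
      printf("%llu\n", pick_region_size(8ULL << 30, 0));    // -Xms8g    -> 4 MB
      printf("%llu\n", pick_region_size(64ULL << 30, 0));   // -Xms64g   -> 32 MB (cap)
      printf("%llu\n", pick_region_size(0, 5ULL << 20));    // 5 MB flag -> 4 MB
      return 0;
    }

Note that the rounding is downward: an explicit G1HeapRegionSize that is not a
power of two is shrunk to the next power of two below it, then clamped to the
1 MB .. 32 MB range.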
heapRegion.hpp:

@@ -297,15 +297,24 @@ class HeapRegion: public G1OffsetTableContigSpace {
   HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
              MemRegion mr, bool is_zeroed);

-  enum SomePublicConstants {
-    // HeapRegions are GrainBytes-aligned
-    // and have sizes that are multiples of GrainBytes.
-    LogOfHRGrainBytes = 20,
-    LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize,
-    GrainBytes = 1 << LogOfHRGrainBytes,
-    GrainWords = 1 << LogOfHRGrainWords,
-    MaxAge = 2, NoOfAges = MaxAge+1
-  };
+  static int LogOfHRGrainBytes;
+  static int LogOfHRGrainWords;
+  // The normal type of these should be size_t. However, they used to
+  // be members of an enum before and they are assumed by the
+  // compilers to be ints. To avoid going and fixing all their uses,
+  // I'm declaring them as ints. I'm not anticipating heap region
+  // sizes to reach anywhere near 2g, so using an int here is safe.
+  static int GrainBytes;
+  static int GrainWords;
+  static int CardsPerRegion;
+
+  // It sets up the heap region size (GrainBytes / GrainWords), as
+  // well as other related fields that are based on the heap region
+  // size (LogOfHRGrainBytes / LogOfHRGrainWords /
+  // CardsPerRegion). All those fields are considered constant
+  // throughout the JVM's execution, therefore they should only be set
+  // up once during initialization time.
+  static void setup_heap_region_size(uintx min_heap_size);

   enum ClaimValues {
     InitialClaimValue = 0,
heapRegionRemSet.cpp:

@@ -57,10 +57,6 @@ class PerRegionTable: public CHeapObj {

 #endif // _MSC_VER

-  enum SomePrivateConstants {
-    CardsPerRegion = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift
-  };
-
 protected:
   // We need access in order to union things into the base table.
   BitMap* bm() { return &_bm; }

@@ -76,7 +72,7 @@ protected:
 #if PRT_COUNT_OCCUPIED
     _occupied(0),
 #endif
-    _bm(CardsPerRegion, false /* in-resource-area */)
+    _bm(HeapRegion::CardsPerRegion, false /* in-resource-area */)
   {}

   static void free(PerRegionTable* prt) {

@@ -144,7 +140,8 @@ protected:
       CardIdx_t from_card = (CardIdx_t)
           hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);

-      assert(0 <= from_card && from_card < CardsPerRegion, "Must be in range.");
+      assert(0 <= from_card && from_card < HeapRegion::CardsPerRegion,
+             "Must be in range.");
       add_card_work(from_card, par);
     }
   }

@@ -631,7 +628,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
     uintptr_t(from_hr->bottom())
       >> CardTableModRefBS::card_shift;
   CardIdx_t card_index = from_card - from_hr_bot_card_index;
-  assert(0 <= card_index && card_index < PosParPRT::CardsPerRegion,
+  assert(0 <= card_index && card_index < HeapRegion::CardsPerRegion,
          "Must be in range.");
   if (G1HRRSUseSparseTable &&
       _sparse_table.add_card(from_hrs_ind, card_index)) {

@@ -922,7 +919,7 @@ size_t OtherRegionsTable::occ_fine() const {
 }

 size_t OtherRegionsTable::occ_coarse() const {
-  return (_n_coarse_entries * PosParPRT::CardsPerRegion);
+  return (_n_coarse_entries * HeapRegion::CardsPerRegion);
 }

 size_t OtherRegionsTable::occ_sparse() const {

@@ -1049,7 +1046,8 @@ bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const
     uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
   assert(from_card >= hr_bot_card_index, "Inv");
   CardIdx_t card_index = from_card - hr_bot_card_index;
-  assert(0 <= card_index && card_index < PosParPRT::CardsPerRegion, "Must be in range.");
+  assert(0 <= card_index && card_index < HeapRegion::CardsPerRegion,
+         "Must be in range.");
   return _sparse_table.contains_card(hr_ind, card_index);
 }

@@ -1176,7 +1174,7 @@ void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) {
   _is = Sparse;
   // Set these values so that we increment to the first region.
   _coarse_cur_region_index = -1;
-  _coarse_cur_region_cur_card = (PosParPRT::CardsPerRegion-1);;
+  _coarse_cur_region_cur_card = (HeapRegion::CardsPerRegion-1);;

   _cur_region_cur_card = 0;

@@ -1195,7 +1193,7 @@ bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
   // Go to the next card.
   _coarse_cur_region_cur_card++;
   // Was the last the last card in the current region?
-  if (_coarse_cur_region_cur_card == PosParPRT::CardsPerRegion) {
+  if (_coarse_cur_region_cur_card == HeapRegion::CardsPerRegion) {
     // Yes: find the next region. This may leave _coarse_cur_region_index
     // Set to the last index, in which case there are no more coarse
     // regions.

@@ -1232,7 +1230,7 @@ bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
       _fine_cur_prt->_bm.get_next_one_offset(_cur_region_cur_card + 1);
   }
   while (!fine_has_next()) {
-    if (_cur_region_cur_card == PosParPRT::CardsPerRegion) {
+    if (_cur_region_cur_card == (size_t) HeapRegion::CardsPerRegion) {
       _cur_region_cur_card = 0;
       _fine_cur_prt = _fine_cur_prt->next();
     }

@@ -1255,7 +1253,7 @@ bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
 bool HeapRegionRemSetIterator::fine_has_next() {
   return
     _fine_cur_prt != NULL &&
-    _cur_region_cur_card < PosParPRT::CardsPerRegion;
+    _cur_region_cur_card < (size_t) HeapRegion::CardsPerRegion;
 }

 bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
sparsePRT.cpp:

@@ -347,7 +347,7 @@ CardIdx_t /* RSHashTable:: */ RSHashTableIter::find_first_card_in_list() {
 size_t /* RSHashTable:: */ RSHashTableIter::compute_card_ind(CardIdx_t ci) {
   return
     _heap_bot_card_ind
-    + (_rsht->entry(_bl_ind)->r_ind() * CardsPerRegion)
+    + (_rsht->entry(_bl_ind)->r_ind() * HeapRegion::CardsPerRegion)
     + ci;
 }

sparsePRT.hpp:

@@ -172,10 +172,6 @@ class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
   RSHashTable* _rsht;
   size_t _heap_bot_card_ind;

-  enum SomePrivateConstants {
-    CardsPerRegion = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift
-  };
-
   // If the bucket list pointed to by _bl_ind contains a card, sets
   // _bl_ind to the index of that entry, and returns the card.
   // Otherwise, returns SparseEntry::NullEntry.
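The remembered-set changes above (heapRegionRemSet.cpp, sparsePRT.cpp,
sparsePRT.hpp) are all the same mechanical substitution: each class previously
baked CardsPerRegion into a private enum computed from the compile-time
GrainBytes, and since GrainBytes is now chosen at startup they all read the
single runtime value HeapRegion::CardsPerRegion instead. A before/after sketch
of the pattern (illustrative names, not HotSpot code):

    #include <cstddef>

    // Old: each class carried its own compile-time copy.
    // enum SomePrivateConstants {
    //   CardsPerRegion = GrainBytes >> card_shift   // GrainBytes was a constant
    // };

    // New: one runtime-initialized value, read by everyone.
    struct HeapRegionLike {
      static int CardsPerRegion;                     // set once at startup
    };
    int HeapRegionLike::CardsPerRegion = 0;

    // e.g. the occ_coarse() change now multiplies by the shared value:
    size_t occ_coarse_like(size_t n_coarse_entries) {
      return n_coarse_entries * (size_t)HeapRegionLike::CardsPerRegion;
    }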
includeDB_gc_g1:

@@ -165,6 +165,7 @@ g1CollectedHeap.inline.hpp g1CollectedHeap.hpp
 g1CollectedHeap.inline.hpp              heapRegionSeq.hpp
 g1CollectedHeap.inline.hpp              taskqueue.hpp

+g1CollectorPolicy.cpp                   arguments.hpp
 g1CollectorPolicy.cpp                   concurrentG1Refine.hpp
 g1CollectorPolicy.cpp                   concurrentMark.hpp
 g1CollectorPolicy.cpp                   concurrentMarkThread.inline.hpp
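The added includeDB line records the new build dependency: g1CollectorPolicy.cpp
now calls Arguments::min_heap_size(), so it must include arguments.hpp.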