7157073: G1: type change size_t -> uint for region counts / indexes

Change the type of fields / variables / etc. that represent region counts and indices from size_t to uint.

Reviewed-by: iveresov, brutisso, jmasa, jwilhelm
This commit is contained in:
Antonios Printezis 2012-04-18 07:21:15 -04:00
parent 99802ac63d
commit 01ea4199c7
28 changed files with 432 additions and 440 deletions

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -42,7 +42,7 @@ import sun.jvm.hotspot.types.TypeDataBase;
public class HeapRegionSeq extends VMObject { public class HeapRegionSeq extends VMObject {
// HeapRegion** _regions; // HeapRegion** _regions;
static private AddressField regionsField; static private AddressField regionsField;
// size_t _length; // uint _length;
static private CIntegerField lengthField; static private CIntegerField lengthField;
static { static {

View file

@ -40,9 +40,9 @@ import sun.jvm.hotspot.types.TypeDataBase;
// Mirror class for HeapRegionSetBase. Represents a group of regions. // Mirror class for HeapRegionSetBase. Represents a group of regions.
public class HeapRegionSetBase extends VMObject { public class HeapRegionSetBase extends VMObject {
// size_t _length; // uint _length;
static private CIntegerField lengthField; static private CIntegerField lengthField;
// size_t _region_num; // uint _region_num;
static private CIntegerField regionNumField; static private CIntegerField regionNumField;
// size_t _total_used_bytes; // size_t _total_used_bytes;
static private CIntegerField totalUsedBytesField; static private CIntegerField totalUsedBytesField;

View file

@ -273,7 +273,7 @@ void CollectionSetChooser::sortMarkedHeapRegions() {
assert(verify(), "CSet chooser verification"); assert(verify(), "CSet chooser verification");
} }
size_t CollectionSetChooser::calcMinOldCSetLength() { uint CollectionSetChooser::calcMinOldCSetLength() {
// The min old CSet region bound is based on the maximum desired // The min old CSet region bound is based on the maximum desired
// number of mixed GCs after a cycle. I.e., even if some old regions // number of mixed GCs after a cycle. I.e., even if some old regions
// look expensive, we should add them to the CSet anyway to make // look expensive, we should add them to the CSet anyway to make
@ -291,10 +291,10 @@ size_t CollectionSetChooser::calcMinOldCSetLength() {
if (result * gc_num < region_num) { if (result * gc_num < region_num) {
result += 1; result += 1;
} }
return result; return (uint) result;
} }
size_t CollectionSetChooser::calcMaxOldCSetLength() { uint CollectionSetChooser::calcMaxOldCSetLength() {
// The max old CSet region bound is based on the threshold expressed // The max old CSet region bound is based on the threshold expressed
// as a percentage of the heap size. I.e., it should bound the // as a percentage of the heap size. I.e., it should bound the
// number of old regions added to the CSet irrespective of how many // number of old regions added to the CSet irrespective of how many
@ -308,7 +308,7 @@ size_t CollectionSetChooser::calcMaxOldCSetLength() {
if (100 * result < region_num * perc) { if (100 * result < region_num * perc) {
result += 1; result += 1;
} }
return result; return (uint) result;
} }
void CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) { void CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) {
@ -321,10 +321,10 @@ void CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) {
hr->calc_gc_efficiency(); hr->calc_gc_efficiency();
} }
void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(size_t n_regions, void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(uint n_regions,
size_t chunkSize) { uint chunkSize) {
_first_par_unreserved_idx = 0; _first_par_unreserved_idx = 0;
int n_threads = ParallelGCThreads; uint n_threads = (uint) ParallelGCThreads;
if (UseDynamicNumberOfGCThreads) { if (UseDynamicNumberOfGCThreads) {
assert(G1CollectedHeap::heap()->workers()->active_workers() > 0, assert(G1CollectedHeap::heap()->workers()->active_workers() > 0,
"Should have been set earlier"); "Should have been set earlier");
@ -335,12 +335,11 @@ void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(size_t n_regions,
n_threads = MAX2(G1CollectedHeap::heap()->workers()->active_workers(), n_threads = MAX2(G1CollectedHeap::heap()->workers()->active_workers(),
1U); 1U);
} }
size_t max_waste = n_threads * chunkSize; uint max_waste = n_threads * chunkSize;
// it should be aligned with respect to chunkSize // it should be aligned with respect to chunkSize
size_t aligned_n_regions = uint aligned_n_regions = (n_regions + chunkSize - 1) / chunkSize * chunkSize;
(n_regions + (chunkSize - 1)) / chunkSize * chunkSize; assert(aligned_n_regions % chunkSize == 0, "should be aligned");
assert( aligned_n_regions % chunkSize == 0, "should be aligned" ); _markedRegions.at_put_grow((int) (aligned_n_regions + max_waste - 1), NULL);
_markedRegions.at_put_grow((int)(aligned_n_regions + max_waste - 1), NULL);
} }
jint CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) { jint CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {

View file

@ -150,18 +150,18 @@ public:
// Calculate the minimum number of old regions we'll add to the CSet // Calculate the minimum number of old regions we'll add to the CSet
// during a mixed GC. // during a mixed GC.
size_t calcMinOldCSetLength(); uint calcMinOldCSetLength();
// Calculate the maximum number of old regions we'll add to the CSet // Calculate the maximum number of old regions we'll add to the CSet
// during a mixed GC. // during a mixed GC.
size_t calcMaxOldCSetLength(); uint calcMaxOldCSetLength();
// Serial version. // Serial version.
void addMarkedHeapRegion(HeapRegion *hr); void addMarkedHeapRegion(HeapRegion *hr);
// Must be called before calls to getParMarkedHeapRegionChunk. // Must be called before calls to getParMarkedHeapRegionChunk.
// "n_regions" is the number of regions, "chunkSize" the chunk size. // "n_regions" is the number of regions, "chunkSize" the chunk size.
void prepareForAddMarkedHeapRegionsPar(size_t n_regions, size_t chunkSize); void prepareForAddMarkedHeapRegionsPar(uint n_regions, uint chunkSize);
// Returns the first index in a contiguous chunk of "n_regions" indexes // Returns the first index in a contiguous chunk of "n_regions" indexes
// that the calling thread has reserved. These must be set by the // that the calling thread has reserved. These must be set by the
// calling thread using "setMarkedHeapRegion" (to NULL if necessary). // calling thread using "setMarkedHeapRegion" (to NULL if necessary).
@ -176,7 +176,7 @@ public:
void clearMarkedHeapRegions(); void clearMarkedHeapRegions();
// Return the number of candidate regions that remain to be collected. // Return the number of candidate regions that remain to be collected.
size_t remainingRegions() { return _length - _curr_index; } uint remainingRegions() { return (uint) (_length - _curr_index); }
// Determine whether the CSet chooser has more candidate regions or not. // Determine whether the CSet chooser has more candidate regions or not.
bool isEmpty() { return remainingRegions() == 0; } bool isEmpty() { return remainingRegions() == 0; }

View file

@ -403,8 +403,7 @@ uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
return MAX2((n_par_threads + 2) / 4, 1U); return MAX2((n_par_threads + 2) / 4, 1U);
} }
ConcurrentMark::ConcurrentMark(ReservedSpace rs, ConcurrentMark::ConcurrentMark(ReservedSpace rs, uint max_regions) :
int max_regions) :
_markBitMap1(rs, MinObjAlignment - 1), _markBitMap1(rs, MinObjAlignment - 1),
_markBitMap2(rs, MinObjAlignment - 1), _markBitMap2(rs, MinObjAlignment - 1),
@ -415,7 +414,7 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
_cleanup_sleep_factor(0.0), _cleanup_sleep_factor(0.0),
_cleanup_task_overhead(1.0), _cleanup_task_overhead(1.0),
_cleanup_list("Cleanup List"), _cleanup_list("Cleanup List"),
_region_bm(max_regions, false /* in_resource_area*/), _region_bm((BitMap::idx_t) max_regions, false /* in_resource_area*/),
_card_bm((rs.size() + CardTableModRefBS::card_size - 1) >> _card_bm((rs.size() + CardTableModRefBS::card_size - 1) >>
CardTableModRefBS::card_shift, CardTableModRefBS::card_shift,
false /* in_resource_area*/), false /* in_resource_area*/),
@ -497,7 +496,7 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
_task_queues->register_queue(i, task_queue); _task_queues->register_queue(i, task_queue);
_count_card_bitmaps[i] = BitMap(card_bm_size, false); _count_card_bitmaps[i] = BitMap(card_bm_size, false);
_count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions); _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, (size_t) max_regions);
_tasks[i] = new CMTask(i, this, _tasks[i] = new CMTask(i, this,
_count_marked_bytes[i], _count_marked_bytes[i],
@ -1228,18 +1227,17 @@ public:
void set_bit_for_region(HeapRegion* hr) { void set_bit_for_region(HeapRegion* hr) {
assert(!hr->continuesHumongous(), "should have filtered those out"); assert(!hr->continuesHumongous(), "should have filtered those out");
size_t index = hr->hrs_index(); BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
if (!hr->startsHumongous()) { if (!hr->startsHumongous()) {
// Normal (non-humongous) case: just set the bit. // Normal (non-humongous) case: just set the bit.
_region_bm->par_at_put((BitMap::idx_t) index, true); _region_bm->par_at_put(index, true);
} else { } else {
// Starts humongous case: calculate how many regions are part of // Starts humongous case: calculate how many regions are part of
// this humongous region and then set the bit range. // this humongous region and then set the bit range.
G1CollectedHeap* g1h = G1CollectedHeap::heap(); G1CollectedHeap* g1h = G1CollectedHeap::heap();
HeapRegion *last_hr = g1h->heap_region_containing_raw(hr->end() - 1); HeapRegion *last_hr = g1h->heap_region_containing_raw(hr->end() - 1);
size_t end_index = last_hr->hrs_index() + 1; BitMap::idx_t end_index = (BitMap::idx_t) last_hr->hrs_index() + 1;
_region_bm->par_at_put_range((BitMap::idx_t) index, _region_bm->par_at_put_range(index, end_index, true);
(BitMap::idx_t) end_index, true);
} }
} }
@ -1418,7 +1416,7 @@ public:
// Verify that _top_at_conc_count == ntams // Verify that _top_at_conc_count == ntams
if (hr->top_at_conc_mark_count() != hr->next_top_at_mark_start()) { if (hr->top_at_conc_mark_count() != hr->next_top_at_mark_start()) {
if (_verbose) { if (_verbose) {
gclog_or_tty->print_cr("Region " SIZE_FORMAT ": top at conc count incorrect: " gclog_or_tty->print_cr("Region %u: top at conc count incorrect: "
"expected " PTR_FORMAT ", actual: " PTR_FORMAT, "expected " PTR_FORMAT ", actual: " PTR_FORMAT,
hr->hrs_index(), hr->next_top_at_mark_start(), hr->hrs_index(), hr->next_top_at_mark_start(),
hr->top_at_conc_mark_count()); hr->top_at_conc_mark_count());
@ -1434,7 +1432,7 @@ public:
// we have missed accounting some objects during the actual marking. // we have missed accounting some objects during the actual marking.
if (exp_marked_bytes > act_marked_bytes) { if (exp_marked_bytes > act_marked_bytes) {
if (_verbose) { if (_verbose) {
gclog_or_tty->print_cr("Region " SIZE_FORMAT ": marked bytes mismatch: " gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
"expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT, "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
hr->hrs_index(), exp_marked_bytes, act_marked_bytes); hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
} }
@ -1445,15 +1443,16 @@ public:
// (which was just calculated) region bit maps. // (which was just calculated) region bit maps.
// We're not OK if the bit in the calculated expected region // We're not OK if the bit in the calculated expected region
// bitmap is set and the bit in the actual region bitmap is not. // bitmap is set and the bit in the actual region bitmap is not.
BitMap::idx_t index = (BitMap::idx_t)hr->hrs_index(); BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
bool expected = _exp_region_bm->at(index); bool expected = _exp_region_bm->at(index);
bool actual = _region_bm->at(index); bool actual = _region_bm->at(index);
if (expected && !actual) { if (expected && !actual) {
if (_verbose) { if (_verbose) {
gclog_or_tty->print_cr("Region " SIZE_FORMAT ": region bitmap mismatch: " gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
"expected: %d, actual: %d", "expected: %s, actual: %s",
hr->hrs_index(), expected, actual); hr->hrs_index(),
BOOL_TO_STR(expected), BOOL_TO_STR(actual));
} }
failures += 1; failures += 1;
} }
@ -1471,9 +1470,10 @@ public:
if (expected && !actual) { if (expected && !actual) {
if (_verbose) { if (_verbose) {
gclog_or_tty->print_cr("Region " SIZE_FORMAT ": card bitmap mismatch at " SIZE_FORMAT ": " gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
"expected: %d, actual: %d", "expected: %s, actual: %s",
hr->hrs_index(), i, expected, actual); hr->hrs_index(), i,
BOOL_TO_STR(expected), BOOL_TO_STR(actual));
} }
failures += 1; failures += 1;
} }
@ -1603,18 +1603,17 @@ class FinalCountDataUpdateClosure: public HeapRegionClosure {
void set_bit_for_region(HeapRegion* hr) { void set_bit_for_region(HeapRegion* hr) {
assert(!hr->continuesHumongous(), "should have filtered those out"); assert(!hr->continuesHumongous(), "should have filtered those out");
size_t index = hr->hrs_index(); BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
if (!hr->startsHumongous()) { if (!hr->startsHumongous()) {
// Normal (non-humongous) case: just set the bit. // Normal (non-humongous) case: just set the bit.
_region_bm->par_set_bit((BitMap::idx_t) index); _region_bm->par_set_bit(index);
} else { } else {
// Starts humongous case: calculate how many regions are part of // Starts humongous case: calculate how many regions are part of
// this humongous region and then set the bit range. // this humongous region and then set the bit range.
G1CollectedHeap* g1h = G1CollectedHeap::heap(); G1CollectedHeap* g1h = G1CollectedHeap::heap();
HeapRegion *last_hr = g1h->heap_region_containing_raw(hr->end() - 1); HeapRegion *last_hr = g1h->heap_region_containing_raw(hr->end() - 1);
size_t end_index = last_hr->hrs_index() + 1; BitMap::idx_t end_index = (BitMap::idx_t) last_hr->hrs_index() + 1;
_region_bm->par_at_put_range((BitMap::idx_t) index, _region_bm->par_at_put_range(index, end_index, true);
(BitMap::idx_t) end_index, true);
} }
} }
@ -1718,8 +1717,8 @@ public:
_n_workers = 1; _n_workers = 1;
} }
_live_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers); _live_bytes = NEW_C_HEAP_ARRAY(size_t, (size_t) _n_workers);
_used_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers); _used_bytes = NEW_C_HEAP_ARRAY(size_t, (size_t) _n_workers);
} }
~G1ParFinalCountTask() { ~G1ParFinalCountTask() {
@ -1768,7 +1767,7 @@ class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
G1CollectedHeap* _g1; G1CollectedHeap* _g1;
int _worker_num; int _worker_num;
size_t _max_live_bytes; size_t _max_live_bytes;
size_t _regions_claimed; uint _regions_claimed;
size_t _freed_bytes; size_t _freed_bytes;
FreeRegionList* _local_cleanup_list; FreeRegionList* _local_cleanup_list;
OldRegionSet* _old_proxy_set; OldRegionSet* _old_proxy_set;
@ -1821,7 +1820,7 @@ public:
} }
size_t max_live_bytes() { return _max_live_bytes; } size_t max_live_bytes() { return _max_live_bytes; }
size_t regions_claimed() { return _regions_claimed; } uint regions_claimed() { return _regions_claimed; }
double claimed_region_time_sec() { return _claimed_region_time; } double claimed_region_time_sec() { return _claimed_region_time; }
double max_region_time_sec() { return _max_region_time; } double max_region_time_sec() { return _max_region_time; }
}; };
@ -2146,7 +2145,7 @@ void ConcurrentMark::completeCleanup() {
if (G1ConcRegionFreeingVerbose) { if (G1ConcRegionFreeingVerbose) {
gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
"cleanup list has "SIZE_FORMAT" entries", "cleanup list has %u entries",
_cleanup_list.length()); _cleanup_list.length());
} }
@ -2168,9 +2167,8 @@ void ConcurrentMark::completeCleanup() {
_cleanup_list.is_empty()) { _cleanup_list.is_empty()) {
if (G1ConcRegionFreeingVerbose) { if (G1ConcRegionFreeingVerbose) {
gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
"appending "SIZE_FORMAT" entries to the " "appending %u entries to the secondary_free_list, "
"secondary_free_list, clean list still has " "cleanup list still has %u entries",
SIZE_FORMAT" entries",
tmp_free_list.length(), tmp_free_list.length(),
_cleanup_list.length()); _cleanup_list.length());
} }
@ -3140,7 +3138,7 @@ class AggregateCountDataHRClosure: public HeapRegionClosure {
assert(limit_idx <= end_idx, "or else use atomics"); assert(limit_idx <= end_idx, "or else use atomics");
// Aggregate the "stripe" in the count data associated with hr. // Aggregate the "stripe" in the count data associated with hr.
size_t hrs_index = hr->hrs_index(); uint hrs_index = hr->hrs_index();
size_t marked_bytes = 0; size_t marked_bytes = 0;
for (int i = 0; (size_t)i < _max_task_num; i += 1) { for (int i = 0; (size_t)i < _max_task_num; i += 1) {
@ -3248,7 +3246,7 @@ void ConcurrentMark::clear_all_count_data() {
// of the final counting task. // of the final counting task.
_region_bm.clear(); _region_bm.clear();
size_t max_regions = _g1h->max_regions(); uint max_regions = _g1h->max_regions();
assert(_max_task_num != 0, "unitialized"); assert(_max_task_num != 0, "unitialized");
for (int i = 0; (size_t) i < _max_task_num; i += 1) { for (int i = 0; (size_t) i < _max_task_num; i += 1) {
@ -3258,7 +3256,7 @@ void ConcurrentMark::clear_all_count_data() {
assert(task_card_bm->size() == _card_bm.size(), "size mismatch"); assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
assert(marked_bytes_array != NULL, "uninitialized"); assert(marked_bytes_array != NULL, "uninitialized");
memset(marked_bytes_array, 0, (max_regions * sizeof(size_t))); memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
task_card_bm->clear(); task_card_bm->clear();
} }
} }

View file

@ -636,7 +636,7 @@ public:
return _task_queues->steal(task_num, hash_seed, obj); return _task_queues->steal(task_num, hash_seed, obj);
} }
ConcurrentMark(ReservedSpace rs, int max_regions); ConcurrentMark(ReservedSpace rs, uint max_regions);
~ConcurrentMark(); ~ConcurrentMark();
ConcurrentMarkThread* cmThread() { return _cmThread; } ConcurrentMarkThread* cmThread() { return _cmThread; }

View file

@ -49,7 +49,7 @@ inline void ConcurrentMark::count_region(MemRegion mr, HeapRegion* hr,
HeapWord* start = mr.start(); HeapWord* start = mr.start();
HeapWord* last = mr.last(); HeapWord* last = mr.last();
size_t region_size_bytes = mr.byte_size(); size_t region_size_bytes = mr.byte_size();
size_t index = hr->hrs_index(); uint index = hr->hrs_index();
assert(!hr->continuesHumongous(), "should not be HC region"); assert(!hr->continuesHumongous(), "should not be HC region");
assert(hr == g1h->heap_region_containing(start), "sanity"); assert(hr == g1h->heap_region_containing(start), "sanity");

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -140,7 +140,7 @@ HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size,
} }
void G1AllocRegion::fill_in_ext_msg(ar_ext_msg* msg, const char* message) { void G1AllocRegion::fill_in_ext_msg(ar_ext_msg* msg, const char* message) {
msg->append("[%s] %s c: "SIZE_FORMAT" b: %s r: "PTR_FORMAT" u: "SIZE_FORMAT, msg->append("[%s] %s c: %u b: %s r: "PTR_FORMAT" u: "SIZE_FORMAT,
_name, message, _count, BOOL_TO_STR(_bot_updates), _name, message, _count, BOOL_TO_STR(_bot_updates),
_alloc_region, _used_bytes_before); _alloc_region, _used_bytes_before);
} }
@ -215,7 +215,7 @@ void G1AllocRegion::trace(const char* str, size_t word_size, HeapWord* result) {
jio_snprintf(rest_buffer, buffer_length, ""); jio_snprintf(rest_buffer, buffer_length, "");
} }
tty->print_cr("[%s] "SIZE_FORMAT" %s : %s %s", tty->print_cr("[%s] %u %s : %s %s",
_name, _count, hr_buffer, str, rest_buffer); _name, _count, hr_buffer, str, rest_buffer);
} }
} }

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -64,7 +64,7 @@ private:
// the region that is re-used using the set() method. This count can // the region that is re-used using the set() method. This count can
// be used in any heuristics that might want to bound how many // be used in any heuristics that might want to bound how many
// distinct regions this object can used during an active interval. // distinct regions this object can used during an active interval.
size_t _count; uint _count;
// When we set up a new active region we save its used bytes in this // When we set up a new active region we save its used bytes in this
// field so that, when we retire it, we can calculate how much space // field so that, when we retire it, we can calculate how much space
@ -136,7 +136,7 @@ public:
return (_alloc_region == _dummy_region) ? NULL : _alloc_region; return (_alloc_region == _dummy_region) ? NULL : _alloc_region;
} }
size_t count() { return _count; } uint count() { return _count; }
// The following two are the building blocks for the allocation method. // The following two are the building blocks for the allocation method.

View file

@ -234,7 +234,7 @@ void YoungList::empty_list() {
bool YoungList::check_list_well_formed() { bool YoungList::check_list_well_formed() {
bool ret = true; bool ret = true;
size_t length = 0; uint length = 0;
HeapRegion* curr = _head; HeapRegion* curr = _head;
HeapRegion* last = NULL; HeapRegion* last = NULL;
while (curr != NULL) { while (curr != NULL) {
@ -253,7 +253,7 @@ bool YoungList::check_list_well_formed() {
if (!ret) { if (!ret) {
gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!"); gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
gclog_or_tty->print_cr("### list has %d entries, _length is %d", gclog_or_tty->print_cr("### list has %u entries, _length is %u",
length, _length); length, _length);
} }
@ -264,7 +264,7 @@ bool YoungList::check_list_empty(bool check_sample) {
bool ret = true; bool ret = true;
if (_length != 0) { if (_length != 0) {
gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d", gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %u",
_length); _length);
ret = false; ret = false;
} }
@ -337,8 +337,7 @@ YoungList::reset_auxilary_lists() {
_g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr); _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
young_index_in_cset += 1; young_index_in_cset += 1;
} }
assert((size_t) young_index_in_cset == _survivor_length, assert((uint) young_index_in_cset == _survivor_length, "post-condition");
"post-condition");
_g1h->g1_policy()->note_stop_adding_survivor_regions(); _g1h->g1_policy()->note_stop_adding_survivor_regions();
_head = _survivor_head; _head = _survivor_head;
@ -533,7 +532,7 @@ G1CollectedHeap::new_region_try_secondary_free_list() {
if (!_secondary_free_list.is_empty()) { if (!_secondary_free_list.is_empty()) {
if (G1ConcRegionFreeingVerbose) { if (G1ConcRegionFreeingVerbose) {
gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
"secondary_free_list has "SIZE_FORMAT" entries", "secondary_free_list has %u entries",
_secondary_free_list.length()); _secondary_free_list.length());
} }
// It looks as if there are free regions available on the // It looks as if there are free regions available on the
@ -619,12 +618,12 @@ HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) {
return res; return res;
} }
size_t G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions, uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
size_t word_size) { size_t word_size) {
assert(isHumongous(word_size), "word_size should be humongous"); assert(isHumongous(word_size), "word_size should be humongous");
assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
size_t first = G1_NULL_HRS_INDEX; uint first = G1_NULL_HRS_INDEX;
if (num_regions == 1) { if (num_regions == 1) {
// Only one region to allocate, no need to go through the slower // Only one region to allocate, no need to go through the slower
// path. The caller will attempt the expasion if this fails, so // path. The caller will attempt the expasion if this fails, so
@ -650,7 +649,7 @@ size_t G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
if (free_regions() >= num_regions) { if (free_regions() >= num_regions) {
first = _hrs.find_contiguous(num_regions); first = _hrs.find_contiguous(num_regions);
if (first != G1_NULL_HRS_INDEX) { if (first != G1_NULL_HRS_INDEX) {
for (size_t i = first; i < first + num_regions; ++i) { for (uint i = first; i < first + num_regions; ++i) {
HeapRegion* hr = region_at(i); HeapRegion* hr = region_at(i);
assert(hr->is_empty(), "sanity"); assert(hr->is_empty(), "sanity");
assert(is_on_master_free_list(hr), "sanity"); assert(is_on_master_free_list(hr), "sanity");
@ -664,15 +663,15 @@ size_t G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
} }
HeapWord* HeapWord*
G1CollectedHeap::humongous_obj_allocate_initialize_regions(size_t first, G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
size_t num_regions, uint num_regions,
size_t word_size) { size_t word_size) {
assert(first != G1_NULL_HRS_INDEX, "pre-condition"); assert(first != G1_NULL_HRS_INDEX, "pre-condition");
assert(isHumongous(word_size), "word_size should be humongous"); assert(isHumongous(word_size), "word_size should be humongous");
assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
// Index of last region in the series + 1. // Index of last region in the series + 1.
size_t last = first + num_regions; uint last = first + num_regions;
// We need to initialize the region(s) we just discovered. This is // We need to initialize the region(s) we just discovered. This is
// a bit tricky given that it can happen concurrently with // a bit tricky given that it can happen concurrently with
@ -683,7 +682,7 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(size_t first,
// a specific order. // a specific order.
// The word size sum of all the regions we will allocate. // The word size sum of all the regions we will allocate.
size_t word_size_sum = num_regions * HeapRegion::GrainWords; size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
assert(word_size <= word_size_sum, "sanity"); assert(word_size <= word_size_sum, "sanity");
// This will be the "starts humongous" region. // This will be the "starts humongous" region.
@ -722,7 +721,7 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(size_t first,
// Then, if there are any, we will set up the "continues // Then, if there are any, we will set up the "continues
// humongous" regions. // humongous" regions.
HeapRegion* hr = NULL; HeapRegion* hr = NULL;
for (size_t i = first + 1; i < last; ++i) { for (uint i = first + 1; i < last; ++i) {
hr = region_at(i); hr = region_at(i);
hr->set_continuesHumongous(first_hr); hr->set_continuesHumongous(first_hr);
} }
@ -768,7 +767,7 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(size_t first,
// last one) is actually used when we will free up the humongous // last one) is actually used when we will free up the humongous
// region in free_humongous_region(). // region in free_humongous_region().
hr = NULL; hr = NULL;
for (size_t i = first + 1; i < last; ++i) { for (uint i = first + 1; i < last; ++i) {
hr = region_at(i); hr = region_at(i);
if ((i + 1) == last) { if ((i + 1) == last) {
// last continues humongous region // last continues humongous region
@ -804,14 +803,14 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
verify_region_sets_optional(); verify_region_sets_optional();
size_t num_regions = size_t word_size_rounded = round_to(word_size, HeapRegion::GrainWords);
round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords; uint num_regions = (uint) (word_size_rounded / HeapRegion::GrainWords);
size_t x_size = expansion_regions(); uint x_num = expansion_regions();
size_t fs = _hrs.free_suffix(); uint fs = _hrs.free_suffix();
size_t first = humongous_obj_allocate_find_first(num_regions, word_size); uint first = humongous_obj_allocate_find_first(num_regions, word_size);
if (first == G1_NULL_HRS_INDEX) { if (first == G1_NULL_HRS_INDEX) {
// The only thing we can do now is attempt expansion. // The only thing we can do now is attempt expansion.
if (fs + x_size >= num_regions) { if (fs + x_num >= num_regions) {
// If the number of regions we're trying to allocate for this // If the number of regions we're trying to allocate for this
// object is at most the number of regions in the free suffix, // object is at most the number of regions in the free suffix,
// then the call to humongous_obj_allocate_find_first() above // then the call to humongous_obj_allocate_find_first() above
@ -1781,7 +1780,7 @@ void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
ReservedSpace::page_align_size_down(shrink_bytes); ReservedSpace::page_align_size_down(shrink_bytes);
aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
HeapRegion::GrainBytes); HeapRegion::GrainBytes);
size_t num_regions_deleted = 0; uint num_regions_deleted = 0;
MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted); MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted);
HeapWord* old_end = (HeapWord*) _g1_storage.high(); HeapWord* old_end = (HeapWord*) _g1_storage.high();
assert(mr.end() == old_end, "post-condition"); assert(mr.end() == old_end, "post-condition");
@ -2004,7 +2003,7 @@ jint G1CollectedHeap::initialize() {
_reserved.set_start((HeapWord*)heap_rs.base()); _reserved.set_start((HeapWord*)heap_rs.base());
_reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
_expansion_regions = max_byte_size/HeapRegion::GrainBytes; _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);
// Create the gen rem set (and barrier set) for the entire reserved region. // Create the gen rem set (and barrier set) for the entire reserved region.
_rem_set = collector_policy()->create_rem_set(_reserved, 2); _rem_set = collector_policy()->create_rem_set(_reserved, 2);
@ -2041,7 +2040,7 @@ jint G1CollectedHeap::initialize() {
// 6843694 - ensure that the maximum region index can fit // 6843694 - ensure that the maximum region index can fit
// in the remembered set structures. // in the remembered set structures.
const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1; const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
guarantee((max_regions() - 1) <= max_region_idx, "too many regions"); guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1; size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
@ -2057,13 +2056,14 @@ jint G1CollectedHeap::initialize() {
_g1h = this; _g1h = this;
_in_cset_fast_test_length = max_regions(); _in_cset_fast_test_length = max_regions();
_in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length); _in_cset_fast_test_base =
NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length);
// We're biasing _in_cset_fast_test to avoid subtracting the // We're biasing _in_cset_fast_test to avoid subtracting the
// beginning of the heap every time we want to index; basically // beginning of the heap every time we want to index; basically
// it's the same with what we do with the card table. // it's the same with what we do with the card table.
_in_cset_fast_test = _in_cset_fast_test_base - _in_cset_fast_test = _in_cset_fast_test_base -
((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
// Clear the _cset_fast_test bitmap in anticipation of adding // Clear the _cset_fast_test bitmap in anticipation of adding
// regions to the incremental collection set for the first // regions to the incremental collection set for the first
@ -2072,7 +2072,7 @@ jint G1CollectedHeap::initialize() {
// Create the ConcurrentMark data structure and thread. // Create the ConcurrentMark data structure and thread.
// (Must do this late, so that "max_regions" is defined.) // (Must do this late, so that "max_regions" is defined.)
_cm = new ConcurrentMark(heap_rs, (int) max_regions()); _cm = new ConcurrentMark(heap_rs, max_regions());
_cmThread = _cm->cmThread(); _cmThread = _cm->cmThread();
// Initialize the from_card cache structure of HeapRegionRemSet. // Initialize the from_card cache structure of HeapRegionRemSet.
@ -2581,7 +2581,7 @@ G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
uint worker, uint worker,
uint no_of_par_workers, uint no_of_par_workers,
jint claim_value) { jint claim_value) {
const size_t regions = n_regions(); const uint regions = n_regions();
const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ? const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
no_of_par_workers : no_of_par_workers :
1); 1);
@ -2589,11 +2589,11 @@ G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
no_of_par_workers == workers()->total_workers(), no_of_par_workers == workers()->total_workers(),
"Non dynamic should use fixed number of workers"); "Non dynamic should use fixed number of workers");
// try to spread out the starting points of the workers // try to spread out the starting points of the workers
const size_t start_index = regions / max_workers * (size_t) worker; const uint start_index = regions / max_workers * worker;
// each worker will actually look at all regions // each worker will actually look at all regions
for (size_t count = 0; count < regions; ++count) { for (uint count = 0; count < regions; ++count) {
const size_t index = (start_index + count) % regions; const uint index = (start_index + count) % regions;
assert(0 <= index && index < regions, "sanity"); assert(0 <= index && index < regions, "sanity");
HeapRegion* r = region_at(index); HeapRegion* r = region_at(index);
// we'll ignore "continues humongous" regions (we'll process them // we'll ignore "continues humongous" regions (we'll process them
@ -2615,7 +2615,7 @@ G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
// result, we might end up processing them twice. So, we'll do // result, we might end up processing them twice. So, we'll do
// them first (notice: most closures will ignore them anyway) and // them first (notice: most closures will ignore them anyway) and
// then we'll do the "starts humongous" region. // then we'll do the "starts humongous" region.
for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) { for (uint ch_index = index + 1; ch_index < regions; ++ch_index) {
HeapRegion* chr = region_at(ch_index); HeapRegion* chr = region_at(ch_index);
// if the region has already been claimed or it's not // if the region has already been claimed or it's not
@ -2683,8 +2683,9 @@ void G1CollectedHeap::reset_cset_heap_region_claim_values() {
class CheckClaimValuesClosure : public HeapRegionClosure { class CheckClaimValuesClosure : public HeapRegionClosure {
private: private:
jint _claim_value; jint _claim_value;
size_t _failures; uint _failures;
HeapRegion* _sh_region; HeapRegion* _sh_region;
public: public:
CheckClaimValuesClosure(jint claim_value) : CheckClaimValuesClosure(jint claim_value) :
_claim_value(claim_value), _failures(0), _sh_region(NULL) { } _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
@ -2712,9 +2713,7 @@ public:
} }
return false; return false;
} }
size_t failures() { uint failures() { return _failures; }
return _failures;
}
}; };
bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) { bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
@ -2724,17 +2723,15 @@ bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
} }
class CheckClaimValuesInCSetHRClosure: public HeapRegionClosure { class CheckClaimValuesInCSetHRClosure: public HeapRegionClosure {
private:
jint _claim_value; jint _claim_value;
size_t _failures; uint _failures;
public: public:
CheckClaimValuesInCSetHRClosure(jint claim_value) : CheckClaimValuesInCSetHRClosure(jint claim_value) :
_claim_value(claim_value), _claim_value(claim_value), _failures(0) { }
_failures(0) { }
size_t failures() { uint failures() { return _failures; }
return _failures;
}
bool doHeapRegion(HeapRegion* hr) { bool doHeapRegion(HeapRegion* hr) {
assert(hr->in_collection_set(), "how?"); assert(hr->in_collection_set(), "how?");
@ -2801,14 +2798,14 @@ HeapRegion* G1CollectedHeap::start_cset_region_for_worker(int worker_i) {
result = g1_policy()->collection_set(); result = g1_policy()->collection_set();
if (G1CollectedHeap::use_parallel_gc_threads()) { if (G1CollectedHeap::use_parallel_gc_threads()) {
size_t cs_size = g1_policy()->cset_region_length(); uint cs_size = g1_policy()->cset_region_length();
uint active_workers = workers()->active_workers(); uint active_workers = workers()->active_workers();
assert(UseDynamicNumberOfGCThreads || assert(UseDynamicNumberOfGCThreads ||
active_workers == workers()->total_workers(), active_workers == workers()->total_workers(),
"Unless dynamic should use total workers"); "Unless dynamic should use total workers");
size_t end_ind = (cs_size * worker_i) / active_workers; uint end_ind = (cs_size * worker_i) / active_workers;
size_t start_ind = 0; uint start_ind = 0;
if (worker_i > 0 && if (worker_i > 0 &&
_worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) { _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
@ -2818,7 +2815,7 @@ HeapRegion* G1CollectedHeap::start_cset_region_for_worker(int worker_i) {
result = _worker_cset_start_region[worker_i - 1]; result = _worker_cset_start_region[worker_i - 1];
} }
for (size_t i = start_ind; i < end_ind; i++) { for (uint i = start_ind; i < end_ind; i++) {
result = result->next_in_collection_set(); result = result->next_in_collection_set();
} }
} }
@ -3280,12 +3277,12 @@ void G1CollectedHeap::print_on(outputStream* st) const {
_g1_storage.high_boundary()); _g1_storage.high_boundary());
st->cr(); st->cr();
st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K); st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
size_t young_regions = _young_list->length(); uint young_regions = _young_list->length();
st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ", st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
young_regions, young_regions * HeapRegion::GrainBytes / K); (size_t) young_regions * HeapRegion::GrainBytes / K);
size_t survivor_regions = g1_policy()->recorded_survivor_regions(); uint survivor_regions = g1_policy()->recorded_survivor_regions();
st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)", st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
survivor_regions, survivor_regions * HeapRegion::GrainBytes / K); (size_t) survivor_regions * HeapRegion::GrainBytes / K);
st->cr(); st->cr();
perm()->as_gen()->print_on(st); perm()->as_gen()->print_on(st);
} }
@ -3295,7 +3292,11 @@ void G1CollectedHeap::print_extended_on(outputStream* st) const {
// Print the per-region information. // Print the per-region information.
st->cr(); st->cr();
st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), HS=humongous(starts), HC=humongous(continues), CS=collection set, F=free, TS=gc time stamp, PTAMS=previous top-at-mark-start, NTAMS=next top-at-mark-start)"); st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
"HS=humongous(starts), HC=humongous(continues), "
"CS=collection set, F=free, TS=gc time stamp, "
"PTAMS=previous top-at-mark-start, "
"NTAMS=next top-at-mark-start)");
PrintRegionClosure blk(st); PrintRegionClosure blk(st);
heap_region_iterate(&blk); heap_region_iterate(&blk);
} }
@ -3473,16 +3474,16 @@ size_t G1CollectedHeap::cards_scanned() {
void void
G1CollectedHeap::setup_surviving_young_words() { G1CollectedHeap::setup_surviving_young_words() {
guarantee( _surviving_young_words == NULL, "pre-condition" ); assert(_surviving_young_words == NULL, "pre-condition");
size_t array_length = g1_policy()->young_cset_region_length(); uint array_length = g1_policy()->young_cset_region_length();
_surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length); _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length);
if (_surviving_young_words == NULL) { if (_surviving_young_words == NULL) {
vm_exit_out_of_memory(sizeof(size_t) * array_length, vm_exit_out_of_memory(sizeof(size_t) * array_length,
"Not enough space for young surv words summary."); "Not enough space for young surv words summary.");
} }
memset(_surviving_young_words, 0, array_length * sizeof(size_t)); memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
#ifdef ASSERT #ifdef ASSERT
for (size_t i = 0; i < array_length; ++i) { for (uint i = 0; i < array_length; ++i) {
assert( _surviving_young_words[i] == 0, "memset above" ); assert( _surviving_young_words[i] == 0, "memset above" );
} }
#endif // !ASSERT #endif // !ASSERT
@ -3491,9 +3492,10 @@ G1CollectedHeap::setup_surviving_young_words() {
void void
G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) { G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
size_t array_length = g1_policy()->young_cset_region_length(); uint array_length = g1_policy()->young_cset_region_length();
for (size_t i = 0; i < array_length; ++i) for (uint i = 0; i < array_length; ++i) {
_surviving_young_words[i] += surv_young_words[i]; _surviving_young_words[i] += surv_young_words[i];
}
} }
void void
@ -4242,8 +4244,8 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
// non-young regions (where the age is -1) // non-young regions (where the age is -1)
// We also add a few elements at the beginning and at the end in // We also add a few elements at the beginning and at the end in
// an attempt to eliminate cache contention // an attempt to eliminate cache contention
size_t real_length = 1 + _g1h->g1_policy()->young_cset_region_length(); uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
size_t array_length = PADDING_ELEM_NUM + uint array_length = PADDING_ELEM_NUM +
real_length + real_length +
PADDING_ELEM_NUM; PADDING_ELEM_NUM;
_surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length); _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length);
@ -4251,7 +4253,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
vm_exit_out_of_memory(array_length * sizeof(size_t), vm_exit_out_of_memory(array_length * sizeof(size_t),
"Not enough space for young surv histo."); "Not enough space for young surv histo.");
_surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM; _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
memset(_surviving_young_words, 0, real_length * sizeof(size_t)); memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
_alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer; _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
_alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer; _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer;
@ -5585,8 +5587,8 @@ void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
hr->set_notHumongous(); hr->set_notHumongous();
free_region(hr, &hr_pre_used, free_list, par); free_region(hr, &hr_pre_used, free_list, par);
size_t i = hr->hrs_index() + 1; uint i = hr->hrs_index() + 1;
size_t num = 1; uint num = 1;
while (i < n_regions()) { while (i < n_regions()) {
HeapRegion* curr_hr = region_at(i); HeapRegion* curr_hr = region_at(i);
if (!curr_hr->continuesHumongous()) { if (!curr_hr->continuesHumongous()) {
@ -5795,7 +5797,7 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
if (cur->is_young()) { if (cur->is_young()) {
int index = cur->young_index_in_cset(); int index = cur->young_index_in_cset();
assert(index != -1, "invariant"); assert(index != -1, "invariant");
assert((size_t) index < policy->young_cset_region_length(), "invariant"); assert((uint) index < policy->young_cset_region_length(), "invariant");
size_t words_survived = _surviving_young_words[index]; size_t words_survived = _surviving_young_words[index];
cur->record_surv_words_in_group(words_survived); cur->record_surv_words_in_group(words_survived);
@ -6135,7 +6137,7 @@ void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
// Methods for the GC alloc regions // Methods for the GC alloc regions
HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
size_t count, uint count,
GCAllocPurpose ap) { GCAllocPurpose ap) {
assert(FreeList_lock->owned_by_self(), "pre-condition"); assert(FreeList_lock->owned_by_self(), "pre-condition");
@ -6207,7 +6209,7 @@ private:
FreeRegionList* _free_list; FreeRegionList* _free_list;
OldRegionSet* _old_set; OldRegionSet* _old_set;
HumongousRegionSet* _humongous_set; HumongousRegionSet* _humongous_set;
size_t _region_count; uint _region_count;
public: public:
VerifyRegionListsClosure(OldRegionSet* old_set, VerifyRegionListsClosure(OldRegionSet* old_set,
@ -6216,7 +6218,7 @@ public:
_old_set(old_set), _humongous_set(humongous_set), _old_set(old_set), _humongous_set(humongous_set),
_free_list(free_list), _region_count(0) { } _free_list(free_list), _region_count(0) { }
size_t region_count() { return _region_count; } uint region_count() { return _region_count; }
bool doHeapRegion(HeapRegion* hr) { bool doHeapRegion(HeapRegion* hr) {
_region_count += 1; _region_count += 1;
@ -6238,7 +6240,7 @@ public:
} }
}; };
HeapRegion* G1CollectedHeap::new_heap_region(size_t hrs_index, HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
HeapWord* bottom) { HeapWord* bottom) {
HeapWord* end = bottom + HeapRegion::GrainWords; HeapWord* end = bottom + HeapRegion::GrainWords;
MemRegion mr(bottom, end); MemRegion mr(bottom, end);

View file

@ -85,8 +85,8 @@ private:
HeapRegion* _curr; HeapRegion* _curr;
size_t _length; uint _length;
size_t _survivor_length; uint _survivor_length;
size_t _last_sampled_rs_lengths; size_t _last_sampled_rs_lengths;
size_t _sampled_rs_lengths; size_t _sampled_rs_lengths;
@ -101,8 +101,8 @@ public:
void empty_list(); void empty_list();
bool is_empty() { return _length == 0; } bool is_empty() { return _length == 0; }
size_t length() { return _length; } uint length() { return _length; }
size_t survivor_length() { return _survivor_length; } uint survivor_length() { return _survivor_length; }
// Currently we do not keep track of the used byte sum for the // Currently we do not keep track of the used byte sum for the
// young list and the survivors and it'd be quite a lot of work to // young list and the survivors and it'd be quite a lot of work to
@ -111,10 +111,10 @@ public:
// we'll report the more accurate information then. // we'll report the more accurate information then.
size_t eden_used_bytes() { size_t eden_used_bytes() {
assert(length() >= survivor_length(), "invariant"); assert(length() >= survivor_length(), "invariant");
return (length() - survivor_length()) * HeapRegion::GrainBytes; return (size_t) (length() - survivor_length()) * HeapRegion::GrainBytes;
} }
size_t survivor_used_bytes() { size_t survivor_used_bytes() {
return survivor_length() * HeapRegion::GrainBytes; return (size_t) survivor_length() * HeapRegion::GrainBytes;
} }
void rs_length_sampling_init(); void rs_length_sampling_init();
@ -247,7 +247,7 @@ private:
MasterHumongousRegionSet _humongous_set; MasterHumongousRegionSet _humongous_set;
// The number of regions we could create by expansion. // The number of regions we could create by expansion.
size_t _expansion_regions; uint _expansion_regions;
// The block offset table for the G1 heap. // The block offset table for the G1 heap.
G1BlockOffsetSharedArray* _bot_shared; G1BlockOffsetSharedArray* _bot_shared;
@ -339,7 +339,7 @@ private:
bool* _in_cset_fast_test_base; bool* _in_cset_fast_test_base;
// The length of the _in_cset_fast_test_base array. // The length of the _in_cset_fast_test_base array.
size_t _in_cset_fast_test_length; uint _in_cset_fast_test_length;
volatile unsigned _gc_time_stamp; volatile unsigned _gc_time_stamp;
@ -458,14 +458,14 @@ protected:
// length and remove them from the master free list. Return the // length and remove them from the master free list. Return the
// index of the first region or G1_NULL_HRS_INDEX if the search // index of the first region or G1_NULL_HRS_INDEX if the search
// was unsuccessful. // was unsuccessful.
size_t humongous_obj_allocate_find_first(size_t num_regions, uint humongous_obj_allocate_find_first(uint num_regions,
size_t word_size); size_t word_size);
// Initialize a contiguous set of free regions of length num_regions // Initialize a contiguous set of free regions of length num_regions
// and starting at index first so that they appear as a single // and starting at index first so that they appear as a single
// humongous region. // humongous region.
HeapWord* humongous_obj_allocate_initialize_regions(size_t first, HeapWord* humongous_obj_allocate_initialize_regions(uint first,
size_t num_regions, uint num_regions,
size_t word_size); size_t word_size);
// Attempt to allocate a humongous object of the given size. Return // Attempt to allocate a humongous object of the given size. Return
@ -574,7 +574,7 @@ protected:
size_t allocated_bytes); size_t allocated_bytes);
// For GC alloc regions. // For GC alloc regions.
HeapRegion* new_gc_alloc_region(size_t word_size, size_t count, HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
GCAllocPurpose ap); GCAllocPurpose ap);
void retire_gc_alloc_region(HeapRegion* alloc_region, void retire_gc_alloc_region(HeapRegion* alloc_region,
size_t allocated_bytes, GCAllocPurpose ap); size_t allocated_bytes, GCAllocPurpose ap);
@ -641,7 +641,7 @@ public:
void register_region_with_in_cset_fast_test(HeapRegion* r) { void register_region_with_in_cset_fast_test(HeapRegion* r) {
assert(_in_cset_fast_test_base != NULL, "sanity"); assert(_in_cset_fast_test_base != NULL, "sanity");
assert(r->in_collection_set(), "invariant"); assert(r->in_collection_set(), "invariant");
size_t index = r->hrs_index(); uint index = r->hrs_index();
assert(index < _in_cset_fast_test_length, "invariant"); assert(index < _in_cset_fast_test_length, "invariant");
assert(!_in_cset_fast_test_base[index], "invariant"); assert(!_in_cset_fast_test_base[index], "invariant");
_in_cset_fast_test_base[index] = true; _in_cset_fast_test_base[index] = true;
@ -655,7 +655,7 @@ public:
if (_g1_committed.contains((HeapWord*) obj)) { if (_g1_committed.contains((HeapWord*) obj)) {
// no need to subtract the bottom of the heap from obj, // no need to subtract the bottom of the heap from obj,
// _in_cset_fast_test is biased // _in_cset_fast_test is biased
size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes; uintx index = (uintx) obj >> HeapRegion::LogOfHRGrainBytes;
bool ret = _in_cset_fast_test[index]; bool ret = _in_cset_fast_test[index];
// let's make sure the result is consistent with what the slower // let's make sure the result is consistent with what the slower
// test returns // test returns
@ -670,7 +670,7 @@ public:
void clear_cset_fast_test() { void clear_cset_fast_test() {
assert(_in_cset_fast_test_base != NULL, "sanity"); assert(_in_cset_fast_test_base != NULL, "sanity");
memset(_in_cset_fast_test_base, false, memset(_in_cset_fast_test_base, false,
_in_cset_fast_test_length * sizeof(bool)); (size_t) _in_cset_fast_test_length * sizeof(bool));
} }
// This is called at the end of either a concurrent cycle or a Full // This is called at the end of either a concurrent cycle or a Full
@ -1101,23 +1101,23 @@ public:
} }
// The total number of regions in the heap. // The total number of regions in the heap.
size_t n_regions() { return _hrs.length(); } uint n_regions() { return _hrs.length(); }
// The max number of regions in the heap. // The max number of regions in the heap.
size_t max_regions() { return _hrs.max_length(); } uint max_regions() { return _hrs.max_length(); }
// The number of regions that are completely free. // The number of regions that are completely free.
size_t free_regions() { return _free_list.length(); } uint free_regions() { return _free_list.length(); }
// The number of regions that are not completely free. // The number of regions that are not completely free.
size_t used_regions() { return n_regions() - free_regions(); } uint used_regions() { return n_regions() - free_regions(); }
// The number of regions available for "regular" expansion. // The number of regions available for "regular" expansion.
size_t expansion_regions() { return _expansion_regions; } uint expansion_regions() { return _expansion_regions; }
// Factory method for HeapRegion instances. It will return NULL if // Factory method for HeapRegion instances. It will return NULL if
// the allocation fails. // the allocation fails.
HeapRegion* new_heap_region(size_t hrs_index, HeapWord* bottom); HeapRegion* new_heap_region(uint hrs_index, HeapWord* bottom);
void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN; void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN; void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
@ -1301,7 +1301,7 @@ public:
void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk) const; void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk) const;
// Return the region with the given index. It assumes the index is valid. // Return the region with the given index. It assumes the index is valid.
HeapRegion* region_at(size_t index) const { return _hrs.at(index); } HeapRegion* region_at(uint index) const { return _hrs.at(index); }
// Divide the heap region sequence into "chunks" of some size (the number // Divide the heap region sequence into "chunks" of some size (the number
// of regions divided by the number of parallel threads times some // of regions divided by the number of parallel threads times some

View file

@ -431,31 +431,36 @@ G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(
} }
if (FLAG_IS_CMDLINE(NewSize)) { if (FLAG_IS_CMDLINE(NewSize)) {
_min_desired_young_length = MAX2((size_t) 1, NewSize / HeapRegion::GrainBytes); _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes),
1U);
if (FLAG_IS_CMDLINE(MaxNewSize)) { if (FLAG_IS_CMDLINE(MaxNewSize)) {
_max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes); _max_desired_young_length =
MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
1U);
_sizer_kind = SizerMaxAndNewSize; _sizer_kind = SizerMaxAndNewSize;
_adaptive_size = _min_desired_young_length == _max_desired_young_length; _adaptive_size = _min_desired_young_length == _max_desired_young_length;
} else { } else {
_sizer_kind = SizerNewSizeOnly; _sizer_kind = SizerNewSizeOnly;
} }
} else if (FLAG_IS_CMDLINE(MaxNewSize)) { } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
_max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes); _max_desired_young_length =
MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
1U);
_sizer_kind = SizerMaxNewSizeOnly; _sizer_kind = SizerMaxNewSizeOnly;
} }
} }
size_t G1YoungGenSizer::calculate_default_min_length(size_t new_number_of_heap_regions) { uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) {
size_t default_value = (new_number_of_heap_regions * G1DefaultMinNewGenPercent) / 100; uint default_value = (new_number_of_heap_regions * G1DefaultMinNewGenPercent) / 100;
return MAX2((size_t)1, default_value); return MAX2(1U, default_value);
} }
size_t G1YoungGenSizer::calculate_default_max_length(size_t new_number_of_heap_regions) { uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) {
size_t default_value = (new_number_of_heap_regions * G1DefaultMaxNewGenPercent) / 100; uint default_value = (new_number_of_heap_regions * G1DefaultMaxNewGenPercent) / 100;
return MAX2((size_t)1, default_value); return MAX2(1U, default_value);
} }
void G1YoungGenSizer::heap_size_changed(size_t new_number_of_heap_regions) { void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
assert(new_number_of_heap_regions > 0, "Heap must be initialized"); assert(new_number_of_heap_regions > 0, "Heap must be initialized");
switch (_sizer_kind) { switch (_sizer_kind) {
@ -512,16 +517,16 @@ void G1CollectorPolicy::initialize_gc_policy_counters() {
_gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3); _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
} }
bool G1CollectorPolicy::predict_will_fit(size_t young_length, bool G1CollectorPolicy::predict_will_fit(uint young_length,
double base_time_ms, double base_time_ms,
size_t base_free_regions, uint base_free_regions,
double target_pause_time_ms) { double target_pause_time_ms) {
if (young_length >= base_free_regions) { if (young_length >= base_free_regions) {
// end condition 1: not enough space for the young regions // end condition 1: not enough space for the young regions
return false; return false;
} }
double accum_surv_rate = accum_yg_surv_rate_pred((int)(young_length - 1)); double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
size_t bytes_to_copy = size_t bytes_to_copy =
(size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes); (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy); double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
@ -543,25 +548,25 @@ bool G1CollectorPolicy::predict_will_fit(size_t young_length,
return true; return true;
} }
void G1CollectorPolicy::record_new_heap_size(size_t new_number_of_regions) { void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
// re-calculate the necessary reserve // re-calculate the necessary reserve
double reserve_regions_d = (double) new_number_of_regions * _reserve_factor; double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
// We use ceiling so that if reserve_regions_d is > 0.0 (but // We use ceiling so that if reserve_regions_d is > 0.0 (but
// smaller than 1.0) we'll get 1. // smaller than 1.0) we'll get 1.
_reserve_regions = (size_t) ceil(reserve_regions_d); _reserve_regions = (uint) ceil(reserve_regions_d);
_young_gen_sizer->heap_size_changed(new_number_of_regions); _young_gen_sizer->heap_size_changed(new_number_of_regions);
} }
size_t G1CollectorPolicy::calculate_young_list_desired_min_length( uint G1CollectorPolicy::calculate_young_list_desired_min_length(
size_t base_min_length) { uint base_min_length) {
size_t desired_min_length = 0; uint desired_min_length = 0;
if (adaptive_young_list_length()) { if (adaptive_young_list_length()) {
if (_alloc_rate_ms_seq->num() > 3) { if (_alloc_rate_ms_seq->num() > 3) {
double now_sec = os::elapsedTime(); double now_sec = os::elapsedTime();
double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0; double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
double alloc_rate_ms = predict_alloc_rate_ms(); double alloc_rate_ms = predict_alloc_rate_ms();
desired_min_length = (size_t) ceil(alloc_rate_ms * when_ms); desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
} else { } else {
// otherwise we don't have enough info to make the prediction // otherwise we don't have enough info to make the prediction
} }
@ -571,7 +576,7 @@ size_t G1CollectorPolicy::calculate_young_list_desired_min_length(
return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length); return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
} }
size_t G1CollectorPolicy::calculate_young_list_desired_max_length() { uint G1CollectorPolicy::calculate_young_list_desired_max_length() {
// Here, we might want to also take into account any additional // Here, we might want to also take into account any additional
// constraints (i.e., user-defined minimum bound). Currently, we // constraints (i.e., user-defined minimum bound). Currently, we
// effectively don't set this bound. // effectively don't set this bound.
@ -588,11 +593,11 @@ void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
// Calculate the absolute and desired min bounds. // Calculate the absolute and desired min bounds.
// This is how many young regions we already have (currently: the survivors). // This is how many young regions we already have (currently: the survivors).
size_t base_min_length = recorded_survivor_regions(); uint base_min_length = recorded_survivor_regions();
// This is the absolute minimum young length, which ensures that we // This is the absolute minimum young length, which ensures that we
// can allocate one eden region in the worst-case. // can allocate one eden region in the worst-case.
size_t absolute_min_length = base_min_length + 1; uint absolute_min_length = base_min_length + 1;
size_t desired_min_length = uint desired_min_length =
calculate_young_list_desired_min_length(base_min_length); calculate_young_list_desired_min_length(base_min_length);
if (desired_min_length < absolute_min_length) { if (desired_min_length < absolute_min_length) {
desired_min_length = absolute_min_length; desired_min_length = absolute_min_length;
@ -601,16 +606,16 @@ void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
// Calculate the absolute and desired max bounds. // Calculate the absolute and desired max bounds.
// We will try our best not to "eat" into the reserve. // We will try our best not to "eat" into the reserve.
size_t absolute_max_length = 0; uint absolute_max_length = 0;
if (_free_regions_at_end_of_collection > _reserve_regions) { if (_free_regions_at_end_of_collection > _reserve_regions) {
absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions; absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
} }
size_t desired_max_length = calculate_young_list_desired_max_length(); uint desired_max_length = calculate_young_list_desired_max_length();
if (desired_max_length > absolute_max_length) { if (desired_max_length > absolute_max_length) {
desired_max_length = absolute_max_length; desired_max_length = absolute_max_length;
} }
size_t young_list_target_length = 0; uint young_list_target_length = 0;
if (adaptive_young_list_length()) { if (adaptive_young_list_length()) {
if (gcs_are_young()) { if (gcs_are_young()) {
young_list_target_length = young_list_target_length =
@ -648,11 +653,11 @@ void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
update_max_gc_locker_expansion(); update_max_gc_locker_expansion();
} }
size_t uint
G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths, G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
size_t base_min_length, uint base_min_length,
size_t desired_min_length, uint desired_min_length,
size_t desired_max_length) { uint desired_max_length) {
assert(adaptive_young_list_length(), "pre-condition"); assert(adaptive_young_list_length(), "pre-condition");
assert(gcs_are_young(), "only call this for young GCs"); assert(gcs_are_young(), "only call this for young GCs");
@ -667,9 +672,9 @@ G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
// will be reflected in the predictions by the // will be reflected in the predictions by the
// survivor_regions_evac_time prediction. // survivor_regions_evac_time prediction.
assert(desired_min_length > base_min_length, "invariant"); assert(desired_min_length > base_min_length, "invariant");
size_t min_young_length = desired_min_length - base_min_length; uint min_young_length = desired_min_length - base_min_length;
assert(desired_max_length > base_min_length, "invariant"); assert(desired_max_length > base_min_length, "invariant");
size_t max_young_length = desired_max_length - base_min_length; uint max_young_length = desired_max_length - base_min_length;
double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0; double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
double survivor_regions_evac_time = predict_survivor_regions_evac_time(); double survivor_regions_evac_time = predict_survivor_regions_evac_time();
@ -679,8 +684,8 @@ G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
double base_time_ms = double base_time_ms =
predict_base_elapsed_time_ms(pending_cards, scanned_cards) + predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
survivor_regions_evac_time; survivor_regions_evac_time;
size_t available_free_regions = _free_regions_at_end_of_collection; uint available_free_regions = _free_regions_at_end_of_collection;
size_t base_free_regions = 0; uint base_free_regions = 0;
if (available_free_regions > _reserve_regions) { if (available_free_regions > _reserve_regions) {
base_free_regions = available_free_regions - _reserve_regions; base_free_regions = available_free_regions - _reserve_regions;
} }
@ -717,9 +722,9 @@ G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
// the new max. This way we maintain the loop invariants. // the new max. This way we maintain the loop invariants.
assert(min_young_length < max_young_length, "invariant"); assert(min_young_length < max_young_length, "invariant");
size_t diff = (max_young_length - min_young_length) / 2; uint diff = (max_young_length - min_young_length) / 2;
while (diff > 0) { while (diff > 0) {
size_t young_length = min_young_length + diff; uint young_length = min_young_length + diff;
if (predict_will_fit(young_length, base_time_ms, if (predict_will_fit(young_length, base_time_ms,
base_free_regions, target_pause_time_ms)) { base_free_regions, target_pause_time_ms)) {
min_young_length = young_length; min_young_length = young_length;
@ -1322,7 +1327,7 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
// given that humongous object allocations do not really affect // given that humongous object allocations do not really affect
// either the pause's duration nor when the next pause will take // either the pause's duration nor when the next pause will take
// place we can safely ignore them here. // place we can safely ignore them here.
size_t regions_allocated = eden_cset_region_length(); uint regions_allocated = eden_cset_region_length();
double alloc_rate_ms = (double) regions_allocated / app_time_ms; double alloc_rate_ms = (double) regions_allocated / app_time_ms;
_alloc_rate_ms_seq->add(alloc_rate_ms); _alloc_rate_ms_seq->add(alloc_rate_ms);
@ -1506,8 +1511,9 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
double pause_time_ms = elapsed_ms; double pause_time_ms = elapsed_ms;
size_t diff = 0; size_t diff = 0;
if (_max_pending_cards >= _pending_cards) if (_max_pending_cards >= _pending_cards) {
diff = _max_pending_cards - _pending_cards; diff = _max_pending_cards - _pending_cards;
}
_pending_card_diff_seq->add((double) diff); _pending_card_diff_seq->add((double) diff);
double cost_per_card_ms = 0.0; double cost_per_card_ms = 0.0;
@ -1741,8 +1747,7 @@ G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
return region_elapsed_time_ms; return region_elapsed_time_ms;
} }
size_t size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
size_t bytes_to_copy; size_t bytes_to_copy;
if (hr->is_marked()) if (hr->is_marked())
bytes_to_copy = hr->max_live_bytes(); bytes_to_copy = hr->max_live_bytes();
@ -1756,8 +1761,8 @@ G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
} }
void void
G1CollectorPolicy::init_cset_region_lengths(size_t eden_cset_region_length, G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
size_t survivor_cset_region_length) { uint survivor_cset_region_length) {
_eden_cset_region_length = eden_cset_region_length; _eden_cset_region_length = eden_cset_region_length;
_survivor_cset_region_length = survivor_cset_region_length; _survivor_cset_region_length = survivor_cset_region_length;
_old_cset_region_length = 0; _old_cset_region_length = 0;
@ -2021,7 +2026,7 @@ region_num_to_mbs(int length) {
} }
#endif // PRODUCT #endif // PRODUCT
size_t G1CollectorPolicy::max_regions(int purpose) { uint G1CollectorPolicy::max_regions(int purpose) {
switch (purpose) { switch (purpose) {
case GCAllocForSurvived: case GCAllocForSurvived:
return _max_survivor_regions; return _max_survivor_regions;
@ -2034,13 +2039,13 @@ size_t G1CollectorPolicy::max_regions(int purpose) {
} }
void G1CollectorPolicy::update_max_gc_locker_expansion() { void G1CollectorPolicy::update_max_gc_locker_expansion() {
size_t expansion_region_num = 0; uint expansion_region_num = 0;
if (GCLockerEdenExpansionPercent > 0) { if (GCLockerEdenExpansionPercent > 0) {
double perc = (double) GCLockerEdenExpansionPercent / 100.0; double perc = (double) GCLockerEdenExpansionPercent / 100.0;
double expansion_region_num_d = perc * (double) _young_list_target_length; double expansion_region_num_d = perc * (double) _young_list_target_length;
// We use ceiling so that if expansion_region_num_d is > 0.0 (but // We use ceiling so that if expansion_region_num_d is > 0.0 (but
// less than 1.0) we'll get 1. // less than 1.0) we'll get 1.
expansion_region_num = (size_t) ceil(expansion_region_num_d); expansion_region_num = (uint) ceil(expansion_region_num_d);
} else { } else {
assert(expansion_region_num == 0, "sanity"); assert(expansion_region_num == 0, "sanity");
} }
@ -2054,7 +2059,7 @@ void G1CollectorPolicy::update_survivors_policy() {
(double) _young_list_target_length / (double) SurvivorRatio; (double) _young_list_target_length / (double) SurvivorRatio;
// We use ceiling so that if max_survivor_regions_d is > 0.0 (but // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
// smaller than 1.0) we'll get 1. // smaller than 1.0) we'll get 1.
_max_survivor_regions = (size_t) ceil(max_survivor_regions_d); _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
_tenuring_threshold = _survivors_age_table.compute_tenuring_threshold( _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
HeapRegion::GrainWords * _max_survivor_regions); HeapRegion::GrainWords * _max_survivor_regions);
@ -2288,27 +2293,25 @@ G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
(clear_marked_end_sec - start_sec) * 1000.0); (clear_marked_end_sec - start_sec) * 1000.0);
} }
uint region_num = _g1->n_regions();
if (G1CollectedHeap::use_parallel_gc_threads()) { if (G1CollectedHeap::use_parallel_gc_threads()) {
const size_t OverpartitionFactor = 4; const uint OverpartitionFactor = 4;
size_t WorkUnit; uint WorkUnit;
// The use of MinChunkSize = 8 in the original code // The use of MinChunkSize = 8 in the original code
// causes some assertion failures when the total number of // causes some assertion failures when the total number of
// region is less than 8. The code here tries to fix that. // region is less than 8. The code here tries to fix that.
// Should the original code also be fixed? // Should the original code also be fixed?
if (no_of_gc_threads > 0) { if (no_of_gc_threads > 0) {
const size_t MinWorkUnit = const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U);
MAX2(_g1->n_regions() / no_of_gc_threads, (size_t) 1U); WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor),
WorkUnit =
MAX2(_g1->n_regions() / (no_of_gc_threads * OverpartitionFactor),
MinWorkUnit); MinWorkUnit);
} else { } else {
assert(no_of_gc_threads > 0, assert(no_of_gc_threads > 0,
"The active gc workers should be greater than 0"); "The active gc workers should be greater than 0");
// In a product build do something reasonable to avoid a crash. // In a product build do something reasonable to avoid a crash.
const size_t MinWorkUnit = const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U);
MAX2(_g1->n_regions() / ParallelGCThreads, (size_t) 1U);
WorkUnit = WorkUnit =
MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor), MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
MinWorkUnit); MinWorkUnit);
} }
_collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(), _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
@ -2624,8 +2627,8 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
// pause are appended to the RHS of the young list, i.e. // pause are appended to the RHS of the young list, i.e.
// [Newly Young Regions ++ Survivors from last pause]. // [Newly Young Regions ++ Survivors from last pause].
size_t survivor_region_length = young_list->survivor_length(); uint survivor_region_length = young_list->survivor_length();
size_t eden_region_length = young_list->length() - survivor_region_length; uint eden_region_length = young_list->length() - survivor_region_length;
init_cset_region_lengths(eden_region_length, survivor_region_length); init_cset_region_lengths(eden_region_length, survivor_region_length);
hr = young_list->first_survivor_region(); hr = young_list->first_survivor_region();
while (hr != NULL) { while (hr != NULL) {
@ -2664,10 +2667,10 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
if (!gcs_are_young()) { if (!gcs_are_young()) {
CollectionSetChooser* cset_chooser = _collectionSetChooser; CollectionSetChooser* cset_chooser = _collectionSetChooser;
assert(cset_chooser->verify(), "CSet Chooser verification - pre"); assert(cset_chooser->verify(), "CSet Chooser verification - pre");
const size_t min_old_cset_length = cset_chooser->calcMinOldCSetLength(); const uint min_old_cset_length = cset_chooser->calcMinOldCSetLength();
const size_t max_old_cset_length = cset_chooser->calcMaxOldCSetLength(); const uint max_old_cset_length = cset_chooser->calcMaxOldCSetLength();
size_t expensive_region_num = 0; uint expensive_region_num = 0;
bool check_time_remaining = adaptive_young_list_length(); bool check_time_remaining = adaptive_young_list_length();
HeapRegion* hr = cset_chooser->peek(); HeapRegion* hr = cset_chooser->peek();
while (hr != NULL) { while (hr != NULL) {

View file

@ -128,19 +128,19 @@ private:
SizerNewRatio SizerNewRatio
}; };
SizerKind _sizer_kind; SizerKind _sizer_kind;
size_t _min_desired_young_length; uint _min_desired_young_length;
size_t _max_desired_young_length; uint _max_desired_young_length;
bool _adaptive_size; bool _adaptive_size;
size_t calculate_default_min_length(size_t new_number_of_heap_regions); uint calculate_default_min_length(uint new_number_of_heap_regions);
size_t calculate_default_max_length(size_t new_number_of_heap_regions); uint calculate_default_max_length(uint new_number_of_heap_regions);
public: public:
G1YoungGenSizer(); G1YoungGenSizer();
void heap_size_changed(size_t new_number_of_heap_regions); void heap_size_changed(uint new_number_of_heap_regions);
size_t min_desired_young_length() { uint min_desired_young_length() {
return _min_desired_young_length; return _min_desired_young_length;
} }
size_t max_desired_young_length() { uint max_desired_young_length() {
return _max_desired_young_length; return _max_desired_young_length;
} }
bool adaptive_young_list_length() { bool adaptive_young_list_length() {
@ -175,7 +175,7 @@ private:
double _cur_collection_start_sec; double _cur_collection_start_sec;
size_t _cur_collection_pause_used_at_start_bytes; size_t _cur_collection_pause_used_at_start_bytes;
size_t _cur_collection_pause_used_regions_at_start; uint _cur_collection_pause_used_regions_at_start;
double _cur_collection_par_time_ms; double _cur_collection_par_time_ms;
double _cur_collection_code_root_fixup_time_ms; double _cur_collection_code_root_fixup_time_ms;
@ -233,13 +233,13 @@ private:
// indicates whether we are in young or mixed GC mode // indicates whether we are in young or mixed GC mode
bool _gcs_are_young; bool _gcs_are_young;
size_t _young_list_target_length; uint _young_list_target_length;
size_t _young_list_fixed_length; uint _young_list_fixed_length;
size_t _prev_eden_capacity; // used for logging size_t _prev_eden_capacity; // used for logging
// The max number of regions we can extend the eden by while the GC // The max number of regions we can extend the eden by while the GC
// locker is active. This should be >= _young_list_target_length; // locker is active. This should be >= _young_list_target_length;
size_t _young_list_max_length; uint _young_list_max_length;
bool _last_gc_was_young; bool _last_gc_was_young;
@ -257,7 +257,7 @@ private:
double _gc_overhead_perc; double _gc_overhead_perc;
double _reserve_factor; double _reserve_factor;
size_t _reserve_regions; uint _reserve_regions;
bool during_marking() { bool during_marking() {
return _during_marking; return _during_marking;
@ -292,18 +292,18 @@ private:
G1YoungGenSizer* _young_gen_sizer; G1YoungGenSizer* _young_gen_sizer;
size_t _eden_cset_region_length; uint _eden_cset_region_length;
size_t _survivor_cset_region_length; uint _survivor_cset_region_length;
size_t _old_cset_region_length; uint _old_cset_region_length;
void init_cset_region_lengths(size_t eden_cset_region_length, void init_cset_region_lengths(uint eden_cset_region_length,
size_t survivor_cset_region_length); uint survivor_cset_region_length);
size_t eden_cset_region_length() { return _eden_cset_region_length; } uint eden_cset_region_length() { return _eden_cset_region_length; }
size_t survivor_cset_region_length() { return _survivor_cset_region_length; } uint survivor_cset_region_length() { return _survivor_cset_region_length; }
size_t old_cset_region_length() { return _old_cset_region_length; } uint old_cset_region_length() { return _old_cset_region_length; }
size_t _free_regions_at_end_of_collection; uint _free_regions_at_end_of_collection;
size_t _recorded_rs_lengths; size_t _recorded_rs_lengths;
size_t _max_rs_lengths; size_t _max_rs_lengths;
@ -496,9 +496,9 @@ public:
void set_recorded_rs_lengths(size_t rs_lengths); void set_recorded_rs_lengths(size_t rs_lengths);
size_t cset_region_length() { return young_cset_region_length() + uint cset_region_length() { return young_cset_region_length() +
old_cset_region_length(); } old_cset_region_length(); }
size_t young_cset_region_length() { return eden_cset_region_length() + uint young_cset_region_length() { return eden_cset_region_length() +
survivor_cset_region_length(); } survivor_cset_region_length(); }
void record_young_free_cset_time_ms(double time_ms) { void record_young_free_cset_time_ms(double time_ms) {
@ -720,12 +720,12 @@ private:
// Calculate and return the minimum desired young list target // Calculate and return the minimum desired young list target
// length. This is the minimum desired young list length according // length. This is the minimum desired young list length according
// to the user's inputs. // to the user's inputs.
size_t calculate_young_list_desired_min_length(size_t base_min_length); uint calculate_young_list_desired_min_length(uint base_min_length);
// Calculate and return the maximum desired young list target // Calculate and return the maximum desired young list target
// length. This is the maximum desired young list length according // length. This is the maximum desired young list length according
// to the user's inputs. // to the user's inputs.
size_t calculate_young_list_desired_max_length(); uint calculate_young_list_desired_max_length();
// Calculate and return the maximum young list target length that // Calculate and return the maximum young list target length that
// can fit into the pause time goal. The parameters are: rs_lengths // can fit into the pause time goal. The parameters are: rs_lengths
@ -733,18 +733,18 @@ private:
// be, base_min_length is the alreay existing number of regions in // be, base_min_length is the alreay existing number of regions in
// the young list, min_length and max_length are the desired min and // the young list, min_length and max_length are the desired min and
// max young list length according to the user's inputs. // max young list length according to the user's inputs.
size_t calculate_young_list_target_length(size_t rs_lengths, uint calculate_young_list_target_length(size_t rs_lengths,
size_t base_min_length, uint base_min_length,
size_t desired_min_length, uint desired_min_length,
size_t desired_max_length); uint desired_max_length);
// Check whether a given young length (young_length) fits into the // Check whether a given young length (young_length) fits into the
// given target pause time and whether the prediction for the amount // given target pause time and whether the prediction for the amount
// of objects to be copied for the given length will fit into the // of objects to be copied for the given length will fit into the
// given free space (expressed by base_free_regions). It is used by // given free space (expressed by base_free_regions). It is used by
// calculate_young_list_target_length(). // calculate_young_list_target_length().
bool predict_will_fit(size_t young_length, double base_time_ms, bool predict_will_fit(uint young_length, double base_time_ms,
size_t base_free_regions, double target_pause_time_ms); uint base_free_regions, double target_pause_time_ms);
// Count the number of bytes used in the CS. // Count the number of bytes used in the CS.
void count_CS_bytes_used(); void count_CS_bytes_used();
@ -773,7 +773,7 @@ public:
} }
// This should be called after the heap is resized. // This should be called after the heap is resized.
void record_new_heap_size(size_t new_number_of_regions); void record_new_heap_size(uint new_number_of_regions);
void init(); void init();
@ -1048,18 +1048,18 @@ public:
} }
bool is_young_list_full() { bool is_young_list_full() {
size_t young_list_length = _g1->young_list()->length(); uint young_list_length = _g1->young_list()->length();
size_t young_list_target_length = _young_list_target_length; uint young_list_target_length = _young_list_target_length;
return young_list_length >= young_list_target_length; return young_list_length >= young_list_target_length;
} }
bool can_expand_young_list() { bool can_expand_young_list() {
size_t young_list_length = _g1->young_list()->length(); uint young_list_length = _g1->young_list()->length();
size_t young_list_max_length = _young_list_max_length; uint young_list_max_length = _young_list_max_length;
return young_list_length < young_list_max_length; return young_list_length < young_list_max_length;
} }
size_t young_list_max_length() { uint young_list_max_length() {
return _young_list_max_length; return _young_list_max_length;
} }
@ -1097,7 +1097,7 @@ private:
int _tenuring_threshold; int _tenuring_threshold;
// The limit on the number of regions allocated for survivors. // The limit on the number of regions allocated for survivors.
size_t _max_survivor_regions; uint _max_survivor_regions;
// For reporting purposes. // For reporting purposes.
size_t _eden_bytes_before_gc; size_t _eden_bytes_before_gc;
@ -1105,7 +1105,7 @@ private:
size_t _capacity_before_gc; size_t _capacity_before_gc;
// The amount of survor regions after a collection. // The amount of survor regions after a collection.
size_t _recorded_survivor_regions; uint _recorded_survivor_regions;
// List of survivor regions. // List of survivor regions.
HeapRegion* _recorded_survivor_head; HeapRegion* _recorded_survivor_head;
HeapRegion* _recorded_survivor_tail; HeapRegion* _recorded_survivor_tail;
@ -1127,9 +1127,9 @@ public:
return purpose == GCAllocForSurvived; return purpose == GCAllocForSurvived;
} }
static const size_t REGIONS_UNLIMITED = ~(size_t)0; static const uint REGIONS_UNLIMITED = (uint) -1;
size_t max_regions(int purpose); uint max_regions(int purpose);
// The limit on regions for a particular purpose is reached. // The limit on regions for a particular purpose is reached.
void note_alloc_region_limit_reached(int purpose) { void note_alloc_region_limit_reached(int purpose) {
@ -1146,7 +1146,7 @@ public:
_survivor_surv_rate_group->stop_adding_regions(); _survivor_surv_rate_group->stop_adding_regions();
} }
void record_survivor_regions(size_t regions, void record_survivor_regions(uint regions,
HeapRegion* head, HeapRegion* head,
HeapRegion* tail) { HeapRegion* tail) {
_recorded_survivor_regions = regions; _recorded_survivor_regions = regions;
@ -1154,12 +1154,11 @@ public:
_recorded_survivor_tail = tail; _recorded_survivor_tail = tail;
} }
size_t recorded_survivor_regions() { uint recorded_survivor_regions() {
return _recorded_survivor_regions; return _recorded_survivor_regions;
} }
void record_thread_age_table(ageTable* age_table) void record_thread_age_table(ageTable* age_table) {
{
_survivors_age_table.merge_par(age_table); _survivors_age_table.merge_par(age_table);
} }

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -120,7 +120,7 @@ public:
// Single parameter format strings // Single parameter format strings
#define ergo_format_str(_name_) ", " _name_ ": %s" #define ergo_format_str(_name_) ", " _name_ ": %s"
#define ergo_format_region(_name_) ", " _name_ ": "SIZE_FORMAT" regions" #define ergo_format_region(_name_) ", " _name_ ": %u regions"
#define ergo_format_byte(_name_) ", " _name_ ": "SIZE_FORMAT" bytes" #define ergo_format_byte(_name_) ", " _name_ ": "SIZE_FORMAT" bytes"
#define ergo_format_double(_name_) ", " _name_ ": %1.2f" #define ergo_format_double(_name_) ", " _name_ ": %1.2f"
#define ergo_format_perc(_name_) ", " _name_ ": %1.2f %%" #define ergo_format_perc(_name_) ", " _name_ ": %1.2f %%"

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -177,19 +177,19 @@ void G1MonitoringSupport::recalculate_sizes() {
// values we read here are possible (i.e., at a STW phase at the end // values we read here are possible (i.e., at a STW phase at the end
// of a GC). // of a GC).
size_t young_list_length = g1->young_list()->length(); uint young_list_length = g1->young_list()->length();
size_t survivor_list_length = g1->g1_policy()->recorded_survivor_regions(); uint survivor_list_length = g1->g1_policy()->recorded_survivor_regions();
assert(young_list_length >= survivor_list_length, "invariant"); assert(young_list_length >= survivor_list_length, "invariant");
size_t eden_list_length = young_list_length - survivor_list_length; uint eden_list_length = young_list_length - survivor_list_length;
// Max length includes any potential extensions to the young gen // Max length includes any potential extensions to the young gen
// we'll do when the GC locker is active. // we'll do when the GC locker is active.
size_t young_list_max_length = g1->g1_policy()->young_list_max_length(); uint young_list_max_length = g1->g1_policy()->young_list_max_length();
assert(young_list_max_length >= survivor_list_length, "invariant"); assert(young_list_max_length >= survivor_list_length, "invariant");
size_t eden_list_max_length = young_list_max_length - survivor_list_length; uint eden_list_max_length = young_list_max_length - survivor_list_length;
_overall_used = g1->used_unlocked(); _overall_used = g1->used_unlocked();
_eden_used = eden_list_length * HeapRegion::GrainBytes; _eden_used = (size_t) eden_list_length * HeapRegion::GrainBytes;
_survivor_used = survivor_list_length * HeapRegion::GrainBytes; _survivor_used = (size_t) survivor_list_length * HeapRegion::GrainBytes;
_young_region_num = young_list_length; _young_region_num = young_list_length;
_old_used = subtract_up_to_zero(_overall_used, _eden_used + _survivor_used); _old_used = subtract_up_to_zero(_overall_used, _eden_used + _survivor_used);
@ -207,7 +207,7 @@ void G1MonitoringSupport::recalculate_sizes() {
committed -= _survivor_committed + _old_committed; committed -= _survivor_committed + _old_committed;
// Next, calculate and remove the committed size for the eden. // Next, calculate and remove the committed size for the eden.
_eden_committed = eden_list_max_length * HeapRegion::GrainBytes; _eden_committed = (size_t) eden_list_max_length * HeapRegion::GrainBytes;
// Somewhat defensive: be robust in case there are inaccuracies in // Somewhat defensive: be robust in case there are inaccuracies in
// the calculations // the calculations
_eden_committed = MIN2(_eden_committed, committed); _eden_committed = MIN2(_eden_committed, committed);
@ -237,10 +237,10 @@ void G1MonitoringSupport::recalculate_eden_size() {
// When a new eden region is allocated, only the eden_used size is // When a new eden region is allocated, only the eden_used size is
// affected (since we have recalculated everything else at the last GC). // affected (since we have recalculated everything else at the last GC).
size_t young_region_num = g1h()->young_list()->length(); uint young_region_num = g1h()->young_list()->length();
if (young_region_num > _young_region_num) { if (young_region_num > _young_region_num) {
size_t diff = young_region_num - _young_region_num; uint diff = young_region_num - _young_region_num;
_eden_used += diff * HeapRegion::GrainBytes; _eden_used += (size_t) diff * HeapRegion::GrainBytes;
// Somewhat defensive: cap the eden used size to make sure it // Somewhat defensive: cap the eden used size to make sure it
// never exceeds the committed size. // never exceeds the committed size.
_eden_used = MIN2(_eden_used, _eden_committed); _eden_used = MIN2(_eden_used, _eden_committed);

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -147,7 +147,7 @@ class G1MonitoringSupport : public CHeapObj {
size_t _overall_committed; size_t _overall_committed;
size_t _overall_used; size_t _overall_used;
size_t _young_region_num; uint _young_region_num;
size_t _young_gen_committed; size_t _young_gen_committed;
size_t _eden_committed; size_t _eden_committed;
size_t _eden_used; size_t _eden_used;

View file

@ -334,7 +334,7 @@ void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
guarantee(GrainWords == 0, "we should only set it once"); guarantee(GrainWords == 0, "we should only set it once");
GrainWords = GrainBytes >> LogHeapWordSize; GrainWords = GrainBytes >> LogHeapWordSize;
guarantee((size_t)(1 << LogOfHRGrainWords) == GrainWords, "sanity"); guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");
guarantee(CardsPerRegion == 0, "we should only set it once"); guarantee(CardsPerRegion == 0, "we should only set it once");
CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift; CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
@ -482,10 +482,10 @@ void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
#endif // _MSC_VER #endif // _MSC_VER
HeapRegion:: HeapRegion::HeapRegion(uint hrs_index,
HeapRegion(size_t hrs_index, G1BlockOffsetSharedArray* sharedOffsetArray, G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr, bool is_zeroed) MemRegion mr, bool is_zeroed) :
: G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed), G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
_hrs_index(hrs_index), _hrs_index(hrs_index),
_humongous_type(NotHumongous), _humongous_start_region(NULL), _humongous_type(NotHumongous), _humongous_start_region(NULL),
_in_collection_set(false), _in_collection_set(false),

View file

@ -52,12 +52,15 @@ class HeapRegionRemSetIterator;
class HeapRegion; class HeapRegion;
class HeapRegionSetBase; class HeapRegionSetBase;
#define HR_FORMAT SIZE_FORMAT":(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]" #define HR_FORMAT "%u:(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
#define HR_FORMAT_PARAMS(_hr_) \ #define HR_FORMAT_PARAMS(_hr_) \
(_hr_)->hrs_index(), \ (_hr_)->hrs_index(), \
(_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : "-", \ (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : "-", \
(_hr_)->bottom(), (_hr_)->top(), (_hr_)->end() (_hr_)->bottom(), (_hr_)->top(), (_hr_)->end()
// sentinel value for hrs_index
#define G1_NULL_HRS_INDEX ((uint) -1)
// A dirty card to oop closure for heap regions. It // A dirty card to oop closure for heap regions. It
// knows how to get the G1 heap and how to use the bitmap // knows how to get the G1 heap and how to use the bitmap
// in the concurrent marker used by G1 to filter remembered // in the concurrent marker used by G1 to filter remembered
@ -235,7 +238,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
protected: protected:
// The index of this region in the heap region sequence. // The index of this region in the heap region sequence.
size_t _hrs_index; uint _hrs_index;
HumongousType _humongous_type; HumongousType _humongous_type;
// For a humongous region, region in which it starts. // For a humongous region, region in which it starts.
@ -342,7 +345,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
public: public:
// If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros. // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
HeapRegion(size_t hrs_index, HeapRegion(uint hrs_index,
G1BlockOffsetSharedArray* sharedOffsetArray, G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr, bool is_zeroed); MemRegion mr, bool is_zeroed);
@ -389,7 +392,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
// If this region is a member of a HeapRegionSeq, the index in that // If this region is a member of a HeapRegionSeq, the index in that
// sequence, otherwise -1. // sequence, otherwise -1.
size_t hrs_index() const { return _hrs_index; } uint hrs_index() const { return _hrs_index; }
// The number of bytes marked live in the region in the last marking phase. // The number of bytes marked live in the region in the last marking phase.
size_t marked_bytes() { return _prev_marked_bytes; } size_t marked_bytes() { return _prev_marked_bytes; }

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -577,7 +577,7 @@ void OtherRegionsTable::print_from_card_cache() {
#endif #endif
void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) { void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
size_t cur_hrs_ind = hr()->hrs_index(); size_t cur_hrs_ind = (size_t) hr()->hrs_index();
#if HRRS_VERBOSE #if HRRS_VERBOSE
gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").", gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
@ -841,7 +841,7 @@ PosParPRT* OtherRegionsTable::delete_region_table() {
#endif #endif
// Set the corresponding coarse bit. // Set the corresponding coarse bit.
size_t max_hrs_index = max->hr()->hrs_index(); size_t max_hrs_index = (size_t) max->hr()->hrs_index();
if (!_coarse_map.at(max_hrs_index)) { if (!_coarse_map.at(max_hrs_index)) {
_coarse_map.at_put(max_hrs_index, true); _coarse_map.at_put(max_hrs_index, true);
_n_coarse_entries++; _n_coarse_entries++;
@ -866,17 +866,20 @@ PosParPRT* OtherRegionsTable::delete_region_table() {
void OtherRegionsTable::scrub(CardTableModRefBS* ctbs, void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
BitMap* region_bm, BitMap* card_bm) { BitMap* region_bm, BitMap* card_bm) {
// First eliminated garbage regions from the coarse map. // First eliminated garbage regions from the coarse map.
if (G1RSScrubVerbose) if (G1RSScrubVerbose) {
gclog_or_tty->print_cr("Scrubbing region "SIZE_FORMAT":", gclog_or_tty->print_cr("Scrubbing region %u:", hr()->hrs_index());
hr()->hrs_index()); }
assert(_coarse_map.size() == region_bm->size(), "Precondition"); assert(_coarse_map.size() == region_bm->size(), "Precondition");
if (G1RSScrubVerbose) if (G1RSScrubVerbose) {
gclog_or_tty->print(" Coarse map: before = %d...", _n_coarse_entries); gclog_or_tty->print(" Coarse map: before = "SIZE_FORMAT"...",
_n_coarse_entries);
}
_coarse_map.set_intersection(*region_bm); _coarse_map.set_intersection(*region_bm);
_n_coarse_entries = _coarse_map.count_one_bits(); _n_coarse_entries = _coarse_map.count_one_bits();
if (G1RSScrubVerbose) if (G1RSScrubVerbose) {
gclog_or_tty->print_cr(" after = %d.", _n_coarse_entries); gclog_or_tty->print_cr(" after = "SIZE_FORMAT".", _n_coarse_entries);
}
// Now do the fine-grained maps. // Now do the fine-grained maps.
for (size_t i = 0; i < _max_fine_entries; i++) { for (size_t i = 0; i < _max_fine_entries; i++) {
@ -885,23 +888,27 @@ void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
while (cur != NULL) { while (cur != NULL) {
PosParPRT* nxt = cur->next(); PosParPRT* nxt = cur->next();
// If the entire region is dead, eliminate. // If the entire region is dead, eliminate.
if (G1RSScrubVerbose) if (G1RSScrubVerbose) {
gclog_or_tty->print_cr(" For other region "SIZE_FORMAT":", gclog_or_tty->print_cr(" For other region %u:",
cur->hr()->hrs_index()); cur->hr()->hrs_index());
if (!region_bm->at(cur->hr()->hrs_index())) { }
if (!region_bm->at((size_t) cur->hr()->hrs_index())) {
*prev = nxt; *prev = nxt;
cur->set_next(NULL); cur->set_next(NULL);
_n_fine_entries--; _n_fine_entries--;
if (G1RSScrubVerbose) if (G1RSScrubVerbose) {
gclog_or_tty->print_cr(" deleted via region map."); gclog_or_tty->print_cr(" deleted via region map.");
}
PosParPRT::free(cur); PosParPRT::free(cur);
} else { } else {
// Do fine-grain elimination. // Do fine-grain elimination.
if (G1RSScrubVerbose) if (G1RSScrubVerbose) {
gclog_or_tty->print(" occ: before = %4d.", cur->occupied()); gclog_or_tty->print(" occ: before = %4d.", cur->occupied());
}
cur->scrub(ctbs, card_bm); cur->scrub(ctbs, card_bm);
if (G1RSScrubVerbose) if (G1RSScrubVerbose) {
gclog_or_tty->print_cr(" after = %4d.", cur->occupied()); gclog_or_tty->print_cr(" after = %4d.", cur->occupied());
}
// Did that empty the table completely? // Did that empty the table completely?
if (cur->occupied() == 0) { if (cur->occupied() == 0) {
*prev = nxt; *prev = nxt;
@ -1003,7 +1010,7 @@ void OtherRegionsTable::clear() {
void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) { void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag); MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
size_t hrs_ind = from_hr->hrs_index(); size_t hrs_ind = (size_t) from_hr->hrs_index();
size_t ind = hrs_ind & _mod_max_fine_entries_mask; size_t ind = hrs_ind & _mod_max_fine_entries_mask;
if (del_single_region_table(ind, from_hr)) { if (del_single_region_table(ind, from_hr)) {
assert(!_coarse_map.at(hrs_ind), "Inv"); assert(!_coarse_map.at(hrs_ind), "Inv");
@ -1011,7 +1018,7 @@ void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
_coarse_map.par_at_put(hrs_ind, 0); _coarse_map.par_at_put(hrs_ind, 0);
} }
// Check to see if any of the fcc entries come from here. // Check to see if any of the fcc entries come from here.
size_t hr_ind = hr()->hrs_index(); size_t hr_ind = (size_t) hr()->hrs_index();
for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) { for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
int fcc_ent = _from_card_cache[tid][hr_ind]; int fcc_ent = _from_card_cache[tid][hr_ind];
if (fcc_ent != -1) { if (fcc_ent != -1) {
@ -1223,7 +1230,7 @@ bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
if ((size_t)_coarse_cur_region_index < _coarse_map->size()) { if ((size_t)_coarse_cur_region_index < _coarse_map->size()) {
_coarse_cur_region_cur_card = 0; _coarse_cur_region_cur_card = 0;
HeapWord* r_bot = HeapWord* r_bot =
_g1h->region_at(_coarse_cur_region_index)->bottom(); _g1h->region_at((uint) _coarse_cur_region_index)->bottom();
_cur_region_card_offset = _bosa->index_for(r_bot); _cur_region_card_offset = _bosa->index_for(r_bot);
} else { } else {
return false; return false;

View file

@ -329,13 +329,13 @@ public:
// Declare the heap size (in # of regions) to the HeapRegionRemSet(s). // Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
// (Uses it to initialize from_card_cache). // (Uses it to initialize from_card_cache).
static void init_heap(size_t max_regions) { static void init_heap(uint max_regions) {
OtherRegionsTable::init_from_card_cache(max_regions); OtherRegionsTable::init_from_card_cache((size_t) max_regions);
} }
// Declares that only regions i s.t. 0 <= i < new_n_regs are in use. // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
static void shrink_heap(size_t new_n_regs) { static void shrink_heap(uint new_n_regs) {
OtherRegionsTable::shrink_from_card_cache(new_n_regs); OtherRegionsTable::shrink_from_card_cache((size_t) new_n_regs);
} }
#ifndef PRODUCT #ifndef PRODUCT

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -31,16 +31,15 @@
// Private // Private
size_t HeapRegionSeq::find_contiguous_from(size_t from, size_t num) { uint HeapRegionSeq::find_contiguous_from(uint from, uint num) {
size_t len = length(); uint len = length();
assert(num > 1, "use this only for sequences of length 2 or greater"); assert(num > 1, "use this only for sequences of length 2 or greater");
assert(from <= len, assert(from <= len,
err_msg("from: "SIZE_FORMAT" should be valid and <= than "SIZE_FORMAT, err_msg("from: %u should be valid and <= than %u", from, len));
from, len));
size_t curr = from; uint curr = from;
size_t first = G1_NULL_HRS_INDEX; uint first = G1_NULL_HRS_INDEX;
size_t num_so_far = 0; uint num_so_far = 0;
while (curr < len && num_so_far < num) { while (curr < len && num_so_far < num) {
if (at(curr)->is_empty()) { if (at(curr)->is_empty()) {
if (first == G1_NULL_HRS_INDEX) { if (first == G1_NULL_HRS_INDEX) {
@ -60,7 +59,7 @@ size_t HeapRegionSeq::find_contiguous_from(size_t from, size_t num) {
// we found enough space for the humongous object // we found enough space for the humongous object
assert(from <= first && first < len, "post-condition"); assert(from <= first && first < len, "post-condition");
assert(first < curr && (curr - first) == num, "post-condition"); assert(first < curr && (curr - first) == num, "post-condition");
for (size_t i = first; i < first + num; ++i) { for (uint i = first; i < first + num; ++i) {
assert(at(i)->is_empty(), "post-condition"); assert(at(i)->is_empty(), "post-condition");
} }
return first; return first;
@ -73,10 +72,10 @@ size_t HeapRegionSeq::find_contiguous_from(size_t from, size_t num) {
// Public // Public
void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end, void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end,
size_t max_length) { uint max_length) {
assert((size_t) bottom % HeapRegion::GrainBytes == 0, assert((uintptr_t) bottom % HeapRegion::GrainBytes == 0,
"bottom should be heap region aligned"); "bottom should be heap region aligned");
assert((size_t) end % HeapRegion::GrainBytes == 0, assert((uintptr_t) end % HeapRegion::GrainBytes == 0,
"end should be heap region aligned"); "end should be heap region aligned");
_length = 0; _length = 0;
@ -88,8 +87,8 @@ void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end,
_max_length = max_length; _max_length = max_length;
_regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length); _regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length);
memset(_regions, 0, max_length * sizeof(HeapRegion*)); memset(_regions, 0, (size_t) max_length * sizeof(HeapRegion*));
_regions_biased = _regions - ((size_t) bottom >> _region_shift); _regions_biased = _regions - ((uintx) bottom >> _region_shift);
assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)], assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)],
"bottom should be included in the region with index 0"); "bottom should be included in the region with index 0");
@ -105,7 +104,7 @@ MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
assert(_heap_bottom <= next_bottom, "invariant"); assert(_heap_bottom <= next_bottom, "invariant");
while (next_bottom < new_end) { while (next_bottom < new_end) {
assert(next_bottom < _heap_end, "invariant"); assert(next_bottom < _heap_end, "invariant");
size_t index = length(); uint index = length();
assert(index < _max_length, "otherwise we cannot expand further"); assert(index < _max_length, "otherwise we cannot expand further");
if (index == 0) { if (index == 0) {
@ -139,9 +138,9 @@ MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
return MemRegion(old_end, next_bottom); return MemRegion(old_end, next_bottom);
} }
size_t HeapRegionSeq::free_suffix() { uint HeapRegionSeq::free_suffix() {
size_t res = 0; uint res = 0;
size_t index = length(); uint index = length();
while (index > 0) { while (index > 0) {
index -= 1; index -= 1;
if (!at(index)->is_empty()) { if (!at(index)->is_empty()) {
@ -152,27 +151,24 @@ size_t HeapRegionSeq::free_suffix() {
return res; return res;
} }
size_t HeapRegionSeq::find_contiguous(size_t num) { uint HeapRegionSeq::find_contiguous(uint num) {
assert(num > 1, "use this only for sequences of length 2 or greater"); assert(num > 1, "use this only for sequences of length 2 or greater");
assert(_next_search_index <= length(), assert(_next_search_index <= length(),
err_msg("_next_search_indeex: "SIZE_FORMAT" " err_msg("_next_search_index: %u should be valid and <= than %u",
"should be valid and <= than "SIZE_FORMAT,
_next_search_index, length())); _next_search_index, length()));
size_t start = _next_search_index; uint start = _next_search_index;
size_t res = find_contiguous_from(start, num); uint res = find_contiguous_from(start, num);
if (res == G1_NULL_HRS_INDEX && start > 0) { if (res == G1_NULL_HRS_INDEX && start > 0) {
// Try starting from the beginning. If _next_search_index was 0, // Try starting from the beginning. If _next_search_index was 0,
// no point in doing this again. // no point in doing this again.
res = find_contiguous_from(0, num); res = find_contiguous_from(0, num);
} }
if (res != G1_NULL_HRS_INDEX) { if (res != G1_NULL_HRS_INDEX) {
assert(res < length(), assert(res < length(), err_msg("res: %u should be valid", res));
err_msg("res: "SIZE_FORMAT" should be valid", res));
_next_search_index = res + num; _next_search_index = res + num;
assert(_next_search_index <= length(), assert(_next_search_index <= length(),
err_msg("_next_search_indeex: "SIZE_FORMAT" " err_msg("_next_search_index: %u should be valid and <= than %u",
"should be valid and <= than "SIZE_FORMAT,
_next_search_index, length())); _next_search_index, length()));
} }
return res; return res;
@ -183,20 +179,20 @@ void HeapRegionSeq::iterate(HeapRegionClosure* blk) const {
} }
void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const { void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const {
size_t hr_index = 0; uint hr_index = 0;
if (hr != NULL) { if (hr != NULL) {
hr_index = (size_t) hr->hrs_index(); hr_index = hr->hrs_index();
} }
size_t len = length(); uint len = length();
for (size_t i = hr_index; i < len; i += 1) { for (uint i = hr_index; i < len; i += 1) {
bool res = blk->doHeapRegion(at(i)); bool res = blk->doHeapRegion(at(i));
if (res) { if (res) {
blk->incomplete(); blk->incomplete();
return; return;
} }
} }
for (size_t i = 0; i < hr_index; i += 1) { for (uint i = 0; i < hr_index; i += 1) {
bool res = blk->doHeapRegion(at(i)); bool res = blk->doHeapRegion(at(i));
if (res) { if (res) {
blk->incomplete(); blk->incomplete();
@ -206,7 +202,7 @@ void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const {
} }
MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes, MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
size_t* num_regions_deleted) { uint* num_regions_deleted) {
// Reset this in case it's currently pointing into the regions that // Reset this in case it's currently pointing into the regions that
// we just removed. // we just removed.
_next_search_index = 0; _next_search_index = 0;
@ -218,7 +214,7 @@ MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
assert(_allocated_length > 0, "we should have at least one region committed"); assert(_allocated_length > 0, "we should have at least one region committed");
// around the loop, i will be the next region to be removed // around the loop, i will be the next region to be removed
size_t i = length() - 1; uint i = length() - 1;
assert(i > 0, "we should never remove all regions"); assert(i > 0, "we should never remove all regions");
// [last_start, end) is the MemRegion that covers the regions we will remove. // [last_start, end) is the MemRegion that covers the regions we will remove.
HeapWord* end = at(i)->end(); HeapWord* end = at(i)->end();
@ -249,29 +245,24 @@ MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
#ifndef PRODUCT #ifndef PRODUCT
void HeapRegionSeq::verify_optional() { void HeapRegionSeq::verify_optional() {
guarantee(_length <= _allocated_length, guarantee(_length <= _allocated_length,
err_msg("invariant: _length: "SIZE_FORMAT" " err_msg("invariant: _length: %u _allocated_length: %u",
"_allocated_length: "SIZE_FORMAT,
_length, _allocated_length)); _length, _allocated_length));
guarantee(_allocated_length <= _max_length, guarantee(_allocated_length <= _max_length,
err_msg("invariant: _allocated_length: "SIZE_FORMAT" " err_msg("invariant: _allocated_length: %u _max_length: %u",
"_max_length: "SIZE_FORMAT,
_allocated_length, _max_length)); _allocated_length, _max_length));
guarantee(_next_search_index <= _length, guarantee(_next_search_index <= _length,
err_msg("invariant: _next_search_index: "SIZE_FORMAT" " err_msg("invariant: _next_search_index: %u _length: %u",
"_length: "SIZE_FORMAT,
_next_search_index, _length)); _next_search_index, _length));
HeapWord* prev_end = _heap_bottom; HeapWord* prev_end = _heap_bottom;
for (size_t i = 0; i < _allocated_length; i += 1) { for (uint i = 0; i < _allocated_length; i += 1) {
HeapRegion* hr = _regions[i]; HeapRegion* hr = _regions[i];
guarantee(hr != NULL, err_msg("invariant: i: "SIZE_FORMAT, i)); guarantee(hr != NULL, err_msg("invariant: i: %u", i));
guarantee(hr->bottom() == prev_end, guarantee(hr->bottom() == prev_end,
err_msg("invariant i: "SIZE_FORMAT" "HR_FORMAT" " err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
"prev_end: "PTR_FORMAT,
i, HR_FORMAT_PARAMS(hr), prev_end)); i, HR_FORMAT_PARAMS(hr), prev_end));
guarantee(hr->hrs_index() == i, guarantee(hr->hrs_index() == i,
err_msg("invariant: i: "SIZE_FORMAT" hrs_index(): "SIZE_FORMAT, err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index()));
i, hr->hrs_index()));
if (i < _length) { if (i < _length) {
// Asserts will fire if i is >= _length // Asserts will fire if i is >= _length
HeapWord* addr = hr->bottom(); HeapWord* addr = hr->bottom();
@ -290,8 +281,8 @@ void HeapRegionSeq::verify_optional() {
prev_end = hr->end(); prev_end = hr->end();
} }
} }
for (size_t i = _allocated_length; i < _max_length; i += 1) { for (uint i = _allocated_length; i < _max_length; i += 1) {
guarantee(_regions[i] == NULL, err_msg("invariant i: "SIZE_FORMAT, i)); guarantee(_regions[i] == NULL, err_msg("invariant i: %u", i));
} }
} }
#endif // PRODUCT #endif // PRODUCT

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -29,8 +29,6 @@ class HeapRegion;
class HeapRegionClosure; class HeapRegionClosure;
class FreeRegionList; class FreeRegionList;
#define G1_NULL_HRS_INDEX ((size_t) -1)
// This class keeps track of the region metadata (i.e., HeapRegion // This class keeps track of the region metadata (i.e., HeapRegion
// instances). They are kept in the _regions array in address // instances). They are kept in the _regions array in address
// order. A region's index in the array corresponds to its index in // order. A region's index in the array corresponds to its index in
@ -65,7 +63,7 @@ class HeapRegionSeq: public CHeapObj {
HeapRegion** _regions_biased; HeapRegion** _regions_biased;
// The number of regions committed in the heap. // The number of regions committed in the heap.
size_t _length; uint _length;
// The address of the first reserved word in the heap. // The address of the first reserved word in the heap.
HeapWord* _heap_bottom; HeapWord* _heap_bottom;
@ -74,32 +72,32 @@ class HeapRegionSeq: public CHeapObj {
HeapWord* _heap_end; HeapWord* _heap_end;
// The log of the region byte size. // The log of the region byte size.
size_t _region_shift; uint _region_shift;
// A hint for which index to start searching from for humongous // A hint for which index to start searching from for humongous
// allocations. // allocations.
size_t _next_search_index; uint _next_search_index;
// The number of regions for which we have allocated HeapRegions for. // The number of regions for which we have allocated HeapRegions for.
size_t _allocated_length; uint _allocated_length;
// The maximum number of regions in the heap. // The maximum number of regions in the heap.
size_t _max_length; uint _max_length;
// Find a contiguous set of empty regions of length num, starting // Find a contiguous set of empty regions of length num, starting
// from the given index. // from the given index.
size_t find_contiguous_from(size_t from, size_t num); uint find_contiguous_from(uint from, uint num);
// Map a heap address to a biased region index. Assume that the // Map a heap address to a biased region index. Assume that the
// address is valid. // address is valid.
inline size_t addr_to_index_biased(HeapWord* addr) const; inline uintx addr_to_index_biased(HeapWord* addr) const;
void increment_length(size_t* length) { void increment_length(uint* length) {
assert(*length < _max_length, "pre-condition"); assert(*length < _max_length, "pre-condition");
*length += 1; *length += 1;
} }
void decrement_length(size_t* length) { void decrement_length(uint* length) {
assert(*length > 0, "pre-condition"); assert(*length > 0, "pre-condition");
*length -= 1; *length -= 1;
} }
@ -108,11 +106,11 @@ class HeapRegionSeq: public CHeapObj {
// Empty contructor, we'll initialize it with the initialize() method. // Empty contructor, we'll initialize it with the initialize() method.
HeapRegionSeq() { } HeapRegionSeq() { }
void initialize(HeapWord* bottom, HeapWord* end, size_t max_length); void initialize(HeapWord* bottom, HeapWord* end, uint max_length);
// Return the HeapRegion at the given index. Assume that the index // Return the HeapRegion at the given index. Assume that the index
// is valid. // is valid.
inline HeapRegion* at(size_t index) const; inline HeapRegion* at(uint index) const;
// If addr is within the committed space return its corresponding // If addr is within the committed space return its corresponding
// HeapRegion, otherwise return NULL. // HeapRegion, otherwise return NULL.
@ -123,10 +121,10 @@ class HeapRegionSeq: public CHeapObj {
inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const; inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;
// Return the number of regions that have been committed in the heap. // Return the number of regions that have been committed in the heap.
size_t length() const { return _length; } uint length() const { return _length; }
// Return the maximum number of regions in the heap. // Return the maximum number of regions in the heap.
size_t max_length() const { return _max_length; } uint max_length() const { return _max_length; }
// Expand the sequence to reflect that the heap has grown from // Expand the sequence to reflect that the heap has grown from
// old_end to new_end. Either create new HeapRegions, or re-use // old_end to new_end. Either create new HeapRegions, or re-use
@ -139,12 +137,12 @@ class HeapRegionSeq: public CHeapObj {
// Return the number of contiguous regions at the end of the sequence // Return the number of contiguous regions at the end of the sequence
// that are available for allocation. // that are available for allocation.
size_t free_suffix(); uint free_suffix();
// Find a contiguous set of empty regions of length num and return // Find a contiguous set of empty regions of length num and return
// the index of the first region or G1_NULL_HRS_INDEX if the // the index of the first region or G1_NULL_HRS_INDEX if the
// search was unsuccessful. // search was unsuccessful.
size_t find_contiguous(size_t num); uint find_contiguous(uint num);
// Apply blk->doHeapRegion() on all committed regions in address order, // Apply blk->doHeapRegion() on all committed regions in address order,
// terminating the iteration early if doHeapRegion() returns true. // terminating the iteration early if doHeapRegion() returns true.
@ -159,7 +157,7 @@ class HeapRegionSeq: public CHeapObj {
// sequence. Return a MemRegion that corresponds to the address // sequence. Return a MemRegion that corresponds to the address
// range of the uncommitted regions. Assume shrink_bytes is page and // range of the uncommitted regions. Assume shrink_bytes is page and
// heap region aligned. // heap region aligned.
MemRegion shrink_by(size_t shrink_bytes, size_t* num_regions_deleted); MemRegion shrink_by(size_t shrink_bytes, uint* num_regions_deleted);
// Do some sanity checking. // Do some sanity checking.
void verify_optional() PRODUCT_RETURN; void verify_optional() PRODUCT_RETURN;

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -28,11 +28,11 @@
#include "gc_implementation/g1/heapRegion.hpp" #include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp" #include "gc_implementation/g1/heapRegionSeq.hpp"
inline size_t HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const { inline uintx HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
assert(_heap_bottom <= addr && addr < _heap_end, assert(_heap_bottom <= addr && addr < _heap_end,
err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT, err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
addr, _heap_bottom, _heap_end)); addr, _heap_bottom, _heap_end));
size_t index = (size_t) addr >> _region_shift; uintx index = (uintx) addr >> _region_shift;
return index; return index;
} }
@ -40,7 +40,7 @@ inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
assert(_heap_bottom <= addr && addr < _heap_end, assert(_heap_bottom <= addr && addr < _heap_end,
err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT, err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
addr, _heap_bottom, _heap_end)); addr, _heap_bottom, _heap_end));
size_t index_biased = addr_to_index_biased(addr); uintx index_biased = addr_to_index_biased(addr);
HeapRegion* hr = _regions_biased[index_biased]; HeapRegion* hr = _regions_biased[index_biased];
assert(hr != NULL, "invariant"); assert(hr != NULL, "invariant");
return hr; return hr;
@ -55,7 +55,7 @@ inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
return NULL; return NULL;
} }
inline HeapRegion* HeapRegionSeq::at(size_t index) const { inline HeapRegion* HeapRegionSeq::at(uint index) const {
assert(index < length(), "pre-condition"); assert(index < length(), "pre-condition");
HeapRegion* hr = _regions[index]; HeapRegion* hr = _regions[index];
assert(hr != NULL, "sanity"); assert(hr != NULL, "sanity");

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -25,28 +25,26 @@
#include "precompiled.hpp" #include "precompiled.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp" #include "gc_implementation/g1/heapRegionSet.inline.hpp"
size_t HeapRegionSetBase::_unrealistically_long_length = 0; uint HeapRegionSetBase::_unrealistically_long_length = 0;
HRSPhase HeapRegionSetBase::_phase = HRSPhaseNone; HRSPhase HeapRegionSetBase::_phase = HRSPhaseNone;
//////////////////// HeapRegionSetBase //////////////////// //////////////////// HeapRegionSetBase ////////////////////
void HeapRegionSetBase::set_unrealistically_long_length(size_t len) { void HeapRegionSetBase::set_unrealistically_long_length(uint len) {
guarantee(_unrealistically_long_length == 0, "should only be set once"); guarantee(_unrealistically_long_length == 0, "should only be set once");
_unrealistically_long_length = len; _unrealistically_long_length = len;
} }
size_t HeapRegionSetBase::calculate_region_num(HeapRegion* hr) { uint HeapRegionSetBase::calculate_region_num(HeapRegion* hr) {
assert(hr->startsHumongous(), "pre-condition"); assert(hr->startsHumongous(), "pre-condition");
assert(hr->capacity() % HeapRegion::GrainBytes == 0, "invariant"); assert(hr->capacity() % HeapRegion::GrainBytes == 0, "invariant");
size_t region_num = hr->capacity() >> HeapRegion::LogOfHRGrainBytes; uint region_num = (uint) (hr->capacity() >> HeapRegion::LogOfHRGrainBytes);
assert(region_num > 0, "sanity"); assert(region_num > 0, "sanity");
return region_num; return region_num;
} }
void HeapRegionSetBase::fill_in_ext_msg(hrs_ext_msg* msg, const char* message) { void HeapRegionSetBase::fill_in_ext_msg(hrs_ext_msg* msg, const char* message) {
msg->append("[%s] %s " msg->append("[%s] %s ln: %u rn: %u cy: "SIZE_FORMAT" ud: "SIZE_FORMAT,
"ln: "SIZE_FORMAT" rn: "SIZE_FORMAT" "
"cy: "SIZE_FORMAT" ud: "SIZE_FORMAT,
name(), message, length(), region_num(), name(), message, length(), region_num(),
total_capacity_bytes(), total_used_bytes()); total_capacity_bytes(), total_used_bytes());
fill_in_ext_msg_extra(msg); fill_in_ext_msg_extra(msg);
@ -170,13 +168,11 @@ void HeapRegionSetBase::verify_end() {
hrs_ext_msg(this, "verification should be in progress")); hrs_ext_msg(this, "verification should be in progress"));
guarantee(length() == _calc_length, guarantee(length() == _calc_length,
hrs_err_msg("[%s] length: "SIZE_FORMAT" should be == " hrs_err_msg("[%s] length: %u should be == calc length: %u",
"calc length: "SIZE_FORMAT,
name(), length(), _calc_length)); name(), length(), _calc_length));
guarantee(region_num() == _calc_region_num, guarantee(region_num() == _calc_region_num,
hrs_err_msg("[%s] region num: "SIZE_FORMAT" should be == " hrs_err_msg("[%s] region num: %u should be == calc region num: %u",
"calc region num: "SIZE_FORMAT,
name(), region_num(), _calc_region_num)); name(), region_num(), _calc_region_num));
guarantee(total_capacity_bytes() == _calc_total_capacity_bytes, guarantee(total_capacity_bytes() == _calc_total_capacity_bytes,
@ -211,8 +207,8 @@ void HeapRegionSetBase::print_on(outputStream* out, bool print_contents) {
out->print_cr(" humongous : %s", BOOL_TO_STR(regions_humongous())); out->print_cr(" humongous : %s", BOOL_TO_STR(regions_humongous()));
out->print_cr(" empty : %s", BOOL_TO_STR(regions_empty())); out->print_cr(" empty : %s", BOOL_TO_STR(regions_empty()));
out->print_cr(" Attributes"); out->print_cr(" Attributes");
out->print_cr(" length : "SIZE_FORMAT_W(14), length()); out->print_cr(" length : %14u", length());
out->print_cr(" region num : "SIZE_FORMAT_W(14), region_num()); out->print_cr(" region num : %14u", region_num());
out->print_cr(" total capacity : "SIZE_FORMAT_W(14)" bytes", out->print_cr(" total capacity : "SIZE_FORMAT_W(14)" bytes",
total_capacity_bytes()); total_capacity_bytes());
out->print_cr(" total used : "SIZE_FORMAT_W(14)" bytes", out->print_cr(" total used : "SIZE_FORMAT_W(14)" bytes",
@ -243,14 +239,12 @@ void HeapRegionSet::update_from_proxy(HeapRegionSet* proxy_set) {
if (proxy_set->is_empty()) return; if (proxy_set->is_empty()) return;
assert(proxy_set->length() <= _length, assert(proxy_set->length() <= _length,
hrs_err_msg("[%s] proxy set length: "SIZE_FORMAT" " hrs_err_msg("[%s] proxy set length: %u should be <= length: %u",
"should be <= length: "SIZE_FORMAT,
name(), proxy_set->length(), _length)); name(), proxy_set->length(), _length));
_length -= proxy_set->length(); _length -= proxy_set->length();
assert(proxy_set->region_num() <= _region_num, assert(proxy_set->region_num() <= _region_num,
hrs_err_msg("[%s] proxy set region num: "SIZE_FORMAT" " hrs_err_msg("[%s] proxy set region num: %u should be <= region num: %u",
"should be <= region num: "SIZE_FORMAT,
name(), proxy_set->region_num(), _region_num)); name(), proxy_set->region_num(), _region_num));
_region_num -= proxy_set->region_num(); _region_num -= proxy_set->region_num();
@ -369,17 +363,17 @@ void HeapRegionLinkedList::remove_all() {
verify_optional(); verify_optional();
} }
void HeapRegionLinkedList::remove_all_pending(size_t target_count) { void HeapRegionLinkedList::remove_all_pending(uint target_count) {
hrs_assert_mt_safety_ok(this); hrs_assert_mt_safety_ok(this);
assert(target_count > 1, hrs_ext_msg(this, "pre-condition")); assert(target_count > 1, hrs_ext_msg(this, "pre-condition"));
assert(!is_empty(), hrs_ext_msg(this, "pre-condition")); assert(!is_empty(), hrs_ext_msg(this, "pre-condition"));
verify_optional(); verify_optional();
DEBUG_ONLY(size_t old_length = length();) DEBUG_ONLY(uint old_length = length();)
HeapRegion* curr = _head; HeapRegion* curr = _head;
HeapRegion* prev = NULL; HeapRegion* prev = NULL;
size_t count = 0; uint count = 0;
while (curr != NULL) { while (curr != NULL) {
hrs_assert_region_ok(this, curr, this); hrs_assert_region_ok(this, curr, this);
HeapRegion* next = curr->next(); HeapRegion* next = curr->next();
@ -387,7 +381,7 @@ void HeapRegionLinkedList::remove_all_pending(size_t target_count) {
if (curr->pending_removal()) { if (curr->pending_removal()) {
assert(count < target_count, assert(count < target_count,
hrs_err_msg("[%s] should not come across more regions " hrs_err_msg("[%s] should not come across more regions "
"pending for removal than target_count: "SIZE_FORMAT, "pending for removal than target_count: %u",
name(), target_count)); name(), target_count));
if (prev == NULL) { if (prev == NULL) {
@ -422,12 +416,11 @@ void HeapRegionLinkedList::remove_all_pending(size_t target_count) {
} }
assert(count == target_count, assert(count == target_count,
hrs_err_msg("[%s] count: "SIZE_FORMAT" should be == " hrs_err_msg("[%s] count: %u should be == target_count: %u",
"target_count: "SIZE_FORMAT, name(), count, target_count)); name(), count, target_count));
assert(length() + target_count == old_length, assert(length() + target_count == old_length,
hrs_err_msg("[%s] new length should be consistent " hrs_err_msg("[%s] new length should be consistent "
"new length: "SIZE_FORMAT" old length: "SIZE_FORMAT" " "new length: %u old length: %u target_count: %u",
"target_count: "SIZE_FORMAT,
name(), length(), old_length, target_count)); name(), length(), old_length, target_count));
verify_optional(); verify_optional();
@ -444,16 +437,16 @@ void HeapRegionLinkedList::verify() {
HeapRegion* curr = _head; HeapRegion* curr = _head;
HeapRegion* prev1 = NULL; HeapRegion* prev1 = NULL;
HeapRegion* prev0 = NULL; HeapRegion* prev0 = NULL;
size_t count = 0; uint count = 0;
while (curr != NULL) { while (curr != NULL) {
verify_next_region(curr); verify_next_region(curr);
count += 1; count += 1;
guarantee(count < _unrealistically_long_length, guarantee(count < _unrealistically_long_length,
hrs_err_msg("[%s] the calculated length: "SIZE_FORMAT" " hrs_err_msg("[%s] the calculated length: %u "
"seems very long, is there maybe a cycle? " "seems very long, is there maybe a cycle? "
"curr: "PTR_FORMAT" prev0: "PTR_FORMAT" " "curr: "PTR_FORMAT" prev0: "PTR_FORMAT" "
"prev1: "PTR_FORMAT" length: "SIZE_FORMAT, "prev1: "PTR_FORMAT" length: %u",
name(), count, curr, prev0, prev1, length())); name(), count, curr, prev0, prev1, length()));
prev1 = prev0; prev1 = prev0;

View file

@ -62,20 +62,20 @@ class HeapRegionSetBase VALUE_OBJ_CLASS_SPEC {
friend class VMStructs; friend class VMStructs;
protected: protected:
static size_t calculate_region_num(HeapRegion* hr); static uint calculate_region_num(HeapRegion* hr);
static size_t _unrealistically_long_length; static uint _unrealistically_long_length;
// The number of regions added to the set. If the set contains // The number of regions added to the set. If the set contains
// only humongous regions, this reflects only 'starts humongous' // only humongous regions, this reflects only 'starts humongous'
// regions and does not include 'continues humongous' ones. // regions and does not include 'continues humongous' ones.
size_t _length; uint _length;
// The total number of regions represented by the set. If the set // The total number of regions represented by the set. If the set
// does not contain humongous regions, this should be the same as // does not contain humongous regions, this should be the same as
// _length. If the set contains only humongous regions, this will // _length. If the set contains only humongous regions, this will
// include the 'continues humongous' regions. // include the 'continues humongous' regions.
size_t _region_num; uint _region_num;
// We don't keep track of the total capacity explicitly, we instead // We don't keep track of the total capacity explicitly, we instead
// recalculate it based on _region_num and the heap region size. // recalculate it based on _region_num and the heap region size.
@ -86,8 +86,8 @@ protected:
const char* _name; const char* _name;
bool _verify_in_progress; bool _verify_in_progress;
size_t _calc_length; uint _calc_length;
size_t _calc_region_num; uint _calc_region_num;
size_t _calc_total_capacity_bytes; size_t _calc_total_capacity_bytes;
size_t _calc_total_used_bytes; size_t _calc_total_used_bytes;
@ -153,18 +153,18 @@ protected:
HeapRegionSetBase(const char* name); HeapRegionSetBase(const char* name);
public: public:
static void set_unrealistically_long_length(size_t len); static void set_unrealistically_long_length(uint len);
const char* name() { return _name; } const char* name() { return _name; }
size_t length() { return _length; } uint length() { return _length; }
bool is_empty() { return _length == 0; } bool is_empty() { return _length == 0; }
size_t region_num() { return _region_num; } uint region_num() { return _region_num; }
size_t total_capacity_bytes() { size_t total_capacity_bytes() {
return region_num() << HeapRegion::LogOfHRGrainBytes; return (size_t) region_num() << HeapRegion::LogOfHRGrainBytes;
} }
size_t total_used_bytes() { return _total_used_bytes; } size_t total_used_bytes() { return _total_used_bytes; }
@ -341,7 +341,7 @@ public:
// of regions that are pending for removal in the list, and // of regions that are pending for removal in the list, and
// target_count should be > 1 (currently, we never need to remove a // target_count should be > 1 (currently, we never need to remove a
// single region using this). // single region using this).
void remove_all_pending(size_t target_count); void remove_all_pending(uint target_count);
virtual void verify(); virtual void verify();

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -54,15 +54,15 @@ inline void HeapRegionSetBase::update_for_removal(HeapRegion* hr) {
assert(_length > 0, hrs_ext_msg(this, "pre-condition")); assert(_length > 0, hrs_ext_msg(this, "pre-condition"));
_length -= 1; _length -= 1;
size_t region_num_diff; uint region_num_diff;
if (!hr->isHumongous()) { if (!hr->isHumongous()) {
region_num_diff = 1; region_num_diff = 1;
} else { } else {
region_num_diff = calculate_region_num(hr); region_num_diff = calculate_region_num(hr);
} }
assert(region_num_diff <= _region_num, assert(region_num_diff <= _region_num,
hrs_err_msg("[%s] region's region num: "SIZE_FORMAT" " hrs_err_msg("[%s] region's region num: %u "
"should be <= region num: "SIZE_FORMAT, "should be <= region num: %u",
name(), region_num_diff, _region_num)); name(), region_num_diff, _region_num));
_region_num -= region_num_diff; _region_num -= region_num_diff;

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -481,8 +481,7 @@ size_t SparsePRT::mem_size() const {
bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) { bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) {
#if SPARSE_PRT_VERBOSE #if SPARSE_PRT_VERBOSE
gclog_or_tty->print_cr(" Adding card %d from region %d to region " gclog_or_tty->print_cr(" Adding card %d from region %d to region %u sparse.",
SIZE_FORMAT" sparse.",
card_index, region_id, _hr->hrs_index()); card_index, region_id, _hr->hrs_index());
#endif #endif
if (_next->occupied_entries() * 2 > _next->capacity()) { if (_next->occupied_entries() * 2 > _next->capacity()) {
@ -534,7 +533,7 @@ void SparsePRT::expand() {
_next = new RSHashTable(last->capacity() * 2); _next = new RSHashTable(last->capacity() * 2);
#if SPARSE_PRT_VERBOSE #if SPARSE_PRT_VERBOSE
gclog_or_tty->print_cr(" Expanded sparse table for "SIZE_FORMAT" to %d.", gclog_or_tty->print_cr(" Expanded sparse table for %u to %d.",
_hr->hrs_index(), _next->capacity()); _hr->hrs_index(), _next->capacity());
#endif #endif
for (size_t i = 0; i < last->capacity(); i++) { for (size_t i = 0; i < last->capacity(); i++) {

View file

@ -34,7 +34,7 @@
static_field(HeapRegion, GrainBytes, size_t) \ static_field(HeapRegion, GrainBytes, size_t) \
\ \
nonstatic_field(HeapRegionSeq, _regions, HeapRegion**) \ nonstatic_field(HeapRegionSeq, _regions, HeapRegion**) \
nonstatic_field(HeapRegionSeq, _length, size_t) \ nonstatic_field(HeapRegionSeq, _length, uint) \
\ \
nonstatic_field(G1CollectedHeap, _hrs, HeapRegionSeq) \ nonstatic_field(G1CollectedHeap, _hrs, HeapRegionSeq) \
nonstatic_field(G1CollectedHeap, _g1_committed, MemRegion) \ nonstatic_field(G1CollectedHeap, _g1_committed, MemRegion) \
@ -50,8 +50,8 @@
nonstatic_field(G1MonitoringSupport, _old_committed, size_t) \ nonstatic_field(G1MonitoringSupport, _old_committed, size_t) \
nonstatic_field(G1MonitoringSupport, _old_used, size_t) \ nonstatic_field(G1MonitoringSupport, _old_used, size_t) \
\ \
nonstatic_field(HeapRegionSetBase, _length, size_t) \ nonstatic_field(HeapRegionSetBase, _length, uint) \
nonstatic_field(HeapRegionSetBase, _region_num, size_t) \ nonstatic_field(HeapRegionSetBase, _region_num, uint) \
nonstatic_field(HeapRegionSetBase, _total_used_bytes, size_t) \ nonstatic_field(HeapRegionSetBase, _total_used_bytes, size_t) \