Mirror of https://github.com/openjdk/jdk.git
8055816: Remove dead code in g1BlockOffsetTable
Reviewed-by: tschatzl, mgerdin
commit bbe82cb85b (parent 9086b5795f)
5 changed files with 13 additions and 374 deletions
g1BlockOffsetTable.cpp:

@@ -32,13 +32,6 @@
-void G1BlockOffsetSharedArrayMappingChangedListener::on_commit(uint start_idx, size_t num_regions) {
-  // Nothing to do. The BOT is hard-wired to be part of the HeapRegion, and we cannot
-  // retrieve it here since this would cause firing of several asserts. The code
-  // executed after commit of a region already needs to do some re-initialization of
-  // the HeapRegion, so we combine that.
-}
-
 //////////////////////////////////////////////////////////////////////
 // G1BlockOffsetSharedArray
 //////////////////////////////////////////////////////////////////////
@@ -72,26 +65,16 @@ bool G1BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
   return (delta & right_n_bits(LogN_words)) == (size_t)NoBits;
 }
 
-void G1BlockOffsetSharedArray::set_offset_array(HeapWord* left, HeapWord* right, u_char offset) {
-  set_offset_array(index_for(left), index_for(right -1), offset);
-}
-
 //////////////////////////////////////////////////////////////////////
 // G1BlockOffsetArray
 //////////////////////////////////////////////////////////////////////
 
 G1BlockOffsetArray::G1BlockOffsetArray(G1BlockOffsetSharedArray* array,
-                                       MemRegion mr, bool init_to_zero) :
+                                       MemRegion mr) :
   G1BlockOffsetTable(mr.start(), mr.end()),
   _unallocated_block(_bottom),
-  _array(array), _gsp(NULL),
-  _init_to_zero(init_to_zero) {
+  _array(array), _gsp(NULL) {
   assert(_bottom <= _end, "arguments out of order");
-  if (!_init_to_zero) {
-    // initialize cards to point back to mr.start()
-    set_remainder_to_point_to_start(mr.start() + N_words, mr.end());
-    _array->set_offset_array(0, 0);  // set first card to 0
-  }
 }
 
 void G1BlockOffsetArray::set_space(G1OffsetTableContigSpace* sp) {
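For orientation on the arithmetic the table is built around: G1BlockOffsetSharedArray keeps one byte per card of the reserved heap, and index_for/address_for_index translate between addresses and table slots by shifting with LogN (cf. the index_for_raw context line in the g1BlockOffsetTable.inline.hpp hunk further down). A minimal standalone sketch of that mapping, assuming the usual G1 values (LogN = 9, i.e. 512-byte cards, and 8-byte HeapWords so N_words = 64); this is illustrative code, not code from the commit:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static const size_t LogN       = 9;             // log2(bytes per card) - assumed value
static const size_t LogN_words = LogN - 3;      // log2(words per card), 8-byte words
static const size_t N_words    = (size_t)1 << LogN_words;   // 64 words per card

// index_for: which table slot covers address p? (cf. index_for_raw:
// pointer_delta(p, _reserved.start(), sizeof(char)) >> LogN)
static size_t index_for(uintptr_t p, uintptr_t reserved_start) {
  return (p - reserved_start) >> LogN;
}

// address_for_index: lowest address covered by slot `index`.
static uintptr_t address_for_index(size_t index, uintptr_t reserved_start) {
  return reserved_start + (index << LogN);
}

int main() {
  uintptr_t base = 0x100000;
  assert(index_for(base + 513, base) == 1);          // one byte past the first card
  assert(address_for_index(1, base) == base + 512);  // that card's boundary
  return 0;
}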
@@ -181,93 +164,6 @@ G1BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size_t end_card) {
   DEBUG_ONLY(check_all_cards(start_card, end_card);)
 }
 
-// The block [blk_start, blk_end) has been allocated;
-// adjust the block offset table to represent this information;
-// right-open interval: [blk_start, blk_end)
-void
-G1BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
-  mark_block(blk_start, blk_end);
-  allocated(blk_start, blk_end);
-}
-
-// Adjust BOT to show that a previously whole block has been split
-// into two.
-void G1BlockOffsetArray::split_block(HeapWord* blk, size_t blk_size,
-                                     size_t left_blk_size) {
-  // Verify that the BOT shows [blk, blk + blk_size) to be one block.
-  verify_single_block(blk, blk_size);
-  // Update the BOT to indicate that [blk + left_blk_size, blk + blk_size)
-  // is one single block.
-  mark_block(blk + left_blk_size, blk + blk_size);
-}
-
-
-// Action_mark - update the BOT for the block [blk_start, blk_end).
-//               Current typical use is for splitting a block.
-// Action_single - update the BOT for an allocation.
-// Action_verify - BOT verification.
-void G1BlockOffsetArray::do_block_internal(HeapWord* blk_start,
-                                           HeapWord* blk_end,
-                                           Action action) {
-  assert(Universe::heap()->is_in_reserved(blk_start),
-         "reference must be into the heap");
-  assert(Universe::heap()->is_in_reserved(blk_end-1),
-         "limit must be within the heap");
-  // This is optimized to make the test fast, assuming we only rarely
-  // cross boundaries.
-  uintptr_t end_ui = (uintptr_t)(blk_end - 1);
-  uintptr_t start_ui = (uintptr_t)blk_start;
-  // Calculate the last card boundary preceding end of blk
-  intptr_t boundary_before_end = (intptr_t)end_ui;
-  clear_bits(boundary_before_end, right_n_bits(LogN));
-  if (start_ui <= (uintptr_t)boundary_before_end) {
-    // blk starts at or crosses a boundary
-    // Calculate index of card on which blk begins
-    size_t    start_index = _array->index_for(blk_start);
-    // Index of card on which blk ends
-    size_t    end_index   = _array->index_for(blk_end - 1);
-    // Start address of card on which blk begins
-    HeapWord* boundary    = _array->address_for_index(start_index);
-    assert(boundary <= blk_start, "blk should start at or after boundary");
-    if (blk_start != boundary) {
-      // blk starts strictly after boundary
-      // adjust card boundary and start_index forward to next card
-      boundary += N_words;
-      start_index++;
-    }
-    assert(start_index <= end_index, "monotonicity of index_for()");
-    assert(boundary <= (HeapWord*)boundary_before_end, "tautology");
-    switch (action) {
-      case Action_mark: {
-        if (init_to_zero()) {
-          _array->set_offset_array(start_index, boundary, blk_start);
-          break;
-        } // Else fall through to the next case
-      }
-      case Action_single: {
-        _array->set_offset_array(start_index, boundary, blk_start);
-        // We have finished marking the "offset card". We need to now
-        // mark the subsequent cards that this blk spans.
-        if (start_index < end_index) {
-          HeapWord* rem_st  = _array->address_for_index(start_index) + N_words;
-          HeapWord* rem_end = _array->address_for_index(end_index)  + N_words;
-          set_remainder_to_point_to_start(rem_st, rem_end);
-        }
-        break;
-      }
-      case Action_check: {
-        _array->check_offset_array(start_index, boundary, blk_start);
-        // We have finished checking the "offset card". We need to now
-        // check the subsequent cards that this blk spans.
-        check_all_cards(start_index + 1, end_index);
-        break;
-      }
-      default:
-        ShouldNotReachHere();
-    }
-  }
-}
-
 // The card-interval [start_card, end_card] is a closed interval; this
 // is an expensive check -- use with care and only under protection of
 // suitable flag.
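The removed do_block_internal() computed which cards a block spans, wrote the first spanned card's backward offset, and delegated the rest to set_remainder_to_point_to_start(). Note in the removed switch that, with init_to_zero() now always true, Action_mark reduced to the same leading set_offset_array() call as Action_single, which is part of why this dispatch was dead. A toy model of the marking step, not VM code (the real set_remainder_to_point_to_start additionally uses a logarithmic back-skip encoding for distant cards, omitted here for clarity):

#include <stddef.h>
#include <vector>

// offsets[i] records how many words before card i's boundary the enclosing
// block starts; cards deeper inside a block are capped at N_words, meaning
// "the start is at least a whole card back".
static const unsigned char N_words = 64;

void mark_block_cards(std::vector<unsigned char>& offsets,
                      size_t start_index,     // first card at/after blk_start
                      size_t end_index,       // card containing blk_end - 1
                      size_t words_back) {    // boundary - blk_start, in words
  offsets[start_index] = (unsigned char)words_back;  // the "offset card"
  for (size_t i = start_index + 1; i <= end_index; i++) {
    offsets[i] = N_words;                            // keep walking back
  }
}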
@@ -306,25 +202,6 @@ void G1BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const {
   }
 }
 
-// The range [blk_start, blk_end) represents a single contiguous block
-// of storage; modify the block offset table to represent this
-// information; Right-open interval: [blk_start, blk_end)
-// NOTE: this method does _not_ adjust _unallocated_block.
-void
-G1BlockOffsetArray::single_block(HeapWord* blk_start, HeapWord* blk_end) {
-  do_block_internal(blk_start, blk_end, Action_single);
-}
-
-// Mark the BOT such that if [blk_start, blk_end) straddles a card
-// boundary, the card following the first such boundary is marked
-// with the appropriate offset.
-// NOTE: this method does _not_ adjust _unallocated_block or
-// any cards subsequent to the first one.
-void
-G1BlockOffsetArray::mark_block(HeapWord* blk_start, HeapWord* blk_end) {
-  do_block_internal(blk_start, blk_end, Action_mark);
-}
-
 HeapWord* G1BlockOffsetArray::block_start_unsafe(const void* addr) {
   assert(_bottom <= addr && addr < _end,
          "addr must be covered by this Array");
@@ -397,57 +274,13 @@ G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
   return forward_to_block_containing_addr_const(q, n, addr);
 }
 
-HeapWord* G1BlockOffsetArray::block_start_careful(const void* addr) const {
-  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
-
-  assert(_bottom <= addr && addr < _end,
-         "addr must be covered by this Array");
-  // Must read this exactly once because it can be modified by parallel
-  // allocation.
-  HeapWord* ub = _unallocated_block;
-  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
-    assert(ub < _end, "tautology (see above)");
-    return ub;
-  }
-
-  // Otherwise, find the block start using the table, but taking
-  // care (cf block_start_unsafe() above) not to parse any objects/blocks
-  // on the cards themselves.
-  size_t index = _array->index_for(addr);
-  assert(_array->address_for_index(index) == addr,
-         "arg should be start of card");
-
-  HeapWord* q = (HeapWord*)addr;
-  uint offset;
-  do {
-    offset = _array->offset_array(index--);
-    q -= offset;
-  } while (offset == N_words);
-  assert(q <= addr, "block start should be to left of arg");
-  return q;
-}
-
-// Note that the committed size of the covered space may have changed,
-// so the table size might also wish to change.
-void G1BlockOffsetArray::resize(size_t new_word_size) {
-  HeapWord* new_end = _bottom + new_word_size;
-  if (_end < new_end && !init_to_zero()) {
-    // verify that the old and new boundaries are also card boundaries
-    assert(_array->is_card_boundary(_end),
-           "_end not a card boundary");
-    assert(_array->is_card_boundary(new_end),
-           "new _end would not be a card boundary");
-    // set all the newly added cards
-    _array->set_offset_array(_end, new_end, N_words);
-  }
-  _end = new_end;  // update _end
-}
-
-void G1BlockOffsetArray::set_region(MemRegion mr) {
-  _bottom = mr.start();
-  _end = mr.end();
-}
-
 //
 // threshold_
 // |   _index_
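The lookup loop in the removed block_start_careful() is the mirror image of the marking above: step back by each card's recorded offset, and keep going while the entry equals N_words. A toy replay, not VM code:

#include <stddef.h>
#include <vector>

static const unsigned char N_words = 64;

// Starting from a card boundary, walk backward to the enclosing block start.
// An entry equal to N_words says "the block starts at least one full card
// earlier", so the walk continues into the previous entry.
size_t block_start_word(const std::vector<unsigned char>& offsets,
                        size_t card_index,     // card whose boundary is `q`
                        size_t q) {            // word index of that boundary
  unsigned char offset;
  do {
    offset = offsets[card_index--];
    q -= offset;
  } while (offset == N_words);
  return q;                                    // word index of the block start
}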
@@ -606,7 +439,7 @@ block_start_unsafe_const(const void* addr) const {
 G1BlockOffsetArrayContigSpace::
 G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array,
                               MemRegion mr) :
-  G1BlockOffsetArray(array, mr, true)
+  G1BlockOffsetArray(array, mr)
 {
   _next_offset_threshold = NULL;
   _next_offset_index     = 0;

@@ -641,15 +474,6 @@ HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
   return _next_offset_threshold;
 }
 
-void G1BlockOffsetArrayContigSpace::zero_bottom_entry() {
-  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
-         "just checking");
-  size_t bottom_index = _array->index_for(_bottom);
-  assert(_array->address_for_index(bottom_index) == _bottom,
-         "Precondition of call");
-  _array->set_offset_array(bottom_index, 0);
-}
-
 void
 G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_top) {
   assert(new_top <= _end, "_end should have already been updated");
g1BlockOffsetTable.hpp:

@@ -109,7 +109,12 @@ public:
 
 class G1BlockOffsetSharedArrayMappingChangedListener : public G1MappingChangedListener {
  public:
-  virtual void on_commit(uint start_idx, size_t num_regions);
+  virtual void on_commit(uint start_idx, size_t num_regions) {
+    // Nothing to do. The BOT is hard-wired to be part of the HeapRegion, and we cannot
+    // retrieve it here since this would cause firing of several asserts. The code
+    // executed after commit of a region already needs to do some re-initialization of
+    // the HeapRegion, so we combine that.
+  }
 };
 
 // This implementation of "G1BlockOffsetTable" divides the covered region
@@ -153,8 +158,6 @@ private:
   // For performance these have to devolve to array accesses in product builds.
   inline u_char offset_array(size_t index) const;
 
-  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset);
-
   void set_offset_array_raw(size_t index, u_char offset) {
     _offset_array[index] = offset;
   }

@@ -165,8 +168,6 @@ private:
 
   inline void set_offset_array(size_t left, size_t right, u_char offset);
 
-  inline void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const;
-
   bool is_card_boundary(HeapWord* p) const;
 
 public:

@@ -193,8 +194,6 @@ public:
   // G1BlockOffsetTable(s) to initialize cards.
   G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage);
 
-  void set_bottom(HeapWord* new_bottom);
-
   // Return the appropriate index into "_offset_array" for "p".
   inline size_t index_for(const void* p) const;
   inline size_t index_for_raw(const void* p) const;
@@ -220,14 +219,6 @@ private:
     LogN = G1BlockOffsetSharedArray::LogN
   };
 
-  // The following enums are used by do_block_helper
-  enum Action {
-    Action_single,      // BOT records a single block (see single_block())
-    Action_mark,        // BOT marks the start of a block (see mark_block())
-    Action_check        // Check that BOT records block correctly
-                        // (see verify_single_block()).
-  };
-
   // This is the array, which can be shared by several BlockOffsetArray's
   // servicing different
   G1BlockOffsetSharedArray* _array;

@@ -235,10 +226,6 @@ private:
   // The space that owns this subregion.
   G1OffsetTableContigSpace* _gsp;
 
-  // If true, array entries are initialized to 0; otherwise, they are
-  // initialized to point backwards to the beginning of the covered region.
-  bool _init_to_zero;
-
   // The portion [_unallocated_block, _sp.end()) of the space that
   // is a single block known not to contain any objects.
   // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.

@@ -253,9 +240,6 @@ private:
   // that is closed: [start_index, end_index]
   void set_remainder_to_point_to_start_incl(size_t start, size_t end);
 
-  // A helper function for BOT adjustment/verification work
-  void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action);
-
 protected:
 
   G1OffsetTableContigSpace* gsp() const { return _gsp; }
@@ -303,11 +287,9 @@ protected:
 
 public:
   // The space may not have it's bottom and top set yet, which is why the
-  // region is passed as a parameter. If "init_to_zero" is true, the
-  // elements of the array are initialized to zero. Otherwise, they are
-  // initialized to point backwards to the beginning.
-  G1BlockOffsetArray(G1BlockOffsetSharedArray* array, MemRegion mr,
-                     bool init_to_zero);
+  // region is passed as a parameter. The elements of the array are
+  // initialized to zero.
+  G1BlockOffsetArray(G1BlockOffsetSharedArray* array, MemRegion mr);
 
   // Note: this ought to be part of the constructor, but that would require
   // "this" to be passed as a parameter to a member constructor for
@@ -315,114 +297,19 @@ public:
   // This would be legal C++, but MS VC++ doesn't allow it.
   void set_space(G1OffsetTableContigSpace* sp);
 
-  // Resets the covered region to the given "mr".
-  void set_region(MemRegion mr);
-
-  // Resets the covered region to one with the same _bottom as before but
-  // the "new_word_size".
-  void resize(size_t new_word_size);
-
-  // These must be guaranteed to work properly (i.e., do nothing)
-  // when "blk_start" ("blk" for second version) is "NULL".
-  virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
-  virtual void alloc_block(HeapWord* blk, size_t size) {
-    alloc_block(blk, blk + size);
-  }
-
-  // The following methods are useful and optimized for a
-  // general, non-contiguous space.
-
-  // Given a block [blk_start, blk_start + full_blk_size), and
-  // a left_blk_size < full_blk_size, adjust the BOT to show two
-  // blocks [blk_start, blk_start + left_blk_size) and
-  // [blk_start + left_blk_size, blk_start + full_blk_size).
-  // It is assumed (and verified in the non-product VM) that the
-  // BOT was correct for the original block.
-  void split_block(HeapWord* blk_start, size_t full_blk_size,
-                   size_t left_blk_size);
-
-  // Adjust the BOT to show that it has a single block in the
-  // range [blk_start, blk_start + size). All necessary BOT
-  // cards are adjusted, but _unallocated_block isn't.
-  void single_block(HeapWord* blk_start, HeapWord* blk_end);
-  void single_block(HeapWord* blk, size_t size) {
-    single_block(blk, blk + size);
-  }
-
-  // Adjust BOT to show that it has a block in the range
-  // [blk_start, blk_start + size). Only the first card
-  // of BOT is touched. It is assumed (and verified in the
-  // non-product VM) that the remaining cards of the block
-  // are correct.
-  void mark_block(HeapWord* blk_start, HeapWord* blk_end);
-  void mark_block(HeapWord* blk, size_t size) {
-    mark_block(blk, blk + size);
-  }
-
-  // Adjust _unallocated_block to indicate that a particular
-  // block has been newly allocated or freed. It is assumed (and
-  // verified in the non-product VM) that the BOT is correct for
-  // the given block.
-  inline void allocated(HeapWord* blk_start, HeapWord* blk_end) {
-    // Verify that the BOT shows [blk, blk + blk_size) to be one block.
-    verify_single_block(blk_start, blk_end);
-    if (BlockOffsetArrayUseUnallocatedBlock) {
-      _unallocated_block = MAX2(_unallocated_block, blk_end);
-    }
-  }
-
-  inline void allocated(HeapWord* blk, size_t size) {
-    allocated(blk, blk + size);
-  }
-
-  inline void freed(HeapWord* blk_start, HeapWord* blk_end);
-
-  inline void freed(HeapWord* blk, size_t size);
-
   virtual HeapWord* block_start_unsafe(const void* addr);
   virtual HeapWord* block_start_unsafe_const(const void* addr) const;
 
-  // Requires "addr" to be the start of a card and returns the
-  // start of the block that contains the given address.
-  HeapWord* block_start_careful(const void* addr) const;
-
-  // If true, initialize array slots with no allocated blocks to zero.
-  // Otherwise, make them point back to the front.
-  bool init_to_zero() { return _init_to_zero; }
-
-  // Verification & debugging - ensure that the offset table reflects the fact
-  // that the block [blk_start, blk_end) or [blk, blk + size) is a
-  // single block of storage. NOTE: can't const this because of
-  // call to non-const do_block_internal() below.
-  inline void verify_single_block(HeapWord* blk_start, HeapWord* blk_end) {
-    if (VerifyBlockOffsetArray) {
-      do_block_internal(blk_start, blk_end, Action_check);
-    }
-  }
-
-  inline void verify_single_block(HeapWord* blk, size_t size) {
-    verify_single_block(blk, blk + size);
-  }
-
   // Used by region verification. Checks that the contents of the
   // BOT reflect that there's a single object that spans the address
   // range [obj_start, obj_start + word_size); returns true if this is
   // the case, returns false if it's not.
   bool verify_for_object(HeapWord* obj_start, size_t word_size) const;
 
-  // Verify that the given block is before _unallocated_block
-  inline void verify_not_unallocated(HeapWord* blk_start,
-                                     HeapWord* blk_end) const {
-    if (BlockOffsetArrayUseUnallocatedBlock) {
-      assert(blk_start < blk_end, "Block inconsistency?");
-      assert(blk_end <= _unallocated_block, "_unallocated_block problem");
-    }
-  }
-
-  inline void verify_not_unallocated(HeapWord* blk, size_t size) const {
-    verify_not_unallocated(blk, blk + size);
-  }
-
   void check_all_cards(size_t left_card, size_t right_card) const;
 
   virtual void print_on(outputStream* out) PRODUCT_RETURN;
@@ -445,14 +332,12 @@ class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
                                          blk_start, blk_end);
   }
 
-  // Variant of zero_bottom_entry that does not check for availability of the
+  // Zero out the entry for _bottom (offset will be zero). Does not check for availability of the
   // memory first.
   void zero_bottom_entry_raw();
   // Variant of initialize_threshold that does not check for availability of the
   // memory first.
   HeapWord* initialize_threshold_raw();
-  // Zero out the entry for _bottom (offset will be zero).
-  void zero_bottom_entry();
 public:
   G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr);
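G1BlockOffsetArrayContigSpace, the one concrete subclass left, updates the table lazily through _next_offset_threshold/_next_offset_index rather than through the eager helpers removed above: entries are only written when an allocation crosses the next card boundary, so bump-pointer allocation normally does no BOT work at all. A simplified sketch of that scheme, not VM code (the real cross-threshold path also applies the logarithmic back-skip encoding; a flat N_words cap is used here):

#include <stddef.h>

static const size_t N_words = 64;   // words per card (assumed, as above)

struct ContigBotSketch {
  unsigned char* offsets;     // one entry per card
  size_t next_threshold;      // word index of the next uncovered card boundary
  size_t next_index;          // table entry that boundary falls on

  // Called after allocating [blk_start, blk_end); only does work if the
  // allocation crossed next_threshold.
  void alloc_block(size_t blk_start, size_t blk_end) {
    while (next_threshold < blk_end) {           // crossed a card boundary?
      size_t back = next_threshold - blk_start;  // words back to block start
      offsets[next_index] =
          (unsigned char)(back < N_words ? back : N_words);
      next_index++;
      next_threshold += N_words;
    }
  }
};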
g1BlockOffsetTable.inline.hpp:

@@ -91,13 +91,6 @@ void G1BlockOffsetSharedArray::set_offset_array(size_t left, size_t right, u_char offset) {
   }
 }
 
-void G1BlockOffsetSharedArray::check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
-  check_index(index, "index out of range");
-  assert(high >= low, "addresses out of order");
-  check_offset(pointer_delta(high, low), "offset too large");
-  assert(_offset_array[index] == pointer_delta(high, low), "Wrong offset");
-}
-
 // Variant of index_for that does not check the index for validity.
 inline size_t G1BlockOffsetSharedArray::index_for_raw(const void* p) const {
   return pointer_delta((char*)p, _reserved.start(), sizeof(char)) >> LogN;
@@ -193,28 +186,4 @@ G1BlockOffsetArray::forward_to_block_containing_addr(HeapWord* q,
   return q;
 }
 
-//////////////////////////////////////////////////////////////////////////
-// BlockOffsetArrayNonContigSpace inlines
-//////////////////////////////////////////////////////////////////////////
-inline void G1BlockOffsetArray::freed(HeapWord* blk_start, HeapWord* blk_end) {
-  // Verify that the BOT shows [blk_start, blk_end) to be one block.
-  verify_single_block(blk_start, blk_end);
-  // adjust _unallocated_block upward or downward
-  // as appropriate
-  if (BlockOffsetArrayUseUnallocatedBlock) {
-    assert(_unallocated_block <= _end,
-           "Inconsistent value for _unallocated_block");
-    if (blk_end >= _unallocated_block && blk_start <= _unallocated_block) {
-      // CMS-specific note: a block abutting _unallocated_block to
-      // its left is being freed, a new block is being added or
-      // we are resetting following a compaction
-      _unallocated_block = blk_start;
-    }
-  }
-}
-
-inline void G1BlockOffsetArray::freed(HeapWord* blk, size_t size) {
-  freed(blk, blk + size);
-}
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP
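The removed allocated()/freed() pair maintained _unallocated_block, the boundary above which the space is known to be one object-free block (the field itself survives; see the constructor's _unallocated_block(_bottom) in the .cpp hunk above). A compilable toy of that bookkeeping, mirroring the deleted bodies, not VM code:

#include <stddef.h>

// Everything in [unallocated, end) is known to be a single block with no
// objects in it.
struct UnallocatedBlockSketch {
  size_t unallocated;   // word index of the boundary
  size_t end;           // word index of the covered region's end

  void allocated(size_t blk_start, size_t blk_end) {
    (void)blk_start;
    // Grow the allocated prefix (cf. MAX2(_unallocated_block, blk_end)).
    if (blk_end > unallocated) unallocated = blk_end;
  }

  void freed(size_t blk_start, size_t blk_end) {
    // A block abutting the boundary was freed: pull the boundary back.
    if (blk_end >= unallocated && blk_start <= unallocated) {
      unallocated = blk_start;
    }
  }
};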
heapRegion.cpp:

@@ -322,29 +322,6 @@ bool HeapRegion::claimHeapRegion(jint claimValue) {
   return false;
 }
 
-HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
-  HeapWord* low = addr;
-  HeapWord* high = end();
-  while (low < high) {
-    size_t diff = pointer_delta(high, low);
-    // Must add one below to bias toward the high amount. Otherwise, if
-    // "high" were at the desired value, and "low" were one less, we
-    // would not converge on "high". This is not symmetric, because
-    // we set "high" to a block start, which might be the right one,
-    // which we don't do for "low".
-    HeapWord* middle = low + (diff+1)/2;
-    if (middle == high) return high;
-    HeapWord* mid_bs = block_start_careful(middle);
-    if (mid_bs < addr) {
-      low = middle;
-    } else {
-      high = mid_bs;
-    }
-  }
-  assert(low == high && low >= addr, "Didn't work.");
-  return low;
-}
-
 HeapRegion::HeapRegion(uint hrm_index,
                        G1BlockOffsetSharedArray* sharedOffsetArray,
                        MemRegion mr) :
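The removed next_block_start_careful() is an upward-biased binary search; the "+1" in (diff+1)/2 is what guarantees progress once the interval shrinks to a single word. The pattern in isolation, not VM code (the real loop sets high to the block start it found, which may lie below middle; this sketch keeps just the convergence logic):

#include <stddef.h>

// pred(x) stands in for "block_start_careful(x) >= addr". The midpoint is
// biased upward so that when high == low + 1 the loop tests (and can return)
// high instead of re-testing low forever.
typedef int (*pred_fn)(size_t);

size_t bisect_up(size_t low, size_t high, pred_fn pred) {
  while (low < high) {
    size_t diff = high - low;
    size_t middle = low + (diff + 1) / 2;   // bias toward high
    if (middle == high) return high;        // nothing strictly between them
    if (pred(middle)) {
      high = middle;                        // candidate found: tighten above
    } else {
      low = middle;                         // answer lies above middle
    }
  }
  return high;
}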
heapRegion.hpp:

@@ -206,10 +206,6 @@ class G1OffsetTableContigSpace: public CompactibleSpace {
     _offsets.reset_bot();
   }
 
-  void update_bot_for_object(HeapWord* start, size_t word_size) {
-    _offsets.alloc_block(start, word_size);
-  }
-
   void print_bot_on(outputStream* out) {
     _offsets.print_on(out);
   }

@@ -737,18 +733,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
                            bool filter_young,
                            jbyte* card_ptr);
 
-  // A version of block start that is guaranteed to find *some* block
-  // boundary at or before "p", but does not object iteration, and may
-  // therefore be used safely when the heap is unparseable.
-  HeapWord* block_start_careful(const void* p) const {
-    return _offsets.block_start_careful(p);
-  }
-
-  // Requires that "addr" is within the region. Returns the start of the
-  // first ("careful") block that starts at or after "addr", or else the
-  // "end" of the region if there is no such block.
-  HeapWord* next_block_start_careful(HeapWord* addr);
-
   size_t recorded_rs_length() const        { return _recorded_rs_length; }
   double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
   size_t predicted_bytes_to_copy() const   { return _predicted_bytes_to_copy; }