Merge
commit a484211e0b
10 changed files with 512 additions and 339 deletions
@ -7879,25 +7879,23 @@ SweepClosure::SweepClosure(CMSCollector* collector,
}

// We need this destructor to reclaim any space at the end
// of the space, which do_blk below may not have added back to
// the free lists. [basically dealing with the "fringe effect"]
// of the space, which do_blk below may not yet have added back to
// the free lists.
SweepClosure::~SweepClosure() {
assert_lock_strong(_freelistLock);
// this should be treated as the end of a free run if any
// The current free range should be returned to the free lists
// as one coalesced chunk.
assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
       "sweep _limit out of bounds");
// Flush any remaining coterminal free run as a single
// coalesced chunk to the appropriate free list.
if (inFreeRange()) {
  flushCurFreeChunk(freeFinger(),
                    pointer_delta(_limit, freeFinger()));
  assert(freeFinger() < _limit, "the finger pointeth off base");
  assert(freeFinger() < _limit, "freeFinger points too high");
  flush_cur_free_chunk(freeFinger(), pointer_delta(_limit, freeFinger()));
  if (CMSTraceSweeper) {
    gclog_or_tty->print("destructor:");
    gclog_or_tty->print("Sweep:put_free_blk 0x%x ("SIZE_FORMAT") "
                        "[coalesced:"SIZE_FORMAT"]\n",
                        freeFinger(), pointer_delta(_limit, freeFinger()),
                        lastFreeRangeCoalesced());
    gclog_or_tty->print("Sweep: last chunk: ");
    gclog_or_tty->print("put_free_blk 0x%x ("SIZE_FORMAT") [coalesced:"SIZE_FORMAT"]\n",
                        freeFinger(), pointer_delta(_limit, freeFinger()), lastFreeRangeCoalesced());
  }
}
} // else nothing to flush
NOT_PRODUCT(
  if (Verbose && PrintGC) {
    gclog_or_tty->print("Collected "SIZE_FORMAT" objects, "
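For readers without the surrounding CMS sources at hand: the destructor above flushes whatever free run the sweep has accumulated (from freeFinger() up to _limit) to the free lists as one coalesced chunk. The standalone sketch below illustrates only that bookkeeping pattern; every type and name in it is an illustrative stand-in, not the HotSpot implementation.

#include <cassert>
#include <cstddef>
#include <map>

// Illustrative sketch only: coalesce runs of free/dead words during a sweep
// and flush each run to a size-indexed free list as a single chunk.
using HeapWord = size_t;                            // word index stands in for an address
using FreeLists = std::multimap<size_t, HeapWord>;  // chunk size -> chunk start

struct SweepSketch {
  bool      in_free_range = false;
  HeapWord  free_finger   = 0;        // start of the current free run
  FreeLists free_lists;

  void note_free_word(HeapWord addr) {  // dead or already-free word
    if (!in_free_range) {               // start a new run at this word
      in_free_range = true;
      free_finger   = addr;
    }                                   // otherwise just extend the run
  }

  void note_live_block(HeapWord addr) {  // a live object ends the run
    flush(addr);
  }

  void finish(HeapWord limit) {          // the destructor/limit case in the patch
    flush(limit);
  }

 private:
  void flush(HeapWord up_to) {
    if (in_free_range) {
      assert(free_finger < up_to && "finger points too high");
      free_lists.emplace(up_to - free_finger, free_finger);  // one coalesced chunk
      in_free_range = false;
    }                                    // else nothing to flush
  }
};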
@ -7934,9 +7932,8 @@ SweepClosure::~SweepClosure() {
|
|||
void SweepClosure::initialize_free_range(HeapWord* freeFinger,
|
||||
bool freeRangeInFreeLists) {
|
||||
if (CMSTraceSweeper) {
|
||||
gclog_or_tty->print("---- Start free range 0x%x with free block [%d] (%d)\n",
|
||||
freeFinger, _sp->block_size(freeFinger),
|
||||
freeRangeInFreeLists);
|
||||
gclog_or_tty->print("---- Start free range at 0x%x with free block (%d)\n",
|
||||
freeFinger, freeRangeInFreeLists);
|
||||
}
|
||||
assert(!inFreeRange(), "Trampling existing free range");
|
||||
set_inFreeRange(true);
|
||||
|
@ -7991,21 +7988,36 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {
|
|||
// may have caused us to coalesce the block ending at the address _limit
|
||||
// with a newly expanded chunk (this happens when _limit was set to the
|
||||
// previous _end of the space), so we may have stepped past _limit; see CR 6977970.
|
||||
if (addr >= _limit) { // we have swept up to or past the limit, do nothing more
|
||||
if (addr >= _limit) { // we have swept up to or past the limit: finish up
|
||||
assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
|
||||
"sweep _limit out of bounds");
|
||||
assert(addr < _sp->end(), "addr out of bounds");
|
||||
// help the closure application finish
|
||||
// Flush any remaining coterminal free run as a single
|
||||
// coalesced chunk to the appropriate free list.
|
||||
if (inFreeRange()) {
|
||||
assert(freeFinger() < _limit, "finger points too high");
|
||||
flush_cur_free_chunk(freeFinger(),
|
||||
pointer_delta(addr, freeFinger()));
|
||||
if (CMSTraceSweeper) {
|
||||
gclog_or_tty->print("Sweep: last chunk: ");
|
||||
gclog_or_tty->print("put_free_blk 0x%x ("SIZE_FORMAT") "
|
||||
"[coalesced:"SIZE_FORMAT"]\n",
|
||||
freeFinger(), pointer_delta(addr, freeFinger()),
|
||||
lastFreeRangeCoalesced());
|
||||
}
|
||||
}
|
||||
|
||||
// help the iterator loop finish
|
||||
return pointer_delta(_sp->end(), addr);
|
||||
}
|
||||
assert(addr < _limit, "sweep invariant");
|
||||
|
||||
assert(addr < _limit, "sweep invariant");
|
||||
// check if we should yield
|
||||
do_yield_check(addr);
|
||||
if (fc->isFree()) {
|
||||
// Chunk that is already free
|
||||
res = fc->size();
|
||||
doAlreadyFreeChunk(fc);
|
||||
do_already_free_chunk(fc);
|
||||
debug_only(_sp->verifyFreeLists());
|
||||
assert(res == fc->size(), "Don't expect the size to change");
|
||||
NOT_PRODUCT(
|
||||
|
@ -8015,7 +8027,7 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {
|
|||
NOT_PRODUCT(_last_fc = fc;)
|
||||
} else if (!_bitMap->isMarked(addr)) {
|
||||
// Chunk is fresh garbage
|
||||
res = doGarbageChunk(fc);
|
||||
res = do_garbage_chunk(fc);
|
||||
debug_only(_sp->verifyFreeLists());
|
||||
NOT_PRODUCT(
|
||||
_numObjectsFreed++;
|
||||
|
@ -8023,7 +8035,7 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {
|
|||
)
|
||||
} else {
|
||||
// Chunk that is alive.
|
||||
res = doLiveChunk(fc);
|
||||
res = do_live_chunk(fc);
|
||||
debug_only(_sp->verifyFreeLists());
|
||||
NOT_PRODUCT(
|
||||
_numObjectsLive++;
|
||||
|
@ -8076,7 +8088,7 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {
|
|||
// to a free list which may be overpopulated.
|
||||
//
|
||||
|
||||
void SweepClosure::doAlreadyFreeChunk(FreeChunk* fc) {
|
||||
void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
|
||||
size_t size = fc->size();
|
||||
// Chunks that cannot be coalesced are not in the
|
||||
// free lists.
|
||||
|
@ -8092,23 +8104,23 @@ void SweepClosure::doAlreadyFreeChunk(FreeChunk* fc) {
|
|||
// addr and purported end of this block.
|
||||
_bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
|
||||
|
||||
// Some chunks cannot be coalesced in under any circumstances.
|
||||
// Some chunks cannot be coalesced under any circumstances.
|
||||
// See the definition of cantCoalesce().
|
||||
if (!fc->cantCoalesce()) {
|
||||
// This chunk can potentially be coalesced.
|
||||
if (_sp->adaptive_freelists()) {
|
||||
// All the work is done in
|
||||
doPostIsFreeOrGarbageChunk(fc, size);
|
||||
do_post_free_or_garbage_chunk(fc, size);
|
||||
} else { // Not adaptive free lists
|
||||
// this is a free chunk that can potentially be coalesced by the sweeper;
|
||||
if (!inFreeRange()) {
|
||||
// if the next chunk is a free block that can't be coalesced
|
||||
// it doesn't make sense to remove this chunk from the free lists
|
||||
FreeChunk* nextChunk = (FreeChunk*)(addr + size);
|
||||
assert((HeapWord*)nextChunk <= _limit, "sweep invariant");
|
||||
if ((HeapWord*)nextChunk < _limit && // there's a next chunk...
|
||||
nextChunk->isFree() && // which is free...
|
||||
nextChunk->cantCoalesce()) { // ... but cant be coalesced
|
||||
assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
|
||||
if ((HeapWord*)nextChunk < _sp->end() && // There is another free chunk to the right ...
|
||||
nextChunk->isFree() && // ... which is free...
|
||||
nextChunk->cantCoalesce()) { // ... but can't be coalesced
|
||||
// nothing to do
|
||||
} else {
|
||||
// Potentially the start of a new free range:
|
||||
|
@ -8154,14 +8166,14 @@ void SweepClosure::doAlreadyFreeChunk(FreeChunk* fc) {
|
|||
// as the end of a free run if any
|
||||
if (inFreeRange()) {
|
||||
// we kicked some butt; time to pick up the garbage
|
||||
assert(freeFinger() < addr, "the finger pointeth off base");
|
||||
flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
|
||||
assert(freeFinger() < addr, "freeFinger points too high");
|
||||
flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
|
||||
}
|
||||
// else, nothing to do, just continue
|
||||
}
|
||||
}
|
||||
|
||||
size_t SweepClosure::doGarbageChunk(FreeChunk* fc) {
|
||||
size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
|
||||
// This is a chunk of garbage. It is not in any free list.
|
||||
// Add it to a free list or let it possibly be coalesced into
|
||||
// a larger chunk.
|
||||
|
@ -8173,7 +8185,7 @@ size_t SweepClosure::doGarbageChunk(FreeChunk* fc) {
|
|||
// addr and purported end of just dead object.
|
||||
_bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
|
||||
|
||||
doPostIsFreeOrGarbageChunk(fc, size);
|
||||
do_post_free_or_garbage_chunk(fc, size);
|
||||
} else {
|
||||
if (!inFreeRange()) {
|
||||
// start of a new free range
|
||||
|
@ -8212,35 +8224,16 @@ size_t SweepClosure::doGarbageChunk(FreeChunk* fc) {
|
|||
return size;
|
||||
}
|
||||
|
||||
size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
|
||||
size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
|
||||
HeapWord* addr = (HeapWord*) fc;
|
||||
// The sweeper has just found a live object. Return any accumulated
|
||||
// left hand chunk to the free lists.
|
||||
if (inFreeRange()) {
|
||||
if (_sp->adaptive_freelists()) {
|
||||
flushCurFreeChunk(freeFinger(),
|
||||
pointer_delta(addr, freeFinger()));
|
||||
} else { // not adaptive freelists
|
||||
set_inFreeRange(false);
|
||||
// Add the free range back to the free list if it is not already
|
||||
// there.
|
||||
if (!freeRangeInFreeLists()) {
|
||||
assert(freeFinger() < addr, "the finger pointeth off base");
|
||||
if (CMSTraceSweeper) {
|
||||
gclog_or_tty->print("Sweep:put_free_blk 0x%x (%d) "
|
||||
"[coalesced:%d]\n",
|
||||
freeFinger(), pointer_delta(addr, freeFinger()),
|
||||
lastFreeRangeCoalesced());
|
||||
}
|
||||
_sp->addChunkAndRepairOffsetTable(freeFinger(),
|
||||
pointer_delta(addr, freeFinger()), lastFreeRangeCoalesced());
|
||||
}
|
||||
}
|
||||
assert(freeFinger() < addr, "freeFinger points too high");
|
||||
flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
|
||||
}
|
||||
|
||||
// Common code path for original and adaptive free lists.
|
||||
|
||||
// this object is live: we'd normally expect this to be
|
||||
// This object is live: we'd normally expect this to be
|
||||
// an oop, and like to assert the following:
|
||||
// assert(oop(addr)->is_oop(), "live block should be an oop");
|
||||
// However, as we commented above, this may be an object whose
|
||||
|
@ -8255,7 +8248,7 @@ size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
|
|||
assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
|
||||
"alignment problem");
|
||||
|
||||
#ifdef DEBUG
|
||||
#ifdef DEBUG
|
||||
if (oop(addr)->klass_or_null() != NULL &&
|
||||
( !_collector->should_unload_classes()
|
||||
|| (oop(addr)->is_parsable()) &&
|
||||
|
@ -8269,7 +8262,7 @@ size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
|
|||
CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
|
||||
"P-mark and computed size do not agree");
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
||||
} else {
|
||||
// This should be an initialized object that's alive.
|
||||
|
@ -8296,19 +8289,17 @@ size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
|
|||
return size;
|
||||
}
|
||||
|
||||
void SweepClosure::doPostIsFreeOrGarbageChunk(FreeChunk* fc,
|
||||
size_t chunkSize) {
|
||||
// doPostIsFreeOrGarbageChunk() should only be called in the smart allocation
|
||||
// scheme.
|
||||
void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
|
||||
size_t chunkSize) {
|
||||
// do_post_free_or_garbage_chunk() should only be called in the case
|
||||
// of the adaptive free list allocator.
|
||||
bool fcInFreeLists = fc->isFree();
|
||||
assert(_sp->adaptive_freelists(), "Should only be used in this case.");
|
||||
assert((HeapWord*)fc <= _limit, "sweep invariant");
|
||||
if (CMSTestInFreeList && fcInFreeLists) {
|
||||
assert(_sp->verifyChunkInFreeLists(fc),
|
||||
"free chunk is not in free lists");
|
||||
assert(_sp->verifyChunkInFreeLists(fc), "free chunk is not in free lists");
|
||||
}
|
||||
|
||||
|
||||
if (CMSTraceSweeper) {
|
||||
gclog_or_tty->print_cr(" -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
|
||||
}
|
||||
|
@ -8380,20 +8371,21 @@ void SweepClosure::doPostIsFreeOrGarbageChunk(FreeChunk* fc,
|
|||
if (inFreeRange()) {
|
||||
// In a free range but cannot coalesce with the right hand chunk.
|
||||
// Put the current free range into the free lists.
|
||||
flushCurFreeChunk(freeFinger(),
|
||||
pointer_delta(addr, freeFinger()));
|
||||
flush_cur_free_chunk(freeFinger(),
|
||||
pointer_delta(addr, freeFinger()));
|
||||
}
|
||||
// Set up for new free range. Pass along whether the right hand
|
||||
// chunk is in the free lists.
|
||||
initialize_free_range((HeapWord*)fc, fcInFreeLists);
|
||||
}
|
||||
}
|
||||
void SweepClosure::flushCurFreeChunk(HeapWord* chunk, size_t size) {
|
||||
|
||||
void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
|
||||
assert(inFreeRange(), "Should only be called if currently in a free range.");
|
||||
assert(size > 0,
|
||||
"A zero sized chunk cannot be added to the free lists.");
|
||||
if (!freeRangeInFreeLists()) {
|
||||
if(CMSTestInFreeList) {
|
||||
if (CMSTestInFreeList) {
|
||||
FreeChunk* fc = (FreeChunk*) chunk;
|
||||
fc->setSize(size);
|
||||
assert(!_sp->verifyChunkInFreeLists(fc),
|
||||
|
@ -8428,7 +8420,7 @@ void SweepClosure::do_yield_work(HeapWord* addr) {
|
|||
// chunk just flushed, they will need to wait for the next
|
||||
// sweep to be coalesced.
|
||||
if (inFreeRange()) {
|
||||
flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
|
||||
flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
|
||||
}
|
||||
|
||||
// First give up the locks, then yield, then re-lock.
|
||||
|
|
|
@ -1701,7 +1701,9 @@ class SweepClosure: public BlkClosureCareful {
|
|||
CMSCollector* _collector; // collector doing the work
|
||||
ConcurrentMarkSweepGeneration* _g; // Generation being swept
|
||||
CompactibleFreeListSpace* _sp; // Space being swept
|
||||
HeapWord* _limit;
|
||||
HeapWord* _limit;// the address at which the sweep should stop because
|
||||
// we do not expect blocks eligible for sweeping past
|
||||
// that address.
|
||||
Mutex* _freelistLock; // Free list lock (in space)
|
||||
CMSBitMap* _bitMap; // Marking bit map (in
|
||||
// generation)
|
||||
|
@ -1745,14 +1747,13 @@ class SweepClosure: public BlkClosureCareful {
|
|||
private:
|
||||
// Code that is common to a free chunk or garbage when
|
||||
// encountered during sweeping.
|
||||
void doPostIsFreeOrGarbageChunk(FreeChunk *fc,
|
||||
size_t chunkSize);
|
||||
void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize);
|
||||
// Process a free chunk during sweeping.
|
||||
void doAlreadyFreeChunk(FreeChunk *fc);
|
||||
void do_already_free_chunk(FreeChunk *fc);
|
||||
// Process a garbage chunk during sweeping.
|
||||
size_t doGarbageChunk(FreeChunk *fc);
|
||||
size_t do_garbage_chunk(FreeChunk *fc);
|
||||
// Process a live chunk during sweeping.
|
||||
size_t doLiveChunk(FreeChunk* fc);
|
||||
size_t do_live_chunk(FreeChunk* fc);
|
||||
|
||||
// Accessors.
|
||||
HeapWord* freeFinger() const { return _freeFinger; }
|
||||
|
@ -1769,7 +1770,7 @@ class SweepClosure: public BlkClosureCareful {
|
|||
// Initialize a free range.
|
||||
void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
|
||||
// Return this chunk to the free lists.
|
||||
void flushCurFreeChunk(HeapWord* chunk, size_t size);
|
||||
void flush_cur_free_chunk(HeapWord* chunk, size_t size);
|
||||
|
||||
// Check if we should yield and do so when necessary.
|
||||
inline void do_yield_check(HeapWord* addr);
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
|
@ -222,7 +222,7 @@ void G1BlockOffsetArray::split_block(HeapWord* blk, size_t blk_size,
|
|||
|
||||
// Action_mark - update the BOT for the block [blk_start, blk_end).
|
||||
// Current typical use is for splitting a block.
|
||||
// Action_single - udpate the BOT for an allocation.
|
||||
// Action_single - update the BOT for an allocation.
|
||||
// Action_verify - BOT verification.
|
||||
void G1BlockOffsetArray::do_block_internal(HeapWord* blk_start,
|
||||
HeapWord* blk_end,
|
||||
|
@ -331,47 +331,6 @@ G1BlockOffsetArray::mark_block(HeapWord* blk_start, HeapWord* blk_end) {
|
|||
do_block_internal(blk_start, blk_end, Action_mark);
|
||||
}
|
||||
|
||||
void G1BlockOffsetArray::join_blocks(HeapWord* blk1, HeapWord* blk2) {
|
||||
HeapWord* blk1_start = Universe::heap()->block_start(blk1);
|
||||
HeapWord* blk2_start = Universe::heap()->block_start(blk2);
|
||||
assert(blk1 == blk1_start && blk2 == blk2_start,
|
||||
"Must be block starts.");
|
||||
assert(blk1 + _sp->block_size(blk1) == blk2, "Must be contiguous.");
|
||||
size_t blk1_start_index = _array->index_for(blk1);
|
||||
size_t blk2_start_index = _array->index_for(blk2);
|
||||
assert(blk1_start_index <= blk2_start_index, "sanity");
|
||||
HeapWord* blk2_card_start = _array->address_for_index(blk2_start_index);
|
||||
if (blk2 == blk2_card_start) {
|
||||
// blk2 starts a card. Does blk1 start on the prevous card, or futher
|
||||
// back?
|
||||
assert(blk1_start_index < blk2_start_index, "must be lower card.");
|
||||
if (blk1_start_index + 1 == blk2_start_index) {
|
||||
// previous card; new value for blk2 card is size of blk1.
|
||||
_array->set_offset_array(blk2_start_index, (u_char) _sp->block_size(blk1));
|
||||
} else {
|
||||
// Earlier card; go back a card.
|
||||
_array->set_offset_array(blk2_start_index, N_words);
|
||||
}
|
||||
} else {
|
||||
// blk2 does not start a card. Does it cross a card? If not, nothing
|
||||
// to do.
|
||||
size_t blk2_end_index =
|
||||
_array->index_for(blk2 + _sp->block_size(blk2) - 1);
|
||||
assert(blk2_end_index >= blk2_start_index, "sanity");
|
||||
if (blk2_end_index > blk2_start_index) {
|
||||
// Yes, it crosses a card. The value for the next card must change.
|
||||
if (blk1_start_index + 1 == blk2_start_index) {
|
||||
// previous card; new value for second blk2 card is size of blk1.
|
||||
_array->set_offset_array(blk2_start_index + 1,
|
||||
(u_char) _sp->block_size(blk1));
|
||||
} else {
|
||||
// Earlier card; go back a card.
|
||||
_array->set_offset_array(blk2_start_index + 1, N_words);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
HeapWord* G1BlockOffsetArray::block_start_unsafe(const void* addr) {
|
||||
assert(_bottom <= addr && addr < _end,
|
||||
"addr must be covered by this Array");
|
||||
|
@ -580,16 +539,51 @@ void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_
|
|||
#endif
|
||||
}
|
||||
|
||||
void
|
||||
G1BlockOffsetArray::set_for_starts_humongous(HeapWord* new_end) {
|
||||
assert(_end == new_end, "_end should have already been updated");
|
||||
|
||||
// The first BOT entry should have offset 0.
|
||||
_array->set_offset_array(_array->index_for(_bottom), 0);
|
||||
// The rest should point to the first one.
|
||||
set_remainder_to_point_to_start(_bottom + N_words, new_end);
|
||||
bool
|
||||
G1BlockOffsetArray::verify_for_object(HeapWord* obj_start,
|
||||
size_t word_size) const {
|
||||
size_t first_card = _array->index_for(obj_start);
|
||||
size_t last_card = _array->index_for(obj_start + word_size - 1);
|
||||
if (!_array->is_card_boundary(obj_start)) {
|
||||
// If the object is not on a card boundary the BOT entry of the
|
||||
// first card should point to another object so we should not
|
||||
// check that one.
|
||||
first_card += 1;
|
||||
}
|
||||
for (size_t card = first_card; card <= last_card; card += 1) {
|
||||
HeapWord* card_addr = _array->address_for_index(card);
|
||||
HeapWord* block_start = block_start_const(card_addr);
|
||||
if (block_start != obj_start) {
|
||||
gclog_or_tty->print_cr("block start: "PTR_FORMAT" is incorrect - "
|
||||
"card index: "SIZE_FORMAT" "
|
||||
"card addr: "PTR_FORMAT" BOT entry: %u "
|
||||
"obj: "PTR_FORMAT" word size: "SIZE_FORMAT" "
|
||||
"cards: ["SIZE_FORMAT","SIZE_FORMAT"]",
|
||||
block_start, card, card_addr,
|
||||
_array->offset_array(card),
|
||||
obj_start, word_size, first_card, last_card);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
void
|
||||
G1BlockOffsetArray::print_on(outputStream* out) {
|
||||
size_t from_index = _array->index_for(_bottom);
|
||||
size_t to_index = _array->index_for(_end);
|
||||
out->print_cr(">> BOT for area ["PTR_FORMAT","PTR_FORMAT") "
|
||||
"cards ["SIZE_FORMAT","SIZE_FORMAT")",
|
||||
_bottom, _end, from_index, to_index);
|
||||
for (size_t i = from_index; i < to_index; ++i) {
|
||||
out->print_cr(" entry "SIZE_FORMAT_W(8)" | "PTR_FORMAT" : %3u",
|
||||
i, _array->address_for_index(i),
|
||||
(uint) _array->offset_array(i));
|
||||
}
|
||||
}
|
||||
#endif // !PRODUCT
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// G1BlockOffsetArrayContigSpace
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
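The new verify_for_object() above checks that every card spanned by an object has a block-offset-table (BOT) entry that leads back to the object's start. The sketch below demonstrates that invariant with a deliberately simplified BOT: it stores a plain per-card back offset, whereas the real G1 table caps entries and uses a back-skip encoding. The card size, names, and types here are assumptions for illustration only.

#include <cstddef>
#include <vector>

constexpr size_t kCardWords = 64;  // words per card (assumed, not G1's value)

struct BotSketch {
  std::vector<size_t> offset;      // per-card back offset to the covering block start

  explicit BotSketch(size_t heap_words) : offset(heap_words / kCardWords + 1, 0) {}

  size_t index_for(size_t addr) const { return addr / kCardWords; }
  size_t card_start(size_t card) const { return card * kCardWords; }

  // Record that the block [blk_start, blk_end) was allocated.
  void alloc_block(size_t blk_start, size_t blk_end) {
    size_t c = index_for(blk_start);
    if (card_start(c) < blk_start) c += 1;        // first card belongs to an earlier block
    for (; c <= index_for(blk_end - 1); ++c) {
      offset[c] = card_start(c) - blk_start;      // back-offset to the block start
    }
  }

  // Block start for the card containing addr (exact only for covered cards).
  size_t block_start(size_t addr) const {
    size_t c = index_for(addr);
    return card_start(c) - offset[c];
  }

  // The spirit of verify_for_object(): every card spanned by
  // [obj_start, obj_start + word_size) must lead back to obj_start.
  bool verify_for_object(size_t obj_start, size_t word_size) const {
    size_t first = index_for(obj_start);
    size_t last  = index_for(obj_start + word_size - 1);
    if (obj_start % kCardWords != 0) first += 1;  // first card points at a prior block
    for (size_t c = first; c <= last; ++c) {
      if (block_start(card_start(c)) != obj_start) return false;
    }
    return true;
  }
};

int main() {
  BotSketch bot(1024);
  bot.alloc_block(10, 300);                       // one object spanning several cards
  return bot.verify_for_object(10, 290) ? 0 : 1;  // expect: consistent, exit 0
}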
|
@ -641,10 +635,20 @@ void G1BlockOffsetArrayContigSpace::zero_bottom_entry() {
|
|||
}
|
||||
|
||||
void
|
||||
G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_end) {
|
||||
G1BlockOffsetArray::set_for_starts_humongous(new_end);
|
||||
G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_top) {
|
||||
assert(new_top <= _end, "_end should have already been updated");
|
||||
|
||||
// Make sure _next_offset_threshold and _next_offset_index point to new_end.
|
||||
_next_offset_threshold = new_end;
|
||||
_next_offset_index = _array->index_for(new_end);
|
||||
// The first BOT entry should have offset 0.
|
||||
zero_bottom_entry();
|
||||
initialize_threshold();
|
||||
alloc_block(_bottom, new_top);
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
void
|
||||
G1BlockOffsetArrayContigSpace::print_on(outputStream* out) {
|
||||
G1BlockOffsetArray::print_on(out);
|
||||
out->print_cr(" next offset threshold: "PTR_FORMAT, _next_offset_threshold);
|
||||
out->print_cr(" next offset index: "SIZE_FORMAT, _next_offset_index);
|
||||
}
|
||||
#endif // !PRODUCT
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
|
@ -352,11 +352,6 @@ public:
|
|||
// The following methods are useful and optimized for a
|
||||
// general, non-contiguous space.
|
||||
|
||||
// The given arguments are required to be the starts of adjacent ("blk1"
|
||||
// before "blk2") well-formed blocks covered by "this". After this call,
|
||||
// they should be considered to form one block.
|
||||
virtual void join_blocks(HeapWord* blk1, HeapWord* blk2);
|
||||
|
||||
// Given a block [blk_start, blk_start + full_blk_size), and
|
||||
// a left_blk_size < full_blk_size, adjust the BOT to show two
|
||||
// blocks [blk_start, blk_start + left_blk_size) and
|
||||
|
@ -429,6 +424,12 @@ public:
|
|||
verify_single_block(blk, blk + size);
|
||||
}
|
||||
|
||||
// Used by region verification. Checks that the contents of the
|
||||
// BOT reflect that there's a single object that spans the address
|
||||
// range [obj_start, obj_start + word_size); returns true if this is
|
||||
// the case, returns false if it's not.
|
||||
bool verify_for_object(HeapWord* obj_start, size_t word_size) const;
|
||||
|
||||
// Verify that the given block is before _unallocated_block
|
||||
inline void verify_not_unallocated(HeapWord* blk_start,
|
||||
HeapWord* blk_end) const {
|
||||
|
@ -444,7 +445,7 @@ public:
|
|||
|
||||
void check_all_cards(size_t left_card, size_t right_card) const;
|
||||
|
||||
virtual void set_for_starts_humongous(HeapWord* new_end);
|
||||
virtual void print_on(outputStream* out) PRODUCT_RETURN;
|
||||
};
|
||||
|
||||
// A subtype of BlockOffsetArray that takes advantage of the fact
|
||||
|
@ -494,7 +495,9 @@ class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
|
|||
HeapWord* block_start_unsafe(const void* addr);
|
||||
HeapWord* block_start_unsafe_const(const void* addr) const;
|
||||
|
||||
virtual void set_for_starts_humongous(HeapWord* new_end);
|
||||
void set_for_starts_humongous(HeapWord* new_top);
|
||||
|
||||
virtual void print_on(outputStream* out) PRODUCT_RETURN;
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
|
@ -610,6 +610,39 @@ G1CollectedHeap::retire_cur_alloc_region(HeapRegion* cur_alloc_region) {
// of the free region list is revamped as part of CR 6977804.
wait_for_cleanup_complete();

// Other threads might still be trying to allocate using CASes out
// of the region we are retiring, as they can do so without holding
// the Heap_lock. So we first have to make sure that no one else can
// allocate in it by doing a maximal allocation. Even if our CAS
// attempt fails a few times, we'll succeed sooner or later given
// that a failed CAS attempt means that the region is getting close
// to being full (someone else succeeded in allocating into it).
size_t free_word_size = cur_alloc_region->free() / HeapWordSize;

// This is the minimum free chunk we can turn into a dummy
// object. If the free space falls below this, then no one can
// allocate in this region anyway (all allocation requests will be
// of a size larger than this) so we won't have to perform the dummy
// allocation.
size_t min_word_size_to_fill = CollectedHeap::min_fill_size();

while (free_word_size >= min_word_size_to_fill) {
  HeapWord* dummy =
    cur_alloc_region->par_allocate_no_bot_updates(free_word_size);
  if (dummy != NULL) {
    // If the allocation was successful we should fill in the space.
    CollectedHeap::fill_with_object(dummy, free_word_size);
    break;
  }

  free_word_size = cur_alloc_region->free() / HeapWordSize;
  // It's also possible that someone else beats us to the
  // allocation and they fill up the region. In that case, we can
  // just get out of the loop.
}
assert(cur_alloc_region->free() / HeapWordSize < min_word_size_to_fill,
       "sanity");

retire_cur_alloc_region_common(cur_alloc_region);
assert(_cur_alloc_region == NULL, "post-condition");
}
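retire_cur_alloc_region() above (and several hunks below) relies on par_allocate_no_bot_updates(), whose body is not part of this diff. A plausible shape for such a method -- assumed here, not taken from HotSpot -- is a lock-free CAS bump of the region's top that simply skips the BOT update, which the surrounding comments say is not needed for young regions:

#include <atomic>
#include <cstddef>

// Hypothetical sketch of a CAS-based bump-pointer allocation such as the
// par_allocate_no_bot_updates() call above might perform. Field names and
// the HeapWord type are assumptions; this is not the HotSpot code.
using HeapWord = char*;  // a byte pointer stands in for HotSpot's HeapWord*

struct RegionSketch {
  std::atomic<HeapWord> top;  // current allocation pointer
  HeapWord              end;  // first byte past the region

  HeapWord par_allocate_no_bot_updates(size_t word_size) {
    size_t byte_size = word_size * sizeof(void*);
    HeapWord cur = top.load(std::memory_order_relaxed);
    for (;;) {
      if (static_cast<size_t>(end - cur) < byte_size) {
        return nullptr;  // not enough room: the caller retires the region
      }
      HeapWord next = cur + byte_size;
      // On success no BOT update is performed, matching the diff's comments
      // that young regions do not need one.
      if (top.compare_exchange_weak(cur, next, std::memory_order_relaxed)) {
        return cur;      // on failure, cur was reloaded and the loop retries
      }
    }
  }
};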
|
@ -661,27 +694,29 @@ G1CollectedHeap::replace_cur_alloc_region_and_allocate(size_t word_size,
// young type.
OrderAccess::storestore();

// Now allocate out of the new current alloc region. We could
// have re-used allocate_from_cur_alloc_region() but its
// operation is slightly different to what we need here. First,
// allocate_from_cur_alloc_region() is only called outside a
// safepoint and will always unlock the Heap_lock if it returns
// a non-NULL result. Second, it assumes that the current alloc
// region is what's already assigned in _cur_alloc_region. What
// we want here is to actually do the allocation first before we
// assign the new region to _cur_alloc_region. This ordering is
// not currently important, but it will be essential when we
// change the code to support CAS allocation in the future (see
// CR 6994297).
//
// This allocate method does BOT updates and we don't need them in
// the young generation. This will be fixed in the near future by
// CR 6994297.
HeapWord* result = new_cur_alloc_region->allocate(word_size);
// Now, perform the allocation out of the region we just
// allocated. Note that no one else can access that region at
// this point (as _cur_alloc_region has not been updated yet),
// so we can just go ahead and do the allocation without any
// atomics (and we expect this allocation attempt to
// succeed). Given that other threads can attempt an allocation
// with a CAS and without needing the Heap_lock, if we assigned
// the new region to _cur_alloc_region before first allocating
// into it other threads might have filled up the new region
// before we got a chance to do the allocation ourselves. In
// that case, we would have needed to retire the region, grab a
// new one, and go through all this again. Allocating out of the
// new region before assigning it to _cur_alloc_region avoids
// all this.
HeapWord* result =
  new_cur_alloc_region->allocate_no_bot_updates(word_size);
assert(result != NULL, "we just allocate out of an empty region "
       "so allocation should have been successful");
assert(is_in(result), "result should be in the heap");

// Now make sure that the store to _cur_alloc_region does not
// float above the store to top.
OrderAccess::storestore();
_cur_alloc_region = new_cur_alloc_region;

if (!at_safepoint) {
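The comment added above explains the ordering: the owning thread allocates out of the new region first, and an OrderAccess::storestore() then keeps the bump of top from floating below the store that publishes _cur_alloc_region. A condensed sketch of that publish pattern, with standard C++ atomics standing in for HotSpot's OrderAccess and with placeholder types, follows.

#include <atomic>
#include <cstddef>

// Sketch of the publish ordering described above: finish the private
// allocation (the bump of 'top') before publishing the region, so readers
// never observe a region whose 'top' is behind the allocation. The
// storestore barrier is modeled with a release store; Region is a
// placeholder, not HotSpot's HeapRegion.
struct Region {
  char* top;
  char* end;
};

std::atomic<Region*> cur_alloc_region{nullptr};

char* replace_and_allocate(Region* new_region, size_t byte_size) {
  // 1. Allocate privately: nobody else can reach new_region yet, so a plain
  //    (non-atomic) bump of top is enough here.
  char* result = new_region->top;
  new_region->top += byte_size;

  // 2. Publish. The release store plays the role of the storestore barrier:
  //    the bump of 'top' cannot be reordered after this store.
  cur_alloc_region.store(new_region, std::memory_order_release);
  return result;
}

Region* current_region_for_cas_allocation() {
  // Other threads pick the region up before attempting their own CAS-based
  // allocation out of it (as in the earlier bump-pointer sketch).
  return cur_alloc_region.load(std::memory_order_acquire);
}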
|
@ -718,6 +753,9 @@ G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
|
|||
for (int try_count = 1; /* we'll return or break */; try_count += 1) {
|
||||
bool succeeded = true;
|
||||
|
||||
// Every time we go round the loop we should be holding the Heap_lock.
|
||||
assert_heap_locked();
|
||||
|
||||
{
|
||||
// We may have concurrent cleanup working at the time. Wait for
|
||||
// it to complete. In the future we would probably want to make
|
||||
|
@ -734,7 +772,8 @@ G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
|
|||
// attempt as it's redundant (we only reach here after an
|
||||
// allocation attempt has been unsuccessful).
|
||||
wait_for_cleanup_complete();
|
||||
HeapWord* result = attempt_allocation(word_size);
|
||||
|
||||
HeapWord* result = attempt_allocation_locked(word_size);
|
||||
if (result != NULL) {
|
||||
assert_heap_not_locked();
|
||||
return result;
|
||||
|
@ -748,7 +787,6 @@ G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
|
|||
if (g1_policy()->can_expand_young_list()) {
|
||||
// Yes, we are allowed to expand the young gen. Let's try to
|
||||
// allocate a new current alloc region.
|
||||
|
||||
HeapWord* result =
|
||||
replace_cur_alloc_region_and_allocate(word_size,
|
||||
false, /* at_safepoint */
|
||||
|
@ -771,20 +809,23 @@ G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
|
|||
// rather than causing more, now probably unnecessary, GC attempts.
|
||||
JavaThread* jthr = JavaThread::current();
|
||||
assert(jthr != NULL, "sanity");
|
||||
if (!jthr->in_critical()) {
|
||||
MutexUnlocker mul(Heap_lock);
|
||||
GC_locker::stall_until_clear();
|
||||
|
||||
// We'll then fall off the end of the ("if GC locker active")
|
||||
// if-statement and retry the allocation further down in the
|
||||
// loop.
|
||||
} else {
|
||||
if (jthr->in_critical()) {
|
||||
if (CheckJNICalls) {
|
||||
fatal("Possible deadlock due to allocating while"
|
||||
" in jni critical section");
|
||||
}
|
||||
// We are returning NULL so the protocol is that we're still
|
||||
// holding the Heap_lock.
|
||||
assert_heap_locked();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
Heap_lock->unlock();
|
||||
GC_locker::stall_until_clear();
|
||||
|
||||
// No need to relock the Heap_lock. We'll fall off to the code
|
||||
// below the else-statement which assumes that we are not
|
||||
// holding the Heap_lock.
|
||||
} else {
|
||||
// We are not locked out. So, let's try to do a GC. The VM op
|
||||
// will retry the allocation before it completes.
|
||||
|
@ -805,11 +846,10 @@ G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
|
|||
dirty_young_block(result, word_size);
|
||||
return result;
|
||||
}
|
||||
|
||||
Heap_lock->lock();
|
||||
}
|
||||
|
||||
assert_heap_locked();
|
||||
// Both paths that get us here from above unlock the Heap_lock.
|
||||
assert_heap_not_locked();
|
||||
|
||||
// We can reach here when we were unsuccessful in doing a GC,
|
||||
// because another thread beat us to it, or because we were locked
|
||||
|
@ -948,10 +988,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
|
|||
if (!expect_null_cur_alloc_region) {
|
||||
HeapRegion* cur_alloc_region = _cur_alloc_region;
|
||||
if (cur_alloc_region != NULL) {
|
||||
// This allocate method does BOT updates and we don't need them in
|
||||
// the young generation. This will be fixed in the near future by
|
||||
// CR 6994297.
|
||||
HeapWord* result = cur_alloc_region->allocate(word_size);
|
||||
// We are at a safepoint so no reason to use the MT-safe version.
|
||||
HeapWord* result = cur_alloc_region->allocate_no_bot_updates(word_size);
|
||||
if (result != NULL) {
|
||||
assert(is_in(result), "result should be in the heap");
|
||||
|
||||
|
@ -983,20 +1021,17 @@ HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
|
|||
assert_heap_not_locked_and_not_at_safepoint();
|
||||
assert(!isHumongous(word_size), "we do not allow TLABs of humongous size");
|
||||
|
||||
Heap_lock->lock();
|
||||
|
||||
// First attempt: try allocating out of the current alloc region or
|
||||
// after replacing the current alloc region.
|
||||
// First attempt: Try allocating out of the current alloc region
|
||||
// using a CAS. If that fails, take the Heap_lock and retry the
|
||||
// allocation, potentially replacing the current alloc region.
|
||||
HeapWord* result = attempt_allocation(word_size);
|
||||
if (result != NULL) {
|
||||
assert_heap_not_locked();
|
||||
return result;
|
||||
}
|
||||
|
||||
assert_heap_locked();
|
||||
|
||||
// Second attempt: go into the even slower path where we might
|
||||
// try to schedule a collection.
|
||||
// Second attempt: Go to the slower path where we might try to
|
||||
// schedule a collection.
|
||||
result = attempt_allocation_slow(word_size);
|
||||
if (result != NULL) {
|
||||
assert_heap_not_locked();
|
||||
|
@ -1004,6 +1039,7 @@ HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
|
|||
}
|
||||
|
||||
assert_heap_locked();
|
||||
// Need to unlock the Heap_lock before returning.
|
||||
Heap_lock->unlock();
|
||||
return NULL;
|
||||
}
|
||||
|
@ -1022,11 +1058,10 @@ G1CollectedHeap::mem_allocate(size_t word_size,
|
|||
for (int try_count = 1; /* we'll return */; try_count += 1) {
|
||||
unsigned int gc_count_before;
|
||||
{
|
||||
Heap_lock->lock();
|
||||
|
||||
if (!isHumongous(word_size)) {
|
||||
// First attempt: try allocating out of the current alloc
|
||||
// region or after replacing the current alloc region.
|
||||
// First attempt: Try allocating out of the current alloc region
|
||||
// using a CAS. If that fails, take the Heap_lock and retry the
|
||||
// allocation, potentially replacing the current alloc region.
|
||||
HeapWord* result = attempt_allocation(word_size);
|
||||
if (result != NULL) {
|
||||
assert_heap_not_locked();
|
||||
|
@ -1035,14 +1070,17 @@ G1CollectedHeap::mem_allocate(size_t word_size,
|
|||
|
||||
assert_heap_locked();
|
||||
|
||||
// Second attempt: go into the even slower path where we might
|
||||
// try to schedule a collection.
|
||||
// Second attempt: Go to the slower path where we might try to
|
||||
// schedule a collection.
|
||||
result = attempt_allocation_slow(word_size);
|
||||
if (result != NULL) {
|
||||
assert_heap_not_locked();
|
||||
return result;
|
||||
}
|
||||
} else {
|
||||
// attempt_allocation_humongous() requires the Heap_lock to be held.
|
||||
Heap_lock->lock();
|
||||
|
||||
HeapWord* result = attempt_allocation_humongous(word_size,
|
||||
false /* at_safepoint */);
|
||||
if (result != NULL) {
|
||||
|
@ -1054,7 +1092,8 @@ G1CollectedHeap::mem_allocate(size_t word_size,
|
|||
assert_heap_locked();
|
||||
// Read the gc count while the heap lock is held.
|
||||
gc_count_before = SharedHeap::heap()->total_collections();
|
||||
// We cannot be at a safepoint, so it is safe to unlock the Heap_lock
|
||||
|
||||
// Release the Heap_lock before attempting the collection.
|
||||
Heap_lock->unlock();
|
||||
}
|
||||
|
||||
|
@ -1868,7 +1907,7 @@ jint G1CollectedHeap::initialize() {
|
|||
|
||||
ReservedSpace heap_rs(max_byte_size + pgs->max_size(),
|
||||
HeapRegion::GrainBytes,
|
||||
false /*ism*/, addr);
|
||||
UseLargePages, addr);
|
||||
|
||||
if (UseCompressedOops) {
|
||||
if (addr != NULL && !heap_rs.is_reserved()) {
|
||||
|
@ -1877,13 +1916,13 @@ jint G1CollectedHeap::initialize() {
|
|||
// Try again to reserve the heap higher.
|
||||
addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
|
||||
ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes,
|
||||
false /*ism*/, addr);
|
||||
UseLargePages, addr);
|
||||
if (addr != NULL && !heap_rs0.is_reserved()) {
|
||||
// Failed to reserve at specified address again - give up.
|
||||
addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
|
||||
assert(addr == NULL, "");
|
||||
ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes,
|
||||
false /*ism*/, addr);
|
||||
UseLargePages, addr);
|
||||
heap_rs = heap_rs1;
|
||||
} else {
|
||||
heap_rs = heap_rs0;
|
||||
|
@ -3856,13 +3895,15 @@ private:
|
|||
size_t _next_marked_bytes;
|
||||
OopsInHeapRegionClosure *_cl;
|
||||
public:
|
||||
RemoveSelfPointerClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* cl) :
|
||||
_g1(g1), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0),
|
||||
RemoveSelfPointerClosure(G1CollectedHeap* g1, HeapRegion* hr,
|
||||
OopsInHeapRegionClosure* cl) :
|
||||
_g1(g1), _hr(hr), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0),
|
||||
_next_marked_bytes(0), _cl(cl) {}
|
||||
|
||||
size_t prev_marked_bytes() { return _prev_marked_bytes; }
|
||||
size_t next_marked_bytes() { return _next_marked_bytes; }
|
||||
|
||||
// <original comment>
|
||||
// The original idea here was to coalesce evacuated and dead objects.
|
||||
// However that caused complications with the block offset table (BOT).
|
||||
// In particular if there were two TLABs, one of them partially refined.
|
||||
|
@ -3871,15 +3912,24 @@ public:
|
|||
// of TLAB_2. If the last object of the TLAB_1 and the first object
|
||||
// of TLAB_2 are coalesced, then the cards of the unrefined part
|
||||
// would point into middle of the filler object.
|
||||
//
|
||||
// The current approach is to not coalesce and leave the BOT contents intact.
|
||||
// </original comment>
|
||||
//
|
||||
// We now reset the BOT when we start the object iteration over the
|
||||
// region and refine its entries for every object we come across. So
|
||||
// the above comment is not really relevant and we should be able
|
||||
// to coalesce dead objects if we want to.
|
||||
void do_object(oop obj) {
|
||||
HeapWord* obj_addr = (HeapWord*) obj;
|
||||
assert(_hr->is_in(obj_addr), "sanity");
|
||||
size_t obj_size = obj->size();
|
||||
_hr->update_bot_for_object(obj_addr, obj_size);
|
||||
if (obj->is_forwarded() && obj->forwardee() == obj) {
|
||||
// The object failed to move.
|
||||
assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs.");
|
||||
_cm->markPrev(obj);
|
||||
assert(_cm->isPrevMarked(obj), "Should be marked!");
|
||||
_prev_marked_bytes += (obj->size() * HeapWordSize);
|
||||
_prev_marked_bytes += (obj_size * HeapWordSize);
|
||||
if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) {
|
||||
_cm->markAndGrayObjectIfNecessary(obj);
|
||||
}
|
||||
|
@ -3901,7 +3951,7 @@ public:
|
|||
} else {
|
||||
// The object has been either evacuated or is dead. Fill it with a
|
||||
// dummy object.
|
||||
MemRegion mr((HeapWord*)obj, obj->size());
|
||||
MemRegion mr((HeapWord*)obj, obj_size);
|
||||
CollectedHeap::fill_with_object(mr);
|
||||
_cm->clearRangeBothMaps(mr);
|
||||
}
|
||||
|
@ -3921,10 +3971,13 @@ void G1CollectedHeap::remove_self_forwarding_pointers() {
|
|||
HeapRegion* cur = g1_policy()->collection_set();
|
||||
while (cur != NULL) {
|
||||
assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
|
||||
assert(!cur->isHumongous(), "sanity");
|
||||
|
||||
RemoveSelfPointerClosure rspc(_g1h, cl);
|
||||
if (cur->evacuation_failed()) {
|
||||
assert(cur->in_collection_set(), "bad CS");
|
||||
RemoveSelfPointerClosure rspc(_g1h, cur, cl);
|
||||
|
||||
cur->reset_bot();
|
||||
cl->set_region(cur);
|
||||
cur->object_iterate(&rspc);
|
||||
|
||||
|
@ -3989,15 +4042,6 @@ void G1CollectedHeap::drain_evac_failure_scan_stack() {
|
|||
}
|
||||
}
|
||||
|
||||
void G1CollectedHeap::handle_evacuation_failure(oop old) {
|
||||
markOop m = old->mark();
|
||||
// forward to self
|
||||
assert(!old->is_forwarded(), "precondition");
|
||||
|
||||
old->forward_to(old);
|
||||
handle_evacuation_failure_common(old, m);
|
||||
}
|
||||
|
||||
oop
|
||||
G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
|
||||
oop old) {
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
|
@ -430,7 +430,8 @@ protected:
|
|||
bool* gc_overhead_limit_was_exceeded);
|
||||
|
||||
// The following methods, allocate_from_cur_allocation_region(),
|
||||
// attempt_allocation(), replace_cur_alloc_region_and_allocate(),
|
||||
// attempt_allocation(), attempt_allocation_locked(),
|
||||
// replace_cur_alloc_region_and_allocate(),
|
||||
// attempt_allocation_slow(), and attempt_allocation_humongous()
|
||||
// have very awkward pre- and post-conditions with respect to
|
||||
// locking:
|
||||
|
@ -481,20 +482,30 @@ protected:
|
|||
// successfully manage to allocate it, or NULL.
|
||||
|
||||
// It tries to satisfy an allocation request out of the current
|
||||
// allocating region, which is passed as a parameter. It assumes
|
||||
// that the caller has checked that the current allocating region is
|
||||
// not NULL. Given that the caller has to check the current
|
||||
// allocating region for at least NULL, it might as well pass it as
|
||||
// the first parameter so that the method doesn't have to read it
|
||||
// from the _cur_alloc_region field again.
|
||||
// alloc region, which is passed as a parameter. It assumes that the
|
||||
// caller has checked that the current alloc region is not NULL.
|
||||
// Given that the caller has to check the current alloc region for
|
||||
// at least NULL, it might as well pass it as the first parameter so
|
||||
// that the method doesn't have to read it from the
|
||||
// _cur_alloc_region field again. It is called from both
|
||||
// attempt_allocation() and attempt_allocation_locked() and the
|
||||
// with_heap_lock parameter indicates whether the caller was holding
|
||||
// the heap lock when it called it or not.
|
||||
inline HeapWord* allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
|
||||
size_t word_size);
|
||||
size_t word_size,
|
||||
bool with_heap_lock);
|
||||
|
||||
// It attempts to allocate out of the current alloc region. If that
|
||||
// fails, it retires the current alloc region (if there is one),
|
||||
// tries to get a new one and retries the allocation.
|
||||
// First-level of allocation slow path: it attempts to allocate out
|
||||
// of the current alloc region in a lock-free manner using a CAS. If
|
||||
// that fails it takes the Heap_lock and calls
|
||||
// attempt_allocation_locked() for the second-level slow path.
|
||||
inline HeapWord* attempt_allocation(size_t word_size);
|
||||
|
||||
// Second-level of allocation slow path: while holding the Heap_lock
|
||||
// it tries to allocate out of the current alloc region and, if that
|
||||
// fails, tries to allocate out of a new current alloc region.
|
||||
inline HeapWord* attempt_allocation_locked(size_t word_size);
|
||||
|
||||
// It assumes that the current alloc region has been retired and
|
||||
// tries to allocate a new one. If it's successful, it performs the
|
||||
// allocation out of the new current alloc region and updates
|
||||
|
@ -506,11 +517,11 @@ protected:
|
|||
bool do_dirtying,
|
||||
bool can_expand);
|
||||
|
||||
// The slow path when we are unable to allocate a new current alloc
|
||||
// region to satisfy an allocation request (i.e., when
|
||||
// attempt_allocation() fails). It will try to do an evacuation
|
||||
// pause, which might stall due to the GC locker, and retry the
|
||||
// allocation attempt when appropriate.
|
||||
// Third-level of allocation slow path: when we are unable to
|
||||
// allocate a new current alloc region to satisfy an allocation
|
||||
// request (i.e., when attempt_allocation_locked() fails). It will
|
||||
// try to do an evacuation pause, which might stall due to the GC
|
||||
// locker, and retry the allocation attempt when appropriate.
|
||||
HeapWord* attempt_allocation_slow(size_t word_size);
|
||||
|
||||
// The method that tries to satisfy a humongous allocation
|
||||
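The rewritten documentation above describes a three-level slow path: attempt_allocation() first tries a lock-free CAS out of the current alloc region, attempt_allocation_locked() retries under the Heap_lock and may replace the region, and attempt_allocation_slow() is reached only after that fails and may trigger an evacuation pause. The following sketch condenses that control flow with placeholder helpers; it is not the G1 code itself.

#include <cstddef>
#include <mutex>

// Control-flow sketch of the three allocation levels described above. Every
// member is a placeholder; the raw lock()/unlock() calls mirror the
// "who holds the Heap_lock on return" protocol rather than RAII style.
using HeapWord = char*;

struct G1AllocSketch {
  std::mutex heap_lock;  // stands in for Heap_lock

  HeapWord cas_allocate(size_t)                { return nullptr; }  // lock-free attempt
  HeapWord replace_region_and_allocate(size_t) { return nullptr; }  // needs the lock
  HeapWord collect_and_allocate(size_t)        { return nullptr; }  // may do a GC pause

  // Level 1: try a CAS out of the current alloc region without any lock;
  // on failure take the Heap_lock and fall through to level 2.
  HeapWord attempt_allocation(size_t word_size) {
    if (HeapWord r = cas_allocate(word_size)) return r;
    heap_lock.lock();
    return attempt_allocation_locked(word_size);
  }

  // Level 2 (Heap_lock held on entry): retry the current region, then try to
  // replace it. Non-NULL results are returned with the lock released; a NULL
  // result means the caller still holds the lock and must go to level 3.
  HeapWord attempt_allocation_locked(size_t word_size) {
    if (HeapWord r = cas_allocate(word_size)) { heap_lock.unlock(); return r; }
    if (HeapWord r = replace_region_and_allocate(word_size)) {
      heap_lock.unlock();
      return r;
    }
    return nullptr;
  }

  // Level 3 (Heap_lock held on entry): possibly stall for the GC locker or
  // schedule an evacuation pause, then retry; locking details elided.
  HeapWord attempt_allocation_slow(size_t word_size) {
    heap_lock.unlock();  // the GC/stall paths run unlocked
    return collect_and_allocate(word_size);
  }
};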
|
@ -826,7 +837,6 @@ protected:
|
|||
void finalize_for_evac_failure();
|
||||
|
||||
// An attempt to evacuate "obj" has failed; take necessary steps.
|
||||
void handle_evacuation_failure(oop obj);
|
||||
oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
|
||||
void handle_evacuation_failure_common(oop obj, markOop m);
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
|
@ -63,10 +63,12 @@ inline bool G1CollectedHeap::obj_in_cs(oop obj) {
|
|||
// assumptions of this method (and other related ones).
|
||||
inline HeapWord*
|
||||
G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
|
||||
size_t word_size) {
|
||||
assert_heap_locked_and_not_at_safepoint();
|
||||
size_t word_size,
|
||||
bool with_heap_lock) {
|
||||
assert_not_at_safepoint();
|
||||
assert(with_heap_lock == Heap_lock->owned_by_self(),
|
||||
"with_heap_lock and Heap_lock->owned_by_self() should be a tautology");
|
||||
assert(cur_alloc_region != NULL, "pre-condition of the method");
|
||||
assert(cur_alloc_region == _cur_alloc_region, "pre-condition of the method");
|
||||
assert(cur_alloc_region->is_young(),
|
||||
"we only support young current alloc regions");
|
||||
assert(!isHumongous(word_size), "allocate_from_cur_alloc_region() "
|
||||
|
@ -76,20 +78,24 @@ G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
|
|||
assert(!cur_alloc_region->is_empty(),
|
||||
err_msg("region ["PTR_FORMAT","PTR_FORMAT"] should not be empty",
|
||||
cur_alloc_region->bottom(), cur_alloc_region->end()));
|
||||
// This allocate method does BOT updates and we don't need them in
|
||||
// the young generation. This will be fixed in the near future by
|
||||
// CR 6994297.
|
||||
HeapWord* result = cur_alloc_region->allocate(word_size);
|
||||
HeapWord* result = cur_alloc_region->par_allocate_no_bot_updates(word_size);
|
||||
if (result != NULL) {
|
||||
assert(is_in(result), "result should be in the heap");
|
||||
Heap_lock->unlock();
|
||||
|
||||
if (with_heap_lock) {
|
||||
Heap_lock->unlock();
|
||||
}
|
||||
assert_heap_not_locked();
|
||||
// Do the dirtying after we release the Heap_lock.
|
||||
dirty_young_block(result, word_size);
|
||||
return result;
|
||||
}
|
||||
|
||||
assert_heap_locked();
|
||||
if (with_heap_lock) {
|
||||
assert_heap_locked();
|
||||
} else {
|
||||
assert_heap_not_locked();
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -97,31 +103,27 @@ G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
|
|||
// assumptions of this method (and other related ones).
|
||||
inline HeapWord*
|
||||
G1CollectedHeap::attempt_allocation(size_t word_size) {
|
||||
assert_heap_locked_and_not_at_safepoint();
|
||||
assert_heap_not_locked_and_not_at_safepoint();
|
||||
assert(!isHumongous(word_size), "attempt_allocation() should not be called "
|
||||
"for humongous allocation requests");
|
||||
|
||||
HeapRegion* cur_alloc_region = _cur_alloc_region;
|
||||
if (cur_alloc_region != NULL) {
|
||||
HeapWord* result = allocate_from_cur_alloc_region(cur_alloc_region,
|
||||
word_size);
|
||||
word_size,
|
||||
false /* with_heap_lock */);
|
||||
assert_heap_not_locked();
|
||||
if (result != NULL) {
|
||||
assert_heap_not_locked();
|
||||
return result;
|
||||
}
|
||||
|
||||
assert_heap_locked();
|
||||
|
||||
// Since we couldn't successfully allocate into it, retire the
|
||||
// current alloc region.
|
||||
retire_cur_alloc_region(cur_alloc_region);
|
||||
}
|
||||
|
||||
// Try to get a new region and allocate out of it
|
||||
HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
|
||||
false, /* at_safepoint */
|
||||
true, /* do_dirtying */
|
||||
false /* can_expand */);
|
||||
// Our attempt to allocate lock-free failed as the current
|
||||
// allocation region is either NULL or full. So, we'll now take the
|
||||
// Heap_lock and retry.
|
||||
Heap_lock->lock();
|
||||
|
||||
HeapWord* result = attempt_allocation_locked(word_size);
|
||||
if (result != NULL) {
|
||||
assert_heap_not_locked();
|
||||
return result;
|
||||
|
@ -145,6 +147,45 @@ G1CollectedHeap::retire_cur_alloc_region_common(HeapRegion* cur_alloc_region) {
|
|||
_cur_alloc_region = NULL;
|
||||
}
|
||||
|
||||
inline HeapWord*
|
||||
G1CollectedHeap::attempt_allocation_locked(size_t word_size) {
|
||||
assert_heap_locked_and_not_at_safepoint();
|
||||
assert(!isHumongous(word_size), "attempt_allocation_locked() "
|
||||
"should not be called for humongous allocation requests");
|
||||
|
||||
// First, reread the current alloc region and retry the allocation
|
||||
// in case somebody replaced it while we were waiting to get the
|
||||
// Heap_lock.
|
||||
HeapRegion* cur_alloc_region = _cur_alloc_region;
|
||||
if (cur_alloc_region != NULL) {
|
||||
HeapWord* result = allocate_from_cur_alloc_region(
|
||||
cur_alloc_region, word_size,
|
||||
true /* with_heap_lock */);
|
||||
if (result != NULL) {
|
||||
assert_heap_not_locked();
|
||||
return result;
|
||||
}
|
||||
|
||||
// We failed to allocate out of the current alloc region, so let's
|
||||
// retire it before getting a new one.
|
||||
retire_cur_alloc_region(cur_alloc_region);
|
||||
}
|
||||
|
||||
assert_heap_locked();
|
||||
// Try to get a new region and allocate out of it
|
||||
HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
|
||||
false, /* at_safepoint */
|
||||
true, /* do_dirtying */
|
||||
false /* can_expand */);
|
||||
if (result != NULL) {
|
||||
assert_heap_not_locked();
|
||||
return result;
|
||||
}
|
||||
|
||||
assert_heap_locked();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// It dirties the cards that cover the block so that the post
|
||||
// write barrier never queues anything when updating objects on this
|
||||
// block. It is assumed (and in fact we assert) that the block
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
|
@ -386,26 +386,27 @@ void HeapRegion::calc_gc_efficiency() {
|
|||
}
|
||||
// </PREDICTION>
|
||||
|
||||
void HeapRegion::set_startsHumongous(HeapWord* new_end) {
|
||||
void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
|
||||
assert(end() == _orig_end,
|
||||
"Should be normal before the humongous object allocation");
|
||||
assert(top() == bottom(), "should be empty");
|
||||
assert(bottom() <= new_top && new_top <= new_end, "pre-condition");
|
||||
|
||||
_humongous_type = StartsHumongous;
|
||||
_humongous_start_region = this;
|
||||
|
||||
set_end(new_end);
|
||||
_offsets.set_for_starts_humongous(new_end);
|
||||
_offsets.set_for_starts_humongous(new_top);
|
||||
}
|
||||
|
||||
void HeapRegion::set_continuesHumongous(HeapRegion* start) {
|
||||
void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
|
||||
assert(end() == _orig_end,
|
||||
"Should be normal before the humongous object allocation");
|
||||
assert(top() == bottom(), "should be empty");
|
||||
assert(start->startsHumongous(), "pre-condition");
|
||||
assert(first_hr->startsHumongous(), "pre-condition");
|
||||
|
||||
_humongous_type = ContinuesHumongous;
|
||||
_humongous_start_region = start;
|
||||
_humongous_start_region = first_hr;
|
||||
}
|
||||
|
||||
bool HeapRegion::claimHeapRegion(jint claimValue) {
|
||||
|
@ -782,9 +783,6 @@ void HeapRegion::verify(bool allow_dirty) const {
|
|||
verify(allow_dirty, /* use_prev_marking */ true, /* failures */ &dummy);
|
||||
}
|
||||
|
||||
#define OBJ_SAMPLE_INTERVAL 0
|
||||
#define BLOCK_SAMPLE_INTERVAL 100
|
||||
|
||||
// This really ought to be commoned up into OffsetTableContigSpace somehow.
|
||||
// We would need a mechanism to make that code skip dead objects.
|
||||
|
||||
|
@ -795,83 +793,125 @@ void HeapRegion::verify(bool allow_dirty,
|
|||
*failures = false;
|
||||
HeapWord* p = bottom();
|
||||
HeapWord* prev_p = NULL;
|
||||
int objs = 0;
|
||||
int blocks = 0;
|
||||
VerifyLiveClosure vl_cl(g1, use_prev_marking);
|
||||
bool is_humongous = isHumongous();
|
||||
bool do_bot_verify = !is_young();
|
||||
size_t object_num = 0;
|
||||
while (p < top()) {
|
||||
size_t size = oop(p)->size();
|
||||
if (is_humongous != g1->isHumongous(size)) {
|
||||
oop obj = oop(p);
|
||||
size_t obj_size = obj->size();
|
||||
object_num += 1;
|
||||
|
||||
if (is_humongous != g1->isHumongous(obj_size)) {
|
||||
gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
|
||||
SIZE_FORMAT" words) in a %shumongous region",
|
||||
p, g1->isHumongous(size) ? "" : "non-",
|
||||
size, is_humongous ? "" : "non-");
|
||||
p, g1->isHumongous(obj_size) ? "" : "non-",
|
||||
obj_size, is_humongous ? "" : "non-");
|
||||
*failures = true;
|
||||
return;
|
||||
}
|
||||
object_num += 1;
|
||||
if (blocks == BLOCK_SAMPLE_INTERVAL) {
|
||||
HeapWord* res = block_start_const(p + (size/2));
|
||||
if (p != res) {
|
||||
gclog_or_tty->print_cr("offset computation 1 for "PTR_FORMAT" and "
|
||||
SIZE_FORMAT" returned "PTR_FORMAT,
|
||||
p, size, res);
|
||||
*failures = true;
|
||||
return;
|
||||
}
|
||||
blocks = 0;
|
||||
} else {
|
||||
blocks++;
|
||||
|
||||
// If it returns false, verify_for_object() will output the
|
||||
// appropriate message.
|
||||
if (do_bot_verify && !_offsets.verify_for_object(p, obj_size)) {
|
||||
*failures = true;
|
||||
return;
|
||||
}
|
||||
if (objs == OBJ_SAMPLE_INTERVAL) {
|
||||
oop obj = oop(p);
|
||||
if (!g1->is_obj_dead_cond(obj, this, use_prev_marking)) {
|
||||
if (obj->is_oop()) {
|
||||
klassOop klass = obj->klass();
|
||||
if (!klass->is_perm()) {
|
||||
gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
|
||||
"not in perm", klass, obj);
|
||||
*failures = true;
|
||||
return;
|
||||
} else if (!klass->is_klass()) {
|
||||
gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
|
||||
"not a klass", klass, obj);
|
||||
*failures = true;
|
||||
return;
|
||||
} else {
|
||||
vl_cl.set_containing_obj(obj);
|
||||
obj->oop_iterate(&vl_cl);
|
||||
if (vl_cl.failures()) {
|
||||
*failures = true;
|
||||
}
|
||||
if (G1MaxVerifyFailures >= 0 &&
|
||||
vl_cl.n_failures() >= G1MaxVerifyFailures) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
gclog_or_tty->print_cr(PTR_FORMAT" not an oop", obj);
|
||||
|
||||
if (!g1->is_obj_dead_cond(obj, this, use_prev_marking)) {
if (obj->is_oop()) {
klassOop klass = obj->klass();
if (!klass->is_perm()) {
gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
"not in perm", klass, obj);
*failures = true;
return;
} else if (!klass->is_klass()) {
gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
"not a klass", klass, obj);
*failures = true;
return;
} else {
vl_cl.set_containing_obj(obj);
obj->oop_iterate(&vl_cl);
if (vl_cl.failures()) {
*failures = true;
}
if (G1MaxVerifyFailures >= 0 &&
vl_cl.n_failures() >= G1MaxVerifyFailures) {
return;
}
}
}
}
objs = 0;
} else {
objs++;
}
prev_p = p;
p += size;
}
HeapWord* rend = end();
HeapWord* rtop = top();
if (rtop < rend) {
HeapWord* res = block_start_const(rtop + (rend - rtop) / 2);
if (res != rtop) {
gclog_or_tty->print_cr("offset computation 2 for "PTR_FORMAT" and "
PTR_FORMAT" returned "PTR_FORMAT,
rtop, rend, res);
} else {
gclog_or_tty->print_cr(PTR_FORMAT" not an oop", obj);
*failures = true;
return;
}
}
prev_p = p;
p += obj_size;
}

if (p != top()) {
gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
"does not match top "PTR_FORMAT, p, top());
*failures = true;
return;
}

HeapWord* the_end = end();
assert(p == top(), "it should still hold");
// Do some extra BOT consistency checking for addresses in the
// range [top, end). BOT look-ups in this range should yield
// top. No point in doing that if top == end (there's nothing there).
if (p < the_end) {
// Look up top
HeapWord* addr_1 = p;
HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
if (b_start_1 != p) {
gclog_or_tty->print_cr("BOT look up for top: "PTR_FORMAT" "
" yielded "PTR_FORMAT", expecting "PTR_FORMAT,
addr_1, b_start_1, p);
*failures = true;
return;
}

// Look up top + 1
HeapWord* addr_2 = p + 1;
if (addr_2 < the_end) {
HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
if (b_start_2 != p) {
gclog_or_tty->print_cr("BOT look up for top + 1: "PTR_FORMAT" "
" yielded "PTR_FORMAT", expecting "PTR_FORMAT,
addr_2, b_start_2, p);
*failures = true;
return;
}
}

// Look up an address between top and end
size_t diff = pointer_delta(the_end, p) / 2;
HeapWord* addr_3 = p + diff;
if (addr_3 < the_end) {
HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
if (b_start_3 != p) {
gclog_or_tty->print_cr("BOT look up for top + diff: "PTR_FORMAT" "
" yielded "PTR_FORMAT", expecting "PTR_FORMAT,
addr_3, b_start_3, p);
*failures = true;
return;
}
}

// Look up end - 1
HeapWord* addr_4 = the_end - 1;
HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
if (b_start_4 != p) {
gclog_or_tty->print_cr("BOT look up for end - 1: "PTR_FORMAT" "
" yielded "PTR_FORMAT", expecting "PTR_FORMAT,
addr_4, b_start_4, p);
*failures = true;
return;
}
}

@ -880,12 +920,6 @@ void HeapRegion::verify(bool allow_dirty,
"but has "SIZE_FORMAT", objects",
|
||||
bottom(), end(), object_num);
*failures = true;
}

if (p != top()) {
gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
"does not match top "PTR_FORMAT, p, top());
*failures = true;
return;
}
}
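
The four BOT probes added above (top, top + 1, a point midway through [top, end), and end - 1) all exercise the same invariant: a block-start look-up anywhere in the unallocated tail of a region must report top. A standalone toy model of that invariant, in plain C++ written only for illustration; the packed 4-word objects and the array-free look-up are assumptions of the sketch, not anything in this change:

#include <assert.h>
#include <stddef.h>

// Toy stand-in for a region plus its block offset table. Words [0, top)
// hold allocated objects; words [top, end) are unallocated and belong to a
// single trailing "block" whose start is top.
struct ToyRegion {
  size_t top;
  size_t end;
  // Assumed layout for this sketch: 4-word objects packed from word 0.
  size_t block_start(size_t addr) const {
    return (addr < top) ? (addr / 4) * 4 : top;
  }
};

int main() {
  ToyRegion r = { /* top = */ 16, /* end = */ 64 };
  // Mirrors the four probes in HeapRegion::verify above.
  assert(r.block_start(r.top) == r.top);
  assert(r.block_start(r.top + 1) == r.top);
  assert(r.block_start(r.top + (r.end - r.top) / 2) == r.top);
  assert(r.block_start(r.end - 1) == r.top);
  return 0;
}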

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -173,6 +173,19 @@ class G1OffsetTableContigSpace: public ContiguousSpace {
virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

virtual void print() const;

void reset_bot() {
_offsets.zero_bottom_entry();
_offsets.initialize_threshold();
}

void update_bot_for_object(HeapWord* start, size_t word_size) {
_offsets.alloc_block(start, word_size);
}

void print_bot_on(outputStream* out) {
_offsets.print_on(out);
}
};

class HeapRegion: public G1OffsetTableContigSpace {

@ -359,6 +372,15 @@ class HeapRegion: public G1OffsetTableContigSpace {
Allocated
};

inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
assert(is_young(), "we can only skip BOT updates on young regions");
return ContiguousSpace::par_allocate(word_size);
}
inline HeapWord* allocate_no_bot_updates(size_t word_size) {
assert(is_young(), "we can only skip BOT updates on young regions");
return ContiguousSpace::allocate(word_size);
}

// If this region is a member of a HeapRegionSeq, the index in that
// sequence, otherwise -1.
int hrs_index() const { return _hrs_index; }
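
A minimal caller sketch for the allocation entry points added above. allocate_in_region is a hypothetical helper invented for this illustration, and the non-young branch assumes some raw allocate() that leaves BOT maintenance to the caller; the point is only the division of labour: young regions may skip BOT bookkeeping (they are always collected in full and never need block-start look-ups into them), while other regions must record each newly allocated block.

// Hypothetical helper, for illustration only (not part of this change).
static HeapWord* allocate_in_region(HeapRegion* hr, size_t word_size) {
  if (hr->is_young()) {
    // Young regions may skip BOT updates (asserted by the method itself).
    return hr->allocate_no_bot_updates(word_size);
  }
  // Assumed raw allocation path that does not touch the BOT on its own.
  HeapWord* result = hr->allocate(word_size);
  if (result != NULL) {
    // Record the new block so later BOT look-ups find its start.
    hr->update_bot_for_object(result, word_size);
  }
  return result;
}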

@ -404,13 +426,35 @@ class HeapRegion: public G1OffsetTableContigSpace {
return _humongous_start_region;
}

// Causes the current region to represent a humongous object spanning "n"
// regions.
void set_startsHumongous(HeapWord* new_end);
// Makes the current region be a "starts humongous" region, i.e.,
// the first region in a series of one or more contiguous regions
// that will contain a single "humongous" object. The two parameters
// are as follows:
//
// new_top : The new value of the top field of this region which
// points to the end of the humongous object that's being
// allocated. If there is more than one region in the series, top
// will lie beyond this region's original end field and on the last
// region in the series.
//
// new_end : The new value of the end field of this region which
// points to the end of the last region in the series. If there is
// one region in the series (namely: this one) end will be the same
// as the original end of this region.
//
// Updating top and end as described above makes this region look as
// if it spans the entire space taken up by all the regions in the
// series and a single allocation moved its top to new_top. This
// ensures that the space (capacity / allocated) taken up by all
// humongous regions can be calculated by just looking at the
// "starts humongous" regions and by ignoring the "continues
// humongous" regions.
void set_startsHumongous(HeapWord* new_top, HeapWord* new_end);
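
The accounting consequence spelled out in the comment above can be checked with a small standalone example (plain C++; the region size and object size below are invented for the sketch and are not tied to this change):

#include <assert.h>
#include <stddef.h>

int main() {
  const size_t region_words = 1024;   // assumed region size, in words
  const size_t obj_words    = 2500;   // humongous object spanning three regions
  const size_t num_regions  = (obj_words + region_words - 1) / region_words;

  // After set_startsHumongous(new_top, new_end) the first region's top points
  // at the end of the object and its end points at the end of the last region
  // in the series.
  const size_t bottom  = 0;
  const size_t new_top = bottom + obj_words;
  const size_t new_end = bottom + num_regions * region_words;

  // The "starts humongous" region alone now accounts for the whole series:
  // its used() covers the object, its capacity the space of all regions,
  // so the "continues humongous" regions can be ignored.
  assert(new_top - bottom == obj_words);
  assert(new_end - bottom == num_regions * region_words);
  return 0;
}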

// The regions that continue a humongous sequence should be added using
// this method, in increasing address order.
void set_continuesHumongous(HeapRegion* start);
// Makes the current region be a "continues humongous"
// region. first_hr is the "start humongous" region of the series
// which this region will be part of.
void set_continuesHumongous(HeapRegion* first_hr);

// If the region has a remembered set, return a pointer to it.
HeapRegionRemSet* rem_set() const {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -144,7 +144,7 @@ HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
// will also update the BOT covering all the regions to reflect
// that there is a single object that starts at the bottom of the
// first region.
first_hr->set_startsHumongous(new_end);
first_hr->set_startsHumongous(new_top, new_end);

// Then, if there are any, we will set up the "continues
// humongous" regions.
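
Taken together with the HeapRegion changes above, the set-up sequence implied by these comments looks roughly like the sketch below. setup_humongous_series and its parameters are invented for illustration and are not the actual locals of HeapRegionSeq; the two HeapRegion calls are the ones shown in this change.

// Illustrative sketch only: 'regions' is assumed to be the contiguous run of
// regions backing one humongous object, in increasing address order.
static void setup_humongous_series(HeapRegion** regions, int num_regions,
                                   HeapWord* new_top, HeapWord* new_end) {
  HeapRegion* first_hr = regions[0];
  // The first region accounts for the whole object and updates the BOT so
  // that a single object appears to start at its bottom.
  first_hr->set_startsHumongous(new_top, new_end);
  // The rest are marked as continuations, in increasing address order.
  for (int i = 1; i < num_regions; i++) {
    regions[i]->set_continuesHumongous(first_hr);
  }
}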