6687581: Make CMS work with compressed oops
Make FreeChunk read the mark word, instead of the LSB of the _klass pointer, to indicate that a block is a FreeChunk when compressed oops are enabled. Reviewed-by: ysr, jmasa
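The problem, in outline: CMS's sweeper marks a dead block as a FreeChunk by setting the low bit of the word that overlays the object's klass pointer. With compressed oops the klass field is a 32-bit value sharing its word with the klass gap, so that bit can no longer be borrowed; the free indication (and the chunk size) moves into the mark word. Below is a toy model of the layouts involved -- not HotSpot code; names and layouts are illustrative only:

// Toy model of the problem this commit solves. Not HotSpot code: the struct
// names echo oopDesc/FreeChunk, but layouts and the tag check are illustrative.
#include <cstdint>
#include <cstdio>

struct ObjectHeaderWide {     // 64-bit VM without compressed oops
  uintptr_t mark;             // word 0: mark word
  uintptr_t klass;            // word 1: full-width klass pointer; alignment
};                            //         guarantees its LSB is always 0

struct ObjectHeaderNarrow {   // 64-bit VM with compressed oops
  uintptr_t mark;             // word 0: mark word
  uint32_t  narrow_klass;     // word 1, low half: 32-bit compressed klass
  uint32_t  klass_gap;        // word 1, high half: gap / first field
};

struct FreeChunkModel {       // overlays the header of a dead object
  uintptr_t size_or_mark;     // overlays word 0 (the new scheme lives here)
  FreeChunkModel* prev;       // overlays word 1 (the old LSB tag lived here)
  FreeChunkModel* next;
};

// Old scheme: a block is free iff the word overlaying the klass slot has its
// LSB set. Sound only while the klass occupies the whole word; with a 32-bit
// narrow_klass sharing word 1, tagging that word would corrupt the klass
// value, so the free indication (and size) moves into the mark word instead.
static bool second_word_indicates_free(uintptr_t word1) {
  return (word1 & 0x1) != 0;
}

int main() {
  FreeChunkModel chunk = {};
  chunk.prev = (FreeChunkModel*)((uintptr_t)chunk.prev | 0x1);  // markFree()
  printf("looks free: %d\n", second_word_indicates_free((uintptr_t)chunk.prev));
  return 0;
}
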
parent 4cce21039e
commit a2d6036a4d
13 changed files with 315 additions and 160 deletions
@@ -805,28 +805,30 @@ size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
   // This must be volatile, or else there is a danger that the compiler
   // will compile the code below into a sometimes-infinite loop, by keeping
   // the value read the first time in a register.
-  oop o = (oop)p;
-  volatile oop* second_word_addr = o->klass_addr();
   while (true) {
-    klassOop k = (klassOop)(*second_word_addr);
     // We must do this until we get a consistent view of the object.
-    if (FreeChunk::secondWordIndicatesFreeChunk((intptr_t)k)) {
-      FreeChunk* fc = (FreeChunk*)p;
-      volatile size_t* sz_addr = (volatile size_t*)(fc->size_addr());
-      size_t res = (*sz_addr);
-      klassOop k2 = (klassOop)(*second_word_addr);  // Read to confirm.
-      if (k == k2) {
+    if (FreeChunk::indicatesFreeChunk(p)) {
+      volatile FreeChunk* fc = (volatile FreeChunk*)p;
+      size_t res = fc->size();
+      // If the object is still a free chunk, return the size, else it
+      // has been allocated so try again.
+      if (FreeChunk::indicatesFreeChunk(p)) {
         assert(res != 0, "Block size should not be 0");
         return res;
       }
-    } else if (k != NULL) {
-      assert(k->is_oop(true /* ignore mark word */), "Should really be klass oop.");
-      assert(o->is_parsable(), "Should be parsable");
-      assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
-      size_t res = o->size_given_klass(k->klass_part());
-      res = adjustObjectSize(res);
-      assert(res != 0, "Block size should not be 0");
-      return res;
+    } else {
+      // must read from what 'p' points to in each loop.
+      klassOop k = ((volatile oopDesc*)p)->klass_or_null();
+      if (k != NULL) {
+        assert(k->is_oop(true /* ignore mark word */), "Should really be klass oop.");
+        oop o = (oop)p;
+        assert(o->is_parsable(), "Should be parsable");
+        assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
+        size_t res = o->size_given_klass(k->klass_part());
+        res = adjustObjectSize(res);
+        assert(res != 0, "Block size should not be 0");
+        return res;
+      }
     }
   }
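The rewritten loop is a lock-free read-validate-retry: a concurrent allocator can turn the chunk into an object between the size read and the return, so the free indication is re-checked after the size is read and the size is trusted only while the block still looks free. A minimal standalone model of the idiom -- std::atomic stands in for the volatile reads the HotSpot code uses, and the one-word tagged header and all names are assumptions of the sketch:

#include <atomic>
#include <cstdint>
#include <cstdio>

// One header word per block: either a tagged free-chunk size or an opaque
// non-zero object word (0 means "allocated, header not yet installed").
constexpr uintptr_t FREE_TAG = 0x1;
std::atomic<uintptr_t> header{(uintptr_t(16) << 1) | FREE_TAG};  // 16-word free chunk

size_t block_size_model() {
  for (;;) {
    uintptr_t h1 = header.load(std::memory_order_acquire);
    if (h1 & FREE_TAG) {
      size_t sz = size_t(h1 >> 1);       // decode size from the tagged word
      // Re-read to validate: if a concurrent allocator claimed the block
      // between the two loads, sz may be stale, so trust it only while the
      // block still looks free -- otherwise fall through and retry.
      if (header.load(std::memory_order_acquire) & FREE_TAG) return sz;
    } else if (h1 != 0) {
      return size_t(h1);                 // stand-in for o->size_given_klass(...)
    }
    // h1 == 0: header not yet installed by the allocator; spin and retry.
  }
}

int main() { printf("block size = %zu words\n", block_size_model()); }
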
@@ -845,31 +847,31 @@ const {
   // This must be volatile, or else there is a danger that the compiler
   // will compile the code below into a sometimes-infinite loop, by keeping
   // the value read the first time in a register.
-  oop o = (oop)p;
-  volatile oop* second_word_addr = o->klass_addr();
+  DEBUG_ONLY(uint loops = 0;)
   while (true) {
-    klassOop k = (klassOop)(*second_word_addr);
     // We must do this until we get a consistent view of the object.
-    if (FreeChunk::secondWordIndicatesFreeChunk((intptr_t)k)) {
-      FreeChunk* fc = (FreeChunk*)p;
-      volatile size_t* sz_addr = (volatile size_t*)(fc->size_addr());
-      size_t res = (*sz_addr);
-      klassOop k2 = (klassOop)(*second_word_addr);  // Read to confirm.
-      if (k == k2) {
+    if (FreeChunk::indicatesFreeChunk(p)) {
+      volatile FreeChunk* fc = (volatile FreeChunk*)p;
+      size_t res = fc->size();
+      if (FreeChunk::indicatesFreeChunk(p)) {
         assert(res != 0, "Block size should not be 0");
+        assert(loops == 0, "Should be 0");
         return res;
       }
-    } else if (k != NULL && o->is_parsable()) {
-      assert(k->is_oop(), "Should really be klass oop.");
-      assert(o->is_oop(), "Should be an oop");
-      size_t res = o->size_given_klass(k->klass_part());
-      res = adjustObjectSize(res);
-      assert(res != 0, "Block size should not be 0");
-      return res;
     } else {
-      return c->block_size_if_printezis_bits(p);
+      // must read from what 'p' points to in each loop.
+      klassOop k = ((volatile oopDesc*)p)->klass_or_null();
+      if (k != NULL && ((oopDesc*)p)->is_parsable()) {
+        assert(k->is_oop(), "Should really be klass oop.");
+        oop o = (oop)p;
+        assert(o->is_oop(), "Should be an oop");
+        size_t res = o->size_given_klass(k->klass_part());
+        res = adjustObjectSize(res);
+        assert(res != 0, "Block size should not be 0");
+        return res;
+      } else {
+        return c->block_size_if_printezis_bits(p);
+      }
     }
+    assert(loops == 0, "Can loop at most once");
+    DEBUG_ONLY(loops++;)
@@ -907,9 +909,8 @@ bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
   // and those objects (if garbage) may have been modified to hold
   // live range information.
   // assert(ParallelGCThreads > 0 || _bt.block_start(p) == p, "Should be a block boundary");
-  klassOop k = oop(p)->klass();
-  intptr_t ki = (intptr_t)k;
-  if (FreeChunk::secondWordIndicatesFreeChunk(ki)) return false;
+  if (FreeChunk::indicatesFreeChunk(p)) return false;
+  klassOop k = oop(p)->klass_or_null();
   if (k != NULL) {
     // Ignore mark word because it may have been used to
     // chain together promoted objects (the last one
@@ -1027,7 +1028,7 @@ HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
     FreeChunk* fc = (FreeChunk*)res;
     fc->markNotFree();
     assert(!fc->isFree(), "shouldn't be marked free");
-    assert(oop(fc)->klass() == NULL, "should look uninitialized");
+    assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
     // Verify that the block offset table shows this to
     // be a single block, but not one which is unallocated.
     _bt.verify_single_block(res, size);
@@ -2593,7 +2594,7 @@ HeapWord* CFLS_LAB::alloc(size_t word_sz) {
   }
   res->markNotFree();
   assert(!res->isFree(), "shouldn't be marked free");
-  assert(oop(res)->klass() == NULL, "should look uninitialized");
+  assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
   // mangle a just allocated object with a distinct pattern.
   debug_only(res->mangleAllocated(word_sz));
   return (HeapWord*)res;
@@ -190,7 +190,8 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
   // depends on this property.
   debug_only(
     FreeChunk* junk = NULL;
-    assert(junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
+    assert(UseCompressedOops ||
+           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
            "Offset of FreeChunk::_prev within FreeChunk must match"
            " that of OopDesc::_klass within OopDesc");
   )
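The debug_only assert being relaxed here guards a layout invariant: without compressed oops, FreeChunk::_prev must sit at the same offset as OopDesc::_klass so that the LSB tag written through _prev is the bit a reader of the klass word sees. A compile-time version of the same check, using invented model structs rather than HotSpot's real types:

#include <cstddef>
#include <cstdint>

struct OopDescModel   { uintptr_t mark; void* klass; };   // word 0, word 1
struct FreeChunkModel { uintptr_t size; FreeChunkModel* prev; FreeChunkModel* next; };

// Without compressed oops, FreeChunk::_prev must overlay OopDesc::_klass;
// UseCompressedOops breaks this overlap, hence the new "UseCompressedOops ||"
// escape in the assert above.
static_assert(offsetof(FreeChunkModel, prev) == offsetof(OopDescModel, klass),
              "prev must overlay the klass word in this model");

int main() {}
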
@@ -1039,7 +1040,7 @@ void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
     // mark end of object
   }
   // check that oop looks uninitialized
-  assert(oop(start)->klass() == NULL, "_klass should be NULL");
+  assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
 }

 void CMSCollector::promoted(bool par, HeapWord* start,
@@ -1309,17 +1310,25 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
     }
   }
   oop obj = oop(obj_ptr);
-  assert(obj->klass() == NULL, "Object should be uninitialized here.");
+  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
   // Otherwise, copy the object.  Here we must be careful to insert the
   // klass pointer last, since this marks the block as an allocated object.
+  // Except with compressed oops it's the mark word.
   HeapWord* old_ptr = (HeapWord*)old;
   if (word_sz > (size_t)oopDesc::header_size()) {
     Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
                                  obj_ptr + oopDesc::header_size(),
                                  word_sz - oopDesc::header_size());
   }
+
+  if (UseCompressedOops) {
+    // Copy gap missed by (aligned) header size calculation above
+    obj->set_klass_gap(old->klass_gap());
+  }
+
+  // Restore the mark word copied above.
+  obj->set_mark(m);
+
   // Now we can track the promoted object, if necessary.  We take care
   // to delay the transition from uninitialized to full object
   // (i.e., insertion of klass pointer) until after, so that it
@@ -1327,7 +1336,8 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
   if (promoInfo->tracking()) {
     promoInfo->track((PromotedObject*)obj, old->klass());
   }
-  // Finally, install the klass pointer.
+
+  // Finally, install the klass pointer (this should be volatile).
   obj->set_klass(old->klass());

   assert(old->is_oop(), "Will dereference klass ptr below");
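These two hunks preserve par_promote's publication discipline: copy the body, fix up the klass gap and mark word, and only then install the klass, because concurrent scanners treat a NULL klass (via klass_or_null()) as "not yet initialized". A distilled model of that discipline -- HotSpot of this era expressed it with volatile stores rather than C++11 atomics, so the release/acquire pairing and all names below are assumptions of the sketch:

#include <atomic>
#include <cstdio>
#include <cstring>

struct PromotedObj {
  std::atomic<void*> klass{nullptr};   // NULL until the object is published
  unsigned char body[64];
};

// Writer side: fill in the body (and mark/gap in the real code) first, then
// install the klass word last, mirroring "insert the klass pointer last".
void publish(PromotedObj* dst, const unsigned char* src, void* k) {
  std::memcpy(dst->body, src, sizeof dst->body);   // copy payload first
  dst->klass.store(k, std::memory_order_release);  // publish: klass goes last
}

// Reader side (concurrent marker/sweeper): a NULL klass means "not yet
// initialized, try again later" -- the role klass_or_null() plays above.
bool is_initialized(const PromotedObj* p) {
  return p->klass.load(std::memory_order_acquire) != nullptr;
}

int main() {
  static PromotedObj obj;
  unsigned char payload[64] = {42};
  int dummy_klass;
  publish(&obj, payload, &dummy_klass);
  printf("initialized: %d\n", is_initialized(&obj));
}
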
@@ -6165,7 +6175,7 @@ size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
   size_t sz = 0;
   oop p = (oop)addr;
-  if (p->klass() != NULL && p->is_parsable()) {
+  if (p->klass_or_null() != NULL && p->is_parsable()) {
     sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
   } else {
     sz = block_size_using_printezis_bits(addr);
@@ -6602,7 +6612,7 @@ size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
   }
   if (_bitMap->isMarked(addr)) {
     // it's marked; is it potentially uninitialized?
-    if (p->klass() != NULL) {
+    if (p->klass_or_null() != NULL) {
       if (CMSPermGenPrecleaningEnabled && !p->is_parsable()) {
         // Signal precleaning to redirty the card since
         // the klass pointer is already installed.
@@ -6615,11 +6625,8 @@ size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
       if (p->is_objArray()) {
         // objArrays are precisely marked; restrict scanning
         // to dirty cards only.
-        size = p->oop_iterate(_scanningClosure, mr);
-        assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
-               "adjustObjectSize should be the identity for array sizes, "
-               "which are necessarily larger than minimum object size of "
-               "two heap words");
+        size = CompactibleFreeListSpace::adjustObjectSize(
+                 p->oop_iterate(_scanningClosure, mr));
       } else {
         // A non-array may have been imprecisely marked; we need
         // to scan object in its entirety.
@@ -6653,7 +6660,7 @@ size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
     }
   } else {
     // Either a not yet marked object or an uninitialized object
-    if (p->klass() == NULL || !p->is_parsable()) {
+    if (p->klass_or_null() == NULL || !p->is_parsable()) {
       // An uninitialized object, skip to the next card, since
       // we may not be able to read its P-bits yet.
       assert(size == 0, "Initial value");
@@ -6710,7 +6717,7 @@ size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
   HeapWord* addr = (HeapWord*)p;
   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
   assert(!_span.contains(addr), "we are scanning the survivor spaces");
-  assert(p->klass() != NULL, "object should be initializd");
+  assert(p->klass_or_null() != NULL, "object should be initializd");
   assert(p->is_parsable(), "must be parsable.");
   // an initialized object; ignore mark word in verification below
   // since we are running concurrent with mutators
@@ -6868,7 +6875,7 @@ void MarkFromRootsClosure::do_bit(size_t offset) {
     assert(_skipBits == 0, "tautology");
     _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
     oop p = oop(addr);
-    if (p->klass() == NULL || !p->is_parsable()) {
+    if (p->klass_or_null() == NULL || !p->is_parsable()) {
       DEBUG_ONLY(if (!_verifying) {)
         // We re-dirty the cards on which this object lies and increase
         // the _threshold so that we'll come back to scan this object
@@ -6890,7 +6897,7 @@ void MarkFromRootsClosure::do_bit(size_t offset) {
         if (_threshold < end_card_addr) {
           _threshold = end_card_addr;
         }
-        if (p->klass() != NULL) {
+        if (p->klass_or_null() != NULL) {
           // Redirty the range of cards...
           _mut->mark_range(redirty_range);
         } // ...else the setting of klass will dirty the card anyway.
@@ -7048,7 +7055,7 @@ void Par_MarkFromRootsClosure::do_bit(size_t offset) {
     assert(_skip_bits == 0, "tautology");
     _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
     oop p = oop(addr);
-    if (p->klass() == NULL || !p->is_parsable()) {
+    if (p->klass_or_null() == NULL || !p->is_parsable()) {
       // in the case of Clean-on-Enter optimization, redirty card
       // and avoid clearing card by increasing the threshold.
       return;
@@ -8023,7 +8030,7 @@ size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
          "alignment problem");

 #ifdef DEBUG
-  if (oop(addr)->klass() != NULL &&
+  if (oop(addr)->klass_or_null() != NULL &&
      (   !_collector->should_unload_classes()
       || oop(addr)->is_parsable())) {
     // Ignore mark word because we are running concurrent with mutators
@@ -8036,7 +8043,7 @@ size_t SweepClosure::doLiveChunk(FreeChunk* fc) {

   } else {
     // This should be an initialized object that's alive.
-    assert(oop(addr)->klass() != NULL &&
+    assert(oop(addr)->klass_or_null() != NULL &&
            (!_collector->should_unload_classes()
             || oop(addr)->is_parsable()),
            "Should be an initialized object");
@@ -22,88 +22,6 @@
  *
  */

-//
-// Free block maintenance for Concurrent Mark Sweep Generation
-//
-// The main data structure for free blocks are
-// . an indexed array of small free blocks, and
-// . a dictionary of large free blocks
-//
-
-// No virtuals in FreeChunk (don't want any vtables).
-
-// A FreeChunk is merely a chunk that can be in a doubly linked list
-// and has a size field. NOTE: FreeChunks are distinguished from allocated
-// objects in two ways (by the sweeper). The second word (prev) has the
-// LSB set to indicate a free chunk; allocated objects' klass() pointers
-// don't have their LSB set. The corresponding bit in the CMSBitMap is
-// set when the chunk is allocated. There are also blocks that "look free"
-// but are not part of the free list and should not be coalesced into larger
-// free blocks. These free blocks have their two LSB's set.
-
-class FreeChunk VALUE_OBJ_CLASS_SPEC {
-  friend class VMStructs;
-  FreeChunk* _next;
-  FreeChunk* _prev;
-  size_t     _size;
-
- public:
-  NOT_PRODUCT(static const size_t header_size();)
-  // Returns "true" if the "wrd", which is required to be the second word
-  // of a block, indicates that the block represents a free chunk.
-  static bool secondWordIndicatesFreeChunk(intptr_t wrd) {
-    return (wrd & 0x1) == 0x1;
-  }
-  bool isFree() const {
-    return secondWordIndicatesFreeChunk((intptr_t)_prev);
-  }
-  bool cantCoalesce() const { return (((intptr_t)_prev) & 0x3) == 0x3; }
-  FreeChunk* next() const   { return _next; }
-  FreeChunk* prev() const   { return (FreeChunk*)(((intptr_t)_prev) & ~(0x3)); }
-  debug_only(void* prev_addr() const { return (void*)&_prev; })
-
-  void linkAfter(FreeChunk* ptr) {
-    linkNext(ptr);
-    if (ptr != NULL) ptr->linkPrev(this);
-  }
-  void linkAfterNonNull(FreeChunk* ptr) {
-    assert(ptr != NULL, "precondition violation");
-    linkNext(ptr);
-    ptr->linkPrev(this);
-  }
-  void linkNext(FreeChunk* ptr) { _next = ptr; }
-  void linkPrev(FreeChunk* ptr) { _prev = (FreeChunk*)((intptr_t)ptr | 0x1); }
-  void clearPrev()              { _prev = NULL; }
-  void clearNext()              { _next = NULL; }
-  void dontCoalesce() {
-    // the block should be free
-    assert(isFree(), "Should look like a free block");
-    _prev = (FreeChunk*)(((intptr_t)_prev) | 0x2);
-  }
-  void markFree()    { _prev = (FreeChunk*)((intptr_t)_prev | 0x1); }
-  void markNotFree() { _prev = NULL; }
-
-  size_t size() const { return _size; }
-  void setSize(size_t size) { _size = size; }
-
-  // For volatile reads:
-  size_t* size_addr() { return &_size; }
-
-  // Return the address past the end of this chunk
-  HeapWord* end() const { return ((HeapWord*) this) + _size; }
-
-  // debugging
-  void verify()             const PRODUCT_RETURN;
-  void verifyList()         const PRODUCT_RETURN;
-  void mangleAllocated(size_t size) PRODUCT_RETURN;
-  void mangleFreed(size_t size)     PRODUCT_RETURN;
-};
-
-// Alignment helpers etc.
-#define numQuanta(x,y) ((x+y-1)/y)
-enum AlignmentConstants {
-  MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment
-};
-
 // A FreeBlockDictionary is an abstract superclass that will allow
 // a number of alternative implementations in the future.
@@ -47,15 +47,15 @@ void FreeChunk::mangleAllocated(size_t size) {
   Copy::fill_to_words(addr + hdr, size - hdr, baadbabeHeapWord);
 }

-void FreeChunk::mangleFreed(size_t size) {
+void FreeChunk::mangleFreed(size_t sz) {
   assert(baadbabeHeapWord != deadbeefHeapWord, "Need distinct patterns");
   // mangle all but the header of a just-freed block of storage
   // just prior to passing it to the storage dictionary
-  assert(size >= MinChunkSize, "smallest size of object");
-  assert(size == _size, "just checking");
+  assert(sz >= MinChunkSize, "smallest size of object");
+  assert(sz == size(), "just checking");
   HeapWord* addr = (HeapWord*)this;
   size_t hdr = header_size();
-  Copy::fill_to_words(addr + hdr, size - hdr, deadbeefHeapWord);
+  Copy::fill_to_words(addr + hdr, sz - hdr, deadbeefHeapWord);
 }

 void FreeChunk::verifyList() const {
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2001-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+//
+// Free block maintenance for Concurrent Mark Sweep Generation
+//
+// The main data structure for free blocks are
+// . an indexed array of small free blocks, and
+// . a dictionary of large free blocks
+//
+
+// No virtuals in FreeChunk (don't want any vtables).
+
+// A FreeChunk is merely a chunk that can be in a doubly linked list
+// and has a size field. NOTE: FreeChunks are distinguished from allocated
+// objects in two ways (by the sweeper), depending on whether the VM is 32 or
+// 64 bits.
+// In 32 bits or 64 bits without CompressedOops, the second word (prev) has the
+// LSB set to indicate a free chunk; allocated objects' klass() pointers
+// don't have their LSB set. The corresponding bit in the CMSBitMap is
+// set when the chunk is allocated. There are also blocks that "look free"
+// but are not part of the free list and should not be coalesced into larger
+// free blocks. These free blocks have their two LSB's set.
+
+class FreeChunk VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
+  // For 64 bit compressed oops, the markOop encodes both the size and the
+  // indication that this is a FreeChunk and not an object.
+  volatile size_t _size;
+  FreeChunk* _prev;
+  FreeChunk* _next;
+
+  markOop mark()     const volatile { return (markOop)_size; }
+  void set_mark(markOop m)          { _size = (size_t)m; }
+
+ public:
+  NOT_PRODUCT(static const size_t header_size();)
+
+  // Returns "true" if the address indicates that the block represents
+  // a free chunk.
+  static bool indicatesFreeChunk(const HeapWord* addr) {
+    // Force volatile read from addr because value might change between
+    // calls.  We really want the read of _mark and _prev from this pointer
+    // to be volatile but making the fields volatile causes all sorts of
+    // compilation errors.
+    return ((volatile FreeChunk*)addr)->isFree();
+  }
+
+  bool isFree() const volatile {
+    LP64_ONLY(if (UseCompressedOops) return mark()->is_cms_free_chunk(); else)
+    return (((intptr_t)_prev) & 0x1) == 0x1;
+  }
+  bool cantCoalesce() const {
+    assert(isFree(), "can't get coalesce bit on not free");
+    return (((intptr_t)_prev) & 0x2) == 0x2;
+  }
+  void dontCoalesce() {
+    // the block should be free
+    assert(isFree(), "Should look like a free block");
+    _prev = (FreeChunk*)(((intptr_t)_prev) | 0x2);
+  }
+  FreeChunk* prev() const {
+    return (FreeChunk*)(((intptr_t)_prev) & ~(0x3));
+  }
+
+  debug_only(void* prev_addr() const { return (void*)&_prev; })
+
+  size_t size() const volatile {
+    LP64_ONLY(if (UseCompressedOops) return mark()->get_size(); else )
+    return _size;
+  }
+  void setSize(size_t sz) {
+    LP64_ONLY(if (UseCompressedOops) set_mark(markOopDesc::set_size_and_free(sz)); else )
+    _size = sz;
+  }
+
+  FreeChunk* next()   const { return _next; }
+
+  void linkAfter(FreeChunk* ptr) {
+    linkNext(ptr);
+    if (ptr != NULL) ptr->linkPrev(this);
+  }
+  void linkAfterNonNull(FreeChunk* ptr) {
+    assert(ptr != NULL, "precondition violation");
+    linkNext(ptr);
+    ptr->linkPrev(this);
+  }
+  void linkNext(FreeChunk* ptr) { _next = ptr; }
+  void linkPrev(FreeChunk* ptr) {
+    LP64_ONLY(if (UseCompressedOops) _prev = ptr; else)
+    _prev = (FreeChunk*)((intptr_t)ptr | 0x1);
+  }
+  void clearPrev() { _prev = NULL; }
+  void clearNext() { _next = NULL; }
+  void markNotFree() {
+    LP64_ONLY(if (UseCompressedOops) set_mark(markOopDesc::prototype());)
+    // Also set _prev to null
+    _prev = NULL;
+  }
+
+  // Return the address past the end of this chunk
+  HeapWord* end() const { return ((HeapWord*) this) + size(); }
+
+  // debugging
+  void verify()             const PRODUCT_RETURN;
+  void verifyList()         const PRODUCT_RETURN;
+  void mangleAllocated(size_t size) PRODUCT_RETURN;
+  void mangleFreed(size_t size)     PRODUCT_RETURN;
+};
+
+// Alignment helpers etc.
+#define numQuanta(x,y) ((x+y-1)/y)
+enum AlignmentConstants {
+  MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment
+};
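On LP64 with UseCompressedOops, isFree(), size() and setSize() above delegate to the mark word via markOopDesc (is_cms_free_chunk(), get_size(), set_size_and_free()). A toy reimplementation of such an encoding -- the real markOopDesc bit layout is richer; this sketch only assumes "size in the high bits, a free tag in a low bit":

#include <cstdint>
#include <cstdio>

constexpr uintptr_t CMS_FREE_BIT = 0x1;

// Encode a chunk size plus the "CMS free" indication into one mark-like word.
uintptr_t set_size_and_free(size_t sz) { return ((uintptr_t)sz << 1) | CMS_FREE_BIT; }
bool      is_cms_free_chunk(uintptr_t m) { return (m & CMS_FREE_BIT) != 0; }
size_t    get_size(uintptr_t m)          { return (size_t)(m >> 1); }

int main() {
  uintptr_t mark = set_size_and_free(42);   // a 42-word free chunk
  printf("free=%d size=%zu\n", is_cms_free_chunk(mark), get_size(mark));
  return 0;
}
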
@@ -23,6 +23,7 @@
  */

 #define VM_STRUCTS_CMS(nonstatic_field, \
+                       volatile_nonstatic_field, \
                        static_field) \
   nonstatic_field(CompactibleFreeListSpace, _collector, CMSCollector*) \
   nonstatic_field(CompactibleFreeListSpace, _bt, BlockOffsetArrayNonContigSpace) \
@@ -36,9 +37,9 @@
   nonstatic_field(CMSCollector, _markBitMap, CMSBitMap) \
   nonstatic_field(ConcurrentMarkSweepGeneration, _cmsSpace, CompactibleFreeListSpace*) \
   static_field(ConcurrentMarkSweepThread, _collector, CMSCollector*) \
+  volatile_nonstatic_field(FreeChunk, _size, size_t) \
   nonstatic_field(FreeChunk, _next, FreeChunk*) \
-  nonstatic_field(FreeChunk, _prev, FreeChunk*) \
-  nonstatic_field(FreeChunk, _size, size_t)
+  nonstatic_field(FreeChunk, _prev, FreeChunk*)

 #define VM_TYPES_CMS(declare_type, \
                      declare_toplevel_type) \