8038404: Move object_iterate_mem from Space to CMS since it is only ever used by CMS

Reviewed-by: brutisso, tschatzl, stefank
Mikael Gerdin 2014-03-12 15:22:45 +01:00
parent 98d8c51389
commit e72dd1b433
6 changed files with 71 additions and 115 deletions
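This is a relocation rather than a deletion: object_iterate_mem is only ever called from the CMS collector, so the code removed from space.cpp below presumably reappears in CMS's space class. A minimal sketch of the assumed destination declaration, taking CompactibleFreeListSpace (the CMS space type) as the receiver; the new home is not shown in this hunk:

// Sketch only: assumed destination of the moved method.
class CompactibleFreeListSpace : public CompactibleSpace {
 public:
  // Apply cl to each object in mr, walking block by block upwards;
  // formerly a virtual method on Space, now CMS-specific.
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
};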


@@ -558,104 +558,11 @@ HeapWord* Space::object_iterate_careful_m(MemRegion mr,
  return bottom();
}
void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
  assert(!mr.is_empty(), "Should be non-empty");
  // We use MemRegion(bottom(), end()) rather than used_region() below
  // because the two are not necessarily equal for some kinds of
  // spaces, in particular, certain kinds of free list spaces.
  // We could use the more complicated but more precise:
  // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
  // but the slight imprecision seems acceptable in the assertion check.
  assert(MemRegion(bottom(), end()).contains(mr),
         "Should be within used space");
  HeapWord* prev = cl->previous();   // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // This assert will not work when we go from cms space to perm
  // space, and use the same closure. Easy fix deferred for later. XXX YSR
  // assert(prev == NULL || contains(prev), "Should be within space");
  bool last_was_obj_array = false;
  HeapWord *blk_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    blk_start_addr    = prev;
    // The previous invocation may have pushed "prev" beyond the
    // last allocated block yet there may still be blocks
    // in this region due to a particular coalescing policy.
    // Relax the assertion so that the case where the unallocated
    // block is maintained and "prev" is beyond the unallocated
    // block does not cause the assertion to fire.
    assert((BlockOffsetArrayUseUnallocatedBlock &&
            (!is_in(prev))) ||
           (blk_start_addr == block_start(region_start_addr)), "invariant");
  } else {
    region_start_addr = mr.start();
    blk_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (blk_start_addr < region_end_addr) {
    const size_t size = block_size(blk_start_addr);
    if (block_is_obj(blk_start_addr)) {
      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
    } else {
      last_was_obj_array = false;
    }
    blk_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
           "Should be within (closed) used space");
    assert(blk_start_addr > prev, "Invariant");
    cl->set_previous(blk_start_addr); // min address for next time
  }
}
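The do_object_bm()/previous() protocol above is easiest to read from the closure side. A hypothetical implementer (name and body are illustrative, not part of this change) that counts the objects it is shown:

// Hypothetical closure; assumes UpwardsObjectClosure declares
// do_object_bm(oop, MemRegion) and keeps the previous() bookkeeping,
// as the caller above implies. Real subclasses may also need to
// satisfy other pure virtuals inherited from the closure hierarchy.
class CountingUpwardsClosure : public UpwardsObjectClosure {
  size_t _count;
 public:
  CountingUpwardsClosure() : _count(0) { }
  bool do_object_bm(oop obj, MemRegion mr) {
    _count++;
    // Returning true marks "last was an object array", telling the
    // caller not to advance previous() past this object.
    return obj->is_objArray();
  }
  size_t count() const { return _count; }
};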
bool Space::obj_is_alive(const HeapWord* p) const {
  assert(block_is_obj(p), "The address should point to an object");
  return true;
}
void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
  assert(!mr.is_empty(), "Should be non-empty");
  assert(used_region().contains(mr), "Should be within used space");
  HeapWord* prev = cl->previous();   // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // See the comment in the more general method above in case you
  // happen to use this method.
  assert(prev == NULL || is_in_reserved(prev), "Should be within space");
  bool last_was_obj_array = false;
  HeapWord *obj_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    obj_start_addr    = prev;
    assert(obj_start_addr == block_start(region_start_addr), "invariant");
  } else {
    region_start_addr = mr.start();
    obj_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (obj_start_addr < region_end_addr) {
    oop obj = oop(obj_start_addr);
    const size_t size = obj->size();
    last_was_obj_array = cl->do_object_bm(obj, derived_mr);
    obj_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= obj_start_addr) && (obj_start_addr <= end()),
           "Should be within (closed) used space");
    assert(obj_start_addr > prev, "Invariant");
    cl->set_previous(obj_start_addr); // min address for next time
  }
}
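Both removed methods implement the same resumption protocol: a single closure is driven over a sequence of ascending regions, and previous() lets each call skip addresses already covered. A hedged sketch of the assumed caller shape (dirty_regions and num_regions are hypothetical stand-ins for the card-table-driven region source CMS uses):

// Assumed caller shape, not code from this change.
CountingUpwardsClosure cl;               // previous() starts out NULL
for (size_t i = 0; i < num_regions; i++) {
  MemRegion mr = dirty_regions[i];       // regions visited in ascending order
  if (!mr.is_empty()) {
    space->object_iterate_mem(mr, &cl);  // skips everything below cl.previous()
  }
}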
#if INCLUDE_ALL_GCS
#define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
\