Mirror of https://github.com/openjdk/jdk.git
Merge
commit dda69c88e0
28 changed files with 441 additions and 531 deletions
@@ -337,56 +337,20 @@ endif
ifeq ($(DEBUG_BINARIES), true)
CFLAGS += -g
else
# Use the stabs format for debugging information (this is the default
# on gcc-2.91). It's good enough, has all the information about line
# numbers and local variables, and libjvm.so is only about 16M.
# Change this back to "-g" if you want the most expressive format.
# (warning: that could easily inflate libjvm.so to 150M!)
# Note: The Itanium gcc compiler crashes when using -gstabs.
DEBUG_CFLAGS/ia64 = -g
DEBUG_CFLAGS/amd64 = -g
DEBUG_CFLAGS/arm = -g
DEBUG_CFLAGS/ppc = -g
DEBUG_CFLAGS/ppc64 = -g
DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
ifeq ($(USE_CLANG), true)
# Clang doesn't understand -gstabs
DEBUG_CFLAGS += -g
else
DEBUG_CFLAGS += -gstabs
endif
DEBUG_CFLAGS += -g
endif

ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
FASTDEBUG_CFLAGS/ia64 = -g
FASTDEBUG_CFLAGS/amd64 = -g
FASTDEBUG_CFLAGS/arm = -g
FASTDEBUG_CFLAGS/ppc = -g
FASTDEBUG_CFLAGS/ppc64 = -g
FASTDEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
FASTDEBUG_CFLAGS += $(FASTDEBUG_CFLAGS/$(BUILDARCH))
ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),)
ifeq ($(USE_CLANG), true)
# Clang doesn't understand -gstabs
FASTDEBUG_CFLAGS += -g
else
FASTDEBUG_CFLAGS += -gstabs
endif
FASTDEBUG_CFLAGS += -g
endif

OPT_CFLAGS/ia64 = -g
OPT_CFLAGS/amd64 = -g
OPT_CFLAGS/arm = -g
OPT_CFLAGS/ppc = -g
OPT_CFLAGS/ppc64 = -g
OPT_CFLAGS += $(OPT_CFLAGS/$(BUILDARCH))
ifeq ($(OPT_CFLAGS/$(BUILDARCH)),)
ifeq ($(USE_CLANG), true)
# Clang doesn't understand -gstabs
OPT_CFLAGS += -g
else
OPT_CFLAGS += -gstabs
endif
OPT_CFLAGS += -g
endif
endif
endif

@@ -116,10 +116,6 @@ class MarkRefsIntoClosure: public CMSOopsInGenClosure {
MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);

Prefetch::style prefetch_style() {
return Prefetch::do_read;
}
};

class Par_MarkRefsIntoClosure: public CMSOopsInGenClosure {

@@ -132,10 +128,6 @@ class Par_MarkRefsIntoClosure: public CMSOopsInGenClosure {
Par_MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);

Prefetch::style prefetch_style() {
return Prefetch::do_read;
}
};

// A variant of the above used in certain kinds of CMS

@@ -152,10 +144,6 @@ class MarkRefsIntoVerifyClosure: public CMSOopsInGenClosure {
CMSBitMap* cms_bm);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);

Prefetch::style prefetch_style() {
return Prefetch::do_read;
}
};

// The non-parallel version (the parallel version appears further below).

@@ -181,10 +169,6 @@ class PushAndMarkClosure: public CMSOopClosure {
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { PushAndMarkClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }

Prefetch::style prefetch_style() {
return Prefetch::do_read;
}
};

// In the parallel case, the bit map and the

@@ -211,10 +195,6 @@ class Par_PushAndMarkClosure: public CMSOopClosure {
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }

Prefetch::style prefetch_style() {
return Prefetch::do_read;
}
};

// The non-parallel version (the parallel version appears further below).

@@ -245,9 +225,6 @@ class MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
inline void do_oop_nv(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }

Prefetch::style prefetch_style() {
return Prefetch::do_read;
}
void set_freelistLock(Mutex* m) {
_freelistLock = m;
}

@@ -282,9 +259,6 @@ class Par_MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
inline void do_oop_nv(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }

Prefetch::style prefetch_style() {
return Prefetch::do_read;
}
void trim_queue(uint size);
};

@@ -851,42 +851,60 @@ void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
UpwardsObjectClosure* cl) {
assert_locked(freelistLock());
NOT_PRODUCT(verify_objects_initialized());
Space::object_iterate_mem(mr, cl);
assert(!mr.is_empty(), "Should be non-empty");
// We use MemRegion(bottom(), end()) rather than used_region() below
// because the two are not necessarily equal for some kinds of
// spaces, in particular, certain kinds of free list spaces.
// We could use the more complicated but more precise:
// MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
// but the slight imprecision seems acceptable in the assertion check.
assert(MemRegion(bottom(), end()).contains(mr),
"Should be within used space");
HeapWord* prev = cl->previous(); // max address from last time
if (prev >= mr.end()) { // nothing to do
return;
}
// This assert will not work when we go from cms space to perm
// space, and use same closure. Easy fix deferred for later. XXX YSR
// assert(prev == NULL || contains(prev), "Should be within space");

bool last_was_obj_array = false;
HeapWord *blk_start_addr, *region_start_addr;
if (prev > mr.start()) {
region_start_addr = prev;
blk_start_addr = prev;
// The previous invocation may have pushed "prev" beyond the
// last allocated block yet there may be still be blocks
// in this region due to a particular coalescing policy.
// Relax the assertion so that the case where the unallocated
// block is maintained and "prev" is beyond the unallocated
// block does not cause the assertion to fire.
assert((BlockOffsetArrayUseUnallocatedBlock &&
(!is_in(prev))) ||
(blk_start_addr == block_start(region_start_addr)), "invariant");
} else {
region_start_addr = mr.start();
blk_start_addr = block_start(region_start_addr);
}
HeapWord* region_end_addr = mr.end();
MemRegion derived_mr(region_start_addr, region_end_addr);
while (blk_start_addr < region_end_addr) {
const size_t size = block_size(blk_start_addr);
if (block_is_obj(blk_start_addr)) {
last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
} else {
last_was_obj_array = false;
}
blk_start_addr += size;
}
if (!last_was_obj_array) {
assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
"Should be within (closed) used space");
assert(blk_start_addr > prev, "Invariant");
cl->set_previous(blk_start_addr); // min address for next time
}
}

// Callers of this iterator beware: The closure application should
// be robust in the face of uninitialized objects and should (always)
// return a correct size so that the next addr + size below gives us a
// valid block boundary. [See for instance,
// ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
// in ConcurrentMarkSweepGeneration.cpp.]
HeapWord*
CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
assert_lock_strong(freelistLock());
HeapWord *addr, *last;
size_t size;
for (addr = bottom(), last = end();
addr < last; addr += size) {
FreeChunk* fc = (FreeChunk*)addr;
if (fc->is_free()) {
// Since we hold the free list lock, which protects direct
// allocation in this generation by mutators, a free object
// will remain free throughout this iteration code.
size = fc->size();
} else {
// Note that the object need not necessarily be initialized,
// because (for instance) the free list lock does NOT protect
// object initialization. The closure application below must
// therefore be correct in the face of uninitialized objects.
size = cl->do_object_careful(oop(addr));
if (size == 0) {
// An unparsable object found. Signal early termination.
return addr;
}
}
}
return NULL;
}

// Callers of this iterator beware: The closure application should
// be robust in the face of uninitialized objects and should (always)

@@ -338,10 +338,6 @@ class CompactibleFreeListSpace: public CompactibleSpace {
unallocated_block() : end());
}

bool is_in(const void* p) const {
return used_region().contains(p);
}

virtual bool is_free_block(const HeapWord* p) const;

// Resizing support

@@ -363,6 +359,12 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// obj_is_alive() to determine whether it is safe to iterate of
// an object.
void safe_object_iterate(ObjectClosure* blk);

// Iterate over all objects that intersect with mr, calling "cl->do_object"
// on each. There is an exception to this: if this closure has already
// been invoked on an object, it may skip such objects in some cases. This is
// Most likely to happen in an "upwards" (ascending address) iteration of
// MemRegions.
void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

// Requires that "mr" be entirely within the space.

@@ -371,11 +373,8 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// terminate the iteration and return the address of the start of the
// subregion that isn't done. Return of "NULL" indicates that the
// iteration completed.
virtual HeapWord*
object_iterate_careful_m(MemRegion mr,
ObjectClosureCareful* cl);
virtual HeapWord*
object_iterate_careful(ObjectClosureCareful* cl);
HeapWord* object_iterate_careful_m(MemRegion mr,
ObjectClosureCareful* cl);

// Override: provides a DCTO_CL specific to this kind of space.
DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,

@@ -1498,6 +1498,19 @@ class FalseBitMapClosure: public BitMapClosure {
}
};

// A version of ObjectClosure with "memory" (see _previous_address below)
class UpwardsObjectClosure: public BoolObjectClosure {
HeapWord* _previous_address;
public:
UpwardsObjectClosure() : _previous_address(NULL) { }
void set_previous(HeapWord* addr) { _previous_address = addr; }
HeapWord* previous() { return _previous_address; }
// A return value of "true" can be used by the caller to decide
// if this object's end should *NOT* be recorded in
// _previous_address above.
virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
};

// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It's invoked via

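Note: the UpwardsObjectClosure moved into this header is a closure with "memory": it records the highest address already visited so that overlapping, ascending-address walks do not revisit objects. A minimal standalone sketch of that pattern, assuming illustrative names, an index-based layout, and a plain int element type rather than HotSpot's heap words:

#include <cstdio>
#include <vector>

// A visitor that remembers the highest index it has already processed,
// so overlapping ranges are only scanned once (ascending iteration).
class UpwardsVisitor {
  size_t _previous;           // one past the last element visited so far
 public:
  UpwardsVisitor() : _previous(0) {}
  void set_previous(size_t i) { _previous = i; }
  size_t previous() const     { return _previous; }
  void do_element(int v)      { std::printf("visit %d\n", v); }
};

// Visit data[begin, end), but skip anything below the visitor's
// remembered position, then advance that position.
void iterate_range(const std::vector<int>& data, size_t begin, size_t end,
                   UpwardsVisitor* cl) {
  size_t start = begin > cl->previous() ? begin : cl->previous();
  for (size_t i = start; i < end && i < data.size(); i++) {
    cl->do_element(data[i]);
  }
  if (end > cl->previous()) {
    cl->set_previous(end);    // minimum starting point for the next call
  }
}

int main() {
  std::vector<int> data = {1, 2, 3, 4, 5, 6};
  UpwardsVisitor v;
  iterate_range(data, 0, 4, &v);  // visits 1..4
  iterate_range(data, 2, 6, &v);  // only visits 5 and 6; 3 and 4 were seen
  return 0;
}
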
@@ -24,6 +24,7 @@

#include "precompiled.hpp"
#include "gc_implementation/g1/dirtyCardQueue.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"

@@ -3529,6 +3529,29 @@ public:
}
};

bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
const HeapRegion* hr,
const VerifyOption vo) const {
switch (vo) {
case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked();
default: ShouldNotReachHere();
}
return false; // keep some compilers happy
}

bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
const VerifyOption vo) const {
switch (vo) {
case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked();
default: ShouldNotReachHere();
}
return false; // keep some compilers happy
}

void G1CollectedHeap::print_on(outputStream* st) const {
st->print(" %-20s", "garbage-first heap");
st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",

@@ -6598,13 +6621,13 @@ public:
if (hr->is_young()) {
// TODO
} else if (hr->startsHumongous()) {
assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->region_num()));
assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrs_index()));
_humongous_count.increment(1u, hr->capacity());
} else if (hr->is_empty()) {
assert(hr->containing_set() == _free_list, err_msg("Heap region %u is empty but not on the free list.", hr->region_num()));
assert(hr->containing_set() == _free_list, err_msg("Heap region %u is empty but not on the free list.", hr->hrs_index()));
_free_count.increment(1u, hr->capacity());
} else {
assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->region_num()));
assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrs_index()));
_old_count.increment(1u, hr->capacity());
}
return false;

@@ -706,19 +706,7 @@ public:
// This is a fast test on whether a reference points into the
// collection set or not. Assume that the reference
// points into the heap.
bool in_cset_fast_test(oop obj) {
assert(_in_cset_fast_test != NULL, "sanity");
assert(_g1_committed.contains((HeapWord*) obj), err_msg("Given reference outside of heap, is "PTR_FORMAT, (HeapWord*)obj));
// no need to subtract the bottom of the heap from obj,
// _in_cset_fast_test is biased
uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
bool ret = _in_cset_fast_test[index];
// let's make sure the result is consistent with what the slower
// test returns
assert( ret || !obj_in_cs(obj), "sanity");
assert(!ret || obj_in_cs(obj), "sanity");
return ret;
}
inline bool in_cset_fast_test(oop obj);

void clear_cset_fast_test() {
assert(_in_cset_fast_test_base != NULL, "sanity");

@@ -1250,9 +1238,7 @@ public:
}
}

void old_set_remove(HeapRegion* hr) {
_old_set.remove(hr);
}
inline void old_set_remove(HeapRegion* hr);

size_t non_young_capacity_bytes() {
return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes();

@@ -1343,7 +1329,7 @@ public:
void heap_region_iterate(HeapRegionClosure* blk) const;

// Return the region with the given index. It assumes the index is valid.
HeapRegion* region_at(uint index) const { return _hrs.at(index); }
inline HeapRegion* region_at(uint index) const;

// Divide the heap region sequence into "chunks" of some size (the number
// of regions divided by the number of parallel threads times some

@@ -1472,10 +1458,7 @@ public:
return true;
}

bool is_in_young(const oop obj) {
HeapRegion* hr = heap_region_containing(obj);
return hr != NULL && hr->is_young();
}
inline bool is_in_young(const oop obj);

#ifdef ASSERT
virtual bool is_in_partial_collection(const void* p);

@@ -1488,9 +1471,7 @@ public:
// pre-value that needs to be remembered; for the remembered-set
// update logging post-barrier, we don't maintain remembered set
// information for young gen objects.
virtual bool can_elide_initializing_store_barrier(oop new_obj) {
return is_in_young(new_obj);
}
virtual inline bool can_elide_initializing_store_barrier(oop new_obj);

// Returns "true" iff the given word_size is "very large".
static bool isHumongous(size_t word_size) {

@@ -1584,23 +1565,9 @@ public:

// Added if it is NULL it isn't dead.

bool is_obj_dead(const oop obj) const {
const HeapRegion* hr = heap_region_containing(obj);
if (hr == NULL) {
if (obj == NULL) return false;
else return true;
}
else return is_obj_dead(obj, hr);
}
inline bool is_obj_dead(const oop obj) const;

bool is_obj_ill(const oop obj) const {
const HeapRegion* hr = heap_region_containing(obj);
if (hr == NULL) {
if (obj == NULL) return false;
else return true;
}
else return is_obj_ill(obj, hr);
}
inline bool is_obj_ill(const oop obj) const;

bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);

@@ -1694,26 +1661,10 @@ public:

bool is_obj_dead_cond(const oop obj,
const HeapRegion* hr,
const VerifyOption vo) const {
switch (vo) {
case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked();
default: ShouldNotReachHere();
}
return false; // keep some compilers happy
}
const VerifyOption vo) const;

bool is_obj_dead_cond(const oop obj,
const VerifyOption vo) const {
switch (vo) {
case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked();
default: ShouldNotReachHere();
}
return false; // keep some compilers happy
}
const VerifyOption vo) const;

// Printing


@@ -1807,11 +1758,7 @@ protected:
DirtyCardQueue& dirty_card_queue() { return _dcq; }
G1SATBCardTableModRefBS* ctbs() { return _ct_bs; }

template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) {
if (!from->is_survivor()) {
_g1_rem->par_write_ref(from, p, tid);
}
}
template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);

template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
// If the new value of the field points to the same region or

@@ -1853,13 +1800,7 @@ public:
refs()->push(ref);
}

template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
if (G1DeferredRSUpdate) {
deferred_rs_update(from, p, tid);
} else {
immediate_rs_update(from, p, tid);
}
}
template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);

HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
HeapWord* obj = NULL;

@@ -1983,54 +1924,7 @@ private:
return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
}

void do_oop_partial_array(oop* p) {
assert(has_partial_array_mask(p), "invariant");
oop from_obj = clear_partial_array_mask(p);

assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
assert(from_obj->is_objArray(), "must be obj array");
objArrayOop from_obj_array = objArrayOop(from_obj);
// The from-space object contains the real length.
int length = from_obj_array->length();

assert(from_obj->is_forwarded(), "must be forwarded");
oop to_obj = from_obj->forwardee();
assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
objArrayOop to_obj_array = objArrayOop(to_obj);
// We keep track of the next start index in the length field of the
// to-space object.
int next_index = to_obj_array->length();
assert(0 <= next_index && next_index < length,
err_msg("invariant, next index: %d, length: %d", next_index, length));

int start = next_index;
int end = length;
int remainder = end - start;
// We'll try not to push a range that's smaller than ParGCArrayScanChunk.
if (remainder > 2 * ParGCArrayScanChunk) {
end = start + ParGCArrayScanChunk;
to_obj_array->set_length(end);
// Push the remainder before we process the range in case another
// worker has run out of things to do and can steal it.
oop* from_obj_p = set_partial_array_mask(from_obj);
push_on_queue(from_obj_p);
} else {
assert(length == end, "sanity");
// We'll process the final range for this object. Restore the length
// so that the heap remains parsable in case of evacuation failure.
to_obj_array->set_length(end);
}
_scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
// Process indexes [start,end). It will also process the header
// along with the first chunk (i.e., the chunk with start == 0).
// Note that at this point the length field of to_obj_array is not
// correct given that we are using it to keep track of the next
// start index. oop_iterate_range() (thankfully!) ignores the length
// field and only relies on the start / end parameters. It does
// however return the size of the object which will be incorrect. So
// we have to ignore it even if we wanted to use it.
to_obj_array->oop_iterate_range(&_scanner, start, end);
}
inline void do_oop_partial_array(oop* p);

// This method is applied to the fields of the objects that have just been copied.
template <class T> void do_oop_evac(T* p, HeapRegion* from) {

@@ -2060,26 +1954,9 @@ public:

oop copy_to_survivor_space(oop const obj);

template <class T> void deal_with_reference(T* ref_to_scan) {
if (!has_partial_array_mask(ref_to_scan)) {
// Note: we can use "raw" versions of "region_containing" because
// "obj_to_scan" is definitely in the heap, and is not in a
// humongous region.
HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
do_oop_evac(ref_to_scan, r);
} else {
do_oop_partial_array((oop*)ref_to_scan);
}
}
template <class T> inline void deal_with_reference(T* ref_to_scan);

void deal_with_reference(StarTask ref) {
assert(verify_task(ref), "sanity");
if (ref.is_narrow()) {
deal_with_reference((narrowOop*)ref);
} else {
deal_with_reference((oop*)ref);
}
}
inline void deal_with_reference(StarTask ref);

public:
void trim_queue();

@@ -29,6 +29,7 @@
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"

@@ -36,6 +37,9 @@

// Inline functions for G1CollectedHeap

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrs.at(index); }

template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing(const T addr) const {

@@ -55,6 +59,10 @@ G1CollectedHeap::heap_region_containing_raw(const T addr) const {
return res;
}

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
_old_set.remove(hr);
}

inline bool G1CollectedHeap::obj_in_cs(oop obj) {
HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
return r != NULL && r->in_collection_set();

@@ -151,6 +159,24 @@ inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
}


// This is a fast test on whether a reference points into the
// collection set or not. Assume that the reference
// points into the heap.
inline bool G1CollectedHeap::in_cset_fast_test(oop obj) {
assert(_in_cset_fast_test != NULL, "sanity");
assert(_g1_committed.contains((HeapWord*) obj), err_msg("Given reference outside of heap, is "PTR_FORMAT, (HeapWord*)obj));
// no need to subtract the bottom of the heap from obj,
// _in_cset_fast_test is biased
uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
bool ret = _in_cset_fast_test[index];
// let's make sure the result is consistent with what the slower
// test returns
assert( ret || !obj_in_cs(obj), "sanity");
assert(!ret || obj_in_cs(obj), "sanity");
return ret;
}

#ifndef PRODUCT
// Support for G1EvacuationFailureALot


@@ -224,4 +250,121 @@ inline void G1CollectedHeap::reset_evacuation_should_fail() {
}
#endif // #ifndef PRODUCT

inline bool G1CollectedHeap::is_in_young(const oop obj) {
HeapRegion* hr = heap_region_containing(obj);
return hr != NULL && hr->is_young();
}

// We don't need barriers for initializing stores to objects
// in the young gen: for the SATB pre-barrier, there is no
// pre-value that needs to be remembered; for the remembered-set
// update logging post-barrier, we don't maintain remembered set
// information for young gen objects.
inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
return is_in_young(new_obj);
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
const HeapRegion* hr = heap_region_containing(obj);
if (hr == NULL) {
if (obj == NULL) return false;
else return true;
}
else return is_obj_dead(obj, hr);
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
const HeapRegion* hr = heap_region_containing(obj);
if (hr == NULL) {
if (obj == NULL) return false;
else return true;
}
else return is_obj_ill(obj, hr);
}

template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {
if (!from->is_survivor()) {
_g1_rem->par_write_ref(from, p, tid);
}
}

template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
if (G1DeferredRSUpdate) {
deferred_rs_update(from, p, tid);
} else {
immediate_rs_update(from, p, tid);
}
}


inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
assert(has_partial_array_mask(p), "invariant");
oop from_obj = clear_partial_array_mask(p);

assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
assert(from_obj->is_objArray(), "must be obj array");
objArrayOop from_obj_array = objArrayOop(from_obj);
// The from-space object contains the real length.
int length = from_obj_array->length();

assert(from_obj->is_forwarded(), "must be forwarded");
oop to_obj = from_obj->forwardee();
assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
objArrayOop to_obj_array = objArrayOop(to_obj);
// We keep track of the next start index in the length field of the
// to-space object.
int next_index = to_obj_array->length();
assert(0 <= next_index && next_index < length,
err_msg("invariant, next index: %d, length: %d", next_index, length));

int start = next_index;
int end = length;
int remainder = end - start;
// We'll try not to push a range that's smaller than ParGCArrayScanChunk.
if (remainder > 2 * ParGCArrayScanChunk) {
end = start + ParGCArrayScanChunk;
to_obj_array->set_length(end);
// Push the remainder before we process the range in case another
// worker has run out of things to do and can steal it.
oop* from_obj_p = set_partial_array_mask(from_obj);
push_on_queue(from_obj_p);
} else {
assert(length == end, "sanity");
// We'll process the final range for this object. Restore the length
// so that the heap remains parsable in case of evacuation failure.
to_obj_array->set_length(end);
}
_scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
// Process indexes [start,end). It will also process the header
// along with the first chunk (i.e., the chunk with start == 0).
// Note that at this point the length field of to_obj_array is not
// correct given that we are using it to keep track of the next
// start index. oop_iterate_range() (thankfully!) ignores the length
// field and only relies on the start / end parameters. It does
// however return the size of the object which will be incorrect. So
// we have to ignore it even if we wanted to use it.
to_obj_array->oop_iterate_range(&_scanner, start, end);
}

template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
if (!has_partial_array_mask(ref_to_scan)) {
// Note: we can use "raw" versions of "region_containing" because
// "obj_to_scan" is definitely in the heap, and is not in a
// humongous region.
HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
do_oop_evac(ref_to_scan, r);
} else {
do_oop_partial_array((oop*)ref_to_scan);
}
}

inline void G1ParScanThreadState::deal_with_reference(StarTask ref) {
assert(verify_task(ref), "sanity");
if (ref.is_narrow()) {
deal_with_reference((narrowOop*)ref);
} else {
deal_with_reference((oop*)ref);
}
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP

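Note: in_cset_fast_test() above maps an oop to a region index by shifting the address right by the log of the region size and indexing a per-region table; the real table is biased so the heap bottom never has to be subtracted. A standalone sketch of that lookup, with an explicit base subtraction and made-up sizes instead of G1's actual constants:

#include <cassert>
#include <cstdint>
#include <vector>

// Map an address inside a contiguous heap to a per-region flag by shifting
// away the low bits (log2 of the region size). Sizes here are illustrative.
class RegionFlagTable {
  uintptr_t _heap_base;
  unsigned  _log_region_bytes;      // e.g. 20 -> 1 MB regions
  std::vector<bool> _flags;         // one entry per region
 public:
  RegionFlagTable(uintptr_t heap_base, size_t heap_bytes, unsigned log_region_bytes)
      : _heap_base(heap_base),
        _log_region_bytes(log_region_bytes),
        _flags(heap_bytes >> log_region_bytes, false) {}

  size_t region_index(uintptr_t addr) const {
    assert(addr >= _heap_base && "address outside heap");
    return (addr - _heap_base) >> _log_region_bytes;
  }
  void set(uintptr_t addr, bool v) { _flags[region_index(addr)] = v; }
  bool test(uintptr_t addr) const  { return _flags[region_index(addr)]; }
};

int main() {
  const uintptr_t base = 0x40000000;
  RegionFlagTable t(base, 64u << 20, 20);  // 64 MB heap, 1 MB regions
  t.set(base + (3u << 20) + 123, true);    // mark region 3
  assert(t.test(base + (3u << 20) + 4096));
  assert(!t.test(base + (5u << 20)));
  return 0;
}
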
@@ -472,9 +472,6 @@ HeapRegion::object_iterate_mem_careful(MemRegion mr,
} else if (!g1h->is_obj_dead(obj)) {
cl->do_object(obj);
}
if (cl->abort()) return cur;
// The check above must occur before the operation below, since an
// abort might invalidate the "size" operation.
cur += obj->size();
}
return NULL;

@@ -25,7 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_SPARSEPRT_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_SPARSEPRT_HPP

#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "memory/allocation.hpp"
#include "memory/cardTableModRefBS.hpp"

@@ -89,6 +89,15 @@ bool VM_GC_Operation::doit_prologue() {
assert(((_gc_cause != GCCause::_no_gc) &&
(_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause");

// To be able to handle a GC the VM initialization needs to be completed.
if (!is_init_completed()) {
vm_exit_during_initialization(
err_msg("GC triggered before VM initialization completed. Try increasing "
"NewSize, current value " UINTX_FORMAT "%s.",
byte_size_in_proper_unit(NewSize),
proper_unit_for_byte_size(NewSize)));
}

acquire_pending_list_lock();
// If the GC count has changed someone beat us to the collection
// Get the Heap_lock after the pending_list_lock.

@@ -748,6 +748,12 @@ class ArrayAllocator VALUE_OBJ_CLASS_SPEC {
bool _use_malloc;
size_t _size;
bool _free_in_destructor;

static bool should_use_malloc(size_t size) {
return size < ArrayAllocatorMallocLimit;
}

static char* allocate_inner(size_t& size, bool& use_malloc);
public:
ArrayAllocator(bool free_in_destructor = true) :
_addr(NULL), _use_malloc(false), _size(0), _free_in_destructor(free_in_destructor) { }

@@ -759,6 +765,7 @@ class ArrayAllocator VALUE_OBJ_CLASS_SPEC {
}

E* allocate(size_t length);
E* reallocate(size_t new_length);
void free();
};

@@ -122,35 +122,57 @@ template <MEMFLAGS F> void CHeapObj<F>::operator delete [](void* p){
}

template <class E, MEMFLAGS F>
E* ArrayAllocator<E, F>::allocate(size_t length) {
assert(_addr == NULL, "Already in use");
char* ArrayAllocator<E, F>::allocate_inner(size_t &size, bool &use_malloc) {
char* addr = NULL;

_size = sizeof(E) * length;
_use_malloc = _size < ArrayAllocatorMallocLimit;

if (_use_malloc) {
_addr = AllocateHeap(_size, F);
if (_addr == NULL && _size >= (size_t)os::vm_allocation_granularity()) {
if (use_malloc) {
addr = AllocateHeap(size, F);
if (addr == NULL && size >= (size_t)os::vm_allocation_granularity()) {
// malloc failed let's try with mmap instead
_use_malloc = false;
use_malloc = false;
} else {
return (E*)_addr;
return addr;
}
}

int alignment = os::vm_allocation_granularity();
_size = align_size_up(_size, alignment);
size = align_size_up(size, alignment);

_addr = os::reserve_memory(_size, NULL, alignment, F);
if (_addr == NULL) {
vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (reserve)");
addr = os::reserve_memory(size, NULL, alignment, F);
if (addr == NULL) {
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "Allocator (reserve)");
}

os::commit_memory_or_exit(_addr, _size, !ExecMem, "Allocator (commit)");
os::commit_memory_or_exit(addr, size, !ExecMem, "Allocator (commit)");
return addr;
}

template <class E, MEMFLAGS F>
E* ArrayAllocator<E, F>::allocate(size_t length) {
assert(_addr == NULL, "Already in use");

_size = sizeof(E) * length;
_use_malloc = should_use_malloc(_size);
_addr = allocate_inner(_size, _use_malloc);

return (E*)_addr;
}

template <class E, MEMFLAGS F>
E* ArrayAllocator<E, F>::reallocate(size_t new_length) {
size_t new_size = sizeof(E) * new_length;
bool use_malloc = should_use_malloc(new_size);
char* new_addr = allocate_inner(new_size, use_malloc);

memcpy(new_addr, _addr, MIN2(new_size, _size));

free();
_size = new_size;
_use_malloc = use_malloc;
_addr = new_addr;
return (E*)new_addr;
}

template<class E, MEMFLAGS F>
void ArrayAllocator<E, F>::free() {
if (_addr != NULL) {

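Note: the refactoring above factors the malloc-versus-mmap decision into allocate_inner() so that both allocate() and the new reallocate() can share it. A standalone POSIX sketch of the same split follows; the Buffer type, the 64 KB threshold, and the minimal error handling are illustrative and much simpler than ArrayAllocator's:

#include <cstdlib>
#include <cstring>
#include <sys/mman.h>

static const size_t kMallocLimit = 64 * 1024;   // illustrative threshold

// Small requests come from malloc; large ones from anonymous mmap.
// Mirrors the shape of allocate_inner(): it may flip use_malloc on failure.
static char* allocate_inner(size_t& size, bool& use_malloc) {
  if (use_malloc) {
    char* addr = static_cast<char*>(std::malloc(size));
    if (addr != nullptr) return addr;
    use_malloc = false;                          // fall back to mmap
  }
  void* addr = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return addr == MAP_FAILED ? nullptr : static_cast<char*>(addr);
}

struct Buffer {
  char*  addr = nullptr;
  size_t size = 0;
  bool   use_malloc = false;

  char* allocate(size_t bytes) {
    size = bytes;
    use_malloc = size < kMallocLimit;
    addr = allocate_inner(size, use_malloc);
    return addr;
  }

  // Grow or shrink by allocating a new block, copying, and freeing the old.
  char* reallocate(size_t new_bytes) {
    size_t new_size = new_bytes;
    bool   new_use_malloc = new_size < kMallocLimit;
    char*  new_addr = allocate_inner(new_size, new_use_malloc);
    if (new_addr == nullptr) return nullptr;
    if (addr != nullptr) {
      std::memcpy(new_addr, addr, new_size < size ? new_size : size);
    }
    release();
    addr = new_addr; size = new_size; use_malloc = new_use_malloc;
    return addr;
  }

  void release() {
    if (addr == nullptr) return;
    if (use_malloc) std::free(addr); else munmap(addr, size);
    addr = nullptr; size = 0;
  }
};

int main() {
  Buffer b;
  b.allocate(1024);            // small: malloc path
  b.reallocate(256 * 1024);    // large: switches to the mmap path
  b.release();
  return 0;
}
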
@@ -28,7 +28,6 @@
#include "memory/sharedHeap.hpp"

volatile jint GC_locker::_jni_lock_count = 0;
volatile jint GC_locker::_lock_count = 0;
volatile bool GC_locker::_needs_gc = false;
volatile bool GC_locker::_doing_gc = false;


@@ -102,7 +101,7 @@ void GC_locker::jni_lock(JavaThread* thread) {
// We check that at least one thread is in a critical region before
// blocking because blocked threads are woken up by a thread exiting
// a JNI critical region.
while ((needs_gc() && is_jni_active()) || _doing_gc) {
while (is_active_and_needs_gc() || _doing_gc) {
JNICritical_lock->wait();
}
thread->enter_critical();

@@ -116,27 +115,20 @@ void GC_locker::jni_unlock(JavaThread* thread) {
_jni_lock_count--;
decrement_debug_jni_lock_count();
thread->exit_critical();
if (needs_gc() && !is_jni_active()) {
if (needs_gc() && !is_active_internal()) {
// We're the last thread out. Cause a GC to occur.
// GC will also check is_active, so this check is not
// strictly needed. It's added here to make it clear that
// the GC will NOT be performed if any other caller
// of GC_locker::lock() still needs GC locked.
if (!is_active_internal()) {
_doing_gc = true;
{
// Must give up the lock while at a safepoint
MutexUnlocker munlock(JNICritical_lock);
if (PrintJNIGCStalls && PrintGCDetails) {
ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
gclog_or_tty->print_cr("%.3f: Thread \"%s\" is performing GC after exiting critical section, %d locked",
gclog_or_tty->time_stamp().seconds(), Thread::current()->name(), _jni_lock_count);
}
Universe::heap()->collect(GCCause::_gc_locker);
_doing_gc = true;
{
// Must give up the lock while at a safepoint
MutexUnlocker munlock(JNICritical_lock);
if (PrintJNIGCStalls && PrintGCDetails) {
ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
gclog_or_tty->print_cr("%.3f: Thread \"%s\" is performing GC after exiting critical section, %d locked",
gclog_or_tty->time_stamp().seconds(), Thread::current()->name(), _jni_lock_count);
}
_doing_gc = false;
Universe::heap()->collect(GCCause::_gc_locker);
}

_doing_gc = false;
_needs_gc = false;
JNICritical_lock->notify_all();
}

@@ -54,8 +54,6 @@ class GC_locker: public AllStatic {
// safepointing and decremented during the slow path of GC_locker
// unlocking.
static volatile jint _jni_lock_count; // number of jni active instances.

static volatile jint _lock_count; // number of other active instances
static volatile bool _needs_gc; // heap is filling, we need a GC
// note: bool is typedef'd as jint
static volatile bool _doing_gc; // unlock_critical() is doing a GC

@@ -66,12 +64,6 @@ class GC_locker: public AllStatic {
static volatile jint _debug_jni_lock_count;
#endif

// Accessors
static bool is_jni_active() {
assert(_needs_gc, "only valid when _needs_gc is set");
return _jni_lock_count > 0;
}

// At a safepoint, visit all threads and count the number of active
// critical sections. This is used to ensure that all active
// critical sections are exited before a new one is started.

@@ -82,7 +74,7 @@ class GC_locker: public AllStatic {

static bool is_active_internal() {
verify_critical_count();
return _lock_count > 0 || _jni_lock_count > 0;
return _jni_lock_count > 0;
}

public:

@@ -132,10 +124,6 @@ class GC_locker: public AllStatic {
// not a stable predicate.
static void stall_until_clear();

// Non-structured GC locking: currently needed for JNI. Use with care!
static void lock();
static void unlock();

// The following two methods are used for JNI critical regions.
// If we find that we failed to perform a GC because the GC_locker
// was active, arrange for one as soon as possible by allowing

@@ -27,22 +27,6 @@

#include "memory/gcLocker.hpp"

inline void GC_locker::lock() {
// cast away volatile
Atomic::inc(&_lock_count);
CHECK_UNHANDLED_OOPS_ONLY(
if (CheckUnhandledOops) { Thread::current()->_gc_locked_out_count++; })
assert(Universe::heap() == NULL ||
!Universe::heap()->is_gc_active(), "locking failed");
}

inline void GC_locker::unlock() {
// cast away volatile
Atomic::dec(&_lock_count);
CHECK_UNHANDLED_OOPS_ONLY(
if (CheckUnhandledOops) { Thread::current()->_gc_locked_out_count--; })
}

inline void GC_locker::lock_critical(JavaThread* thread) {
if (!thread->in_critical()) {
if (needs_gc()) {

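Note: with GC_locker::lock()/unlock() removed, jni_unlock() above is where the deferred collection gets triggered, and it runs when the last thread leaves its critical section. A standalone sketch of that "last one out does the deferred work" pattern, simplified to a plain mutex and without the safepoint or JNICritical_lock handshake of the real code:

#include <cstdio>
#include <mutex>

class CriticalGate {
  std::mutex _lock;
  int  _active = 0;        // threads currently inside a critical section
  bool _needs_gc = false;  // a collection was requested while occupied
 public:
  void enter() {
    std::lock_guard<std::mutex> g(_lock);
    _active++;
  }
  void exit() {
    bool run_gc = false;
    {
      std::lock_guard<std::mutex> g(_lock);
      _active--;
      if (_needs_gc && _active == 0) {   // last thread out
        _needs_gc = false;
        run_gc = true;
      }
    }
    if (run_gc) {
      std::puts("running deferred collection");
    }
  }
  // Called by the collector when it finds the gate occupied.
  void request_gc() {
    std::lock_guard<std::mutex> g(_lock);
    if (_active > 0) _needs_gc = true;
  }
};

int main() {
  CriticalGate gate;
  gate.enter();
  gate.request_gc();   // collector backs off and leaves a note
  gate.exit();         // last thread out performs the deferred collection
  return 0;
}
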
@@ -115,9 +115,6 @@ class ScanClosure: public OopsInKlassOrGenClosure {
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
Prefetch::style prefetch_style() {
return Prefetch::do_write;
}
};

// Closure for scanning DefNewGeneration.

@@ -137,9 +134,6 @@ class FastScanClosure: public OopsInKlassOrGenClosure {
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
Prefetch::style prefetch_style() {
return Prefetch::do_write;
}
};

class KlassScanClosure: public KlassClosure {

@@ -27,11 +27,8 @@

#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "runtime/prefetch.hpp"
#include "utilities/top.hpp"

// The following classes are C++ `closures` for iterating over objects, roots and spaces

class CodeBlob;
class nmethod;
class ReferenceProcessor;

@@ -39,22 +36,11 @@ class DataLayout;
class KlassClosure;
class ClassLoaderData;

// Closure provides abortability.
// The following classes are C++ `closures` for iterating over objects, roots and spaces

class Closure : public StackObj {
protected:
bool _abort;
void set_abort() { _abort = true; }
public:
Closure() : _abort(false) {}
// A subtype can use this mechanism to indicate to some iterator mapping
// functions that the iteration should cease.
bool abort() { return _abort; }
void clear_abort() { _abort = false; }
};
class Closure : public StackObj { };

// OopClosure is used for iterating through references to Java objects.

class OopClosure : public Closure {
public:
virtual void do_oop(oop* o) = 0;

@@ -97,11 +83,6 @@ class ExtendedOopClosure : public OopClosure {

virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }

// Controls how prefetching is done for invocations of this closure.
Prefetch::style prefetch_style() { // Note that this is non-virtual.
return Prefetch::do_none;
}

// True iff this closure may be safely applied more than once to an oop
// location without an intervening "major reset" (like the end of a GC).
virtual bool idempotent() { return false; }

@@ -177,19 +158,6 @@ public:
ObjectToOopClosure(ExtendedOopClosure* cl) : _cl(cl) {}
};

// A version of ObjectClosure with "memory" (see _previous_address below)
class UpwardsObjectClosure: public BoolObjectClosure {
HeapWord* _previous_address;
public:
UpwardsObjectClosure() : _previous_address(NULL) { }
void set_previous(HeapWord* addr) { _previous_address = addr; }
HeapWord* previous() { return _previous_address; }
// A return value of "true" can be used by the caller to decide
// if this object's end should *NOT* be recorded in
// _previous_address above.
virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
};

// A version of ObjectClosure that is expected to be robust
// in the face of possibly uninitialized objects.
class ObjectClosureCareful : public ObjectClosure {

@@ -645,9 +645,6 @@ void MetaspaceShared::preload_and_dump(TRAPS) {
TraceTime timer("Dump Shared Spaces", TraceStartupTime);
ResourceMark rm;

// Lock out GC - is it necessary? I don't think we care.
No_GC_Verifier no_gc;

// Preload classes to be shared.
// Should use some os:: method rather than fopen() here. aB.
// Construct the path to the class list (in jre/lib)

@@ -302,10 +302,6 @@ void ContiguousSpace::clear(bool mangle_space) {
CompactibleSpace::clear(mangle_space);
}

bool ContiguousSpace::is_in(const void* p) const {
return _bottom <= p && p < _top;
}

bool ContiguousSpace::is_free_block(const HeapWord* p) const {
return p >= _top;
}

@@ -547,115 +543,11 @@ void Space::oop_iterate(ExtendedOopClosure* blk) {
object_iterate(&blk2);
}

HeapWord* Space::object_iterate_careful(ObjectClosureCareful* cl) {
guarantee(false, "NYI");
return bottom();
}

HeapWord* Space::object_iterate_careful_m(MemRegion mr,
ObjectClosureCareful* cl) {
guarantee(false, "NYI");
return bottom();
}


void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
assert(!mr.is_empty(), "Should be non-empty");
// We use MemRegion(bottom(), end()) rather than used_region() below
// because the two are not necessarily equal for some kinds of
// spaces, in particular, certain kinds of free list spaces.
// We could use the more complicated but more precise:
// MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
// but the slight imprecision seems acceptable in the assertion check.
assert(MemRegion(bottom(), end()).contains(mr),
"Should be within used space");
HeapWord* prev = cl->previous(); // max address from last time
if (prev >= mr.end()) { // nothing to do
return;
}
// This assert will not work when we go from cms space to perm
// space, and use same closure. Easy fix deferred for later. XXX YSR
// assert(prev == NULL || contains(prev), "Should be within space");

bool last_was_obj_array = false;
HeapWord *blk_start_addr, *region_start_addr;
if (prev > mr.start()) {
region_start_addr = prev;
blk_start_addr = prev;
// The previous invocation may have pushed "prev" beyond the
// last allocated block yet there may be still be blocks
// in this region due to a particular coalescing policy.
// Relax the assertion so that the case where the unallocated
// block is maintained and "prev" is beyond the unallocated
// block does not cause the assertion to fire.
assert((BlockOffsetArrayUseUnallocatedBlock &&
(!is_in(prev))) ||
(blk_start_addr == block_start(region_start_addr)), "invariant");
} else {
region_start_addr = mr.start();
blk_start_addr = block_start(region_start_addr);
}
HeapWord* region_end_addr = mr.end();
MemRegion derived_mr(region_start_addr, region_end_addr);
while (blk_start_addr < region_end_addr) {
const size_t size = block_size(blk_start_addr);
if (block_is_obj(blk_start_addr)) {
last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
} else {
last_was_obj_array = false;
}
blk_start_addr += size;
}
if (!last_was_obj_array) {
assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
"Should be within (closed) used space");
assert(blk_start_addr > prev, "Invariant");
cl->set_previous(blk_start_addr); // min address for next time
}
}

bool Space::obj_is_alive(const HeapWord* p) const {
assert (block_is_obj(p), "The address should point to an object");
return true;
}

void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
assert(!mr.is_empty(), "Should be non-empty");
assert(used_region().contains(mr), "Should be within used space");
HeapWord* prev = cl->previous(); // max address from last time
if (prev >= mr.end()) { // nothing to do
return;
}
// See comment above (in more general method above) in case you
// happen to use this method.
assert(prev == NULL || is_in_reserved(prev), "Should be within space");

bool last_was_obj_array = false;
HeapWord *obj_start_addr, *region_start_addr;
if (prev > mr.start()) {
region_start_addr = prev;
obj_start_addr = prev;
assert(obj_start_addr == block_start(region_start_addr), "invariant");
} else {
region_start_addr = mr.start();
obj_start_addr = block_start(region_start_addr);
}
HeapWord* region_end_addr = mr.end();
MemRegion derived_mr(region_start_addr, region_end_addr);
while (obj_start_addr < region_end_addr) {
oop obj = oop(obj_start_addr);
const size_t size = obj->size();
last_was_obj_array = cl->do_object_bm(obj, derived_mr);
obj_start_addr += size;
}
if (!last_was_obj_array) {
assert((bottom() <= obj_start_addr) && (obj_start_addr <= end()),
"Should be within (closed) used space");
assert(obj_start_addr > prev, "Invariant");
cl->set_previous(obj_start_addr); // min address for next time
}
}

#if INCLUDE_ALL_GCS
#define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
\

@ -120,6 +120,12 @@ class Space: public CHeapObj<mtGC> {
|
|||
|
||||
void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }
|
||||
|
||||
// Returns true if this object has been allocated since a
|
||||
// generation's "save_marks" call.
|
||||
virtual bool obj_allocated_since_save_marks(const oop obj) const {
|
||||
return (HeapWord*)obj >= saved_mark_word();
|
||||
}
|
||||
|
||||
MemRegionClosure* preconsumptionDirtyCardClosure() const {
|
||||
return _preconsumptionDirtyCardClosure;
|
||||
}
|
||||
|
@ -127,9 +133,9 @@ class Space: public CHeapObj<mtGC> {
|
|||
_preconsumptionDirtyCardClosure = cl;
|
||||
}
|
||||
|
||||
// Returns a subregion of the space containing all the objects in
|
||||
// Returns a subregion of the space containing only the allocated objects in
|
||||
// the space.
|
||||
virtual MemRegion used_region() const { return MemRegion(bottom(), end()); }
|
||||
virtual MemRegion used_region() const = 0;
|
||||
|
||||
// Returns a region that is guaranteed to contain (at least) all objects
|
||||
// allocated at the time of the last call to "save_marks". If the space
|
||||
|
@ -139,7 +145,7 @@ class Space: public CHeapObj<mtGC> {
|
|||
// saved mark. Otherwise, the "obj_allocated_since_save_marks" method of
|
||||
// the space must distinguish between objects in the region allocated before
|
||||
// and after the call to save marks.
|
||||
virtual MemRegion used_region_at_save_marks() const {
|
||||
MemRegion used_region_at_save_marks() const {
|
||||
return MemRegion(bottom(), saved_mark_word());
|
||||
}
|
||||
|
||||
|
@ -172,7 +178,9 @@ class Space: public CHeapObj<mtGC> {
|
|||
// expensive operation. To prevent performance problems
|
||||
// on account of its inadvertent use in product jvm's,
|
||||
// we restrict its use to assertion checks only.
|
||||
virtual bool is_in(const void* p) const = 0;
|
||||
bool is_in(const void* p) const {
|
||||
return used_region().contains(p);
|
||||
}
|
||||
|
||||
// Returns true iff the given reserved memory of the space contains the
|
||||
// given address.
|
||||
|
@ -204,24 +212,6 @@ class Space: public CHeapObj<mtGC> {
|
|||
// objects whose internal references point to objects in the space.
|
||||
virtual void safe_object_iterate(ObjectClosure* blk) = 0;
|
||||
|
||||
// Iterate over all objects that intersect with mr, calling "cl->do_object"
|
||||
// on each. There is an exception to this: if this closure has already
|
||||
// been invoked on an object, it may skip such objects in some cases. This is
|
||||
// Most likely to happen in an "upwards" (ascending address) iteration of
|
||||
// MemRegions.
|
||||
virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
|
||||
|
||||
// Iterate over as many initialized objects in the space as possible,
|
||||
// calling "cl.do_object_careful" on each. Return NULL if all objects
|
||||
// in the space (at the start of the iteration) were iterated over.
|
||||
// Return an address indicating the extent of the iteration in the
|
||||
// event that the iteration had to return because of finding an
|
||||
// uninitialized object in the space, or if the closure "cl"
|
||||
// signaled early termination.
|
||||
virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
|
||||
virtual HeapWord* object_iterate_careful_m(MemRegion mr,
|
||||
ObjectClosureCareful* cl);
|
||||
|
||||
// Create and return a new dirty card to oop closure. Can be
|
||||
// overridden to return the appropriate type of closure
|
||||
// depending on the type of space in which the closure will
|
||||
|
@ -262,10 +252,6 @@ class Space: public CHeapObj<mtGC> {
|
|||
// Allocation (return NULL if full). Enforces mutual exclusion internally.
|
||||
virtual HeapWord* par_allocate(size_t word_size) = 0;
|
||||
|
||||
// Returns true if this object has been allocated since a
|
||||
// generation's "save_marks" call.
|
||||
virtual bool obj_allocated_since_save_marks(const oop obj) const = 0;
|
||||
|
||||
// Mark-sweep-compact support: all spaces can update pointers to objects
|
||||
// moving as a part of compaction.
|
||||
virtual void adjust_pointers();
|
||||
|
@ -397,7 +383,7 @@ public:
|
|||
|
||||
// Perform operations on the space needed after a compaction
|
||||
// has been performed.
|
||||
virtual void reset_after_compaction() {}
|
||||
virtual void reset_after_compaction() = 0;
|
||||
|
||||
// Returns the next space (in the current generation) to be compacted in
|
||||
// the global compaction order. Also is used to select the next
|
||||
|
@ -462,7 +448,7 @@ protected:
|
|||
HeapWord* _end_of_live;
|
||||
|
||||
// Minimum size of a free block.
|
||||
virtual size_t minimum_free_block_size() const = 0;
|
||||
virtual size_t minimum_free_block_size() const { return 0; }
|
||||
|
||||
// This the function is invoked when an allocation of an object covering
|
||||
// "start" to "end occurs crosses the threshold; returns the next
|
||||
|
@ -778,7 +764,7 @@ class ContiguousSpace: public CompactibleSpace {
|
|||
HeapWord* top() const { return _top; }
|
||||
void set_top(HeapWord* value) { _top = value; }
|
||||
|
||||
virtual void set_saved_mark() { _saved_mark_word = top(); }
|
||||
void set_saved_mark() { _saved_mark_word = top(); }
|
||||
void reset_saved_mark() { _saved_mark_word = bottom(); }
|
||||
|
||||
WaterMark bottom_mark() { return WaterMark(this, bottom()); }
|
||||
|
@ -813,35 +799,30 @@ class ContiguousSpace: public CompactibleSpace {
size_t used() const { return byte_size(bottom(), top()); }
size_t free() const { return byte_size(top(), end()); }

// Override from space.
bool is_in(const void* p) const;

virtual bool is_free_block(const HeapWord* p) const;

// In a contiguous space we have a more obvious bound on what parts
// contain objects.
MemRegion used_region() const { return MemRegion(bottom(), top()); }

MemRegion used_region_at_save_marks() const {
return MemRegion(bottom(), saved_mark_word());
}

// Allocation (return NULL if full)
virtual HeapWord* allocate(size_t word_size);
virtual HeapWord* par_allocate(size_t word_size);

virtual bool obj_allocated_since_save_marks(const oop obj) const {
return (HeapWord*)obj >= saved_mark_word();
}

// Iteration
void oop_iterate(ExtendedOopClosure* cl);
void object_iterate(ObjectClosure* blk);
// For contiguous spaces this method will iterate safely over objects
// in the space (i.e., between bottom and top) when at a safepoint.
void safe_object_iterate(ObjectClosure* blk);
void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
// iterates on objects up to the safe limit

// Iterate over as many initialized objects in the space as possible,
// calling "cl.do_object_careful" on each. Return NULL if all objects
// in the space (at the start of the iteration) were iterated over.
// Return an address indicating the extent of the iteration in the
// event that the iteration had to return because of finding an
// uninitialized object in the space, or if the closure "cl"
// signaled early termination.
HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
HeapWord* concurrent_iteration_safe_limit() {
assert(_concurrent_iteration_safe_limit <= top(),
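The obj_allocated_since_save_marks check above works because a contiguous space is bump-allocated: save_marks() records the current top, so any object whose address is at or above that water mark must have been allocated afterwards. A tiny standalone analogue (no bounds checking; names are illustrative, not HotSpot types):

#include <cstddef>

struct MarkedRegion {
  char* bottom;
  char* top;
  char* saved_mark;

  void  save_marks() { saved_mark = top; }                         // record the water mark
  void* allocate(size_t n) { void* p = top; top += n; return p; }  // bump allocation, no bounds check
  bool  allocated_since_save_marks(const void* p) const {
    return static_cast<const char*>(p) >= saved_mark;
  }
};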
@ -872,7 +853,6 @@ class ContiguousSpace: public CompactibleSpace {
// set new iteration safe limit
set_concurrent_iteration_safe_limit(compaction_top());
}
virtual size_t minimum_free_block_size() const { return 0; }

// Override.
DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
@ -632,7 +632,6 @@ jint universe_init() {
guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
"oop size is not a multiple of HeapWord size");
TraceTime timer("Genesis", TraceStartupTime);
GC_locker::lock(); // do not allow gc during bootstrapping
JavaClasses::compute_hard_coded_offsets();

jint status = Universe::initialize_heap();

@ -1164,8 +1163,6 @@ bool universe_post_init() {

MemoryService::add_metaspace_memory_pools();

GC_locker::unlock(); // allow gc after bootstrapping

MemoryService::set_universe_heap(Universe::_collectedHeap);
return true;
}
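The guarantee that an oop's size is a whole number of HeapWords is knowable at compile time; in standard C++11 the same kind of invariant could also be expressed as a static_assert rather than a runtime check. A self-contained illustration using stand-in typedefs (the real oop and HeapWord types live in HotSpot headers and are not reproduced here):

typedef void* oop_like;        // stand-in for oop
typedef void* heap_word_like;  // stand-in for HeapWord

// Compile-time version of the "oop size is a multiple of HeapWord size" check.
static_assert(sizeof(oop_like) % sizeof(heap_word_like) == 0,
              "oop size is not a multiple of HeapWord size");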
@ -3878,6 +3878,7 @@ void TestMetachunk_test();
void TestVirtualSpaceNode_test();
void TestNewSize_test();
void TestKlass_test();
void TestBitMap_test();
#if INCLUDE_ALL_GCS
void TestOldFreeSpaceCalculation_test();
void TestG1BiasedArray_test();

@ -3903,6 +3904,7 @@ void execute_internal_vm_tests() {
run_unit_test(test_loggc_filename());
run_unit_test(TestNewSize_test());
run_unit_test(TestKlass_test());
run_unit_test(TestBitMap_test());
#if INCLUDE_VM_STRUCTS
run_unit_test(VMStructs::test());
#endif
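Hooking a new internal test up is therefore a two-step edit: forward-declare the TestXxx_test() entry point and add a run_unit_test(...) line in execute_internal_vm_tests(). The sketch below shows only the general shape of such a registration macro; it is not HotSpot's actual definition, and the dummy test function is made up for the example:

#include <cstdio>

// Illustrative stand-in for run_unit_test: log the call expression, then run it.
#define run_unit_test(unit_test_function_call)                      \
  do {                                                              \
    std::printf("Running test: %s\n", #unit_test_function_call);    \
    unit_test_function_call;                                        \
  } while (0)

static void DummyBitMap_test() { /* stands in for TestBitMap_test() */ }

int main() {
  run_unit_test(DummyBitMap_test());
  return 0;
}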
@ -214,7 +214,6 @@ Thread::Thread() {
debug_only(_allow_allocation_count = 0;)
NOT_PRODUCT(_allow_safepoint_count = 0;)
NOT_PRODUCT(_skip_gcalot = false;)
CHECK_UNHANDLED_OOPS_ONLY(_gc_locked_out_count = 0;)
_jvmti_env_iteration_count = 0;
set_allocated_bytes(0);
_vm_operation_started_count = 0;
@ -249,9 +249,6 @@ class Thread: public ThreadShadow {
// Used by SkipGCALot class.
NOT_PRODUCT(bool _skip_gcalot;) // Should we elide gc-a-lot?

// Record when GC is locked out via the GC_locker mechanism
CHECK_UNHANDLED_OOPS_ONLY(int _gc_locked_out_count;)

friend class No_Alloc_Verifier;
friend class No_Safepoint_Verifier;
friend class Pause_No_Safepoint_Verifier;

@ -397,7 +394,6 @@ class Thread: public ThreadShadow {
void clear_unhandled_oops() {
if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops();
}
bool is_gc_locked_out() { return _gc_locked_out_count > 0; }
#endif // CHECK_UNHANDLED_OOPS

#ifndef PRODUCT
@ -113,9 +113,7 @@ void UnhandledOops::unregister_unhandled_oop(oop* op) {

void UnhandledOops::clear_unhandled_oops() {
assert (CheckUnhandledOops, "should only be called with checking option");
if (_thread->is_gc_locked_out()) {
return;
}

for (int k = 0; k < _oop_list->length(); k++) {
UnhandledOopEntry entry = _oop_list->at(k);
// If an entry is on the unhandled oop list but isn't on the stack
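With the GC-locker early return gone, clear_unhandled_oops() unconditionally walks the list of registered raw-oop slots and forgets them. A hypothetical standalone analogue of that bookkeeping (register a slot, unregister it, clear everything), using std::vector in place of HotSpot's own containers:

#include <cstddef>
#include <vector>

struct UnhandledTracker {
  std::vector<void**> slots;   // addresses of raw pointers being tracked

  void register_slot(void** p) { slots.push_back(p); }

  void unregister_slot(void** p) {
    for (size_t k = 0; k < slots.size(); ++k) {
      if (slots[k] == p) { slots.erase(slots.begin() + k); return; }
    }
  }

  // No special cases: simply forget every tracked slot.
  void clear_unhandled() { slots.clear(); }
};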
@ -24,6 +24,7 @@

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/copy.hpp"
#ifdef TARGET_OS_FAMILY_linux
@ -67,16 +68,14 @@ void BitMap::resize(idx_t size_in_bits, bool in_resource_area) {
idx_t new_size_in_words = size_in_words();
if (in_resource_area) {
_map = NEW_RESOURCE_ARRAY(bm_word_t, new_size_in_words);
Copy::disjoint_words((HeapWord*)old_map, (HeapWord*) _map,
MIN2(old_size_in_words, new_size_in_words));
} else {
if (old_map != NULL) {
_map_allocator.free();
}
_map = _map_allocator.allocate(new_size_in_words);
_map = _map_allocator.reallocate(new_size_in_words);
}
Copy::disjoint_words((HeapWord*)old_map, (HeapWord*) _map,
MIN2(old_size_in_words, new_size_in_words));

if (new_size_in_words > old_size_in_words) {
clear_range_of_words(old_size_in_words, size_in_words());
clear_range_of_words(old_size_in_words, new_size_in_words);
}
}
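The corrected resize boils down to a reallocate-copy-clear-tail pattern: obtain storage for the new word count, preserve the overlapping prefix of the old contents, and clear only the words beyond the old size (the new clear_range_of_words(old_size_in_words, new_size_in_words) call). A minimal standalone sketch of the same pattern with plain new[]/delete[], no ArrayAllocator and no resource area (the word type and struct name are illustrative assumptions):

#include <algorithm>
#include <cstddef>
#include <cstring>

typedef unsigned long bm_word_t;  // illustrative word type

struct TinyBitMap {
  bm_word_t* map;
  size_t     size_in_words;

  TinyBitMap() : map(NULL), size_in_words(0) {}
  ~TinyBitMap() { delete[] map; }

  void resize(size_t new_size_in_words) {
    bm_word_t* old_map = map;
    size_t old_size_in_words = size_in_words;

    // Reallocate, then copy the overlapping prefix of the old contents.
    bm_word_t* new_map = new bm_word_t[new_size_in_words];
    if (old_map != NULL) {
      std::memcpy(new_map, old_map,
                  std::min(old_size_in_words, new_size_in_words) * sizeof(bm_word_t));
      delete[] old_map;
    }

    // Clear only the words beyond the old size, mirroring
    // clear_range_of_words(old_size_in_words, new_size_in_words).
    if (new_size_in_words > old_size_in_words) {
      std::memset(new_map + old_size_in_words, 0,
                  (new_size_in_words - old_size_in_words) * sizeof(bm_word_t));
    }

    map = new_map;
    size_in_words = new_size_in_words;
  }
};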
@ -536,6 +535,83 @@ void BitMap::print_on(outputStream* st) const {
tty->cr();
}

class TestBitMap : public AllStatic {
const static BitMap::idx_t BITMAP_SIZE = 1024;
static void fillBitMap(BitMap& map) {
map.set_bit(1);
map.set_bit(3);
map.set_bit(17);
map.set_bit(512);
}

static void testResize(bool in_resource_area) {
{
BitMap map(0, in_resource_area);
map.resize(BITMAP_SIZE, in_resource_area);
fillBitMap(map);

BitMap map2(BITMAP_SIZE, in_resource_area);
fillBitMap(map2);
assert(map.is_same(map2), "could be");
}

{
BitMap map(128, in_resource_area);
map.resize(BITMAP_SIZE, in_resource_area);
fillBitMap(map);

BitMap map2(BITMAP_SIZE, in_resource_area);
fillBitMap(map2);
assert(map.is_same(map2), "could be");
}

{
BitMap map(BITMAP_SIZE, in_resource_area);
map.resize(BITMAP_SIZE, in_resource_area);
fillBitMap(map);

BitMap map2(BITMAP_SIZE, in_resource_area);
fillBitMap(map2);
assert(map.is_same(map2), "could be");
}
}

static void testResizeResource() {
ResourceMark rm;
testResize(true);
}

static void testResizeNonResource() {
const uintx bitmap_bytes = BITMAP_SIZE / BitsPerByte;

// Test the default behavior
testResize(false);

{
// Make sure that AllocatorMallocLimit is larger than our allocation request
// forcing it to call standard malloc()
UIntFlagSetting fs(ArrayAllocatorMallocLimit, bitmap_bytes * 4);
testResize(false);
}
{
// Make sure that AllocatorMallocLimit is smaller than our allocation request
// forcing it to call mmap() (or equivalent)
UIntFlagSetting fs(ArrayAllocatorMallocLimit, bitmap_bytes / 4);
testResize(false);
}
}

public:
static void test() {
testResizeResource();
testResizeNonResource();
}
};

void TestBitMap_test() {
TestBitMap::test();
}
#endif