8224815: Remove non-GC uses of CollectedHeap::is_in_reserved()
Reviewed-by: stefank, coleenp
commit f869706f5f (parent d19e6eae9e)
55 changed files with 242 additions and 239 deletions
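In short, as I read the hunks below: non-GC callers switch from CollectedHeap::is_in_reserved() / is_in_reserved_or_null() (which test the reserved address range) to is_in() / is_in_or_null() (which test the heap proper), the generic is_in_reserved() accessors are removed from CollectedHeap, a virtual check_oop_location() debug hook is added, and heap reservation now hands a ReservedHeapSpace to initialize_reserved_region() and CompressedOops::initialize(). The snippet below is only an illustration of the before/after assertion pattern, written against the APIs that appear in the diff; it is not a file from the change itself.

// Illustration only, not part of the commit; exact behavior depends on the collector.
void example_assert_real_oop(oop obj) {
  // Old pattern: check against the reserved range (may include uncommitted memory).
  // assert(Universe::heap()->is_in_reserved(obj), "should be real oop");

  // New pattern: check against the heap itself.
  assert(Universe::heap()->is_in(obj), "should be real oop");

  // Alignment/placement-only checks use the new debug hook instead (cast as in the diff).
  DEBUG_ONLY(Universe::heap()->check_oop_location((HeapWord*)obj);)
}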
@@ -3953,7 +3953,7 @@ void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
   assert (UseCompressedOops, "should only be used for compressed oops");
   assert (Universe::heap() != NULL, "java heap should be initialized");
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
-  assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "should be real oop");
+  assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop");
   }
 #endif
   int oop_index = oop_recorder()->find_index(obj);

@@ -3968,7 +3968,7 @@ void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
   assert (UseCompressedClassPointers, "should only be used for compressed headers");
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int index = oop_recorder()->find_index(k);
-  assert(! Universe::heap()->is_in_reserved(k), "should not be an oop");
+  assert(! Universe::heap()->is_in(k), "should not be an oop");

   InstructionMark im(this);
   RelocationHolder rspec = metadata_Relocation::spec(index);

@@ -4052,7 +4052,7 @@ void MacroAssembler::movoop(Register dst, jobject obj, bool immediate) {
 #ifdef ASSERT
   {
     ThreadInVMfromUnknown tiv;
-    assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "should be real oop");
+    assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop");
   }
 #endif
   oop_index = oop_recorder()->find_index(obj);

@@ -4082,7 +4082,7 @@ Address MacroAssembler::constant_oop_address(jobject obj) {
   {
     ThreadInVMfromUnknown tiv;
     assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
-    assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
+    assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop");
   }
 #endif
   int oop_index = oop_recorder()->find_index(obj);

@@ -409,7 +409,7 @@ void LIR_Assembler::jobject2reg(jobject o, Register reg) {
 #ifdef ASSERT
   {
     ThreadInVMfromNative tiv(JavaThread::current());
-    assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(o)), "should be real oop");
+    assert(Universe::heap()->is_in(JNIHandles::resolve(o)), "should be real oop");
   }
 #endif
   int oop_index = __ oop_recorder()->find_index(o);

@@ -990,7 +990,7 @@ AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
   {
     ThreadInVMfromUnknown tiv;
     assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
-    assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
+    assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop");
   }
 #endif
   int oop_index = oop_recorder()->find_index(obj);

@@ -51,7 +51,7 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
   } else if (which == Assembler::narrow_oop_operand) {
     address disp = Assembler::locate_operand(addr(), which);
     // both compressed oops and compressed classes look the same
-    if (Universe::heap()->is_in_reserved((oop)x)) {
+    if (CompressedOops::is_in((void*)x)) {
       if (verify_only) {
         guarantee(*(uint32_t*) disp == CompressedOops::encode((oop)x), "instructions must match");
       } else {
@@ -546,7 +546,7 @@ void emit_d32_reloc(CodeBuffer& cbuf, int d32, RelocationHolder const& rspec, in
 #ifdef ASSERT
   if (rspec.reloc()->type() == relocInfo::oop_type &&
       d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) {
-    assert(Universe::heap()->is_in_reserved((address)(intptr_t)d32), "should be real oop");
+    assert(Universe::heap()->is_in((address)(intptr_t)d32), "should be real oop");
     assert(oopDesc::is_oop(cast_to_oop((intptr_t)d32)), "cannot embed broken oops in code");
   }
 #endif

@@ -573,7 +573,7 @@ void emit_d64_reloc(CodeBuffer& cbuf, int64_t d64, RelocationHolder const& rspec
 #ifdef ASSERT
   if (rspec.reloc()->type() == relocInfo::oop_type &&
       d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) {
-    assert(Universe::heap()->is_in_reserved((address)d64), "should be real oop");
+    assert(Universe::heap()->is_in((address)d64), "should be real oop");
     assert(oopDesc::is_oop(cast_to_oop(d64)), "cannot embed broken oops in code");
   }
 #endif

@@ -239,7 +239,7 @@ void ciObjectFactory::remove_symbols() {
 ciObject* ciObjectFactory::get(oop key) {
   ASSERT_IN_VM;

-  assert(Universe::heap()->is_in_reserved(key), "must be");
+  assert(Universe::heap()->is_in(key), "must be");

   NonPermObject* &bucket = find_non_perm(key);
   if (bucket != NULL) {

@@ -252,7 +252,7 @@ ciObject* ciObjectFactory::get(oop key) {
   ciObject* new_object = create_new_object(keyHandle());
   assert(oopDesc::equals(keyHandle(), new_object->get_oop()), "must be properly recorded");
   init_ident_of(new_object);
-  assert(Universe::heap()->is_in_reserved(new_object->get_oop()), "must be");
+  assert(Universe::heap()->is_in(new_object->get_oop()), "must be");

   // Not a perm-space object.
   insert_non_perm(bucket, keyHandle(), new_object);

@@ -644,7 +644,7 @@ static ciObjectFactory::NonPermObject* emptyBucket = NULL;
 // If there is no entry in the cache corresponding to this oop, return
 // the null tail of the bucket into which the oop should be inserted.
 ciObjectFactory::NonPermObject* &ciObjectFactory::find_non_perm(oop key) {
-  assert(Universe::heap()->is_in_reserved(key), "must be");
+  assert(Universe::heap()->is_in(key), "must be");
   ciMetadata* klass = get_metadata(key->klass());
   NonPermObject* *bp = &_non_perm_bucket[(unsigned) klass->hash() % NON_PERM_BUCKETS];
   for (NonPermObject* p; (p = (*bp)) != NULL; bp = &p->next()) {

@@ -672,7 +672,7 @@ inline ciObjectFactory::NonPermObject::NonPermObject(ciObjectFactory::NonPermObj
 //
 // Insert a ciObject into the non-perm table.
 void ciObjectFactory::insert_non_perm(ciObjectFactory::NonPermObject* &where, oop key, ciObject* obj) {
-  assert(Universe::heap()->is_in_reserved_or_null(key), "must be");
+  assert(Universe::heap()->is_in_or_null(key), "must be");
   assert(&where != &emptyBucket, "must not try to fill empty bucket");
   NonPermObject* p = new (arena()) NonPermObject(where, key, obj);
   assert(where == p && is_equal(p, key) && p->object() == obj, "entry must match");

@@ -225,7 +225,7 @@ void ConstantOopWriteValue::write_on(DebugInfoWriteStream* stream) {
     // thread is already in VM state.
     ThreadInVMfromUnknown tiv;
     assert(JNIHandles::resolve(value()) == NULL ||
-           Universe::heap()->is_in_reserved(JNIHandles::resolve(value())),
+           Universe::heap()->is_in(JNIHandles::resolve(value())),
            "Should be in heap");
   }
 #endif

@@ -246,7 +246,7 @@ void ConstantOopWriteValue::print_on(outputStream* st) const {
 ConstantOopReadValue::ConstantOopReadValue(DebugInfoReadStream* stream) {
   _value = Handle(Thread::current(), stream->read_oop());
   assert(_value() == NULL ||
-         Universe::heap()->is_in_reserved(_value()), "Should be in heap");
+         Universe::heap()->is_in(_value()), "Should be in heap");
 }

 void ConstantOopReadValue::write_on(DebugInfoWriteStream* stream) {

@@ -135,6 +135,10 @@ private:
   bool should_do_concurrent_full_gc(GCCause::Cause cause);

   void collect_mostly_concurrent(GCCause::Cause cause);
+
+  // CMS forwards some non-heap value into the mark oop to reserve oops during
+  // promotion, so we can't assert about obj alignment or that the forwardee is in heap
+  virtual void check_oop_location(void* addr) const {}
 };

 #endif // SHARE_GC_CMS_CMSHEAP_HPP
@@ -70,7 +70,7 @@ JVMFlag::Error ParGCStridesPerThreadConstraintFunc(uintx value, bool verbose) {
 JVMFlag::Error ParGCCardsPerStrideChunkConstraintFunc(intx value, bool verbose) {
   if (UseConcMarkSweepGC) {
     // ParGCCardsPerStrideChunk should be compared with card table size.
-    size_t heap_size = Universe::heap()->reserved_region().word_size();
+    size_t heap_size = CMSHeap::heap()->reserved_region().word_size();
     CardTableRS* ct = GenCollectedHeap::heap()->rem_set();
     size_t card_table_size = ct->cards_required(heap_size) - 1; // Valid card table size

@@ -39,13 +39,13 @@ jint EpsilonHeap::initialize() {
   size_t max_byte_size = align_up(MaxHeapSize, align);

   // Initialize backing storage
-  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
+  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
   _virtual_space.initialize(heap_rs, init_byte_size);

   MemRegion committed_region((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high());
   MemRegion reserved_region((HeapWord*)_virtual_space.low_boundary(), (HeapWord*)_virtual_space.high_boundary());

-  initialize_reserved_region(reserved_region.start(), reserved_region.end());
+  initialize_reserved_region(heap_rs);

   _space = new ContiguousSpace();
   _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);

@@ -136,6 +136,9 @@ public:
     return os::elapsed_counter() / NANOSECS_PER_MILLISEC;
   }

+  MemRegion reserved_region() const { return _reserved; }
+  bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }
+
   virtual void print_on(outputStream* st) const;
   virtual void print_tracing_info() const;
   virtual bool print_location(outputStream* st, void* addr) const;

@@ -99,11 +99,11 @@ inline void G1ArchiveAllocator::enable_archive_object_check() {

   _archive_check_enabled = true;
   size_t length = G1CollectedHeap::heap()->max_reserved_capacity();
-  _closed_archive_region_map.initialize((HeapWord*)Universe::heap()->base(),
-                                        (HeapWord*)Universe::heap()->base() + length,
+  _closed_archive_region_map.initialize(G1CollectedHeap::heap()->base(),
+                                        G1CollectedHeap::heap()->base() + length,
                                         HeapRegion::GrainBytes);
-  _open_archive_region_map.initialize((HeapWord*)Universe::heap()->base(),
-                                      (HeapWord*)Universe::heap()->base() + length,
+  _open_archive_region_map.initialize(G1CollectedHeap::heap()->base(),
+                                      G1CollectedHeap::heap()->base() + length,
                                       HeapRegion::GrainBytes);
 }

@@ -1666,13 +1666,13 @@ jint G1CollectedHeap::initialize() {
   // If this happens then we could end up using a non-optimal
   // compressed oops mode.

-  ReservedSpace heap_rs = Universe::reserve_heap(reserved_byte_size,
+  ReservedHeapSpace heap_rs = Universe::reserve_heap(reserved_byte_size,
                                                  HeapAlignment);

-  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
+  initialize_reserved_region(heap_rs);

   // Create the barrier set for the entire reserved region.
-  G1CardTable* ct = new G1CardTable(reserved_region());
+  G1CardTable* ct = new G1CardTable(heap_rs.region());
   ct->initialize();
   G1BarrierSet* bs = new G1BarrierSet(ct);
   bs->initialize();

@@ -1742,6 +1742,7 @@ jint G1CollectedHeap::initialize() {

   _hrm->initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
   _card_table->initialize(cardtable_storage);
+
   // Do later initialization work for concurrent refinement.
   _hot_card_cache->initialize(card_counts_storage);

@@ -1129,6 +1129,18 @@ public:
     return _hrm->reserved();
   }

+  MemRegion reserved_region() const {
+    return _reserved;
+  }
+
+  HeapWord* base() const {
+    return _reserved.start();
+  }
+
+  bool is_in_reserved(const void* addr) const {
+    return _reserved.contains(addr);
+  }
+
   G1HotCardCache* g1_hot_card_cache() const { return _hot_card_cache; }

   G1CardTable* card_table() const {
@@ -86,7 +86,7 @@ template <class T> inline void G1AdjustClosure::adjust_pointer(T* p) {
   }

   // Forwarded, just update.
-  assert(Universe::heap()->is_in_reserved(forwardee), "should be in object space");
+  assert(G1CollectedHeap::heap()->is_in_reserved(forwardee), "should be in object space");
   RawAccess<IS_NOT_NULL>::oop_store(p, forwardee);
 }

@@ -115,8 +115,7 @@ inline static void check_obj_during_refinement(T* p, oop const obj) {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   // can't do because of races
   // assert(oopDesc::is_oop_or_null(obj), "expected an oop");
-  assert(check_obj_alignment(obj), "not oop aligned");
-  assert(g1h->is_in_reserved(obj), "must be in heap");
+  g1h->check_oop_location(obj);

   HeapRegion* from = g1h->heap_region_containing(p);

@@ -63,7 +63,7 @@ PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
 jint ParallelScavengeHeap::initialize() {
   const size_t reserved_heap_size = ParallelArguments::heap_reserved_size_bytes();

-  ReservedSpace heap_rs = Universe::reserve_heap(reserved_heap_size, HeapAlignment);
+  ReservedHeapSpace heap_rs = Universe::reserve_heap(reserved_heap_size, HeapAlignment);

   os::trace_page_sizes("Heap",
                        MinHeapSize,

@@ -72,9 +72,9 @@ jint ParallelScavengeHeap::initialize() {
                        heap_rs.base(),
                        heap_rs.size());

-  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
+  initialize_reserved_region(heap_rs);

-  PSCardTable* card_table = new PSCardTable(reserved_region());
+  PSCardTable* card_table = new PSCardTable(heap_rs.region());
   card_table->initialize();
   CardTableBarrierSet* const barrier_set = new CardTableBarrierSet(card_table);
   barrier_set->initialize();

@@ -168,6 +168,9 @@ class ParallelScavengeHeap : public CollectedHeap {
   bool is_in_young(oop p); // reserved part
   bool is_in_old(oop p); // reserved part

+  MemRegion reserved_region() const { return _reserved; }
+  HeapWord* base() const { return _reserved.start(); }
+
   // Memory allocation. "gc_time_limit_was_exceeded" will
   // be set to true if the adaptive size policy determine that
   // an excessive amount of time is being spent doing collections

@@ -127,7 +127,7 @@ MarkSweep::FollowStackClosure MarkSweep::follow_stack_closure;
 void MarkSweep::FollowStackClosure::do_void() { follow_stack(); }

 template <class T> inline void MarkSweep::follow_root(T* p) {
-  assert(!Universe::heap()->is_in_reserved(p),
+  assert(!Universe::heap()->is_in(p),
          "roots shouldn't be things within the heap");
   T heap_oop = RawAccess<>::oop_load(p);
   if (!CompressedOops::is_null(heap_oop)) {

@@ -87,8 +87,7 @@ template <class T> inline void MarkSweep::adjust_pointer(T* p) {
            "should be forwarded");

     if (new_obj != NULL) {
-      assert(Universe::heap()->is_in_reserved(new_obj),
-             "should be in object space");
+      DEBUG_ONLY(Universe::heap()->check_oop_location((HeapWord*)new_obj);)
       RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
     }
   }

@@ -244,10 +244,10 @@ void
 BlockOffsetArray::do_block_internal(HeapWord* blk_start,
                                     HeapWord* blk_end,
                                     Action action, bool reducing) {
-  assert(Universe::heap()->is_in_reserved(blk_start),
-         "reference must be into the heap");
-  assert(Universe::heap()->is_in_reserved(blk_end-1),
-         "limit must be within the heap");
+  assert(_sp->is_in_reserved(blk_start),
+         "reference must be into the space");
+  assert(_sp->is_in_reserved(blk_end-1),
+         "limit must be within the space");
   // This is optimized to make the test fast, assuming we only rarely
   // cross boundaries.
   uintptr_t end_ui = (uintptr_t)(blk_end - 1);

@@ -718,10 +718,10 @@ void BlockOffsetArrayContigSpace::alloc_block_work(HeapWord* blk_start,
          "blk_start should be at or before threshold");
   assert(pointer_delta(_next_offset_threshold, blk_start) <= BOTConstants::N_words,
          "offset should be <= BlockOffsetSharedArray::N");
-  assert(Universe::heap()->is_in_reserved(blk_start),
-         "reference must be into the heap");
-  assert(Universe::heap()->is_in_reserved(blk_end-1),
-         "limit must be within the heap");
+  assert(_sp->is_in_reserved(blk_start),
+         "reference must be into the space");
+  assert(_sp->is_in_reserved(blk_end-1),
+         "limit must be within the space");
   assert(_next_offset_threshold ==
          _array->_reserved.start() + _next_offset_index*BOTConstants::N_words,
          "index must agree with threshold");

@@ -775,8 +775,6 @@ void BlockOffsetArrayContigSpace::alloc_block_work(HeapWord* blk_start,
 }

 HeapWord* BlockOffsetArrayContigSpace::initialize_threshold() {
-  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
-         "just checking");
   _next_offset_index = _array->index_for(_bottom);
   _next_offset_index++;
   _next_offset_threshold =

@@ -785,8 +783,6 @@ HeapWord* BlockOffsetArrayContigSpace::initialize_threshold() {
 }

 void BlockOffsetArrayContigSpace::zero_bottom_entry() {
-  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
-         "just checking");
   size_t bottom_index = _array->index_for(_bottom);
   _array->set_offset_array(bottom_index, 0);
 }

@@ -91,7 +91,7 @@ VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
   size_t capacity_in_words = capacity() / HeapWordSize;

   return VirtualSpaceSummary(
-    reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end());
+    _reserved.start(), _reserved.start() + capacity_in_words, _reserved.end());
 }

 GCHeapSummary CollectedHeap::create_heap_summary() {

@@ -178,11 +178,11 @@ bool CollectedHeap::is_oop(oop object) const {
     return false;
   }

-  if (!is_in_reserved(object)) {
+  if (!is_in(object)) {
     return false;
   }

-  if (is_in_reserved(object->klass_or_null())) {
+  if (is_in(object->klass_or_null())) {
     return false;
   }
@@ -343,6 +343,11 @@ void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t siz
 }
 #endif // PRODUCT

+void CollectedHeap::check_oop_location(void* addr) const {
+  assert(check_obj_alignment(addr), "address is not aligned");
+  assert(_reserved.contains(addr), "address is not in reserved heap");
+}
+
 size_t CollectedHeap::max_tlab_size() const {
   // TLABs can't be bigger than we can fill with a int[Integer.MAX_VALUE].
   // This restriction could be removed by enabling filling with multiple arrays.
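A note on the hook added above: the default CollectedHeap::check_oop_location() only asserts alignment and containment in the reserved region, while collectors with different invariants override it (CMS with an empty body in the cmsHeap.hpp hunk, ZGC against its address space bounds in the zCollectedHeap.cpp hunk). Callers wrap it in DEBUG_ONLY, as the fill_args_check and MarkSweep hunks do; roughly, and only as a sketch of that pattern:

// Sketch of the caller-side pattern; names are taken from the surrounding hunks.
void example_fill_args_check(HeapWord* start, size_t words) {
  DEBUG_ONLY(Universe::heap()->check_oop_location(start);)
  DEBUG_ONLY(Universe::heap()->check_oop_location(start + words - MinObjAlignment);)
}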
@@ -371,8 +376,8 @@ void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
 {
   assert(words >= min_fill_size(), "too small to fill");
   assert(is_object_aligned(words), "unaligned size");
-  assert(Universe::heap()->is_in_reserved(start), "not in heap");
-  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
+  DEBUG_ONLY(Universe::heap()->check_oop_location(start);)
+  DEBUG_ONLY(Universe::heap()->check_oop_location(start + words - MinObjAlignment);)
 }

 void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)

@@ -516,12 +521,12 @@ void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
   full_gc_dump(timer, false);
 }

-void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
+void CollectedHeap::initialize_reserved_region(const ReservedHeapSpace& rs) {
   // It is important to do this in a way such that concurrent readers can't
   // temporarily think something is in the heap. (Seen this happen in asserts.)
   _reserved.set_word_size(0);
-  _reserved.set_start(start);
-  _reserved.set_end(end);
+  _reserved.set_start((HeapWord*)rs.base());
+  _reserved.set_end((HeapWord*)rs.end());
 }

 void CollectedHeap::post_initialize() {

@@ -51,6 +51,7 @@ class GCTracer;
 class GCMemoryManager;
 class MemoryPool;
 class MetaspaceSummary;
+class ReservedHeapSpace;
 class SoftRefPolicy;
 class Thread;
 class ThreadClosure;

@@ -102,9 +103,10 @@ class CollectedHeap : public CHeapObj<mtInternal> {
  private:
   GCHeapLog* _gc_heap_log;

+ protected:
+  // Not used by all GCs
   MemRegion _reserved;

- protected:
   bool _is_gc_active;

   // Used for filler objects (static, but initialized in ctor).

@@ -203,9 +205,7 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   virtual void safepoint_synchronize_begin() {}
   virtual void safepoint_synchronize_end() {}

-  void initialize_reserved_region(HeapWord *start, HeapWord *end);
-  MemRegion reserved_region() const { return _reserved; }
-  address base() const { return (address)reserved_region().start(); }
+  void initialize_reserved_region(const ReservedHeapSpace& rs);

   virtual size_t capacity() const = 0;
   virtual size_t used() const = 0;

@@ -226,15 +226,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   // spaces).
   virtual size_t max_capacity() const = 0;

-  // Returns "TRUE" if "p" points into the reserved area of the heap.
-  bool is_in_reserved(const void* p) const {
-    return _reserved.contains(p);
-  }
-
-  bool is_in_reserved_or_null(const void* p) const {
-    return p == NULL || is_in_reserved(p);
-  }
-
   // Returns "TRUE" iff "p" points into the committed areas of the heap.
   // This method can be expensive so avoid using it in performance critical
   // code.

@@ -242,6 +233,11 @@ class CollectedHeap : public CHeapObj<mtInternal> {

   DEBUG_ONLY(bool is_in_or_null(const void* p) const { return p == NULL || is_in(p); })

+  // This function verifies that "addr" is a valid oop location, w.r.t. heap
+  // datastructures such as bitmaps and virtual memory address. It does *not*
+  // check if the location is within committed heap memory.
+  virtual void check_oop_location(void* addr) const;
+
   virtual uint32_t hash_oop(oop obj) const;

   void set_gc_cause(GCCause::Cause v) {

@@ -163,7 +163,7 @@ void VM_GenCollectForAllocation::doit() {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   GCCauseSetter gccs(gch, _gc_cause);
   _result = gch->satisfy_failed_allocation(_word_size, _tlab);
-  assert(gch->is_in_reserved_or_null(_result), "result not in heap");
+  assert(_result == NULL || gch->is_in_reserved(_result), "result not in heap");

   if (_result == NULL && GCLocker::is_active_and_needs_gc()) {
     set_gc_locked();

@@ -105,10 +105,7 @@ jint GenCollectedHeap::initialize() {

   // Allocate space for the heap.

-  char* heap_address;
-  ReservedSpace heap_rs;
-
-  heap_address = allocate(HeapAlignment, &heap_rs);
+  ReservedHeapSpace heap_rs = allocate(HeapAlignment);

   if (!heap_rs.is_reserved()) {
     vm_shutdown_during_initialization(

@@ -116,9 +113,9 @@ jint GenCollectedHeap::initialize() {
     return JNI_ENOMEM;
   }

-  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
+  initialize_reserved_region(heap_rs);

-  _rem_set = create_rem_set(reserved_region());
+  _rem_set = create_rem_set(heap_rs.region());
   _rem_set->initialize();
   CardTableBarrierSet *bs = new CardTableBarrierSet(_rem_set);
   bs->initialize();

@@ -126,9 +123,9 @@ jint GenCollectedHeap::initialize() {

   ReservedSpace young_rs = heap_rs.first_part(_young_gen_spec->max_size(), false, false);
   _young_gen = _young_gen_spec->init(young_rs, rem_set());
-  heap_rs = heap_rs.last_part(_young_gen_spec->max_size());
+  ReservedSpace old_rs = heap_rs.last_part(_young_gen_spec->max_size());

-  ReservedSpace old_rs = heap_rs.first_part(_old_gen_spec->max_size(), false, false);
+  old_rs = old_rs.first_part(_old_gen_spec->max_size(), false, false);
   _old_gen = _old_gen_spec->init(old_rs, rem_set());
   clear_incremental_collection_failed();

@@ -150,8 +147,7 @@ void GenCollectedHeap::initialize_size_policy(size_t init_eden_size,
                                               GCTimeRatio);
 }

-char* GenCollectedHeap::allocate(size_t alignment,
-                                 ReservedSpace* heap_rs){
+ReservedHeapSpace GenCollectedHeap::allocate(size_t alignment) {
   // Now figure out the total size.
   const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
   assert(alignment % pageSize == 0, "Must be");

@@ -166,16 +162,16 @@ char* GenCollectedHeap::allocate(size_t alignment,
          "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
          SIZE_FORMAT, total_reserved, alignment);

-  *heap_rs = Universe::reserve_heap(total_reserved, alignment);
+  ReservedHeapSpace heap_rs = Universe::reserve_heap(total_reserved, alignment);

   os::trace_page_sizes("Heap",
                        MinHeapSize,
                        total_reserved,
                        alignment,
-                       heap_rs->base(),
-                       heap_rs->size());
+                       heap_rs.base(),
+                       heap_rs.size());

-  return heap_rs->base();
+  return heap_rs;
 }

 class GenIsScavengable : public BoolObjectClosure {
@@ -96,7 +96,7 @@ private:
                       bool restore_marks_for_biased_locking);

   // Reserve aligned space for the heap as needed by the contained generations.
-  char* allocate(size_t alignment, ReservedSpace* heap_rs);
+  ReservedHeapSpace allocate(size_t alignment);

   // Initialize ("weak") refs processing support
   void ref_processing_init();

@@ -180,6 +180,9 @@ public:
   bool is_young_gen(const Generation* gen) const { return gen == _young_gen; }
   bool is_old_gen(const Generation* gen) const { return gen == _old_gen; }

+  MemRegion reserved_region() const { return _reserved; }
+  bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }
+
   GenerationSpec* young_gen_spec() const;
   GenerationSpec* old_gen_spec() const;

@@ -62,7 +62,7 @@ void MarkBitMap::do_clear(MemRegion mr, bool large) {

 #ifdef ASSERT
 void MarkBitMap::check_mark(HeapWord* addr) {
-  assert(Universe::heap()->is_in_reserved(addr),
+  assert(Universe::heap()->is_in(addr),
          "Trying to access bitmap " PTR_FORMAT " for address " PTR_FORMAT " not in the heap.",
          p2i(this), p2i(addr));
 }

@@ -269,7 +269,7 @@ void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {

   _referent_addr = java_lang_ref_Reference::referent_addr_raw(_current_discovered);
   _referent = java_lang_ref_Reference::referent(_current_discovered);
-  assert(Universe::heap()->is_in_reserved_or_null(_referent),
+  assert(Universe::heap()->is_in_or_null(_referent),
          "Wrong oop found in java.lang.Reference object");
   assert(allow_null_referent ?
          oopDesc::is_oop_or_null(_referent)

@@ -599,7 +599,7 @@ void StringDedupTable::verify() {
     while (*entry != NULL) {
       typeArrayOop value = (*entry)->obj();
       guarantee(value != NULL, "Object must not be NULL");
-      guarantee(Universe::heap()->is_in_reserved(value), "Object must be on the heap");
+      guarantee(Universe::heap()->is_in(value), "Object must be on the heap");
       guarantee(!value->is_forwarded(), "Object must not be forwarded");
       guarantee(value->is_typeArray(), "Object must be a typeArrayOop");
       bool latin1 = (*entry)->latin1();

@@ -179,8 +179,8 @@ jint ShenandoahHeap::initialize() {
   // Reserve and commit memory for heap
   //

-  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
-  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));
+  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
+  initialize_reserved_region(heap_rs);
   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
   _heap_region_special = heap_rs.special();

@@ -530,6 +530,9 @@ public:

   bool is_in(const void* p) const;

+  MemRegion reserved_region() const { return _reserved; }
+  bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }
+
   void collect(GCCause::Cause cause);
   void do_full_collection(bool clear_all_soft_refs);

@@ -67,7 +67,7 @@ jint ZCollectedHeap::initialize() {
     return JNI_ENOMEM;
   }

-  initialize_reserved_region((HeapWord*)ZAddressReservedStart,
+  Universe::calculate_verify_data((HeapWord*)ZAddressReservedStart,
                              (HeapWord*)ZAddressReservedEnd);

   return JNI_OK;

@@ -286,9 +286,10 @@ void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
 VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() {
   const size_t capacity_in_words = capacity() / HeapWordSize;
   const size_t max_capacity_in_words = max_capacity() / HeapWordSize;
-  return VirtualSpaceSummary(reserved_region().start(),
-                             reserved_region().start() + capacity_in_words,
-                             reserved_region().start() + max_capacity_in_words);
+  HeapWord* const heap_start = (HeapWord*)ZAddressReservedStart;
+  return VirtualSpaceSummary(heap_start,
+                             heap_start + capacity_in_words,
+                             heap_start + max_capacity_in_words);
 }

 void ZCollectedHeap::safepoint_synchronize_begin() {

@@ -366,3 +367,11 @@ void ZCollectedHeap::verify(VerifyOption option /* ignored */) {
 bool ZCollectedHeap::is_oop(oop object) const {
   return CollectedHeap::is_oop(object) && _heap.is_oop(object);
 }
+
+void ZCollectedHeap::check_oop_location(void* addr) const {
+  assert(check_obj_alignment(addr), "address is not aligned");
+
+  const uintptr_t addr_int = reinterpret_cast<uintptr_t>(addr);
+  assert(addr_int >= ZAddressSpaceStart, "address is outside of the heap");
+  assert(addr_int < ZAddressSpaceEnd, "address is outside of the heap");
+}

@@ -126,6 +126,7 @@ public:
   virtual void prepare_for_verify();
   virtual void verify(VerifyOption option /* ignored */);
   virtual bool is_oop(oop object) const;
+  virtual void check_oop_location(void* addr) const;
 };

 #endif // SHARE_GC_Z_ZCOLLECTEDHEAP_HPP
@@ -769,10 +769,10 @@ JRT_ENTRY_NO_ASYNC(void, InterpreterRuntime::monitorenter(JavaThread* thread, Ba
     Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
   }
   Handle h_obj(thread, elem->obj());
-  assert(Universe::heap()->is_in_reserved_or_null(h_obj()),
+  assert(Universe::heap()->is_in_or_null(h_obj()),
          "must be NULL or an object");
   ObjectSynchronizer::enter(h_obj, elem->lock(), CHECK);
-  assert(Universe::heap()->is_in_reserved_or_null(elem->obj()),
+  assert(Universe::heap()->is_in_or_null(elem->obj()),
          "must be NULL or an object");
 #ifdef ASSERT
   thread->last_frame().interpreter_frame_verify_monitor(elem);

@@ -786,7 +786,7 @@ JRT_ENTRY_NO_ASYNC(void, InterpreterRuntime::monitorexit(JavaThread* thread, Bas
   thread->last_frame().interpreter_frame_verify_monitor(elem);
 #endif
   Handle h_obj(thread, elem->obj());
-  assert(Universe::heap()->is_in_reserved_or_null(h_obj()),
+  assert(Universe::heap()->is_in_or_null(h_obj()),
          "must be NULL or an object");
   if (elem == NULL || h_obj()->is_unlocked()) {
     THROW(vmSymbols::java_lang_IllegalMonitorStateException());

@@ -853,10 +853,10 @@ void InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes::Code byte
     Symbol* signature = call.signature();
     receiver = Handle(thread, last_frame.callee_receiver(signature));

-    assert(Universe::heap()->is_in_reserved_or_null(receiver()),
+    assert(Universe::heap()->is_in_or_null(receiver()),
            "sanity check");
     assert(receiver.is_null() ||
-           !Universe::heap()->is_in_reserved(receiver->klass()),
+           !Universe::heap()->is_in(receiver->klass()),
            "sanity check");
   }

@@ -858,17 +858,6 @@ JVMCIObject JVMCIRuntime::get_HotSpotJVMCIRuntime(JVMCI_TRAPS) {

 // private void CompilerToVM.registerNatives()
 JVM_ENTRY_NO_ENV(void, JVM_RegisterJVMCINatives(JNIEnv *env, jclass c2vmClass))
-
-#ifdef _LP64
-#ifndef TARGET_ARCH_sparc
-  uintptr_t heap_end = (uintptr_t) Universe::heap()->reserved_region().end();
-  uintptr_t allocation_end = heap_end + ((uintptr_t)16) * 1024 * 1024 * 1024;
-  guarantee(heap_end < allocation_end, "heap end too close to end of address space (might lead to erroneous TLAB allocations)");
-#endif // TARGET_ARCH_sparc
-#else
-  fatal("check TLAB allocation code for address space conflicts");
-#endif
-
   JNI_JVMCIENV(thread, env);

   if (!EnableJVMCI) {

@@ -231,7 +231,7 @@ void FileMapHeader::populate(FileMapInfo* mapinfo, size_t alignment) {
   _narrow_klass_shift = CompressedKlassPointers::shift();
   _shared_path_table = mapinfo->_shared_path_table;
   if (HeapShared::is_heap_object_archiving_allowed()) {
-    _heap_reserved = Universe::heap()->reserved_region();
+    _heap_end = CompressedOops::end();
   }

   // The following fields are for sanity checks for whether this archive

@@ -1500,8 +1500,6 @@ void FileMapInfo::map_heap_regions_impl() {
     // referenced objects are replaced. See HeapShared::initialize_from_archived_subgraph().
   }

-  MemRegion heap_reserved = Universe::heap()->reserved_region();
-
   log_info(cds)("CDS archive was created with max heap size = " SIZE_FORMAT "M, and the following configuration:",
                 max_heap_size()/M);
   log_info(cds)(" narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",

@@ -1510,7 +1508,7 @@ void FileMapInfo::map_heap_regions_impl() {
                 narrow_oop_mode(), p2i(narrow_oop_base()), narrow_oop_shift());

   log_info(cds)("The current max heap size = " SIZE_FORMAT "M, HeapRegion::GrainBytes = " SIZE_FORMAT,
-                heap_reserved.byte_size()/M, HeapRegion::GrainBytes);
+                MaxHeapSize/M, HeapRegion::GrainBytes);
   log_info(cds)(" narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
                 p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());
   log_info(cds)(" narrow_oop_mode = %d, narrow_oop_base = " PTR_FORMAT ", narrow_oop_shift = %d",

@@ -1529,10 +1527,10 @@ void FileMapInfo::map_heap_regions_impl() {
     _heap_pointers_need_patching = true;
   } else {
     MemRegion range = get_heap_regions_range_with_current_oop_encoding_mode();
-    if (!heap_reserved.contains(range)) {
+    if (!CompressedOops::is_in(range)) {
       log_info(cds)("CDS heap data need to be relocated because");
       log_info(cds)("the desired range " PTR_FORMAT " - " PTR_FORMAT, p2i(range.start()), p2i(range.end()));
-      log_info(cds)("is outside of the heap " PTR_FORMAT " - " PTR_FORMAT, p2i(heap_reserved.start()), p2i(heap_reserved.end()));
+      log_info(cds)("is outside of the heap " PTR_FORMAT " - " PTR_FORMAT, p2i(CompressedOops::begin()), p2i(CompressedOops::end()));
       _heap_pointers_need_patching = true;
     }
   }

@@ -1548,8 +1546,8 @@ void FileMapInfo::map_heap_regions_impl() {
     // At run time, they may not be inside the heap, so we move them so
     // that they are now near the top of the runtime time. This can be done by
     // the simple math of adding the delta as shown above.
-    address dumptime_heap_end = (address)_header->_heap_reserved.end();
-    address runtime_heap_end = (address)heap_reserved.end();
+    address dumptime_heap_end = (address)_header->_heap_end;
+    address runtime_heap_end = (address)CompressedOops::end();
     delta = runtime_heap_end - dumptime_heap_end;
   }

@@ -142,7 +142,7 @@ struct FileMapHeader : public CDSFileMapHeaderBase {
   size_t _cds_i2i_entry_code_buffers_size;
   size_t _core_spaces_size; // number of bytes allocated by the core spaces
                             // (mc, md, ro, rw and od).
-  MemRegion _heap_reserved; // reserved region for the entire heap at dump time.
+  address _heap_end; // heap end at dump time.
   bool _base_archive_is_default; // indicates if the base archive is the system default one

   // The following fields are all sanity checks for whether this archive
@ -1268,7 +1268,7 @@ void Metaspace::global_initialize() {
|
||||||
{
|
{
|
||||||
#ifdef _LP64
|
#ifdef _LP64
|
||||||
if (using_class_space()) {
|
if (using_class_space()) {
|
||||||
char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
|
char* base = (char*)align_up(CompressedOops::end(), _reserve_alignment);
|
||||||
allocate_metaspace_compressed_klass_ptrs(base, 0);
|
allocate_metaspace_compressed_klass_ptrs(base, 0);
|
||||||
}
|
}
|
||||||
#endif // _LP64
|
#endif // _LP64
|
||||||
|
|
|
@ -666,8 +666,6 @@ jint universe_init() {
|
||||||
return status;
|
return status;
|
||||||
}
|
}
|
||||||
|
|
||||||
CompressedOops::initialize();
|
|
||||||
|
|
||||||
Universe::initialize_tlab();
|
Universe::initialize_tlab();
|
||||||
|
|
||||||
Metaspace::global_initialize();
|
Metaspace::global_initialize();
|
||||||
|
@ -747,7 +745,7 @@ void Universe::initialize_tlab() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
|
ReservedHeapSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
|
||||||
|
|
||||||
assert(alignment <= Arguments::conservative_max_heap_alignment(),
|
assert(alignment <= Arguments::conservative_max_heap_alignment(),
|
||||||
"actual alignment " SIZE_FORMAT " must be within maximum heap alignment " SIZE_FORMAT,
|
"actual alignment " SIZE_FORMAT " must be within maximum heap alignment " SIZE_FORMAT,
|
||||||
|
@@ -770,16 +768,16 @@ ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
          "must be exactly of required size and alignment");
   // We are good.

-  if (UseCompressedOops) {
-    // Universe::initialize_heap() will reset this to NULL if unscaled
-    // or zero-based narrow oops are actually used.
-    // Else heap start and base MUST differ, so that NULL can be encoded nonambigous.
-    CompressedOops::set_base((address)total_rs.compressed_oop_base());
-  }
-
   if (AllocateHeapAt != NULL) {
     log_info(gc,heap)("Successfully allocated Java heap at location %s", AllocateHeapAt);
   }
+
+  if (UseCompressedOops) {
+    CompressedOops::initialize(total_rs);
+  }
+
+  Universe::calculate_verify_data((HeapWord*)total_rs.base(), (HeapWord*)total_rs.end());

   return total_rs;
 }

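One way to read this hunk together with the universe_init() hunk further up: CompressedOops needs the concrete reservation to pick its encoding, so its initialization moves out of universe_init() and into reserve_heap(), right after the space is reserved. A minimal sketch of that ordering, with invented stand-in types rather than the real ReservedHeapSpace and CompressedOops:

    #include <cstdint>

    struct HeapReservationSketch {        // stand-in for ReservedHeapSpace
      uint64_t base;
      uint64_t size;
      uint64_t end() const { return base + size; }
    };

    struct CompressedOopsSketch {         // stand-in for CompressedOops
      static uint64_t heap_begin;
      static uint64_t heap_end;
      static void initialize(const HeapReservationSketch& rs) {
        heap_begin = rs.base;             // remember the reserved range for later is_in() queries
        heap_end   = rs.end();
      }
    };
    uint64_t CompressedOopsSketch::heap_begin = 0;
    uint64_t CompressedOopsSketch::heap_end = 0;

    static HeapReservationSketch reserve_heap_sketch(uint64_t size) {
      HeapReservationSketch rs = { 0x80000000ULL, size };  // pretend reservation
      CompressedOopsSketch::initialize(rs);                // derive oop-encoding state from it
      return rs;
    }

    int main() {
      HeapReservationSketch rs = reserve_heap_sketch(512ULL * 1024 * 1024);
      return CompressedOopsSketch::heap_end == rs.end() ? 0 : 1;
    }
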
@@ -1171,14 +1169,10 @@ void Universe::calculate_verify_data(HeapWord* low_boundary, HeapWord* high_boun
 // Oop verification (see MacroAssembler::verify_oop)

 uintptr_t Universe::verify_oop_mask() {
-  MemRegion m = heap()->reserved_region();
-  calculate_verify_data(m.start(), m.end());
   return _verify_oop_mask;
 }

 uintptr_t Universe::verify_oop_bits() {
-  MemRegion m = heap()->reserved_region();
-  calculate_verify_data(m.start(), m.end());
   return _verify_oop_bits;
 }

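After this change, verify_oop_mask() and verify_oop_bits() simply return values that calculate_verify_data() computed once, at heap-reservation time (see the reserve_heap hunk above). Conceptually, the pair acts as a cheap "plausibly a heap address" filter. The sketch below shows one way such a mask/bits pair can be derived from the range boundaries; it is only an illustration of the idea, not necessarily the exact HotSpot derivation:

    #include <cstdint>
    #include <cassert>

    // Keep only the high-order bits shared by every address in [low, high);
    // then (p & mask) == bits is a cheap membership filter for the range.
    static void calc_verify_sketch(uint64_t low, uint64_t high,
                                   uint64_t* mask, uint64_t* bits) {
      uint64_t diff = low ^ (high - 1);
      uint64_t m = ~(uint64_t)0;
      while (diff != 0) {          // clear mask bits up to the highest differing bit
        m <<= 1;
        diff >>= 1;
      }
      *mask = m;
      *bits = low & m;
    }

    int main() {
      uint64_t mask, bits;
      calc_verify_sketch(0x700000000ULL, 0x740000000ULL, &mask, &bits);
      uint64_t inside = 0x712345678ULL;
      assert((inside & mask) == bits);   // addresses in the range pass the filter
      return 0;
    }
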
@@ -212,9 +212,9 @@ class Universe: AllStatic {
   static uintptr_t _verify_oop_mask;
   static uintptr_t _verify_oop_bits;

+ public:
   static void calculate_verify_data(HeapWord* low_boundary, HeapWord* high_boundary) PRODUCT_RETURN;

- public:
   // Known classes in the VM
   static Klass* boolArrayKlassObj() { return typeArrayKlassObj(T_BOOLEAN); }
   static Klass* byteArrayKlassObj() { return typeArrayKlassObj(T_BYTE); }

@@ -326,7 +326,7 @@ class Universe: AllStatic {
   static CollectedHeap* heap() { return _collectedHeap; }

   // Reserve Java heap and determine CompressedOops mode
-  static ReservedSpace reserve_heap(size_t heap_size, size_t alignment);
+  static ReservedHeapSpace reserve_heap(size_t heap_size, size_t alignment);

   // Historic gc information
   static size_t get_heap_free_at_last_gc() { return _heap_capacity_at_last_gc - _heap_used_at_last_gc; }

@@ -637,6 +637,10 @@ ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large,
   }
 }

+MemRegion ReservedHeapSpace::region() const {
+  return MemRegion((HeapWord*)base(), (HeapWord*)end());
+}
+
 // Reserve space for code segment. Same as Java heap only we mark this as
 // executable.
 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,

@@ -25,6 +25,7 @@
 #ifndef SHARE_MEMORY_VIRTUALSPACE_HPP
 #define SHARE_MEMORY_VIRTUALSPACE_HPP

+#include "memory/memRegion.hpp"
 #include "utilities/globalDefinitions.hpp"

 class outputStream;

@@ -122,7 +123,8 @@ class ReservedHeapSpace : public ReservedSpace {
   ReservedHeapSpace(size_t size, size_t forced_base_alignment, bool large, const char* heap_allocation_directory = NULL);
   // Returns the base to be used for compression, i.e. so that null can be
   // encoded safely and implicit null checks can work.
-  char *compressed_oop_base() { return _base - _noaccess_prefix; }
+  char *compressed_oop_base() const { return _base - _noaccess_prefix; }
+  MemRegion region() const;
 };

 // Class encapsulating behavior specific memory space for Code

@@ -34,8 +34,7 @@

 // For UseCompressedOops.
 NarrowPtrStruct CompressedOops::_narrow_oop = { NULL, 0, true };
-address CompressedOops::_narrow_ptrs_base;
+MemRegion CompressedOops::_heap_address_range;

 // Choose the heap base address and oop encoding mode
 // when compressed oops are used:

@@ -44,26 +43,28 @@ address CompressedOops::_narrow_ptrs_base;
 // ZeroBased - Use zero based compressed oops with encoding when
 //     NarrowOopHeapBaseMin + heap_size < 32Gb
 // HeapBased - Use compressed oops with heap base + encoding.
-void CompressedOops::initialize() {
+void CompressedOops::initialize(const ReservedHeapSpace& heap_space) {
 #ifdef _LP64
-  if (UseCompressedOops) {
   // Subtract a page because something can get allocated at heap base.
   // This also makes implicit null checking work, because the
   // memory+1 page below heap_base needs to cause a signal.
   // See needs_explicit_null_check.
   // Only set the heap base for compressed oops because it indicates
   // compressed oops for pstack code.
-  if ((uint64_t)Universe::heap()->reserved_region().end() > UnscaledOopHeapMax) {
+  if ((uint64_t)heap_space.end() > UnscaledOopHeapMax) {
    // Didn't reserve heap below 4Gb. Must shift.
    set_shift(LogMinObjAlignmentInBytes);
   }
-  if ((uint64_t)Universe::heap()->reserved_region().end() <= OopEncodingHeapMax) {
+  if ((uint64_t)heap_space.end() <= OopEncodingHeapMax) {
    // Did reserve heap below 32Gb. Can use base == 0;
    set_base(0);
+  } else {
+    set_base((address)heap_space.compressed_oop_base());
   }

   AOTLoader::set_narrow_oop_shift();

-  set_ptrs_base(base());
+  _heap_address_range = heap_space.region();

   LogTarget(Info, gc, heap, coops) lt;
   if (lt.is_enabled()) {

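The branches above implement the usual compressed-oops policy: unscaled (no shift, zero base) when the heap ends below 4 GB, zero-based with a shift of LogMinObjAlignmentInBytes below 32 GB, and heap-based (non-zero base plus shift) otherwise. A standalone sketch of that decision, with illustrative constants and invented names:

    #include <cstdint>
    #include <cstdio>

    // Illustrative constants for 8-byte object alignment (LogMinObjAlignmentInBytes == 3).
    static const uint64_t kUnscaledOopHeapMax = UINT64_C(1) << 32;        // 4 GB
    static const uint64_t kOopEncodingHeapMax = UINT64_C(1) << (32 + 3);  // 32 GB

    struct NarrowOopMode { uint64_t base; int shift; };

    // Sketch of the choice made in CompressedOops::initialize(): the inputs are the
    // end of the reserved heap and the base the reservation can offer for encoding.
    static NarrowOopMode choose_mode(uint64_t heap_end, uint64_t compressed_oop_base) {
      NarrowOopMode m = { 0, 0 };
      if (heap_end > kUnscaledOopHeapMax) {
        m.shift = 3;                       // didn't fit below 4 GB: must shift
      }
      if (heap_end <= kOopEncodingHeapMax) {
        m.base = 0;                        // fits below 32 GB: zero-based encoding
      } else {
        m.base = compressed_oop_base;      // otherwise: heap-based encoding
      }
      return m;
    }

    int main() {
      NarrowOopMode small = choose_mode(UINT64_C(3) << 30, 0);       // heap ends at 3 GB
      NarrowOopMode large = choose_mode(UINT64_C(40) << 30, 0x1000); // heap ends at 40 GB
      printf("small: base=%llu shift=%d; large: base=%llu shift=%d\n",
             (unsigned long long)small.base, small.shift,
             (unsigned long long)large.base, large.shift);
      return 0;
    }
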
@@ -76,9 +77,9 @@ void CompressedOops::initialize() {
     Arguments::PropertyList_add(new SystemProperty("java.vm.compressedOopsMode",
                                                    mode_to_string(mode()),
                                                    false));
-  }
   // base() is one page below the heap.
-  assert((intptr_t)base() <= (intptr_t)(Universe::heap()->base() - os::vm_page_size()) ||
+  assert((intptr_t)base() <= (intptr_t)(_heap_address_range.start() - os::vm_page_size()) ||
          base() == NULL, "invalid value");
   assert(shift() == LogMinObjAlignmentInBytes ||
          shift() == 0, "invalid value");

@@ -99,8 +100,12 @@ void CompressedOops::set_use_implicit_null_checks(bool use) {
   _narrow_oop._use_implicit_null_checks = use;
 }

-void CompressedOops::set_ptrs_base(address addr) {
-  _narrow_ptrs_base = addr;
+bool CompressedOops::is_in(void* addr) {
+  return _heap_address_range.contains(addr);
+}
+
+bool CompressedOops::is_in(MemRegion mr) {
+  return _heap_address_range.contains(mr);
 }

 CompressedOops::Mode CompressedOops::mode() {

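CompressedOops now records the reserved heap range itself and can answer the containment queries that callers previously sent to CollectedHeap::is_in_reserved(). A minimal sketch of a half-open [start, end) range with both kinds of checks, using plain C++ rather than the real MemRegion API:

    #include <cstdint>

    // Half-open address range [start, end), mimicking what _heap_address_range stores.
    struct RangeSketch {
      uint64_t start;
      uint64_t end;
      bool contains(const void* p) const {
        uint64_t a = (uint64_t)(uintptr_t)p;
        return a >= start && a < end;
      }
      bool contains(const RangeSketch& other) const {   // whole-range containment
        return other.start >= start && other.end <= end;
      }
    };

    int main() {
      RangeSketch heap = { 0x80000000ULL, 0xA0000000ULL };
      int x = 0;                                         // a stack local, not a heap address
      bool in_heap = heap.contains(&x);
      RangeSketch sub = { 0x90000000ULL, 0x98000000ULL };
      bool sub_in = heap.contains(sub);
      return (in_heap ? 1 : 0) + (sub_in ? 0 : 2);       // expect 0
    }
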
@@ -155,7 +160,7 @@ bool CompressedOops::base_overlaps() {

 void CompressedOops::print_mode(outputStream* st) {
   st->print("Heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
-            p2i(Universe::heap()->base()), Universe::heap()->reserved_region().byte_size()/M);
+            p2i(_heap_address_range.start()), _heap_address_range.byte_size()/M);

   st->print(", Compressed Oops mode: %s", mode_to_string(mode()));

@@ -26,10 +26,12 @@
 #define SHARE_OOPS_COMPRESSEDOOPS_HPP

 #include "memory/allocation.hpp"
+#include "memory/memRegion.hpp"
 #include "oops/oopsHierarchy.hpp"
 #include "utilities/globalDefinitions.hpp"

 class outputStream;
+class ReservedHeapSpace;

 struct NarrowPtrStruct {
   // Base address for oop-within-java-object materialization.

@@ -49,7 +51,8 @@ class CompressedOops : public AllStatic {
   // For UseCompressedOops.
   static NarrowPtrStruct _narrow_oop;

-  static address _narrow_ptrs_base;
+  // The address range of the heap
+  static MemRegion _heap_address_range;

 public:
   // For UseCompressedOops

@@ -73,21 +76,24 @@ public:
     AnyNarrowOopMode = 4
   };

-  static void initialize();
+  static void initialize(const ReservedHeapSpace& heap_space);

   static void set_base(address base);
   static void set_shift(int shift);
   static void set_use_implicit_null_checks(bool use);

-  static void set_ptrs_base(address addr);
-
   static address base() { return _narrow_oop._base; }
+  static address begin() { return (address)_heap_address_range.start(); }
+  static address end() { return (address)_heap_address_range.end(); }
   static bool is_base(void* addr) { return (base() == (address)addr); }
   static int shift() { return _narrow_oop._shift; }
   static bool use_implicit_null_checks() { return _narrow_oop._use_implicit_null_checks; }

-  static address* ptrs_base_addr() { return &_narrow_ptrs_base; }
-  static address ptrs_base() { return _narrow_ptrs_base; }
+  static address* ptrs_base_addr() { return &_narrow_oop._base; }
+  static address ptrs_base() { return _narrow_oop._base; }
+
+  static bool is_in(void* addr);
+  static bool is_in(MemRegion mr);

   static Mode mode();
   static const char* mode_to_string(Mode mode);

@@ -57,8 +57,7 @@ inline oop CompressedOops::decode(narrowOop v) {

 inline narrowOop CompressedOops::encode_not_null(oop v) {
   assert(!is_null(v), "oop value can never be zero");
-  assert(check_obj_alignment(v), "Address not aligned");
-  assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
+  DEBUG_ONLY(Universe::heap()->check_oop_location(v);)
   uint64_t pd = (uint64_t)(pointer_delta((void*)v, (void*)base(), 1));
   assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
   uint64_t result = pd >> shift();

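The encoding itself is untouched by this hunk: a narrow oop is still the byte offset of the object from base(), shifted right by shift(). A small worked example of that round trip, standalone C++ with made-up addresses:

    #include <cstdint>
    #include <cassert>

    int main() {
      // Illustrative heap-based encoding parameters: 8-byte alignment => shift of 3.
      uint64_t base  = 0x0000001000000000ULL;
      int      shift = 3;

      uint64_t oop = base + 0x12345678ULL;       // an aligned object address in the heap

      // encode: offset from base, then drop the alignment bits.
      uint64_t pd     = oop - base;              // pointer_delta in bytes
      uint32_t narrow = (uint32_t)(pd >> shift);

      // decode: restore the offset and add the base back.
      uint64_t decoded = base + ((uint64_t)narrow << shift);
      assert(decoded == oop);
      return 0;
    }
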
@@ -35,6 +35,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/thread.inline.hpp"
 #include "utilities/copy.hpp"
+#include "utilities/macros.hpp"

 bool always_do_update_barrier = false;

|
@ -123,14 +124,6 @@ bool oopDesc::is_oop_or_null(oop obj, bool ignore_mark_word) {
|
||||||
return obj == NULL ? true : is_oop(obj, ignore_mark_word);
|
return obj == NULL ? true : is_oop(obj, ignore_mark_word);
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifndef PRODUCT
|
|
||||||
#if INCLUDE_CDS_JAVA_HEAP
|
|
||||||
bool oopDesc::is_archived_object(oop p) {
|
|
||||||
return HeapShared::is_archived_object(p);
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
#endif // PRODUCT
|
|
||||||
|
|
||||||
VerifyOopClosure VerifyOopClosure::verify_oop;
|
VerifyOopClosure VerifyOopClosure::verify_oop;
|
||||||
|
|
||||||
template <class T> void VerifyOopClosure::do_oop_work(T* p) {
|
template <class T> void VerifyOopClosure::do_oop_work(T* p) {
|
||||||
|
@ -215,3 +208,13 @@ void oopDesc::release_float_field_put(int offset, jfloat value) { HeapAcce
|
||||||
|
|
||||||
jdouble oopDesc::double_field_acquire(int offset) const { return HeapAccess<MO_ACQUIRE>::load_at(as_oop(), offset); }
|
jdouble oopDesc::double_field_acquire(int offset) const { return HeapAccess<MO_ACQUIRE>::load_at(as_oop(), offset); }
|
||||||
void oopDesc::release_double_field_put(int offset, jdouble value) { HeapAccess<MO_RELEASE>::store_at(as_oop(), offset, value); }
|
void oopDesc::release_double_field_put(int offset, jdouble value) { HeapAccess<MO_RELEASE>::store_at(as_oop(), offset, value); }
|
||||||
|
|
||||||
|
#ifdef ASSERT
|
||||||
|
void oopDesc::verify_forwardee(oop forwardee) {
|
||||||
|
Universe::heap()->check_oop_location(forwardee);
|
||||||
|
#if INCLUDE_CDS_JAVA_HEAP
|
||||||
|
assert(!HeapShared::is_archived_object(forwardee) && !HeapShared::is_archived_object(this),
|
||||||
|
"forwarding archive object");
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
|
@@ -256,9 +256,6 @@ class oopDesc {
   // asserts and guarantees
   static bool is_oop(oop obj, bool ignore_mark_word = false);
   static bool is_oop_or_null(oop obj, bool ignore_mark_word = false);
-#ifndef PRODUCT
-  static bool is_archived_object(oop p) NOT_CDS_JAVA_HEAP_RETURN_(false);
-#endif

   // garbage collection
   inline bool is_gc_marked() const;

@@ -266,6 +263,8 @@ class oopDesc {
   // Forward pointer operations for scavenge
   inline bool is_forwarded() const;

+  void verify_forwardee(oop forwardee) NOT_DEBUG_RETURN;
+
   inline void forward_to(oop p);
   inline bool cas_forward_to(oop p, markWord compare, atomic_memory_order order = memory_order_conservative);

@@ -350,13 +350,7 @@ bool oopDesc::is_forwarded() const {

 // Used by scavengers
 void oopDesc::forward_to(oop p) {
-  assert(check_obj_alignment(p),
-         "forwarding to something not aligned");
-  assert(Universe::heap()->is_in_reserved(p),
-         "forwarding to something not in heap");
-  assert(!is_archived_object(oop(this)) &&
-         !is_archived_object(p),
-         "forwarding archive object");
+  verify_forwardee(p);
   markWord m = markWord::encode_pointer_as_mark(p);
   assert(m.decode_pointer() == p, "encoding must be reversable");
   set_mark_raw(m);

@@ -364,22 +358,14 @@ void oopDesc::forward_to(oop p) {

 // Used by parallel scavengers
 bool oopDesc::cas_forward_to(oop p, markWord compare, atomic_memory_order order) {
-  assert(check_obj_alignment(p),
-         "forwarding to something not aligned");
-  assert(Universe::heap()->is_in_reserved(p),
-         "forwarding to something not in heap");
+  verify_forwardee(p);
   markWord m = markWord::encode_pointer_as_mark(p);
   assert(m.decode_pointer() == p, "encoding must be reversable");
   return cas_set_mark_raw(m, compare, order) == compare;
 }

 oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order order) {
-  // CMS forwards some non-heap value into the mark oop to reserve oops during
-  // promotion, so the next two asserts do not hold.
-  assert(UseConcMarkSweepGC || check_obj_alignment(p),
-         "forwarding to something not aligned");
-  assert(UseConcMarkSweepGC || Universe::heap()->is_in_reserved(p),
-         "forwarding to something not in heap");
+  verify_forwardee(p);
   markWord m = markWord::encode_pointer_as_mark(p);
   assert(m.decode_pointer() == p, "encoding must be reversable");
   markWord old_mark = cas_set_mark_raw(m, compare, order);

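All three forwarding entry points (forward_to, cas_forward_to, forward_to_atomic) now funnel their debug checks through the single verify_forwardee() helper and then store the forwarding pointer encoded in the mark word. The sketch below shows a toy version of that encoding; the tag values and layout are invented for illustration and are not HotSpot's real markWord format:

    #include <cstdint>
    #include <cassert>

    // Toy mark-word scheme: the low two bits are a tag, and a "forwarded" mark stores
    // the new address with tag 0b11, which works because object addresses are aligned.
    static const uintptr_t kTagMask      = 0x3;
    static const uintptr_t kForwardedTag = 0x3;

    static uintptr_t encode_pointer_as_mark(void* p) {
      assert(((uintptr_t)p & kTagMask) == 0);        // alignment leaves the tag bits free
      return (uintptr_t)p | kForwardedTag;
    }

    static void* decode_forwardee(uintptr_t mark) {
      assert((mark & kTagMask) == kForwardedTag);
      return (void*)(mark & ~kTagMask);
    }

    int main() {
      alignas(8) static char new_copy[16];           // pretend this is the evacuated object
      uintptr_t mark = encode_pointer_as_mark(new_copy);
      assert(decode_forwardee(mark) == new_copy);    // encoding must be reversible
      return 0;
    }
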
@@ -190,8 +190,8 @@ template <class T> inline T cast_from_oop(oop o) {
   return (T)(CHECK_UNHANDLED_OOPS_ONLY((void*))o);
 }

-inline bool check_obj_alignment(oop obj) {
-  return (cast_from_oop<intptr_t>(obj) & MinObjAlignmentInBytesMask) == 0;
+inline bool check_obj_alignment(void* ptr) {
+  return (uintptr_t(ptr) & MinObjAlignmentInBytesMask) == 0;
 }

 // The metadata hierarchy is separate from the oop hierarchy

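check_obj_alignment() now accepts a raw void*, so it can be applied to addresses that are not (yet) valid oops; the test itself is just a mask of the low bits. A tiny worked example, assuming 8-byte object alignment so the mask is 0x7:

    #include <cstdint>
    #include <cassert>

    int main() {
      const uintptr_t kMinObjAlignmentInBytesMask = 0x7;   // 8-byte alignment assumed

      void* aligned   = (void*)0x00007f1234567890ULL;      // low three bits are zero
      void* unaligned = (void*)0x00007f1234567893ULL;      // low bits are 0b011

      assert(((uintptr_t)aligned   & kMinObjAlignmentInBytesMask) == 0);
      assert(((uintptr_t)unaligned & kMinObjAlignmentInBytesMask) != 0);
      return 0;
    }
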
@@ -394,7 +394,7 @@ bool Symbol::is_valid(Symbol* s) {
   if (!os::is_readable_range(s, s + 1)) return false;

   // Symbols are not allocated in Java heap.
-  if (Universe::heap()->is_in_reserved(s)) return false;
+  if (Universe::heap()->is_in(s)) return false;

   int len = s->utf8_length();
   if (len < 0) return false;

@@ -348,7 +348,7 @@ const class TypePtr *MachNode::adr_type() const {
     return TypePtr::BOTTOM;
   }
   // %%% make offset be intptr_t
-  assert(!Universe::heap()->is_in_reserved(cast_to_oop(offset)), "must be a raw ptr");
+  assert(!Universe::heap()->is_in(cast_to_oop(offset)), "must be a raw ptr");
   return TypeRawPtr::BOTTOM;
 }

@@ -1545,7 +1545,7 @@ class TagObjectCollector : public JvmtiTagHashmapEntryClosure {
       // SATB marking similar to other j.l.ref.Reference referents. This is
       // achieved by using a phantom load in the object() accessor.
       oop o = entry->object();
-      assert(o != NULL && Universe::heap()->is_in_reserved(o), "sanity check");
+      assert(o != NULL && Universe::heap()->is_in(o), "sanity check");
       jobject ref = JNIHandles::make_local(JavaThread::current(), o);
       _object_results->append(ref);
       _tag_results->append((uint64_t)entry->tag());

@@ -2572,7 +2572,7 @@ class SimpleRootsClosure : public OopClosure {
       return;
     }

-    assert(Universe::heap()->is_in_reserved(o), "should be impossible");
+    assert(Universe::heap()->is_in(o), "should be impossible");

     jvmtiHeapReferenceKind kind = root_kind();
     if (kind == JVMTI_HEAP_REFERENCE_SYSTEM_CLASS) {

|
||||||
oop fld_o = o->obj_field(field->field_offset());
|
oop fld_o = o->obj_field(field->field_offset());
|
||||||
// ignore any objects that aren't visible to profiler
|
// ignore any objects that aren't visible to profiler
|
||||||
if (fld_o != NULL) {
|
if (fld_o != NULL) {
|
||||||
assert(Universe::heap()->is_in_reserved(fld_o), "unsafe code should not "
|
assert(Universe::heap()->is_in(fld_o), "unsafe code should not "
|
||||||
"have references to Klass* anymore");
|
"have references to Klass* anymore");
|
||||||
int slot = field->field_index();
|
int slot = field->field_index();
|
||||||
if (!CallbackInvoker::report_field_reference(o, fld_o, slot)) {
|
if (!CallbackInvoker::report_field_reference(o, fld_o, slot)) {
|
||||||
|
|
|
@@ -483,7 +483,7 @@ void JNIHandleBlock::oops_do(OopClosure* f) {


 jobject JNIHandleBlock::allocate_handle(oop obj) {
-  assert(Universe::heap()->is_in_reserved(obj), "sanity check");
+  assert(Universe::heap()->is_in(obj), "sanity check");
   if (_top == 0) {
     // This is the first allocation or the initial block got zapped when
     // entering a native function. If we have any following blocks they are

@@ -30,25 +30,12 @@ TEST_VM(CollectedHeap, is_in) {
   CollectedHeap* heap = Universe::heap();

   uintptr_t epsilon = (uintptr_t) MinObjAlignment;
-  uintptr_t heap_start = (uintptr_t) heap->reserved_region().start();
-  uintptr_t heap_end = (uintptr_t) heap->reserved_region().end();
+  uintptr_t outside_heap = (uintptr_t) &epsilon;

   // Test that NULL is not in the heap.
   ASSERT_FALSE(heap->is_in(NULL)) << "NULL is unexpectedly in the heap";

-  // Test that a pointer to before the heap start is reported as outside the heap.
-  ASSERT_GE(heap_start, ((uintptr_t) NULL + epsilon))
-      << "Sanity check - heap should not start at 0";
-
-  void* before_heap = (void*) (heap_start - epsilon);
-  ASSERT_FALSE(heap->is_in(before_heap)) << "before_heap: " << p2i(before_heap)
-                                         << " is unexpectedly in the heap";
-
-  // Test that a pointer to after the heap end is reported as outside the heap.
-  ASSERT_LE(heap_end, ((uintptr_t)-1 - epsilon))
-      << "Sanity check - heap should not end at the end of address space";
-
-  void* after_heap = (void*) (heap_end + epsilon);
-  ASSERT_FALSE(heap->is_in(after_heap)) << "after_heap: " << p2i(after_heap)
+  // Test that a pointer to outside the heap start is reported as outside the heap.
+  ASSERT_FALSE(heap->is_in((void*)outside_heap)) << "outside_heap: " << outside_heap
                                          << " is unexpectedly in the heap";
 }

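The rewritten test no longer derives out-of-heap probe addresses from reserved_region(); it simply takes the address of a C++ stack local, which will not be a Java heap address. A standalone sketch of the same idea with a pretend heap range and plain asserts instead of gtest macros:

    #include <cstdint>
    #include <cassert>

    // Stand-in for a heap range check; in the real test this is CollectedHeap::is_in().
    static bool is_in_sketch(uint64_t heap_start, uint64_t heap_end, const void* p) {
      uint64_t a = (uint64_t)(uintptr_t)p;
      return a >= heap_start && a < heap_end;
    }

    int main() {
      // Pretend heap range (illustrative addresses only).
      uint64_t heap_start = 0x0000000700000000ULL;
      uint64_t heap_end   = 0x0000000720000000ULL;

      uint64_t epsilon = 8;                 // a stack local...
      void* outside_heap = &epsilon;        // ...whose address should not be a heap address

      assert(!is_in_sketch(heap_start, heap_end, nullptr));
      assert(!is_in_sketch(heap_start, heap_end, outside_heap));
      return 0;
    }
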
@@ -67,7 +67,9 @@ public class TestLargePageUseForHeap {
         String errorStr = "Reserve regular memory without large pages";
         String heapPattern = ".*Heap: ";
         // If errorStr is printed just before heap page log, reservation for Java Heap is failed.
-        String result = output.firstMatch(errorStr + "\n" + heapPattern);
+        String result = output.firstMatch(errorStr + "\n" +
+                                          "(?:.*Heap address: .*\n)?" // Heap address: 0x00000000f8000000, size: 128 MB, Compressed Oops mode: 32-bit
+                                          + heapPattern);
         if (result != null) {
             return false;
         }

@@ -149,4 +151,3 @@ public class TestLargePageUseForHeap {
         return longValue * multiplier;
     }
 }
-