Mirror of https://github.com/openjdk/jdk.git, synced 2025-08-27 06:45:07 +02:00
8001592: NMT: assertion failed: assert(_amount >= amt) failed: Just check: memBaseline.hpp:180

Fixed NMT miscounting of arena memory when an arena is used as a value or stack object.

Reviewed-by: acorn, coleenp

This commit is contained in:
parent 61a5a58cb1
commit f47de1cb41

5 changed files with 135 additions and 113 deletions
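
The bug involves arenas that NMT cannot pair with a malloc record: an Arena embedded inside another object (a value object) or created on the stack has no allocation record of its own, only arena memory (size) records. Below is a minimal, self-contained sketch of those two usage patterns; the types are simplified stand-ins, not HotSpot source.

#include <cstddef>

// Stand-in for HotSpot's Arena, reduced to the one behavior that matters
// here: an arena resets its size to 0 before it goes away.
struct Arena {
  size_t _size;
  Arena() : _size(0) { }
  ~Arena() { _size = 0; }
};

// Arena as a value object: embedded in an enclosing object, so NMT sees
// arena memory records for it but never a malloc record of the arena itself.
struct CompilerScratch {   // hypothetical enclosing object, not a VM type
  Arena _arena;
};

// Arena as a stack object: the same situation, no malloc record.
void use_stack_arena() {
  Arena local;
  (void)local;             // real code would allocate from 'local' here
}

int main() {
  CompilerScratch cs;
  (void)cs;
  use_stack_arena();
  return 0;
}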

services/memBaseline.cpp

@@ -115,17 +115,25 @@ bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records)
   while (malloc_ptr != NULL) {
     index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags()));
     size_t size = malloc_ptr->size();
-    _total_malloced += size;
-    _malloc_data[index].inc(size);
-    if (MemPointerRecord::is_arena_record(malloc_ptr->flags())) {
-      // see if arena size record present
-      MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next();
-      if (MemPointerRecord::is_arena_size_record(next_malloc_ptr->flags())) {
-        assert(next_malloc_ptr->is_size_record_of_arena(malloc_ptr), "arena records do not match");
-        size = next_malloc_ptr->size();
-        _arena_data[index].inc(size);
-        used_arena_size += size;
-        malloc_itr.next();
+    if (malloc_ptr->is_arena_memory_record()) {
+      // We do have anonymous arenas, they are either used as value objects,
+      // which are embedded inside other objects, or used as stack objects.
+      _arena_data[index].inc(size);
+      used_arena_size += size;
+    } else {
+      _total_malloced += size;
+      _malloc_data[index].inc(size);
+      if (malloc_ptr->is_arena_record()) {
+        // see if arena memory record present
+        MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next();
+        if (next_malloc_ptr->is_arena_memory_record()) {
+          assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr),
+             "Arena records do not match");
+          size = next_malloc_ptr->size();
+          _arena_data[index].inc(size);
+          used_arena_size += size;
+          malloc_itr.next();
+        }
       }
     }
     malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();

@@ -193,7 +201,7 @@ bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records)
 
   // baseline memory that is totaled over 1 KB
   while (malloc_ptr != NULL) {
-    if (!MemPointerRecord::is_arena_size_record(malloc_ptr->flags())) {
+    if (!MemPointerRecord::is_arena_memory_record(malloc_ptr->flags())) {
       // skip thread stacks
       if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) {
         if (malloc_callsite.addr() != malloc_ptr->pc()) {
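
A condensed sketch of the corrected accounting in baseline_malloc_summary() above, under simplified record and counter types (the real code uses MemPointerRecord and the _malloc_data/_arena_data counters): an arena memory record now feeds only the arena counters, where the old code also counted it as a malloc'd block and so miscounted anonymous arena memory.

#include <cstddef>

// Simplified stand-ins for NMT's MemPointerRecord and its counters.
struct Record {
  size_t size;
  bool   is_arena_memory_record;  // arena size record (tag_size bit set)
};

struct Counters {
  size_t total_malloced;
  size_t arena_amount;
};

// Mirrors the restructured loop body: arena memory records are counted
// as arena memory only, never folded into the malloc totals.
void account(const Record& rec, Counters& c) {
  if (rec.is_arena_memory_record) {
    c.arena_amount += rec.size;    // works for anonymous arenas too
  } else {
    c.total_malloced += rec.size;  // ordinary malloc'd block, or an arena
  }                                // object whose memory record follows it
}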

services/memPtr.hpp

@@ -165,7 +165,7 @@ public:
     return (flags & (otArena | tag_size)) == otArena;
   }
 
-  inline static bool is_arena_size_record(MEMFLAGS flags) {
+  inline static bool is_arena_memory_record(MEMFLAGS flags) {
     return (flags & (otArena | tag_size)) == (otArena | tag_size);
   }
 

@@ -256,8 +256,8 @@ public:
   }
 
   // if this record records a size information of an arena
-  inline bool is_arena_size_record() const {
-    return is_arena_size_record(_flags);
+  inline bool is_arena_memory_record() const {
+    return is_arena_memory_record(_flags);
   }
 
   // if this pointer represents an address to an arena object

@@ -266,8 +266,8 @@ public:
   }
 
   // if this record represents a size information of specific arena
-  inline bool is_size_record_of_arena(const MemPointerRecord* arena_rc) {
-    assert(is_arena_size_record(), "not size record");
+  inline bool is_memory_record_of_arena(const MemPointerRecord* arena_rc) {
+    assert(is_arena_memory_record(), "not size record");
     assert(arena_rc->is_arena_record(), "not arena record");
     return (arena_rc->addr() + sizeof(void*)) == addr();
   }
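
The renamed predicates differ only in the tag_size bit, as the bodies above show. A self-contained sketch of that bit test follows; the constant values are illustrative, not HotSpot's actual MEMFLAGS encoding.

#include <cassert>

typedef unsigned int MEMFLAGS;       // illustrative, not the VM's enum
const MEMFLAGS otArena  = 0x04;      // object type: arena
const MEMFLAGS tag_size = 0x10;      // record carries size information

// An arena record has the arena bit but not the size bit ...
inline bool is_arena_record(MEMFLAGS flags) {
  return (flags & (otArena | tag_size)) == otArena;
}

// ... while an arena memory (size) record has both bits set.
inline bool is_arena_memory_record(MEMFLAGS flags) {
  return (flags & (otArena | tag_size)) == (otArena | tag_size);
}

int main() {
  assert(is_arena_record(otArena));
  assert(!is_arena_record(otArena | tag_size));
  assert(is_arena_memory_record(otArena | tag_size));
  return 0;
}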

services/memSnapshot.cpp

@@ -50,7 +50,7 @@ void decode_pointer_record(MemPointerRecord* rec) {
       tty->print_cr(" (tag)");
     }
   } else {
-    if (rec->is_arena_size_record()) {
+    if (rec->is_arena_memory_record()) {
       tty->print_cr(" (arena size)");
     } else if (rec->is_allocation_record()) {
       tty->print_cr(" (malloc)");

@@ -390,21 +390,31 @@ MemSnapshot::~MemSnapshot() {
   }
 }
 
-void MemSnapshot::copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
+void MemSnapshot::copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
   assert(dest != NULL && src != NULL, "Just check");
   assert(dest->addr() == src->addr(), "Just check");
+  assert(dest->seq() > 0 && src->seq() > 0, "not sequenced");
 
-  MEMFLAGS flags = dest->flags();
+  if (MemTracker::track_callsite()) {
+    *(SeqMemPointerRecordEx*)dest = *(SeqMemPointerRecordEx*)src;
+  } else {
+    *(SeqMemPointerRecord*)dest = *(SeqMemPointerRecord*)src;
+  }
+}
+
+void MemSnapshot::assign_pointer(MemPointerRecord*dest, const MemPointerRecord* src) {
+  assert(src != NULL && dest != NULL, "Just check");
+  assert(dest->seq() == 0 && src->seq() >0, "cast away sequence");
 
   if (MemTracker::track_callsite()) {
     *(MemPointerRecordEx*)dest = *(MemPointerRecordEx*)src;
   } else {
-    *dest = *src;
+    *(MemPointerRecord*)dest = *(MemPointerRecord*)src;
   }
 }
 
 
-// merge a recorder to the staging area
+// merge a per-thread memory recorder to the staging area
 bool MemSnapshot::merge(MemRecorder* rec) {
   assert(rec != NULL && !rec->out_of_memory(), "Just check");
 
@@ -412,71 +422,45 @@ bool MemSnapshot::merge(MemRecorder* rec) {
 
   MutexLockerEx lock(_lock, true);
   MemPointerIterator malloc_staging_itr(_staging_area.malloc_data());
-  MemPointerRecord *p1, *p2;
-  p1 = (MemPointerRecord*) itr.current();
-  while (p1 != NULL) {
-    if (p1->is_vm_pointer()) {
+  MemPointerRecord* incoming_rec = (MemPointerRecord*) itr.current();
+  MemPointerRecord* matched_rec;
+
+  while (incoming_rec != NULL) {
+    if (incoming_rec->is_vm_pointer()) {
       // we don't do anything with virtual memory records during merge
-      if (!_staging_area.vm_data()->append(p1)) {
+      if (!_staging_area.vm_data()->append(incoming_rec)) {
         return false;
       }
     } else {
       // locate matched record and/or also position the iterator to proper
       // location for this incoming record.
-      p2 = (MemPointerRecord*)malloc_staging_itr.locate(p1->addr());
-      // we have not seen this memory block, so just add to staging area
-      if (p2 == NULL) {
-        if (!malloc_staging_itr.insert(p1)) {
+      matched_rec = (MemPointerRecord*)malloc_staging_itr.locate(incoming_rec->addr());
+      // we have not seen this memory block in this generation,
+      // so just add to staging area
+      if (matched_rec == NULL) {
+        if (!malloc_staging_itr.insert(incoming_rec)) {
           return false;
         }
-      } else if (p1->addr() == p2->addr()) {
-        MemPointerRecord* staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
-        // a memory block can have many tagging records, find right one to replace or
-        // right position to insert
-        while (staging_next != NULL && staging_next->addr() == p1->addr()) {
-          if ((staging_next->flags() & MemPointerRecord::tag_masks) <=
-            (p1->flags() & MemPointerRecord::tag_masks)) {
-            p2 = (MemPointerRecord*)malloc_staging_itr.next();
-            staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
-          } else {
-            break;
-          }
-        }
-        int df = (p1->flags() & MemPointerRecord::tag_masks) -
-          (p2->flags() & MemPointerRecord::tag_masks);
-        if (df == 0) {
-          assert(p1->seq() > 0, "not sequenced");
-          assert(p2->seq() > 0, "not sequenced");
-          if (p1->seq() > p2->seq()) {
-            copy_pointer(p2, p1);
-          }
-        } else if (df < 0) {
-          if (!malloc_staging_itr.insert(p1)) {
-            return false;
-          }
-        } else {
-          if (!malloc_staging_itr.insert_after(p1)) {
-            return false;
-          }
+      } else if (incoming_rec->addr() == matched_rec->addr()) {
+        // whoever has higher sequence number wins
+        if (incoming_rec->seq() > matched_rec->seq()) {
+          copy_seq_pointer(matched_rec, incoming_rec);
         }
-      } else if (p1->addr() < p2->addr()) {
-        if (!malloc_staging_itr.insert(p1)) {
+      } else if (incoming_rec->addr() < matched_rec->addr()) {
+        if (!malloc_staging_itr.insert(incoming_rec)) {
           return false;
         }
       } else {
-        if (!malloc_staging_itr.insert_after(p1)) {
-          return false;
-        }
+        ShouldNotReachHere();
       }
     }
-    p1 = (MemPointerRecord*)itr.next();
+    incoming_rec = (MemPointerRecord*)itr.next();
   }
   NOT_PRODUCT(void check_staging_data();)
   return true;
 }
 
 
-
 // promote data to next generation
 bool MemSnapshot::promote() {
   assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check");
@@ -507,20 +491,25 @@ bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
     // found matched memory block
     if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
       // snapshot already contains 'live' records
-      assert(matched_rec->is_allocation_record() || matched_rec->is_arena_size_record(),
+      assert(matched_rec->is_allocation_record() || matched_rec->is_arena_memory_record(),
              "Sanity check");
       // update block states
-      if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
-        copy_pointer(matched_rec, new_rec);
+      if (new_rec->is_allocation_record()) {
+        assign_pointer(matched_rec, new_rec);
+      } else if (new_rec->is_arena_memory_record()) {
+        if (new_rec->size() == 0) {
+          // remove size record once size drops to 0
+          malloc_snapshot_itr.remove();
+        } else {
+          assign_pointer(matched_rec, new_rec);
+        }
       } else {
         // a deallocation record
         assert(new_rec->is_deallocation_record(), "Sanity check");
         // an arena record can be followed by a size record, we need to remove both
         if (matched_rec->is_arena_record()) {
           MemPointerRecord* next = (MemPointerRecord*)malloc_snapshot_itr.peek_next();
-          if (next->is_arena_size_record()) {
-            // it has to match the arena record
-            assert(next->is_size_record_of_arena(matched_rec), "Sanity check");
+          if (next->is_arena_memory_record() && next->is_memory_record_of_arena(matched_rec)) {
             malloc_snapshot_itr.remove();
           }
         }

@@ -528,17 +517,13 @@ bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
           malloc_snapshot_itr.remove();
         }
       } else {
-        // it is a new record, insert into snapshot
-        if (new_rec->is_arena_size_record()) {
-          MemPointerRecord* prev = (MemPointerRecord*)malloc_snapshot_itr.peek_prev();
-          if (prev == NULL || !prev->is_arena_record() || !new_rec->is_size_record_of_arena(prev)) {
-            // no matched arena record, ignore the size record
-            new_rec = NULL;
-          }
+        // don't insert size 0 record
+        if (new_rec->is_arena_memory_record() && new_rec->size() == 0) {
+          new_rec = NULL;
         }
-        // only 'live' record can go into snapshot
+
         if (new_rec != NULL) {
-          if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
+          if (new_rec->is_allocation_record() || new_rec->is_arena_memory_record()) {
            if (matched_rec != NULL && new_rec->addr() > matched_rec->addr()) {
              if (!malloc_snapshot_itr.insert_after(new_rec)) {
                return false;
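
The merge() rewrite above drops the old tag-mask bookkeeping: for two records with the same address, whichever carries the higher sequence number wins. A reduced model of that rule, assuming a std::map in place of the sorted staging-area iterator:

#include <cstdint>
#include <map>

// Reduced model of the new merge rule: one staged record per address,
// reconciled purely by sequence number.
struct Rec {
  uintptr_t addr;
  unsigned  seq;    // global allocation sequence number
};

void merge_one(std::map<uintptr_t, Rec>& staging, const Rec& incoming) {
  auto it = staging.find(incoming.addr);
  if (it == staging.end()) {
    staging[incoming.addr] = incoming;   // first sighting: insert
  } else if (incoming.seq > it->second.seq) {
    it->second = incoming;               // newer record wins
  }
  // otherwise the staged record already supersedes the incoming one
}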

services/memSnapshot.hpp

@@ -31,7 +31,6 @@
 #include "services/memBaseline.hpp"
 #include "services/memPtrArray.hpp"
 
-
 // Snapshot pointer array iterator
 
 // The pointer array contains malloc-ed pointers

@@ -165,39 +164,58 @@ class VMMemPointerIterator : public MemPointerIterator {
 };
 
 class MallocRecordIterator : public MemPointerArrayIterator {
- protected:
+ private:
   MemPointerArrayIteratorImpl  _itr;
 
  public:
   MallocRecordIterator(MemPointerArray* arr) : _itr(arr) {
   }
 
   virtual MemPointer* current() const {
-    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
-    assert(cur == NULL || !cur->is_vm_pointer(), "seek error");
-    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
-    if (next == NULL || next->addr() != cur->addr()) {
-      return cur;
-    } else {
-      assert(!cur->is_vm_pointer(), "Sanity check");
-      assert(cur->is_allocation_record() && next->is_deallocation_record(),
-           "sorting order");
-      assert(cur->seq() != next->seq(), "Sanity check");
-      return cur->seq() > next->seq() ? cur : next;
+#ifdef ASSERT
+    MemPointer* cur_rec = _itr.current();
+    if (cur_rec != NULL) {
+      MemPointer* prev_rec = _itr.peek_prev();
+      MemPointer* next_rec = _itr.peek_next();
+      assert(prev_rec == NULL || prev_rec->addr() < cur_rec->addr(), "Sorting order");
+      assert(next_rec == NULL || next_rec->addr() > cur_rec->addr(), "Sorting order");
     }
+#endif
+    return _itr.current();
   }
 
   virtual MemPointer* next() {
-    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
-    assert(cur == NULL || !cur->is_vm_pointer(), "Sanity check");
-    MemPointerRecord* next = (MemPointerRecord*)_itr.next();
-    if (next == NULL) {
-      return NULL;
+    MemPointerRecord* next_rec = (MemPointerRecord*)_itr.next();
+    // arena memory record is a special case, which we have to compare
+    // sequence number against its associated arena record.
+    if (next_rec != NULL && next_rec->is_arena_memory_record()) {
+      MemPointerRecord* prev_rec = (MemPointerRecord*)_itr.peek_prev();
+      // if there is an associated arena record, it has to be previous
+      // record because of sorting order (by address) - NMT generates a pseudo address
+      // for arena's size record by offsetting arena's address, that guarantees
+      // the order of arena record and it's size record.
+      if (prev_rec != NULL && prev_rec->is_arena_record() &&
+          next_rec->is_memory_record_of_arena(prev_rec)) {
+        if (prev_rec->seq() > next_rec->seq()) {
+          // Skip this arena memory record
+          // Two scenarios:
+          //   - if the arena record is an allocation record, this early
+          //     size record must be leftover by previous arena,
+          //     and the last size record should have size = 0.
+          //   - if the arena record is a deallocation record, this
+          //     size record should be its cleanup record, which should
+          //     also have size = 0. In other world, arena alway reset
+          //     its size before gone (see Arena's destructor)
+          assert(next_rec->size() == 0, "size not reset");
+          return _itr.next();
+        } else {
+          assert(prev_rec->is_allocation_record(),
+              "Arena size record ahead of allocation record");
+        }
+      }
     }
-    if (cur->addr() == next->addr()) {
-      next = (MemPointerRecord*)_itr.next();
-    }
-    return current();
+    return next_rec;
   }
 
   MemPointer* peek_next() const { ShouldNotReachHere(); return NULL; }

@@ -213,9 +231,12 @@ class MallocRecordIterator : public MemPointerArrayIterator {
 // still chances seeing duplicated records during promotion.
 // We want to use the record with higher sequence number, because it has
 // more accurate callsite pc.
-class VMRecordIterator : public MallocRecordIterator {
+class VMRecordIterator : public MemPointerArrayIterator {
+ private:
+  MemPointerArrayIteratorImpl  _itr;
+
  public:
-  VMRecordIterator(MemPointerArray* arr) : MallocRecordIterator(arr) {
+  VMRecordIterator(MemPointerArray* arr) : _itr(arr) {
     MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
     MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
     while (next != NULL) {

@@ -256,6 +277,12 @@ class VMRecordIterator : public MallocRecordIterator {
     return cur;
   }
 
+  MemPointer* peek_next() const { ShouldNotReachHere(); return NULL; }
+  MemPointer* peek_prev() const { ShouldNotReachHere(); return NULL; }
+  void remove() { ShouldNotReachHere(); }
+  bool insert(MemPointer* ptr) { ShouldNotReachHere(); return false; }
+  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
+
  private:
   bool is_duplicated_record(MemPointerRecord* p1, MemPointerRecord* p2) const {
     bool ret = (p1->addr() == p2->addr() && p1->size() == p2->size() && p1->flags() == p2->flags());

@@ -348,8 +375,10 @@ class MemSnapshot : public CHeapObj<mtNMT> {
   DEBUG_ONLY( void dump_all_vm_pointers();)
 
  private:
-  // copy pointer data from src to dest
-  void copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
+  // copy sequenced pointer from src to dest
+  void copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
+  // assign a sequenced pointer to non-sequenced pointer
+  void assign_pointer(MemPointerRecord*dest, const MemPointerRecord* src);
 
   bool promote_malloc_records(MemPointerArrayIterator* itr);
   bool promote_virtual_memory_records(MemPointerArrayIterator* itr);
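
The skip rule added to MallocRecordIterator::next() above, in condensed form. This is a sketch under simplified types; the real iterator compares the arena memory record against the immediately preceding arena record in the address-sorted array.

#include <cstddef>

// Condensed model of the new rule: an arena memory record is surfaced only
// if it is at least as new as its owning arena record; a stale one (lower
// sequence number) is skipped, and its size should already be reset to 0.
struct ArenaPair {
  unsigned arena_seq;     // seq of the arena (allocation) record
  unsigned memory_seq;    // seq of its arena memory (size) record
  size_t   memory_size;
};

inline bool arena_memory_record_is_live(const ArenaPair& p) {
  if (p.arena_seq > p.memory_seq) {
    // leftover record from a previous arena at the same address, or the
    // cleanup record of a destroyed arena; memory_size should be 0 here
    return false;
  }
  return true;
}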

services/memTracker.hpp

@@ -284,14 +284,14 @@ class MemTracker : AllStatic {
     }
   }
 
-  // record arena size
+  // record arena memory size
   static inline void record_arena_size(address addr, size_t size) {
-    // we add a positive offset to arena address, so we can have arena size record
+    // we add a positive offset to arena address, so we can have arena memory record
     // sorted after arena record
     if (is_on() && !UseMallocOnly) {
       assert(addr != NULL, "Sanity check");
       create_memory_record((addr + sizeof(void*)), MemPointerRecord::arena_size_tag(), size,
-        0, NULL);
+        DEBUG_CALLER_PC, NULL);
     }
   }
 
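
record_arena_size() keys the arena memory record at the arena's address plus sizeof(void*), which is what guarantees the adjacency that is_memory_record_of_arena() and the iterators rely on. A small sketch of the ordering property; the helper functions are illustrative, not NMT code.

#include <cassert>
#include <cstdint>

typedef uintptr_t address;   // simplified stand-in for HotSpot's address

// The arena record is keyed at the arena's own address ...
inline address arena_record_key(address arena)        { return arena; }
// ... and its memory record at a pseudo address just past it, so in an
// address-sorted record array the pair is always adjacent and ordered.
inline address arena_memory_record_key(address arena) { return arena + sizeof(void*); }

int main() {
  address arena = 0x1000;
  assert(arena_record_key(arena) < arena_memory_record_key(arena));
  // This ordering is what lets MallocRecordIterator::next() find the
  // owning arena record as the immediately preceding record.
  return 0;
}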