mirror of
https://github.com/openjdk/jdk.git
synced 2025-08-27 06:45:07 +02:00
7181995: NMT ON: NMT assertion failure assert(cur_vm->is_uncommit_record() || cur_vm->is_deallocation_record
Fixed virtual memory record merge and promotion logic, which should be based on sequence number rather than base address order.
Reviewed-by: coleenp, acorn
This commit is contained in:
parent
6f32be5139
commit
442e4b0e54
7 changed files with 403 additions and 390 deletions
src/share/vm/runtime/thread.cpp
@@ -318,10 +318,9 @@ void Thread::record_stack_base_and_size() {
   set_stack_size(os::current_stack_size());

   // record thread's native stack, stack grows downward
-  address vm_base = _stack_base - _stack_size;
-  MemTracker::record_virtual_memory_reserve(vm_base, _stack_size,
-    CURRENT_PC, this);
-  MemTracker::record_virtual_memory_type(vm_base, mtThreadStack);
+  address low_stack_addr = stack_base() - stack_size();
+  MemTracker::record_thread_stack(low_stack_addr, stack_size(), this,
+    CURRENT_PC);
 }

@@ -329,8 +328,8 @@ Thread::~Thread() {
   // Reclaim the objectmonitors from the omFreeList of the moribund thread.
   ObjectSynchronizer::omFlush (this) ;

-  MemTracker::record_virtual_memory_release((_stack_base - _stack_size),
-    _stack_size, this);
+  address low_stack_addr = stack_base() - stack_size();
+  MemTracker::release_thread_stack(low_stack_addr, stack_size(), this);

   // deallocate data structures
   delete resource_area();
src/share/vm/services/memPtr.cpp
@@ -43,9 +43,9 @@ jint SequenceGenerator::next() {


 bool VMMemRegion::contains(const VMMemRegion* mr) const {
-  assert(base() != 0, "no base address");
-  assert(size() != 0 || committed_size() != 0,
-    "no range");
+  assert(base() != 0, "Sanity check");
+  assert(size() != 0 || committed_size() != 0,
+    "Sanity check");
   address base_addr = base();
   address end_addr = base_addr +
     (is_reserve_record()? reserved_size(): committed_size());
@@ -61,14 +61,14 @@ bool VMMemRegion::contains(const VMMemRegion* mr) const {
     return (mr->base() >= base_addr &&
       (mr->base() + mr->committed_size()) <= end_addr);
   } else if (mr->is_type_tagging_record()) {
-    assert(mr->base() != 0, "no base");
-    return mr->base() == base_addr;
+    assert(mr->base() != NULL, "Sanity check");
+    return (mr->base() >= base_addr && mr->base() < end_addr);
   } else if (mr->is_release_record()) {
     assert(mr->base() != 0 && mr->size() > 0,
       "bad record");
     return (mr->base() == base_addr && mr->size() == size());
   } else {
-    assert(false, "what happened?");
+    ShouldNotReachHere();
     return false;
   }
 }
src/share/vm/services/memPtrArray.hpp
@@ -84,11 +84,7 @@ class MemPointerArrayIterator VALUE_OBJ_CLASS_SPEC {

 // implementation class
 class MemPointerArrayIteratorImpl : public MemPointerArrayIterator {
-#ifdef ASSERT
  protected:
-#else
- private:
-#endif
   MemPointerArray* _array;
   int _pos;

src/share/vm/services/memSnapshot.cpp
@@ -31,148 +31,54 @@
 #include "services/memSnapshot.hpp"
 #include "services/memTracker.hpp"

-// stagging data groups the data of a VM memory range, so we can consolidate
-// them into one record during the walk
-bool StagingWalker::consolidate_vm_records(VMMemRegionEx* vm_rec) {
-  MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
-  assert(cur != NULL && cur->is_vm_pointer(), "not a virtual memory pointer");
-
-  jint cur_seq;
-  jint next_seq;
-
-  bool trackCallsite = MemTracker::track_callsite();
-
-  if (trackCallsite) {
-    vm_rec->init((MemPointerRecordEx*)cur);
-    cur_seq = ((SeqMemPointerRecordEx*)cur)->seq();
-  } else {
-    vm_rec->init((MemPointerRecord*)cur);
-    cur_seq = ((SeqMemPointerRecord*)cur)->seq();
-  }
-
-  // only can consolidate when we have allocation record,
-  // which contains virtual memory range
-  if (!cur->is_allocation_record()) {
-    _itr.next();
-    return true;
-  }
-
-  // allocation range
-  address base = cur->addr();
-  address end = base + cur->size();
-
-  MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
-  // if the memory range is alive
-  bool live_vm_rec = true;
-  while (next != NULL && next->is_vm_pointer()) {
-    if (next->is_allocation_record()) {
-      assert(next->addr() >= base, "sorting order or overlapping");
-      break;
-    }
-
-    if (trackCallsite) {
-      next_seq = ((SeqMemPointerRecordEx*)next)->seq();
-    } else {
-      next_seq = ((SeqMemPointerRecord*)next)->seq();
-    }
-
-    if (next_seq < cur_seq) {
-      _itr.next();
-      next = (MemPointerRecord*)_itr.peek_next();
-      continue;
-    }
-
-    if (next->is_deallocation_record()) {
-      if (next->addr() == base && next->size() == cur->size()) {
-        // the virtual memory range has been released
-        _itr.next();
-        live_vm_rec = false;
-        break;
-      } else if (next->addr() < end) { // partial release
-        vm_rec->partial_release(next->addr(), next->size());
-        _itr.next();
-      } else {
-        break;
-      }
-    } else if (next->is_commit_record()) {
-      if (next->addr() >= base && next->addr() + next->size() <= end) {
-        vm_rec->commit(next->size());
-        _itr.next();
-      } else {
-        assert(next->addr() >= base, "sorting order or overlapping");
-        break;
-      }
-    } else if (next->is_uncommit_record()) {
-      if (next->addr() >= base && next->addr() + next->size() <= end) {
-        vm_rec->uncommit(next->size());
-        _itr.next();
-      } else {
-        assert(next->addr() >= end, "sorting order or overlapping");
-        break;
-      }
-    } else if (next->is_type_tagging_record()) {
-      if (next->addr() >= base && next->addr() < end ) {
-        vm_rec->tag(next->flags());
-        _itr.next();
-      } else {
-        break;
-      }
-    } else {
-      assert(false, "unknown record type");
-    }
-    next = (MemPointerRecord*)_itr.peek_next();
-  }
-  _itr.next();
-  return live_vm_rec;
-}
-
-MemPointer* StagingWalker::next() {
-  MemPointerRecord* cur_p = (MemPointerRecord*)_itr.current();
-  if (cur_p == NULL) {
-    _end_of_array = true;
-    return NULL;
-  }
-
-  MemPointerRecord* next_p;
-  if (cur_p->is_vm_pointer()) {
-    _is_vm_record = true;
-    if (!consolidate_vm_records(&_vm_record)) {
-      return next();
-    }
-  } else { // malloc-ed pointer
-    _is_vm_record = false;
-    next_p = (MemPointerRecord*)_itr.peek_next();
-    if (next_p != NULL && next_p->addr() == cur_p->addr()) {
-      assert(cur_p->is_allocation_record(), "sorting order");
-      assert(!next_p->is_allocation_record(), "sorting order");
-      _itr.next();
-      if (cur_p->seq() < next_p->seq()) {
-        cur_p = next_p;
-      }
-    }
-    if (MemTracker::track_callsite()) {
-      _malloc_record.init((MemPointerRecordEx*)cur_p);
-    } else {
-      _malloc_record.init((MemPointerRecord*)cur_p);
-    }
-
-    _itr.next();
-  }
-  return current();
+static int sort_in_seq_order(const void* p1, const void* p2) {
+  assert(p1 != NULL && p2 != NULL, "Sanity check");
+  const MemPointerRecord* mp1 = (MemPointerRecord*)p1;
+  const MemPointerRecord* mp2 = (MemPointerRecord*)p2;
+  return (mp1->seq() - mp2->seq());
+}
+
+bool StagingArea::init() {
+  if (MemTracker::track_callsite()) {
+    _malloc_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecordEx>();
+    _vm_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecordEx>();
+  } else {
+    _malloc_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecord>();
+    _vm_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecord>();
+  }
+
+  if (_malloc_data != NULL && _vm_data != NULL &&
+      !_malloc_data->out_of_memory() &&
+      !_vm_data->out_of_memory()) {
+    return true;
+  } else {
+    if (_malloc_data != NULL) delete _malloc_data;
+    if (_vm_data != NULL) delete _vm_data;
+    _malloc_data = NULL;
+    _vm_data = NULL;
+    return false;
+  }
 }


+MemPointerArrayIteratorImpl StagingArea::virtual_memory_record_walker() {
+  MemPointerArray* arr = vm_data();
+  // sort into seq number order
+  arr->sort((FN_SORT)sort_in_seq_order);
+  return MemPointerArrayIteratorImpl(arr);
+}
+
+
 MemSnapshot::MemSnapshot() {
   if (MemTracker::track_callsite()) {
     _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecordEx>();
     _vm_ptrs = new (std::nothrow)MemPointerArrayImpl<VMMemRegionEx>(64, true);
-    _staging_area = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecordEx>();
   } else {
     _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecord>();
     _vm_ptrs = new (std::nothrow)MemPointerArrayImpl<VMMemRegion>(64, true);
-    _staging_area = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecord>();
   }

+  _staging_area.init();
   _lock = new (std::nothrow) Mutex(Monitor::max_nonleaf - 1, "memSnapshotLock");
   NOT_PRODUCT(_untracked_count = 0;)
 }
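
The new sort_in_seq_order comparator and virtual_memory_record_walker() above carry the fix named in the commit message: staged virtual memory records are sorted into sequence-number order before promotion, instead of being consolidated in base-address order. A minimal standalone sketch of the comparator idea, using a hypothetical Record type rather than the HotSpot classes:

    #include <cassert>
    #include <cstdlib>

    // Hypothetical stand-in for a staged NMT record: each record carries the
    // global sequence number assigned when the operation happened.
    struct Record {
      int   seq;   // temporal order of the operation
      void* addr;  // base address of the region it applies to
    };

    // qsort-style comparator: order records by the time they were generated,
    // not by the address they refer to.
    static int sort_in_seq_order(const void* p1, const void* p2) {
      assert(p1 != NULL && p2 != NULL);
      const Record* r1 = static_cast<const Record*>(p1);
      const Record* r2 = static_cast<const Record*>(p2);
      return r1->seq - r2->seq;
    }

    int main() {
      // Records arrive grouped by address; replaying them in that order can
      // apply an uncommit before the commit it undoes. Sorting by seq restores
      // the order in which the VM actually performed the operations.
      Record staged[3] = { {5, (void*)0x2000}, {1, (void*)0x1000}, {3, (void*)0x2000} };
      qsort(staged, 3, sizeof(Record), sort_in_seq_order);
      assert(staged[0].seq == 1 && staged[1].seq == 3 && staged[2].seq == 5);
      return 0;
    }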
@@ -181,11 +87,6 @@ MemSnapshot::~MemSnapshot() {
   assert(MemTracker::shutdown_in_progress(), "native memory tracking still on");
   {
     MutexLockerEx locker(_lock);
-    if (_staging_area != NULL) {
-      delete _staging_area;
-      _staging_area = NULL;
-    }
-
     if (_alloc_ptrs != NULL) {
       delete _alloc_ptrs;
       _alloc_ptrs = NULL;
@@ -221,33 +122,34 @@ void MemSnapshot::copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
 bool MemSnapshot::merge(MemRecorder* rec) {
   assert(rec != NULL && !rec->out_of_memory(), "Just check");

-  // out of memory
-  if (_staging_area == NULL || _staging_area->out_of_memory()) {
-    return false;
-  }
-
   SequencedRecordIterator itr(rec->pointer_itr());

   MutexLockerEx lock(_lock, true);
-  MemPointerIterator staging_itr(_staging_area);
+  MemPointerIterator malloc_staging_itr(_staging_area.malloc_data());
   MemPointerRecord *p1, *p2;
   p1 = (MemPointerRecord*) itr.current();
   while (p1 != NULL) {
-    p2 = (MemPointerRecord*)staging_itr.locate(p1->addr());
-    // we have not seen this memory block, so just add to staging area
-    if (p2 == NULL) {
-      if (!staging_itr.insert(p1)) {
-        return false;
-      }
-    } else if (p1->addr() == p2->addr()) {
-      MemPointerRecord* staging_next = (MemPointerRecord*)staging_itr.peek_next();
-      // a memory block can have many tagging records, find right one to replace or
-      // right position to insert
-      while (staging_next != NULL && staging_next->addr() == p1->addr()) {
-        if ((staging_next->flags() & MemPointerRecord::tag_masks) <=
-          (p1->flags() & MemPointerRecord::tag_masks)) {
-          p2 = (MemPointerRecord*)staging_itr.next();
-          staging_next = (MemPointerRecord*)staging_itr.peek_next();
-        } else {
-          break;
+    if (p1->is_vm_pointer()) {
+      // we don't do anything with virtual memory records during merge
+      if (!_staging_area.vm_data()->append(p1)) {
+        return false;
+      }
+    } else {
+      p2 = (MemPointerRecord*)malloc_staging_itr.locate(p1->addr());
+      // we have not seen this memory block, so just add to staging area
+      if (p2 == NULL) {
+        if (!malloc_staging_itr.insert(p1)) {
+          return false;
+        }
+      } else if (p1->addr() == p2->addr()) {
+        MemPointerRecord* staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
+        // a memory block can have many tagging records, find right one to replace or
+        // right position to insert
+        while (staging_next != NULL && staging_next->addr() == p1->addr()) {
+          if ((staging_next->flags() & MemPointerRecord::tag_masks) <=
+            (p1->flags() & MemPointerRecord::tag_masks)) {
+            p2 = (MemPointerRecord*)malloc_staging_itr.next();
+            staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
+          } else {
+            break;
+          }
         }
       }
@@ -261,23 +163,24 @@ bool MemSnapshot::merge(MemRecorder* rec) {
-          copy_pointer(p2, p1);
-        }
-      } else if (df < 0) {
-        if (!staging_itr.insert(p1)) {
-          return false;
-        }
-      } else {
-        if (!staging_itr.insert_after(p1)) {
-          return false;
-        }
-      }
-    } else if (p1->addr() < p2->addr()) {
-      if (!staging_itr.insert(p1)) {
-        return false;
-      }
-    } else {
-      if (!staging_itr.insert_after(p1)) {
-        return false;
+            copy_pointer(p2, p1);
+          }
+        } else if (df < 0) {
+          if (!malloc_staging_itr.insert(p1)) {
+            return false;
+          }
+        } else {
+          if (!malloc_staging_itr.insert_after(p1)) {
+            return false;
+          }
+        }
+      } else if (p1->addr() < p2->addr()) {
+        if (!malloc_staging_itr.insert(p1)) {
+          return false;
+        }
+      } else {
+        if (!malloc_staging_itr.insert_after(p1)) {
+          return false;
+        }
       }
     }
     p1 = (MemPointerRecord*)itr.next();
   }
   NOT_PRODUCT(void check_staging_data();)
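
After this change merge() only routes records: malloc records keep the old address-sorted staging insert, while virtual memory records are appended untouched and deferred to promotion. A rough standalone sketch of that split, with hypothetical types (not the HotSpot arrays):

    #include <vector>
    #include <algorithm>

    // Hypothetical sketch of the split staging area: malloc records are kept
    // address-sorted for duplicate detection, virtual memory records are
    // appended as-is and only sorted (by seq) when promotion runs.
    struct Rec { bool is_vm; unsigned long addr; int seq; };

    struct Staging {
      std::vector<Rec> malloc_data;  // kept sorted by addr
      std::vector<Rec> vm_data;      // unsorted until promote()

      bool merge_one(const Rec& r) {
        if (r.is_vm) {
          vm_data.push_back(r);      // no per-record work during merge
          return true;
        }
        // sorted insert, mirroring locate()/insert() on the malloc side
        std::vector<Rec>::iterator it = std::lower_bound(
            malloc_data.begin(), malloc_data.end(), r,
            [](const Rec& a, const Rec& b) { return a.addr < b.addr; });
        malloc_data.insert(it, r);
        return true;
      }
    };

    int main() {
      Staging s;
      Rec vm  = {true,  0x7000, 11};
      Rec mal = {false, 0x1000, 12};
      s.merge_one(vm);
      s.merge_one(mal);
      return (s.malloc_data.size() == 1 && s.vm_data.size() == 1) ? 0 : 1;
    }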
@@ -287,122 +190,179 @@ bool MemSnapshot::merge(MemRecorder* rec) {


 // promote data to next generation
-void MemSnapshot::promote() {
-  assert(_alloc_ptrs != NULL && _staging_area != NULL && _vm_ptrs != NULL,
-    "Just check");
-  MutexLockerEx lock(_lock, true);
-  StagingWalker walker(_staging_area);
-  MemPointerIterator malloc_itr(_alloc_ptrs);
-  VMMemPointerIterator vm_itr(_vm_ptrs);
-  MemPointer* cur = walker.current();
-  while (cur != NULL) {
-    if (walker.is_vm_record()) {
-      VMMemRegion* cur_vm = (VMMemRegion*)cur;
-      VMMemRegion* p = (VMMemRegion*)vm_itr.locate(cur_vm->addr());
-      cur_vm = (VMMemRegion*)cur;
-      if (p != NULL && (p->contains(cur_vm) || p->base() == cur_vm->base())) {
-        assert(p->is_reserve_record() ||
-          p->is_commit_record(), "wrong vm record type");
-        // resize existing reserved range
-        if (cur_vm->is_reserve_record() && p->base() == cur_vm->base()) {
-          assert(cur_vm->size() >= p->committed_size(), "incorrect resizing");
-          p->set_reserved_size(cur_vm->size());
-        } else if (cur_vm->is_commit_record()) {
-          p->commit(cur_vm->committed_size());
-        } else if (cur_vm->is_uncommit_record()) {
-          p->uncommit(cur_vm->committed_size());
-          if (!p->is_reserve_record() && p->committed_size() == 0) {
-            vm_itr.remove();
-          }
-        } else if (cur_vm->is_type_tagging_record()) {
-          p->tag(cur_vm->flags());
-        } else if (cur_vm->is_release_record()) {
-          if (cur_vm->base() == p->base() && cur_vm->size() == p->size()) {
-            // release the whole range
-            vm_itr.remove();
-          } else {
-            // partial release
-            p->partial_release(cur_vm->base(), cur_vm->size());
-          }
-        } else {
-          // we do see multiple reserver on the same vm range
-          assert((cur_vm->is_commit_record() || cur_vm->is_reserve_record()) &&
-            cur_vm->base() == p->base() && cur_vm->size() == p->size(), "bad record");
-          p->tag(cur_vm->flags());
-        }
-      } else {
-        if(cur_vm->is_reserve_record()) {
-          if (p == NULL || p->base() > cur_vm->base()) {
-            vm_itr.insert(cur_vm);
-          } else {
-            vm_itr.insert_after(cur_vm);
-          }
-        } else {
-          // In theory, we should assert without conditions. However, in case of native
-          // thread stack, NMT explicitly releases the thread stack in Thread's destructor,
-          // due to platform dependent behaviors. On some platforms, we see uncommit/release
-          // native thread stack, but some, we don't.
-          assert(cur_vm->is_uncommit_record() || cur_vm->is_deallocation_record(),
-            err_msg("Should not reach here, pointer addr = [" INTPTR_FORMAT "], flags = [%x]",
-            cur_vm->addr(), cur_vm->flags()));
-        }
-      }
-    } else {
-      MemPointerRecord* cur_p = (MemPointerRecord*)cur;
-      MemPointerRecord* p = (MemPointerRecord*)malloc_itr.locate(cur->addr());
-      if (p != NULL && cur_p->addr() == p->addr()) {
-        assert(p->is_allocation_record() || p->is_arena_size_record(), "untracked");
-        if (cur_p->is_allocation_record() || cur_p->is_arena_size_record()) {
-          copy_pointer(p, cur_p);
-        } else { // deallocation record
-          assert(cur_p->is_deallocation_record(), "wrong record type");
-
-          // we are removing an arena record, we also need to remove its 'size'
-          // record behind it
-          if (p->is_arena_record()) {
-            MemPointerRecord* next_p = (MemPointerRecord*)malloc_itr.peek_next();
-            if (next_p->is_arena_size_record()) {
-              assert(next_p->is_size_record_of_arena(p), "arena records dont match");
-              malloc_itr.remove();
-            }
-          }
-          malloc_itr.remove();
-        }
-      } else {
-        if (cur_p->is_arena_size_record()) {
-          MemPointerRecord* prev_p = (MemPointerRecord*)malloc_itr.peek_prev();
-          if (prev_p != NULL &&
-            (!prev_p->is_arena_record() || !cur_p->is_size_record_of_arena(prev_p))) {
-            // arena already deallocated
-            cur_p = NULL;
-          }
-        }
-        if (cur_p != NULL) {
-          if (cur_p->is_allocation_record() || cur_p->is_arena_size_record()) {
-            if (p != NULL && cur_p->addr() > p->addr()) {
-              malloc_itr.insert_after(cur);
-            } else {
-              malloc_itr.insert(cur);
-            }
-          }
-#ifndef PRODUCT
-          else if (!has_allocation_record(cur_p->addr())){
-            // NMT can not track some startup memory, which allocated before NMT
-            // is enabled
-            _untracked_count ++;
-          }
-#endif
-        }
-      }
-    }
-    cur = walker.next();
-  }
-  NOT_PRODUCT(check_malloc_pointers();)
-  _staging_area->shrink();
-  _staging_area->clear();
+bool MemSnapshot::promote() {
+  assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check");
+  assert(_staging_area.malloc_data() != NULL && _staging_area.vm_data() != NULL,
+    "Just check");
+  MutexLockerEx lock(_lock, true);
+
+  MallocRecordIterator malloc_itr = _staging_area.malloc_record_walker();
+  bool promoted = false;
+  if (promote_malloc_records(&malloc_itr)) {
+    MemPointerArrayIteratorImpl vm_itr = _staging_area.virtual_memory_record_walker();
+    if (promote_virtual_memory_records(&vm_itr)) {
+      promoted = true;
+    }
+  }
+
+  NOT_PRODUCT(check_malloc_pointers();)
+  _staging_area.clear();
+  return promoted;
+}
+
+bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
+  MemPointerIterator malloc_snapshot_itr(_alloc_ptrs);
+  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
+  MemPointerRecord* matched_rec;
+  while (new_rec != NULL) {
+    matched_rec = (MemPointerRecord*)malloc_snapshot_itr.locate(new_rec->addr());
+    // found matched memory block
+    if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
+      // snapshot already contains 'lived' records
+      assert(matched_rec->is_allocation_record() || matched_rec->is_arena_size_record(),
+        "Sanity check");
+      // update block states
+      if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
+        copy_pointer(matched_rec, new_rec);
+      } else {
+        // a deallocation record
+        assert(new_rec->is_deallocation_record(), "Sanity check");
+        // an arena record can be followed by a size record, we need to remove both
+        if (matched_rec->is_arena_record()) {
+          MemPointerRecord* next = (MemPointerRecord*)malloc_snapshot_itr.peek_next();
+          if (next->is_arena_size_record()) {
+            // it has to match the arena record
+            assert(next->is_size_record_of_arena(matched_rec), "Sanity check");
+            malloc_snapshot_itr.remove();
+          }
+        }
+        // the memory is deallocated, remove related record(s)
+        malloc_snapshot_itr.remove();
+      }
+    } else {
+      // it is a new record, insert into snapshot
+      if (new_rec->is_arena_size_record()) {
+        MemPointerRecord* prev = (MemPointerRecord*)malloc_snapshot_itr.peek_prev();
+        if (prev == NULL || !prev->is_arena_record() || !new_rec->is_size_record_of_arena(prev)) {
+          // no matched arena record, ignore the size record
+          new_rec = NULL;
+        }
+      }
+      // only 'live' record can go into snapshot
+      if (new_rec != NULL) {
+        if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
+          if (matched_rec != NULL && new_rec->addr() > matched_rec->addr()) {
+            if (!malloc_snapshot_itr.insert_after(new_rec)) {
+              return false;
+            }
+          } else {
+            if (!malloc_snapshot_itr.insert(new_rec)) {
+              return false;
+            }
+          }
+        }
+#ifndef PRODUCT
+        else if (!has_allocation_record(new_rec->addr())) {
+          // NMT can not track some startup memory, which is allocated before NMT is on
+          _untracked_count ++;
+        }
+#endif
+      }
+    }
+    new_rec = (MemPointerRecord*)itr->next();
+  }
+  return true;
+}
+
+bool MemSnapshot::promote_virtual_memory_records(MemPointerArrayIterator* itr) {
+  VMMemPointerIterator vm_snapshot_itr(_vm_ptrs);
+  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
+  VMMemRegionEx new_vm_rec;
+  VMMemRegion* matched_rec;
+  while (new_rec != NULL) {
+    assert(new_rec->is_vm_pointer(), "Sanity check");
+    if (MemTracker::track_callsite()) {
+      new_vm_rec.init((MemPointerRecordEx*)new_rec);
+    } else {
+      new_vm_rec.init(new_rec);
+    }
+    matched_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
+    if (matched_rec != NULL &&
+        (matched_rec->contains(&new_vm_rec) || matched_rec->base() == new_vm_rec.base())) {
+      // snapshot can only have 'live' records
+      assert(matched_rec->is_reserve_record(), "Sanity check");
+      if (new_vm_rec.is_reserve_record() && matched_rec->base() == new_vm_rec.base()) {
+        // resize reserved virtual memory range
+        // resize has to cover committed area
+        assert(new_vm_rec.size() >= matched_rec->committed_size(), "Sanity check");
+        matched_rec->set_reserved_size(new_vm_rec.size());
+      } else if (new_vm_rec.is_commit_record()) {
+        // commit memory inside reserved memory range
+        assert(new_vm_rec.committed_size() <= matched_rec->reserved_size(), "Sanity check");
+        // thread stacks are marked committed, so we ignore 'commit' record for creating
+        // stack guard pages
+        if (FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) != mtThreadStack) {
+          matched_rec->commit(new_vm_rec.committed_size());
+        }
+      } else if (new_vm_rec.is_uncommit_record()) {
+        if (FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == mtThreadStack) {
+          // ignore 'uncommit' record from removing stack guard pages, uncommit
+          // thread stack as whole
+          if (matched_rec->committed_size() == new_vm_rec.committed_size()) {
+            matched_rec->uncommit(new_vm_rec.committed_size());
+          }
+        } else {
+          // uncommit memory inside reserved memory range
+          assert(new_vm_rec.committed_size() <= matched_rec->committed_size(),
+            "Sanity check");
+          matched_rec->uncommit(new_vm_rec.committed_size());
+        }
+      } else if (new_vm_rec.is_type_tagging_record()) {
+        // tag this virtual memory range to a memory type
+        // can not re-tag a memory range to different type
+        assert(FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == mtNone ||
+          FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_vm_rec.flags()),
+          "Sanity check");
+        matched_rec->tag(new_vm_rec.flags());
+      } else if (new_vm_rec.is_release_record()) {
+        // release part or whole memory range
+        if (new_vm_rec.base() == matched_rec->base() &&
+            new_vm_rec.size() == matched_rec->size()) {
+          // release whole virtual memory range
+          assert(matched_rec->committed_size() == 0, "Sanity check");
+          vm_snapshot_itr.remove();
+        } else {
+          // partial release
+          matched_rec->partial_release(new_vm_rec.base(), new_vm_rec.size());
+        }
+      } else {
+        // multiple reserve/commit on the same virtual memory range
+        assert((new_vm_rec.is_reserve_record() || new_vm_rec.is_commit_record()) &&
+          (new_vm_rec.base() == matched_rec->base() && new_vm_rec.size() == matched_rec->size()),
+          "Sanity check");
+        matched_rec->tag(new_vm_rec.flags());
+      }
+    } else {
+      // no matched record
+      if (new_vm_rec.is_reserve_record()) {
+        if (matched_rec == NULL || matched_rec->base() > new_vm_rec.base()) {
+          if (!vm_snapshot_itr.insert(&new_vm_rec)) {
+            return false;
+          }
+        } else {
+          if (!vm_snapshot_itr.insert_after(&new_vm_rec)) {
+            return false;
+          }
+        }
+      } else {
+        // throw out obsolete records, which are the commit/uncommit/release/tag records
+        // on memory regions that are already released.
+      }
+    }
+    new_rec = (MemPointerRecord*)itr->next();
+  }
+  return true;
 }

 #ifndef PRODUCT
 void MemSnapshot::print_snapshot_stats(outputStream* st) {
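
The promotion rewrite above is where the assertion from the bug title used to fire: the old promote() consumed staged virtual memory records in base-address order, so an uncommit could be replayed before the commit it undoes. A toy replay, with illustrative types and sizes only, showing why sequence order is the safe order:

    #include <cassert>
    #include <cstddef>
    #include <vector>
    #include <algorithm>

    // A toy replay of virtual-memory records against a region, to illustrate
    // why promotion has to consume records in sequence-number order.
    enum Op { RESERVE, COMMIT, UNCOMMIT, RELEASE };
    struct VMRec { int seq; Op op; size_t size; };

    struct Region {
      size_t reserved;
      size_t committed;
      void apply(const VMRec& r) {
        switch (r.op) {
          case RESERVE:  reserved = r.size;           break;
          case COMMIT:   committed += r.size;         break;
          case UNCOMMIT: assert(committed >= r.size); // fires if replayed out of order
                         committed -= r.size;         break;
          case RELEASE:  assert(committed == 0);
                         reserved = 0;                break;
        }
      }
    };

    int main() {
      // Generated order: reserve (seq 1), commit (seq 2), uncommit (seq 3).
      // If the staging area hands them over in any other order, the UNCOMMIT
      // branch asserts -- the shape of the failure in this bug report.
      std::vector<VMRec> recs = { {2, COMMIT, 4096}, {1, RESERVE, 8192}, {3, UNCOMMIT, 4096} };
      std::sort(recs.begin(), recs.end(),
                [](const VMRec& a, const VMRec& b) { return a.seq < b.seq; });
      Region rg = {0, 0};
      for (size_t i = 0; i < recs.size(); i++) rg.apply(recs[i]);
      assert(rg.reserved == 8192 && rg.committed == 0);
      return 0;
    }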
@@ -413,8 +373,15 @@ void MemSnapshot::print_snapshot_stats(outputStream* st) {
   st->print_cr("\tVM: %d/%d [%5.2f%%] %dKB", _vm_ptrs->length(), _vm_ptrs->capacity(),
     (100.0 * (float)_vm_ptrs->length()) / (float)_vm_ptrs->capacity(), _vm_ptrs->instance_size()/K);

-  st->print_cr("\tStaging: %d/%d [%5.2f%%] %dKB", _staging_area->length(), _staging_area->capacity(),
-    (100.0 * (float)_staging_area->length()) / (float)_staging_area->capacity(), _staging_area->instance_size()/K);
+  st->print_cr("\tMalloc staging Area: %d/%d [%5.2f%%] %dKB", _staging_area.malloc_data()->length(),
+    _staging_area.malloc_data()->capacity(),
+    (100.0 * (float)_staging_area.malloc_data()->length()) / (float)_staging_area.malloc_data()->capacity(),
+    _staging_area.malloc_data()->instance_size()/K);
+
+  st->print_cr("\tVirtual memory staging Area: %d/%d [%5.2f%%] %dKB", _staging_area.vm_data()->length(),
+    _staging_area.vm_data()->capacity(),
+    (100.0 * (float)_staging_area.vm_data()->length()) / (float)_staging_area.vm_data()->capacity(),
+    _staging_area.vm_data()->instance_size()/K);
+
   st->print_cr("\tUntracked allocation: %d", _untracked_count);
 }
@@ -433,7 +400,7 @@ void MemSnapshot::check_malloc_pointers() {
 }

 bool MemSnapshot::has_allocation_record(address addr) {
-  MemPointerArrayIteratorImpl itr(_staging_area);
+  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
   MemPointerRecord* cur = (MemPointerRecord*)itr.current();
   while (cur != NULL) {
     if (cur->addr() == addr && cur->is_allocation_record()) {
@@ -447,7 +414,7 @@ bool MemSnapshot::has_allocation_record(address addr) {

 #ifdef ASSERT
 void MemSnapshot::check_staging_data() {
-  MemPointerArrayIteratorImpl itr(_staging_area);
+  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
   MemPointerRecord* cur = (MemPointerRecord*)itr.current();
   MemPointerRecord* next = (MemPointerRecord*)itr.next();
   while (next != NULL) {
@@ -458,6 +425,13 @@ void MemSnapshot::check_staging_data() {
     cur = next;
     next = (MemPointerRecord*)itr.next();
   }
+
+  MemPointerArrayIteratorImpl vm_itr(_staging_area.vm_data());
+  cur = (MemPointerRecord*)vm_itr.current();
+  while (cur != NULL) {
+    assert(cur->is_vm_pointer(), "virtual memory pointer only");
+    cur = (MemPointerRecord*)vm_itr.next();
+  }
 }
 #endif // ASSERT

src/share/vm/services/memSnapshot.hpp
@@ -111,38 +111,32 @@ class VMMemPointerIterator : public MemPointerIterator {
     MemPointerIterator(arr) {
   }

-  // locate an exiting record that contains specified address, or
+  // locate an existing record that contains specified address, or
   // the record, where the record with specified address, should
-  // be inserted
+  // be inserted.
+  // virtual memory record array is sorted in address order, so
+  // binary search is performed
   virtual MemPointer* locate(address addr) {
-    VMMemRegion* cur = (VMMemRegion*)current();
-    VMMemRegion* next_p;
-
-    while (cur != NULL) {
-      if (cur->base() > addr) {
-        return cur;
-      } else {
-        // find nearest existing range that has base address <= addr
-        next_p = (VMMemRegion*)peek_next();
-        if (next_p != NULL && next_p->base() <= addr) {
-          cur = (VMMemRegion*)next();
-          continue;
-        }
-      }
-
-      if (cur->is_reserve_record() &&
-        cur->base() <= addr &&
-        (cur->base() + cur->size() > addr)) {
-        return cur;
-      } else if (cur->is_commit_record() &&
-        cur->base() <= addr &&
-        (cur->base() + cur->committed_size() > addr)) {
-        return cur;
-      }
-      cur = (VMMemRegion*)next();
-    }
-    return NULL;
+    int index_low = 0;
+    int index_high = _array->length();
+    int index_mid = (index_high + index_low) / 2;
+    int r = 1;
+    while (index_low < index_high && (r = compare(index_mid, addr)) != 0) {
+      if (r > 0) {
+        index_high = index_mid;
+      } else {
+        index_low = index_mid;
+      }
+      index_mid = (index_high + index_low) / 2;
+    }
+    if (r == 0) {
+      // update current location
+      _pos = index_mid;
+      return _array->at(index_mid);
+    } else {
+      return NULL;
+    }
   }

 #ifdef ASSERT
   virtual bool is_dup_pointer(const MemPointer* ptr1,
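
locate() now binary-searches the snapshot's reserve records, which is valid because _vm_ptrs is kept sorted by base address and regions do not overlap. A standalone sketch of the same search written with a conventional half-open window (hypothetical Rgn type; not the committed loop above):

    #include <cassert>

    // Standalone sketch of binary search over regions sorted by base address.
    // compare() returns 0 when addr falls inside region i, 1 when region i lies
    // above addr, -1 when it lies below -- mirroring VMMemPointerIterator::compare.
    struct Rgn { unsigned long base; unsigned long size; };

    static int compare(const Rgn& r, unsigned long addr) {
      if (r.base > addr) return 1;
      if (r.base + r.size <= addr) return -1;
      return 0;
    }

    // Returns the index of the region containing addr, or -1 if none.
    int locate(const Rgn* regions, int len, unsigned long addr) {
      int lo = 0, hi = len;            // half-open search window [lo, hi)
      while (lo < hi) {
        int mid = lo + (hi - lo) / 2;
        int r = compare(regions[mid], addr);
        if (r == 0) return mid;
        if (r > 0)  hi = mid;          // region above addr: search lower half
        else        lo = mid + 1;      // region below addr: search upper half
      }
      return -1;
    }

    int main() {
      Rgn regions[2] = { {0x1000, 0x1000}, {0x4000, 0x2000} };
      assert(locate(regions, 2, 0x4800) == 1);   // inside the second region
      assert(locate(regions, 2, 0x3000) == -1);  // in the gap between regions
      return 0;
    }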
@@ -160,75 +154,99 @@ class VMMemPointerIterator : public MemPointerIterator {
       (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
   }
 #endif
+
+  // compare if an address falls into a memory region,
+  // return 0, if the address falls into a memory region at specified index
+  // return 1, if memory region pointed by specified index is higher than the address
+  // return -1, if memory region pointed by specified index is lower than the address
+  int compare(int index, address addr) const {
+    VMMemRegion* r = (VMMemRegion*)_array->at(index);
+    assert(r->is_reserve_record(), "Sanity check");
+    if (r->addr() > addr) {
+      return 1;
+    } else if (r->addr() + r->reserved_size() <= addr) {
+      return -1;
+    } else {
+      return 0;
+    }
+  }
 };

-class StagingWalker : public MemPointerArrayIterator {
+class MallocRecordIterator : public MemPointerArrayIterator {
  private:
   MemPointerArrayIteratorImpl  _itr;
-  bool                         _is_vm_record;
-  bool                         _end_of_array;
-  VMMemRegionEx                _vm_record;
-  MemPointerRecordEx           _malloc_record;

  public:
-  StagingWalker(MemPointerArray* arr): _itr(arr) {
-    _end_of_array = false;
-    next();
+  MallocRecordIterator(MemPointerArray* arr) : _itr(arr) {
   }

-  // return the pointer at current position
   MemPointer* current() const {
-    if (_end_of_array) {
+    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
+    assert(cur == NULL || !cur->is_vm_pointer(), "seek error");
+    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
+    if (next == NULL || next->addr() != cur->addr()) {
+      return cur;
+    } else {
+      assert(!cur->is_vm_pointer(), "Sanity check");
+      assert(cur->is_allocation_record() && next->is_deallocation_record(),
+        "sorting order");
+      assert(cur->seq() != next->seq(), "Sanity check");
+      return cur->seq() > next->seq() ? cur : next;
+    }
+  }
+
+  MemPointer* next() {
+    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
+    assert(cur == NULL || !cur->is_vm_pointer(), "Sanity check");
+    MemPointerRecord* next = (MemPointerRecord*)_itr.next();
+    if (next == NULL) {
       return NULL;
     }
-    if (is_vm_record()) {
-      return (MemPointer*)&_vm_record;
-    } else {
-      return (MemPointer*)&_malloc_record;
+    if (cur->addr() == next->addr()) {
+      next = (MemPointerRecord*)_itr.next();
     }
+    return current();
   }

-  // return the next pointer and advance current position
-  MemPointer* next();
-
-  // type of 'current' record
-  bool is_vm_record() const {
-    return _is_vm_record;
-  }
-
-  // return the next poinger without advancing current position
-  MemPointer* peek_next() const {
-    assert(false, "not supported");
-    return NULL;
-  }
-
-  MemPointer* peek_prev() const {
-    assert(false, "not supported");
-    return NULL;
-  }
-  // remove the pointer at current position
-  void remove() {
-    assert(false, "not supported");
-  }
-
-  // insert the pointer at current position
-  bool insert(MemPointer* ptr) {
-    assert(false, "not supported");
-    return false;
-  }
-
-  bool insert_after(MemPointer* ptr) {
-    assert(false, "not supported");
-    return false;
-  }
-
-  // consolidate all records referring to this vm region
-  bool consolidate_vm_records(VMMemRegionEx* vm_rec);
+  MemPointer* peek_next() const { ShouldNotReachHere(); return NULL; }
+  MemPointer* peek_prev() const { ShouldNotReachHere(); return NULL; }
+  void remove() { ShouldNotReachHere(); }
+  bool insert(MemPointer* ptr) { ShouldNotReachHere(); return false; }
+  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
+};
+
+class StagingArea : public _ValueObj {
+ private:
+  MemPointerArray*  _malloc_data;
+  MemPointerArray*  _vm_data;
+
+ public:
+  StagingArea() : _malloc_data(NULL), _vm_data(NULL) {
+    init();
+  }
+
+  ~StagingArea() {
+    if (_malloc_data != NULL) delete _malloc_data;
+    if (_vm_data != NULL) delete _vm_data;
+  }
+
+  MallocRecordIterator malloc_record_walker() {
+    return MallocRecordIterator(malloc_data());
+  }
+
+  MemPointerArrayIteratorImpl virtual_memory_record_walker();
+  bool init();
+  void clear() {
+    assert(_malloc_data != NULL && _vm_data != NULL, "Just check");
+    _malloc_data->shrink();
+    _malloc_data->clear();
+    _vm_data->clear();
+  }
+
+  inline MemPointerArray* malloc_data() { return _malloc_data; }
+  inline MemPointerArray* vm_data()     { return _vm_data; }
 };

 class MemBaseline;

 class MemSnapshot : public CHeapObj<mtNMT> {
  private:
   // the following two arrays contain records of all known lived memory blocks
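
MallocRecordIterator above encodes one rule: when an allocation and a deallocation of the same address land in the same generation, the record with the higher sequence number describes the block's final state. A small sketch of that rule in isolation, with a hypothetical record type:

    #include <cassert>
    #include <cstddef>

    // Sketch of the duplicate-address rule: among two adjacent records for the
    // same address, the one generated later (higher seq) wins.
    struct MRec { unsigned long addr; int seq; bool is_alloc; };

    const MRec* winner(const MRec* cur, const MRec* next) {
      if (next == NULL || next->addr != cur->addr) {
        return cur;                    // no duplicate, current record stands
      }
      assert(cur->seq != next->seq);   // sequence numbers are unique
      return cur->seq > next->seq ? cur : next;
    }

    int main() {
      MRec alloc   = {0x1000, 7, true};
      MRec dealloc = {0x1000, 9, false};
      // malloc then free of the same address within one generation:
      // the free (seq 9) is what survives.
      assert(winner(&alloc, &dealloc) == &dealloc);
      return 0;
    }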
@@ -237,9 +255,7 @@ class MemSnapshot : public CHeapObj<mtNMT> {
   // live virtual memory pointers
   MemPointerArray*  _vm_ptrs;

-  // stagging a generation's data, before
-  // it can be prompted to snapshot
-  MemPointerArray*  _staging_area;
+  StagingArea       _staging_area;

   // the lock to protect this snapshot
   Monitor*          _lock;
@@ -252,18 +268,19 @@ class MemSnapshot : public CHeapObj<mtNMT> {
   virtual ~MemSnapshot();

   // if we are running out of native memory
-  bool out_of_memory() const {
-    return (_alloc_ptrs == NULL || _staging_area == NULL ||
+  bool out_of_memory() {
+    return (_alloc_ptrs == NULL ||
+      _staging_area.malloc_data() == NULL ||
+      _staging_area.vm_data() == NULL ||
       _vm_ptrs == NULL || _lock == NULL ||
       _alloc_ptrs->out_of_memory() ||
-      _staging_area->out_of_memory() ||
       _vm_ptrs->out_of_memory());
   }

   // merge a per-thread memory recorder into staging area
   bool merge(MemRecorder* rec);
   // promote staged data to snapshot
-  void promote();
+  bool promote();


   void wait(long timeout) {
@@ -280,6 +297,9 @@ class MemSnapshot : public CHeapObj<mtNMT> {
  private:
   // copy pointer data from src to dest
   void copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
+
+  bool promote_malloc_records(MemPointerArrayIterator* itr);
+  bool promote_virtual_memory_records(MemPointerArrayIterator* itr);
 };

src/share/vm/services/memTrackWorker.cpp
@@ -118,7 +118,10 @@ void MemTrackWorker::run() {
         _head = (_head + 1) % MAX_GENERATIONS;
       }
       // promote this generation data to snapshot
-      snapshot->promote();
+      if (!snapshot->promote()) {
+        // failed to promote, means out of memory
+        MemTracker::shutdown(MemTracker::NMT_out_of_memory);
+      }
     } else {
       snapshot->wait(1000);
       ThreadCritical tc;
src/share/vm/services/memTracker.hpp
@@ -39,7 +39,7 @@
 #include "thread_solaris.inline.hpp"
 #endif

-#ifdef _DEBUG_
+#ifdef _DEBUG
 #define DEBUG_CALLER_PC  os::get_caller_pc(3)
 #else
 #define DEBUG_CALLER_PC  0
@@ -223,12 +223,33 @@ class MemTracker : AllStatic {
     }
   }

+  static inline void record_thread_stack(address addr, size_t size, Thread* thr,
+                           address pc = 0) {
+    if (is_on()) {
+      assert(size > 0 && thr != NULL, "Sanity check");
+      create_memory_record(addr, MemPointerRecord::virtual_memory_reserve_tag() | mtThreadStack,
+                           size, pc, thr);
+      create_memory_record(addr, MemPointerRecord::virtual_memory_commit_tag() | mtThreadStack,
+                           size, pc, thr);
+    }
+  }
+
+  static inline void release_thread_stack(address addr, size_t size, Thread* thr) {
+    if (is_on()) {
+      assert(size > 0 && thr != NULL, "Sanity check");
+      create_memory_record(addr, MemPointerRecord::virtual_memory_uncommit_tag() | mtThreadStack,
+                           size, DEBUG_CALLER_PC, thr);
+      create_memory_record(addr, MemPointerRecord::virtual_memory_release_tag() | mtThreadStack,
+                           size, DEBUG_CALLER_PC, thr);
+    }
+  }
+
   // record a virtual memory 'commit' call
   static inline void record_virtual_memory_commit(address addr, size_t size,
                             address pc = 0, Thread* thread = NULL) {
     if (is_on()) {
       create_memory_record(addr, MemPointerRecord::virtual_memory_commit_tag(),
-                           size, pc, thread);
+                           size, DEBUG_CALLER_PC, thread);
     }
   }
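
record_thread_stack() pairs a reserve record with a commit record, both tagged mtThreadStack, because a thread stack is treated as fully committed for tracking purposes, and release_thread_stack() emits the mirror-image uncommit/release pair. A sketch of the record stream one thread's lifetime produces, with illustrative tag names and sizes (not the MemTracker API):

    #include <cstdio>
    #include <cstddef>

    // Sketch of the record stream a thread stack now produces, assuming the
    // pairing introduced above: creation emits reserve+commit, destruction
    // emits uncommit+release, all tagged as thread-stack memory.
    enum Tag { TAG_RESERVE, TAG_COMMIT, TAG_UNCOMMIT, TAG_RELEASE };

    static void emit(Tag tag, void* addr, size_t size) {
      static const char* names[] = { "reserve", "commit", "uncommit", "release" };
      std::printf("%-8s mtThreadStack [%p, +%zu)\n", names[tag], addr, size);
    }

    static void record_thread_stack(void* low_addr, size_t size) {
      emit(TAG_RESERVE, low_addr, size);   // stack range is reserved...
      emit(TAG_COMMIT,  low_addr, size);   // ...and treated as fully committed
    }

    static void release_thread_stack(void* low_addr, size_t size) {
      emit(TAG_UNCOMMIT, low_addr, size);  // mirror image at destruction
      emit(TAG_RELEASE,  low_addr, size);
    }

    int main() {
      char stack[1];                       // stand-in for a real stack range
      record_thread_stack(stack, 512 * 1024);
      release_thread_stack(stack, 512 * 1024);
      return 0;
    }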
@@ -237,7 +258,7 @@ class MemTracker : AllStatic {
                             Thread* thread = NULL) {
     if (is_on()) {
       create_memory_record(addr, MemPointerRecord::virtual_memory_uncommit_tag(),
-                           size, 0, thread);
+                           size, DEBUG_CALLER_PC, thread);
     }
   }
@@ -246,7 +267,7 @@ class MemTracker : AllStatic {
                             Thread* thread = NULL) {
     if (is_on()) {
       create_memory_record(addr, MemPointerRecord::virtual_memory_release_tag(),
-                           size, 0, thread);
+                           size, DEBUG_CALLER_PC, thread);
     }
   }
@@ -257,7 +278,7 @@ class MemTracker : AllStatic {
     assert(base > 0, "wrong base address");
     assert((flags & (~mt_masks)) == 0, "memory type only");
     create_memory_record(base, (flags | MemPointerRecord::virtual_memory_type_tag()),
-                         0, 0, thread);
+                         0, DEBUG_CALLER_PC, thread);
   }
 }