8196405: [REDO] NMT: add_committed_regions doesn't merge succeeding regions

Reviewed-by: eosterlund, coleenp, zgu
Stefan Karlsson 2018-02-21 12:40:05 +01:00
parent f3fc38d5f5
commit f1540011d0
6 changed files with 1043 additions and 163 deletions

View file

@ -291,7 +291,7 @@ void MemDetailReporter::report_virtual_memory_region(const ReservedMemoryRegion*
outputStream* out = output();
const char* scale = current_scale();
const NativeCallStack* stack = reserved_rgn->call_stack();
bool all_committed = reserved_rgn->all_committed();
bool all_committed = reserved_rgn->size() == reserved_rgn->committed_size();
const char* region_type = (all_committed ? "reserved and committed" : "reserved");
out->print_cr(" ");
print_virtual_memory_region(region_type, reserved_rgn->base(), reserved_rgn->size());
@ -303,7 +303,17 @@ void MemDetailReporter::report_virtual_memory_region(const ReservedMemoryRegion*
stack->print_on(out, 4);
}
if (all_committed) return;
if (all_committed) {
CommittedRegionIterator itr = reserved_rgn->iterate_committed_regions();
const CommittedMemoryRegion* committed_rgn = itr.next();
if (committed_rgn->size() == reserved_rgn->size() && committed_rgn->call_stack()->equals(*stack)) {
// One region spanning the entire reserved region, with the same stack trace.
// Don't print this region because the "reserved and committed" line above
// already indicates that the region is committed.
assert(itr.next() == NULL, "Unexpectedly more than one region");
return;
}
}
CommittedRegionIterator itr = reserved_rgn->iterate_committed_regions();
const CommittedMemoryRegion* committed_rgn;
@ -745,4 +755,3 @@ void MemDetailDiffReporter::diff_virtual_memory_site(const NativeCallStack* stac
out->print_cr(")\n");
}

View file

@ -113,6 +113,8 @@ class Tracker : public StackObj {
};
class MemTracker : AllStatic {
friend class VirtualMemoryTrackerTest;
public:
static inline NMT_TrackingLevel tracking_level() {
if (_tracking_level == NMT_unknown) {
@ -215,8 +217,7 @@ class MemTracker : AllStatic {
if (addr != NULL) {
ThreadCritical tc;
if (tracking_level() < NMT_summary) return;
VirtualMemoryTracker::add_reserved_region((address)addr, size,
stack, flag, true);
VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, flag);
VirtualMemoryTracker::add_committed_region((address)addr, size, stack);
}
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -48,57 +48,105 @@ int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedM
return r1.compare(r2);
}
static bool is_mergeable_with(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
return rgn->adjacent_to(addr, size) && rgn->call_stack()->equals(stack);
}
static bool is_same_as(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
// It would have made sense to use rgn->equals(...), but equals returns true for overlapping regions.
return rgn->same_region(addr, size) && rgn->call_stack()->equals(stack);
}
static LinkedListNode<CommittedMemoryRegion>* find_preceding_node_from(LinkedListNode<CommittedMemoryRegion>* from, address addr) {
LinkedListNode<CommittedMemoryRegion>* preceding = NULL;
for (LinkedListNode<CommittedMemoryRegion>* node = from; node != NULL; node = node->next()) {
CommittedMemoryRegion* rgn = node->data();
// We searched past the region start.
if (rgn->end() > addr) {
break;
}
preceding = node;
}
return preceding;
}
static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, address addr, size_t size, const NativeCallStack& stack) {
if (node != NULL) {
CommittedMemoryRegion* rgn = node->data();
if (is_mergeable_with(rgn, addr, size, stack)) {
rgn->expand_region(addr, size);
return true;
}
}
return false;
}
static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, LinkedListNode<CommittedMemoryRegion>* other) {
if (other == NULL) {
return false;
}
CommittedMemoryRegion* rgn = other->data();
return try_merge_with(node, rgn->base(), rgn->size(), *rgn->call_stack());
}
bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
assert(addr != NULL, "Invalid address");
assert(size > 0, "Invalid size");
assert(contain_region(addr, size), "Not contain this region");
if (all_committed()) return true;
// Find the region that fully precedes the [addr, addr + size) region.
LinkedListNode<CommittedMemoryRegion>* prev = find_preceding_node_from(_committed_regions.head(), addr);
LinkedListNode<CommittedMemoryRegion>* next = (prev != NULL ? prev->next() : _committed_regions.head());
CommittedMemoryRegion committed_rgn(addr, size, stack);
LinkedListNode<CommittedMemoryRegion>* node = _committed_regions.head();
while (node != NULL) {
CommittedMemoryRegion* rgn = node->data();
if (rgn->same_region(addr, size)) {
if (next != NULL) {
// Ignore request if region already exists.
if (is_same_as(next->data(), addr, size, stack)) {
return true;
}
if (rgn->adjacent_to(addr, size)) {
// special case to expand prior region if there is no next region
LinkedListNode<CommittedMemoryRegion>* next = node->next();
if (next == NULL && rgn->call_stack()->equals(stack)) {
VirtualMemorySummary::record_uncommitted_memory(rgn->size(), flag());
// the two adjacent regions have the same call stack, merge them
rgn->expand_region(addr, size);
VirtualMemorySummary::record_committed_memory(rgn->size(), flag());
return true;
}
}
if (rgn->overlap_region(addr, size)) {
// Clear a space for this region in the case it overlaps with any regions.
// The new region is after prev, and either overlaps with the
// next region (and maybe more regions), or overlaps with no region.
if (next->data()->overlap_region(addr, size)) {
// Remove _all_ overlapping regions, and parts of regions,
// in preparation for the addition of this new region.
remove_uncommitted_region(addr, size);
break; // commit below
// The remove could have split a region into two and created a
// new prev region. Need to reset the prev and next pointers.
prev = find_preceding_node_from((prev != NULL ? prev : _committed_regions.head()), addr);
next = (prev != NULL ? prev->next() : _committed_regions.head());
}
if (rgn->end() >= addr + size) {
break;
}
node = node->next();
}
// New committed region
// At this point the previous overlapping regions have been
// cleared, and the full region is guaranteed to be inserted.
VirtualMemorySummary::record_committed_memory(size, flag());
return add_committed_region(committed_rgn);
// Try to merge with prev and possibly next.
if (try_merge_with(prev, addr, size, stack)) {
if (try_merge_with(prev, next)) {
// prev was expanded to contain the new region
// and next, need to remove next from the list
_committed_regions.remove_after(prev);
}
void ReservedMemoryRegion::set_all_committed(bool b) {
if (all_committed() != b) {
_all_committed = b;
if (b) {
VirtualMemorySummary::record_committed_memory(size(), flag());
return true;
}
// Didn't merge with prev, try with next.
if (try_merge_with(next, addr, size, stack)) {
return true;
}
// Couldn't merge with any regions - create a new region.
return add_committed_region(CommittedMemoryRegion(addr, size, stack));
}
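For context, here is a minimal standalone sketch (not part of this commit) of the merge strategy used above: committed ranges are kept sorted, a new range is first merged into an adjacent predecessor with the same call stack, the expanded predecessor may then absorb its successor, otherwise an adjacent successor is extended downwards, and only as a last resort is a new node inserted. The Range type, the int stack id, and std::list are simplified stand-ins for CommittedMemoryRegion, NativeCallStack, and the NMT linked list; unlike the real code, the sketch does not handle requests that overlap existing regions.

#include <cstdio>
#include <cstdint>
#include <list>

struct Range {                                    // stand-in for CommittedMemoryRegion
  uintptr_t base;
  size_t    size;
  int       stack;                                // stand-in for NativeCallStack
  uintptr_t end() const { return base + size; }
};

// Add [base, base + size) to a sorted list, merging with adjacent neighbors
// that carry the same stack, mirroring ReservedMemoryRegion::add_committed_region.
static void add_committed(std::list<Range>& regions, uintptr_t base, size_t size, int stack) {
  // Find the node that fully precedes the new range (its end() <= base).
  std::list<Range>::iterator next = regions.begin();
  std::list<Range>::iterator prev = regions.end();
  while (next != regions.end() && next->end() <= base) {
    prev = next;
    ++next;
  }
  if (prev != regions.end() && prev->end() == base && prev->stack == stack) {
    prev->size += size;                           // merge with the preceding region
    if (next != regions.end() && prev->end() == next->base && next->stack == stack) {
      prev->size += next->size;                   // the expanded prev now touches next: absorb it
      regions.erase(next);
    }
    return;
  }
  if (next != regions.end() && base + size == next->base && next->stack == stack) {
    next->base = base;                            // merge with the succeeding region
    next->size += size;
    return;
  }
  regions.insert(next, Range{base, size, stack}); // no merge possible: insert a new region
}

int main() {
  std::list<Range> regions;
  add_committed(regions, 0x1000, 0x1000, 1);
  add_committed(regions, 0x3000, 0x1000, 1);
  add_committed(regions, 0x2000, 0x1000, 1);      // bridges the two into one region
  for (const Range& r : regions) {
    std::printf("[0x%llx - 0x%llx)\n", (unsigned long long)r.base, (unsigned long long)r.end());
  }
  return 0;
}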
bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
@ -135,45 +183,9 @@ bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMem
}
bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
// uncommit stack guard pages
if (flag() == mtThreadStack && !same_region(addr, sz)) {
return true;
}
assert(addr != NULL, "Invalid address");
assert(sz > 0, "Invalid size");
if (all_committed()) {
assert(_committed_regions.is_empty(), "Sanity check");
assert(contain_region(addr, sz), "Reserved region does not contain this region");
set_all_committed(false);
VirtualMemorySummary::record_uncommitted_memory(sz, flag());
if (same_region(addr, sz)) {
return true;
} else {
CommittedMemoryRegion rgn(base(), size(), *call_stack());
if (rgn.base() == addr || rgn.end() == (addr + sz)) {
rgn.exclude_region(addr, sz);
return add_committed_region(rgn);
} else {
// split this region
// top of the whole region
address top = rgn.end();
// use this region for lower part
size_t exclude_size = rgn.end() - addr;
rgn.exclude_region(addr, exclude_size);
if (add_committed_region(rgn)) {
// higher part
address high_base = addr + sz;
size_t high_size = top - high_base;
CommittedMemoryRegion high_rgn(high_base, high_size, NativeCallStack::EMPTY_STACK);
return add_committed_region(high_rgn);
} else {
return false;
}
}
}
} else {
CommittedMemoryRegion del_rgn(addr, sz, *call_stack());
address end = addr + sz;
@ -223,7 +235,6 @@ bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
prev = head;
head = head->next();
}
}
return true;
}
@ -256,9 +267,6 @@ void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRe
}
size_t ReservedMemoryRegion::committed_size() const {
if (all_committed()) {
return size();
} else {
size_t committed = 0;
LinkedListNode<CommittedMemoryRegion>* head =
_committed_regions.head();
@ -268,7 +276,6 @@ size_t ReservedMemoryRegion::committed_size() const {
}
return committed;
}
}
void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
assert((flag() == mtNone || flag() == f), "Overwrite memory type");
@ -296,22 +303,16 @@ bool VirtualMemoryTracker::late_initialize(NMT_TrackingLevel level) {
}
bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
const NativeCallStack& stack, MEMFLAGS flag, bool all_committed) {
const NativeCallStack& stack, MEMFLAGS flag) {
assert(base_addr != NULL, "Invalid address");
assert(size > 0, "Invalid size");
assert(_reserved_regions != NULL, "Sanity check");
ReservedMemoryRegion rgn(base_addr, size, stack, flag);
ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
LinkedListNode<ReservedMemoryRegion>* node;
if (reserved_rgn == NULL) {
VirtualMemorySummary::record_reserved_memory(size, flag);
node = _reserved_regions->add(rgn);
if (node != NULL) {
node->data()->set_all_committed(all_committed);
return true;
} else {
return false;
}
return _reserved_regions->add(rgn) != NULL;
} else {
if (reserved_rgn->same_region(base_addr, size)) {
reserved_rgn->set_call_stack(stack);

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -210,6 +210,8 @@ class VirtualMemoryRegion VALUE_OBJ_CLASS_SPEC {
inline bool overlap_region(address addr, size_t sz) const {
assert(sz > 0, "Invalid size");
assert(size() > 0, "Invalid size");
VirtualMemoryRegion rgn(addr, sz);
return contain_address(addr) ||
contain_address(addr + sz - 1) ||
@ -295,18 +297,14 @@ class ReservedMemoryRegion : public VirtualMemoryRegion {
NativeCallStack _stack;
MEMFLAGS _flag;
bool _all_committed;
public:
ReservedMemoryRegion(address base, size_t size, const NativeCallStack& stack,
MEMFLAGS flag = mtNone) :
VirtualMemoryRegion(base, size), _stack(stack), _flag(flag),
_all_committed(false) { }
VirtualMemoryRegion(base, size), _stack(stack), _flag(flag) { }
ReservedMemoryRegion(address base, size_t size) :
VirtualMemoryRegion(base, size), _stack(NativeCallStack::EMPTY_STACK), _flag(mtNone),
_all_committed(false) { }
VirtualMemoryRegion(base, size), _stack(NativeCallStack::EMPTY_STACK), _flag(mtNone) { }
// Copy constructor
ReservedMemoryRegion(const ReservedMemoryRegion& rr) :
@ -347,9 +345,6 @@ class ReservedMemoryRegion : public VirtualMemoryRegion {
// the new region
void move_committed_regions(address addr, ReservedMemoryRegion& rgn);
inline bool all_committed() const { return _all_committed; }
void set_all_committed(bool b);
CommittedRegionIterator iterate_committed_regions() const {
return CommittedRegionIterator(_committed_regions.head());
}
@ -360,17 +355,14 @@ class ReservedMemoryRegion : public VirtualMemoryRegion {
_stack = *other.call_stack();
_flag = other.flag();
_all_committed = other.all_committed();
if (other.all_committed()) {
set_all_committed(true);
} else {
CommittedRegionIterator itr = other.iterate_committed_regions();
const CommittedMemoryRegion* rgn = itr.next();
while (rgn != NULL) {
_committed_regions.add(*rgn);
rgn = itr.next();
}
}
return *this;
}
@ -396,14 +388,15 @@ class VirtualMemoryWalker : public StackObj {
// Main class called from MemTracker to track virtual memory allocations, commits and releases.
class VirtualMemoryTracker : AllStatic {
friend class VirtualMemoryTrackerTest;
public:
static bool initialize(NMT_TrackingLevel level);
// Late phase initialization
static bool late_initialize(NMT_TrackingLevel level);
static bool add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack,
MEMFLAGS flag = mtNone, bool all_committed = false);
static bool add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack, MEMFLAGS flag = mtNone);
static bool add_committed_region (address base_addr, size_t size, const NativeCallStack& stack);
static bool remove_uncommitted_region (address base_addr, size_t size);

View file

@ -0,0 +1,550 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
// Included early because the NMT flags don't include it.
#include "utilities/macros.hpp"
#if INCLUDE_NMT
#include "services/memTracker.hpp"
#include "services/virtualMemoryTracker.hpp"
#include "utilities/globalDefinitions.hpp"
#include "unittest.hpp"
namespace {
struct R {
address _addr;
size_t _size;
};
}
#define check(rmr, regions) check_inner((rmr), (regions), ARRAY_SIZE(regions), __FILE__, __LINE__)
#define check_empty(rmr) \
do { \
check_inner((rmr), NULL, 0, __FILE__, __LINE__); \
} while (false)
static void check_inner(ReservedMemoryRegion* rmr, R* regions, size_t regions_size, const char* file, int line) {
CommittedRegionIterator iter = rmr->iterate_committed_regions();
size_t i = 0;
size_t size = 0;
#define WHERE " from " << file << ":" << line
for (const CommittedMemoryRegion* region = iter.next(); region != NULL; region = iter.next()) {
EXPECT_LT(i, regions_size) << WHERE;
EXPECT_EQ(region->base(), regions[i]._addr) << WHERE;
EXPECT_EQ(region->size(), regions[i]._size) << WHERE;
size += region->size();
i++;
}
EXPECT_EQ(i, regions_size) << WHERE;
EXPECT_EQ(size, rmr->committed_size()) << WHERE;
}
class VirtualMemoryTrackerTest {
public:
static void test_add_committed_region_adjacent() {
VirtualMemoryTracker::initialize(NMT_detail);
VirtualMemoryTracker::late_initialize(NMT_detail);
address addr = (address)0x10000000;
size_t size = 0x01000000;
address frame1 = (address)0x1234;
address frame2 = (address)0x1235;
NativeCallStack stack(&frame1, 1);
NativeCallStack stack2(&frame2, 1);
// Add the reserved memory
VirtualMemoryTracker::add_reserved_region(addr, size, stack, mtTest);
// Fetch the RMR added above
ReservedMemoryRegion* rmr = VirtualMemoryTracker::_reserved_regions->find(ReservedMemoryRegion(addr, size));
ASSERT_EQ(rmr->size(), size);
ASSERT_EQ(rmr->base(), addr);
// Commit Size Granularity
const size_t cs = 0x1000;
// Commit adjacent regions with same stack
{ // Commit one region
rmr->add_committed_region(addr + cs, cs, stack);
R r[] = { {addr + cs, cs} };
check(rmr, r);
}
{ // Commit adjacent - lower address
rmr->add_committed_region(addr, cs, stack);
R r[] = { {addr, 2 * cs} };
check(rmr, r);
}
{ // Commit adjacent - higher address
rmr->add_committed_region(addr + 2 * cs, cs, stack);
R r[] = { {addr, 3 * cs} };
check(rmr, r);
}
// Cleanup
rmr->remove_uncommitted_region(addr, 3 * cs);
ASSERT_EQ(rmr->committed_size(), 0u);
// Commit adjacent regions with different stacks
{ // Commit one region
rmr->add_committed_region(addr + cs, cs, stack);
R r[] = { {addr + cs, cs} };
check(rmr, r);
}
{ // Commit adjacent - lower address
rmr->add_committed_region(addr, cs, stack2);
R r[] = { {addr, cs},
{addr + cs, cs} };
check(rmr, r);
}
{ // Commit adjacent - higher address
rmr->add_committed_region(addr + 2 * cs, cs, stack2);
R r[] = { {addr, cs},
{addr + cs, cs},
{addr + 2 * cs, cs} };
check(rmr, r);
}
// Cleanup
rmr->remove_uncommitted_region(addr, 3 * cs);
ASSERT_EQ(rmr->committed_size(), 0u);
}
static void test_add_committed_region_adjacent_overlapping() {
VirtualMemoryTracker::initialize(NMT_detail);
VirtualMemoryTracker::late_initialize(NMT_detail);
address addr = (address)0x10000000;
size_t size = 0x01000000;
address frame1 = (address)0x1234;
address frame2 = (address)0x1235;
NativeCallStack stack(&frame1, 1);
NativeCallStack stack2(&frame2, 1);
// Add the reserved memory
VirtualMemoryTracker::add_reserved_region(addr, size, stack, mtTest);
// Fetch the RMR added above
ReservedMemoryRegion* rmr = VirtualMemoryTracker::_reserved_regions->find(ReservedMemoryRegion(addr, size));
ASSERT_EQ(rmr->size(), size);
ASSERT_EQ(rmr->base(), addr);
// Commit Size Granularity
const size_t cs = 0x1000;
// Commit adjacent and overlapping regions with same stack
{ // Commit two non-adjacent regions
rmr->add_committed_region(addr, 2 * cs, stack);
rmr->add_committed_region(addr + 3 * cs, 2 * cs, stack);
R r[] = { {addr, 2 * cs},
{addr + 3 * cs, 2 * cs} };
check(rmr, r);
}
{ // Commit adjacent and overlapping
rmr->add_committed_region(addr + 2 * cs, 2 * cs, stack);
R r[] = { {addr, 5 * cs} };
check(rmr, r);
}
// revert to two non-adjacent regions
rmr->remove_uncommitted_region(addr + 2 * cs, cs);
ASSERT_EQ(rmr->committed_size(), 4 * cs);
{ // Commit overlapping and adjacent
rmr->add_committed_region(addr + cs, 2 * cs, stack);
R r[] = { {addr, 5 * cs} };
check(rmr, r);
}
// Cleanup
rmr->remove_uncommitted_region(addr, 5 * cs);
ASSERT_EQ(rmr->committed_size(), 0u);
// Commit adjacent and overlapping regions with different stacks
{ // Commit two non-adjacent regions
rmr->add_committed_region(addr, 2 * cs, stack);
rmr->add_committed_region(addr + 3 * cs, 2 * cs, stack);
R r[] = { {addr, 2 * cs},
{addr + 3 * cs, 2 * cs} };
check(rmr, r);
}
{ // Commit adjacent and overlapping
rmr->add_committed_region(addr + 2 * cs, 2 * cs, stack2);
R r[] = { {addr, 2 * cs},
{addr + 2 * cs, 2 * cs},
{addr + 4 * cs, cs} };
check(rmr, r);
}
// revert to two non-adjacent regions
rmr->add_committed_region(addr, 5 * cs, stack);
rmr->remove_uncommitted_region(addr + 2 * cs, cs);
ASSERT_EQ(rmr->committed_size(), 4 * cs);
{ // Commit overlapping and adjacent
rmr->add_committed_region(addr + cs, 2 * cs, stack2);
R r[] = { {addr, cs},
{addr + cs, 2 * cs},
{addr + 3 * cs, 2 * cs} };
check(rmr, r);
}
}
static void test_add_committed_region_overlapping() {
VirtualMemoryTracker::initialize(NMT_detail);
VirtualMemoryTracker::late_initialize(NMT_detail);
address addr = (address)0x10000000;
size_t size = 0x01000000;
address frame1 = (address)0x1234;
address frame2 = (address)0x1235;
NativeCallStack stack(&frame1, 1);
NativeCallStack stack2(&frame2, 1);
// Add the reserved memory
VirtualMemoryTracker::add_reserved_region(addr, size, stack, mtTest);
// Fetch the RMR added above
ReservedMemoryRegion* rmr = VirtualMemoryTracker::_reserved_regions->find(ReservedMemoryRegion(addr, size));
ASSERT_EQ(rmr->size(), size);
ASSERT_EQ(rmr->base(), addr);
// Commit Size Granularity
const size_t cs = 0x1000;
// With same stack
{ // Commit one region
rmr->add_committed_region(addr, cs, stack);
R r[] = { {addr, cs} };
check(rmr, r);
}
{ // Commit the same region
rmr->add_committed_region(addr, cs, stack);
R r[] = { {addr, cs} };
check(rmr, r);
}
{ // Commit a succeeding region
rmr->add_committed_region(addr + cs, cs, stack);
R r[] = { {addr, 2 * cs} };
check(rmr, r);
}
{ // Commit over two regions
rmr->add_committed_region(addr, 2 * cs, stack);
R r[] = { {addr, 2 * cs} };
check(rmr, r);
}
{ // Commit first part of a region
rmr->add_committed_region(addr, cs, stack);
R r[] = { {addr, 2 * cs} };
check(rmr, r);
}
{ // Commit second part of a region
rmr->add_committed_region(addr + cs, cs, stack);
R r[] = { {addr, 2 * cs} };
check(rmr, r);
}
{ // Commit a third part
rmr->add_committed_region(addr + 2 * cs, cs, stack);
R r[] = { {addr, 3 * cs} };
check(rmr, r);
}
{ // Commit in the middle of a region
rmr->add_committed_region(addr + 1 * cs, cs, stack);
R r[] = { {addr, 3 * cs} };
check(rmr, r);
}
// Cleanup
rmr->remove_uncommitted_region(addr, 3 * cs);
ASSERT_EQ(rmr->committed_size(), 0u);
// With preceding region
rmr->add_committed_region(addr, cs, stack);
rmr->add_committed_region(addr + 2 * cs, 3 * cs, stack);
rmr->add_committed_region(addr + 2 * cs, cs, stack);
{
R r[] = { {addr, cs},
{addr + 2 * cs, 3 * cs} };
check(rmr, r);
}
rmr->add_committed_region(addr + 3 * cs, cs, stack);
{
R r[] = { {addr, cs},
{addr + 2 * cs, 3 * cs} };
check(rmr, r);
}
rmr->add_committed_region(addr + 4 * cs, cs, stack);
{
R r[] = { {addr, cs},
{addr + 2 * cs, 3 * cs} };
check(rmr, r);
}
// Cleanup
rmr->remove_uncommitted_region(addr, 5 * cs);
ASSERT_EQ(rmr->committed_size(), 0u);
// With different stacks
{ // Commit one region
rmr->add_committed_region(addr, cs, stack);
R r[] = { {addr, cs} };
check(rmr, r);
}
{ // Commit the same region
rmr->add_committed_region(addr, cs, stack2);
R r[] = { {addr, cs} };
check(rmr, r);
}
{ // Commit a succeeding region
rmr->add_committed_region(addr + cs, cs, stack);
R r[] = { {addr, cs},
{addr + cs, cs} };
check(rmr, r);
}
{ // Commit over two regions
rmr->add_committed_region(addr, 2 * cs, stack);
R r[] = { {addr, 2 * cs} };
check(rmr, r);
}
{ // Commit first part of a region
rmr->add_committed_region(addr, cs, stack2);
R r[] = { {addr, cs},
{addr + cs, cs} };
check(rmr, r);
}
{ // Commit second part of a region
rmr->add_committed_region(addr + cs, cs, stack2);
R r[] = { {addr, 2 * cs} };
check(rmr, r);
}
{ // Commit a third part
rmr->add_committed_region(addr + 2 * cs, cs, stack2);
R r[] = { {addr, 3 * cs} };
check(rmr, r);
}
{ // Commit in the middle of a region
rmr->add_committed_region(addr + 1 * cs, cs, stack);
R r[] = { {addr, cs},
{addr + cs, cs},
{addr + 2 * cs, cs} };
check(rmr, r);
}
}
static void test_add_committed_region() {
test_add_committed_region_adjacent();
test_add_committed_region_adjacent_overlapping();
test_add_committed_region_overlapping();
}
template <size_t S>
static void fix(R r[S]) {
}
static void test_remove_uncommitted_region() {
VirtualMemoryTracker::initialize(NMT_detail);
VirtualMemoryTracker::late_initialize(NMT_detail);
address addr = (address)0x10000000;
size_t size = 0x01000000;
address frame1 = (address)0x1234;
address frame2 = (address)0x1235;
NativeCallStack stack(&frame1, 1);
NativeCallStack stack2(&frame2, 1);
// Add the reserved memory
VirtualMemoryTracker::add_reserved_region(addr, size, stack, mtTest);
// Fetch the RMR added above
ReservedMemoryRegion* rmr = VirtualMemoryTracker::_reserved_regions->find(ReservedMemoryRegion(addr, size));
ASSERT_EQ(rmr->size(), size);
ASSERT_EQ(rmr->base(), addr);
// Commit Size Granularity
const size_t cs = 0x1000;
{ // Commit regions
rmr->add_committed_region(addr, 3 * cs, stack);
R r[] = { {addr, 3 * cs} };
check(rmr, r);
// Remove only existing
rmr->remove_uncommitted_region(addr, 3 * cs);
check_empty(rmr);
}
{
rmr->add_committed_region(addr + 0 * cs, cs, stack);
rmr->add_committed_region(addr + 2 * cs, cs, stack);
rmr->add_committed_region(addr + 4 * cs, cs, stack);
{ // Remove first
rmr->remove_uncommitted_region(addr, cs);
R r[] = { {addr + 2 * cs, cs},
{addr + 4 * cs, cs} };
check(rmr, r);
}
// add back
rmr->add_committed_region(addr, cs, stack);
{ // Remove middle
rmr->remove_uncommitted_region(addr + 2 * cs, cs);
R r[] = { {addr + 0 * cs, cs},
{addr + 4 * cs, cs} };
check(rmr, r);
}
// add back
rmr->add_committed_region(addr + 2 * cs, cs, stack);
{ // Remove end
rmr->remove_uncommitted_region(addr + 4 * cs, cs);
R r[] = { {addr + 0 * cs, cs},
{addr + 2 * cs, cs} };
check(rmr, r);
}
rmr->remove_uncommitted_region(addr, 5 * cs);
check_empty(rmr);
}
{ // Remove larger region
rmr->add_committed_region(addr + 1 * cs, cs, stack);
rmr->remove_uncommitted_region(addr, 3 * cs);
check_empty(rmr);
}
{ // Remove smaller region - in the middle
rmr->add_committed_region(addr, 3 * cs, stack);
rmr->remove_uncommitted_region(addr + 1 * cs, cs);
R r[] = { { addr + 0 * cs, cs},
{ addr + 2 * cs, cs} };
check(rmr, r);
rmr->remove_uncommitted_region(addr, 3 * cs);
check_empty(rmr);
}
{ // Remove smaller region - at the beginning
rmr->add_committed_region(addr, 3 * cs, stack);
rmr->remove_uncommitted_region(addr + 0 * cs, cs);
R r[] = { { addr + 1 * cs, 2 * cs} };
check(rmr, r);
rmr->remove_uncommitted_region(addr, 3 * cs);
check_empty(rmr);
}
{ // Remove smaller region - at the end
rmr->add_committed_region(addr, 3 * cs, stack);
rmr->remove_uncommitted_region(addr + 2 * cs, cs);
R r[] = { { addr, 2 * cs} };
check(rmr, r);
rmr->remove_uncommitted_region(addr, 3 * cs);
check_empty(rmr);
}
{ // Remove smaller, overlapping region - at the beginning
rmr->add_committed_region(addr + 1 * cs, 4 * cs, stack);
rmr->remove_uncommitted_region(addr, 2 * cs);
R r[] = { { addr + 2 * cs, 3 * cs} };
check(rmr, r);
rmr->remove_uncommitted_region(addr + 1 * cs, 4 * cs);
check_empty(rmr);
}
{ // Remove smaller, overlapping region - at the end
rmr->add_committed_region(addr, 3 * cs, stack);
rmr->remove_uncommitted_region(addr + 2 * cs, 2 * cs);
R r[] = { { addr, 2 * cs} };
check(rmr, r);
rmr->remove_uncommitted_region(addr, 3 * cs);
check_empty(rmr);
}
}
};
TEST_VM(VirtualMemoryTracker, add_committed_region) {
VirtualMemoryTrackerTest::test_add_committed_region();
}
TEST_VM(VirtualMemoryTracker, remove_uncommitted_region) {
VirtualMemoryTrackerTest::test_remove_uncommitted_region();
}
#endif // INCLUDE_NMT

View file

@ -0,0 +1,326 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @summary Test merging of committed virtual memory and that we track it correctly
* @key nmt jcmd
* @library /test/lib
* @modules java.base/jdk.internal.misc
* java.management
* @build sun.hotspot.WhiteBox
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail VirtualAllocCommitMerge
*
*/
import jdk.test.lib.process.ProcessTools;
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.JDKToolFinder;
import sun.hotspot.WhiteBox;
public class VirtualAllocCommitMerge {
public static WhiteBox wb = WhiteBox.getWhiteBox();
public static void main(String args[]) throws Exception {
OutputAnalyzer output;
long commitSize = 128 * 1024; // 128KB
long reserveSize = 4 * 1024 * 1024; // 4096KB
long addr;
String pid = Long.toString(ProcessTools.getProcessId());
ProcessBuilder pb = new ProcessBuilder();
// reserve
addr = wb.NMTReserveMemory(reserveSize);
pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid,
"VM.native_memory", "detail" });
output = new OutputAnalyzer(pb.start());
checkReservedCommittedSummary(output, "4096KB", "0KB");
checkReserved(output, addr, reserveSize, "4096KB");
long addrA = addr + (0 * commitSize);
long addrB = addr + (1 * commitSize);
long addrC = addr + (2 * commitSize);
long addrD = addr + (3 * commitSize);
long addrE = addr + (4 * commitSize);
{
// commit overlapping ABC, A, B, C
wb.NMTCommitMemory(addrA, 3 * commitSize);
output = new OutputAnalyzer(pb.start());
checkReservedCommittedSummary(output, "4096KB", "384KB");
checkReserved(output, addr, reserveSize, "4096KB");
checkCommitted(output, addrA, 3 * commitSize, "384KB");
wb.NMTCommitMemory(addrA, commitSize);
output = new OutputAnalyzer(pb.start());
checkReservedCommittedSummary(output, "4096KB", "384KB");
checkReserved(output, addr, reserveSize, "4096KB");
checkCommitted(output, addrA, 3 * commitSize, "384KB");
wb.NMTCommitMemory(addrB, commitSize);
output = new OutputAnalyzer(pb.start());
checkReservedCommittedSummary(output, "4096KB", "384KB");
checkReserved(output, addr, reserveSize, "4096KB");
checkCommitted(output, addrA, 3 * commitSize, "384KB");
wb.NMTCommitMemory(addrC, commitSize);
output = new OutputAnalyzer(pb.start());
checkReservedCommittedSummary(output, "4096KB", "384KB");
checkReserved(output, addr, reserveSize, "4096KB");
checkCommitted(output, addrA, 3 * commitSize, "384KB");
// uncommit
wb.NMTUncommitMemory(addrA, 3 * commitSize);
output = new OutputAnalyzer(pb.start());
checkReservedCommittedSummary(output, "4096KB", "0KB");
}
// Test discontiguous areas
{
// commit ACE
wb.NMTCommitMemory(addrA, commitSize);
wb.NMTCommitMemory(addrC, commitSize);
wb.NMTCommitMemory(addrE, commitSize);
output = new OutputAnalyzer(pb.start());
checkReservedCommittedSummary(output, "4096KB", "384KB");
checkReserved(output, addr, reserveSize, "4096KB");
checkCommitted(output, addrA, commitSize, "128KB");
checkCommitted(output, addrC, commitSize, "128KB");
checkCommitted(output, addrE, commitSize, "128KB");
// uncommit ACE
wb.NMTUncommitMemory(addrA, commitSize);
wb.NMTUncommitMemory(addrC, commitSize);
wb.NMTUncommitMemory(addrE, commitSize);
output = new OutputAnalyzer(pb.start());
checkReservedCommittedSummary(output, "4096KB", "0KB");
}
// Test contiguous areas
{
// commit AB
wb.NMTCommitMemory(addrA, commitSize);
wb.NMTCommitMemory(addrB, commitSize);
output = new OutputAnalyzer(pb.start());
checkReservedCommittedSummary(output, "4096KB", "256KB");
checkReserved(output, addr, reserveSize, "4096KB");
checkCommitted(output, addrA, 2 * commitSize, "256KB");
// uncommit AB
wb.NMTUncommitMemory(addrA, commitSize);
wb.NMTUncommitMemory(addrB, commitSize);
output = new OutputAnalyzer(pb.start());
checkReservedCommittedSummary(output, "4096KB", "0KB");
}
{
// commit BA
wb.NMTCommitMemory(addrB, commitSize);
wb.NMTCommitMemory(addrA, commitSize);
output = new OutputAnalyzer(pb.start());
checkReservedCommittedSummary(output, "4096KB", "256KB");
checkReserved(output, addr, reserveSize, "4096KB");
checkCommitted(output, addrA, 2 * commitSize, "256KB");
// uncommit BA
wb.NMTUncommitMemory(addrB, commitSize);
wb.NMTUncommitMemory(addrA, commitSize);
output = new OutputAnalyzer(pb.start());
checkReservedCommittedSummary(output, "4096KB", "0KB");
}
{
// commit ABC
wb.NMTCommitMemory(addrA, commitSize);
wb.NMTCommitMemory(addrB, commitSize);
wb.NMTCommitMemory(addrC, commitSize);
output = new OutputAnalyzer(pb.start());
checkReservedCommittedSummary(output, "4096KB", "384KB");
checkReserved(output, addr, reserveSize, "4096KB");
checkCommitted(output, addrA, 3 * commitSize, "384KB");
// uncommit
wb.NMTUncommitMemory(addrA, commitSize);
wb.NMTUncommitMemory(addrB, commitSize);
wb.NMTUncommitMemory(addrC, commitSize);
output = new OutputAnalyzer(pb.start());
checkReservedCommittedSummary(output, "4096KB", "0KB");
}
{
// commit ACB
wb.NMTCommitMemory(addrA, commitSize);
wb.NMTCommitMemory(addrC, commitSize);
wb.NMTCommitMemory(addrB, commitSize);
output = new OutputAnalyzer(pb.start());
checkReservedCommittedSummary(output, "4096KB", "384KB");
checkReserved(output, addr, reserveSize, "4096KB");
checkCommitted(output, addrA, 3 * commitSize, "384KB");
// uncommit
wb.NMTUncommitMemory(addrA, commitSize);
wb.NMTUncommitMemory(addrC, commitSize);
wb.NMTUncommitMemory(addrB, commitSize);
output = new OutputAnalyzer(pb.start());
checkReservedCommittedSummary(output, "4096KB", "0KB");
}
{
// commit BAC
wb.NMTCommitMemory(addrB, commitSize);
wb.NMTCommitMemory(addrA, commitSize);
wb.NMTCommitMemory(addrC, commitSize);
output = new OutputAnalyzer(pb.start());
checkReservedCommittedSummary(output, "4096KB", "384KB");
checkReserved(output, addr, reserveSize, "4096KB");
checkCommitted(output, addrA, 3 * commitSize, "384KB");
// uncommit
wb.NMTUncommitMemory(addrB, commitSize);
wb.NMTUncommitMemory(addrA, commitSize);
wb.NMTUncommitMemory(addrC, commitSize);
output = new OutputAnalyzer(pb.start());
checkReservedCommittedSummary(output, "4096KB", "0KB");
}
{
// commit BCA
wb.NMTCommitMemory(addrB, commitSize);
wb.NMTCommitMemory(addrC, commitSize);
wb.NMTCommitMemory(addrA, commitSize);
output = new OutputAnalyzer(pb.start());
checkReservedCommittedSummary(output, "4096KB", "384KB");
checkReserved(output, addr, reserveSize, "4096KB");
checkCommitted(output, addrA, 3 * commitSize, "384KB");
// uncommit
wb.NMTUncommitMemory(addrB, commitSize);
wb.NMTUncommitMemory(addrC, commitSize);
wb.NMTUncommitMemory(addrA, commitSize);
output = new OutputAnalyzer(pb.start());
checkReservedCommittedSummary(output, "4096KB", "0KB");
}
{
// commit CAB
wb.NMTCommitMemory(addrC, commitSize);
wb.NMTCommitMemory(addrA, commitSize);
wb.NMTCommitMemory(addrB, commitSize);
output = new OutputAnalyzer(pb.start());
checkReservedCommittedSummary(output, "4096KB", "384KB");
checkReserved(output, addr, reserveSize, "4096KB");
checkCommitted(output, addrA, 3 * commitSize, "384KB");
// uncommit
wb.NMTUncommitMemory(addrC, commitSize);
wb.NMTUncommitMemory(addrA, commitSize);
wb.NMTUncommitMemory(addrB, commitSize);
output = new OutputAnalyzer(pb.start());
checkReservedCommittedSummary(output, "4096KB", "0KB");
}
{
// commit CBA
wb.NMTCommitMemory(addrC, commitSize);
wb.NMTCommitMemory(addrB, commitSize);
wb.NMTCommitMemory(addrA, commitSize);
output = new OutputAnalyzer(pb.start());
checkReservedCommittedSummary(output, "4096KB", "384KB");
checkReserved(output, addr, reserveSize, "4096KB");
checkCommitted(output, addrA, 3 * commitSize, "384KB");
// uncommit
wb.NMTUncommitMemory(addrC, commitSize);
wb.NMTUncommitMemory(addrB, commitSize);
wb.NMTUncommitMemory(addrA, commitSize);
output = new OutputAnalyzer(pb.start());
checkReservedCommittedSummary(output, "4096KB", "0KB");
}
// release
wb.NMTReleaseMemory(addr, reserveSize);
output = new OutputAnalyzer(pb.start());
output.shouldNotContain("Test (reserved=");
output.shouldNotMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+ Long.toHexString(addr + reserveSize) + "\\] reserved 4096KB for Test");
}
public static void checkReservedCommittedSummary(OutputAnalyzer output, String reservedString, String committedString) {
output.shouldContain("Test (reserved=" + reservedString + ", committed=" + committedString + ")");
}
public static void checkReserved(OutputAnalyzer output, long addr, long size, String sizeString) {
output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+ Long.toHexString(addr + size)
+ "\\] reserved 4096KB for Test");
}
public static void checkCommitted(OutputAnalyzer output, long addr, long size, String sizeString) {
output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+ Long.toHexString(addr + size)
+ "\\] committed " + sizeString + " from.*");
}
}
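For reference, the jcmd VM.native_memory detail output that these helpers match contains lines of roughly the following shape (the addresses are hypothetical; the summary line is what checkReservedCommittedSummary looks for, the region lines are what checkReserved and checkCommitted match, and each committed line is followed by the committing call stack):

Test (reserved=4096KB, committed=384KB)
[0x0000000010000000 - 0x0000000010400000] reserved 4096KB for Test
    [0x0000000010000000 - 0x0000000010060000] committed 384KB from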