Mirror of https://github.com/openjdk/jdk.git (synced 2025-09-18 10:04:42 +02:00)
8191369: NMT: Enhance thread stack tracking

More precise thread stack tracking on Linux and Windows

Reviewed-by: coleenp, adinn, minqi
parent d7765d9002
commit 354249f51e
8 changed files with 209 additions and 28 deletions
@@ -3053,10 +3053,12 @@ bool os::pd_uncommit_memory(char* addr, size_t size) {
   return res != (uintptr_t) MAP_FAILED;
 }
 
-static address get_stack_commited_bottom(address bottom, size_t size) {
-  address nbot = bottom;
-  address ntop = bottom + size;
-
+// If there is no page mapped/committed, top (bottom + size) is returned
+static address get_stack_mapped_bottom(address bottom,
+                                       size_t size,
+                                       bool committed_only /* must have backing pages */) {
+  // address used to test if the page is mapped/committed
+  address test_addr = bottom + size;
   size_t page_sz = os::vm_page_size();
   unsigned pages = size / page_sz;
 
@@ -3068,39 +3070,40 @@ static address get_stack_commited_bottom(address bottom, size_t size) {
 
   while (imin < imax) {
     imid = (imax + imin) / 2;
-    nbot = ntop - (imid * page_sz);
+    test_addr = bottom + (imid * page_sz);
 
     // Use a trick with mincore to check whether the page is mapped or not.
     // mincore sets vec to 1 if page resides in memory and to 0 if page
     // is swapped output but if page we are asking for is unmapped
     // it returns -1,ENOMEM
-    mincore_return_value = mincore(nbot, page_sz, vec);
-
-    if (mincore_return_value == -1) {
-      // Page is not mapped go up
-      // to find first mapped page
-      if (errno != EAGAIN) {
-        assert(errno == ENOMEM, "Unexpected mincore errno");
-        imax = imid;
-      }
-    } else {
-      // Page is mapped go down
-      // to find first not mapped page
-      imin = imid + 1;
+    mincore_return_value = mincore(test_addr, page_sz, vec);
+
+    if (mincore_return_value == -1 || (committed_only && (vec[0] & 0x01) == 0)) {
+      // Page is not mapped/committed go up
+      // to find first mapped/committed page
+      if ((mincore_return_value == -1 && errno != EAGAIN)
+        || (committed_only && (vec[0] & 0x01) == 0)) {
+        assert(mincore_return_value != -1 || errno == ENOMEM, "Unexpected mincore errno");
+
+        imin = imid + 1;
+      }
+    } else {
+      // mapped/committed, go down
+      imax= imid;
     }
   }
 
-  nbot = nbot + page_sz;
-
-  // Adjust stack bottom one page up if last checked page is not mapped
-  if (mincore_return_value == -1) {
-    nbot = nbot + page_sz;
+  // Adjust stack bottom one page up if last checked page is not mapped/committed
+  if (mincore_return_value == -1 || (committed_only && (vec[0] & 0x01) == 0)) {
+    assert(mincore_return_value != -1 || (errno != EAGAIN && errno != ENOMEM),
+      "Should not get to here");
+
+    test_addr = test_addr + page_sz;
   }
 
-  return nbot;
+  return test_addr;
 }
 
 // Linux uses a growable mapping for the stack, and if the mapping for
 // the stack guard pages is not removed when we detach a thread the
 // stack cannot grow beyond the pages where the stack guard was
@@ -3137,9 +3140,9 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
 
   if (mincore((address)stack_extent, os::vm_page_size(), vec) == -1) {
     // Fallback to slow path on all errors, including EAGAIN
-    stack_extent = (uintptr_t) get_stack_commited_bottom(
-                                    os::Linux::initial_thread_stack_bottom(),
-                                    (size_t)addr - stack_extent);
+    stack_extent = (uintptr_t) get_stack_mapped_bottom(os::Linux::initial_thread_stack_bottom(),
+                                                       (size_t)addr - stack_extent,
+                                                       false /* committed_only */);
   }
 
   if (stack_extent < (uintptr_t)addr) {
@@ -3166,6 +3169,11 @@ bool os::remove_stack_guard_pages(char* addr, size_t size) {
   return os::uncommit_memory(addr, size);
 }
 
+size_t os::committed_stack_size(address bottom, size_t size) {
+  address bot = get_stack_mapped_bottom(bottom, size, true /* committed_only */);
+  return size_t(bottom + size - bot);
+}
+
 // If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
 // at 'requested_addr'. If there are existing memory mappings at the same
 // location, however, they will be overwritten. If 'fixed' is false,
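A note on the mincore() trick the Linux hunks above depend on, with a small standalone sketch. The sketch is not part of the patch; the four-page mapping and what gets touched or unmapped in it are invented purely for illustration. It demonstrates the two signals that get_stack_mapped_bottom() binary-searches over: mincore() returning -1 with errno set to ENOMEM when the probed page is not mapped at all, and bit 0 of vec[0] telling whether a mapped page currently has a backing (resident) page, which is what the committed_only mode additionally requires.

#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main() {
  size_t page_sz = (size_t) sysconf(_SC_PAGESIZE);
  size_t pages   = 4;

  // Map four anonymous pages; untouched anonymous pages stay non-resident.
  char* base = (char*) mmap(NULL, pages * page_sz, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) { perror("mmap"); return 1; }

  base[0] = 1;                            // touch page 0 so it gets a backing page
  munmap(base + 3 * page_sz, page_sz);    // unmap page 3 to provoke ENOMEM

  unsigned char vec[1];
  for (size_t i = 0; i < pages; i++) {
    int rc = mincore(base + i * page_sz, page_sz, vec);
    if (rc == -1) {
      // The patch treats this (with errno == ENOMEM) as "page not mapped".
      printf("page %zu: unmapped, errno=%d\n", i, errno);
    } else {
      // Bit 0 of vec[0] is the "has a backing page" signal used for committed_only.
      printf("page %zu: mapped, %s\n", i, (vec[0] & 0x01) ? "resident" : "not resident");
    }
  }

  munmap(base, 3 * page_sz);
  return 0;
}

Built with g++ on Linux, this typically prints page 0 as resident, pages 1 and 2 as mapped but not resident, and page 3 as unmapped with ENOMEM, which is the distinction between the committed_only == false and committed_only == true probes above.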
@@ -363,6 +363,25 @@ size_t os::current_stack_size() {
   return sz;
 }
 
+size_t os::committed_stack_size(address bottom, size_t size) {
+  MEMORY_BASIC_INFORMATION minfo;
+  address top = bottom + size;
+  size_t committed_size = 0;
+
+  while (committed_size < size) {
+    // top is exclusive
+    VirtualQuery(top - 1, &minfo, sizeof(minfo));
+    if ((minfo.State & MEM_COMMIT) != 0) {
+      committed_size += minfo.RegionSize;
+      top -= minfo.RegionSize;
+    } else {
+      break;
+    }
+  }
+
+  return MIN2(committed_size, size);
+}
+
 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
   const struct tm* time_struct_ptr = localtime(clock);
   if (time_struct_ptr != NULL) {
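For the Windows side, here is a standalone sketch of the VirtualQuery() walk that os::committed_stack_size() performs in the hunk above. It is not part of the patch; the reservation size, the hard-coded 4 KB page size, and the BaseAddress-based downward step are illustrative assumptions rather than the patch's exact bookkeeping. The idea is the same: reserve a range, commit only its top pages the way a live thread stack looks, then query downward from the exclusive top until the first region whose State is not MEM_COMMIT.

#include <windows.h>
#include <stdio.h>

int main() {
  const SIZE_T page = 4096;            // assumed page size, for illustration only
  const SIZE_T size = 16 * page;       // reserve 16 pages

  // Reserve the whole range, then commit only the top four pages, roughly the
  // shape of a thread stack (committed pages sit at the high end).
  char* bottom = (char*) VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
  if (bottom == NULL) { printf("VirtualAlloc failed\n"); return 1; }
  VirtualAlloc(bottom + size - 4 * page, 4 * page, MEM_COMMIT, PAGE_READWRITE);

  MEMORY_BASIC_INFORMATION minfo;
  char*  top = bottom + size;          // exclusive top of the range
  SIZE_T committed = 0;

  while (top > bottom) {
    // Query the byte just below 'top'; 'top' itself is one past the range.
    VirtualQuery(top - 1, &minfo, sizeof(minfo));
    if ((minfo.State & MEM_COMMIT) == 0) {
      break;                           // first non-committed region ends the walk
    }
    // Everything from the queried region's BaseAddress up to 'top' is committed.
    committed += (SIZE_T)(top - (char*) minfo.BaseAddress);
    top = (char*) minfo.BaseAddress;
  }

  printf("committed %llu of %llu reserved bytes\n",
         (unsigned long long) committed, (unsigned long long) size);

  VirtualFree(bottom, 0, MEM_RELEASE);
  return 0;
}

Querying top - 1 rather than top is the point of the "// top is exclusive" comment in the hunk: top is one past the last byte of interest, so the byte just below it is what identifies the committed region.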
@@ -245,6 +245,13 @@ OSReturn os::get_priority(const Thread* const thread, ThreadPriority& priority)
   return OS_OK;
 }
 
+
+#if !defined(LINUX) && !defined(_WINDOWS)
+size_t os::committed_stack_size(address bottom, size_t size) {
+  return size;
+}
+#endif
+
 bool os::dll_build_name(char* buffer, size_t size, const char* fname) {
   int n = jio_snprintf(buffer, size, "%s%s%s", JNI_LIB_PREFIX, fname, JNI_LIB_SUFFIX);
   return (n != -1);
@@ -271,6 +271,10 @@ class os: AllStatic {
   static void map_stack_shadow_pages(address sp);
   static bool stack_shadow_pages_available(Thread *thread, const methodHandle& method, address sp);
 
+  // Return size of stack that is actually committed. For Java thread, the bottom should be above
+  // guard pages (stack grows downward)
+  static size_t committed_stack_size(address bottom, size_t size);
+
   // OS interface to Virtual Memory
 
   // Return the default page size.
@@ -246,7 +246,7 @@ class MemTracker : AllStatic {
     if (addr != NULL) {
       // uses thread stack malloc slot for book keeping number of threads
       MallocMemorySummary::record_malloc(0, mtThreadStack);
-      record_virtual_memory_reserve_and_commit(addr, size, CALLER_PC, mtThreadStack);
+      record_virtual_memory_reserve(addr, size, CALLER_PC, mtThreadStack);
     }
   }
 
@@ -38,6 +38,12 @@ void VirtualMemorySummary::initialize() {
   ::new ((void*)_snapshot) VirtualMemorySnapshot();
 }
 
+void VirtualMemorySummary::snapshot(VirtualMemorySnapshot* s) {
+  // Snapshot current thread stacks
+  VirtualMemoryTracker::snapshot_thread_stacks();
+  as_snapshot()->copy_to(s);
+}
+
 SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;
 
 int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
@@ -286,6 +292,26 @@ void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
   }
 }
 
+address ReservedMemoryRegion::thread_stack_uncommitted_bottom() const {
+  assert(flag() == mtThreadStack, "Only for thread stack");
+  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
+  address bottom = base();
+  address top = base() + size();
+  while (head != NULL) {
+    address committed_top = head->data()->base() + head->data()->size();
+    if (committed_top < top) {
+      // committed stack guard pages, skip them
+      bottom = head->data()->base() + head->data()->size();
+      head = head->next();
+    } else {
+      assert(top == committed_top, "Sanity");
+      break;
+    }
+  }
+
+  return bottom;
+}
+
 bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
   if (level >= NMT_summary) {
     VirtualMemorySummary::initialize();
@@ -460,6 +486,32 @@ bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
   }
 }
 
+// Walk all known thread stacks, snapshot their committed ranges.
+class SnapshotThreadStackWalker : public VirtualMemoryWalker {
+public:
+  SnapshotThreadStackWalker() {}
+
+  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
+    if (rgn->flag() == mtThreadStack) {
+      address stack_bottom = rgn->thread_stack_uncommitted_bottom();
+      size_t stack_size = rgn->base() + rgn->size() - stack_bottom;
+      size_t committed_size = os::committed_stack_size(stack_bottom, stack_size);
+      if (committed_size > 0) {
+        ReservedMemoryRegion* region = const_cast<ReservedMemoryRegion*>(rgn);
+        NativeCallStack ncs; // empty stack
+
+        // Stack grows downward
+        region->add_committed_region(rgn->base() + rgn->size() - committed_size, committed_size, ncs);
+      }
+    }
+    return true;
+  }
+};
+
+void VirtualMemoryTracker::snapshot_thread_stacks() {
+  SnapshotThreadStackWalker walker;
+  walk_virtual_memory(&walker);
+}
+
 bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
   assert(_reserved_regions != NULL, "Sanity check");
 
@@ -160,9 +160,7 @@ class VirtualMemorySummary : AllStatic {
     as_snapshot()->by_type(to)->commit_memory(size);
   }
 
-  static inline void snapshot(VirtualMemorySnapshot* s) {
-    as_snapshot()->copy_to(s);
-  }
+  static void snapshot(VirtualMemorySnapshot* s);
 
   static VirtualMemorySnapshot* as_snapshot() {
     return (VirtualMemorySnapshot*)_snapshot;
@@ -336,6 +334,9 @@ class ReservedMemoryRegion : public VirtualMemoryRegion {
     return compare(rgn) == 0;
   }
 
+  // uncommitted thread stack bottom, above guard pages if there is any.
+  address thread_stack_uncommitted_bottom() const;
+
   bool add_committed_region(address addr, size_t size, const NativeCallStack& stack);
   bool remove_uncommitted_region(address addr, size_t size);
 
@@ -389,6 +390,7 @@ class VirtualMemoryWalker : public StackObj {
 // Main class called from MemTracker to track virtual memory allocations, commits and releases.
 class VirtualMemoryTracker : AllStatic {
   friend class VirtualMemoryTrackerTest;
+  friend class ThreadStackTrackingTest;
 
  public:
   static bool initialize(NMT_TrackingLevel level);
@@ -408,6 +410,9 @@ class VirtualMemoryTracker : AllStatic {
 
   static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);
 
+  // Snapshot current thread stacks
+  static void snapshot_thread_stacks();
+
  private:
   static SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* _reserved_regions;
 };
test/hotspot/gtest/runtime/test_threadstack_tracking.cpp (new file, 86 lines)
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+
+// Included early because the NMT flags don't include it.
+#include "utilities/macros.hpp"
+
+#include "runtime/thread.hpp"
+#include "services/memTracker.hpp"
+#include "services/virtualMemoryTracker.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "unittest.hpp"
+
+
+class ThreadStackTrackingTest {
+public:
+  static void test() {
+    VirtualMemoryTracker::initialize(NMT_detail);
+    VirtualMemoryTracker::late_initialize(NMT_detail);
+
+    Thread* thr = Thread::current();
+    address stack_end = thr->stack_end();
+    size_t stack_size = thr->stack_size();
+
+    MemTracker::record_thread_stack(stack_end, stack_size);
+
+    VirtualMemoryTracker::add_reserved_region(stack_end, stack_size, CALLER_PC, mtThreadStack);
+
+    // snapshot current stack usage
+    VirtualMemoryTracker::snapshot_thread_stacks();
+
+    ReservedMemoryRegion* rmr = VirtualMemoryTracker::_reserved_regions->find(ReservedMemoryRegion(stack_end, stack_size));
+    ASSERT_TRUE(rmr != NULL);
+
+    ASSERT_EQ(rmr->base(), stack_end);
+    ASSERT_EQ(rmr->size(), stack_size);
+
+    CommittedRegionIterator iter = rmr->iterate_committed_regions();
+    int i = 0;
+    address i_addr = (address)&i;
+
+    // stack grows downward
+    address stack_top = stack_end + stack_size;
+    bool found_stack_top = false;
+
+    for (const CommittedMemoryRegion* region = iter.next(); region != NULL; region = iter.next()) {
+      if (region->base() + region->size() == stack_top) {
+        // This should be active part, "i" should be here
+        ASSERT_TRUE(i_addr < stack_top && i_addr >= region->base());
+        ASSERT_TRUE(region->size() <= stack_size);
+        found_stack_top = true;
+      }
+
+      i++;
+    }
+
+    // NMT was not turned on when the thread was created, so we don't have guard pages
+    ASSERT_TRUE(i == 1);
+    ASSERT_TRUE(found_stack_top);
+  }
+};
+
+TEST_VM(VirtualMemoryTracker, thread_stack_tracking) {
+  ThreadStackTrackingTest::test();
+}