8213269: convert test/hotspot/jtreg/runtime/memory/RunUnitTestsConcurrently to gtest

Reviewed-by: iignatyev, coleenp, stuefe
Mikhailo Seledtsov 2021-03-08 20:09:59 +00:00
parent 17853ee92c
commit 9221540e2a
13 changed files with 638 additions and 668 deletions


@@ -3224,12 +3224,6 @@ int os::get_core_path(char* buffer, size_t bufferSize) {
return strlen(buffer);
}
#ifndef PRODUCT
void TestReserveMemorySpecial_test() {
// No tests available for this platform
}
#endif
bool os::start_debugging(char *buf, int buflen) {
int len = (int)strlen(buf);
char *p = &buf[len];


@@ -2718,12 +2718,6 @@ bool os::supports_map_sync() {
return false;
}
#ifndef PRODUCT
void TestReserveMemorySpecial_test() {
// No tests available for this platform
}
#endif
bool os::start_debugging(char *buf, int buflen) {
int len = (int)strlen(buf);
char *p = &buf[len];


@@ -5535,172 +5535,3 @@ void os::print_memory_mappings(char* addr, size_t bytes, outputStream* st) {
st->cr();
}
}
/////////////// Unit tests ///////////////
#ifndef PRODUCT
class TestReserveMemorySpecial : AllStatic {
public:
static void small_page_write(void* addr, size_t size) {
size_t page_size = os::vm_page_size();
char* end = (char*)addr + size;
for (char* p = (char*)addr; p < end; p += page_size) {
*p = 1;
}
}
static void test_reserve_memory_special_huge_tlbfs_only(size_t size) {
if (!UseHugeTLBFS) {
return;
}
char* addr = os::Linux::reserve_memory_special_huge_tlbfs_only(size, NULL, false);
if (addr != NULL) {
small_page_write(addr, size);
os::Linux::release_memory_special_huge_tlbfs(addr, size);
}
}
static void test_reserve_memory_special_huge_tlbfs_only() {
if (!UseHugeTLBFS) {
return;
}
size_t lp = os::large_page_size();
for (size_t size = lp; size <= lp * 10; size += lp) {
test_reserve_memory_special_huge_tlbfs_only(size);
}
}
static void test_reserve_memory_special_huge_tlbfs_mixed() {
size_t lp = os::large_page_size();
size_t ag = os::vm_allocation_granularity();
// sizes to test
const size_t sizes[] = {
lp, lp + ag, lp + lp / 2, lp * 2,
lp * 2 + ag, lp * 2 - ag, lp * 2 + lp / 2,
lp * 10, lp * 10 + lp / 2
};
const int num_sizes = sizeof(sizes) / sizeof(size_t);
// For each size/alignment combination, we test three scenarios:
// 1) with req_addr == NULL
// 2) with a non-null req_addr at which we expect to successfully allocate
// 3) with a non-null req_addr which contains a pre-existing mapping, at which we
// expect the allocation to either fail or to ignore req_addr
// Pre-allocate two areas; they shall be as large as the largest allocation
// and aligned to the largest alignment we will be testing.
const size_t mapping_size = sizes[num_sizes - 1] * 2;
char* const mapping1 = (char*) ::mmap(NULL, mapping_size,
PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
-1, 0);
assert(mapping1 != MAP_FAILED, "should work");
char* const mapping2 = (char*) ::mmap(NULL, mapping_size,
PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
-1, 0);
assert(mapping2 != MAP_FAILED, "should work");
// Unmap the first mapping, but leave the second mapping intact: the first
// mapping will serve as a value for a "good" req_addr (case 2). The second
// mapping, still intact, as "bad" req_addr (case 3).
::munmap(mapping1, mapping_size);
// Case 1
for (int i = 0; i < num_sizes; i++) {
const size_t size = sizes[i];
for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
if (p != NULL) {
assert(is_aligned(p, alignment), "must be");
small_page_write(p, size);
os::Linux::release_memory_special_huge_tlbfs(p, size);
}
}
}
// Case 2
for (int i = 0; i < num_sizes; i++) {
const size_t size = sizes[i];
for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
char* const req_addr = align_up(mapping1, alignment);
char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
if (p != NULL) {
assert(p == req_addr, "must be");
small_page_write(p, size);
os::Linux::release_memory_special_huge_tlbfs(p, size);
}
}
}
// Case 3
for (int i = 0; i < num_sizes; i++) {
const size_t size = sizes[i];
for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
char* const req_addr = align_up(mapping2, alignment);
char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
// as the area around req_addr contains already existing mappings, the API should always
// return NULL (as per contract, it cannot return another address)
assert(p == NULL, "must be");
}
}
::munmap(mapping2, mapping_size);
}
static void test_reserve_memory_special_huge_tlbfs() {
if (!UseHugeTLBFS) {
return;
}
test_reserve_memory_special_huge_tlbfs_only();
test_reserve_memory_special_huge_tlbfs_mixed();
}
static void test_reserve_memory_special_shm(size_t size, size_t alignment) {
if (!UseSHM) {
return;
}
char* addr = os::Linux::reserve_memory_special_shm(size, alignment, NULL, false);
if (addr != NULL) {
assert(is_aligned(addr, alignment), "Check");
assert(is_aligned(addr, os::large_page_size()), "Check");
small_page_write(addr, size);
os::Linux::release_memory_special_shm(addr, size);
}
}
static void test_reserve_memory_special_shm() {
size_t lp = os::large_page_size();
size_t ag = os::vm_allocation_granularity();
for (size_t size = ag; size < lp * 3; size += ag) {
for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
test_reserve_memory_special_shm(size, alignment);
}
}
}
static void test() {
test_reserve_memory_special_huge_tlbfs();
test_reserve_memory_special_shm();
}
};
void TestReserveMemorySpecial_test() {
TestReserveMemorySpecial::test();
}
#endif


@@ -5790,58 +5790,6 @@ char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
return agent_entry_name;
}
#ifndef PRODUCT
// test the code path in reserve_memory_special() that tries to allocate memory in a single
// contiguous memory block at a particular address.
// The test first tries to find a good approximate address to allocate at by using the same
// method to allocate some memory at any address. The test then tries to allocate memory in
// the vicinity (not directly after it to avoid possible by-chance use of that location)
// This is of course only some dodgy assumption, there is no guarantee that the vicinity of
// the previously allocated memory is available for allocation. The only actual failure
// that is reported is when the test tries to allocate at a particular location but gets a
// different valid one. A NULL return value at this point is not considered an error but may
// be legitimate.
void TestReserveMemorySpecial_test() {
if (!UseLargePages) {
return;
}
// save current value of globals
bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
bool old_use_numa_interleaving = UseNUMAInterleaving;
// set globals to make sure we hit the correct code path
UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
// do an allocation at an address selected by the OS to get a good one.
const size_t large_allocation_size = os::large_page_size() * 4;
char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
if (result == NULL) {
} else {
os::release_memory_special(result, large_allocation_size);
// allocate another page within the recently allocated memory area which seems to be a good location. At least
// we managed to get it once.
const size_t expected_allocation_size = os::large_page_size();
char* expected_location = result + os::large_page_size();
char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
if (actual_location == NULL) {
} else {
// release memory
os::release_memory_special(actual_location, expected_allocation_size);
// only now check, after releasing any memory to avoid any leaks.
assert(actual_location == expected_location,
"Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
expected_location, expected_allocation_size, actual_location);
}
}
// restore globals
UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
UseNUMAInterleaving = old_use_numa_interleaving;
}
#endif // PRODUCT
/*
All the defined signal names for Windows.


@@ -1086,343 +1086,4 @@ void VirtualSpace::print() {
print_on(tty);
}
/////////////// Unit tests ///////////////
#ifndef PRODUCT
class TestReservedSpace : AllStatic {
public:
static void small_page_write(void* addr, size_t size) {
size_t page_size = os::vm_page_size();
char* end = (char*)addr + size;
for (char* p = (char*)addr; p < end; p += page_size) {
*p = 1;
}
}
static void release_memory_for_test(ReservedSpace rs) {
if (rs.special()) {
guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
} else {
guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
}
}
static void test_reserved_space1(size_t size, size_t alignment) {
assert(is_aligned(size, alignment), "Incorrect input parameters");
ReservedSpace rs(size, // size
alignment, // alignment
UseLargePages, // large
(char *)NULL); // requested_address
assert(rs.base() != NULL, "Must be");
assert(rs.size() == size, "Must be");
assert(is_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
assert(is_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
if (rs.special()) {
small_page_write(rs.base(), size);
}
release_memory_for_test(rs);
}
static void test_reserved_space2(size_t size) {
assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
ReservedSpace rs(size);
assert(rs.base() != NULL, "Must be");
assert(rs.size() == size, "Must be");
if (rs.special()) {
small_page_write(rs.base(), size);
}
release_memory_for_test(rs);
}
static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
if (size < alignment) {
// Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
return;
}
assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
assert(is_aligned(size, alignment), "Must be at least aligned against alignment");
bool large = maybe_large && UseLargePages && size >= os::large_page_size();
ReservedSpace rs(size, alignment, large);
assert(rs.base() != NULL, "Must be");
assert(rs.size() == size, "Must be");
if (rs.special()) {
small_page_write(rs.base(), size);
}
release_memory_for_test(rs);
}
static void test_reserved_space1() {
size_t size = 2 * 1024 * 1024;
size_t ag = os::vm_allocation_granularity();
test_reserved_space1(size, ag);
test_reserved_space1(size * 2, ag);
test_reserved_space1(size * 10, ag);
}
static void test_reserved_space2() {
size_t size = 2 * 1024 * 1024;
size_t ag = os::vm_allocation_granularity();
test_reserved_space2(size * 1);
test_reserved_space2(size * 2);
test_reserved_space2(size * 10);
test_reserved_space2(ag);
test_reserved_space2(size - ag);
test_reserved_space2(size);
test_reserved_space2(size + ag);
test_reserved_space2(size * 2);
test_reserved_space2(size * 2 - ag);
test_reserved_space2(size * 2 + ag);
test_reserved_space2(size * 3);
test_reserved_space2(size * 3 - ag);
test_reserved_space2(size * 3 + ag);
test_reserved_space2(size * 10);
test_reserved_space2(size * 10 + size / 2);
}
static void test_reserved_space3() {
size_t ag = os::vm_allocation_granularity();
test_reserved_space3(ag, ag , false);
test_reserved_space3(ag * 2, ag , false);
test_reserved_space3(ag * 3, ag , false);
test_reserved_space3(ag * 2, ag * 2, false);
test_reserved_space3(ag * 4, ag * 2, false);
test_reserved_space3(ag * 8, ag * 2, false);
test_reserved_space3(ag * 4, ag * 4, false);
test_reserved_space3(ag * 8, ag * 4, false);
test_reserved_space3(ag * 16, ag * 4, false);
if (UseLargePages) {
size_t lp = os::large_page_size();
// Without large pages
test_reserved_space3(lp, ag * 4, false);
test_reserved_space3(lp * 2, ag * 4, false);
test_reserved_space3(lp * 4, ag * 4, false);
test_reserved_space3(lp, lp , false);
test_reserved_space3(lp * 2, lp , false);
test_reserved_space3(lp * 3, lp , false);
test_reserved_space3(lp * 2, lp * 2, false);
test_reserved_space3(lp * 4, lp * 2, false);
test_reserved_space3(lp * 8, lp * 2, false);
// With large pages
test_reserved_space3(lp, ag * 4 , true);
test_reserved_space3(lp * 2, ag * 4, true);
test_reserved_space3(lp * 4, ag * 4, true);
test_reserved_space3(lp, lp , true);
test_reserved_space3(lp * 2, lp , true);
test_reserved_space3(lp * 3, lp , true);
test_reserved_space3(lp * 2, lp * 2, true);
test_reserved_space3(lp * 4, lp * 2, true);
test_reserved_space3(lp * 8, lp * 2, true);
}
}
static void test_reserved_space() {
test_reserved_space1();
test_reserved_space2();
test_reserved_space3();
}
};
void TestReservedSpace_test() {
TestReservedSpace::test_reserved_space();
}
#define assert_equals(actual, expected) \
assert(actual == expected, \
"Got " SIZE_FORMAT " expected " \
SIZE_FORMAT, actual, expected);
#define assert_ge(value1, value2) \
assert(value1 >= value2, \
"'" #value1 "': " SIZE_FORMAT " '" \
#value2 "': " SIZE_FORMAT, value1, value2);
#define assert_lt(value1, value2) \
assert(value1 < value2, \
"'" #value1 "': " SIZE_FORMAT " '" \
#value2 "': " SIZE_FORMAT, value1, value2);
class TestVirtualSpace : AllStatic {
enum TestLargePages {
Default,
Disable,
Reserve,
Commit
};
static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
switch(mode) {
default:
case Default:
case Reserve:
return ReservedSpace(reserve_size_aligned);
case Disable:
case Commit:
return ReservedSpace(reserve_size_aligned,
os::vm_allocation_granularity(),
/* large */ false);
}
}
static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
switch(mode) {
default:
case Default:
case Reserve:
return vs.initialize(rs, 0);
case Disable:
return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
case Commit:
return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
}
}
public:
static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
TestLargePages mode = Default) {
size_t granularity = os::vm_allocation_granularity();
size_t reserve_size_aligned = align_up(reserve_size, granularity);
ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
assert(reserved.is_reserved(), "Must be");
VirtualSpace vs;
bool initialized = initialize_virtual_space(vs, reserved, mode);
assert(initialized, "Failed to initialize VirtualSpace");
vs.expand_by(commit_size, false);
if (vs.special()) {
assert_equals(vs.actual_committed_size(), reserve_size_aligned);
} else {
assert_ge(vs.actual_committed_size(), commit_size);
// Approximate the commit granularity.
// Make sure that we don't commit using large pages
// if large pages has been disabled for this VirtualSpace.
size_t commit_granularity = (mode == Disable || !UseLargePages) ?
os::vm_page_size() : os::large_page_size();
assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
}
reserved.release();
}
static void test_virtual_space_actual_committed_space_one_large_page() {
if (!UseLargePages) {
return;
}
size_t large_page_size = os::large_page_size();
ReservedSpace reserved(large_page_size, large_page_size, true);
assert(reserved.is_reserved(), "Must be");
VirtualSpace vs;
bool initialized = vs.initialize(reserved, 0);
assert(initialized, "Failed to initialize VirtualSpace");
vs.expand_by(large_page_size, false);
assert_equals(vs.actual_committed_size(), large_page_size);
reserved.release();
}
static void test_virtual_space_actual_committed_space() {
test_virtual_space_actual_committed_space(4 * K, 0);
test_virtual_space_actual_committed_space(4 * K, 4 * K);
test_virtual_space_actual_committed_space(8 * K, 0);
test_virtual_space_actual_committed_space(8 * K, 4 * K);
test_virtual_space_actual_committed_space(8 * K, 8 * K);
test_virtual_space_actual_committed_space(12 * K, 0);
test_virtual_space_actual_committed_space(12 * K, 4 * K);
test_virtual_space_actual_committed_space(12 * K, 8 * K);
test_virtual_space_actual_committed_space(12 * K, 12 * K);
test_virtual_space_actual_committed_space(64 * K, 0);
test_virtual_space_actual_committed_space(64 * K, 32 * K);
test_virtual_space_actual_committed_space(64 * K, 64 * K);
test_virtual_space_actual_committed_space(2 * M, 0);
test_virtual_space_actual_committed_space(2 * M, 4 * K);
test_virtual_space_actual_committed_space(2 * M, 64 * K);
test_virtual_space_actual_committed_space(2 * M, 1 * M);
test_virtual_space_actual_committed_space(2 * M, 2 * M);
test_virtual_space_actual_committed_space(10 * M, 0);
test_virtual_space_actual_committed_space(10 * M, 4 * K);
test_virtual_space_actual_committed_space(10 * M, 8 * K);
test_virtual_space_actual_committed_space(10 * M, 1 * M);
test_virtual_space_actual_committed_space(10 * M, 2 * M);
test_virtual_space_actual_committed_space(10 * M, 5 * M);
test_virtual_space_actual_committed_space(10 * M, 10 * M);
}
static void test_virtual_space_disable_large_pages() {
if (!UseLargePages) {
return;
}
// These test cases verify that if we force VirtualSpace to disable large pages
test_virtual_space_actual_committed_space(10 * M, 0, Disable);
test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
test_virtual_space_actual_committed_space(10 * M, 0, Commit);
test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
}
static void test_virtual_space() {
test_virtual_space_actual_committed_space();
test_virtual_space_actual_committed_space_one_large_page();
test_virtual_space_disable_large_pages();
}
};
void TestVirtualSpace_test() {
TestVirtualSpace::test_virtual_space();
}
#endif // PRODUCT
#endif


@@ -253,21 +253,6 @@ WB_ENTRY(void, WB_PrintHeapSizes(JNIEnv* env, jobject o)) {
}
WB_END
#ifndef PRODUCT
// Forward declaration
void TestReservedSpace_test();
void TestReserveMemorySpecial_test();
void TestVirtualSpace_test();
#endif
WB_ENTRY(void, WB_RunMemoryUnitTests(JNIEnv* env, jobject o))
#ifndef PRODUCT
TestReservedSpace_test();
TestReserveMemorySpecial_test();
TestVirtualSpace_test();
#endif
WB_END
WB_ENTRY(void, WB_ReadFromNoaccessArea(JNIEnv* env, jobject o))
size_t granularity = os::vm_allocation_granularity();
ReservedHeapSpace rhs(100 * granularity, granularity, false);
@@ -2348,7 +2333,6 @@ static JNINativeMethod methods[] = {
{CC"getCompressedOopsMaxHeapSize", CC"()J",
(void*)&WB_GetCompressedOopsMaxHeapSize},
{CC"printHeapSizes", CC"()V", (void*)&WB_PrintHeapSizes },
{CC"runMemoryUnitTests", CC"()V", (void*)&WB_RunMemoryUnitTests},
{CC"readFromNoaccessArea",CC"()V", (void*)&WB_ReadFromNoaccessArea},
{CC"stressVirtualSpaceResize",CC"(JJJ)I", (void*)&WB_StressVirtualSpaceResize},
#if INCLUDE_CDS


@@ -0,0 +1,98 @@
/*
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef GTEST_CONCURRENT_TEST_RUNNER_INLINE_HPP
#define GTEST_CONCURRENT_TEST_RUNNER_INLINE_HPP
#include "memory/allocation.hpp"
#include "runtime/semaphore.hpp"
#include "runtime/thread.inline.hpp"
#include "threadHelper.inline.hpp"
// This file contains helper classes to run unit tests concurrently in multiple threads.
// Base class for test runnable. Override runUnitTest() to specify what to run.
class TestRunnable {
public:
virtual void runUnitTest() const = 0;
};
// This class represents a thread for a unit test.
class UnitTestThread : public JavaTestThread {
public:
// runnableArg - what to run
// doneArg - a semaphore to notify when the thread is done running
// testDurationArg - how long to run (in milliseconds)
UnitTestThread(TestRunnable* const runnableArg, Semaphore* doneArg, const long testDurationArg) :
JavaTestThread(doneArg), runnable(runnableArg), testDuration(testDurationArg) {}
// from JavaTestThread
void main_run() {
long stopTime = os::javaTimeMillis() + testDuration;
while (os::javaTimeMillis() < stopTime) {
runnable->runUnitTest();
}
}
private:
TestRunnable* const runnable;
const long testDuration;
};
// Helper class for running a given unit test concurrently in multiple threads.
class ConcurrentTestRunner {
public:
// runnableArg - what to run
// nrOfThreadsArg - how many threads to use concurrently
// testDurationMillisArg - duration for each test run
ConcurrentTestRunner(TestRunnable* const runnableArg, int nrOfThreadsArg, long testDurationMillisArg) :
unitTestRunnable(runnableArg),
nrOfThreads(nrOfThreadsArg),
testDurationMillis(testDurationMillisArg) {}
void run() {
Semaphore done(0);
UnitTestThread** t = NEW_C_HEAP_ARRAY(UnitTestThread*, nrOfThreads, mtInternal);
for (int i = 0; i < nrOfThreads; i++) {
t[i] = new UnitTestThread(unitTestRunnable, &done, testDurationMillis);
}
for (int i = 0; i < nrOfThreads; i++) {
t[i]->doit();
}
for (int i = 0; i < nrOfThreads; i++) {
done.wait();
}
FREE_C_HEAP_ARRAY(UnitTestThread**, t);
}
private:
TestRunnable* const unitTestRunnable;
const int nrOfThreads;
const long testDurationMillis;
};
#endif // GTEST_CONCURRENT_TEST_RUNNER_INLINE_HPP
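For orientation: a test drives this runner by subclassing TestRunnable and passing an instance to ConcurrentTestRunner together with a thread count and a per-thread duration in milliseconds. A minimal sketch follows; the DummyRunnable and Example suite names are illustrative only, not part of this commit, but the pattern matches the concurrent TEST_VM cases added in the gtest files below:

#include "concurrentTestRunner.inline.hpp"
#include "unittest.hpp"

// Hypothetical runnable: runUnitTest() is what each thread loops over.
class DummyRunnable : public TestRunnable {
public:
  void runUnitTest() const {
    // body of the unit test to hammer from multiple threads
  }
};

TEST_VM(Example, dummy_concurrent) {
  DummyRunnable runnable;
  // 30 threads, each looping runUnitTest() for 15000 ms
  ConcurrentTestRunner testRunner(&runnable, 30, 15000);
  testRunner.run();
}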


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#include "runtime/os.hpp"
#include "oops/oop.hpp"
#include "utilities/align.hpp"
#include "concurrentTestRunner.inline.hpp"
#include "unittest.hpp"
namespace {
@@ -337,3 +338,343 @@ TEST_VM(VirtualSpace, disable_large_pages) {
EXPECT_NO_FATAL_FAILURE(test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit));
EXPECT_NO_FATAL_FAILURE(test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit));
}
// ========================= concurrent virtual space memory tests
// This class has been imported from the original "internal VM test" with minor modifications,
// specifically using GTest asserts instead of native HotSpot asserts.
class TestReservedSpace : AllStatic {
public:
static void small_page_write(void* addr, size_t size) {
size_t page_size = os::vm_page_size();
char* end = (char*)addr + size;
for (char* p = (char*)addr; p < end; p += page_size) {
*p = 1;
}
}
static void release_memory_for_test(ReservedSpace rs) {
if (rs.special()) {
EXPECT_TRUE(os::release_memory_special(rs.base(), rs.size()));
} else {
EXPECT_TRUE(os::release_memory(rs.base(), rs.size()));
}
}
static void test_reserved_space1(size_t size, size_t alignment) {
ASSERT_TRUE(is_aligned(size, alignment)) << "Incorrect input parameters";
ReservedSpace rs(size, // size
alignment, // alignment
UseLargePages, // large
(char *)NULL); // requested_address
EXPECT_TRUE(rs.base() != NULL);
EXPECT_EQ(rs.size(), size) << "rs.size: " << rs.size();
EXPECT_TRUE(is_aligned(rs.base(), alignment)) << "aligned sizes should always give aligned addresses";
EXPECT_TRUE(is_aligned(rs.size(), alignment)) << "aligned sizes should always give aligned addresses";
if (rs.special()) {
small_page_write(rs.base(), size);
}
release_memory_for_test(rs);
}
static void test_reserved_space2(size_t size) {
ASSERT_TRUE(is_aligned(size, os::vm_allocation_granularity())) << "Must be at least AG aligned";
ReservedSpace rs(size);
EXPECT_TRUE(rs.base() != NULL);
EXPECT_EQ(rs.size(), size) << "rs.size: " << rs.size();
if (rs.special()) {
small_page_write(rs.base(), size);
}
release_memory_for_test(rs);
}
static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
if (size < alignment) {
// Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
ASSERT_EQ((size_t)os::vm_page_size(), os::large_page_size()) << "Test needs further refinement";
return;
}
EXPECT_TRUE(is_aligned(size, os::vm_allocation_granularity())) << "Must be at least AG aligned";
EXPECT_TRUE(is_aligned(size, alignment)) << "Must be at least aligned against alignment";
bool large = maybe_large && UseLargePages && size >= os::large_page_size();
ReservedSpace rs(size, alignment, large);
EXPECT_TRUE(rs.base() != NULL);
EXPECT_EQ(rs.size(), size) << "rs.size: " << rs.size();
if (rs.special()) {
small_page_write(rs.base(), size);
}
release_memory_for_test(rs);
}
static void test_reserved_space1() {
size_t size = 2 * 1024 * 1024;
size_t ag = os::vm_allocation_granularity();
test_reserved_space1(size, ag);
test_reserved_space1(size * 2, ag);
test_reserved_space1(size * 10, ag);
}
static void test_reserved_space2() {
size_t size = 2 * 1024 * 1024;
size_t ag = os::vm_allocation_granularity();
test_reserved_space2(size * 1);
test_reserved_space2(size * 2);
test_reserved_space2(size * 10);
test_reserved_space2(ag);
test_reserved_space2(size - ag);
test_reserved_space2(size);
test_reserved_space2(size + ag);
test_reserved_space2(size * 2);
test_reserved_space2(size * 2 - ag);
test_reserved_space2(size * 2 + ag);
test_reserved_space2(size * 3);
test_reserved_space2(size * 3 - ag);
test_reserved_space2(size * 3 + ag);
test_reserved_space2(size * 10);
test_reserved_space2(size * 10 + size / 2);
}
static void test_reserved_space3() {
size_t ag = os::vm_allocation_granularity();
test_reserved_space3(ag, ag , false);
test_reserved_space3(ag * 2, ag , false);
test_reserved_space3(ag * 3, ag , false);
test_reserved_space3(ag * 2, ag * 2, false);
test_reserved_space3(ag * 4, ag * 2, false);
test_reserved_space3(ag * 8, ag * 2, false);
test_reserved_space3(ag * 4, ag * 4, false);
test_reserved_space3(ag * 8, ag * 4, false);
test_reserved_space3(ag * 16, ag * 4, false);
if (UseLargePages) {
size_t lp = os::large_page_size();
// Without large pages
test_reserved_space3(lp, ag * 4, false);
test_reserved_space3(lp * 2, ag * 4, false);
test_reserved_space3(lp * 4, ag * 4, false);
test_reserved_space3(lp, lp , false);
test_reserved_space3(lp * 2, lp , false);
test_reserved_space3(lp * 3, lp , false);
test_reserved_space3(lp * 2, lp * 2, false);
test_reserved_space3(lp * 4, lp * 2, false);
test_reserved_space3(lp * 8, lp * 2, false);
// With large pages
test_reserved_space3(lp, ag * 4 , true);
test_reserved_space3(lp * 2, ag * 4, true);
test_reserved_space3(lp * 4, ag * 4, true);
test_reserved_space3(lp, lp , true);
test_reserved_space3(lp * 2, lp , true);
test_reserved_space3(lp * 3, lp , true);
test_reserved_space3(lp * 2, lp * 2, true);
test_reserved_space3(lp * 4, lp * 2, true);
test_reserved_space3(lp * 8, lp * 2, true);
}
}
static void test_reserved_space() {
test_reserved_space1();
test_reserved_space2();
test_reserved_space3();
}
};
class TestVirtualSpace : AllStatic {
enum TestLargePages {
Default,
Disable,
Reserve,
Commit
};
static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
switch(mode) {
default:
case Default:
case Reserve:
return ReservedSpace(reserve_size_aligned);
case Disable:
case Commit:
return ReservedSpace(reserve_size_aligned,
os::vm_allocation_granularity(),
/* large */ false);
}
}
static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
switch(mode) {
default:
case Default:
case Reserve:
return vs.initialize(rs, 0);
case Disable:
return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
case Commit:
return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
}
}
public:
static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
TestLargePages mode = Default) {
size_t granularity = os::vm_allocation_granularity();
size_t reserve_size_aligned = align_up(reserve_size, granularity);
ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
EXPECT_TRUE(reserved.is_reserved());
VirtualSpace vs;
bool initialized = initialize_virtual_space(vs, reserved, mode);
EXPECT_TRUE(initialized) << "Failed to initialize VirtualSpace";
vs.expand_by(commit_size, false);
if (vs.special()) {
EXPECT_EQ(vs.actual_committed_size(), reserve_size_aligned);
} else {
EXPECT_GE(vs.actual_committed_size(), commit_size);
// Approximate the commit granularity.
// Make sure that we don't commit using large pages
// if large pages has been disabled for this VirtualSpace.
size_t commit_granularity = (mode == Disable || !UseLargePages) ?
os::vm_page_size() : os::large_page_size();
EXPECT_LT(vs.actual_committed_size(), commit_size + commit_granularity);
}
reserved.release();
}
static void test_virtual_space_actual_committed_space_one_large_page() {
if (!UseLargePages) {
return;
}
size_t large_page_size = os::large_page_size();
ReservedSpace reserved(large_page_size, large_page_size, true);
EXPECT_TRUE(reserved.is_reserved());
VirtualSpace vs;
bool initialized = vs.initialize(reserved, 0);
EXPECT_TRUE(initialized) << "Failed to initialize VirtualSpace";
vs.expand_by(large_page_size, false);
EXPECT_EQ(vs.actual_committed_size(), large_page_size);
reserved.release();
}
static void test_virtual_space_actual_committed_space() {
test_virtual_space_actual_committed_space(4 * K, 0);
test_virtual_space_actual_committed_space(4 * K, 4 * K);
test_virtual_space_actual_committed_space(8 * K, 0);
test_virtual_space_actual_committed_space(8 * K, 4 * K);
test_virtual_space_actual_committed_space(8 * K, 8 * K);
test_virtual_space_actual_committed_space(12 * K, 0);
test_virtual_space_actual_committed_space(12 * K, 4 * K);
test_virtual_space_actual_committed_space(12 * K, 8 * K);
test_virtual_space_actual_committed_space(12 * K, 12 * K);
test_virtual_space_actual_committed_space(64 * K, 0);
test_virtual_space_actual_committed_space(64 * K, 32 * K);
test_virtual_space_actual_committed_space(64 * K, 64 * K);
test_virtual_space_actual_committed_space(2 * M, 0);
test_virtual_space_actual_committed_space(2 * M, 4 * K);
test_virtual_space_actual_committed_space(2 * M, 64 * K);
test_virtual_space_actual_committed_space(2 * M, 1 * M);
test_virtual_space_actual_committed_space(2 * M, 2 * M);
test_virtual_space_actual_committed_space(10 * M, 0);
test_virtual_space_actual_committed_space(10 * M, 4 * K);
test_virtual_space_actual_committed_space(10 * M, 8 * K);
test_virtual_space_actual_committed_space(10 * M, 1 * M);
test_virtual_space_actual_committed_space(10 * M, 2 * M);
test_virtual_space_actual_committed_space(10 * M, 5 * M);
test_virtual_space_actual_committed_space(10 * M, 10 * M);
}
static void test_virtual_space_disable_large_pages() {
if (!UseLargePages) {
return;
}
// These test cases verify that if we force VirtualSpace to disable large pages
test_virtual_space_actual_committed_space(10 * M, 0, Disable);
test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
test_virtual_space_actual_committed_space(10 * M, 0, Commit);
test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
}
static void test_virtual_space() {
test_virtual_space_actual_committed_space();
test_virtual_space_actual_committed_space_one_large_page();
test_virtual_space_disable_large_pages();
}
};
class ReservedSpaceRunnable : public TestRunnable {
public:
void runUnitTest() const {
TestReservedSpace::test_reserved_space();
}
};
TEST_VM(VirtualSpace, os_reserve_space_concurrent) {
ReservedSpaceRunnable runnable;
ConcurrentTestRunner testRunner(&runnable, 30, 15000);
testRunner.run();
}
class VirtualSpaceRunnable : public TestRunnable {
public:
void runUnitTest() const {
TestVirtualSpace::test_virtual_space();
}
};
TEST_VM(VirtualSpace, os_virtual_space_concurrent) {
VirtualSpaceRunnable runnable;
ConcurrentTestRunner testRunner(&runnable, 30, 15000);
testRunner.run();
}
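Assuming the standard JDK gtest wrapper described in doc/testing.md, these suites can be run selectively by test-name prefix, e.g.:

make test TEST="gtest:VirtualSpace"

which would pick up the two concurrent variants above along with the existing VirtualSpace tests.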


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,7 @@
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "concurrentTestRunner.inline.hpp"
#include "unittest.hpp"
namespace {
@@ -242,4 +243,180 @@ TEST_VM(os_linux, reserve_memory_special_shm) {
}
}
class TestReserveMemorySpecial : AllStatic {
public:
static void small_page_write(void* addr, size_t size) {
size_t page_size = os::vm_page_size();
char* end = (char*)addr + size;
for (char* p = (char*)addr; p < end; p += page_size) {
*p = 1;
}
}
static void test_reserve_memory_special_huge_tlbfs_only(size_t size) {
if (!UseHugeTLBFS) {
return;
}
char* addr = os::Linux::reserve_memory_special_huge_tlbfs_only(size, NULL, false);
if (addr != NULL) {
small_page_write(addr, size);
os::Linux::release_memory_special_huge_tlbfs(addr, size);
}
}
static void test_reserve_memory_special_huge_tlbfs_only() {
if (!UseHugeTLBFS) {
return;
}
size_t lp = os::large_page_size();
for (size_t size = lp; size <= lp * 10; size += lp) {
test_reserve_memory_special_huge_tlbfs_only(size);
}
}
static void test_reserve_memory_special_huge_tlbfs_mixed() {
size_t lp = os::large_page_size();
size_t ag = os::vm_allocation_granularity();
// sizes to test
const size_t sizes[] = {
lp, lp + ag, lp + lp / 2, lp * 2,
lp * 2 + ag, lp * 2 - ag, lp * 2 + lp / 2,
lp * 10, lp * 10 + lp / 2
};
const int num_sizes = sizeof(sizes) / sizeof(size_t);
// For each size/alignment combination, we test three scenarios:
// 1) with req_addr == NULL
// 2) with a non-null req_addr at which we expect to successfully allocate
// 3) with a non-null req_addr which contains a pre-existing mapping, at which we
// expect the allocation to either fail or to ignore req_addr
// Pre-allocate two areas; they shall be as large as the largest allocation
// and aligned to the largest alignment we will be testing.
const size_t mapping_size = sizes[num_sizes - 1] * 2;
char* const mapping1 = (char*) ::mmap(NULL, mapping_size,
PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
-1, 0);
EXPECT_NE(mapping1, MAP_FAILED);
char* const mapping2 = (char*) ::mmap(NULL, mapping_size,
PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
-1, 0);
EXPECT_NE(mapping2, MAP_FAILED);
// Unmap the first mapping, but leave the second mapping intact: the first
// mapping will serve as a value for a "good" req_addr (case 2). The second
// mapping, still intact, as "bad" req_addr (case 3).
::munmap(mapping1, mapping_size);
// Case 1
for (int i = 0; i < num_sizes; i++) {
const size_t size = sizes[i];
for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
if (p != NULL) {
EXPECT_TRUE(is_aligned(p, alignment));
small_page_write(p, size);
os::Linux::release_memory_special_huge_tlbfs(p, size);
}
}
}
// Case 2
for (int i = 0; i < num_sizes; i++) {
const size_t size = sizes[i];
for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
char* const req_addr = align_up(mapping1, alignment);
char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
if (p != NULL) {
EXPECT_EQ(p, req_addr);
small_page_write(p, size);
os::Linux::release_memory_special_huge_tlbfs(p, size);
}
}
}
// Case 3
for (int i = 0; i < num_sizes; i++) {
const size_t size = sizes[i];
for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
char* const req_addr = align_up(mapping2, alignment);
char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
// as the area around req_addr contains already existing mappings, the API should always
// return NULL (as per contract, it cannot return another address)
EXPECT_TRUE(p == NULL);
}
}
::munmap(mapping2, mapping_size);
}
static void test_reserve_memory_special_huge_tlbfs() {
if (!UseHugeTLBFS) {
return;
}
test_reserve_memory_special_huge_tlbfs_only();
test_reserve_memory_special_huge_tlbfs_mixed();
}
static void test_reserve_memory_special_shm(size_t size, size_t alignment) {
if (!UseSHM) {
return;
}
char* addr = os::Linux::reserve_memory_special_shm(size, alignment, NULL, false);
if (addr != NULL) {
EXPECT_TRUE(is_aligned(addr, alignment));
EXPECT_TRUE(is_aligned(addr, os::large_page_size()));
small_page_write(addr, size);
os::Linux::release_memory_special_shm(addr, size);
}
}
static void test_reserve_memory_special_shm() {
size_t lp = os::large_page_size();
size_t ag = os::vm_allocation_granularity();
for (size_t size = ag; size < lp * 3; size += ag) {
for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
test_reserve_memory_special_shm(size, alignment);
}
}
}
static void test() {
test_reserve_memory_special_huge_tlbfs();
test_reserve_memory_special_shm();
}
};
TEST_VM(os_linux, reserve_memory_special) {
TestReserveMemorySpecial::test();
}
class ReserveMemorySpecialRunnable : public TestRunnable {
public:
void runUnitTest() const {
TestReserveMemorySpecial::test();
}
};
TEST_VM(os_linux, reserve_memory_special_concurrent) {
ReserveMemorySpecialRunnable runnable;
ConcurrentTestRunner testRunner(&runnable, 30, 15000);
testRunner.run();
}
#endif


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
#include "runtime/os.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/globals_extension.hpp"
#include "concurrentTestRunner.inline.hpp"
#include "unittest.hpp"
namespace {
@@ -51,7 +52,7 @@ namespace {
// that is reported is when the test tries to allocate at a particular location but gets a
// different valid one. A NULL return value at this point is not considered an error but may
// be legitimate.
TEST_VM(os_windows, reserve_memory_special) {
void TestReserveMemorySpecial_test() {
if (!UseLargePages) {
return;
}
@@ -688,4 +689,21 @@ TEST_VM(os_windows, handle_long_paths) {
delete_empty_rel_directory_w(nearly_long_rel_path);
}
TEST_VM(os_windows, reserve_memory_special) {
TestReserveMemorySpecial_test();
}
class ReserveMemorySpecialRunnable : public TestRunnable {
public:
void runUnitTest() const {
TestReserveMemorySpecial_test();
}
};
TEST_VM(os_windows, reserve_memory_special_concurrent) {
ReserveMemorySpecialRunnable runnable;
ConcurrentTestRunner testRunner(&runnable, 30, 15000);
testRunner.run();
}
#endif


@@ -281,7 +281,6 @@ tier1_runtime = \
-runtime/InvocationTests \
-runtime/logging/MonitorMismatchTest.java \
-runtime/memory/ReserveMemory.java \
-runtime/memory/RunUnitTestsConcurrently.java \
-runtime/Metaspace/FragmentMetaspace.java \
-runtime/Metaspace/FragmentMetaspaceSimple.java \
-runtime/MirrorFrame/Test8003720.java \


@@ -1,74 +0,0 @@
/*
* Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @summary Test launches unit tests inside vm concurrently
* @requires vm.debug
* @requires vm.bits == 64
* @library /test/lib
* @modules java.base/jdk.internal.misc
* java.management
* @build sun.hotspot.WhiteBox
* @run driver ClassFileInstaller sun.hotspot.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI RunUnitTestsConcurrently 30 15000
*/
import sun.hotspot.WhiteBox;
public class RunUnitTestsConcurrently {
private static WhiteBox wb;
private static long timeout;
private static long timeStamp;
public static class Worker implements Runnable {
@Override
public void run() {
while (System.currentTimeMillis() - timeStamp < timeout) {
wb.runMemoryUnitTests();
}
}
}
public static void main(String[] args) throws InterruptedException {
wb = WhiteBox.getWhiteBox();
System.out.println("Starting threads");
int threads = Integer.valueOf(args[0]);
timeout = Long.valueOf(args[1]);
timeStamp = System.currentTimeMillis();
Thread[] threadsArray = new Thread[threads];
for (int i = 0; i < threads; i++) {
threadsArray[i] = new Thread(new Worker());
threadsArray[i].start();
}
for (int i = 0; i < threads; i++) {
threadsArray[i].join();
}
System.out.println("Quitting test.");
}
}


@@ -509,7 +509,6 @@ public class WhiteBox {
// Tests on ReservedSpace/VirtualSpace classes
public native int stressVirtualSpaceResize(long reservedSpaceSize, long magnitude, long iterations);
public native void runMemoryUnitTests();
public native void readFromNoaccessArea();
public native long getThreadStackSize();
public native long getThreadRemainingStackSize();