8213269: convert test/hotspot/jtreg/runtime/memory/RunUnitTestsConcurrently to gtest

Reviewed-by: iignatyev, coleenp, stuefe
Mikhailo Seledtsov 2021-03-08 20:09:59 +00:00
parent 17853ee92c
commit 9221540e2a
13 changed files with 638 additions and 668 deletions

@@ -5535,172 +5535,3 @@ void os::print_memory_mappings(char* addr, size_t bytes, outputStream* st) {
st->cr();
}
}
/////////////// Unit tests ///////////////

#ifndef PRODUCT

class TestReserveMemorySpecial : AllStatic {
 public:
  static void small_page_write(void* addr, size_t size) {
    size_t page_size = os::vm_page_size();

    char* end = (char*)addr + size;
    for (char* p = (char*)addr; p < end; p += page_size) {
      *p = 1;
    }
  }

  static void test_reserve_memory_special_huge_tlbfs_only(size_t size) {
    if (!UseHugeTLBFS) {
      return;
    }

    char* addr = os::Linux::reserve_memory_special_huge_tlbfs_only(size, NULL, false);

    if (addr != NULL) {
      small_page_write(addr, size);

      os::Linux::release_memory_special_huge_tlbfs(addr, size);
    }
  }

  static void test_reserve_memory_special_huge_tlbfs_only() {
    if (!UseHugeTLBFS) {
      return;
    }

    size_t lp = os::large_page_size();

    for (size_t size = lp; size <= lp * 10; size += lp) {
      test_reserve_memory_special_huge_tlbfs_only(size);
    }
  }

  static void test_reserve_memory_special_huge_tlbfs_mixed() {
    size_t lp = os::large_page_size();
    size_t ag = os::vm_allocation_granularity();

    // sizes to test
    const size_t sizes[] = {
      lp, lp + ag, lp + lp / 2, lp * 2,
      lp * 2 + ag, lp * 2 - ag, lp * 2 + lp / 2,
      lp * 10, lp * 10 + lp / 2
    };
    const int num_sizes = sizeof(sizes) / sizeof(size_t);

    // For each size/alignment combination, we test three scenarios:
    // 1) with req_addr == NULL
    // 2) with a non-null req_addr at which we expect to successfully allocate
    // 3) with a non-null req_addr which contains a pre-existing mapping, at which we
    //    expect the allocation to either fail or to ignore req_addr

    // Pre-allocate two areas; they shall be as large as the largest allocation
    // and aligned to the largest alignment we will be testing.
    const size_t mapping_size = sizes[num_sizes - 1] * 2;
    char* const mapping1 = (char*) ::mmap(NULL, mapping_size,
                                          PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
                                          -1, 0);
    assert(mapping1 != MAP_FAILED, "should work");

    char* const mapping2 = (char*) ::mmap(NULL, mapping_size,
                                          PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
                                          -1, 0);
    assert(mapping2 != MAP_FAILED, "should work");

    // Unmap the first mapping, but leave the second mapping intact: the first
    // mapping will serve as a value for a "good" req_addr (case 2). The second
    // mapping, still intact, as "bad" req_addr (case 3).
    ::munmap(mapping1, mapping_size);

    // Case 1
    for (int i = 0; i < num_sizes; i++) {
      const size_t size = sizes[i];
      for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
        char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
        if (p != NULL) {
          assert(is_aligned(p, alignment), "must be");
          small_page_write(p, size);
          os::Linux::release_memory_special_huge_tlbfs(p, size);
        }
      }
    }

    // Case 2
    for (int i = 0; i < num_sizes; i++) {
      const size_t size = sizes[i];
      for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
        char* const req_addr = align_up(mapping1, alignment);
        char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
        if (p != NULL) {
          assert(p == req_addr, "must be");
          small_page_write(p, size);
          os::Linux::release_memory_special_huge_tlbfs(p, size);
        }
      }
    }

    // Case 3
    for (int i = 0; i < num_sizes; i++) {
      const size_t size = sizes[i];
      for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
        char* const req_addr = align_up(mapping2, alignment);
        char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
        // as the area around req_addr contains already existing mappings, the API should always
        // return NULL (as per contract, it cannot return another address)
        assert(p == NULL, "must be");
      }
    }

    ::munmap(mapping2, mapping_size);
  }

  static void test_reserve_memory_special_huge_tlbfs() {
    if (!UseHugeTLBFS) {
      return;
    }

    test_reserve_memory_special_huge_tlbfs_only();
    test_reserve_memory_special_huge_tlbfs_mixed();
  }

  static void test_reserve_memory_special_shm(size_t size, size_t alignment) {
    if (!UseSHM) {
      return;
    }

    char* addr = os::Linux::reserve_memory_special_shm(size, alignment, NULL, false);

    if (addr != NULL) {
      assert(is_aligned(addr, alignment), "Check");
      assert(is_aligned(addr, os::large_page_size()), "Check");

      small_page_write(addr, size);

      os::Linux::release_memory_special_shm(addr, size);
    }
  }

  static void test_reserve_memory_special_shm() {
    size_t lp = os::large_page_size();
    size_t ag = os::vm_allocation_granularity();

    for (size_t size = ag; size < lp * 3; size += ag) {
      for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
        test_reserve_memory_special_shm(size, alignment);
      }
    }
  }

  static void test() {
    test_reserve_memory_special_huge_tlbfs();
    test_reserve_memory_special_shm();
  }
};

void TestReserveMemorySpecial_test() {
  TestReserveMemorySpecial::test();
}

#endif
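
For context, this hunk deletes the debug-only in-VM self-test that the jtreg wrapper RunUnitTestsConcurrently used to drive via TestReserveMemorySpecial_test(); the replacement lives in the new gtest files elsewhere in this 13-file change. Below is a minimal, illustrative sketch of what such a conversion looks like with HotSpot's gtest wrapper: TEST_VM runs the body inside an initialized VM, and EXPECT_TRUE replaces the debug-only assert() calls. The test name os_linux_example, the exact include list, and the single size/alignment combination are assumptions for illustration, not the actual contents of the new test files.

    // Illustrative sketch only (not the actual patch): one of the removed checks
    // rewritten as a HotSpot gtest. Names and includes are assumptions.
    #include "precompiled.hpp"
    #include "runtime/os.hpp"
    #include "os_linux.hpp"       // declares os::Linux (Linux-specific os layer)
    #include "utilities/align.hpp"
    #include "unittest.hpp"

    TEST_VM(os_linux_example, reserve_memory_special_shm) {
      if (!UseSHM) {
        return;  // large pages via SysV shm not enabled; nothing to test
      }

      const size_t size      = os::large_page_size();
      const size_t alignment = os::vm_allocation_granularity();

      char* addr = os::Linux::reserve_memory_special_shm(size, alignment, NULL, false);
      if (addr != NULL) {
        // Same invariants the removed code asserted, now as gtest expectations.
        EXPECT_TRUE(is_aligned(addr, alignment));
        EXPECT_TRUE(is_aligned(addr, os::large_page_size()));

        // Touch every small page so a broken reservation faults inside the test.
        for (char* p = addr; p < addr + size; p += os::vm_page_size()) {
          *p = 1;
        }

        os::Linux::release_memory_special_shm(addr, size);
      }
    }

Because gtest-based tests are built into a separate test library and launched against a fresh VM, the #ifndef PRODUCT hook removed above is no longer needed in os_linux.cpp itself.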