8262291: Refactor reserve_memory_special_huge_tlbfs

Reviewed-by: iwalulya, stuefe
Stefan Johansson 2021-04-13 08:59:09 +00:00
parent 008fc75a29
commit f2f7aa3bec
3 changed files with 103 additions and 141 deletions
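
In short: the two HugeTLBFS reservation paths, reserve_memory_special_huge_tlbfs_only and reserve_memory_special_huge_tlbfs_mixed, are folded into a single reserve_memory_special_huge_tlbfs. The unified path reserves the whole range uncommitted, commits the large-page-aligned prefix through the new commit_memory_special helper, and commits any small-page remainder the same way. The gtests drop the _only/_mixed split in favor of _size_aligned/_size_not_aligned variants.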

src/hotspot/os/linux/os_linux.cpp View file

@@ -3898,8 +3898,8 @@ char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment,
return addr;
}
static void warn_on_large_pages_failure(char* req_addr, size_t bytes,
int error) {
static void warn_on_commit_special_failure(char* req_addr, size_t bytes,
size_t page_size, int error) {
assert(error == ENOMEM, "Only expect to fail if no memory is available");
bool warn_on_failure = UseLargePages &&
@@ -3909,122 +3909,45 @@ static void warn_on_large_pages_failure(char* req_addr, size_t bytes,
if (warn_on_failure) {
char msg[128];
jio_snprintf(msg, sizeof(msg), "Failed to reserve large pages memory req_addr: "
PTR_FORMAT " bytes: " SIZE_FORMAT " (errno = %d).", req_addr, bytes, error);
jio_snprintf(msg, sizeof(msg), "Failed to reserve and commit memory. req_addr: "
PTR_FORMAT " bytes: " SIZE_FORMAT " page size: "
SIZE_FORMAT " (errno = %d).",
req_addr, bytes, page_size, error);
warning("%s", msg);
}
}
char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes,
bool os::Linux::commit_memory_special(size_t bytes,
size_t page_size,
char* req_addr,
bool exec) {
assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
assert(is_aligned(bytes, os::large_page_size()), "Unaligned size");
assert(is_aligned(req_addr, os::large_page_size()), "Unaligned address");
int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
int flags = MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB;
// Ensure the correct page size flag is used when needed.
flags |= hugetlbfs_page_size_flag(os::large_page_size());
char* addr = (char*)::mmap(req_addr, bytes, prot, flags, -1, 0);
if (addr == MAP_FAILED) {
warn_on_large_pages_failure(req_addr, bytes, errno);
return NULL;
}
assert(is_aligned(addr, os::large_page_size()), "Must be");
return addr;
}
// Reserve memory using mmap(MAP_HUGETLB).
// - bytes shall be a multiple of alignment.
// - req_addr can be NULL. If not NULL, it must be a multiple of alignment.
// - alignment sets the alignment at which memory shall be allocated.
// It must be a multiple of allocation granularity.
// Returns address of memory or NULL. If req_addr was not NULL, will only return
// req_addr or NULL.
char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes,
size_t alignment,
char* req_addr,
bool exec) {
size_t large_page_size = os::large_page_size();
assert(bytes >= large_page_size, "Shouldn't allocate large pages for small sizes");
assert(is_aligned(req_addr, alignment), "Must be");
assert(is_aligned(bytes, alignment), "Must be");
// First reserve - but not commit - the address range in small pages.
char* const start = anon_mmap_aligned(req_addr, bytes, alignment);
if (start == NULL) {
return NULL;
}
assert(is_aligned(start, alignment), "Must be");
char* end = start + bytes;
// Find the regions of the allocated chunk that can be promoted to large pages.
char* lp_start = align_up(start, large_page_size);
char* lp_end = align_down(end, large_page_size);
size_t lp_bytes = lp_end - lp_start;
assert(is_aligned(lp_bytes, large_page_size), "Must be");
if (lp_bytes == 0) {
// The mapped region doesn't even span the start and the end of a large page.
// Fall back to allocating a non-special area.
::munmap(start, end - start);
return NULL;
}
assert(UseLargePages && UseHugeTLBFS, "Should only get here when HugeTLBFS large pages are used");
assert(is_aligned(bytes, page_size), "Unaligned size");
assert(is_aligned(req_addr, page_size), "Unaligned address");
assert(req_addr != NULL, "Must have a requested address for special mappings");
int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
int flags = MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED;
void* result;
// Commit small-paged leading area.
if (start != lp_start) {
result = ::mmap(start, lp_start - start, prot, flags, -1, 0);
if (result == MAP_FAILED) {
::munmap(lp_start, end - lp_start);
return NULL;
// For large pages, additional flags are required.
if (page_size > (size_t) os::vm_page_size()) {
flags |= MAP_HUGETLB | hugetlbfs_page_size_flag(page_size);
}
char* addr = (char*)::mmap(req_addr, bytes, prot, flags, -1, 0);
if (addr == MAP_FAILED) {
warn_on_commit_special_failure(req_addr, bytes, page_size, errno);
return false;
}
// Commit large-paged area.
flags |= MAP_HUGETLB | hugetlbfs_page_size_flag(os::large_page_size());
result = ::mmap(lp_start, lp_bytes, prot, flags, -1, 0);
if (result == MAP_FAILED) {
warn_on_large_pages_failure(lp_start, lp_bytes, errno);
// If the mmap above fails, the large pages region will be unmapped and we
// have regions before and after with small pages. Release these regions.
//
// |  mapped  |  unmapped  |  mapped  |
// ^          ^            ^          ^
// start      lp_start     lp_end     end
//
::munmap(start, lp_start - start);
::munmap(lp_end, end - lp_end);
return NULL;
}
// Commit small-paged trailing area.
if (lp_end != end) {
result = ::mmap(lp_end, end - lp_end, prot,
MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
-1, 0);
if (result == MAP_FAILED) {
::munmap(start, lp_end - start);
return NULL;
}
}
return start;
log_debug(pagesize)("Commit special mapping: " PTR_FORMAT ", size=" SIZE_FORMAT "%s, page size="
SIZE_FORMAT "%s",
p2i(addr), byte_size_in_exact_unit(bytes),
exact_unit_for_byte_size(bytes),
byte_size_in_exact_unit(page_size),
exact_unit_for_byte_size(page_size));
assert(is_aligned(addr, page_size), "Must be");
return true;
}
char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes,
@@ -4033,15 +3956,54 @@ char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes,
bool exec) {
assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
assert(is_aligned(req_addr, alignment), "Must be");
assert(is_aligned(req_addr, os::large_page_size()), "Must be");
assert(is_aligned(alignment, os::vm_allocation_granularity()), "Must be");
assert(is_power_of_2(os::large_page_size()), "Must be");
assert(bytes >= os::large_page_size(), "Shouldn't allocate large pages for small sizes");
if (is_aligned(bytes, os::large_page_size()) && alignment <= os::large_page_size()) {
return reserve_memory_special_huge_tlbfs_only(bytes, req_addr, exec);
} else {
return reserve_memory_special_huge_tlbfs_mixed(bytes, alignment, req_addr, exec);
// We only end up here when at least 1 large page can be used.
// If the size is not a multiple of the large page size, we
// will mix the types of pages used, but in descending order.
// Start off by reserving a range of the given size that is
// properly aligned. At this point no pages are committed. If
// a requested address is given it will be used and it must be
// aligned to both the large page size and the given alignment.
// The larger of the two will be used.
size_t required_alignment = MAX2(os::large_page_size(), alignment);
char* const aligned_start = anon_mmap_aligned(req_addr, bytes, required_alignment);
if (aligned_start == NULL) {
return NULL;
}
// First commit using large pages.
size_t large_bytes = align_down(bytes, os::large_page_size());
bool large_committed = commit_memory_special(large_bytes, os::large_page_size(), aligned_start, exec);
if (large_committed && bytes == large_bytes) {
// The whole reservation was committed using large pages
// and no small-page remainder is needed, so we are done.
return aligned_start;
}
// The requested size requires some small pages as well.
char* small_start = aligned_start + large_bytes;
size_t small_size = bytes - large_bytes;
if (!large_committed) {
// Failed to commit large pages, so we need to unmap the
// remainder of the original reservation.
::munmap(small_start, small_size);
return NULL;
}
// Commit the remaining bytes using small pages.
bool small_committed = commit_memory_special(small_size, os::vm_page_size(), small_start, exec);
if (!small_committed) {
// Failed to commit the remaining size, so we need to
// unmap the large-page part of the reservation.
::munmap(aligned_start, large_bytes);
return NULL;
}
return aligned_start;
}
char* os::pd_reserve_memory_special(size_t bytes, size_t alignment,
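
To make the new split concrete, here is a small standalone sketch of the size arithmetic reserve_memory_special_huge_tlbfs performs above. The 2 MiB large page and 5 MiB request are assumed example values, and align_down_pow2 is a hypothetical stand-in for HotSpot's align_down:

```cpp
#include <cstddef>
#include <cstdio>

// align_down for power-of-two alignments, as used in the patch.
static size_t align_down_pow2(size_t value, size_t alignment) {
  return value & ~(alignment - 1);
}

int main() {
  const size_t large_page = 2 * 1024 * 1024; // assumed large page size
  const size_t bytes      = 5 * 1024 * 1024; // assumed request, not large-page aligned

  // Prefix committed with large pages, remainder with small pages.
  size_t large_bytes = align_down_pow2(bytes, large_page); // 4 MiB
  size_t small_bytes = bytes - large_bytes;                // 1 MiB

  printf("large-page part: %zu bytes\n", large_bytes);
  printf("small-page part: %zu bytes\n", small_bytes);
  return 0;
}
```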

src/hotspot/os/linux/os_linux.hpp View file

@@ -91,8 +91,7 @@ class Linux {
static char* reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec);
static char* reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec);
static char* reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec);
static char* reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec);
static bool commit_memory_special(size_t bytes, size_t page_size, char* req_addr, bool exec);
static bool release_memory_special_impl(char* base, size_t bytes);
static bool release_memory_special_shm(char* base, size_t bytes);
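
For readers unfamiliar with the underlying syscall: commit_memory_special, declared above, boils down to overlaying an anonymous MAP_FIXED mapping on an existing reservation, adding MAP_HUGETLB only for large pages. Below is a standalone sketch of that idea, not the JVM code; commit_at and all sizes are hypothetical:

```cpp
#include <sys/mman.h>
#include <cerrno>
#include <cstddef>
#include <cstdio>

// Overlay an anonymous MAP_FIXED mapping on an existing reservation.
// MAP_HUGETLB is added only when the requested page size exceeds the
// small page size, mirroring the flag logic in commit_memory_special.
static bool commit_at(char* req_addr, size_t bytes, size_t page_size,
                      size_t small_page_size, bool exec) {
  int prot  = exec ? PROT_READ | PROT_WRITE | PROT_EXEC
                   : PROT_READ | PROT_WRITE;
  int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED;
  if (page_size > small_page_size) {
    // The patch also ORs in hugetlbfs_page_size_flag(page_size) to pick
    // an explicit huge page size; this sketch uses the kernel default.
    flags |= MAP_HUGETLB;
  }
  void* addr = ::mmap(req_addr, bytes, prot, flags, -1, 0);
  if (addr == MAP_FAILED) {
    fprintf(stderr, "commit failed (errno = %d)\n", errno);
    return false;
  }
  return true;
}

int main() {
  const size_t sz = 2 * 1024 * 1024; // assumed size: one 2 MiB huge page
  // Reserve without committing, like anon_mmap_aligned in the patch.
  char* base = (char*)::mmap(nullptr, sz, PROT_NONE,
                             MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                             -1, 0);
  if (base == MAP_FAILED) return 1;
  // Commit with small pages here; pass a larger page_size to try huge pages.
  bool ok = commit_at(base, sz, 4096, 4096, /*exec=*/false);
  ::munmap(base, sz);
  return ok ? 0 : 1;
}
```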

test/hotspot/gtest/runtime/test_os_linux.cpp View file

@@ -47,11 +47,8 @@ namespace {
char* const _ptr;
const size_t _size;
public:
static char* reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec) {
return os::Linux::reserve_memory_special_huge_tlbfs_only(bytes, req_addr, exec);
}
static char* reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec) {
return os::Linux::reserve_memory_special_huge_tlbfs_mixed(bytes, alignment, req_addr, exec);
static char* reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec) {
return os::Linux::reserve_memory_special_huge_tlbfs(bytes, alignment, req_addr, exec);
}
HugeTlbfsMemory(char* const ptr, size_t size) : _ptr(ptr), _size(size) { }
~HugeTlbfsMemory() {
@@ -96,14 +93,14 @@ namespace {
}
}
TEST_VM(os_linux, reserve_memory_special_huge_tlbfs_only) {
TEST_VM(os_linux, reserve_memory_special_huge_tlbfs_size_aligned) {
if (!UseHugeTLBFS) {
return;
}
size_t lp = os::large_page_size();
for (size_t size = lp; size <= lp * 10; size += lp) {
char* addr = HugeTlbfsMemory::reserve_memory_special_huge_tlbfs_only(size, NULL, false);
char* addr = HugeTlbfsMemory::reserve_memory_special_huge_tlbfs(size, lp, NULL, false);
if (addr != NULL) {
HugeTlbfsMemory mr(addr, size);
@@ -112,7 +109,7 @@ TEST_VM(os_linux, reserve_memory_special_huge_tlbfs_only) {
}
}
TEST_VM(os_linux, reserve_memory_special_huge_tlbfs_mixed_without_addr) {
TEST_VM(os_linux, reserve_memory_special_huge_tlbfs_size_not_aligned_without_addr) {
if (!UseHugeTLBFS) {
return;
}
@@ -129,7 +126,7 @@ TEST_VM(os_linux, reserve_memory_special_huge_tlbfs_mixed_without_addr) {
for (int i = 0; i < num_sizes; i++) {
const size_t size = sizes[i];
for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
char* p = HugeTlbfsMemory::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
char* p = HugeTlbfsMemory::reserve_memory_special_huge_tlbfs(size, alignment, NULL, false);
if (p != NULL) {
HugeTlbfsMemory mr(p, size);
EXPECT_PRED2(is_ptr_aligned, p, alignment) << " size = " << size;
@@ -139,7 +136,7 @@ TEST_VM(os_linux, reserve_memory_special_huge_tlbfs_mixed_without_addr) {
}
}
TEST_VM(os_linux, reserve_memory_special_huge_tlbfs_mixed_with_good_req_addr) {
TEST_VM(os_linux, reserve_memory_special_huge_tlbfs_size_not_aligned_with_good_req_addr) {
if (!UseHugeTLBFS) {
return;
}
@@ -167,8 +164,9 @@ TEST_VM(os_linux, reserve_memory_special_huge_tlbfs_mixed_with_good_req_addr) {
for (int i = 0; i < num_sizes; i++) {
const size_t size = sizes[i];
for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
char* const req_addr = align_up(mapping, alignment);
char* p = HugeTlbfsMemory::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
// req_addr must be at least large page aligned.
char* const req_addr = align_up(mapping, MAX2(alignment, lp));
char* p = HugeTlbfsMemory::reserve_memory_special_huge_tlbfs(size, alignment, req_addr, false);
if (p != NULL) {
HugeTlbfsMemory mr(p, size);
ASSERT_EQ(req_addr, p) << " size = " << size << ", alignment = " << alignment;
@@ -179,7 +177,7 @@ TEST_VM(os_linux, reserve_memory_special_huge_tlbfs_mixed_with_good_req_addr) {
}
TEST_VM(os_linux, reserve_memory_special_huge_tlbfs_mixed_with_bad_req_addr) {
TEST_VM(os_linux, reserve_memory_special_huge_tlbfs_size_not_aligned_with_bad_req_addr) {
if (!UseHugeTLBFS) {
return;
}
@@ -216,8 +214,9 @@ TEST_VM(os_linux, reserve_memory_special_huge_tlbfs_mixed_with_bad_req_addr) {
for (int i = 0; i < num_sizes; i++) {
const size_t size = sizes[i];
for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
char* const req_addr = align_up(mapping, alignment);
char* p = HugeTlbfsMemory::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
// req_addr must be at least large page aligned.
char* const req_addr = align_up(mapping, MAX2(alignment, lp));
char* p = HugeTlbfsMemory::reserve_memory_special_huge_tlbfs(size, alignment, req_addr, false);
HugeTlbfsMemory mr(p, size);
// as the area around req_addr contains already existing mappings, the API should always
// return NULL (as per contract, it cannot return another address)
@@ -254,12 +253,12 @@ class TestReserveMemorySpecial : AllStatic {
}
}
static void test_reserve_memory_special_huge_tlbfs_only(size_t size) {
static void test_reserve_memory_special_huge_tlbfs_size_aligned(size_t size, size_t alignment) {
if (!UseHugeTLBFS) {
return;
}
char* addr = os::Linux::reserve_memory_special_huge_tlbfs_only(size, NULL, false);
char* addr = os::Linux::reserve_memory_special_huge_tlbfs(size, alignment, NULL, false);
if (addr != NULL) {
small_page_write(addr, size);
@@ -268,7 +267,7 @@ class TestReserveMemorySpecial : AllStatic {
}
}
static void test_reserve_memory_special_huge_tlbfs_only() {
static void test_reserve_memory_special_huge_tlbfs_size_aligned() {
if (!UseHugeTLBFS) {
return;
}
@@ -276,11 +275,11 @@ class TestReserveMemorySpecial : AllStatic {
size_t lp = os::large_page_size();
for (size_t size = lp; size <= lp * 10; size += lp) {
test_reserve_memory_special_huge_tlbfs_only(size);
test_reserve_memory_special_huge_tlbfs_size_aligned(size, lp);
}
}
static void test_reserve_memory_special_huge_tlbfs_mixed() {
static void test_reserve_memory_special_huge_tlbfs_size_not_aligned() {
size_t lp = os::large_page_size();
size_t ag = os::vm_allocation_granularity();
@@ -320,7 +319,7 @@ class TestReserveMemorySpecial : AllStatic {
for (int i = 0; i < num_sizes; i++) {
const size_t size = sizes[i];
for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
char* p = os::Linux::reserve_memory_special_huge_tlbfs(size, alignment, NULL, false);
if (p != NULL) {
EXPECT_TRUE(is_aligned(p, alignment));
small_page_write(p, size);
@@ -333,8 +332,9 @@ class TestReserveMemorySpecial : AllStatic {
for (int i = 0; i < num_sizes; i++) {
const size_t size = sizes[i];
for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
char* const req_addr = align_up(mapping1, alignment);
char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
// req_addr must be at least large page aligned.
char* const req_addr = align_up(mapping1, MAX2(alignment, lp));
char* p = os::Linux::reserve_memory_special_huge_tlbfs(size, alignment, req_addr, false);
if (p != NULL) {
EXPECT_EQ(p, req_addr);
small_page_write(p, size);
@@ -347,8 +347,9 @@ class TestReserveMemorySpecial : AllStatic {
for (int i = 0; i < num_sizes; i++) {
const size_t size = sizes[i];
for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
char* const req_addr = align_up(mapping2, alignment);
char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
// req_addr must be at least large page aligned.
char* const req_addr = align_up(mapping2, MAX2(alignment, lp));
char* p = os::Linux::reserve_memory_special_huge_tlbfs(size, alignment, req_addr, false);
// as the area around req_addr contains already existing mappings, the API should always
// return NULL (as per contract, it cannot return another address)
EXPECT_TRUE(p == NULL);
@@ -364,8 +365,8 @@ class TestReserveMemorySpecial : AllStatic {
return;
}
test_reserve_memory_special_huge_tlbfs_only();
test_reserve_memory_special_huge_tlbfs_mixed();
test_reserve_memory_special_huge_tlbfs_size_aligned();
test_reserve_memory_special_huge_tlbfs_size_not_aligned();
}
static void test_reserve_memory_special_shm(size_t size, size_t alignment) {
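
The recurring test change above is that the probe address is now aligned to the larger of the loop alignment and the large page size. A minimal standalone check of that pattern, with assumed values; align_up_ptr mimics HotSpot's align_up:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// align_up for pointers and power-of-two alignments, as in the tests.
static char* align_up_ptr(char* p, size_t alignment) {
  uintptr_t v = (uintptr_t)p;
  return (char*)((v + alignment - 1) & ~(uintptr_t)(alignment - 1));
}

int main() {
  const size_t lp        = 2 * 1024 * 1024; // assumed large page size
  const size_t alignment = 64 * 1024;       // one value from the test loop
  char* mapping = (char*)0x700000123000;    // arbitrary example address

  // req_addr must be at least large page aligned, hence MAX2(alignment, lp).
  const size_t req_align = alignment > lp ? alignment : lp;
  char* req_addr = align_up_ptr(mapping, req_align);

  printf("req_addr = %p (aligned to %zu bytes)\n", (void*)req_addr, req_align);
  return 0;
}
```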