Mirror of https://github.com/openjdk/jdk.git (synced 2025-08-26 06:14:49 +02:00)

Merge commit 1a700c9216
1322 changed files with 52051 additions and 19966 deletions
@@ -2676,36 +2676,7 @@ void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
 int os::Linux::commit_memory_impl(char* addr, size_t size,
                                   size_t alignment_hint, bool exec) {
-  int err;
-  if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
-    int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
-    uintptr_t res =
-      (uintptr_t) ::mmap(addr, size, prot,
-                         MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS|MAP_HUGETLB,
-                         -1, 0);
-    if (res != (uintptr_t) MAP_FAILED) {
-      if (UseNUMAInterleaving) {
-        numa_make_global(addr, size);
-      }
-      return 0;
-    }
-
-    err = errno;  // save errno from mmap() call above
-
-    if (!recoverable_mmap_error(err)) {
-      // However, it is not clear that this loss of our reserved mapping
-      // happens with large pages on Linux or that we cannot recover
-      // from the loss. For now, we just issue a warning and we don't
-      // call vm_exit_out_of_memory(). This issue is being tracked by
-      // JBS-8007074.
-      warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
-      // vm_exit_out_of_memory(size, OOM_MMAP_ERROR,
-      //                       "committing reserved memory.");
-    }
-    // Fall through and try to use small pages
-  }
-
-  err = os::Linux::commit_memory_impl(addr, size, exec);
+  int err = os::Linux::commit_memory_impl(addr, size, exec);
+  if (err == 0) {
+    realign_memory(addr, size, alignment_hint);
+  }
@@ -2730,7 +2701,7 @@ void os::pd_commit_memory_or_exit(char* addr, size_t size,
 }

 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
-  if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
+  if (UseTransparentHugePages && alignment_hint > (size_t)vm_page_size()) {
     // We don't check the return value: madvise(MADV_HUGEPAGE) may not
     // be supported or the memory may already be backed by huge pages.
     ::madvise(addr, bytes, MADV_HUGEPAGE);
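Note: the rewritten commit path first commits ordinary small pages (commit_memory_impl) and then lets realign_memory/pd_realign_memory ask for transparent huge page backing. A minimal standalone sketch of that sequence (not HotSpot code; assumes a Linux kernel with THP, 2.6.38+, and a libc that defines MADV_HUGEPAGE):

    #include <sys/mman.h>
    #include <cstdio>

    int main() {
      const size_t size = 4 * 1024 * 1024;   // room for at least one 2M huge page
      void* addr = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (addr == MAP_FAILED) { perror("mmap"); return 1; }
      // Best effort, mirroring the comment in pd_realign_memory: the advice
      // may be unsupported or the range may already be huge-page backed.
      if (madvise(addr, size, MADV_HUGEPAGE) != 0) {
        perror("madvise(MADV_HUGEPAGE)");
      }
      munmap(addr, size);
      return 0;
    }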
@@ -2743,7 +2714,7 @@ void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
   // uncommitted at all. We don't do anything in this case to avoid creating a segment with
   // small pages on top of the SHM segment. This method always works for small pages, so we
   // allow that in any case.
-  if (alignment_hint <= (size_t)os::vm_page_size() || !UseSHM) {
+  if (alignment_hint <= (size_t)os::vm_page_size() || can_commit_large_page_memory()) {
     commit_memory(addr, bytes, alignment_hint, !ExecMem);
   }
 }
@@ -3113,11 +3084,31 @@ bool os::unguard_memory(char* addr, size_t size) {
   return linux_mprotect(addr, size, PROT_READ|PROT_WRITE);
 }

+bool os::Linux::transparent_huge_pages_sanity_check(bool warn, size_t page_size) {
+  bool result = false;
+  void *p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,
+                 MAP_ANONYMOUS|MAP_PRIVATE,
+                 -1, 0);
+  if (p != MAP_FAILED) {
+    void *aligned_p = align_ptr_up(p, page_size);
+
+    result = madvise(aligned_p, page_size, MADV_HUGEPAGE) == 0;
+
+    munmap(p, page_size * 2);
+  }
+
+  if (warn && !result) {
+    warning("TransparentHugePages is not supported by the operating system.");
+  }
+
+  return result;
+}
+
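Note: transparent_huge_pages_sanity_check maps twice the huge page size so that a fully aligned, huge-page-sized range is guaranteed to exist inside the mapping regardless of where mmap placed it. A hypothetical stand-in for the align_ptr_up helper it relies on (HotSpot's real helper lives in its utility headers):

    #include <cstdint>

    // Round p up to the next multiple of alignment (a power of two).
    inline void* align_up(void* p, uintptr_t alignment) {
      uintptr_t v = (uintptr_t)p;
      return (void*)((v + alignment - 1) & ~(alignment - 1));
    }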
 bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
   bool result = false;
-  void *p = mmap (NULL, page_size, PROT_READ|PROT_WRITE,
-                  MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
-                  -1, 0);
+  void *p = mmap(NULL, page_size, PROT_READ|PROT_WRITE,
+                 MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
+                 -1, 0);
   if (p != MAP_FAILED) {
     // We don't know if this really is a huge page or not.

@@ -3138,12 +3129,10 @@ bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
       }
       fclose(fp);
     }
-    munmap (p, page_size);
-    if (result)
-      return true;
+    munmap(p, page_size);
   }

-  if (warn) {
+  if (warn && !result) {
     warning("HugeTLBFS is not supported by the operating system.");
   }
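Note: stripped of the /proc/self/maps verification that surrounds the code above, the HugeTLBFS availability test reduces to one MAP_HUGETLB probe. A standalone sketch under those assumptions (illustrative names; MAP_HUGETLB is Linux-specific, and the mmap fails with ENOMEM unless the huge page pool, e.g. /proc/sys/vm/nr_hugepages, is configured):

    #include <sys/mman.h>
    #include <cstddef>

    #ifndef MAP_HUGETLB
    #define MAP_HUGETLB 0x40000   // missing from some older libc headers
    #endif

    bool hugetlbfs_available(size_t huge_page_size) {
      void* p = mmap(nullptr, huge_page_size, PROT_READ | PROT_WRITE,
                     MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0);
      if (p == MAP_FAILED) return false;
      munmap(p, huge_page_size);
      return true;
    }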
@@ -3191,82 +3180,114 @@ static void set_coredump_filter(void) {

 static size_t _large_page_size = 0;

-void os::large_page_init() {
-  if (!UseLargePages) {
-    UseHugeTLBFS = false;
-    UseSHM = false;
-    return;
-  }
-
-  if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM)) {
-    // If UseLargePages is specified on the command line try both methods,
-    // if it's default, then try only HugeTLBFS.
-    if (FLAG_IS_DEFAULT(UseLargePages)) {
-      UseHugeTLBFS = true;
-    } else {
-      UseHugeTLBFS = UseSHM = true;
-    }
-  }
-
-  if (LargePageSizeInBytes) {
-    _large_page_size = LargePageSizeInBytes;
-  } else {
-    // large_page_size on Linux is used to round up heap size. x86 uses either
-    // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
-    // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
-    // page as large as 256M.
-    //
-    // Here we try to figure out page size by parsing /proc/meminfo and looking
-    // for a line with the following format:
-    //    Hugepagesize:     2048 kB
-    //
-    // If we can't determine the value (e.g. /proc is not mounted, or the text
-    // format has been changed), we'll use the largest page size supported by
-    // the processor.
-
-#ifndef ZERO
-    _large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M)
-                       ARM_ONLY(2 * M) PPC_ONLY(4 * M);
-#endif // ZERO
-
-    FILE *fp = fopen("/proc/meminfo", "r");
-    if (fp) {
-      while (!feof(fp)) {
-        int x = 0;
-        char buf[16];
-        if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
-          if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
-            _large_page_size = x * K;
-            break;
-          }
-        } else {
-          // skip to next line
-          for (;;) {
-            int ch = fgetc(fp);
-            if (ch == EOF || ch == (int)'\n') break;
-          }
-        }
-      }
-      fclose(fp);
-    }
-  }
-
-  // print a warning if any large page related flag is specified on command line
-  bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
-
-  const size_t default_page_size = (size_t)Linux::page_size();
-  if (_large_page_size > default_page_size) {
-    _page_sizes[0] = _large_page_size;
-    _page_sizes[1] = default_page_size;
-    _page_sizes[2] = 0;
-  }
-  UseHugeTLBFS = UseHugeTLBFS &&
-                 Linux::hugetlbfs_sanity_check(warn_on_failure, _large_page_size);
-
-  if (UseHugeTLBFS)
-    UseSHM = false;
-
-  UseLargePages = UseHugeTLBFS || UseSHM;
-
-  set_coredump_filter();
-}
+size_t os::Linux::find_large_page_size() {
+  size_t large_page_size = 0;
+
+  // large_page_size on Linux is used to round up heap size. x86 uses either
+  // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
+  // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
+  // page as large as 256M.
+  //
+  // Here we try to figure out page size by parsing /proc/meminfo and looking
+  // for a line with the following format:
+  //    Hugepagesize:     2048 kB
+  //
+  // If we can't determine the value (e.g. /proc is not mounted, or the text
+  // format has been changed), we'll use the largest page size supported by
+  // the processor.
+
+#ifndef ZERO
+  large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M)
+                    ARM_ONLY(2 * M) PPC_ONLY(4 * M);
+#endif // ZERO
+
+  FILE *fp = fopen("/proc/meminfo", "r");
+  if (fp) {
+    while (!feof(fp)) {
+      int x = 0;
+      char buf[16];
+      if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
+        if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
+          large_page_size = x * K;
+          break;
+        }
+      } else {
+        // skip to next line
+        for (;;) {
+          int ch = fgetc(fp);
+          if (ch == EOF || ch == (int)'\n') break;
+        }
+      }
+    }
+    fclose(fp);
+  }
+
+  if (!FLAG_IS_DEFAULT(LargePageSizeInBytes) && LargePageSizeInBytes != large_page_size) {
+    warning("Setting LargePageSizeInBytes has no effect on this OS. Large page size is "
+            SIZE_FORMAT "%s.", byte_size_in_proper_unit(large_page_size),
+            proper_unit_for_byte_size(large_page_size));
+  }
+
+  return large_page_size;
+}
+
+size_t os::Linux::setup_large_page_size() {
+  _large_page_size = Linux::find_large_page_size();
+  const size_t default_page_size = (size_t)Linux::page_size();
+  if (_large_page_size > default_page_size) {
+    _page_sizes[0] = _large_page_size;
+    _page_sizes[1] = default_page_size;
+    _page_sizes[2] = 0;
+  }
+
+  return _large_page_size;
+}
+
+bool os::Linux::setup_large_page_type(size_t page_size) {
+  if (FLAG_IS_DEFAULT(UseHugeTLBFS) &&
+      FLAG_IS_DEFAULT(UseSHM) &&
+      FLAG_IS_DEFAULT(UseTransparentHugePages)) {
+    // If UseLargePages is specified on the command line try all methods,
+    // if it's default, then try only UseTransparentHugePages.
+    if (FLAG_IS_DEFAULT(UseLargePages)) {
+      UseTransparentHugePages = true;
+    } else {
+      UseHugeTLBFS = UseTransparentHugePages = UseSHM = true;
+    }
+  }
+
+  if (UseTransparentHugePages) {
+    bool warn_on_failure = !FLAG_IS_DEFAULT(UseTransparentHugePages);
+    if (transparent_huge_pages_sanity_check(warn_on_failure, page_size)) {
+      UseHugeTLBFS = false;
+      UseSHM = false;
+      return true;
+    }
+    UseTransparentHugePages = false;
+  }
+
+  if (UseHugeTLBFS) {
+    bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
+    if (hugetlbfs_sanity_check(warn_on_failure, page_size)) {
+      UseSHM = false;
+      return true;
+    }
+    UseHugeTLBFS = false;
+  }
+
+  return UseSHM;
+}
+
+void os::large_page_init() {
+  if (!UseLargePages) {
+    UseHugeTLBFS = false;
+    UseTransparentHugePages = false;
+    UseSHM = false;
+    return;
+  }
+
+  size_t large_page_size = Linux::setup_large_page_size();
+  UseLargePages = Linux::setup_large_page_type(large_page_size);
+
+  set_coredump_filter();
+}
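Note: find_large_page_size is the only part of the new setup that touches the filesystem. A self-contained sketch of the same /proc/meminfo probe (hypothetical helper name, simplified to line-based parsing instead of the fscanf/fgetc loop above):

    #include <cstdio>

    static size_t default_huge_page_size() {
      size_t bytes = 0;
      FILE* fp = fopen("/proc/meminfo", "r");
      if (fp == nullptr) return 0;   // /proc not mounted
      char line[128];
      while (fgets(line, sizeof(line), fp) != nullptr) {
        unsigned long kb = 0;
        // Matches e.g. "Hugepagesize:       2048 kB"
        if (sscanf(line, "Hugepagesize: %lu kB", &kb) == 1) {
          bytes = (size_t)kb * 1024;
          break;
        }
      }
      fclose(fp);
      return bytes;
    }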
@@ -3275,16 +3296,22 @@ void os::large_page_init() {
 #define SHM_HUGETLB 04000
 #endif

-char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
+char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec) {
   // "exec" is passed in but not used. Creating the shared image for
   // the code cache doesn't have an SHM_X executable permission to check.
   assert(UseLargePages && UseSHM, "only for SHM large pages");
+  assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
+
+  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
+    return NULL; // Fallback to small pages.
+  }

   key_t key = IPC_PRIVATE;
   char *addr;

   bool warn_on_failure = UseLargePages &&
                          (!FLAG_IS_DEFAULT(UseLargePages) ||
+                          !FLAG_IS_DEFAULT(UseSHM) ||
                           !FLAG_IS_DEFAULT(LargePageSizeInBytes)
                          );
   char msg[128];
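Note: the SysV path relies on SHM_HUGETLB, which allocates the whole segment from the huge page pool up front, which is why SHM can never commit large pages on demand. A minimal sketch of that reservation style (illustrative, error handling reduced; it mirrors the shmget/shmat/IPC_RMID pattern used here):

    #include <sys/ipc.h>
    #include <sys/shm.h>
    #include <cstddef>

    #ifndef SHM_HUGETLB
    #define SHM_HUGETLB 04000
    #endif

    char* reserve_shm_large(size_t bytes) {
      int shmid = shmget(IPC_PRIVATE, bytes, SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W);
      if (shmid == -1) return nullptr;              // pool too small, or no permission
      char* addr = (char*)shmat(shmid, nullptr, 0);
      // Mark for removal now; the segment disappears when the last user detaches.
      shmctl(shmid, IPC_RMID, nullptr);
      return (addr == (char*)-1) ? nullptr : addr;
    }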
@@ -3332,42 +3359,219 @@ char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
     return NULL;
   }

-  if ((addr != NULL) && UseNUMAInterleaving) {
-    numa_make_global(addr, bytes);
-  }
-
   return addr;
 }

+static void warn_on_large_pages_failure(char* req_addr, size_t bytes, int error) {
+  assert(error == ENOMEM, "Only expect to fail if no memory is available");
+
+  bool warn_on_failure = UseLargePages &&
+      (!FLAG_IS_DEFAULT(UseLargePages) ||
+       !FLAG_IS_DEFAULT(UseHugeTLBFS) ||
+       !FLAG_IS_DEFAULT(LargePageSizeInBytes));
+
+  if (warn_on_failure) {
+    char msg[128];
+    jio_snprintf(msg, sizeof(msg), "Failed to reserve large pages memory req_addr: "
+                 PTR_FORMAT " bytes: " SIZE_FORMAT " (errno = %d).", req_addr, bytes, error);
+    warning(msg);
+  }
+}
+
+char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec) {
+  assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
+  assert(is_size_aligned(bytes, os::large_page_size()), "Unaligned size");
+  assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
+
+  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
+  char* addr = (char*)::mmap(req_addr, bytes, prot,
+                             MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB,
+                             -1, 0);
+
+  if (addr == MAP_FAILED) {
+    warn_on_large_pages_failure(req_addr, bytes, errno);
+    return NULL;
+  }
+
+  // The memory is committed
+  MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
+  assert(is_ptr_aligned(addr, os::large_page_size()), "Must be");
+
+  return addr;
+}
+
+char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  size_t large_page_size = os::large_page_size();
+
+  assert(bytes >= large_page_size, "Shouldn't allocate large pages for small sizes");
+
+  // Allocate small pages.
+
+  char* start;
+  if (req_addr != NULL) {
+    assert(is_ptr_aligned(req_addr, alignment), "Must be");
+    assert(is_size_aligned(bytes, alignment), "Must be");
+    start = os::reserve_memory(bytes, req_addr);
+    assert(start == NULL || start == req_addr, "Must be");
+  } else {
+    start = os::reserve_memory_aligned(bytes, alignment);
+  }
+
+  if (start == NULL) {
+    return NULL;
+  }
+
+  assert(is_ptr_aligned(start, alignment), "Must be");
+
+  // os::reserve_memory_special will record this memory area.
+  // Need to release it here to prevent overlapping reservations.
+  MemTracker::record_virtual_memory_release((address)start, bytes);
+
+  char* end = start + bytes;
+
+  // Find the regions of the allocated chunk that can be promoted to large pages.
+  char* lp_start = (char*)align_ptr_up(start, large_page_size);
+  char* lp_end   = (char*)align_ptr_down(end, large_page_size);
+
+  size_t lp_bytes = lp_end - lp_start;
+
+  assert(is_size_aligned(lp_bytes, large_page_size), "Must be");
+
+  if (lp_bytes == 0) {
+    // The mapped region doesn't even span the start and the end of a large page.
+    // Fall back to allocate a non-special area.
+    ::munmap(start, end - start);
+    return NULL;
+  }
+
+  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
+
+  void* result;
+
+  if (start != lp_start) {
+    result = ::mmap(start, lp_start - start, prot,
+                    MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
+                    -1, 0);
+    if (result == MAP_FAILED) {
+      ::munmap(lp_start, end - lp_start);
+      return NULL;
+    }
+  }
+
+  result = ::mmap(lp_start, lp_bytes, prot,
+                  MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED|MAP_HUGETLB,
+                  -1, 0);
+  if (result == MAP_FAILED) {
+    warn_on_large_pages_failure(req_addr, bytes, errno);
+    // If the mmap above fails, the large pages region will be unmapped and we
+    // have regions before and after with small pages. Release these regions.
+    //
+    // |  mapped  |  unmapped  |  mapped  |
+    // ^          ^            ^          ^
+    // start      lp_start     lp_end     end
+    //
+    ::munmap(start, lp_start - start);
+    ::munmap(lp_end, end - lp_end);
+    return NULL;
+  }
+
+  if (lp_end != end) {
+    result = ::mmap(lp_end, end - lp_end, prot,
+                    MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
+                    -1, 0);
+    if (result == MAP_FAILED) {
+      ::munmap(start, lp_end - start);
+      return NULL;
+    }
+  }
+
+  return start;
+}
+
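Note: the shape of the mixed reservation above can be condensed into a hypothetical helper: keep the unaligned head and tail on small pages and remap only the aligned middle with MAP_HUGETLB over the existing reservation. Cleanup paths are omitted here (see the function above for the full error handling), and MAP_HUGETLB is assumed to be defined as on Linux:

    #include <sys/mman.h>
    #include <cstdint>

    static char* map_fixed(char* at, size_t len, int extra_flags) {
      void* r = mmap(at, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | extra_flags, -1, 0);
      return (r == MAP_FAILED) ? nullptr : (char*)r;
    }

    // start..start+bytes is an existing small-page reservation; lp is the
    // huge page size (a power of two).
    char* promote_middle(char* start, size_t bytes, size_t lp) {
      char* end      = start + bytes;
      char* lp_start = (char*)(((uintptr_t)start + lp - 1) & ~(uintptr_t)(lp - 1));
      char* lp_end   = (char*)((uintptr_t)end & ~(uintptr_t)(lp - 1));
      if (lp_start >= lp_end) return nullptr;       // no full huge page fits
      if (start != lp_start && map_fixed(start, lp_start - start, 0) == nullptr) return nullptr;
      if (map_fixed(lp_start, lp_end - lp_start, MAP_HUGETLB) == nullptr) return nullptr;
      if (lp_end != end && map_fixed(lp_end, end - lp_end, 0) == nullptr) return nullptr;
      return start;
    }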
+char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
+  assert(is_ptr_aligned(req_addr, alignment), "Must be");
+  assert(is_power_of_2(alignment), "Must be");
+  assert(is_power_of_2(os::large_page_size()), "Must be");
+  assert(bytes >= os::large_page_size(), "Shouldn't allocate large pages for small sizes");
+
+  if (is_size_aligned(bytes, os::large_page_size()) && alignment <= os::large_page_size()) {
+    return reserve_memory_special_huge_tlbfs_only(bytes, req_addr, exec);
+  } else {
+    return reserve_memory_special_huge_tlbfs_mixed(bytes, alignment, req_addr, exec);
+  }
+}
+
+char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  assert(UseLargePages, "only for large pages");
+
+  char* addr;
+  if (UseSHM) {
+    addr = os::Linux::reserve_memory_special_shm(bytes, alignment, req_addr, exec);
+  } else {
+    assert(UseHugeTLBFS, "must be");
+    addr = os::Linux::reserve_memory_special_huge_tlbfs(bytes, alignment, req_addr, exec);
+  }
+
+  if (addr != NULL) {
+    if (UseNUMAInterleaving) {
+      numa_make_global(addr, bytes);
+    }
+
+    // The memory is committed
+    MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
+  }
+
+  return addr;
+}
+
+bool os::Linux::release_memory_special_shm(char* base, size_t bytes) {
+  // detaching the SHM segment will also delete it, see reserve_memory_special_shm()
+  return shmdt(base) == 0;
+}
+
+bool os::Linux::release_memory_special_huge_tlbfs(char* base, size_t bytes) {
+  return pd_release_memory(base, bytes);
+}
+
 bool os::release_memory_special(char* base, size_t bytes) {
+  assert(UseLargePages, "only for large pages");
+
   MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
-  // detaching the SHM segment will also delete it, see reserve_memory_special()
-  int rslt = shmdt(base);
-  if (rslt == 0) {
+
+  bool res;
+  if (UseSHM) {
+    res = os::Linux::release_memory_special_shm(base, bytes);
+  } else {
+    assert(UseHugeTLBFS, "must be");
+    res = os::Linux::release_memory_special_huge_tlbfs(base, bytes);
+  }
+
+  if (res) {
     tkr.record((address)base, bytes);
-    return true;
   } else {
     tkr.discard();
-    return false;
   }
+
+  return res;
 }

 size_t os::large_page_size() {
   return _large_page_size;
 }

-// HugeTLBFS allows application to commit large page memory on demand;
-// with SysV SHM the entire memory region must be allocated as shared
+// With SysV SHM the entire memory region must be allocated as shared
 // memory.
+// HugeTLBFS allows application to commit large page memory on demand.
+// However, when committing memory with HugeTLBFS fails, the region
+// that was supposed to be committed will lose the old reservation
+// and allow other threads to steal that memory region. Because of this
+// behavior we can't commit HugeTLBFS memory.
 bool os::can_commit_large_page_memory() {
-  return UseHugeTLBFS;
+  return UseTransparentHugePages;
 }

 bool os::can_execute_large_page_memory() {
-  return UseHugeTLBFS;
+  return UseTransparentHugePages || UseHugeTLBFS;
 }

 // Reserve memory at an arbitrary address, only if that area is
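Note: the comment above captures the motivation for the change: a failed MAP_FIXED|MAP_HUGETLB commit leaves a hole where the reservation used to be, while THP-backed memory is ordinary anonymous memory whose commit/uncommit round-trips safely. A small sketch of that safe round-trip (not HotSpot code, but the same mmap idiom for reserve, commit, and uncommit):

    #include <sys/mman.h>
    #include <cassert>

    int main() {
      const size_t sz = 2 * 1024 * 1024;
      // Reserve address space without committing it.
      char* p = (char*)mmap(nullptr, sz, PROT_NONE,
                            MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      assert(p != (char*)MAP_FAILED);
      // Commit: remap read/write; THP backing can then be requested.
      void* r = mmap(p, sz, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
      assert(r == p);
      madvise(p, sz, MADV_HUGEPAGE);   // best effort
      p[0] = 1;
      // Uncommit: drop the pages but keep the range reserved for reuse.
      r = mmap(p, sz, PROT_NONE,
               MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_NORESERVE, -1, 0);
      assert(r == p);
      munmap(p, sz);
      return 0;
    }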
@@ -4521,21 +4725,23 @@ jint os::init_2(void)
       UseNUMA = false;
     }
   }
-  // With SHM large pages we cannot uncommit a page, so there's not way
+  // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
   // we can make the adaptive lgrp chunk resizing work. If the user specified
-  // both UseNUMA and UseLargePages (or UseSHM) on the command line - warn and
+  // both UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn and
   // disable adaptive resizing.
-  if (UseNUMA && UseLargePages && UseSHM) {
-    if (!FLAG_IS_DEFAULT(UseNUMA)) {
-      if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseSHM)) {
+  if (UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
+    if (FLAG_IS_DEFAULT(UseNUMA)) {
+      UseNUMA = false;
+    } else {
+      if (FLAG_IS_DEFAULT(UseLargePages) &&
+          FLAG_IS_DEFAULT(UseSHM) &&
+          FLAG_IS_DEFAULT(UseHugeTLBFS)) {
         UseLargePages = false;
       } else {
-        warning("UseNUMA is not fully compatible with SHM large pages, disabling adaptive resizing");
+        warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, disabling adaptive resizing");
         UseAdaptiveSizePolicy = false;
         UseAdaptiveNUMAChunkSizing = false;
       }
-    } else {
-      UseNUMA = false;
     }
   }
   if (!UseNUMA && ForceNUMA) {
@@ -5805,3 +6011,149 @@ void MemNotifyThread::start() {
 }

 #endif // JAVASE_EMBEDDED
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+#define test_log(...) \
+  do {\
+    if (VerboseInternalVMTests) { \
+      tty->print_cr(__VA_ARGS__); \
+      tty->flush(); \
+    }\
+  } while (false)
+
+class TestReserveMemorySpecial : AllStatic {
+ public:
+  static void small_page_write(void* addr, size_t size) {
+    size_t page_size = os::vm_page_size();
+
+    char* end = (char*)addr + size;
+    for (char* p = (char*)addr; p < end; p += page_size) {
+      *p = 1;
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_only(size_t size) {
+    if (!UseHugeTLBFS) {
+      return;
+    }
+
+    test_log("test_reserve_memory_special_huge_tlbfs_only(" SIZE_FORMAT ")", size);
+
+    char* addr = os::Linux::reserve_memory_special_huge_tlbfs_only(size, NULL, false);
+
+    if (addr != NULL) {
+      small_page_write(addr, size);
+
+      os::Linux::release_memory_special_huge_tlbfs(addr, size);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_only() {
+    if (!UseHugeTLBFS) {
+      return;
+    }
+
+    size_t lp = os::large_page_size();
+
+    for (size_t size = lp; size <= lp * 10; size += lp) {
+      test_reserve_memory_special_huge_tlbfs_only(size);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_mixed(size_t size, size_t alignment) {
+    if (!UseHugeTLBFS) {
+      return;
+    }
+
+    test_log("test_reserve_memory_special_huge_tlbfs_mixed(" SIZE_FORMAT ", " SIZE_FORMAT ")",
+             size, alignment);
+
+    assert(size >= os::large_page_size(), "Incorrect input to test");
+
+    char* addr = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
+
+    if (addr != NULL) {
+      small_page_write(addr, size);
+
+      os::Linux::release_memory_special_huge_tlbfs(addr, size);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(size_t size) {
+    size_t lp = os::large_page_size();
+    size_t ag = os::vm_allocation_granularity();
+
+    for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
+      test_reserve_memory_special_huge_tlbfs_mixed(size, alignment);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_mixed() {
+    size_t lp = os::large_page_size();
+    size_t ag = os::vm_allocation_granularity();
+
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + ag);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + lp / 2);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + ag);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 - ag);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + lp / 2);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10 + lp / 2);
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs() {
+    if (!UseHugeTLBFS) {
+      return;
+    }
+
+    test_reserve_memory_special_huge_tlbfs_only();
+    test_reserve_memory_special_huge_tlbfs_mixed();
+  }
+
+  static void test_reserve_memory_special_shm(size_t size, size_t alignment) {
+    if (!UseSHM) {
+      return;
+    }
+
+    test_log("test_reserve_memory_special_shm(" SIZE_FORMAT ", " SIZE_FORMAT ")", size, alignment);
+
+    char* addr = os::Linux::reserve_memory_special_shm(size, alignment, NULL, false);
+
+    if (addr != NULL) {
+      assert(is_ptr_aligned(addr, alignment), "Check");
+      assert(is_ptr_aligned(addr, os::large_page_size()), "Check");
+
+      small_page_write(addr, size);
+
+      os::Linux::release_memory_special_shm(addr, size);
+    }
+  }
+
+  static void test_reserve_memory_special_shm() {
+    size_t lp = os::large_page_size();
+    size_t ag = os::vm_allocation_granularity();
+
+    for (size_t size = ag; size < lp * 3; size += ag) {
+      for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
+        test_reserve_memory_special_shm(size, alignment);
+      }
+    }
+  }
+
+  static void test() {
+    test_reserve_memory_special_huge_tlbfs();
+    test_reserve_memory_special_shm();
+  }
+};
+
+void TestReserveMemorySpecial_test() {
+  TestReserveMemorySpecial::test();
+}
+
+#endif