Mirror of https://github.com/openjdk/jdk.git (synced 2025-08-24 05:14:52 +02:00)
8017629: G1: UseSHM in combination with a G1HeapRegionSize > os::large_page_size() falls back to use small pages
Reviewed-by: pliden, sjohanss, stuefe
parent 04ec692aca
commit 9ca6318ece
1 changed file with 141 additions and 71 deletions
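
For context, the failing configuration implied by the title is G1 with SHM-based large pages and a heap region size above the OS large page size; on a system with the common 2 MB huge pages that is, for example (flag values illustrative, not taken from the bug report):

java -XX:+UseG1GC -XX:+UseLargePages -XX:+UseSHM -XX:G1HeapRegionSize=8m -version

Before this change the SHM reservation path gave up and fell back to small pages whenever the requested alignment exceeded os::large_page_size(); the diff below teaches it to pre-reserve an aligned address range and attach the SHM segment there instead.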
@@ -3051,6 +3051,48 @@ static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
   return addr == MAP_FAILED ? NULL : addr;
 }
 
+// Allocate (using mmap, NO_RESERVE, with small pages) at either a given request address
+// (req_addr != NULL) or with a given alignment.
+// - bytes shall be a multiple of alignment.
+// - req_addr can be NULL. If not NULL, it must be a multiple of alignment.
+// - alignment sets the alignment at which memory shall be allocated.
+//   It must be a multiple of allocation granularity.
+// Returns address of memory or NULL. If req_addr was not NULL, will only return
+// req_addr or NULL.
+static char* anon_mmap_aligned(size_t bytes, size_t alignment, char* req_addr) {
+
+  size_t extra_size = bytes;
+  if (req_addr == NULL && alignment > 0) {
+    extra_size += alignment;
+  }
+
+  char* start = (char*) ::mmap(req_addr, extra_size, PROT_NONE,
+                               MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
+                               -1, 0);
+  if (start == MAP_FAILED) {
+    start = NULL;
+  } else {
+    if (req_addr != NULL) {
+      if (start != req_addr) {
+        ::munmap(start, extra_size);
+        start = NULL;
+      }
+    } else {
+      char* const start_aligned = (char*) align_ptr_up(start, alignment);
+      char* const end_aligned = start_aligned + bytes;
+      char* const end = start + extra_size;
+      if (start_aligned > start) {
+        ::munmap(start, start_aligned - start);
+      }
+      if (end_aligned < end) {
+        ::munmap(end_aligned, end - end_aligned);
+      }
+      start = start_aligned;
+    }
+  }
+  return start;
+}
+
 static int anon_munmap(char * addr, size_t size) {
   return ::munmap(addr, size) == 0;
 }
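
The hunk above moves anon_mmap_aligned() up so that the SHM path added below can use it. The trick it implements is standard: mmap() only guarantees page alignment, so the function over-reserves bytes + alignment of PROT_NONE, MAP_NORESERVE address space and then unmaps the unaligned head and tail. A minimal standalone sketch of the same technique, for readers outside HotSpot (illustrative code, not part of the patch; assumes Linux and a power-of-two alignment):

// sketch_mmap_aligned.c -- over-reserve, align up, trim the ends (illustration only)
#include <sys/mman.h>
#include <stdint.h>
#include <stdio.h>

static char* mmap_aligned_sketch(size_t bytes, size_t alignment) {
  size_t extra_size = bytes + alignment;           // room to slide up to an aligned start
  char* start = (char*) mmap(NULL, extra_size, PROT_NONE,
                             MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (start == MAP_FAILED) {
    return NULL;
  }
  // Round the raw address up to the requested alignment (power of two assumed).
  char* aligned = (char*) (((uintptr_t) start + alignment - 1) & ~((uintptr_t) alignment - 1));
  // Give back the unaligned prefix and suffix; the middle mapping stays intact.
  if (aligned > start) {
    munmap(start, aligned - start);
  }
  if (aligned + bytes < start + extra_size) {
    munmap(aligned + bytes, (start + extra_size) - (aligned + bytes));
  }
  return aligned;                                  // caller later munmaps or maps over this
}

int main(void) {
  const size_t alignment = 32 * 1024 * 1024;       // e.g. a 32M G1 heap region size
  char* p = mmap_aligned_sketch(alignment, alignment);
  printf("aligned placeholder at %p\n", (void*) p);
  return p == NULL;
}

Because the reservation is PROT_NONE and MAP_NORESERVE it costs only address space, not committed memory; the patched anon_mmap_aligned() does the same and additionally honours an explicit req_addr.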
@@ -3327,29 +3369,113 @@ void os::large_page_init() {
 #define SHM_HUGETLB 04000
 #endif
 
+#define shm_warning_format(format, ...)             \
+  do {                                              \
+    if (UseLargePages &&                            \
+        (!FLAG_IS_DEFAULT(UseLargePages) ||         \
+         !FLAG_IS_DEFAULT(UseSHM) ||                \
+         !FLAG_IS_DEFAULT(LargePageSizeInBytes))) { \
+      warning(format, __VA_ARGS__);                 \
+    }                                               \
+  } while (0)
+
+#define shm_warning(str) shm_warning_format("%s", str)
+
+#define shm_warning_with_errno(str)                \
+  do {                                             \
+    int err = errno;                               \
+    shm_warning_format(str " (error = %d)", err);  \
+  } while (0)
+
+static char* shmat_with_alignment(int shmid, size_t bytes, size_t alignment) {
+  assert(is_size_aligned(bytes, alignment), "Must be divisible by the alignment");
+
+  if (!is_size_aligned(alignment, SHMLBA)) {
+    assert(false, "Code below assumes that alignment is at least SHMLBA aligned");
+    return NULL;
+  }
+
+  // To ensure that we get 'alignment' aligned memory from shmat,
+  // we pre-reserve aligned virtual memory and then attach to that.
+
+  char* pre_reserved_addr = anon_mmap_aligned(bytes, alignment, NULL);
+  if (pre_reserved_addr == NULL) {
+    // Couldn't pre-reserve aligned memory.
+    shm_warning("Failed to pre-reserve aligned memory for shmat.");
+    return NULL;
+  }
+
+  // SHM_REMAP is needed to allow shmat to map over an existing mapping.
+  char* addr = (char*)shmat(shmid, pre_reserved_addr, SHM_REMAP);
+
+  if ((intptr_t)addr == -1) {
+    int err = errno;
+    shm_warning_with_errno("Failed to attach shared memory.");
+
+    assert(err != EACCES, "Unexpected error");
+    assert(err != EIDRM, "Unexpected error");
+    assert(err != EINVAL, "Unexpected error");
+
+    // Since we don't know if the kernel unmapped the pre-reserved memory area
+    // we can't unmap it, since that would potentially unmap memory that was
+    // mapped from other threads.
+    return NULL;
+  }
+
+  return addr;
+}
+
+static char* shmat_at_address(int shmid, char* req_addr) {
+  if (!is_ptr_aligned(req_addr, SHMLBA)) {
+    assert(false, "Requested address needs to be SHMLBA aligned");
+    return NULL;
+  }
+
+  char* addr = (char*)shmat(shmid, req_addr, 0);
+
+  if ((intptr_t)addr == -1) {
+    shm_warning_with_errno("Failed to attach shared memory.");
+    return NULL;
+  }
+
+  return addr;
+}
+
+static char* shmat_large_pages(int shmid, size_t bytes, size_t alignment, char* req_addr) {
+  // If a req_addr has been provided, we assume that the caller has already aligned the address.
+  if (req_addr != NULL) {
+    assert(is_ptr_aligned(req_addr, os::large_page_size()), "Must be divisible by the large page size");
+    assert(is_ptr_aligned(req_addr, alignment), "Must be divisible by given alignment");
+    return shmat_at_address(shmid, req_addr);
+  }
+
+  // Since shmid has been setup with SHM_HUGETLB, shmat will automatically
+  // return large page size aligned memory addresses when req_addr == NULL.
+  // However, if the alignment is larger than the large page size, we have
+  // to manually ensure that the memory returned is 'alignment' aligned.
+  if (alignment > os::large_page_size()) {
+    assert(is_size_aligned(alignment, os::large_page_size()), "Must be divisible by the large page size");
+    return shmat_with_alignment(shmid, bytes, alignment);
+  } else {
+    return shmat_at_address(shmid, NULL);
+  }
+}
+
 char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment,
                                             char* req_addr, bool exec) {
   // "exec" is passed in but not used. Creating the shared image for
   // the code cache doesn't have an SHM_X executable permission to check.
   assert(UseLargePages && UseSHM, "only for SHM large pages");
   assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
+  assert(is_ptr_aligned(req_addr, alignment), "Unaligned address");
 
-  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
+  if (!is_size_aligned(bytes, os::large_page_size())) {
     return NULL; // Fallback to small pages.
   }
 
-  key_t key = IPC_PRIVATE;
-  char *addr;
-
-  bool warn_on_failure = UseLargePages &&
-                        (!FLAG_IS_DEFAULT(UseLargePages) ||
-                         !FLAG_IS_DEFAULT(UseSHM) ||
-                         !FLAG_IS_DEFAULT(LargePageSizeInBytes));
-  char msg[128];
-
   // Create a large shared memory region to attach to based on size.
-  // Currently, size is the total size of the heap
-  int shmid = shmget(key, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
+  // Currently, size is the total size of the heap.
+  int shmid = shmget(IPC_PRIVATE, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
   if (shmid == -1) {
     // Possible reasons for shmget failure:
     // 1. shmmax is too small for Java heap.
@@ -3365,16 +3491,12 @@ char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment,
     // they are so fragmented after a long run that they can't
     // coalesce into large pages. Try to reserve large pages when
     // the system is still "fresh".
-    if (warn_on_failure) {
-      jio_snprintf(msg, sizeof(msg), "Failed to reserve shared memory (errno = %d).", errno);
-      warning("%s", msg);
-    }
+    shm_warning_with_errno("Failed to reserve shared memory.");
     return NULL;
   }
 
-  // attach to the region
-  addr = (char*)shmat(shmid, req_addr, 0);
-  int err = errno;
+  // Attach to the region.
+  char* addr = shmat_large_pages(shmid, bytes, alignment, req_addr);
 
   // Remove shmid. If shmat() is successful, the actual shared memory segment
   // will be deleted when it's detached by shmdt() or when the process
@@ -3382,14 +3504,6 @@ char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment,
   // segment immediately.
   shmctl(shmid, IPC_RMID, NULL);
 
-  if ((intptr_t)addr == -1) {
-    if (warn_on_failure) {
-      jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err);
-      warning("%s", msg);
-    }
-    return NULL;
-  }
-
   return addr;
 }
 
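
Taken together, the three hunks above rewrite reserve_memory_special_shm() to delegate the attach to shmat_large_pages(), which pre-reserves an aligned range and maps the SHM_HUGETLB segment over it with SHM_REMAP whenever the alignment exceeds the large page size. A rough standalone sketch of that sequence outside the JVM (illustrative only, not HotSpot code; it needs a Linux machine with huge pages configured, e.g. vm.nr_hugepages > 0, and a large enough shmmax, otherwise shmget/shmat will fail):

// sketch_shm_hugetlb.c -- attach a huge-page SysV segment at a pre-reserved address
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/mman.h>
#include <stdint.h>
#include <stdio.h>

#ifndef SHM_HUGETLB
#define SHM_HUGETLB 04000     // same fallback definition the HotSpot code uses
#endif
#ifndef SHM_REMAP
#define SHM_REMAP 040000      // lets shmat() map over an existing mapping
#endif

int main(void) {
  const size_t bytes = 4 * 1024 * 1024;   // must be a multiple of the huge page size
  const size_t align = 4 * 1024 * 1024;   // stands in for an alignment > large page size

  // 1. Pre-reserve an aligned, inaccessible placeholder with small pages.
  //    (Over-reserve and round up as in the earlier sketch; trimming elided here.)
  char* reserved = (char*) mmap(NULL, bytes + align, PROT_NONE,
                                MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (reserved == MAP_FAILED) { perror("mmap"); return 1; }
  char* placeholder = (char*) (((uintptr_t) reserved + align - 1) & ~((uintptr_t) align - 1));

  // 2. Create the huge-page shared memory segment (0600 == SHM_R|SHM_W in the patch).
  int shmid = shmget(IPC_PRIVATE, bytes, SHM_HUGETLB | IPC_CREAT | 0600);
  if (shmid == -1) { perror("shmget (huge pages configured?)"); return 1; }

  // 3. Attach exactly on top of the placeholder; SHM_REMAP replaces the old mapping.
  char* addr = (char*) shmat(shmid, placeholder, SHM_REMAP);

  // 4. Mark the segment for removal; it is destroyed once the last attach goes away.
  shmctl(shmid, IPC_RMID, NULL);

  if ((intptr_t) addr == -1) { perror("shmat"); return 1; }
  printf("huge-page segment attached at %p\n", (void*) addr);
  shmdt(addr);
  return 0;
}

As in the patch, IPC_RMID is issued right after the attach attempt, so the System V segment is cleaned up when it is detached or the process exits, whether or not the attach succeeded.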
@@ -3432,50 +3546,6 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes,
   return addr;
 }
 
-// Helper for os::Linux::reserve_memory_special_huge_tlbfs_mixed().
-// Allocate (using mmap, NO_RESERVE, with small pages) at either a given request address
-// (req_addr != NULL) or with a given alignment.
-// - bytes shall be a multiple of alignment.
-// - req_addr can be NULL. If not NULL, it must be a multiple of alignment.
-// - alignment sets the alignment at which memory shall be allocated.
-//   It must be a multiple of allocation granularity.
-// Returns address of memory or NULL. If req_addr was not NULL, will only return
-// req_addr or NULL.
-static char* anon_mmap_aligned(size_t bytes, size_t alignment, char* req_addr) {
-
-  size_t extra_size = bytes;
-  if (req_addr == NULL && alignment > 0) {
-    extra_size += alignment;
-  }
-
-  char* start = (char*) ::mmap(req_addr, extra_size, PROT_NONE,
-                               MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
-                               -1, 0);
-  if (start == MAP_FAILED) {
-    start = NULL;
-  } else {
-    if (req_addr != NULL) {
-      if (start != req_addr) {
-        ::munmap(start, extra_size);
-        start = NULL;
-      }
-    } else {
-      char* const start_aligned = (char*) align_ptr_up(start, alignment);
-      char* const end_aligned = start_aligned + bytes;
-      char* const end = start + extra_size;
-      if (start_aligned > start) {
-        ::munmap(start, start_aligned - start);
-      }
-      if (end_aligned < end) {
-        ::munmap(end_aligned, end - end_aligned);
-      }
-      start = start_aligned;
-    }
-  }
-  return start;
-
-}
-
 // Reserve memory using mmap(MAP_HUGETLB).
 // - bytes shall be a multiple of alignment.
 // - req_addr can be NULL. If not NULL, it must be a multiple of alignment.