8213827: NUMA heap allocation does not respect process membind/interleave settings

Optionally use the libnuma v2 API to query the process' NUMA membind/interleave configuration and honor it when allocating the heap.

Reviewed-by: tschatzl, sangheki
Amith Pawar 2019-01-16 22:32:04 +01:00 committed by Thomas Schatzl
parent a5f592e5b1
commit 8bd5f49675
2 changed files with 138 additions and 45 deletions
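
The change obtains the process configuration through the libnuma v2 entry points numa_get_membind and numa_get_interleave_mask, resolved with dlsym in libnuma_init below. As a rough standalone illustration only (not part of the commit; it assumes the libnuma v2 development headers and linking with -lnuma), the following probe distinguishes an interleave configuration from a membind/default one in the same spirit as the new identify_numa_policy() and is_running_in_interleave_mode() helpers:

// numa_probe.cpp (illustrative only): query the libnuma v2 API the way the
// JVM change does, and report which policy the process was started under.
// Build: g++ numa_probe.cpp -o numa_probe -lnuma
#include <numa.h>
#include <cstdio>

int main() {
  if (numa_available() == -1) {
    printf("NUMA is not available on this system\n");
    return 1;
  }

  // Empty unless the process was started with an interleave policy,
  // e.g. "numactl --interleave=all".
  struct bitmask* interleave = numa_get_interleave_mask();
  // Nodes the process may allocate from; without an explicit binding this
  // contains every node that has memory.
  struct bitmask* membind = numa_get_membind();

  bool interleaved = false;
  for (int node = 0; node <= numa_max_node(); node++) {
    if (numa_bitmask_isbitset(interleave, node)) {
      interleaved = true;
      break;
    }
  }

  struct bitmask* active = interleaved ? interleave : membind;
  printf("policy: %s, nodes:", interleaved ? "interleave" : "membind/default");
  for (int node = 0; node <= numa_max_node(); node++) {
    if (numa_bitmask_isbitset(active, node)) {
      printf(" %d", node);
    }
  }
  printf("\n");

  numa_bitmask_free(interleave);
  numa_bitmask_free(membind);
  return 0;
}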


@@ -33,6 +33,7 @@
 #include "compiler/disassembler.hpp"
 #include "interpreter/interpreter.hpp"
 #include "logging/log.hpp"
+#include "logging/logStream.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/filemap.hpp"
 #include "oops/oop.inline.hpp"
@@ -2780,7 +2781,7 @@ int os::Linux::get_existing_num_nodes() {
   // Get the total number of nodes in the system including nodes without memory.
   for (node = 0; node <= highest_node_number; node++) {
-    if (isnode_in_existing_nodes(node)) {
+    if (is_node_in_existing_nodes(node)) {
       num_nodes++;
     }
   }
@@ -2796,7 +2797,7 @@ size_t os::numa_get_leaf_groups(int *ids, size_t size) {
   // node number. If the nodes have been bound explicitly using numactl membind,
   // then allocate memory from those nodes only.
   for (int node = 0; node <= highest_node_number; node++) {
-    if (Linux::isnode_in_bound_nodes((unsigned int)node)) {
+    if (Linux::is_node_in_bound_nodes((unsigned int)node)) {
       ids[i++] = node;
     }
   }
@@ -2899,11 +2900,15 @@ bool os::Linux::libnuma_init() {
                                      libnuma_dlsym(handle, "numa_distance")));
     set_numa_get_membind(CAST_TO_FN_PTR(numa_get_membind_func_t,
                                         libnuma_v2_dlsym(handle, "numa_get_membind")));
+    set_numa_get_interleave_mask(CAST_TO_FN_PTR(numa_get_interleave_mask_func_t,
+                                                libnuma_v2_dlsym(handle, "numa_get_interleave_mask")));
 
     if (numa_available() != -1) {
       set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
       set_numa_all_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_all_nodes_ptr"));
       set_numa_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_nodes_ptr"));
+      set_numa_interleave_bitmask(_numa_get_interleave_mask());
+      set_numa_membind_bitmask(_numa_get_membind());
       // Create an index -> node mapping, since nodes are not always consecutive
       _nindex_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
       rebuild_nindex_to_node_map();
@@ -2929,7 +2934,7 @@ void os::Linux::rebuild_nindex_to_node_map() {
   nindex_to_node()->clear();
   for (int node = 0; node <= highest_node_number; node++) {
-    if (Linux::isnode_in_existing_nodes(node)) {
+    if (Linux::is_node_in_existing_nodes(node)) {
       nindex_to_node()->append(node);
     }
   }
@@ -2966,16 +2971,16 @@ void os::Linux::rebuild_cpu_to_node_map() {
     // the closest configured node. Check also if node is bound, i.e. it's allowed
     // to allocate memory from the node. If it's not allowed, map cpus in that node
     // to the closest node from which memory allocation is allowed.
-    if (!isnode_in_configured_nodes(nindex_to_node()->at(i)) ||
-        !isnode_in_bound_nodes(nindex_to_node()->at(i))) {
+    if (!is_node_in_configured_nodes(nindex_to_node()->at(i)) ||
+        !is_node_in_bound_nodes(nindex_to_node()->at(i))) {
       closest_distance = INT_MAX;
       // Check distance from all remaining nodes in the system. Ignore distance
       // from itself, from another non-configured node, and from another non-bound
       // node.
       for (size_t m = 0; m < node_num; m++) {
         if (m != i &&
-            isnode_in_configured_nodes(nindex_to_node()->at(m)) &&
-            isnode_in_bound_nodes(nindex_to_node()->at(m))) {
+            is_node_in_configured_nodes(nindex_to_node()->at(m)) &&
+            is_node_in_bound_nodes(nindex_to_node()->at(m))) {
           distance = numa_distance(nindex_to_node()->at(i), nindex_to_node()->at(m));
           // If a closest node is found, update. There is always at least one
           // configured and bound node in the system so there is always at least
@@ -3030,9 +3035,13 @@ os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
 os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset;
 os::Linux::numa_distance_func_t os::Linux::_numa_distance;
 os::Linux::numa_get_membind_func_t os::Linux::_numa_get_membind;
+os::Linux::numa_get_interleave_mask_func_t os::Linux::_numa_get_interleave_mask;
+os::Linux::NumaAllocationPolicy os::Linux::_current_numa_policy;
 unsigned long* os::Linux::_numa_all_nodes;
 struct bitmask* os::Linux::_numa_all_nodes_ptr;
 struct bitmask* os::Linux::_numa_nodes_ptr;
+struct bitmask* os::Linux::_numa_interleave_bitmask;
+struct bitmask* os::Linux::_numa_membind_bitmask;
 
 bool os::pd_uncommit_memory(char* addr, size_t size) {
   uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
@@ -4936,6 +4945,74 @@ void os::pd_init_container_support() {
   OSContainer::init();
 }
 
+void os::Linux::numa_init() {
+  // Java can be invoked as
+  // 1. Without numactl, in which case the heap is allocated/configured on all
+  //    nodes as per the system policy.
+  // 2. With numactl --interleave:
+  //      Use the numa_get_interleave_mask(v2) API to get the nodes bitmask.
+  //      The same API returns an empty (reset) bitmask in the membind case.
+  //      Interleave is only a hint and the kernel can fall back to other nodes
+  //      if no memory is available on the target nodes.
+  // 3. With numactl --membind:
+  //      Use the numa_get_membind(v2) API to get the nodes bitmask. The same
+  //      API returns a bitmask of all nodes in the interleave case.
+  // numa_all_nodes_ptr holds the bitmask of all nodes.
+  // numa_get_interleave_mask(v2) and numa_get_membind(v2) return the correct
+  // bitmask when the process is externally configured to run on all or a
+  // subset of the nodes.
+  if (!Linux::libnuma_init()) {
+    UseNUMA = false;
+  } else {
+    if ((Linux::numa_max_node() < 1) || Linux::is_bound_to_single_node()) {
+      // If there's only one node (they start from 0) or if the process
+      // is bound explicitly to a single node using membind, disable NUMA.
+      UseNUMA = false;
+    } else {
+      LogTarget(Info,os) log;
+      LogStream ls(log);
+
+      Linux::set_configured_numa_policy(Linux::identify_numa_policy());
+
+      struct bitmask* bmp = Linux::_numa_membind_bitmask;
+      const char* numa_mode = "membind";
+
+      if (Linux::is_running_in_interleave_mode()) {
+        bmp = Linux::_numa_interleave_bitmask;
+        numa_mode = "interleave";
+      }
+
+      ls.print("UseNUMA is enabled and invoked in '%s' mode."
+               " Heap will be configured using NUMA memory nodes:", numa_mode);
+
+      for (int node = 0; node <= Linux::numa_max_node(); node++) {
+        if (Linux::_numa_bitmask_isbitset(bmp, node)) {
+          ls.print(" %d", node);
+        }
+      }
+    }
+  }
+
+  if (UseParallelGC && UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
+    // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
+    // we can make the adaptive lgrp chunk resizing work. If the user specified both
+    // UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn
+    // and disable adaptive resizing.
+    if (UseAdaptiveSizePolicy || UseAdaptiveNUMAChunkSizing) {
+      warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, "
+              "disabling adaptive resizing (-XX:-UseAdaptiveSizePolicy -XX:-UseAdaptiveNUMAChunkSizing)");
+      UseAdaptiveSizePolicy = false;
+      UseAdaptiveNUMAChunkSizing = false;
+    }
+  }
+
+  if (!UseNUMA && ForceNUMA) {
+    UseNUMA = true;
+  }
+}
+
 // this is called _after_ the global arguments have been parsed
 jint os::init_2(void) {
@@ -4980,32 +5057,7 @@ jint os::init_2(void) {
                Linux::glibc_version(), Linux::libpthread_version());
 
   if (UseNUMA) {
-    if (!Linux::libnuma_init()) {
-      UseNUMA = false;
-    } else {
-      if ((Linux::numa_max_node() < 1) || Linux::isbound_to_single_node()) {
-        // If there's only one node (they start from 0) or if the process
-        // is bound explicitly to a single node using membind, disable NUMA.
-        UseNUMA = false;
-      }
-    }
-
-    if (UseParallelGC && UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
-      // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
-      // we can make the adaptive lgrp chunk resizing work. If the user specified both
-      // UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn
-      // and disable adaptive resizing.
-      if (UseAdaptiveSizePolicy || UseAdaptiveNUMAChunkSizing) {
-        warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, "
-                "disabling adaptive resizing (-XX:-UseAdaptiveSizePolicy -XX:-UseAdaptiveNUMAChunkSizing)");
-        UseAdaptiveSizePolicy = false;
-        UseAdaptiveNUMAChunkSizing = false;
-      }
-    }
-
-    if (!UseNUMA && ForceNUMA) {
-      UseNUMA = true;
-    }
+    Linux::numa_init();
   }
 
   if (MaxFDLimit) {
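
With the patch applied, the mode chosen by numa_init() can be observed through unified logging. As an illustrative example (assuming a machine with two NUMA nodes), running under numactl --interleave=all with -XX:+UseNUMA -Xlog:os=info should log a line of the form "UseNUMA is enabled and invoked in 'interleave' mode. Heap will be configured using NUMA memory nodes: 0 1", while binding to a single node with numactl --membind=0 causes numa_init() to disable NUMA instead.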