Mirror of https://github.com/openjdk/jdk.git
8213827: NUMA heap allocation does not respect process membind/interleave settings
Optionally use libnuma v2 API to query for and support NUMA membind/interleave process configuration.
Reviewed-by: tschatzl, sangheki
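Note: the hunks below only show the os::Linux wrapper declarations in the header. As a point of reference, here is a minimal standalone sketch (not part of the change) of how the libnuma v2 calls the change relies on, numa_get_interleave_mask(), numa_get_membind() and numa_bitmask_isbitset(), classify the process-level policy in the same way as the new identify_numa_policy(). It assumes a program linked directly against libnuma (-lnuma); HotSpot itself resolves these symbols at runtime instead.

// Standalone sketch (not JDK code): report whether the current process was
// started with an interleave or a membind NUMA policy, and which nodes it
// is allowed to use. Build with: g++ numa_policy.cpp -lnuma
#include <numa.h>    // libnuma v2 API
#include <stdio.h>

int main() {
  if (numa_available() == -1) {
    printf("NUMA not available on this machine\n");
    return 1;
  }
  // numa_get_interleave_mask() reflects `numactl --interleave=...`,
  // numa_get_membind() reflects `numactl --membind=...`.
  struct bitmask* interleave_mask = numa_get_interleave_mask();
  struct bitmask* membind_mask    = numa_get_membind();

  // Same check as the new identify_numa_policy(): any bit set in the
  // interleave mask means the process runs in interleave mode.
  bool interleave = false;
  for (int node = 0; node <= numa_max_node(); node++) {
    if (numa_bitmask_isbitset(interleave_mask, node)) {
      interleave = true;
      break;
    }
  }
  printf("process policy: %s\n", interleave ? "Interleave" : "Membind");

  struct bitmask* mask = interleave ? interleave_mask : membind_mask;
  for (int node = 0; node <= numa_max_node(); node++) {
    if (numa_bitmask_isbitset(mask, node)) {
      printf("  node %d is in the configured node set\n", node);
    }
  }
  numa_bitmask_free(interleave_mask);
  numa_bitmask_free(membind_mask);
  return 0;
}

Running the sketch under numactl --interleave=all versus numactl --membind=0 shows the two process configurations this change distinguishes.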
parent a5f592e5b1
commit 8bd5f49675
2 changed files with 138 additions and 45 deletions
@@ -211,6 +211,7 @@ class Linux {
   // none present

  private:
+  static void numa_init();
   static void expand_stack_to(address bottom);

   typedef int (*sched_getcpu_func_t)(void);
@@ -222,6 +223,7 @@ class Linux {
   typedef void (*numa_interleave_memory_func_t)(void *start, size_t size, unsigned long *nodemask);
   typedef void (*numa_interleave_memory_v2_func_t)(void *start, size_t size, struct bitmask* mask);
   typedef struct bitmask* (*numa_get_membind_func_t)(void);
+  typedef struct bitmask* (*numa_get_interleave_mask_func_t)(void);

   typedef void (*numa_set_bind_policy_func_t)(int policy);
   typedef int (*numa_bitmask_isbitset_func_t)(struct bitmask *bmp, unsigned int n);
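These typedefs exist because HotSpot does not link against libnuma; the matching function pointers declared in the next hunk are filled in at runtime from os_linux.cpp, which is not part of this excerpt. A rough sketch of that resolution pattern, assuming the conventional libnuma.so.1 soname and the libnuma_1.2 version tag that libnuma uses for its v2 symbols:

// Sketch only (the real lookup code lives in os_linux.cpp and is not shown
// here). dlvsym() picks the v2 variant of numa_interleave_memory, which takes
// a struct bitmask* instead of an unsigned long* nodemask.
#ifndef _GNU_SOURCE
#define _GNU_SOURCE            // dlvsym() is a GNU extension
#endif
#include <dlfcn.h>
#include <stddef.h>

struct bitmask;                // opaque here; defined by <numa.h>

typedef void (*numa_interleave_memory_v2_func_t)(void *start, size_t size, struct bitmask* mask);
typedef struct bitmask* (*numa_get_interleave_mask_func_t)(void);

int main() {
  void* handle = dlopen("libnuma.so.1", RTLD_LAZY);
  if (handle == NULL) return 1;

  // Assumption: v2 symbols carry the libnuma_1.2 version tag; an old libnuma
  // simply yields NULL and the caller falls back to the v1 code path.
  numa_interleave_memory_v2_func_t interleave_v2 =
      (numa_interleave_memory_v2_func_t) dlvsym(handle, "numa_interleave_memory", "libnuma_1.2");
  numa_get_interleave_mask_func_t get_interleave_mask =
      (numa_get_interleave_mask_func_t) dlsym(handle, "numa_get_interleave_mask");

  // A NULL pointer just means the optional v2 functionality is unavailable,
  // which is exactly how the wrappers below treat their cached pointers.
  return (interleave_v2 != NULL && get_interleave_mask != NULL) ? 0 : 2;
}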
@@ -239,9 +241,12 @@ class Linux {
   static numa_bitmask_isbitset_func_t _numa_bitmask_isbitset;
   static numa_distance_func_t _numa_distance;
   static numa_get_membind_func_t _numa_get_membind;
+  static numa_get_interleave_mask_func_t _numa_get_interleave_mask;
   static unsigned long* _numa_all_nodes;
   static struct bitmask* _numa_all_nodes_ptr;
   static struct bitmask* _numa_nodes_ptr;
+  static struct bitmask* _numa_interleave_bitmask;
+  static struct bitmask* _numa_membind_bitmask;

   static void set_sched_getcpu(sched_getcpu_func_t func) { _sched_getcpu = func; }
   static void set_numa_node_to_cpus(numa_node_to_cpus_func_t func) { _numa_node_to_cpus = func; }
@@ -255,10 +260,21 @@ class Linux {
   static void set_numa_bitmask_isbitset(numa_bitmask_isbitset_func_t func) { _numa_bitmask_isbitset = func; }
   static void set_numa_distance(numa_distance_func_t func) { _numa_distance = func; }
   static void set_numa_get_membind(numa_get_membind_func_t func) { _numa_get_membind = func; }
+  static void set_numa_get_interleave_mask(numa_get_interleave_mask_func_t func) { _numa_get_interleave_mask = func; }
   static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
   static void set_numa_all_nodes_ptr(struct bitmask **ptr) { _numa_all_nodes_ptr = (ptr == NULL ? NULL : *ptr); }
   static void set_numa_nodes_ptr(struct bitmask **ptr) { _numa_nodes_ptr = (ptr == NULL ? NULL : *ptr); }
+  static void set_numa_interleave_bitmask(struct bitmask* ptr) { _numa_interleave_bitmask = ptr ; }
+  static void set_numa_membind_bitmask(struct bitmask* ptr) { _numa_membind_bitmask = ptr ; }
   static int sched_getcpu_syscall(void);
+
+  enum NumaAllocationPolicy{
+    NotInitialized,
+    Membind,
+    Interleave
+  };
+  static NumaAllocationPolicy _current_numa_policy;
+
  public:
   static int sched_getcpu() { return _sched_getcpu != NULL ? _sched_getcpu() : -1; }
   static int numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) {
@@ -272,11 +288,33 @@ class Linux {
   static int numa_tonode_memory(void *start, size_t size, int node) {
     return _numa_tonode_memory != NULL ? _numa_tonode_memory(start, size, node) : -1;
   }
+
+  static bool is_running_in_interleave_mode() {
+    return _current_numa_policy == Interleave;
+  }
+
+  static void set_configured_numa_policy(NumaAllocationPolicy numa_policy) {
+    _current_numa_policy = numa_policy;
+  }
+
+  static NumaAllocationPolicy identify_numa_policy() {
+    for (int node = 0; node <= Linux::numa_max_node(); node++) {
+      if (Linux::_numa_bitmask_isbitset(Linux::_numa_interleave_bitmask, node)) {
+        return Interleave;
+      }
+    }
+    return Membind;
+  }
+
   static void numa_interleave_memory(void *start, size_t size) {
-    // Use v2 api if available
-    if (_numa_interleave_memory_v2 != NULL && _numa_all_nodes_ptr != NULL) {
-      _numa_interleave_memory_v2(start, size, _numa_all_nodes_ptr);
-    } else if (_numa_interleave_memory != NULL && _numa_all_nodes != NULL) {
+    // Prefer v2 API
+    if (_numa_interleave_memory_v2 != NULL) {
+      if (is_running_in_interleave_mode()) {
+        _numa_interleave_memory_v2(start, size, _numa_interleave_bitmask);
+      } else if (_numa_membind_bitmask != NULL) {
+        _numa_interleave_memory_v2(start, size, _numa_membind_bitmask);
+      }
+    } else if (_numa_interleave_memory != NULL) {
       _numa_interleave_memory(start, size, _numa_all_nodes);
     }
   }
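For illustration only, a standalone sketch of the allocation-side effect of the reworked wrapper above: when the process was started under numactl --interleave, a region is interleaved over the configured interleave mask; otherwise it is interleaved over the membind mask rather than over all nodes. The mmap() call and the region size are arbitrary choices for the sketch; HotSpot performs roughly the equivalent call on the memory it reserves for the Java heap.

// Standalone sketch (not JDK code): apply the membind/interleave decision to
// a freshly reserved region using the libnuma v2 numa_interleave_memory(),
// which takes a struct bitmask*. Build with: g++ numa_interleave.cpp -lnuma
#include <numa.h>
#include <sys/mman.h>
#include <stdio.h>

int main() {
  if (numa_available() == -1) return 1;

  size_t size = 64 * 1024 * 1024;   // arbitrary region size for the sketch
  void* heap = mmap(NULL, size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (heap == MAP_FAILED) return 1;

  struct bitmask* interleave_mask = numa_get_interleave_mask();
  struct bitmask* membind_mask    = numa_get_membind();

  // Same test as is_running_in_interleave_mode()/identify_numa_policy():
  // any bit in the interleave mask means the process-level policy is interleave.
  bool interleave = false;
  for (int node = 0; node <= numa_max_node(); node++) {
    if (numa_bitmask_isbitset(interleave_mask, node)) { interleave = true; break; }
  }

  // Spread the region only over nodes the process is actually allowed to use.
  numa_interleave_memory(heap, size, interleave ? interleave_mask : membind_mask);
  printf("region interleaved over the %s mask\n", interleave ? "interleave" : "membind");

  numa_bitmask_free(interleave_mask);
  numa_bitmask_free(membind_mask);
  munmap(heap, size);
  return 0;
}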
@@ -291,14 +329,14 @@ class Linux {
   static int get_node_by_cpu(int cpu_id);
   static int get_existing_num_nodes();
   // Check if numa node is configured (non-zero memory node).
-  static bool isnode_in_configured_nodes(unsigned int n) {
+  static bool is_node_in_configured_nodes(unsigned int n) {
     if (_numa_bitmask_isbitset != NULL && _numa_all_nodes_ptr != NULL) {
       return _numa_bitmask_isbitset(_numa_all_nodes_ptr, n);
     } else
       return false;
   }
   // Check if numa node exists in the system (including zero memory nodes).
-  static bool isnode_in_existing_nodes(unsigned int n) {
+  static bool is_node_in_existing_nodes(unsigned int n) {
     if (_numa_bitmask_isbitset != NULL && _numa_nodes_ptr != NULL) {
       return _numa_bitmask_isbitset(_numa_nodes_ptr, n);
     } else if (_numa_bitmask_isbitset != NULL && _numa_all_nodes_ptr != NULL) {
@@ -317,16 +355,19 @@ class Linux {
       return false;
   }
   // Check if node is in bound node set.
-  static bool isnode_in_bound_nodes(int node) {
-    if (_numa_get_membind != NULL && _numa_bitmask_isbitset != NULL) {
-      return _numa_bitmask_isbitset(_numa_get_membind(), node);
-    } else {
-      return false;
+  static bool is_node_in_bound_nodes(int node) {
+    if (_numa_bitmask_isbitset != NULL) {
+      if (is_running_in_interleave_mode()) {
+        return _numa_bitmask_isbitset(_numa_interleave_bitmask, node);
+      } else {
+        return _numa_membind_bitmask != NULL ? _numa_bitmask_isbitset(_numa_membind_bitmask, node) : false;
+      }
     }
+    return false;
   }
   // Check if bound to only one numa node.
   // Returns true if bound to a single numa node, otherwise returns false.
-  static bool isbound_to_single_node() {
+  static bool is_bound_to_single_node() {
     int nodes = 0;
     struct bitmask* bmp = NULL;
     unsigned int node = 0;