opcache/FreeBSD huge code page pragma support
Following up on the earlier super pages support, this change also detects existing page mappings that may be eligible to be promoted to super pages.
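For context, the FreeBSD side of this change walks the process's own virtual memory map through the kern.proc.vmmap sysctl ({CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, pid}) and picks out entries that are readable, executable, not writable and backed by a file, i.e. the mapped text of the binary and its shared objects. Below is a minimal standalone sketch of that enumeration; it only assumes the standard FreeBSD struct kinfo_vmentry interface and is not taken from the patch itself.

#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	/* kern.proc.vmmap.<pid>: dump this process's VM map entries. */
	int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid()};
	size_t len = 0;

	/* First call with a NULL buffer only reports the size needed. */
	if (sysctl(mib, 4, NULL, &len, NULL, 0) != 0) {
		return 1;
	}
	len = len * 4 / 3; /* headroom: the map can grow between the two calls */
	char *buf = malloc(len);
	if (buf == NULL || sysctl(mib, 4, buf, &len, NULL, 0) != 0) {
		free(buf);
		return 1;
	}
	for (char *p = buf, *end = buf + len; p < end; ) {
		struct kinfo_vmentry *entry = (struct kinfo_vmentry *)p;
		if (entry->kve_structsize == 0) {
			break; /* entries are variable-sized; 0 marks the end */
		}
		if ((entry->kve_protection & KVME_PROT_READ) &&
		    !(entry->kve_protection & KVME_PROT_WRITE) &&
		    (entry->kve_protection & KVME_PROT_EXEC) &&
		    entry->kve_path[0] != '\0') {
			/* read+exec, non-writable, file-backed: a remap candidate */
			printf("%jx-%jx %s\n", (uintmax_t)entry->kve_start,
			       (uintmax_t)entry->kve_end, entry->kve_path);
		}
		p += entry->kve_structsize;
	}
	free(buf);
	return 0;
}

Each record is variable-sized, so the walk advances by kve_structsize rather than sizeof(struct kinfo_vmentry), exactly as the patch does.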
parent a453619499
commit 6a8260a0ac
2 changed files with 48 additions and 1 deletion
@@ -2624,6 +2624,12 @@ static void accel_gen_system_id(void)
# ifndef MAP_FAILED
# define MAP_FAILED ((void*)-1)
# endif
# ifdef MAP_ALIGNED_SUPER
# include <sys/types.h>
# include <sys/sysctl.h>
# include <sys/user.h>
# define MAP_HUGETLB MAP_ALIGNED_SUPER
# endif
# endif

# if defined(MAP_HUGETLB) || defined(MADV_HUGEPAGE)
@@ -2689,6 +2695,7 @@ static int accel_remap_huge_pages(void *start, size_t size, const char *name, si
static void accel_move_code_to_huge_pages(void)
{
#if defined(__linux__)
	FILE *f;
	long unsigned int huge_page_size = 2 * 1024 * 1024;
@@ -2710,6 +2717,39 @@ static void accel_move_code_to_huge_pages(void)
		}
		fclose(f);
	}
#elif defined(__FreeBSD__)
	size_t s = 0;
	int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid()};
	long unsigned int huge_page_size = 2 * 1024 * 1024;
	if (sysctl(mib, 4, NULL, &s, NULL, 0) == 0) {
		void *addr = mmap(NULL, s * sizeof (struct kinfo_vmentry), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
		if (addr != MAP_FAILED) {
			s = s * 4 / 3;
			if (sysctl(mib, 4, addr, &s, NULL, 0) == 0) {
				uintptr_t start = (uintptr_t)addr;
				uintptr_t end = start + s;
				while (start < end) {
					struct kinfo_vmentry *entry = (struct kinfo_vmentry *)start;
					size_t sz = entry->kve_structsize;
					if (sz == 0) {
						break;
					}
					int permflags = entry->kve_protection;
					if ((permflags & KVME_PROT_READ) && !(permflags & KVME_PROT_WRITE) &&
						(permflags & KVME_PROT_EXEC) && entry->kve_path[0] != '\0') {
						long unsigned int seg_start = ZEND_MM_ALIGNED_SIZE_EX(entry->kve_start, huge_page_size);
						long unsigned int seg_end = (entry->kve_end & ~(huge_page_size-1L));
						if (seg_end > seg_start) {
							zend_accel_error(ACCEL_LOG_DEBUG, "remap to huge page %lx-%lx %s \n", seg_start, seg_end, entry->kve_path);
							accel_remap_huge_pages((void*)seg_start, seg_end - seg_start, entry->kve_path, entry->kve_offset + seg_start - entry->kve_start);
						}
					}
					start += sz;
				}
			}
		}
	}
#endif
}
# else
static void accel_move_code_to_huge_pages(void)
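The candidate ranges found above are trimmed inward to 2 MiB boundaries before being handed to accel_remap_huge_pages: the mapping's start is rounded up and its end rounded down to the huge page size, and the remap is skipped when no aligned range remains. A small standalone illustration of that arithmetic follows, with local ALIGN_UP/ALIGN_DOWN macros standing in for ZEND_MM_ALIGNED_SIZE_EX and the mask, and hypothetical addresses.

#include <stdint.h>
#include <stdio.h>

#define HUGE_PAGE_SIZE   (2UL * 1024 * 1024)
/* Local stand-ins for the Zend macro and the mask used in the patch. */
#define ALIGN_UP(x, a)   (((x) + ((a) - 1)) & ~((a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

int main(void)
{
	/* Hypothetical bounds of a read+exec mapping. */
	uintptr_t kve_start = 0x2012345;
	uintptr_t kve_end   = 0x2a00000;

	uintptr_t seg_start = ALIGN_UP(kve_start, HUGE_PAGE_SIZE);   /* 0x2200000 */
	uintptr_t seg_end   = ALIGN_DOWN(kve_end, HUGE_PAGE_SIZE);   /* 0x2a00000 */

	if (seg_end > seg_start) {
		/* Only this fully aligned sub-range would be remapped. */
		printf("remap %lx-%lx (%lu huge pages)\n",
		       (unsigned long)seg_start, (unsigned long)seg_end,
		       (unsigned long)((seg_end - seg_start) / HUGE_PAGE_SIZE));
	}
	return 0;
}

Rounding inward rather than outward keeps the remapped region entirely inside the original mapping, at the cost of leaving the unaligned head and tail on normal pages.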
@@ -32,6 +32,9 @@
#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
# define MAP_ANONYMOUS MAP_ANON
#endif
#if defined(MAP_ALIGNED_SUPER)
# define MAP_HUGETLB MAP_ALIGNED_SUPER
#endif

static int create_segments(size_t requested_size, zend_shared_segment ***shared_segments_p, int *shared_segments_count, char **error_in)
{
@@ -48,10 +51,14 @@ static int create_segments(size_t requested_size, zend_shared_segment ***shared_

#ifdef MAP_HUGETLB
	/* Try to allocate huge pages first to reduce dTLB misses.
	 * OSes have to be configured properly
	 * on Linux
	 * (e.g. https://wiki.debian.org/Hugepages#Enabling_HugeTlbPage)
	 * You may verify huge page usage with the following command:
	 * `grep "Huge" /proc/meminfo`
	 * on FreeBSD
	 * sysctl vm.pmap.pg_ps_enabled entry
	 * (boot time config only, but enabled by default on most arches).
	 */
	shared_segment->p = mmap(0, requested_size, PROT_READ | PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS|MAP_HUGETLB, -1, 0);
	if (shared_segment->p != MAP_FAILED) {
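FreeBSD has no MAP_HUGETLB; aliasing it to MAP_ALIGNED_SUPER, as the hunk above does, turns the existing mmap call into a request for a super-page-aligned region that the kernel can promote to super pages. Since the result is then checked against MAP_FAILED, the overall shape is "try the huge/aligned mapping first, fall back to a plain anonymous one". Below is a minimal sketch of that pattern under the same defines; map_segment is a hypothetical helper, not the actual create_segments code.

#include <sys/mman.h>
#include <stddef.h>
#include <stdio.h>

/* Same alias the patch introduces: FreeBSD spells the request MAP_ALIGNED_SUPER. */
#if defined(MAP_ALIGNED_SUPER) && !defined(MAP_HUGETLB)
# define MAP_HUGETLB MAP_ALIGNED_SUPER
#endif

static void *map_segment(size_t size)
{
	void *p = MAP_FAILED;
#ifdef MAP_HUGETLB
	/* Ask for huge pages (Linux) or super-page alignment (FreeBSD) first. */
	p = mmap(NULL, size, PROT_READ | PROT_WRITE,
	         MAP_SHARED | MAP_ANON | MAP_HUGETLB, -1, 0);
#endif
	if (p == MAP_FAILED) {
		/* Fall back to an ordinary anonymous mapping. */
		p = mmap(NULL, size, PROT_READ | PROT_WRITE,
		         MAP_SHARED | MAP_ANON, -1, 0);
	}
	return p;
}

int main(void)
{
	void *p = map_segment(2 * 1024 * 1024);
	printf("segment at %p\n", p == MAP_FAILED ? NULL : p);
	return p == MAP_FAILED;
}

On Linux the first mmap only succeeds when explicit huge pages have been reserved, which is why the fallback matters; on FreeBSD the aligned mapping normally succeeds and promotion to super pages is left to the kernel (see the vm.pmap.pg_ps_enabled note in the comment above).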