kdump: implement reserve_crashkernel_cma

reserve_crashkernel_cma() reserves CMA ranges for the crash kernel.  If
allocating the requested size fails, the reservation is retried with
progressively smaller, halved blocks.

Store the reserved ranges in the crashk_cma_ranges array and the number of
ranges in crashk_cma_cnt.
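
As an illustration only (not part of this patch), an architecture using
the generic reservation path could feed the new cma_size output of
parse_crashkernel() into reserve_crashkernel_cma() roughly as sketched
below; arch_reserve_crashkernel() is a hypothetical wrapper name and the
surrounding arch code is assumed:

	static void __init arch_reserve_crashkernel(void)
	{
		unsigned long long crash_size, crash_base, low_size, cma_size;
		bool high = false;

		/* parse the crashkernel= options, including the CMA size */
		if (parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				      &crash_size, &crash_base, &low_size,
				      &cma_size, &high))
			return;

		/* ... reserve the regular crashkernel region(s) here ... */

		/*
		 * Reserve the CMA part last; the resulting ranges end up in
		 * crashk_cma_ranges[0..crashk_cma_cnt).
		 */
		reserve_crashkernel_cma(cma_size);
	}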

Link: https://lkml.kernel.org/r/aEqpBwOy_ekm0gw9@dwarf.suse.cz
Signed-off-by: Jiri Bohac <jbohac@suse.cz>
Cc: Baoquan He <bhe@redhat.com>
Cc: Dave Young <dyoung@redhat.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Donald Dutile <ddutile@redhat.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Philipp Rudo <prudo@redhat.com>
Cc: Pingfan Liu <piliu@redhat.com>
Cc: Tao Liu <ltao@redhat.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

include/linux/crash_reserve.h

@@ -13,12 +13,24 @@
*/
extern struct resource crashk_res;
extern struct resource crashk_low_res;
extern struct range crashk_cma_ranges[];
#if defined(CONFIG_CMA) && defined(CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION)
#define CRASHKERNEL_CMA
#define CRASHKERNEL_CMA_RANGES_MAX 4
extern int crashk_cma_cnt;
#else
#define crashk_cma_cnt 0
#define CRASHKERNEL_CMA_RANGES_MAX 0
#endif
int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
		unsigned long long *crash_size, unsigned long long *crash_base,
		unsigned long long *low_size, unsigned long long *cma_size,
		bool *high);

void __init reserve_crashkernel_cma(unsigned long long cma_size);

#ifdef CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
#ifndef DEFAULT_CRASH_KERNEL_LOW_SIZE
#define DEFAULT_CRASH_KERNEL_LOW_SIZE (128UL << 20)
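
With CRASHKERNEL_CMA undefined, the header above turns crashk_cma_cnt
into the compile-time constant 0, so generic code can walk the reserved
ranges without an #ifdef.  A minimal sketch of such a consumer loop, where
add_crash_range() is a hypothetical helper and not part of this patch:

	int i;

	for (i = 0; i < crashk_cma_cnt; i++)
		add_crash_range(crashk_cma_ranges[i].start,
				crashk_cma_ranges[i].end);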

kernel/crash_reserve.c

@@ -14,6 +14,8 @@
#include <linux/cpuhotplug.h>
#include <linux/memblock.h>
#include <linux/kmemleak.h>
#include <linux/cma.h>
#include <linux/crash_reserve.h>
#include <asm/page.h>
#include <asm/sections.h>
@@ -469,6 +471,56 @@ retry:
#endif
}

struct range crashk_cma_ranges[CRASHKERNEL_CMA_RANGES_MAX];

#ifdef CRASHKERNEL_CMA
int crashk_cma_cnt;

void __init reserve_crashkernel_cma(unsigned long long cma_size)
{
	unsigned long long request_size = roundup(cma_size, PAGE_SIZE);
	unsigned long long reserved_size = 0;

	if (!cma_size)
		return;

	while (cma_size > reserved_size &&
	       crashk_cma_cnt < CRASHKERNEL_CMA_RANGES_MAX) {
		struct cma *res;

		if (cma_declare_contiguous(0, request_size, 0, 0, 0, false,
					   "crashkernel", &res)) {
			/* reservation failed, try half-sized blocks */
			if (request_size <= PAGE_SIZE)
				break;

			request_size = roundup(request_size / 2, PAGE_SIZE);
			continue;
		}

		crashk_cma_ranges[crashk_cma_cnt].start = cma_get_base(res);
		crashk_cma_ranges[crashk_cma_cnt].end =
			crashk_cma_ranges[crashk_cma_cnt].start +
			cma_get_size(res) - 1;
		++crashk_cma_cnt;
		reserved_size += request_size;
	}

	if (cma_size > reserved_size)
		pr_warn("crashkernel CMA reservation failed: %lld MB requested, %lld MB reserved in %d ranges\n",
			cma_size >> 20, reserved_size >> 20, crashk_cma_cnt);
	else
		pr_info("crashkernel CMA reserved: %lld MB in %d ranges\n",
			reserved_size >> 20, crashk_cma_cnt);
}

#else /* CRASHKERNEL_CMA */

void __init reserve_crashkernel_cma(unsigned long long cma_size)
{
	if (cma_size)
		pr_warn("crashkernel CMA reservation not supported\n");
}

#endif

#ifndef HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY
static __init int insert_crashkernel_resources(void)
{