Mirror of https://github.com/torvalds/linux.git, synced 2025-08-15 14:11:42 +02:00

LoongArch's huge_pte_offset() currently returns a pointer to a PMD slot even if the underlying entry points to invalid_pte_table (indicating no mapping). Callers like smaps_hugetlb_range() fetch this invalid entry value (the address of invalid_pte_table) via this pointer.

The generic is_swap_pte() check then incorrectly identifies this address as a swap entry on LoongArch, because it satisfies the "!pte_present() && !pte_none()" conditions. This misinterpretation, combined with a coincidental match by is_migration_entry() on the address bits, leads to kernel crashes in pfn_swap_entry_to_page().

Fix this at the architecture level by modifying huge_pte_offset() to check the PMD entry's content using pmd_none() before returning. If the entry is invalid (i.e., it points to invalid_pte_table), return NULL instead of the pointer to the slot.

Cc: stable@vger.kernel.org
Acked-by: Peter Xu <peterx@redhat.com>
Co-developed-by: Hongchen Zhang <zhanghongchen@loongson.cn>
Signed-off-by: Hongchen Zhang <zhanghongchen@loongson.cn>
Signed-off-by: Ming Wang <wangming01@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
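To see why the stale slot value trips the generic swap-entry test, the mechanism can be sketched in miniature outside the kernel. The following standalone C program is illustration only, not kernel code: the _PAGE_PRESENT bit position, the 512-entry table size and the helper bodies are simplified stand-ins assumed here for clarity, while the real LoongArch definitions live in the architecture headers.

#include <stdio.h>

#define _PAGE_PRESENT	(1UL << 0)	/* toy "present" bit, not the real LoongArch layout */

typedef struct { unsigned long val; } pte_t;

/* Simplified models of the generic predicates. */
static int pte_present(pte_t pte) { return (pte.val & _PAGE_PRESENT) != 0; }
static int pte_none(pte_t pte)    { return pte.val == 0; }

/* The generic swap-entry test used on the walk done by callers
 * such as smaps_hugetlb_range(). */
static int is_swap_pte(pte_t pte) { return !pte_present(pte) && !pte_none(pte); }

/* Stand-in for the kernel's invalid_pte_table page. */
static unsigned long invalid_pte_table[512];

int main(void)
{
	/*
	 * Before the fix, huge_pte_offset() could hand back a slot whose
	 * content is the address of invalid_pte_table ("no mapping").
	 * That value is nonzero (so not "none") and its present bit is
	 * clear, so the generic check classifies it as a swap entry.
	 */
	pte_t bogus = { .val = (unsigned long)invalid_pte_table };

	printf("pte_present=%d pte_none=%d is_swap_pte=%d\n",
	       pte_present(bogus), pte_none(bogus), is_swap_pte(bogus));
	/* Prints: pte_present=0 pte_none=0 is_swap_pte=1 */
	return 0;
}

With the pmd_none() check added in the file below, huge_pte_offset() returns NULL for such a slot, so callers never feed the address of invalid_pte_table into the swap-entry path in the first place.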
65 lines · 1.4 KiB · C
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	pud = pud_alloc(mm, p4d, addr);
	if (pud)
		pte = (pte_t *)pmd_alloc(mm, pud, addr);

	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
		       unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_present(pgdp_get(pgd))) {
		p4d = p4d_offset(pgd, addr);
		if (p4d_present(p4dp_get(p4d))) {
			pud = pud_offset(p4d, addr);
			if (pud_present(pudp_get(pud)))
				pmd = pmd_offset(pud, addr);
		}
	}
	return pmd_none(pmdp_get(pmd)) ? NULL : (pte_t *) pmd;
}

uint64_t pmd_to_entrylo(unsigned long pmd_val)
{
	uint64_t val;
	/* PMD as PTE. Must be huge page */
	if (!pmd_leaf(__pmd(pmd_val)))
		panic("%s", __func__);

	val = pmd_val ^ _PAGE_HUGE;
	val |= ((val & _PAGE_HGLOBAL) >>
		(_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT));

	return val;
}