Merge tag 'mm-hotfixes-stable-2025-08-12-20-50' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "12 hotfixes. 5 are cc:stable and the remainder address post-6.16
  issues or aren't considered necessary for -stable kernels.

  10 of these fixes are for MM"

* tag 'mm-hotfixes-stable-2025-08-12-20-50' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  proc: proc_maps_open allow proc_mem_open to return NULL
  mm/mremap: avoid expensive folio lookup on mremap folio pte batch
  userfaultfd: fix a crash in UFFDIO_MOVE when PMD is a migration entry
  mm: pass page directly instead of using folio_page
  selftests/proc: fix string literal warning in proc-maps-race.c
  fs/proc/task_mmu: hold PTL in pagemap_hugetlb_range and gather_hugetlb_stats
  mm/smaps: fix race between smaps_hugetlb_range and migration
  mm: fix the race between collapse and PT_RECLAIM under per-vma lock
  mm/kmemleak: avoid soft lockup in __kmemleak_do_cleanup()
  MAINTAINERS: add Masami as a reviewer of hung task detector
  mm/kmemleak: avoid deadlock by moving pr_warn() outside kmemleak_lock
  kasan/test: fix protection against compiler elision
Linus Torvalds 2025-08-13 08:28:33 -07:00
commit 91325f31af
9 changed files with 56 additions and 31 deletions

MAINTAINERS

@@ -11438,6 +11438,7 @@ F: drivers/tty/hvc/
HUNG TASK DETECTOR
M: Andrew Morton <akpm@linux-foundation.org>
R: Lance Yang <lance.yang@linux.dev>
R: Masami Hiramatsu <mhiramat@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
F: include/linux/hung_task.h

fs/proc/task_mmu.c

@@ -340,8 +340,8 @@ static int proc_maps_open(struct inode *inode, struct file *file,
priv->inode = inode;
priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
if (IS_ERR_OR_NULL(priv->mm)) {
int err = priv->mm ? PTR_ERR(priv->mm) : -ESRCH;
if (IS_ERR(priv->mm)) {
int err = PTR_ERR(priv->mm);
seq_release_private(inode, file);
return err;
@@ -1148,10 +1148,13 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
{
struct mem_size_stats *mss = walk->private;
struct vm_area_struct *vma = walk->vma;
pte_t ptent = huge_ptep_get(walk->mm, addr, pte);
struct folio *folio = NULL;
bool present = false;
spinlock_t *ptl;
pte_t ptent;
ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
ptent = huge_ptep_get(walk->mm, addr, pte);
if (pte_present(ptent)) {
folio = page_folio(pte_page(ptent));
present = true;
@@ -1170,6 +1173,7 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
else
mss->private_hugetlb += huge_page_size(hstate_vma(vma));
}
spin_unlock(ptl);
return 0;
}
#else
@@ -2017,12 +2021,14 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
struct pagemapread *pm = walk->private;
struct vm_area_struct *vma = walk->vma;
u64 flags = 0, frame = 0;
spinlock_t *ptl;
int err = 0;
pte_t pte;
if (vma->vm_flags & VM_SOFTDIRTY)
flags |= PM_SOFT_DIRTY;
ptl = huge_pte_lock(hstate_vma(vma), walk->mm, ptep);
pte = huge_ptep_get(walk->mm, addr, ptep);
if (pte_present(pte)) {
struct folio *folio = page_folio(pte_page(pte));
@@ -2050,11 +2056,12 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
err = add_to_pagemap(&pme, pm);
if (err)
return err;
break;
if (pm->show_pfn && (flags & PM_PRESENT))
frame++;
}
spin_unlock(ptl);
cond_resched();
return err;
@@ -3128,17 +3135,22 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
unsigned long addr, unsigned long end, struct mm_walk *walk)
{
pte_t huge_pte = huge_ptep_get(walk->mm, addr, pte);
pte_t huge_pte;
struct numa_maps *md;
struct page *page;
spinlock_t *ptl;
ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
huge_pte = huge_ptep_get(walk->mm, addr, pte);
if (!pte_present(huge_pte))
return 0;
goto out;
page = pte_page(huge_pte);
md = walk->private;
gather_stats(page, md, pte_dirty(huge_pte), 1);
out:
spin_unlock(ptl);
return 0;
}
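
The proc_maps_open() hunk at the top of this file relaxes the check from IS_ERR_OR_NULL() to IS_ERR(), so a NULL mm returned by proc_mem_open() is no longer folded into -ESRCH and the open can proceed, matching the "allow proc_mem_open to return NULL" entry in the shortlog. The sketch below is a minimal userspace rendering of the kernel's ERR_PTR pointer-encoding idiom to show how the two predicates differ; the helpers are modeled on include/linux/err.h, while main() and the sample values are invented for illustration.

/*
 * Minimal userspace sketch of the ERR_PTR/IS_ERR pointer encoding.
 * Helpers modeled on include/linux/err.h; main() is illustrative only.
 */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095	/* error codes occupy the top 4095 pointer values */

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}
static inline int IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || IS_ERR(ptr);
}

int main(void)
{
	int dummy;
	void *mm_ok = &dummy;			/* valid pointer */
	void *mm_err = ERR_PTR(-EACCES);	/* encoded error */
	void *mm_none = NULL;			/* no mm at all */

	/* Old check: NULL was treated like an error and mapped to -ESRCH. */
	printf("IS_ERR_OR_NULL: ok=%d err=%d null=%d\n",
	       IS_ERR_OR_NULL(mm_ok), IS_ERR_OR_NULL(mm_err),
	       IS_ERR_OR_NULL(mm_none));

	/* New check: only encoded errors fail; a NULL mm falls through. */
	printf("IS_ERR:         ok=%d err=%d null=%d (err code %ld)\n",
	       IS_ERR(mm_ok), IS_ERR(mm_err), IS_ERR(mm_none),
	       PTR_ERR(mm_err));
	return 0;
}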

mm/kasan/kasan_test_c.c

@@ -47,7 +47,7 @@ static struct {
* Some tests use these global variables to store return values from function
* calls that could otherwise be eliminated by the compiler as dead code.
*/
static volatile void *kasan_ptr_result;
static void *volatile kasan_ptr_result;
static volatile int kasan_int_result;
/* Probe for console output: obtains test_status lines of interest. */
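
The one-line change above moves the volatile qualifier so that it applies to the pointer object rather than the pointee: with `static volatile void *p`, only accesses made through p are volatile, so a store to p whose value is never read may still be elided; with `static void *volatile p`, every assignment to p is itself a side effect the compiler must keep. A small standalone illustration under that reading (the sink and function names are invented for the demo):

/*
 * Demonstrates where 'volatile' binds in the two declarations from the hunk
 * above. Build with optimizations (e.g. gcc -O2 -S) and compare the stores
 * emitted for the two sinks; all names are invented for the demo.
 */
static volatile void *pointee_volatile_sink;	/* pointee volatile, pointer not */
static void *volatile pointer_volatile_sink;	/* pointer object itself volatile */

void record_results(void *a, void *b)
{
	/*
	 * May be removed as a dead store: nothing ever reads the pointer
	 * variable, and only accesses *through* it would be volatile.
	 */
	pointee_volatile_sink = a;

	/*
	 * Must be kept: the pointer object is volatile, so the assignment
	 * itself is an observable side effect.
	 */
	pointer_volatile_sink = b;
}

int main(void)
{
	int x = 0, y = 0;

	record_results(&x, &y);
	return 0;
}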

mm/khugepaged.c

@@ -1172,11 +1172,11 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
if (result != SCAN_SUCCEED)
goto out_up_write;
/* check if the pmd is still valid */
vma_start_write(vma);
result = check_pmd_still_valid(mm, address, pmd);
if (result != SCAN_SUCCEED)
goto out_up_write;
vma_start_write(vma);
anon_vma_lock_write(vma->anon_vma);
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
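
The reordering above moves vma_start_write(), which write-locks the VMA for per-VMA locking, ahead of check_pmd_still_valid(), per the shortlog entry "mm: fix the race between collapse and PT_RECLAIM under per-vma lock": the VMA is locked before the PMD is re-checked, so the result of the check cannot go stale before it is used. As a loose userspace analogue of that ordering rule (acquire the lock that excludes concurrent teardown before re-validating the state you rely on), here is a small pthread sketch; all names in it are invented and it is not kernel code:

/*
 * Userspace analogue of "lock first, then re-validate". All names are
 * invented for the demo. Build with: gcc -pthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool pmd_still_valid = true;	/* stand-in for the kernel-side check */

/* Concurrent path that can tear the state down (plays the reclaim role). */
static void *concurrent_teardown(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	pmd_still_valid = false;
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void collapse_path(void)
{
	/*
	 * Fixed ordering, as in the hunk above: take the exclusive lock
	 * first, then perform the validity check. Checking before locking
	 * would leave a window where concurrent_teardown() invalidates the
	 * result between the check and its use.
	 */
	pthread_mutex_lock(&lock);
	if (pmd_still_valid)
		puts("still valid: nothing can change while the lock is held");
	else
		puts("state changed underneath us: bail out");
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, concurrent_teardown, NULL);
	pthread_join(t, NULL);
	collapse_path();
	return 0;
}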

mm/kmemleak.c

@@ -470,6 +470,7 @@ static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
{
unsigned long flags;
struct kmemleak_object *object;
bool warn = false;
/* try the slab allocator first */
if (object_cache) {
@@ -488,8 +489,10 @@ static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
else if (mem_pool_free_count)
object = &mem_pool[--mem_pool_free_count];
else
pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
warn = true;
raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
if (warn)
pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
return object;
}
@@ -2181,6 +2184,7 @@ static const struct file_operations kmemleak_fops = {
static void __kmemleak_do_cleanup(void)
{
struct kmemleak_object *object, *tmp;
unsigned int cnt = 0;
/*
* Kmemleak has already been disabled, no need for RCU list traversal
@@ -2189,6 +2193,10 @@ static void __kmemleak_do_cleanup(void)
list_for_each_entry_safe(object, tmp, &object_list, object_list) {
__remove_object(object);
__delete_object(object);
/* Call cond_resched() once per 64 iterations to avoid soft lockup */
if (!(++cnt & 0x3f))
cond_resched();
}
}
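
In the __kmemleak_do_cleanup() hunk, the throttle condition `!(++cnt & 0x3f)` is true exactly when the incremented counter is a multiple of 64, since 0x3f masks the low six bits, so cond_resched() runs once every 64 freed objects; it is a cheaper spelling of `++cnt % 64 == 0`. A tiny standalone check of that equivalence (the loop bound is arbitrary):

/*
 * Verifies that the bitmask throttle matches the modulo form for a range of
 * counter values. Purely illustrative; not kernel code.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int cnt_mask = 0, cnt_mod = 0;

	for (int i = 0; i < 1000; i++) {
		int fire_mask = !(++cnt_mask & 0x3f);	/* form used in the fix */
		int fire_mod = (++cnt_mod % 64 == 0);	/* equivalent modulo form */

		assert(fire_mask == fire_mod);
	}
	printf("mask and modulo throttles agree: yield once every 64 items\n");
	return 0;
}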

mm/mprotect.c

@@ -120,9 +120,8 @@ static int mprotect_folio_pte_batch(struct folio *folio, pte_t *ptep,
static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
pte_t oldpte, pte_t *pte, int target_node,
struct folio **foliop)
struct folio *folio)
{
struct folio *folio = NULL;
bool ret = true;
bool toptier;
int nid;
@@ -131,7 +130,6 @@ static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
if (pte_protnone(oldpte))
goto skip;
folio = vm_normal_folio(vma, addr, oldpte);
if (!folio)
goto skip;
@@ -173,7 +171,6 @@ static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
folio_xchg_access_time(folio, jiffies_to_msecs(jiffies));
skip:
*foliop = folio;
return ret;
}
@@ -231,10 +228,9 @@ static int page_anon_exclusive_sub_batch(int start_idx, int max_len,
* retrieve sub-batches.
*/
static void commit_anon_folio_batch(struct vm_area_struct *vma,
struct folio *folio, unsigned long addr, pte_t *ptep,
struct folio *folio, struct page *first_page, unsigned long addr, pte_t *ptep,
pte_t oldpte, pte_t ptent, int nr_ptes, struct mmu_gather *tlb)
{
struct page *first_page = folio_page(folio, 0);
bool expected_anon_exclusive;
int sub_batch_idx = 0;
int len;
@@ -251,7 +247,7 @@ static void commit_anon_folio_batch(struct vm_area_struct *vma,
}
static void set_write_prot_commit_flush_ptes(struct vm_area_struct *vma,
struct folio *folio, unsigned long addr, pte_t *ptep,
struct folio *folio, struct page *page, unsigned long addr, pte_t *ptep,
pte_t oldpte, pte_t ptent, int nr_ptes, struct mmu_gather *tlb)
{
bool set_write;
@@ -270,7 +266,7 @@ static void set_write_prot_commit_flush_ptes(struct vm_area_struct *vma,
/* idx = */ 0, set_write, tlb);
return;
}
commit_anon_folio_batch(vma, folio, addr, ptep, oldpte, ptent, nr_ptes, tlb);
commit_anon_folio_batch(vma, folio, page, addr, ptep, oldpte, ptent, nr_ptes, tlb);
}
static long change_pte_range(struct mmu_gather *tlb,
@@ -305,15 +301,19 @@ static long change_pte_range(struct mmu_gather *tlb,
const fpb_t flags = FPB_RESPECT_SOFT_DIRTY | FPB_RESPECT_WRITE;
int max_nr_ptes = (end - addr) >> PAGE_SHIFT;
struct folio *folio = NULL;
struct page *page;
pte_t ptent;
page = vm_normal_page(vma, addr, oldpte);
if (page)
folio = page_folio(page);
/*
* Avoid trapping faults against the zero or KSM
* pages. See similar comment in change_huge_pmd.
*/
if (prot_numa) {
int ret = prot_numa_skip(vma, addr, oldpte, pte,
target_node, &folio);
target_node, folio);
if (ret) {
/* determine batch to skip */
@@ -323,9 +323,6 @@ static long change_pte_range(struct mmu_gather *tlb,
}
}
if (!folio)
folio = vm_normal_folio(vma, addr, oldpte);
nr_ptes = mprotect_folio_pte_batch(folio, pte, oldpte, max_nr_ptes, flags);
oldpte = modify_prot_start_ptes(vma, addr, pte, nr_ptes);
@@ -351,7 +348,7 @@ static long change_pte_range(struct mmu_gather *tlb,
*/
if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) &&
!pte_write(ptent))
set_write_prot_commit_flush_ptes(vma, folio,
set_write_prot_commit_flush_ptes(vma, folio, page,
addr, pte, oldpte, ptent, nr_ptes, tlb);
else
prot_commit_flush_ptes(vma, addr, pte, oldpte, ptent,

mm/mremap.c

@@ -179,6 +179,10 @@ static int mremap_folio_pte_batch(struct vm_area_struct *vma, unsigned long addr
if (max_nr == 1)
return 1;
/* Avoid expensive folio lookup if we stand no chance of benefit. */
if (pte_batch_hint(ptep, pte) == 1)
return 1;
folio = vm_normal_folio(vma, addr, pte);
if (!folio || !folio_test_large(folio))
return 1;

mm/userfaultfd.c

@@ -1821,13 +1821,16 @@ ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
/* Check if we can move the pmd without splitting it. */
if (move_splits_huge_pmd(dst_addr, src_addr, src_start + len) ||
!pmd_none(dst_pmdval)) {
struct folio *folio = pmd_folio(*src_pmd);
/* Can be a migration entry */
if (pmd_present(*src_pmd)) {
struct folio *folio = pmd_folio(*src_pmd);
if (!folio || (!is_huge_zero_folio(folio) &&
!PageAnonExclusive(&folio->page))) {
spin_unlock(ptl);
err = -EBUSY;
break;
if (!is_huge_zero_folio(folio) &&
!PageAnonExclusive(&folio->page)) {
spin_unlock(ptl);
err = -EBUSY;
break;
}
}
spin_unlock(ptl);

tools/testing/selftests/proc/proc-maps-race.c

@@ -202,11 +202,11 @@ static void print_first_lines(char *text, int nr)
int offs = end - text;
text[offs] = '\0';
printf(text);
printf("%s", text);
text[offs] = '\n';
printf("\n");
} else {
printf(text);
printf("%s", text);
}
}
@@ -221,7 +221,7 @@ static void print_last_lines(char *text, int nr)
nr--;
start--;
}
printf(start);
printf("%s", start);
}
static void print_boundaries(const char *title, FIXTURE_DATA(proc_maps_race) *self)