author		Linus Torvalds <torvalds@linux-foundation.org>	2025-11-26 12:38:05 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2025-11-26 12:38:05 -0800
commit		9eb220edddb20a019d1630347555bc4ae1514e61 (patch)
tree		616bd7086bc4d2bc78e5419eaf57514072cb88d9
parent		ad8cccc24887655ebf0a3f459d38d1fb683df46a (diff)
parent		7c9580f44f90f7a4c11fc7831efe323ebe446091 (diff)
download	linux-9eb220edddb20a019d1630347555bc4ae1514e61.tar.gz
Merge tag 'mm-hotfixes-stable-2025-11-26-11-51' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc fixes from Andrew Morton:
 "8 hotfixes. 4 are cc:stable, 7 are against mm/. All are singletons -
  please see the respective changelogs for details"

* tag 'mm-hotfixes-stable-2025-11-26-11-51' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm/filemap: fix logic around SIGBUS in filemap_map_pages()
  mm/huge_memory: fix NULL pointer deference when splitting folio
  MAINTAINERS: add test_kho to KHO's entry
  mailmap: add entry for Sam Protsenko
  selftests/mm: fix division-by-zero in uffd-unit-tests
  mm/mmap_lock: reset maple state on lock_vma_under_rcu() retry
  mm/memfd: fix information leak in hugetlb folios
  mm: swap: remove duplicate nr_swap_pages decrement in get_swap_page_of_type()
-rw-r--r--  .mailmap                                        2
-rw-r--r--  MAINTAINERS                                     1
-rw-r--r--  mm/filemap.c                                   27
-rw-r--r--  mm/huge_memory.c                               22
-rw-r--r--  mm/memfd.c                                     27
-rw-r--r--  mm/mmap_lock.c                                  1
-rw-r--r--  mm/swapfile.c                                   4
-rw-r--r--  tools/testing/selftests/mm/uffd-unit-tests.c   15

8 files changed, 63 insertions, 36 deletions
diff --git a/.mailmap b/.mailmap
index b5e240f78b8c2d..fffbfd413474b9 100644
--- a/.mailmap
+++ b/.mailmap
@@ -691,6 +691,8 @@ Sachin Mokashi <sachin.mokashi@intel.com> <sachinx.mokashi@intel.com>
Sachin P Sant <ssant@in.ibm.com>
Sai Prakash Ranjan <quic_saipraka@quicinc.com> <saiprakash.ranjan@codeaurora.org>
Sakari Ailus <sakari.ailus@linux.intel.com> <sakari.ailus@iki.fi>
+Sam Protsenko <semen.protsenko@linaro.org>
+Sam Protsenko <semen.protsenko@linaro.org> <semen.protsenko@globallogic.com>
Sam Ravnborg <sam@mars.ravnborg.org>
Sankeerth Billakanti <quic_sbillaka@quicinc.com> <sbillaka@codeaurora.org>
Santosh Shilimkar <santosh.shilimkar@oracle.org>
diff --git a/MAINTAINERS b/MAINTAINERS
index 6df89b14b521ae..047d242faf682f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -13799,6 +13799,7 @@ F: Documentation/admin-guide/mm/kho.rst
F: Documentation/core-api/kho/*
F: include/linux/kexec_handover.h
F: kernel/kexec_handover.c
+F: lib/test_kho.c
F: tools/testing/selftests/kho/
KEYS-ENCRYPTED
diff --git a/mm/filemap.c b/mm/filemap.c
index 2f1e7e283a5102..024b71da5224d7 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3682,8 +3682,9 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
struct folio *folio, unsigned long start,
unsigned long addr, unsigned int nr_pages,
unsigned long *rss, unsigned short *mmap_miss,
- bool can_map_large)
+ pgoff_t file_end)
{
+ struct address_space *mapping = folio->mapping;
unsigned int ref_from_caller = 1;
vm_fault_t ret = 0;
struct page *page = folio_page(folio, start);
@@ -3692,12 +3693,16 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
unsigned long addr0;
/*
- * Map the large folio fully where possible.
+ * Map the large folio fully where possible:
*
- * The folio must not cross VMA or page table boundary.
+ * - The folio is fully within the size of the file or belongs
+ * to shmem/tmpfs;
+ * - The folio doesn't cross VMA boundary;
+ * - The folio doesn't cross page table boundary;
*/
addr0 = addr - start * PAGE_SIZE;
- if (can_map_large && folio_within_vma(folio, vmf->vma) &&
+ if ((file_end >= folio_next_index(folio) || shmem_mapping(mapping)) &&
+ folio_within_vma(folio, vmf->vma) &&
(addr0 & PMD_MASK) == ((addr0 + folio_size(folio) - 1) & PMD_MASK)) {
vmf->pte -= start;
page -= start;
@@ -3812,7 +3817,6 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
unsigned long rss = 0;
unsigned int nr_pages = 0, folio_type;
unsigned short mmap_miss = 0, mmap_miss_saved;
- bool can_map_large;
rcu_read_lock();
folio = next_uptodate_folio(&xas, mapping, end_pgoff);
@@ -3823,16 +3827,14 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
end_pgoff = min(end_pgoff, file_end);
/*
- * Do not allow to map with PTEs beyond i_size and with PMD
- * across i_size to preserve SIGBUS semantics.
+ * Do not allow mapping with a PMD across i_size, to preserve
+ * SIGBUS semantics.
*
* Make an exception for shmem/tmpfs that for long time
* intentionally mapped with PMDs across i_size.
*/
- can_map_large = shmem_mapping(mapping) ||
- file_end >= folio_next_index(folio);
-
- if (can_map_large && filemap_map_pmd(vmf, folio, start_pgoff)) {
+ if ((file_end >= folio_next_index(folio) || shmem_mapping(mapping)) &&
+ filemap_map_pmd(vmf, folio, start_pgoff)) {
ret = VM_FAULT_NOPAGE;
goto out;
}
@@ -3861,8 +3863,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
else
ret |= filemap_map_folio_range(vmf, folio,
xas.xa_index - folio->index, addr,
- nr_pages, &rss, &mmap_miss,
- can_map_large);
+ nr_pages, &rss, &mmap_miss, file_end);
folio_unlock(folio);
} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
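
The SIGBUS rule this change preserves is visible from userspace: a page of a file mapping that lies entirely beyond i_size raises SIGBUS when touched, while the partial page at EOF reads as zeros. A minimal sketch, assuming a regular file on a local filesystem (the file name demo.txt is arbitrary and error handling is pared down):

#include <fcntl.h>
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static sigjmp_buf env;

static void on_sigbus(int sig)
{
	(void)sig;
	siglongjmp(env, 1);
}

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	int fd = open("demo.txt", O_CREAT | O_RDWR | O_TRUNC, 0600);

	if (fd < 0 || write(fd, "x", 1) != 1)	/* i_size = 1 byte */
		return 1;

	/* Map two pages; only the first one overlaps the file's contents. */
	char *p = mmap(NULL, 2 * psz, PROT_READ, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	signal(SIGBUS, on_sigbus);
	if (sigsetjmp(env, 1) == 0) {
		char c = p[psz];	/* page wholly beyond i_size */
		printf("unexpected read: %c\n", c);
	} else {
		printf("got SIGBUS past EOF, as expected\n");
	}

	munmap(p, 2 * psz);
	close(fd);
	unlink("demo.txt");
	return 0;
}
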
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 2f2a521e5d6830..6cba1cb14b23ac 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3619,6 +3619,16 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
if (folio != page_folio(split_at) || folio != page_folio(lock_at))
return -EINVAL;
+ /*
+ * Folios that just got truncated cannot get split. Signal to the
+ * caller that there was a race.
+ *
+ * TODO: this will also currently refuse shmem folios that are in the
+ * swapcache.
+ */
+ if (!is_anon && !folio->mapping)
+ return -EBUSY;
+
if (new_order >= folio_order(folio))
return -EINVAL;
@@ -3659,18 +3669,6 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
gfp_t gfp;
mapping = folio->mapping;
-
- /* Truncated ? */
- /*
- * TODO: add support for large shmem folio in swap cache.
- * When shmem is in swap cache, mapping is NULL and
- * folio_test_swapcache() is true.
- */
- if (!mapping) {
- ret = -EBUSY;
- goto out;
- }
-
min_order = mapping_min_folio_order(folio->mapping);
if (new_order < min_order) {
ret = -EINVAL;
diff --git a/mm/memfd.c b/mm/memfd.c
index 1d109c1acf211b..a405eaa451ee8b 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -96,9 +96,36 @@ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
NULL,
gfp_mask);
if (folio) {
+ u32 hash;
+
+ /*
+ * Zero the folio to prevent information leaks to userspace.
+ * Use folio_zero_user() which is optimized for huge/gigantic
+ * pages. Pass 0 as addr_hint since this is not a faulting path
+ * and we don't have a user virtual address yet.
+ */
+ folio_zero_user(folio, 0);
+
+ /*
+ * Mark the folio uptodate before adding to page cache,
+ * as required by filemap.c and other hugetlb paths.
+ */
+ __folio_mark_uptodate(folio);
+
+ /*
+ * Serialize hugepage allocation and instantiation to prevent
+ * races with concurrent allocations, as required by all other
+ * callers of hugetlb_add_to_page_cache().
+ */
+ hash = hugetlb_fault_mutex_hash(memfd->f_mapping, idx);
+ mutex_lock(&hugetlb_fault_mutex_table[hash]);
+
err = hugetlb_add_to_page_cache(folio,
memfd->f_mapping,
idx);
+
+ mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+
if (err) {
folio_put(folio);
goto err_unresv;
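
The leak fixed above was reachable through memfd_alloc_folio(), which the udmabuf driver uses to populate hugetlb memfds outside the normal fault path. The invariant being restored is simple: memory exposed from a hugetlb-backed memfd must read back as zeros. A userspace sketch of that invariant (it goes through the ordinary fault path rather than udmabuf, and assumes a 2 MiB default huge page size with at least one huge page reserved, e.g. via /proc/sys/vm/nr_hugepages):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 2UL << 20;		/* one default-size huge page */
	int fd = memfd_create("zero-check", MFD_HUGETLB);

	if (fd < 0 || ftruncate(fd, len) < 0) {
		perror("memfd_create/ftruncate");
		return 1;
	}

	unsigned char *p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	for (size_t i = 0; i < len; i++) {
		if (p[i]) {
			fprintf(stderr, "non-zero byte at offset %zu\n", i);
			return 1;
		}
	}
	puts("hugetlb memfd reads back as zeros");
	return 0;
}
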
diff --git a/mm/mmap_lock.c b/mm/mmap_lock.c
index 0a0db5849b8e70..42e3dde73e741c 100644
--- a/mm/mmap_lock.c
+++ b/mm/mmap_lock.c
@@ -241,6 +241,7 @@ retry:
if (PTR_ERR(vma) == -EAGAIN) {
count_vm_vma_lock_event(VMA_LOCK_MISS);
/* The area was replaced with another one */
+ mas_set(&mas, address);
goto retry;
}
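
The one-line addition matters because a maple state keeps its position between walks: after the -EAGAIN, the cursor is left wherever the failed walk stopped, so retrying without re-seeding it searches from the wrong place. A toy illustration with a hypothetical forward-only cursor over a sorted array standing in for the maple tree state (struct cursor and lookup() are illustrative, not kernel API):

#include <stdio.h>

struct cursor { int pos; };

/* Walk forward from cur->pos to the first element >= key. */
static int lookup(const int *tbl, int n, struct cursor *cur, int key)
{
	while (cur->pos < n && tbl[cur->pos] < key)
		cur->pos++;
	return cur->pos < n ? tbl[cur->pos] : -1;
}

int main(void)
{
	int tbl[] = { 10, 20, 30, 40 };
	struct cursor cur = { 0 };

	lookup(tbl, 4, &cur, 30);	/* first walk leaves the cursor at 30 */

	/* Retry for a smaller key without resetting: the stale cursor
	 * can no longer reach 20 and returns the wrong element. */
	printf("stale retry: %d\n", lookup(tbl, 4, &cur, 20));	/* 30 */

	cur.pos = 0;			/* the mas_set()-style re-seed */
	printf("reset retry: %d\n", lookup(tbl, 4, &cur, 20));	/* 20 */
	return 0;
}
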
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 10760240a3a29f..a1b4b9d80e3b59 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2005,10 +2005,8 @@ swp_entry_t get_swap_page_of_type(int type)
local_lock(&percpu_swap_cluster.lock);
offset = cluster_alloc_swap_entry(si, 0, 1);
local_unlock(&percpu_swap_cluster.lock);
- if (offset) {
+ if (offset)
entry = swp_entry(si->type, offset);
- atomic_long_dec(&nr_swap_pages);
- }
}
put_swap_device(si);
}
diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c
index 9e3be2ee7f1ba4..f917b4c4c94369 100644
--- a/tools/testing/selftests/mm/uffd-unit-tests.c
+++ b/tools/testing/selftests/mm/uffd-unit-tests.c
@@ -1758,10 +1758,15 @@ int main(int argc, char *argv[])
uffd_test_ops = mem_type->mem_ops;
uffd_test_case_ops = test->test_case_ops;
- if (mem_type->mem_flag & (MEM_HUGETLB_PRIVATE | MEM_HUGETLB))
+ if (mem_type->mem_flag & (MEM_HUGETLB_PRIVATE | MEM_HUGETLB)) {
gopts.page_size = default_huge_page_size();
- else
+ if (gopts.page_size == 0) {
+ uffd_test_skip("huge page size is 0, feature missing?");
+ continue;
+ }
+ } else {
gopts.page_size = psize();
+ }
/* Ensure we have at least 2 pages */
gopts.nr_pages = MAX(UFFD_TEST_MEM_SIZE, gopts.page_size * 2)
@@ -1776,12 +1781,6 @@ int main(int argc, char *argv[])
continue;
uffd_test_start("%s on %s", test->name, mem_type->name);
- if ((mem_type->mem_flag == MEM_HUGETLB ||
- mem_type->mem_flag == MEM_HUGETLB_PRIVATE) &&
- (default_huge_page_size() == 0)) {
- uffd_test_skip("huge page size is 0, feature missing?");
- continue;
- }
if (!uffd_feature_supported(test)) {
uffd_test_skip("feature missing");
continue;
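
The reordering above works because the zero check now runs before gopts.page_size is used as a divisor: on systems without hugetlb support, default_huge_page_size() reports 0, and the old placement let the nr_pages computation divide by it before the skip was taken. A standalone sketch of the failure mode and the guard, where read_huge_page_size() is a simplified stand-in for the selftest helper rather than a copy of it:

#include <stdio.h>

static unsigned long read_huge_page_size(void)
{
	char line[128];
	unsigned long kb = 0;
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return 0;
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "Hugepagesize: %lu kB", &kb) == 1)
			break;
	}
	fclose(f);
	return kb * 1024;	/* 0 when no hugetlb support */
}

int main(void)
{
	unsigned long page_size = read_huge_page_size();

	/* Guard before dividing, mirroring the fixed ordering. */
	if (page_size == 0) {
		fprintf(stderr, "skip: huge page size is 0, feature missing?\n");
		return 0;
	}
	printf("nr_pages for a 64 MiB area: %lu\n", (64UL << 20) / page_size);
	return 0;
}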