 .mailmap                            | 97
 Documentation/filesystems/tmpfs.rst | 47
 arch/um/os-Linux/sigio.c            |  7
 fs/proc/vmcore.c                    |  2
 include/linux/mm.h                  | 29
 include/linux/mm_types.h            | 28
 include/linux/mmap_lock.h           | 10
 mm/damon/core-test.h                | 10
 mm/memory-failure.c                 |  2
 mm/mmap.c                           |  1
 mm/pagewalk.c                       |  5
 mm/shmem.c                          |  9
 scripts/spelling.txt                |  1
 13 files changed, 197 insertions(+), 51 deletions(-)
diff --git a/.mailmap b/.mailmap
index a33b9f56357c..5dd318121982 100644
--- a/.mailmap
+++ b/.mailmap
@@ -13,7 +13,9 @@
Aaron Durbin <[email protected]>
+Abhijeet Dharmapurikar <[email protected]> <[email protected]>
Adam Oldham <[email protected]>
Adam Radford <[email protected]>
@@ -30,6 +32,7 @@ Alexander Mikhalitsyn <[email protected]> <alexander.mikhalitsyn@virtuozzo
Alexander Mikhalitsyn <[email protected]> <[email protected]>
Alexandre Belloni <[email protected]> <[email protected]>
+Alexei Avshalom Lazar <[email protected]> <[email protected]>
Alexei Starovoitov <[email protected]> <[email protected]>
Alexei Starovoitov <[email protected]> <[email protected]>
Alexei Starovoitov <[email protected]> <[email protected]>
@@ -37,8 +40,11 @@ Alex Hung <[email protected]> <[email protected]>
Andreas Herrmann <[email protected]>
@@ -54,6 +60,8 @@ Andrey Ryabinin <[email protected]> <[email protected]>
Andy Adamson <[email protected]>
@@ -62,9 +70,17 @@ Archit Taneja <[email protected]>
Arnaud Patard <[email protected]>
Arnd Bergmann <[email protected]>
+Arun Kumar Neelakantam <[email protected]> <[email protected]>
+Ashok Raj Nagarajan <[email protected]> <[email protected]>
+Avaneesh Kumar Dwivedi <[email protected]> <[email protected]>
Axel Dyks <[email protected]>
Axel Lin <[email protected]>
+Balakrishna Godavarthi <[email protected]> <[email protected]>
@@ -93,12 +109,15 @@ Brian Avery <[email protected]>
Brian King <[email protected]>
Christian Borntraeger <[email protected]> <[email protected]>
Christian Borntraeger <[email protected]> <[email protected]>
Christian Borntraeger <[email protected]> <[email protected]>
@@ -119,7 +138,10 @@ Daniel Borkmann <[email protected]> <[email protected]>
David Brownell <[email protected]>
David Woodhouse <[email protected]>
+Deepak Kumar Singh <[email protected]> <[email protected]>
@@ -136,6 +158,7 @@ Dmitry Safonov <[email protected]> <[email protected]>
Domen Puncer <[email protected]>
Douglas Gilbert <[email protected]>
Ed L. Cashin <[email protected]>
Enric Balletbo i Serra <[email protected]> <[email protected]>
Enric Balletbo i Serra <[email protected]> <[email protected]>
@@ -148,6 +171,7 @@ Faith Ekstrand <[email protected]> <[email protected]>
Felipe W Damasio <[email protected]>
Felix Kuhling <[email protected]>
Felix Moeller <[email protected]>
Filipe Lautert <[email protected]>
Franck Bui-Huu <[email protected]>
@@ -171,8 +195,11 @@ Greg Kurz <[email protected]> <[email protected]>
Guilherme G. Piccoli <[email protected]> <[email protected]>
Guilherme G. Piccoli <[email protected]> <[email protected]>
+Gokul Sriram Palanisamy <[email protected]> <[email protected]>
+Govindaraj Saminathan <[email protected]> <[email protected]>
+Guru Das Srinagesh <[email protected]> <[email protected]>
Gustavo Padovan <[email protected]>
Gustavo Padovan <[email protected]>
@@ -190,6 +217,7 @@ Huacai Chen <[email protected]> <[email protected]>
Jacob Shin <[email protected]>
@@ -217,10 +245,12 @@ Jayachandran C <[email protected]> <[email protected]>
Jean Tourrilhes <[email protected]>
Jeff Garzik <[email protected]>
@@ -228,6 +258,7 @@ Jens Axboe <[email protected]> <[email protected]>
Jens Osterkamp <[email protected]>
@@ -238,6 +269,7 @@ Jiri Slaby <[email protected]> <[email protected]>
@@ -256,6 +288,7 @@ Jordan Crouse <[email protected]> <[email protected]>
Juha Yrjola <at solidboot.com>
Juha Yrjola <[email protected]>
Juha Yrjola <[email protected]>
@@ -263,6 +296,8 @@ Julien Thierry <[email protected]> <[email protected]>
+Karthikeyan Periyasamy <[email protected]> <[email protected]>
Kay Sievers <[email protected]>
@@ -271,6 +306,8 @@ Kees Cook <[email protected]> <[email protected]>
Kenneth W Chen <[email protected]>
+Kenneth Westfield <[email protected]> <[email protected]>
Konstantin Khlebnikov <[email protected]> <[email protected]>
Konstantin Khlebnikov <[email protected]> <[email protected]>
@@ -279,6 +316,7 @@ Krishna Manikandan <[email protected]> <[email protected]>
Krzysztof Kozlowski <[email protected]> <[email protected]>
Krzysztof Kozlowski <[email protected]> <[email protected]>
Krzysztof Kozlowski <[email protected]> <[email protected]>
Kuninori Morimoto <[email protected]>
@@ -292,19 +330,27 @@ Leonid I Ananiev <[email protected]>
Linas Vepstas <[email protected]>
Lorenzo Pieralisi <[email protected]> <[email protected]>
Maciej W. Rozycki <[email protected]> <[email protected]>
Maciej W. Rozycki <[email protected]> <[email protected]>
+Maharaja Kennadyrajan <[email protected]> <[email protected]>
+Manikanta Pubbisetty <[email protected]> <[email protected]>
Manivannan Sadhasivam <[email protected]> <[email protected]>
Manivannan Sadhasivam <[email protected]> <[email protected]>
Marcin Nowakowski <[email protected]> <[email protected]>
@@ -334,6 +380,7 @@ Matt Ranostay <[email protected]> <[email protected]>
Matt Ranostay <[email protected]> Matthew Ranostay <[email protected]>
Mauro Carvalho Chehab <[email protected]> <[email protected]>
Mauro Carvalho Chehab <[email protected]> <[email protected]>
Mauro Carvalho Chehab <[email protected]> <[email protected]>
@@ -346,7 +393,10 @@ Maxim Mikityanskiy <[email protected]> <[email protected]>
Mayuresh Janorkar <[email protected]>
Michael Buesch <[email protected]>
Michel Dänzer <[email protected]>
@@ -357,6 +407,7 @@ Miguel Ojeda <[email protected]> <[email protected]>
Mitesh shah <[email protected]>
@@ -365,9 +416,13 @@ Morten Welinder <[email protected]>
Morten Welinder <[email protected]>
Morten Welinder <[email protected]>
Morten Welinder <[email protected]>
Mythri P K <[email protected]>
Nadia Yvette Chambers <[email protected]> William Lee Irwin III <[email protected]>
Nathan Chancellor <[email protected]> <[email protected]>
Nguyen Anh Quynh <[email protected]>
@@ -386,6 +441,7 @@ Nikolay Aleksandrov <[email protected]> <[email protected]>
Nikolay Aleksandrov <[email protected]> <[email protected]>
Nikolay Aleksandrov <[email protected]> <[email protected]>
Nikolay Aleksandrov <[email protected]> <[email protected]>
Oleksandr Natalenko <[email protected]> <[email protected]>
@@ -393,6 +449,7 @@ Oleksij Rempel <[email protected]> <[email protected]>
Paolo 'Blaisorblade' Giarrusso <[email protected]>
Patrick Mochel <[email protected]>
@@ -404,11 +461,14 @@ Paul E. McKenney <[email protected]> <[email protected]>
+Pavankumar Kondeti <[email protected]> <[email protected]>
Peter A Jonsson <[email protected]>
Peter Oruba <[email protected]>
Peter Oruba <[email protected]>
Praveen BP <[email protected]>
+Pradeep Kumar Chitrapu <[email protected]> <[email protected]>
@@ -417,10 +477,16 @@ Quentin Perret <[email protected]> <[email protected]>
Rafael J. Wysocki <[email protected]> <[email protected]>
+Rajeshwari Ravindra Kamble <[email protected]> <[email protected]>
Rajesh Shah <[email protected]>
Ralf Baechle <[email protected]>
Ralf Wildenhues <[email protected]>
+Ram Chandra Jangir <[email protected]> <[email protected]>
+Ravi Kumar Siddojigari <[email protected]> <[email protected]>
Rémi Denis-Courmont <[email protected]>
Ricardo Ribalda <[email protected]> Ricardo Ribalda Delgado <[email protected]>
@@ -429,6 +495,7 @@ Richard Leitner <[email protected]> <[email protected]>
@@ -446,24 +513,35 @@ Santosh Shilimkar <[email protected]>
Santosh Shilimkar <[email protected]>
Sarangdhar Joshi <[email protected]>
Sascha Hauer <[email protected]>
+Sathishkumar Muruganandam <[email protected]> <[email protected]>
S.Çağlar Onur <[email protected]>
Sean Christopherson <[email protected]> <[email protected]>
Sebastian Reichel <[email protected]> <[email protected]>
Sebastian Reichel <[email protected]> <[email protected]>
+Sharath Chandra Vurukala <[email protected]> <[email protected]>
Simon Kelley <[email protected]>
+Sricharan Ramabadhran <[email protected]> <[email protected]>
Stéphane Witzmann <[email protected]>
Stephen Hemminger <[email protected]> <[email protected]>
Stephen Hemminger <[email protected]> <[email protected]>
@@ -471,22 +549,30 @@ Stephen Hemminger <[email protected]> <[email protected]>
Stephen Hemminger <[email protected]> <[email protected]>
-Subash Abhinov Kasiviswanathan <[email protected]>
+Subash Abhinov Kasiviswanathan <[email protected]> <[email protected]>
+Subbaraman Narayanamurthy <[email protected]> <[email protected]>
Subhash Jadavani <[email protected]>
+Sudarshan Rajagopalan <[email protected]> <[email protected]>
Sudeep Holla <[email protected]> Sudeep KarkadaNagesha <[email protected]>
Sumit Semwal <[email protected]>
Takashi YOSHII <[email protected]>
+Tamizh Chelvam Raja <[email protected]> <[email protected]>
Tejun Heo <[email protected]>
Thomas Graf <[email protected]>
Thomas Pedersen <[email protected]>
Tony Luck <[email protected]>
Tsuneo Yoshioka <[email protected]>
@@ -499,11 +585,17 @@ Uwe Kleine-König <[email protected]>
Uwe Kleine-König <[email protected]>
Uwe Kleine-König <[email protected]>
Valdis Kletnieks <[email protected]>
+Varadarajan Narayanan <[email protected]> <[email protected]>
+Vasanthakumar Thiagarajan <[email protected]> <[email protected]>
Valentin Schneider <[email protected]> <[email protected]>
+Veera Sundaram Sankaran <[email protected]> <[email protected]>
+Veerabhadrarao Badiganti <[email protected]> <[email protected]>
+Venkateswara Naralasetty <[email protected]> <[email protected]>
@@ -513,11 +605,14 @@ Viresh Kumar <[email protected]> <[email protected]>
diff --git a/Documentation/filesystems/tmpfs.rst b/Documentation/filesystems/tmpfs.rst
index f18f46be5c0c..2cd8fa332feb 100644
--- a/Documentation/filesystems/tmpfs.rst
+++ b/Documentation/filesystems/tmpfs.rst
@@ -84,8 +84,6 @@ nr_inodes The maximum number of inodes for this instance. The default
is half of the number of your physical RAM pages, or (on a
machine with highmem) the number of lowmem RAM pages,
whichever is the lower.
-noswap Disables swap. Remounts must respect the original settings.
- By default swap is enabled.
========= ============================================================
These parameters accept a suffix k, m or g for kilo, mega and giga and
@@ -99,36 +97,31 @@ mount with such options, since it allows any user with write access to
use up all the memory on the machine; but enhances the scalability of
that instance in a system with many CPUs making intensive use of it.
+tmpfs blocks may be swapped out, when there is a shortage of memory.
+tmpfs has a mount option to disable its use of swap:
+
+====== ===========================================================
+noswap Disables swap. Remounts must respect the original settings.
+ By default swap is enabled.
+====== ===========================================================
+
tmpfs also supports Transparent Huge Pages which requires a kernel
configured with CONFIG_TRANSPARENT_HUGEPAGE and with huge supported for
your system (has_transparent_hugepage(), which is architecture specific).
The mount options for this are:
-====== ============================================================
-huge=0 never: disables huge pages for the mount
-huge=1 always: enables huge pages for the mount
-huge=2 within_size: only allocate huge pages if the page will be
- fully within i_size, also respect fadvise()/madvise() hints.
-huge=3 advise: only allocate huge pages if requested with
- fadvise()/madvise()
-====== ============================================================
-
-There is a sysfs file which you can also use to control system wide THP
-configuration for all tmpfs mounts, the file is:
-
-/sys/kernel/mm/transparent_hugepage/shmem_enabled
-
-This sysfs file is placed on top of THP sysfs directory and so is registered
-by THP code. It is however only used to control all tmpfs mounts with one
-single knob. Since it controls all tmpfs mounts it should only be used either
-for emergency or testing purposes. The values you can set for shmem_enabled are:
-
-== ============================================================
--1 deny: disables huge on shm_mnt and all mounts, for
- emergency use
--2 force: enables huge on shm_mnt and all mounts, w/o needing
- option, for testing
-== ============================================================
+================ ==============================================================
+huge=never Do not allocate huge pages. This is the default.
+huge=always Attempt to allocate huge page every time a new page is needed.
+huge=within_size Only allocate huge page if it will be fully within i_size.
+ Also respect madvise(2) hints.
+huge=advise Only allocate huge page if requested with madvise(2).
+================ ==============================================================
+
+See also Documentation/admin-guide/mm/transhuge.rst, which describes the
+sysfs file /sys/kernel/mm/transparent_hugepage/shmem_enabled: which can
+be used to deny huge pages on all tmpfs mounts in an emergency, or to
+force huge pages on all tmpfs mounts for testing.
tmpfs has a mount option to set the NUMA memory allocation policy for
all files in that instance (if CONFIG_NUMA is enabled) - which can be
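For illustration only (this is not part of the patch): the noswap and huge=
options documented above are ordinary tmpfs mount options, so they can be
passed in the data string of mount(2). A minimal, hedged sketch follows; the
mount point /mnt/scratch and the size=1g value are assumptions, and the
program needs privilege to mount:

	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		/* Assumed, pre-existing mount point. */
		const char *target = "/mnt/scratch";

		if (mount("tmpfs", target, "tmpfs", 0,
			  "size=1g,huge=within_size,noswap") != 0) {
			perror("mount");
			return 1;
		}
		return 0;
	}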
diff --git a/arch/um/os-Linux/sigio.c b/arch/um/os-Linux/sigio.c
index 37d60e72cf26..9e71794839e8 100644
--- a/arch/um/os-Linux/sigio.c
+++ b/arch/um/os-Linux/sigio.c
@@ -3,7 +3,6 @@
* Copyright (C) 2002 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
-#include <linux/minmax.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
@@ -51,7 +50,7 @@ static struct pollfds all_sigio_fds;
static int write_sigio_thread(void *unused)
{
- struct pollfds *fds;
+ struct pollfds *fds, tmp;
struct pollfd *p;
int i, n, respond_fd;
char c;
@@ -78,7 +77,9 @@ static int write_sigio_thread(void *unused)
"write_sigio_thread : "
"read on socket failed, "
"err = %d\n", errno);
- swap(current_poll, next_poll);
+ tmp = current_poll;
+ current_poll = next_poll;
+ next_poll = tmp;
respond_fd = sigio_private[1];
}
else {
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index cb80a7703d58..1fb213f379a5 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -132,7 +132,7 @@ ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
u64 *ppos, bool encrypted)
{
unsigned long pfn, offset;
- size_t nr_bytes;
+ ssize_t nr_bytes;
ssize_t read = 0, tmp;
int idx;
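The size_t -> ssize_t change above addresses a classic C pitfall: when a
signed value (such as an ssize_t result that may hold a negative error code)
is compared against an unsigned counter, the signed value is converted to
unsigned, so a negative error compares as a huge positive number and the
error branch is never taken. A small userspace sketch of the pitfall
(illustrative only, not the kernel code):

	#include <stdio.h>

	int main(void)
	{
		long tmp = -14;              /* stand-in for a negative error code */
		unsigned long nr_bytes = 64; /* unsigned counter, as before the fix */

		/* tmp is converted to unsigned long here, so -14 becomes a huge
		 * positive value and the check fails to catch the error.
		 */
		if (tmp < nr_bytes)
			printf("unsigned compare: error caught\n");
		else
			printf("unsigned compare: error missed (tmp reads as %lu)\n",
			       (unsigned long)tmp);

		/* With both operands signed, the error is caught as expected. */
		if (tmp < (long)nr_bytes)
			printf("signed compare: error caught\n");
		return 0;
	}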
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2dd73e4f3d8e..406ab9ea818f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -641,8 +641,14 @@ static inline void vma_numab_state_free(struct vm_area_struct *vma) {}
*/
static inline bool vma_start_read(struct vm_area_struct *vma)
{
- /* Check before locking. A race might cause false locked result. */
- if (vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq))
+ /*
+ * Check before locking. A race might cause false locked result.
+ * We can use READ_ONCE() for the mm_lock_seq here, and don't need
+ * ACQUIRE semantics, because this is just a lockless check whose result
+ * we don't rely on for anything - the mm_lock_seq read against which we
+ * need ordering is below.
+ */
+ if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq))
return false;
if (unlikely(down_read_trylock(&vma->vm_lock->lock) == 0))
@@ -653,8 +659,13 @@ static inline bool vma_start_read(struct vm_area_struct *vma)
* False unlocked result is impossible because we modify and check
* vma->vm_lock_seq under vma->vm_lock protection and mm->mm_lock_seq
* modification invalidates all existing locks.
+ *
+ * We must use ACQUIRE semantics for the mm_lock_seq so that if we are
+ * racing with vma_end_write_all(), we only start reading from the VMA
+ * after it has been unlocked.
+ * This pairs with RELEASE semantics in vma_end_write_all().
*/
- if (unlikely(vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq))) {
+ if (unlikely(vma->vm_lock_seq == smp_load_acquire(&vma->vm_mm->mm_lock_seq))) {
up_read(&vma->vm_lock->lock);
return false;
}
@@ -676,7 +687,7 @@ static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
* current task is holding mmap_write_lock, both vma->vm_lock_seq and
* mm->mm_lock_seq can't be concurrently modified.
*/
- *mm_lock_seq = READ_ONCE(vma->vm_mm->mm_lock_seq);
+ *mm_lock_seq = vma->vm_mm->mm_lock_seq;
return (vma->vm_lock_seq == *mm_lock_seq);
}
@@ -688,7 +699,13 @@ static inline void vma_start_write(struct vm_area_struct *vma)
return;
down_write(&vma->vm_lock->lock);
- vma->vm_lock_seq = mm_lock_seq;
+ /*
+ * We should use WRITE_ONCE() here because we can have concurrent reads
+ * from the early lockless pessimistic check in vma_start_read().
+ * We don't really care about the correctness of that early check, but
+ * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy.
+ */
+ WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq);
up_write(&vma->vm_lock->lock);
}
@@ -702,7 +719,7 @@ static inline bool vma_try_start_write(struct vm_area_struct *vma)
if (!down_write_trylock(&vma->vm_lock->lock))
return false;
- vma->vm_lock_seq = mm_lock_seq;
+ WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq);
up_write(&vma->vm_lock->lock);
return true;
}
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index de10fc797c8e..5e74ce4a28cd 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -514,6 +514,20 @@ struct vm_area_struct {
};
#ifdef CONFIG_PER_VMA_LOCK
+ /*
+ * Can only be written (using WRITE_ONCE()) while holding both:
+ * - mmap_lock (in write mode)
+ * - vm_lock->lock (in write mode)
+ * Can be read reliably while holding one of:
+ * - mmap_lock (in read or write mode)
+ * - vm_lock->lock (in read or write mode)
+ * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
+ * while holding nothing (except RCU to keep the VMA struct allocated).
+ *
+ * This sequence counter is explicitly allowed to overflow; sequence
+ * counter reuse can only lead to occasional unnecessary use of the
+ * slowpath.
+ */
int vm_lock_seq;
struct vma_lock *vm_lock;
@@ -679,6 +693,20 @@ struct mm_struct {
* by mmlist_lock
*/
#ifdef CONFIG_PER_VMA_LOCK
+ /*
+ * This field has lock-like semantics, meaning it is sometimes
+ * accessed with ACQUIRE/RELEASE semantics.
+ * Roughly speaking, incrementing the sequence number is
+ * equivalent to releasing locks on VMAs; reading the sequence
+ * number can be part of taking a read lock on a VMA.
+ *
+ * Can be modified under write mmap_lock using RELEASE
+ * semantics.
+ * Can be read with no other protection when holding write
+ * mmap_lock.
+ * Can be read with ACQUIRE semantics if not holding write
+ * mmap_lock.
+ */
int mm_lock_seq;
#endif
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index aab8f1b28d26..e05e167dbd16 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -76,8 +76,14 @@ static inline void mmap_assert_write_locked(struct mm_struct *mm)
static inline void vma_end_write_all(struct mm_struct *mm)
{
mmap_assert_write_locked(mm);
- /* No races during update due to exclusive mmap_lock being held */
- WRITE_ONCE(mm->mm_lock_seq, mm->mm_lock_seq + 1);
+ /*
+ * Nobody can concurrently modify mm->mm_lock_seq due to exclusive
+ * mmap_lock being held.
+ * We need RELEASE semantics here to ensure that preceding stores into
+ * the VMA take effect before we unlock it with this store.
+ * Pairs with ACQUIRE semantics in vma_start_read().
+ */
+ smp_store_release(&mm->mm_lock_seq, mm->mm_lock_seq + 1);
}
#else
static inline void vma_end_write_all(struct mm_struct *mm) {}
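The comments added in mm.h, mm_types.h and this file describe a
release/acquire pairing on mm_lock_seq: vma_end_write_all() publishes the
writer's VMA updates with a RELEASE store, and vma_start_read() re-checks the
sequence with an ACQUIRE load. A rough userspace sketch of that pairing, with
C11 atomics standing in for the kernel's smp_store_release()/smp_load_acquire()
(the struct and function names below are made up for the sketch):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct mm_like  { atomic_int lock_seq; };
	struct vma_like { int lock_seq; struct mm_like *mm; };

	/* Writer side: bump the sequence with RELEASE ordering so that all
	 * preceding stores to the VMA are visible before it reads as unlocked,
	 * mirroring smp_store_release() in vma_end_write_all().
	 */
	static void end_write_all(struct mm_like *mm)
	{
		int seq = atomic_load_explicit(&mm->lock_seq, memory_order_relaxed);
		atomic_store_explicit(&mm->lock_seq, seq + 1, memory_order_release);
	}

	/* Reader side: an ACQUIRE load pairing with the store above, mirroring
	 * smp_load_acquire() in vma_start_read(). Matching sequences mean the
	 * VMA is still write-locked, so the lockless read must bail out.
	 */
	static bool start_read(struct vma_like *vma)
	{
		return vma->lock_seq !=
		       atomic_load_explicit(&vma->mm->lock_seq, memory_order_acquire);
	}

	int main(void)
	{
		struct mm_like mm = { .lock_seq = 1 };
		struct vma_like vma = { .lock_seq = 1, .mm = &mm };

		printf("read allowed before unlock? %d\n", start_read(&vma)); /* 0 */
		end_write_all(&mm);
		printf("read allowed after unlock?  %d\n", start_read(&vma)); /* 1 */
		return 0;
	}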
diff --git a/mm/damon/core-test.h b/mm/damon/core-test.h
index c11210124344..bb07721909e1 100644
--- a/mm/damon/core-test.h
+++ b/mm/damon/core-test.h
@@ -320,25 +320,25 @@ static void damon_test_update_monitoring_result(struct kunit *test)
static void damon_test_set_attrs(struct kunit *test)
{
- struct damon_ctx ctx;
+ struct damon_ctx *c = damon_new_ctx();
struct damon_attrs valid_attrs = {
.min_nr_regions = 10, .max_nr_regions = 1000,
.sample_interval = 5000, .aggr_interval = 100000,};
struct damon_attrs invalid_attrs;
- KUNIT_EXPECT_EQ(test, damon_set_attrs(&ctx, &valid_attrs), 0);
+ KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &valid_attrs), 0);
invalid_attrs = valid_attrs;
invalid_attrs.min_nr_regions = 1;
- KUNIT_EXPECT_EQ(test, damon_set_attrs(&ctx, &invalid_attrs), -EINVAL);
+ KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);
invalid_attrs = valid_attrs;
invalid_attrs.max_nr_regions = 9;
- KUNIT_EXPECT_EQ(test, damon_set_attrs(&ctx, &invalid_attrs), -EINVAL);
+ KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);
invalid_attrs = valid_attrs;
invalid_attrs.aggr_interval = 4999;
- KUNIT_EXPECT_EQ(test, damon_set_attrs(&ctx, &invalid_attrs), -EINVAL);
+ KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);
}
static struct kunit_case damon_test_cases[] = {
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index e245191e6b04..ece5d481b5ff 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2487,7 +2487,7 @@ int unpoison_memory(unsigned long pfn)
goto unlock_mutex;
}
- if (!folio_test_hwpoison(folio)) {
+ if (!PageHWPoison(p)) {
unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
pfn, &unpoison_rs);
goto unlock_mutex;
diff --git a/mm/mmap.c b/mm/mmap.c
index 3eda23c9ebe7..3937479d0e07 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -615,6 +615,7 @@ static inline int dup_anon_vma(struct vm_area_struct *dst,
* anon pages imported.
*/
if (src->anon_vma && !dst->anon_vma) {
+ vma_start_write(dst);
dst->anon_vma = src->anon_vma;
return anon_vma_clone(dst, src);
}
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 64437105fe0d..2022333805d3 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -48,8 +48,11 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
if (walk->no_vma) {
/*
* pte_offset_map() might apply user-specific validation.
+ * Indeed, on x86_64 the pmd entries set up by init_espfix_ap()
+ * fit its pmd_bad() check (_PAGE_NX set and _PAGE_RW clear),
+ * and CONFIG_EFI_PGT_DUMP efi_mm goes so far as to walk them.
*/
- if (walk->mm == &init_mm)
+ if (walk->mm == &init_mm || addr >= TASK_SIZE)
pte = pte_offset_kernel(pmd, addr);
else
pte = pte_offset_map(pmd, addr);
diff --git a/mm/shmem.c b/mm/shmem.c
index 2f2e0e618072..f5af4b943e42 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2796,7 +2796,8 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
if (*ppos >= i_size_read(inode))
break;
- error = shmem_get_folio(inode, *ppos / PAGE_SIZE, &folio, SGP_READ);
+ error = shmem_get_folio(inode, *ppos / PAGE_SIZE, &folio,
+ SGP_READ);
if (error) {
if (error == -EINVAL)
error = 0;
@@ -2805,7 +2806,9 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
if (folio) {
folio_unlock(folio);
- if (folio_test_hwpoison(folio)) {
+ if (folio_test_hwpoison(folio) ||
+ (folio_test_large(folio) &&
+ folio_test_has_hwpoisoned(folio))) {
error = -EIO;
break;
}
@@ -2841,7 +2844,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
folio_put(folio);
folio = NULL;
} else {
- n = splice_zeropage_into_pipe(pipe, *ppos, len);
+ n = splice_zeropage_into_pipe(pipe, *ppos, part);
}
if (!n)
diff --git a/scripts/spelling.txt b/scripts/spelling.txt
index fc7ba95e86a0..855c4863124b 100644
--- a/scripts/spelling.txt
+++ b/scripts/spelling.txt
@@ -1541,7 +1541,6 @@ temeprature||temperature
temorary||temporary
temproarily||temporarily
temperture||temperature
-thead||thread
theads||threads
therfore||therefore
thier||their