Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c          25
-rw-r--r--  mm/gup.c              13
-rw-r--r--  mm/huge_memory.c      29
-rw-r--r--  mm/hugetlb_vmemmap.c  13
-rw-r--r--  mm/list_lru.c         28
-rw-r--r--  mm/memcontrol-v1.c     7
-rw-r--r--  mm/memcontrol.c       34
-rw-r--r--  mm/memory-failure.c   20
-rw-r--r--  mm/memory.c           60
-rw-r--r--  mm/memory_hotplug.c    2
-rw-r--r--  mm/migrate.c          16
-rw-r--r--  mm/mm_init.c          15
-rw-r--r--  mm/mmap.c              2
-rw-r--r--  mm/mseal.c            14
-rw-r--r--  mm/page_alloc.c       59
-rw-r--r--  mm/page_ext.c         18
-rw-r--r--  mm/shmem.c            25
-rw-r--r--  mm/slab.h              2
-rw-r--r--  mm/slab_common.c     138
-rw-r--r--  mm/slub.c             27
-rw-r--r--  mm/sparse-vmemmap.c   11
-rw-r--r--  mm/sparse.c            7
-rw-r--r--  mm/truncate.c          4
-rw-r--r--  mm/userfaultfd.c      29
-rw-r--r--  mm/vmalloc.c          18
-rw-r--r--  mm/vmscan.c           24
-rw-r--r--  mm/vmstat.c           52
-rw-r--r--  mm/zsmalloc.c          2
28 files changed, 399 insertions, 295 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index d62150418b91..60a9cc593e9b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2712,14 +2712,12 @@ int kiocb_write_and_wait(struct kiocb *iocb, size_t count)
}
EXPORT_SYMBOL_GPL(kiocb_write_and_wait);
-int kiocb_invalidate_pages(struct kiocb *iocb, size_t count)
+int filemap_invalidate_pages(struct address_space *mapping,
+ loff_t pos, loff_t end, bool nowait)
{
- struct address_space *mapping = iocb->ki_filp->f_mapping;
- loff_t pos = iocb->ki_pos;
- loff_t end = pos + count - 1;
int ret;
- if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (nowait) {
/* we could block if there are any pages in the range */
if (filemap_range_has_page(mapping, pos, end))
return -EAGAIN;
@@ -2738,6 +2736,15 @@ int kiocb_invalidate_pages(struct kiocb *iocb, size_t count)
return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
end >> PAGE_SHIFT);
}
+
+int kiocb_invalidate_pages(struct kiocb *iocb, size_t count)
+{
+ struct address_space *mapping = iocb->ki_filp->f_mapping;
+
+ return filemap_invalidate_pages(mapping, iocb->ki_pos,
+ iocb->ki_pos + count - 1,
+ iocb->ki_flags & IOCB_NOWAIT);
+}
EXPORT_SYMBOL_GPL(kiocb_invalidate_pages);
/**
@@ -3987,7 +3994,6 @@ ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
ssize_t written = 0;
do {
- struct page *page;
struct folio *folio;
size_t offset; /* Offset into folio */
size_t bytes; /* Bytes to write to folio */
@@ -4017,11 +4023,10 @@ retry:
}
status = a_ops->write_begin(file, mapping, pos, bytes,
- &page, &fsdata);
+ &folio, &fsdata);
if (unlikely(status < 0))
break;
- folio = page_folio(page);
offset = offset_in_folio(folio, pos);
if (bytes > folio_size(folio) - offset)
bytes = folio_size(folio) - offset;
@@ -4033,7 +4038,7 @@ retry:
flush_dcache_folio(folio);
status = a_ops->write_end(file, mapping, pos, bytes, copied,
- page, fsdata);
+ folio, fsdata);
if (unlikely(status != copied)) {
iov_iter_revert(i, copied - max(status, 0L));
if (unlikely(status < 0))
@@ -4231,7 +4236,7 @@ int filemap_invalidate_inode(struct inode *inode, bool flush,
}
/* Wait for writeback to complete on all folios and discard. */
- truncate_inode_pages_range(mapping, start, end);
+ invalidate_inode_pages2_range(mapping, start / PAGE_SIZE, end / PAGE_SIZE);
unlock:
filemap_invalidate_unlock(mapping);
diff --git a/mm/gup.c b/mm/gup.c
index 54d0dc3831fb..02c46ae33028 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -416,6 +416,19 @@ void unpin_user_pages(struct page **pages, unsigned long npages)
EXPORT_SYMBOL(unpin_user_pages);
/**
+ * unpin_user_folio() - release pages of a folio
+ * @folio: pointer to folio to be released
+ * @npages: number of pages of the same folio
+ *
+ * Release @npages pages of the folio.
+ */
+void unpin_user_folio(struct folio *folio, unsigned long npages)
+{
+ gup_put_folio(folio, npages, FOLL_PIN);
+}
+EXPORT_SYMBOL(unpin_user_folio);
+
+/**
* unpin_folios() - release an array of gup-pinned folios.
* @folios: array of folios to be marked dirty and released.
* @nfolios: number of folios in the @folios array.
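
A minimal usage sketch for the new unpin_user_folio() export (not part of the patch; the helper name unpin_pages_in_folio and the single-folio assumption are illustrative): when every pinned page belongs to one folio, the per-page unpin loop collapses into a single call that drops @npages FOLL_PIN references at once.

	static void unpin_pages_in_folio(struct page **pages, unsigned long nr)
	{
		struct folio *folio = page_folio(pages[0]);
		unsigned long i;

		/* Fall back to the per-page path if the pages span folios. */
		for (i = 1; i < nr; i++) {
			if (page_folio(pages[i]) != folio) {
				unpin_user_pages(pages, nr);
				return;
			}
		}

		/* One reference update instead of nr separate unpins. */
		unpin_user_folio(folio, nr);
	}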
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f4be468e06a4..67c86a5d64a6 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1685,7 +1685,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
spin_unlock(vmf->ptl);
- goto out;
+ return 0;
}
pmd = pmd_modify(oldpmd, vma->vm_page_prot);
@@ -1728,22 +1728,16 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
if (!migrate_misplaced_folio(folio, vma, target_nid)) {
flags |= TNF_MIGRATED;
nid = target_nid;
- } else {
- flags |= TNF_MIGRATE_FAIL;
- vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
- if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
- spin_unlock(vmf->ptl);
- goto out;
- }
- goto out_map;
- }
-
-out:
- if (nid != NUMA_NO_NODE)
task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
+ return 0;
+ }
- return 0;
-
+ flags |= TNF_MIGRATE_FAIL;
+ vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+ if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
+ spin_unlock(vmf->ptl);
+ return 0;
+ }
out_map:
/* Restore the PMD */
pmd = pmd_modify(oldpmd, vma->vm_page_prot);
@@ -1753,7 +1747,10 @@ out_map:
set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
spin_unlock(vmf->ptl);
- goto out;
+
+ if (nid != NUMA_NO_NODE)
+ task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
+ return 0;
}
/*
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 829112b0a914..0c3f56b3578e 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -185,11 +185,11 @@ static int vmemmap_remap_range(unsigned long start, unsigned long end,
static inline void free_vmemmap_page(struct page *page)
{
if (PageReserved(page)) {
+ memmap_boot_pages_add(-1);
free_bootmem_page(page);
- mod_node_page_state(page_pgdat(page), NR_MEMMAP_BOOT, -1);
} else {
+ memmap_pages_add(-1);
__free_page(page);
- mod_node_page_state(page_pgdat(page), NR_MEMMAP, -1);
}
}
@@ -341,7 +341,7 @@ static int vmemmap_remap_free(unsigned long start, unsigned long end,
copy_page(page_to_virt(walk.reuse_page),
(void *)walk.reuse_addr);
list_add(&walk.reuse_page->lru, vmemmap_pages);
- mod_node_page_state(NODE_DATA(nid), NR_MEMMAP, 1);
+ memmap_pages_add(1);
}
/*
@@ -392,14 +392,11 @@ static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
for (i = 0; i < nr_pages; i++) {
page = alloc_pages_node(nid, gfp_mask, 0);
- if (!page) {
- mod_node_page_state(NODE_DATA(nid), NR_MEMMAP, i);
+ if (!page)
goto out;
- }
list_add(&page->lru, list);
}
-
- mod_node_page_state(NODE_DATA(nid), NR_MEMMAP, nr_pages);
+ memmap_pages_add(nr_pages);
return 0;
out:
diff --git a/mm/list_lru.c b/mm/list_lru.c
index a29d96929d7c..9b7ff06e9d32 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -85,6 +85,7 @@ list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
}
#endif /* CONFIG_MEMCG */
+/* The caller must ensure the memcg lifetime. */
bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
struct mem_cgroup *memcg)
{
@@ -109,14 +110,22 @@ EXPORT_SYMBOL_GPL(list_lru_add);
bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
{
+ bool ret;
int nid = page_to_nid(virt_to_page(item));
- struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
- mem_cgroup_from_slab_obj(item) : NULL;
- return list_lru_add(lru, item, nid, memcg);
+ if (list_lru_memcg_aware(lru)) {
+ rcu_read_lock();
+ ret = list_lru_add(lru, item, nid, mem_cgroup_from_slab_obj(item));
+ rcu_read_unlock();
+ } else {
+ ret = list_lru_add(lru, item, nid, NULL);
+ }
+
+ return ret;
}
EXPORT_SYMBOL_GPL(list_lru_add_obj);
+/* The caller must ensure the memcg lifetime. */
bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
struct mem_cgroup *memcg)
{
@@ -139,11 +148,18 @@ EXPORT_SYMBOL_GPL(list_lru_del);
bool list_lru_del_obj(struct list_lru *lru, struct list_head *item)
{
+ bool ret;
int nid = page_to_nid(virt_to_page(item));
- struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
- mem_cgroup_from_slab_obj(item) : NULL;
- return list_lru_del(lru, item, nid, memcg);
+ if (list_lru_memcg_aware(lru)) {
+ rcu_read_lock();
+ ret = list_lru_del(lru, item, nid, mem_cgroup_from_slab_obj(item));
+ rcu_read_unlock();
+ } else {
+ ret = list_lru_del(lru, item, nid, NULL);
+ }
+
+ return ret;
}
EXPORT_SYMBOL_GPL(list_lru_del_obj);
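
Why the rcu_read_lock() sections above are needed, in a hedged comment (my summary, not patch text):

	/*
	 * mem_cgroup_from_slab_obj() returns a memcg whose lifetime is only
	 * guaranteed inside an RCU read-side critical section. Without the
	 * rcu_read_lock()/rcu_read_unlock() pair, the cgroup could be torn
	 * down between the lookup and the list_lru_add()/list_lru_del() call
	 * that dereferences it, hence the "caller must ensure the memcg
	 * lifetime" contract documented on the raw helpers.
	 */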
diff --git a/mm/memcontrol-v1.c b/mm/memcontrol-v1.c
index 2aeea4d8bf8e..417c96f2da28 100644
--- a/mm/memcontrol-v1.c
+++ b/mm/memcontrol-v1.c
@@ -1842,9 +1842,12 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
buf = endp + 1;
cfd = simple_strtoul(buf, &endp, 10);
- if ((*endp != ' ') && (*endp != '\0'))
+ if (*endp == '\0')
+ buf = endp;
+ else if (*endp == ' ')
+ buf = endp + 1;
+ else
return -EINVAL;
- buf = endp + 1;
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (!event)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 960371788687..d563fb515766 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3386,11 +3386,28 @@ static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
#define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1)
static DEFINE_IDR(mem_cgroup_idr);
+static DEFINE_SPINLOCK(memcg_idr_lock);
+
+static int mem_cgroup_alloc_id(void)
+{
+ int ret;
+
+ idr_preload(GFP_KERNEL);
+ spin_lock(&memcg_idr_lock);
+ ret = idr_alloc(&mem_cgroup_idr, NULL, 1, MEM_CGROUP_ID_MAX + 1,
+ GFP_NOWAIT);
+ spin_unlock(&memcg_idr_lock);
+ idr_preload_end();
+ return ret;
+}
static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
{
if (memcg->id.id > 0) {
+ spin_lock(&memcg_idr_lock);
idr_remove(&mem_cgroup_idr, memcg->id.id);
+ spin_unlock(&memcg_idr_lock);
+
memcg->id.id = 0;
}
}
@@ -3524,8 +3541,7 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
if (!memcg)
return ERR_PTR(error);
- memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
- 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL);
+ memcg->id.id = mem_cgroup_alloc_id();
if (memcg->id.id < 0) {
error = memcg->id.id;
goto fail;
@@ -3597,8 +3613,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
memcg1_soft_limit_reset(memcg);
#ifdef CONFIG_ZSWAP
memcg->zswap_max = PAGE_COUNTER_MAX;
- WRITE_ONCE(memcg->zswap_writeback,
- !parent || READ_ONCE(parent->zswap_writeback));
+ WRITE_ONCE(memcg->zswap_writeback, true);
#endif
page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
if (parent) {
@@ -3667,7 +3682,9 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
* publish it here at the end of onlining. This matches the
* regular ID destruction during offlining.
*/
+ spin_lock(&memcg_idr_lock);
idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
+ spin_unlock(&memcg_idr_lock);
return 0;
offline_kmem:
@@ -5302,7 +5319,14 @@ void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
{
/* if zswap is disabled, do not block pages going to the swapping device */
- return !zswap_is_enabled() || !memcg || READ_ONCE(memcg->zswap_writeback);
+ if (!zswap_is_enabled())
+ return true;
+
+ for (; memcg; memcg = parent_mem_cgroup(memcg))
+ if (!READ_ONCE(memcg->zswap_writeback))
+ return false;
+
+ return true;
}
static u64 zswap_current_read(struct cgroup_subsys_state *css,
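
The id allocation above is the standard idr_preload() pattern; a self-contained sketch (my_idr, my_lock and my_alloc_id are illustrative names, not from the patch): preallocate with GFP_KERNEL while sleeping is still allowed, then allocate under the spinlock with GFP_NOWAIT so idr_alloc() draws from the preloaded per-CPU cache instead of allocating under the lock.

	static DEFINE_IDR(my_idr);
	static DEFINE_SPINLOCK(my_lock);

	static int my_alloc_id(void *ptr)
	{
		int id;

		idr_preload(GFP_KERNEL);	/* may sleep: refill per-CPU cache */
		spin_lock(&my_lock);
		/* GFP_NOWAIT: must not sleep while holding the spinlock. */
		id = idr_alloc(&my_idr, ptr, 1, 0, GFP_NOWAIT);
		spin_unlock(&my_lock);
		idr_preload_end();

		return id;	/* new id >= 1, or a negative errno */
	}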
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 581d3e5c9117..7066fc84f351 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2417,7 +2417,7 @@ struct memory_failure_entry {
struct memory_failure_cpu {
DECLARE_KFIFO(fifo, struct memory_failure_entry,
MEMORY_FAILURE_FIFO_SIZE);
- spinlock_t lock;
+ raw_spinlock_t lock;
struct work_struct work;
};
@@ -2443,20 +2443,22 @@ void memory_failure_queue(unsigned long pfn, int flags)
{
struct memory_failure_cpu *mf_cpu;
unsigned long proc_flags;
+ bool buffer_overflow;
struct memory_failure_entry entry = {
.pfn = pfn,
.flags = flags,
};
mf_cpu = &get_cpu_var(memory_failure_cpu);
- spin_lock_irqsave(&mf_cpu->lock, proc_flags);
- if (kfifo_put(&mf_cpu->fifo, entry))
+ raw_spin_lock_irqsave(&mf_cpu->lock, proc_flags);
+ buffer_overflow = !kfifo_put(&mf_cpu->fifo, entry);
+ if (!buffer_overflow)
schedule_work_on(smp_processor_id(), &mf_cpu->work);
- else
+ raw_spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
+ put_cpu_var(memory_failure_cpu);
+ if (buffer_overflow)
pr_err("buffer overflow when queuing memory failure at %#lx\n",
pfn);
- spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
- put_cpu_var(memory_failure_cpu);
}
EXPORT_SYMBOL_GPL(memory_failure_queue);
@@ -2469,9 +2471,9 @@ static void memory_failure_work_func(struct work_struct *work)
mf_cpu = container_of(work, struct memory_failure_cpu, work);
for (;;) {
- spin_lock_irqsave(&mf_cpu->lock, proc_flags);
+ raw_spin_lock_irqsave(&mf_cpu->lock, proc_flags);
gotten = kfifo_get(&mf_cpu->fifo, &entry);
- spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
+ raw_spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
if (!gotten)
break;
if (entry.flags & MF_SOFT_OFFLINE)
@@ -2501,7 +2503,7 @@ static int __init memory_failure_init(void)
for_each_possible_cpu(cpu) {
mf_cpu = &per_cpu(memory_failure_cpu, cpu);
- spin_lock_init(&mf_cpu->lock);
+ raw_spin_lock_init(&mf_cpu->lock);
INIT_KFIFO(mf_cpu->fifo);
INIT_WORK(&mf_cpu->work, memory_failure_work_func);
}
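
A hedged note on the conversion above (my reading of the change, not patch text):

	/*
	 * memory_failure_queue() can be reached from atomic contexts such as
	 * the machine-check path. On PREEMPT_RT a spinlock_t is a sleeping
	 * lock, so the per-CPU fifo lock becomes a raw_spinlock_t, and the
	 * pr_err() is moved outside the locked region so that no printing
	 * (which may itself take sleeping locks) happens under the raw lock.
	 */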
diff --git a/mm/memory.c b/mm/memory.c
index 34f8402d2046..ebfc9768f801 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2632,11 +2632,7 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
return 0;
}
-/*
- * Variant of remap_pfn_range that does not call track_pfn_remap. The caller
- * must have pre-validated the caching bits of the pgprot_t.
- */
-int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
+static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t prot)
{
pgd_t *pgd;
@@ -2689,6 +2685,27 @@ int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
return 0;
}
+/*
+ * Variant of remap_pfn_range that does not call track_pfn_remap. The caller
+ * must have pre-validated the caching bits of the pgprot_t.
+ */
+int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+ int error = remap_pfn_range_internal(vma, addr, pfn, size, prot);
+
+ if (!error)
+ return 0;
+
+ /*
+ * A partial pfn range mapping is dangerous: it does not
+ * maintain page reference counts, and callers may free
+ * pages due to the error. So zap it early.
+ */
+ zap_page_range_single(vma, addr, size, NULL);
+ return error;
+}
+
/**
* remap_pfn_range - remap kernel memory to userspace
* @vma: user vma to map to
@@ -5295,7 +5312,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
if (unlikely(!pte_same(old_pte, vmf->orig_pte))) {
pte_unmap_unlock(vmf->pte, vmf->ptl);
- goto out;
+ return 0;
}
pte = pte_modify(old_pte, vma->vm_page_prot);
@@ -5358,23 +5375,19 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
if (!migrate_misplaced_folio(folio, vma, target_nid)) {
nid = target_nid;
flags |= TNF_MIGRATED;
- } else {
- flags |= TNF_MIGRATE_FAIL;
- vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
- vmf->address, &vmf->ptl);
- if (unlikely(!vmf->pte))
- goto out;
- if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
- pte_unmap_unlock(vmf->pte, vmf->ptl);
- goto out;
- }
- goto out_map;
+ task_numa_fault(last_cpupid, nid, nr_pages, flags);
+ return 0;
}
-out:
- if (nid != NUMA_NO_NODE)
- task_numa_fault(last_cpupid, nid, nr_pages, flags);
- return 0;
+ flags |= TNF_MIGRATE_FAIL;
+ vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
+ vmf->address, &vmf->ptl);
+ if (unlikely(!vmf->pte))
+ return 0;
+ if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
+ pte_unmap_unlock(vmf->pte, vmf->ptl);
+ return 0;
+ }
out_map:
/*
* Make it present again, depending on how arch implements
@@ -5387,7 +5400,10 @@ out_map:
numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte,
writable);
pte_unmap_unlock(vmf->pte, vmf->ptl);
- goto out;
+
+ if (nid != NUMA_NO_NODE)
+ task_numa_fault(last_cpupid, nid, nr_pages, flags);
+ return 0;
}
static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
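
The "zap it early" comment above in sketch form (the driver code here is illustrative, not from the patch):

	/*
	 * err = remap_pfn_range(vma, vma->vm_start, pfn, size, prot);
	 * if (err)
	 *	free_my_pages();	// hypothetical cleanup
	 *
	 * remap_pfn_range() mappings take no page references, so without the
	 * zap_page_range_single() on failure, any PTEs established before
	 * the error would keep pointing at pages the caller just freed.
	 */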
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 66267c26ca1b..951878ab627a 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1681,7 +1681,7 @@ struct range __weak arch_get_mappable_range(void)
struct range mhp_get_pluggable_range(bool need_mapping)
{
- const u64 max_phys = (1ULL << MAX_PHYSMEM_BITS) - 1;
+ const u64 max_phys = PHYSMEM_END;
struct range mhp_range;
if (need_mapping) {
diff --git a/mm/migrate.c b/mm/migrate.c
index e7296c0fb5d5..923ea80ba744 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1479,11 +1479,17 @@ out:
return rc;
}
-static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
+static inline int try_split_folio(struct folio *folio, struct list_head *split_folios,
+ enum migrate_mode mode)
{
int rc;
- folio_lock(folio);
+ if (mode == MIGRATE_ASYNC) {
+ if (!folio_trylock(folio))
+ return -EAGAIN;
+ } else {
+ folio_lock(folio);
+ }
rc = split_folio_to_list(folio, split_folios);
folio_unlock(folio);
if (!rc)
@@ -1677,7 +1683,7 @@ static int migrate_pages_batch(struct list_head *from,
*/
if (nr_pages > 2 &&
!list_empty(&folio->_deferred_list)) {
- if (try_split_folio(folio, split_folios) == 0) {
+ if (!try_split_folio(folio, split_folios, mode)) {
nr_failed++;
stats->nr_thp_failed += is_thp;
stats->nr_thp_split += is_thp;
@@ -1699,7 +1705,7 @@ static int migrate_pages_batch(struct list_head *from,
if (!thp_migration_supported() && is_thp) {
nr_failed++;
stats->nr_thp_failed++;
- if (!try_split_folio(folio, split_folios)) {
+ if (!try_split_folio(folio, split_folios, mode)) {
stats->nr_thp_split++;
stats->nr_split++;
continue;
@@ -1731,7 +1737,7 @@ static int migrate_pages_batch(struct list_head *from,
stats->nr_thp_failed += is_thp;
/* Large folio NUMA faulting doesn't split to retry. */
if (is_large && !nosplit) {
- int ret = try_split_folio(folio, split_folios);
+ int ret = try_split_folio(folio, split_folios, mode);
if (!ret) {
stats->nr_thp_split += is_thp;
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 75c3bd42799b..51960079875b 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1623,8 +1623,7 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat)
panic("Failed to allocate %ld bytes for node %d memory map\n",
size, pgdat->node_id);
pgdat->node_mem_map = map + offset;
- mod_node_early_perpage_metadata(pgdat->node_id,
- DIV_ROUND_UP(size, PAGE_SIZE));
+ memmap_boot_pages_add(DIV_ROUND_UP(size, PAGE_SIZE));
pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
__func__, pgdat->node_id, (unsigned long)pgdat,
(unsigned long)pgdat->node_mem_map);
@@ -2245,6 +2244,8 @@ void __init init_cma_reserved_pageblock(struct page *page)
set_pageblock_migratetype(page, MIGRATE_CMA);
set_page_refcounted(page);
+ /* pages were reserved and not allocated */
+ clear_page_tag_ref(page);
__free_pages(page, pageblock_order);
adjust_managed_page_count(page, pageblock_nr_pages);
@@ -2460,15 +2461,7 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn,
}
/* pages were reserved and not allocated */
- if (mem_alloc_profiling_enabled()) {
- union codetag_ref *ref = get_page_tag_ref(page);
-
- if (ref) {
- set_codetag_empty(ref);
- put_page_tag_ref(ref);
- }
- }
-
+ clear_page_tag_ref(page);
__free_pages_core(page, order, MEMINIT_EARLY);
}
diff --git a/mm/mmap.c b/mm/mmap.c
index d0dfc85b209b..6ddb278a5ee8 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1229,7 +1229,7 @@ static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
return MAX_LFS_FILESIZE;
/* Special "we do even unsigned file positions" case */
- if (file->f_mode & FMODE_UNSIGNED_OFFSET)
+ if (file->f_op->fop_flags & FOP_UNSIGNED_OFFSET)
return 0;
/* Yes, random drivers might want more. But I'm tired of buggy drivers */
diff --git a/mm/mseal.c b/mm/mseal.c
index bf783bba8ed0..15bba28acc00 100644
--- a/mm/mseal.c
+++ b/mm/mseal.c
@@ -40,9 +40,17 @@ static bool can_modify_vma(struct vm_area_struct *vma)
static bool is_madv_discard(int behavior)
{
- return behavior &
- (MADV_FREE | MADV_DONTNEED | MADV_DONTNEED_LOCKED |
- MADV_REMOVE | MADV_DONTFORK | MADV_WIPEONFORK);
+ switch (behavior) {
+ case MADV_FREE:
+ case MADV_DONTNEED:
+ case MADV_DONTNEED_LOCKED:
+ case MADV_REMOVE:
+ case MADV_DONTFORK:
+ case MADV_WIPEONFORK:
+ return true;
+ }
+
+ return false;
}
static bool is_ro_anon(struct vm_area_struct *vma)
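
A worked illustration of why the bitwise test was replaced (constants from include/uapi/asm-generic/mman-common.h, assuming the usual values):

	/*
	 * MADV_DONTNEED = 4, MADV_FREE = 8, MADV_REMOVE = 9,
	 * MADV_DONTFORK = 10, MADV_WIPEONFORK = 18, MADV_DONTNEED_LOCKED = 24.
	 *
	 * These are command numbers, not bit flags, so OR-ing them yields
	 *	4 | 8 | 9 | 10 | 18 | 24 = 0b11110 (30)
	 * and the old `behavior & mask` test also matched unrelated advice
	 * that merely shares a low bit, e.g. MADV_SEQUENTIAL (2) or
	 * MADV_WILLNEED (3). The switch compares exact values instead.
	 */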
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 28f80daf5c04..91ace8ca97e2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -287,7 +287,7 @@ EXPORT_SYMBOL(nr_online_nodes);
static bool page_contains_unaccepted(struct page *page, unsigned int order);
static void accept_page(struct page *page, unsigned int order);
-static bool try_to_accept_memory(struct zone *zone, unsigned int order);
+static bool cond_accept_memory(struct zone *zone, unsigned int order);
static inline bool has_unaccepted_memory(void);
static bool __free_unaccepted(struct page *page);
@@ -1054,6 +1054,13 @@ __always_inline bool free_pages_prepare(struct page *page,
reset_page_owner(page, order);
page_table_check_free(page, order);
pgalloc_tag_sub(page, 1 << order);
+
+ /*
+ * The page is isolated and accounted for.
+ * Mark the codetag as empty to avoid an accounting error
+ * when the page is freed by unpoison_memory().
+ */
+ clear_page_tag_ref(page);
return false;
}
@@ -3072,9 +3079,6 @@ static inline long __zone_watermark_unusable_free(struct zone *z,
if (!(alloc_flags & ALLOC_CMA))
unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
#endif
-#ifdef CONFIG_UNACCEPTED_MEMORY
- unusable_free += zone_page_state(z, NR_UNACCEPTED);
-#endif
return unusable_free;
}
@@ -3368,6 +3372,8 @@ retry:
}
}
+ cond_accept_memory(zone, order);
+
/*
* Detect whether the number of free pages is below high
* watermark. If so, we will decrease pcp->high and free
@@ -3393,10 +3399,8 @@ check_alloc_wmark:
gfp_mask)) {
int ret;
- if (has_unaccepted_memory()) {
- if (try_to_accept_memory(zone, order))
- goto try_this_zone;
- }
+ if (cond_accept_memory(zone, order))
+ goto try_this_zone;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
@@ -3450,10 +3454,8 @@ try_this_zone:
return page;
} else {
- if (has_unaccepted_memory()) {
- if (try_to_accept_memory(zone, order))
- goto try_this_zone;
- }
+ if (cond_accept_memory(zone, order))
+ goto try_this_zone;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/* Try again if zone has deferred pages */
@@ -5755,7 +5757,6 @@ void __init setup_per_cpu_pageset(void)
for_each_online_pgdat(pgdat)
pgdat->per_cpu_nodestats =
alloc_percpu(struct per_cpu_nodestat);
- store_early_perpage_metadata();
}
__meminit void zone_pcp_init(struct zone *zone)
@@ -5821,14 +5822,7 @@ unsigned long free_reserved_area(void *start, void *end, int poison, const char
void free_reserved_page(struct page *page)
{
- if (mem_alloc_profiling_enabled()) {
- union codetag_ref *ref = get_page_tag_ref(page);
-
- if (ref) {
- set_codetag_empty(ref);
- put_page_tag_ref(ref);
- }
- }
+ clear_page_tag_ref(page);
ClearPageReserved(page);
init_page_count(page);
__free_page(page);
@@ -6951,9 +6945,6 @@ static bool try_to_accept_memory_one(struct zone *zone)
struct page *page;
bool last;
- if (list_empty(&zone->unaccepted_pages))
- return false;
-
spin_lock_irqsave(&zone->lock, flags);
page = list_first_entry_or_null(&zone->unaccepted_pages,
struct page, lru);
@@ -6979,23 +6970,29 @@ static bool try_to_accept_memory_one(struct zone *zone)
return true;
}
-static bool try_to_accept_memory(struct zone *zone, unsigned int order)
+static bool cond_accept_memory(struct zone *zone, unsigned int order)
{
long to_accept;
- int ret = false;
+ bool ret = false;
+
+ if (!has_unaccepted_memory())
+ return false;
+
+ if (list_empty(&zone->unaccepted_pages))
+ return false;
/* How much to accept to get to high watermark? */
to_accept = high_wmark_pages(zone) -
(zone_page_state(zone, NR_FREE_PAGES) -
- __zone_watermark_unusable_free(zone, order, 0));
+ __zone_watermark_unusable_free(zone, order, 0) -
+ zone_page_state(zone, NR_UNACCEPTED));
- /* Accept at least one page */
- do {
+ while (to_accept > 0) {
if (!try_to_accept_memory_one(zone))
break;
ret = true;
to_accept -= MAX_ORDER_NR_PAGES;
- } while (to_accept > 0);
+ }
return ret;
}
@@ -7038,7 +7035,7 @@ static void accept_page(struct page *page, unsigned int order)
{
}
-static bool try_to_accept_memory(struct zone *zone, unsigned int order)
+static bool cond_accept_memory(struct zone *zone, unsigned int order)
{
return false;
}
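
A worked example of the reshuffled watermark math in cond_accept_memory() (numbers are illustrative only):

	/*
	 * high_wmark = 1024, NR_FREE_PAGES = 4096,
	 * __zone_watermark_unusable_free() = 512, NR_UNACCEPTED = 3072.
	 *
	 * Unaccepted pages are still counted as free, so the genuinely
	 * usable free pages are 4096 - 512 - 3072 = 512, and
	 *	to_accept = 1024 - 512 = 512,
	 * i.e. memory is accepted in MAX_ORDER_NR_PAGES chunks until the
	 * usable free pages reach the high watermark.
	 */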
diff --git a/mm/page_ext.c b/mm/page_ext.c
index c191e490c401..641d93f6af4c 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -214,8 +214,7 @@ static int __init alloc_node_page_ext(int nid)
return -ENOMEM;
NODE_DATA(nid)->node_page_ext = base;
total_usage += table_size;
- mod_node_page_state(NODE_DATA(nid), NR_MEMMAP_BOOT,
- DIV_ROUND_UP(table_size, PAGE_SIZE));
+ memmap_boot_pages_add(DIV_ROUND_UP(table_size, PAGE_SIZE));
return 0;
}
@@ -275,10 +274,8 @@ static void *__meminit alloc_page_ext(size_t size, int nid)
else
addr = vzalloc_node(size, nid);
- if (addr) {
- mod_node_page_state(NODE_DATA(nid), NR_MEMMAP,
- DIV_ROUND_UP(size, PAGE_SIZE));
- }
+ if (addr)
+ memmap_pages_add(DIV_ROUND_UP(size, PAGE_SIZE));
return addr;
}
@@ -323,25 +320,18 @@ static void free_page_ext(void *addr)
{
size_t table_size;
struct page *page;
- struct pglist_data *pgdat;
table_size = page_ext_size * PAGES_PER_SECTION;
+ memmap_pages_add(-1L * (DIV_ROUND_UP(table_size, PAGE_SIZE)));
if (is_vmalloc_addr(addr)) {
- page = vmalloc_to_page(addr);
- pgdat = page_pgdat(page);
vfree(addr);
} else {
page = virt_to_page(addr);
- pgdat = page_pgdat(page);
BUG_ON(PageReserved(page));
kmemleak_free(addr);
free_pages_exact(addr, table_size);
}
-
- mod_node_page_state(pgdat, NR_MEMMAP,
- -1L * (DIV_ROUND_UP(table_size, PAGE_SIZE)));
-
}
static void __free_page_ext(unsigned long pfn)
diff --git a/mm/shmem.c b/mm/shmem.c
index 2faa9daaf54b..b875852df51f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1629,11 +1629,6 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
unsigned long mask = READ_ONCE(huge_shmem_orders_always);
unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
unsigned long vm_flags = vma->vm_flags;
- /*
- * Check all the (large) orders below HPAGE_PMD_ORDER + 1 that
- * are enabled for this vma.
- */
- unsigned long orders = BIT(PMD_ORDER + 1) - 1;
loff_t i_size;
int order;
@@ -1678,7 +1673,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
if (global_huge)
mask |= READ_ONCE(huge_shmem_orders_inherit);
- return orders & mask;
+ return THP_ORDERS_ALL_FILE_DEFAULT & mask;
}
static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
@@ -1686,6 +1681,7 @@ static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault
unsigned long orders)
{
struct vm_area_struct *vma = vmf->vma;
+ pgoff_t aligned_index;
unsigned long pages;
int order;
@@ -1697,9 +1693,9 @@ static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault
order = highest_order(orders);
while (orders) {
pages = 1UL << order;
- index = round_down(index, pages);
- if (!xa_find(&mapping->i_pages, &index,
- index + pages - 1, XA_PRESENT))
+ aligned_index = round_down(index, pages);
+ if (!xa_find(&mapping->i_pages, &aligned_index,
+ aligned_index + pages - 1, XA_PRESENT))
break;
order = next_order(&orders, order);
}
@@ -2882,7 +2878,7 @@ static const struct inode_operations shmem_short_symlink_operations;
static int
shmem_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
+ struct folio **foliop, void **fsdata)
{
struct inode *inode = mapping->host;
struct shmem_inode_info *info = SHMEM_I(inode);
@@ -2903,23 +2899,22 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
if (ret)
return ret;
- *pagep = folio_file_page(folio, index);
- if (PageHWPoison(*pagep)) {
+ if (folio_test_hwpoison(folio) ||
+ (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) {
folio_unlock(folio);
folio_put(folio);
- *pagep = NULL;
return -EIO;
}
+ *foliop = folio;
return 0;
}
static int
shmem_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct folio *folio, void *fsdata)
{
- struct folio *folio = page_folio(page);
struct inode *inode = mapping->host;
if (pos + copied > inode->i_size)
diff --git a/mm/slab.h b/mm/slab.h
index dcdb56b8e7f5..a6051385186e 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -261,6 +261,8 @@ struct kmem_cache {
unsigned int object_size; /* Object size without metadata */
struct reciprocal_value reciprocal_size;
unsigned int offset; /* Free pointer offset */
+ /* Specific free pointer requested (if not UINT_MAX) */
+ unsigned int rcu_freeptr_offset;
#ifdef CONFIG_SLUB_CPU_PARTIAL
/* Number of per cpu partial objects to keep around */
unsigned int cpu_partial;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 40b582a014b8..95db3702f8d6 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -202,10 +202,10 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
}
static struct kmem_cache *create_cache(const char *name,
- unsigned int object_size, unsigned int align,
- slab_flags_t flags, unsigned int useroffset,
- unsigned int usersize, void (*ctor)(void *),
- struct kmem_cache *root_cache)
+ unsigned int object_size, unsigned int freeptr_offset,
+ unsigned int align, slab_flags_t flags,
+ unsigned int useroffset, unsigned int usersize,
+ void (*ctor)(void *))
{
struct kmem_cache *s;
int err;
@@ -213,6 +213,13 @@ static struct kmem_cache *create_cache(const char *name,
if (WARN_ON(useroffset + usersize > object_size))
useroffset = usersize = 0;
+ /* If a custom freelist pointer is requested, make sure it's sane. */
+ err = -EINVAL;
+ if (freeptr_offset != UINT_MAX &&
+ (freeptr_offset >= object_size || !(flags & SLAB_TYPESAFE_BY_RCU) ||
+ !IS_ALIGNED(freeptr_offset, sizeof(freeptr_t))))
+ goto out;
+
err = -ENOMEM;
s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
if (!s)
@@ -220,13 +227,13 @@ static struct kmem_cache *create_cache(const char *name,
s->name = name;
s->size = s->object_size = object_size;
+ s->rcu_freeptr_offset = freeptr_offset;
s->align = align;
s->ctor = ctor;
#ifdef CONFIG_HARDENED_USERCOPY
s->useroffset = useroffset;
s->usersize = usersize;
#endif
-
err = __kmem_cache_create(s, flags);
if (err)
goto out_free_cache;
@@ -241,38 +248,10 @@ out:
return ERR_PTR(err);
}
-/**
- * kmem_cache_create_usercopy - Create a cache with a region suitable
- * for copying to userspace
- * @name: A string which is used in /proc/slabinfo to identify this cache.
- * @size: The size of objects to be created in this cache.
- * @align: The required alignment for the objects.
- * @flags: SLAB flags
- * @useroffset: Usercopy region offset
- * @usersize: Usercopy region size
- * @ctor: A constructor for the objects.
- *
- * Cannot be called within a interrupt, but can be interrupted.
- * The @ctor is run when new pages are allocated by the cache.
- *
- * The flags are
- *
- * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
- * to catch references to uninitialised memory.
- *
- * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
- * for buffer overruns.
- *
- * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
- * cacheline. This can be beneficial if you're counting cycles as closely
- * as davem.
- *
- * Return: a pointer to the cache on success, NULL on failure.
- */
-struct kmem_cache *
-kmem_cache_create_usercopy(const char *name,
- unsigned int size, unsigned int align,
- slab_flags_t flags,
+static struct kmem_cache *
+do_kmem_cache_create_usercopy(const char *name,
+ unsigned int size, unsigned int freeptr_offset,
+ unsigned int align, slab_flags_t flags,
unsigned int useroffset, unsigned int usersize,
void (*ctor)(void *))
{
@@ -332,9 +311,9 @@ kmem_cache_create_usercopy(const char *name,
goto out_unlock;
}
- s = create_cache(cache_name, size,
+ s = create_cache(cache_name, size, freeptr_offset,
calculate_alignment(flags, align, size),
- flags, useroffset, usersize, ctor, NULL);
+ flags, useroffset, usersize, ctor);
if (IS_ERR(s)) {
err = PTR_ERR(s);
kfree_const(cache_name);
@@ -356,6 +335,44 @@ out_unlock:
}
return s;
}
+
+/**
+ * kmem_cache_create_usercopy - Create a cache with a region suitable
+ * for copying to userspace
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
+ * @size: The size of objects to be created in this cache.
+ * @align: The required alignment for the objects.
+ * @flags: SLAB flags
+ * @useroffset: Usercopy region offset
+ * @usersize: Usercopy region size
+ * @ctor: A constructor for the objects.
+ *
+ * Cannot be called within an interrupt, but can be interrupted.
+ * The @ctor is run when new pages are allocated by the cache.
+ *
+ * The flags are
+ *
+ * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
+ * to catch references to uninitialised memory.
+ *
+ * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
+ * for buffer overruns.
+ *
+ * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
+ * cacheline. This can be beneficial if you're counting cycles as closely
+ * as davem.
+ *
+ * Return: a pointer to the cache on success, NULL on failure.
+ */
+struct kmem_cache *
+kmem_cache_create_usercopy(const char *name, unsigned int size,
+ unsigned int align, slab_flags_t flags,
+ unsigned int useroffset, unsigned int usersize,
+ void (*ctor)(void *))
+{
+ return do_kmem_cache_create_usercopy(name, size, UINT_MAX, align, flags,
+ useroffset, usersize, ctor);
+}
EXPORT_SYMBOL(kmem_cache_create_usercopy);
/**
@@ -387,11 +404,50 @@ struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
slab_flags_t flags, void (*ctor)(void *))
{
- return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
- ctor);
+ return do_kmem_cache_create_usercopy(name, size, UINT_MAX, align, flags,
+ 0, 0, ctor);
}
EXPORT_SYMBOL(kmem_cache_create);
+/**
+ * kmem_cache_create_rcu - Create a SLAB_TYPESAFE_BY_RCU cache.
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
+ * @size: The size of objects to be created in this cache.
+ * @freeptr_offset: The offset into the memory to the free pointer
+ * @flags: SLAB flags
+ *
+ * Cannot be called within an interrupt, but can be interrupted.
+ *
+ * See kmem_cache_create() for an explanation of possible @flags.
+ *
+ * By default SLAB_TYPESAFE_BY_RCU caches place the free pointer outside
+ * of the object. This might cause the object to grow in size. Callers
+ * that have a reason to avoid this can specify a custom free pointer
+ * offset in their struct where the free pointer will be placed.
+ *
+ * Note that placing the free pointer inside the object requires the
+ * caller to ensure that none of the fields needed to guard against
+ * object recycling are invalidated (see SLAB_TYPESAFE_BY_RCU for
+ * details).
+ *
+ * Using zero as a value for @freeptr_offset is valid. To request no
+ * offset, UINT_MAX must be specified.
+ *
+ * Note that @ctor isn't supported with custom free pointers as a @ctor
+ * requires an external free pointer.
+ *
+ * Return: a pointer to the cache on success, NULL on failure.
+ */
+struct kmem_cache *kmem_cache_create_rcu(const char *name, unsigned int size,
+ unsigned int freeptr_offset,
+ slab_flags_t flags)
+{
+ return do_kmem_cache_create_usercopy(name, size, freeptr_offset, 0,
+ flags | SLAB_TYPESAFE_BY_RCU, 0, 0,
+ NULL);
+}
+EXPORT_SYMBOL(kmem_cache_create_rcu);
+
static struct kmem_cache *kmem_buckets_cache __ro_after_init;
/**
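
A usage sketch for the new kmem_cache_create_rcu() (struct conn and its fields are illustrative, not from the patch): the free pointer is overlaid on a field that RCU readers never depend on, so the object does not grow by a pointer.

	struct conn {
		struct hlist_node node;	/* walked under rcu_read_lock() */
		u32 key;		/* must stay meaningful across reuse */
		void *free_area;	/* donated to the allocator as free pointer */
	};

	static struct kmem_cache *conn_cache;

	static int __init conn_cache_init(void)
	{
		/* SLAB_TYPESAFE_BY_RCU is implied; offset is freeptr_t-aligned. */
		conn_cache = kmem_cache_create_rcu("conn", sizeof(struct conn),
						   offsetof(struct conn, free_area),
						   0);
		return conn_cache ? 0 : -ENOMEM;
	}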
diff --git a/mm/slub.c b/mm/slub.c
index 3520acaf9afa..be36f1df809c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -466,12 +466,6 @@ static struct workqueue_struct *flushwq;
*******************************************************************/
/*
- * freeptr_t represents a SLUB freelist pointer, which might be encoded
- * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
- */
-typedef struct { unsigned long v; } freeptr_t;
-
-/*
* Returns freelist pointer (ptr). With hardening, this is obfuscated
* with an XOR of the address where the pointer is held and a per-cache
* random number.
@@ -2116,6 +2110,10 @@ alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
if (!mem_alloc_profiling_enabled())
return;
+ /* slab->obj_exts might not be NULL if it was created for MEMCG accounting. */
+ if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
+ return;
+
obj_exts = slab_obj_exts(slab);
if (!obj_exts)
return;
@@ -3921,6 +3919,9 @@ static void *__slab_alloc_node(struct kmem_cache *s,
/*
* If the object has been wiped upon free, make sure it's fully initialized by
* zeroing out freelist pointer.
+ *
+ * Note that we also wipe custom freelist pointers specified via
+ * s->rcu_freeptr_offset.
*/
static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
void *obj)
@@ -4690,6 +4691,9 @@ static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
if (!df.slab)
continue;
+ if (kfence_free(df.freelist))
+ continue;
+
do_slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt,
_RET_IP_);
} while (likely(size));
@@ -5141,6 +5145,12 @@ static void set_cpu_partial(struct kmem_cache *s)
#endif
}
+/* Was a valid freeptr offset requested? */
+static inline bool has_freeptr_offset(const struct kmem_cache *s)
+{
+ return s->rcu_freeptr_offset != UINT_MAX;
+}
+
/*
* calculate_sizes() determines the order and the distribution of data within
* a slab object.
@@ -5186,7 +5196,8 @@ static int calculate_sizes(struct kmem_cache *s)
*/
s->inuse = size;
- if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || s->ctor ||
+ if (((flags & SLAB_TYPESAFE_BY_RCU) && !has_freeptr_offset(s)) ||
+ (flags & SLAB_POISON) || s->ctor ||
((flags & SLAB_RED_ZONE) &&
(s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) {
/*
@@ -5207,6 +5218,8 @@ static int calculate_sizes(struct kmem_cache *s)
*/
s->offset = size;
size += sizeof(void *);
+ } else if ((flags & SLAB_TYPESAFE_BY_RCU) && has_freeptr_offset(s)) {
+ s->offset = s->rcu_freeptr_offset;
} else {
/*
* Store freelist pointer near middle of object to keep
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 1dda6c53370b..edcc7a6b0f6f 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -469,13 +469,10 @@ struct page * __meminit __populate_section_memmap(unsigned long pfn,
if (r < 0)
return NULL;
- if (system_state == SYSTEM_BOOTING) {
- mod_node_early_perpage_metadata(nid, DIV_ROUND_UP(end - start,
- PAGE_SIZE));
- } else {
- mod_node_page_state(NODE_DATA(nid), NR_MEMMAP,
- DIV_ROUND_UP(end - start, PAGE_SIZE));
- }
+ if (system_state == SYSTEM_BOOTING)
+ memmap_boot_pages_add(DIV_ROUND_UP(end - start, PAGE_SIZE));
+ else
+ memmap_pages_add(DIV_ROUND_UP(end - start, PAGE_SIZE));
return pfn_to_page(pfn);
}
diff --git a/mm/sparse.c b/mm/sparse.c
index e4b830091d13..dc38539f8560 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -129,7 +129,7 @@ static inline int sparse_early_nid(struct mem_section *section)
static void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
unsigned long *end_pfn)
{
- unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
+ unsigned long max_sparsemem_pfn = (PHYSMEM_END + 1) >> PAGE_SHIFT;
/*
* Sanity checks - do not allow an architecture to pass
@@ -463,7 +463,7 @@ static void __init sparse_buffer_init(unsigned long size, int nid)
sparsemap_buf = memmap_alloc(size, section_map_size(), addr, nid, true);
sparsemap_buf_end = sparsemap_buf + size;
#ifndef CONFIG_SPARSEMEM_VMEMMAP
- mod_node_early_perpage_metadata(nid, DIV_ROUND_UP(size, PAGE_SIZE));
+ memmap_boot_pages_add(DIV_ROUND_UP(size, PAGE_SIZE));
#endif
}
@@ -643,8 +643,7 @@ static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
unsigned long start = (unsigned long) pfn_to_page(pfn);
unsigned long end = start + nr_pages * sizeof(struct page);
- mod_node_page_state(page_pgdat(pfn_to_page(pfn)), NR_MEMMAP,
- -1L * (DIV_ROUND_UP(end - start, PAGE_SIZE)));
+ memmap_pages_add(-1L * (DIV_ROUND_UP(end - start, PAGE_SIZE)));
vmemmap_free(start, end, altmap);
}
static void free_map_bootmem(struct page *memmap)
diff --git a/mm/truncate.c b/mm/truncate.c
index 4d61fbdd4b2f..0668cd340a46 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -157,7 +157,7 @@ static void truncate_cleanup_folio(struct folio *folio)
if (folio_mapped(folio))
unmap_mapping_folio(folio);
- if (folio_has_private(folio))
+ if (folio_needs_release(folio))
folio_invalidate(folio, 0, folio_size(folio));
/*
@@ -219,7 +219,7 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
if (!mapping_inaccessible(folio->mapping))
folio_zero_range(folio, offset, length);
- if (folio_has_private(folio))
+ if (folio_needs_release(folio))
folio_invalidate(folio, offset, length);
if (!folio_test_large(folio))
return true;
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index e54e5c8907fa..acc56c75ba99 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -787,27 +787,30 @@ retry:
}
dst_pmdval = pmdp_get_lockless(dst_pmd);
- /*
- * If the dst_pmd is mapped as THP don't
- * override it and just be strict.
- */
- if (unlikely(pmd_trans_huge(dst_pmdval))) {
- err = -EEXIST;
- break;
- }
if (unlikely(pmd_none(dst_pmdval)) &&
unlikely(__pte_alloc(dst_mm, dst_pmd))) {
err = -ENOMEM;
break;
}
- /* If an huge pmd materialized from under us fail */
- if (unlikely(pmd_trans_huge(*dst_pmd))) {
+ dst_pmdval = pmdp_get_lockless(dst_pmd);
+ /*
+ * If the dst_pmd is THP, don't override it and just be strict.
+ * (This includes the case where the PMD used to be THP and
+ * changed back to none after __pte_alloc().)
+ */
+ if (unlikely(!pmd_present(dst_pmdval) || pmd_trans_huge(dst_pmdval) ||
+ pmd_devmap(dst_pmdval))) {
+ err = -EEXIST;
+ break;
+ }
+ if (unlikely(pmd_bad(dst_pmdval))) {
err = -EFAULT;
break;
}
-
- BUG_ON(pmd_none(*dst_pmd));
- BUG_ON(pmd_trans_huge(*dst_pmd));
+ /*
+ * For shmem mappings, khugepaged is allowed to remove page
+ * tables under us; pte_offset_map_lock() will deal with that.
+ */
err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
src_addr, flags, &folio);
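
A hedged timeline of the race the re-read above closes (illustrative; the diff's own comments are authoritative):

	/*
	 *   mfill_atomic loop                    concurrent THP code
	 *   -----------------                    -------------------
	 *   dst_pmdval = pmdp_get_lockless()     // sees pmd_none
	 *   __pte_alloc(dst_mm, dst_pmd)
	 *                                        installs a huge PMD
	 *   old: BUG_ON(pmd_trans_huge(*dst_pmd)) -> crash on this race
	 *   new: re-read dst_pmdval and fail with -EEXIST / -EFAULT instead
	 */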
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 6b783baf12a1..a0df1e2e155a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2191,6 +2191,7 @@ static void purge_vmap_node(struct work_struct *work)
{
struct vmap_node *vn = container_of(work,
struct vmap_node, purge_work);
+ unsigned long nr_purged_pages = 0;
struct vmap_area *va, *n_va;
LIST_HEAD(local_list);
@@ -2208,7 +2209,7 @@ static void purge_vmap_node(struct work_struct *work)
kasan_release_vmalloc(orig_start, orig_end,
va->va_start, va->va_end);
- atomic_long_sub(nr, &vmap_lazy_nr);
+ nr_purged_pages += nr;
vn->nr_purged++;
if (is_vn_id_valid(vn_id) && !vn->skip_populate)
@@ -2219,6 +2220,8 @@ static void purge_vmap_node(struct work_struct *work)
list_add(&va->list, &local_list);
}
+ atomic_long_sub(nr_purged_pages, &vmap_lazy_nr);
+
reclaim_list_global(&local_list);
}
@@ -2626,6 +2629,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
vb->dirty_max = 0;
bitmap_set(vb->used_map, 0, (1UL << order));
INIT_LIST_HEAD(&vb->free_list);
+ vb->cpu = raw_smp_processor_id();
xa = addr_to_vb_xa(va->va_start);
vb_idx = addr_to_vb_idx(va->va_start);
@@ -2642,7 +2646,6 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
* integrity together with list_for_each_rcu from read
* side.
*/
- vb->cpu = raw_smp_processor_id();
vbq = per_cpu_ptr(&vmap_block_queue, vb->cpu);
spin_lock(&vbq->lock);
list_add_tail_rcu(&vb->free_list, &vbq->free);
@@ -3584,15 +3587,8 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
page = alloc_pages_noprof(alloc_gfp, order);
else
page = alloc_pages_node_noprof(nid, alloc_gfp, order);
- if (unlikely(!page)) {
- if (!nofail)
- break;
-
- /* fall back to the zero order allocations */
- alloc_gfp |= __GFP_NOFAIL;
- order = 0;
- continue;
- }
+ if (unlikely(!page))
+ break;
/*
* Higher order allocations must be able to be treated as
diff --git a/mm/vmscan.c b/mm/vmscan.c
index cfa839284b92..bd489c1af228 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1604,25 +1604,6 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
}
-#ifdef CONFIG_CMA
-/*
- * It is waste of effort to scan and reclaim CMA pages if it is not available
- * for current allocation context. Kswapd can not be enrolled as it can not
- * distinguish this scenario by using sc->gfp_mask = GFP_KERNEL
- */
-static bool skip_cma(struct folio *folio, struct scan_control *sc)
-{
- return !current_is_kswapd() &&
- gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE &&
- folio_migratetype(folio) == MIGRATE_CMA;
-}
-#else
-static bool skip_cma(struct folio *folio, struct scan_control *sc)
-{
- return false;
-}
-#endif
-
/*
* Isolating page from the lruvec to fill in @dst list by nr_to_scan times.
*
@@ -1669,8 +1650,7 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
nr_pages = folio_nr_pages(folio);
total_scan += nr_pages;
- if (folio_zonenum(folio) > sc->reclaim_idx ||
- skip_cma(folio, sc)) {
+ if (folio_zonenum(folio) > sc->reclaim_idx) {
nr_skipped[folio_zonenum(folio)] += nr_pages;
move_to = &folios_skipped;
goto move;
@@ -4320,7 +4300,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
}
/* ineligible */
- if (zone > sc->reclaim_idx || skip_cma(folio, sc)) {
+ if (zone > sc->reclaim_idx) {
gen = folio_inc_gen(lruvec, folio, false);
list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
return true;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 04a1cb6cc636..e875f2a4915f 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1033,6 +1033,24 @@ unsigned long node_page_state(struct pglist_data *pgdat,
}
#endif
+/*
+ * Count the number of pages that "struct page" and "struct page_ext" consume:
+ * nr_memmap_boot_pages: # of pages allocated by the boot allocator
+ * nr_memmap_pages: # of pages allocated by the buddy allocator
+ */
+static atomic_long_t nr_memmap_boot_pages = ATOMIC_LONG_INIT(0);
+static atomic_long_t nr_memmap_pages = ATOMIC_LONG_INIT(0);
+
+void memmap_boot_pages_add(long delta)
+{
+ atomic_long_add(delta, &nr_memmap_boot_pages);
+}
+
+void memmap_pages_add(long delta)
+{
+ atomic_long_add(delta, &nr_memmap_pages);
+}
+
#ifdef CONFIG_COMPACTION
struct contig_page_info {
@@ -1255,11 +1273,11 @@ const char * const vmstat_text[] = {
"pgdemote_kswapd",
"pgdemote_direct",
"pgdemote_khugepaged",
- "nr_memmap",
- "nr_memmap_boot",
- /* enum writeback_stat_item counters */
+ /* system-wide enum vm_stat_item counters */
"nr_dirty_threshold",
"nr_dirty_background_threshold",
+ "nr_memmap_pages",
+ "nr_memmap_boot_pages",
#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
/* enum vm_event_item counters */
@@ -1790,7 +1808,7 @@ static const struct seq_operations zoneinfo_op = {
#define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \
NR_VM_NUMA_EVENT_ITEMS + \
NR_VM_NODE_STAT_ITEMS + \
- NR_VM_WRITEBACK_STAT_ITEMS + \
+ NR_VM_STAT_ITEMS + \
(IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \
NR_VM_EVENT_ITEMS : 0))
@@ -1827,7 +1845,9 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
v + NR_DIRTY_THRESHOLD);
- v += NR_VM_WRITEBACK_STAT_ITEMS;
+ v[NR_MEMMAP_PAGES] = atomic_long_read(&nr_memmap_pages);
+ v[NR_MEMMAP_BOOT_PAGES] = atomic_long_read(&nr_memmap_boot_pages);
+ v += NR_VM_STAT_ITEMS;
#ifdef CONFIG_VM_EVENT_COUNTERS
all_vm_events(v);
@@ -2285,25 +2305,3 @@ static int __init extfrag_debug_init(void)
module_init(extfrag_debug_init);
#endif
-
-/*
- * Page metadata size (struct page and page_ext) in pages
- */
-static unsigned long early_perpage_metadata[MAX_NUMNODES] __meminitdata;
-
-void __meminit mod_node_early_perpage_metadata(int nid, long delta)
-{
- early_perpage_metadata[nid] += delta;
-}
-
-void __meminit store_early_perpage_metadata(void)
-{
- int nid;
- struct pglist_data *pgdat;
-
- for_each_online_pgdat(pgdat) {
- nid = pgdat->node_id;
- mod_node_page_state(NODE_DATA(nid), NR_MEMMAP_BOOT,
- early_perpage_metadata[nid]);
- }
-}
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 5d6581ab7c07..2d3163e4da96 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -120,8 +120,6 @@
#define CLASS_BITS 8
#define MAGIC_VAL_BITS 8
-#define MAX(a, b) ((a) >= (b) ? (a) : (b))
-
#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(CONFIG_ZSMALLOC_CHAIN_SIZE, UL))
/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */