Diffstat (limited to 'mm/hugetlb.c')
 mm/hugetlb.c | 38 ++++++++++++++++++++++----------------
 1 file changed, 22 insertions(+), 16 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f294db835f4b..b34f50156f7e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -31,6 +31,7 @@
#include <linux/llist.h>
#include <linux/cma.h>
#include <linux/migrate.h>
+#include <linux/nospec.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
@@ -1320,7 +1321,9 @@ static void __destroy_compound_gigantic_page(struct page *page,
}
set_compound_order(page, 0);
+#ifdef CONFIG_64BIT
page[1].compound_nr = 0;
+#endif
__ClearPageHead(page);
}
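The explicit zeroing above is needed because set_compound_order(page, 0) does not reset the cached subpage count: it recomputes compound_nr as 1U << order, and 1U << 0 is 1, not 0. A sketch of the helper as this tree plausibly defines it (an assumption, consistent with the CONFIG_64BIT guard; compound_nr only exists in struct page on 64-bit here):

static inline void set_compound_order(struct page *page, unsigned int order)
{
	page[1].compound_order = order;
#ifdef CONFIG_64BIT
	/* For order 0 this leaves compound_nr at 1, hence the explicit
	 * page[1].compound_nr = 0 in the hunk above. */
	page[1].compound_nr = 1U << order;
#endif
}

The same reset is repeated in the prep_compound_gigantic_page() error path in the next hunk.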
@@ -1812,7 +1815,9 @@ out_error:
for (; j < nr_pages; j++, p = mem_map_next(p, page, j))
__ClearPageReserved(p);
set_compound_order(page, 0);
+#ifdef CONFIG_64BIT
page[1].compound_nr = 0;
+#endif
__ClearPageHead(page);
return false;
}
@@ -1854,6 +1859,7 @@ int PageHeadHuge(struct page *page_head)
return page_head[1].compound_dtor == HUGETLB_PAGE_DTOR;
}
+EXPORT_SYMBOL_GPL(PageHeadHuge);
/*
* Find and lock address space (mapping) in write mode.
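With PageHeadHuge() now exported GPL-only, modules can query whether a page is the head of a hugetlb page. A minimal hypothetical module using the export (the phh_demo_* names are illustrative, not part of this patch):

#include <linux/module.h>
#include <linux/mm.h>

/* A freshly allocated order-0 page is not a hugetlb head, so this prints 0. */
static int __init phh_demo_init(void)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);

	if (!page)
		return -ENOMEM;
	pr_info("PageHeadHuge: %d\n", PageHeadHuge(page));
	__free_pages(page, 0);
	return 0;
}

static void __exit phh_demo_exit(void)
{
}

module_init(phh_demo_init);
module_exit(phh_demo_exit);
MODULE_LICENSE("GPL");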
@@ -3498,8 +3504,7 @@ static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
static struct kobj_attribute _name##_attr = __ATTR_WO(_name)
#define HSTATE_ATTR(_name) \
- static struct kobj_attribute _name##_attr = \
- __ATTR(_name, 0644, _name##_show, _name##_store)
+ static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
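No behavioral change in this hunk: __ATTR_RW() in include/linux/sysfs.h expands to exactly the 0644 show/store attribute that was open-coded before:

#define __ATTR_RW(_name) __ATTR(_name, 0644, _name##_show, _name##_store)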
@@ -4161,7 +4166,7 @@ static int __init hugepages_setup(char *s)
}
if (tmp >= nr_online_nodes)
goto invalid;
- node = tmp;
+ node = array_index_nospec(tmp, nr_online_nodes);
p += count + 1;
/* Parse hugepages */
if (sscanf(p, "%lu%n", &tmp, &count) != 1)
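array_index_nospec(), provided by the <linux/nospec.h> include added at the top of this diff, clamps an already-bounds-checked index so that a mispredicted check cannot be exploited to load out of bounds under speculation (Spectre v1). The general pattern, sketched with hypothetical names:

#include <linux/nospec.h>

static int table_lookup(const int *table, unsigned long idx, unsigned long size)
{
	if (idx >= size)
		return -EINVAL;
	/* Even if the branch above is mispredicted, idx is forced into
	 * [0, size) before being used as an array index. */
	idx = array_index_nospec(idx, size);
	return table[idx];
}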
@@ -4637,7 +4642,6 @@ static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
vma->vm_page_prot));
}
entry = pte_mkyoung(entry);
- entry = pte_mkhuge(entry);
entry = arch_make_huge_pte(entry, shift, vma->vm_flags);
return entry;
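Dropping pte_mkhuge() here relies on arch_make_huge_pte() applying it instead. Assuming this tree carries the upstream cleanup that folded the two together, the generic fallback in include/linux/hugetlb.h reads:

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
				       vm_flags_t flags)
{
	return pte_mkhuge(entry);
}
#endif

The hugetlb_change_protection() hunk further down drops its pte_mkhuge() call for the same reason.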
@@ -5013,7 +5017,7 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
set_page_dirty(page);
hugetlb_count_sub(pages_per_huge_page(h), mm);
- page_remove_rmap(page, true);
+ page_remove_rmap(page, vma, true);
spin_unlock(ptl);
tlb_remove_page_size(tlb, page, huge_page_size(h));
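page_remove_rmap() has grown a vm_area_struct argument in this tree; the call sites here and in the COW path below simply pass the vma through. Assumed prototype (the vma lets the rmap code do per-VMA work such as mlock bookkeeping; the bool still means "compound page"):

void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
		      bool compound);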
@@ -5258,7 +5262,7 @@ retry_avoidcopy:
/* Break COW */
huge_ptep_clear_flush(vma, haddr, ptep);
mmu_notifier_invalidate_range(mm, range.start, range.end);
- page_remove_rmap(old_page, true);
+ page_remove_rmap(old_page, vma, true);
hugepage_add_new_anon_rmap(new_page, vma, haddr);
set_huge_pte_at(mm, haddr, ptep,
make_huge_pte(vma, new_page, 1));
@@ -5341,6 +5345,7 @@ static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
pgoff_t idx,
unsigned int flags,
unsigned long haddr,
+ unsigned long addr,
unsigned long reason)
{
vm_fault_t ret;
@@ -5348,6 +5353,7 @@ static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
struct vm_fault vmf = {
.vma = vma,
.address = haddr,
+ .real_address = addr,
.flags = flags,
/*
@@ -5416,7 +5422,7 @@ retry:
/* Check for page in userfault range */
if (userfaultfd_missing(vma)) {
ret = hugetlb_handle_userfault(vma, mapping, idx,
- flags, haddr,
+ flags, haddr, address,
VM_UFFD_MISSING);
goto out;
}
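The userfault hunks thread the exact faulting address through next to the huge-page-aligned one, so the vm_fault handed to userfaultfd carries an unmasked address. The relationship between the two fields, as assumed by this change:

	unsigned long haddr = address & huge_page_mask(h);	/* huge-page aligned */

	struct vm_fault vmf = {
		.address      = haddr,		/* masked address, as before */
		.real_address = address,	/* exact faulting address */
	};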
@@ -5480,7 +5486,7 @@ retry:
unlock_page(page);
put_page(page);
ret = hugetlb_handle_userfault(vma, mapping, idx,
- flags, haddr,
+ flags, haddr, address,
VM_UFFD_MINOR);
goto out;
}
@@ -5817,7 +5823,8 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
*pagep = NULL;
goto out;
}
- folio_copy(page_folio(page), page_folio(*pagep));
+ copy_user_huge_page(page, *pagep, dst_addr, dst_vma,
+ pages_per_huge_page(h));
put_page(*pagep);
*pagep = NULL;
}
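Switching from folio_copy() back to copy_user_huge_page() gives the copy loop the faulting user address and the vma. Assuming the usual behavior of this helper, it copies subpage by subpage with cond_resched() in between and uses the address hint to copy the faulting subpage last so it stays cache-hot. Assumed prototype:

void copy_user_huge_page(struct page *dst, struct page *src,
			 unsigned long addr_hint,
			 struct vm_area_struct *vma,
			 unsigned int pages_per_huge_page);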
@@ -6071,7 +6078,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (pages) {
/*
- * try_grab_compound_head() should always succeed here,
+ * try_grab_folio() should always succeed here,
* because: a) we hold the ptl lock, and b) we've just
* checked that the huge page is present in the page
* tables. If the huge page is present, then the tail
@@ -6080,9 +6087,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
* any way. So this page must be available at this
* point, unless the page refcount overflowed:
*/
- if (WARN_ON_ONCE(!try_grab_compound_head(pages[i],
- refs,
- flags))) {
+ if (WARN_ON_ONCE(!try_grab_folio(pages[i], refs,
+ flags))) {
spin_unlock(ptl);
remainder = 0;
err = -ENOMEM;
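try_grab_folio() is the folio-based replacement for try_grab_compound_head() in this tree. It returns the grabbed folio, or NULL on failure (for instance when the refcount would saturate), so the WARN_ON_ONCE(!...) check keeps working unchanged. Assumed signature:

struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);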
@@ -6171,7 +6177,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned int shift = huge_page_shift(hstate_vma(vma));
old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
- pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
+ pte = huge_pte_modify(old_pte, newprot);
pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
pages++;
@@ -6889,9 +6895,9 @@ static int __init cmdline_parse_hugetlb_cma(char *p)
break;
if (s[count] == ':') {
- nid = tmp;
- if (nid < 0 || nid >= MAX_NUMNODES)
+ if (tmp >= MAX_NUMNODES)
break;
+ nid = array_index_nospec(tmp, MAX_NUMNODES);
s += count + 1;
tmp = memparse(s, &s);