-rw-r--r--	fs/userfaultfd.c	| 11
-rw-r--r--	include/linux/hugetlb.h	|  3
2 files changed, 2 insertions, 12 deletions
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index b3ed7207df7e..68cdd89c97a3 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -371,15 +371,8 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	unsigned int blocking_state;
 
 	/*
-	 * We don't do userfault handling for the final child pid update.
-	 *
-	 * We also don't do userfault handling during
-	 * coredumping. hugetlbfs has the special
-	 * hugetlb_follow_page_mask() to skip missing pages in the
-	 * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
-	 * the no_page_table() helper in follow_page_mask(), but the
-	 * shmem_vm_ops->fault method is invoked even during
-	 * coredumping and it ends up here.
+	 * We don't do userfault handling for the final child pid update
+	 * and when coredumping (faults triggered by get_dump_page()).
 	 */
 	if (current->flags & (PF_EXITING|PF_DUMPCORE))
 		goto out;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 9b7bcfce6920..3100a52ceb73 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -127,9 +127,6 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 			     unsigned long len);
 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
 			    struct vm_area_struct *, struct vm_area_struct *);
-struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
-				      unsigned long address, unsigned int flags,
-				      unsigned int *page_mask);
 void unmap_hugepage_range(struct vm_area_struct *, unsigned long,
 			  unsigned long, struct page *, zap_flags_t);
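
The shortened comment relies on coredump faults now funnelling through get_dump_page() (FOLL_DUMP semantics, missing pages are simply skipped), so handle_userfault() only needs the PF_EXITING/PF_DUMPCORE bailout. The sketch below is illustrative only, not the actual handle_userfault() body; the function name is made up and the VM_FAULT_SIGBUS default is assumed from the surrounding kernel code:

	/*
	 * Sketch of the bailout this patch documents: a task that is
	 * exiting or dumping core never waits on userspace here and
	 * instead returns the default fault result.
	 */
	static vm_fault_t userfault_bailout_sketch(void)
	{
		vm_fault_t ret = VM_FAULT_SIGBUS;	/* assumed default, as in handle_userfault() */

		if (current->flags & (PF_EXITING | PF_DUMPCORE))
			goto out;

		/* ... normal userfaultfd notification and wait path ... */
	out:
		return ret;
	}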