 mm/mmap.c |  3 +--
 mm/vma.c  |  4 ++--
 mm/vma.h  | 80 +++++++++++++++++++++++++++++++++++++++-----------
 3 files changed, 61 insertions(+), 26 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 405e0432c78e..e1e5c78b6c3c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1623,8 +1623,7 @@ unacct_error:
vm_unacct_memory(charged);
abort_munmap:
- if (vms.nr_pages)
- abort_munmap_vmas(&mas_detach, vms.closed_vm_ops);
+ vms_abort_munmap_vmas(&vms, &mas_detach);
gather_failed:
validate_mm(mm);
return error;
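
With this change the error path no longer open-codes the vms.nr_pages check; vms_abort_munmap_vmas() returns early when nothing was detached, reattaches the vmas if the ptes are still intact, and otherwise finishes the unmap, leaving a gap where the MAP_FIXED mapping failed. The user-visible consequence, per the comment added in mm/vma.h below, is that a MAP_FIXED mmap() that fails partway may leave the covered range unmapped instead of resurrecting the old vmas through vm_ops->open(). A hedged userspace illustration of that contract (the mid-flight failure itself is hard to provoke without error injection):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* Establish a writable anonymous mapping. */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* Attempt to replace it in place. */
	void *q = mmap(p, 4096, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	if (q == MAP_FAILED) {
		/*
		 * After this patch, a partial failure may leave
		 * [p, p + 4096) as an unmapped gap: the old mapping is
		 * not guaranteed to survive a failed MAP_FIXED request,
		 * so dereferencing p here could fault.
		 */
		perror("mmap(MAP_FIXED)");
		return 1;
	}
	return 0;
}
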
diff --git a/mm/vma.c b/mm/vma.c
index 648c58da8ad4..d2d71d659d1e 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -878,7 +878,7 @@ userfaultfd_error:
munmap_gather_failed:
end_split_failed:
modify_vma_failed:
- abort_munmap_vmas(mas_detach, /* closed = */ false);
+ reattach_vmas(mas_detach);
start_split_failed:
map_count_exceeded:
return error;
@@ -923,7 +923,7 @@ int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
return 0;
clear_tree_failed:
- abort_munmap_vmas(&mas_detach, /* closed = */ false);
+ reattach_vmas(&mas_detach);
gather_failed:
validate_mm(mm);
return error;
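
Both converted sites sit on failure paths reached before any ptes are cleared or vm_ops->close() is called, which is exactly when plain reattachment is still safe; the rename from abort_munmap_vmas() to reattach_vmas() makes that explicit. For readers outside mm/: mas_detach is a maple state over a stack-local tree holding the vmas detached so far. Its setup in do_vmi_align_munmap() looks roughly like the sketch below (paraphrased, not a verbatim excerpt):

	struct maple_tree mt_detach;
	MA_STATE(mas_detach, &mt_detach, 0, 0);

	/* Private on-stack tree sharing the lock class of the real vma tree. */
	mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
	mt_on_stack(mt_detach);

	/*
	 * Vmas are gathered into mt_detach; any failure before the ptes are
	 * cleared unwinds with reattach_vmas(&mas_detach).
	 */
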
diff --git a/mm/vma.h b/mm/vma.h
index 64b44f5a0a11..37b4b5d00b99 100644
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -82,6 +82,22 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long start, unsigned long end, pgoff_t pgoff);
+static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
+ struct vm_area_struct *vma, gfp_t gfp)
+{
+ if (vmi->mas.status != ma_start &&
+ ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
+ vma_iter_invalidate(vmi);
+
+ __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
+ mas_store_gfp(&vmi->mas, vma, gfp);
+ if (unlikely(mas_is_err(&vmi->mas)))
+ return -ENOMEM;
+
+ return 0;
+}
+
#ifdef CONFIG_MMU
/*
* init_vma_munmap() - Initializer wrapper for vma_munmap_struct
@@ -129,24 +145,60 @@ void vms_clean_up_area(struct vma_munmap_struct *vms,
struct ma_state *mas_detach, bool mm_wr_locked);
/*
- * abort_munmap_vmas - Undo any munmap work and free resources
+ * reattach_vmas() - Undo any munmap work and free resources
+ * @mas_detach: The maple state with the detached maple tree
*
* Reattach any detached vmas and free up the maple tree used to track the vmas.
*/
-static inline void abort_munmap_vmas(struct ma_state *mas_detach, bool closed)
+static inline void reattach_vmas(struct ma_state *mas_detach)
{
struct vm_area_struct *vma;
mas_set(mas_detach, 0);
- mas_for_each(mas_detach, vma, ULONG_MAX) {
+ mas_for_each(mas_detach, vma, ULONG_MAX)
vma_mark_detached(vma, false);
- if (closed && vma->vm_ops && vma->vm_ops->open)
- vma->vm_ops->open(vma);
- }
__mt_destroy(mas_detach->tree);
}
+/*
+ * vms_abort_munmap_vmas() - Undo as much as possible from an aborted munmap()
+ * operation.
+ * @vms: The vma unmap structure
+ * @mas_detach: The maple state with the detached maple tree
+ *
+ * Reattach any detached vmas and free up the maple tree used to track the
+ * vmas.  If that's not possible because the ptes are cleared (and
+ * vm_ops->close() may have been called), then NULL is written over the vmas
+ * and the vmas are removed (the munmap() effectively completes).
+ */
+static inline void vms_abort_munmap_vmas(struct vma_munmap_struct *vms,
+ struct ma_state *mas_detach)
+{
+ struct ma_state *mas = &vms->vmi->mas;
+ if (!vms->nr_pages)
+ return;
+
+ if (vms->clear_ptes)
+ return reattach_vmas(mas_detach);
+
+ /*
+ * Aborting cannot simply call vm_ops->open() because open() and close()
+ * are often not symmetrical and state data has been lost. Resort to the
+ * old failure method of leaving a gap where the MAP_FIXED mapping failed.
+ */
+ mas_set_range(mas, vms->start, vms->end - 1);
+ if (unlikely(mas_store_gfp(mas, NULL, GFP_KERNEL))) {
+ pr_warn_once("%s: (%d) Unable to abort munmap() operation\n",
+ current->comm, current->pid);
+ /* Leaving vmas detached and in-tree may hamper recovery */
+ reattach_vmas(mas_detach);
+ } else {
+ /* Clean up the insertion of the unfortunate gap */
+ vms_complete_munmap_vmas(vms, mas_detach);
+ }
+}
+
int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
struct mm_struct *mm, unsigned long start,
@@ -299,22 +351,6 @@ static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
return mas_prev(&vmi->mas, min);
}
-static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
- struct vm_area_struct *vma, gfp_t gfp)
-{
- if (vmi->mas.status != ma_start &&
- ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
- vma_iter_invalidate(vmi);
-
- __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
- mas_store_gfp(&vmi->mas, vma, gfp);
- if (unlikely(mas_is_err(&vmi->mas)))
- return -ENOMEM;
-
- return 0;
-}
-
-
/*
 * These three helpers classify VMAs for virtual memory accounting.
*/
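
Finally, the relocated vma_iter_store_gfp() is the fallible counterpart of vma_iter_store(): it invalidates a stale iterator position, pins the range to [vm_start, vm_end - 1], stores the vma, and surfaces maple tree node allocation failure as -ENOMEM. A minimal usage sketch; insert_vma_locked() is a hypothetical caller for illustration, not a kernel function:

/*
 * Hypothetical caller, assuming the mmap write lock is held: write @vma
 * over its range in the vma tree, propagating allocation failure.
 */
static int insert_vma_locked(struct vma_iterator *vmi,
			     struct vm_area_struct *vma)
{
	int error = vma_iter_store_gfp(vmi, vma, GFP_KERNEL);

	if (error)
		return error;	/* maple tree node allocation failed */

	return 0;
}
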