Diffstat (limited to 'mm/vma.h')
-rw-r--r-- | mm/vma.h | 103
1 file changed, 6 insertions(+), 97 deletions(-)
@@ -42,8 +42,7 @@ struct vma_munmap_struct {
 	int vma_count;			/* Number of vmas that will be removed */
 	bool unlock;			/* Unlock after the munmap */
 	bool clear_ptes;		/* If there are outstanding PTE to be cleared */
-	bool closed_vm_ops;		/* call_mmap() was encountered, so vmas may be closed */
-	/* 1 byte hole */
+	/* 2 byte hole */
 	unsigned long nr_pages;		/* Number of pages being removed */
 	unsigned long locked_vm;	/* Number of locked pages */
 	unsigned long nr_accounted;	/* Number of VM_ACCOUNT pages */
@@ -166,100 +165,6 @@ static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
 	return 0;
 }
 
-#ifdef CONFIG_MMU
-/*
- * init_vma_munmap() - Initializer wrapper for vma_munmap_struct
- * @vms: The vma munmap struct
- * @vmi: The vma iterator
- * @vma: The first vm_area_struct to munmap
- * @start: The aligned start address to munmap
- * @end: The aligned end address to munmap
- * @uf: The userfaultfd list_head
- * @unlock: Unlock after the operation.  Only unlocked on success
- */
-static inline void init_vma_munmap(struct vma_munmap_struct *vms,
-		struct vma_iterator *vmi, struct vm_area_struct *vma,
-		unsigned long start, unsigned long end, struct list_head *uf,
-		bool unlock)
-{
-	vms->vmi = vmi;
-	vms->vma = vma;
-	if (vma) {
-		vms->start = start;
-		vms->end = end;
-	} else {
-		vms->start = vms->end = 0;
-	}
-	vms->unlock = unlock;
-	vms->uf = uf;
-	vms->vma_count = 0;
-	vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
-	vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
-	vms->unmap_start = FIRST_USER_ADDRESS;
-	vms->unmap_end = USER_PGTABLES_CEILING;
-	vms->clear_ptes = false;
-	vms->closed_vm_ops = false;
-}
-#endif
-
-int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
-		struct ma_state *mas_detach);
-
-void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
-		struct ma_state *mas_detach);
-
-void vms_clean_up_area(struct vma_munmap_struct *vms,
-		struct ma_state *mas_detach);
-
-/*
- * reattach_vmas() - Undo any munmap work and free resources
- * @mas_detach: The maple state with the detached maple tree
- *
- * Reattach any detached vmas and free up the maple tree used to track the vmas.
- */
-static inline void reattach_vmas(struct ma_state *mas_detach)
-{
-	struct vm_area_struct *vma;
-
-	mas_set(mas_detach, 0);
-	mas_for_each(mas_detach, vma, ULONG_MAX)
-		vma_mark_detached(vma, false);
-
-	__mt_destroy(mas_detach->tree);
-}
-
-/*
- * vms_abort_munmap_vmas() - Undo as much as possible from an aborted munmap()
- * operation.
- * @vms: The vma unmap structure
- * @mas_detach: The maple state with the detached maple tree
- *
- * Reattach any detached vmas, free up the maple tree used to track the vmas.
- * If that's not possible because the ptes are cleared (and vm_ops->closed() may
- * have been called), then a NULL is written over the vmas and the vmas are
- * removed (munmap() completed).
- */
-static inline void vms_abort_munmap_vmas(struct vma_munmap_struct *vms,
-		struct ma_state *mas_detach)
-{
-	struct ma_state *mas = &vms->vmi->mas;
-	if (!vms->nr_pages)
-		return;
-
-	if (vms->clear_ptes)
-		return reattach_vmas(mas_detach);
-
-	/*
-	 * Aborting cannot just call the vm_ops open() because they are often
-	 * not symmetrical and state data has been lost.  Resort to the old
-	 * failure method of leaving a gap where the MAP_FIXED mapping failed.
-	 */
-	mas_set_range(mas, vms->start, vms->end - 1);
-	mas_store_gfp(mas, NULL, GFP_KERNEL|__GFP_NOFAIL);
-	/* Clean up the insertion of the unfortunate gap */
-	vms_complete_munmap_vmas(vms, mas_detach);
-}
-
 int
 do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
 		    struct mm_struct *mm, unsigned long start,
@@ -269,7 +174,7 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
 		  unsigned long start, size_t len, struct list_head *uf,
 		  bool unlock);
 
-void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed);
+void remove_vma(struct vm_area_struct *vma, bool unreachable);
 
 void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
 		struct vm_area_struct *prev, struct vm_area_struct *next);
@@ -338,6 +243,10 @@ bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
 int mm_take_all_locks(struct mm_struct *mm);
 void mm_drop_all_locks(struct mm_struct *mm);
 
+unsigned long __mmap_region(struct file *file, unsigned long addr,
+		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
+		struct list_head *uf);
+
 static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
 {
 	/*
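The last hunk above newly exposes __mmap_region() from mm/vma.h. As a caller-side sketch only (not the kernel's actual mmap_region() implementation), the wrapper name do_example_mmap() and its error handling below are illustrative assumptions; the IS_ERR_VALUE() check reflects the usual convention that mmap-style helpers return either a mapped address or a negative errno packed into an unsigned long.

/* Illustrative sketch only -- not the real mm/mmap.c code. */
#include <linux/err.h>		/* IS_ERR_VALUE() */
#include <linux/mm_types.h>	/* vm_flags_t, struct list_head users */
#include "vma.h"		/* __mmap_region() declaration added above */

static unsigned long do_example_mmap(struct file *file, unsigned long addr,
		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
		struct list_head *uf)
{
	unsigned long ret;

	/* Presumed convention: mapped address on success, or a negative
	 * errno encoded in the unsigned long on failure. */
	ret = __mmap_region(file, addr, len, vm_flags, pgoff, uf);
	if (IS_ERR_VALUE(ret)) {
		/* A real caller would unwind its own state here before
		 * propagating the error. */
		return ret;
	}

	return ret;
}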