author		Shachar Raindel <[email protected]>	2015-04-14 15:46:29 -0700
committer	Linus Torvalds <[email protected]>	2015-04-14 16:49:03 -0700
commit		28766805275c12c2298883cece3f98505ac764b4
tree		332f2b844899b5c2f86500bdd837bff047e01eb1
parent		4e047f897771222215ee572e1c0b25e9417376eb
mm: refactor do_wp_page - rewrite the unlock flow
When do_wp_page ends, it needs in several cases to unlock the pages
and ptls it was accessing.

Currently, this cleanup logic is reached through goto jumps, which
makes the function's control flow harder to follow. Readability is
further hampered by the shared unlock path containing a large amount
of logic that only one of the three cases needs.

Using goto for cleanup is generally acceptable. However, moving the
trivial unlocking flows to the relevant call sites allows deeper
refactoring in the next patch.
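The pattern being applied is easy to see outside the kernel. Below is
a minimal standalone sketch (hypothetical names throughout; a pthread
mutex stands in for the page lock and ptl) contrasting goto-based
cleanup with unlocking at the call site:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int resource;

/* Before: every exit funnels through one label, so the shared
 * cleanup block must be correct for every caller's state. */
static int do_work_goto(int input)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (input < 0)
		goto unlock;	/* trivial case, yet it still jumps */
	resource += input;
	ret = 1;
unlock:
	pthread_mutex_unlock(&lock);
	return ret;
}

/* After: the trivial case unlocks and returns at its own call site,
 * leaving the tail of the function free for deeper refactoring. */
static int do_work_direct(int input)
{
	pthread_mutex_lock(&lock);
	if (input < 0) {
		pthread_mutex_unlock(&lock);
		return 0;
	}
	resource += input;
	pthread_mutex_unlock(&lock);
	return 1;
}

int main(void)
{
	printf("goto variant:   %d\n", do_work_goto(3));
	printf("direct variant: %d\n", do_work_direct(-1));
	printf("resource = %d\n", resource);
	return 0;
}

The cost is a few duplicated unlock calls; the benefit, as the commit
message notes, is that the function tail no longer has to serve every
early-exit path.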
Signed-off-by: Shachar Raindel <[email protected]>
Acked-by: Linus Torvalds <[email protected]>
Acked-by: Kirill A. Shutemov <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Acked-by: Andi Kleen <[email protected]>
Acked-by: Haggai Eran <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Naoya Horiguchi <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Peter Feiner <[email protected]>
Cc: Michel Lespinasse <[email protected]>
Reviewed-by: Michal Hocko <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
-rw-r--r--	mm/memory.c	21
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index e70685f3e836..0e28fddafdaf 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2066,7 +2066,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct page *old_page, *new_page = NULL;
 	pte_t entry;
-	int ret = 0;
+	int page_copied = 0;
 	unsigned long mmun_start = 0;	/* For mmu_notifiers */
 	unsigned long mmun_end = 0;	/* For mmu_notifiers */
 	struct mem_cgroup *memcg;
@@ -2101,7 +2101,9 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 						 &ptl);
 			if (!pte_same(*page_table, orig_pte)) {
 				unlock_page(old_page);
-				goto unlock;
+				pte_unmap_unlock(page_table, ptl);
+				page_cache_release(old_page);
+				return 0;
 			}
 			page_cache_release(old_page);
 		}
@@ -2148,7 +2150,9 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 						 &ptl);
 			if (!pte_same(*page_table, orig_pte)) {
 				unlock_page(old_page);
-				goto unlock;
+				pte_unmap_unlock(page_table, ptl);
+				page_cache_release(old_page);
+				return 0;
 			}
 			page_mkwrite = 1;
 		}
@@ -2246,29 +2250,28 @@ gotten:
 
 		/* Free the old page.. */
 		new_page = old_page;
-		ret |= VM_FAULT_WRITE;
+		page_copied = 1;
 	} else
 		mem_cgroup_cancel_charge(new_page, memcg);
 
 	if (new_page)
 		page_cache_release(new_page);
-unlock:
+
 	pte_unmap_unlock(page_table, ptl);
-	if (mmun_end > mmun_start)
-		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 	if (old_page) {
 		/*
 		 * Don't let another task, with possibly unlocked vma,
 		 * keep the mlocked page.
 		 */
-		if ((ret & VM_FAULT_WRITE) && (vma->vm_flags & VM_LOCKED)) {
+		if (page_copied && (vma->vm_flags & VM_LOCKED)) {
 			lock_page(old_page);	/* LRU manipulation */
 			munlock_vma_page(old_page);
 			unlock_page(old_page);
 		}
 		page_cache_release(old_page);
 	}
-	return ret;
+	return page_copied ? VM_FAULT_WRITE : 0;
 oom_free_new:
 	page_cache_release(new_page);
 oom:
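As the diff shows, ret starts at zero and the only bit ever set in it
here is VM_FAULT_WRITE, so the final return page_copied ?
VM_FAULT_WRITE : 0 preserves the function's previous return values
exactly, while the two pte_same-failure paths now return 0 directly.
Dropping the mmun_end > mmun_start guard appears safe for the same
reason: with the early returns in place, the remaining path to the
function tail is the page-copy path, which has already called
mmu_notifier_invalidate_range_start.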