author     Matthew Wilcox (Oracle) <[email protected]>  2022-09-06 19:48:54 +0000
committer  Andrew Morton <[email protected]>       2022-09-26 19:46:20 -0700
commit     a3884621163b7d7fab89b44461b2a48a29c5cc9a
tree       8416c01d942fbd00b1a03ec058e32576e89412e9
parent     e7b6b990e524f60994da70cf5a22159b1e88ce57
x86: remove vma linked list walks
Use the VMA iterator instead.
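The conversion pattern is the same as elsewhere in this series: declare a VMA_ITERATOR() and walk it with for_each_vma() rather than following mm->mmap/vm_next. As a minimal sketch of the new-style walk (the helper name count_mm_vmas() is hypothetical and not part of this patch; the locking discipline mirrors the call sites below):

#include <linux/mm.h>

/* Hypothetical helper: count the VMAs of an mm using the VMA iterator. */
static int count_mm_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);	/* start the walk at address 0 */
	int nr = 0;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma)		/* was: for (vma = mm->mmap; vma; vma = vma->vm_next) */
		nr++;
	mmap_read_unlock(mm);

	return nr;
}

Both call sites changed here follow that shape, keeping the existing mmap_lock coverage around the walk.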
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Signed-off-by: Liam R. Howlett <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Reviewed-by: Davidlohr Bueso <[email protected]>
Tested-by: Yu Zhao <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: David Howells <[email protected]>
Cc: SeongJae Park <[email protected]>
Cc: Sven Schnelle <[email protected]>
Cc: Will Deacon <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
 arch/x86/entry/vdso/vma.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 1000d457c332..6292b960037b 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -127,17 +127,17 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
 {
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
+	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);
-
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+	for_each_vma(vmi, vma) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (vma_is_special_mapping(vma, &vvar_mapping))
			zap_page_range(vma, vma->vm_start, size);
	}
-
	mmap_read_unlock(mm);
+
	return 0;
 }
 #else
@@ -354,6 +354,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
 {
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
+	VMA_ITERATOR(vmi, mm, 0);

	mmap_write_lock(mm);
	/*
@@ -363,7 +364,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
	 * We could search vma near context.vdso, but it's a slowpath,
	 * so let's explicitly check all VMAs to be completely sure.
	 */
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
		    vma_is_special_mapping(vma, &vvar_mapping)) {
			mmap_write_unlock(mm);