| author | Oleg Nesterov <[email protected]> | 2014-10-09 15:27:45 -0700 |
| --- | --- | --- |
| committer | Linus Torvalds <[email protected]> | 2014-10-09 22:25:56 -0400 |
| commit | 8d90274b3b118c9babeefb1302947f33a1364fb5 (patch) | |
| tree | d500cea8a52cf24049f3f41f4ed24ba2f5d3c71a | |
| parent | f15ca78e33b0bb5acc0c5d9a5d5be3c55c4f0bb7 (diff) | |
mempolicy: sanitize the usage of get_task_policy()
Cleanup + preparation. Every user of get_task_policy() calls it
unconditionally, even if it is not going to use the result.

get_task_policy() is cheap, but the unconditional call still does not look
clean, and the code becomes simpler if get_task_policy() is called only
when the result is really needed.
Note: I hope this is correct, but it is not clear why vma_policy_mof()
doesn't fall back to get_task_policy() if ->get_policy() returns NULL.
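
For reference, a condensed sketch of get_vma_policy() as it ends up after this
patch, reassembled from the hunks below; the shared-policy reference counting
between the first two hunks is elided, so treat this as an illustration of the
"fall back only when needed" pattern rather than the complete function:

```c
/* Condensed sketch, reassembled from the diff below (refcounting elided). */
struct mempolicy *get_vma_policy(struct task_struct *task,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = NULL;

	if (vma) {
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else if (vma->vm_policy)
			pol = vma->vm_policy;	/* shared-policy refcounting elided */
	}

	if (!pol)
		pol = get_task_policy(task);	/* fall back only when needed */

	return pol;
}
```

As the vma_policy_mof() hunk shows, its ->get_policy() branch returns `ret`
directly and never reaches the new fallback, which is what the note above is
asking about.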
Signed-off-by: Oleg Nesterov <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: KOSAKI Motohiro <[email protected]>
Cc: Alexander Viro <[email protected]>
Cc: Cyrill Gorcunov <[email protected]>
Cc: "Eric W. Biederman" <[email protected]>
Cc: "Kirill A. Shutemov" <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: Naoya Horiguchi <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
-rw-r--r-- | mm/mempolicy.c | 25 |
1 file changed, 14 insertions(+), 11 deletions(-)
```diff
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 656db97584f0..b86b08e77b8d 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1621,14 +1621,11 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
 struct mempolicy *get_vma_policy(struct task_struct *task,
 		struct vm_area_struct *vma, unsigned long addr)
 {
-	struct mempolicy *pol = get_task_policy(task);
+	struct mempolicy *pol = NULL;
 
 	if (vma) {
 		if (vma->vm_ops && vma->vm_ops->get_policy) {
-			struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
-									addr);
-			if (vpol)
-				pol = vpol;
+			pol = vma->vm_ops->get_policy(vma, addr);
 		} else if (vma->vm_policy) {
 			pol = vma->vm_policy;
 
@@ -1643,12 +1640,15 @@ struct mempolicy *get_vma_policy(struct task_struct *task,
 		}
 	}
 
+	if (!pol)
+		pol = get_task_policy(task);
+
 	return pol;
 }
 
 bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma)
 {
-	struct mempolicy *pol = get_task_policy(task);
+	struct mempolicy *pol = NULL;
 
 	if (vma) {
 		if (vma->vm_ops && vma->vm_ops->get_policy) {
@@ -1660,11 +1660,14 @@ bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma)
 			mpol_cond_put(pol);
 
 			return ret;
-		} else if (vma->vm_policy) {
-			pol = vma->vm_policy;
 		}
+
+		pol = vma->vm_policy;
 	}
 
+	if (!pol)
+		pol = get_task_policy(task);
+
 	return pol->flags & MPOL_F_MOF;
 }
 
@@ -2068,12 +2071,12 @@ retry_cpuset:
  */
 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 {
-	struct mempolicy *pol = get_task_policy(current);
+	struct mempolicy *pol = &default_policy;
 	struct page *page;
 	unsigned int cpuset_mems_cookie;
 
-	if (in_interrupt() || (gfp & __GFP_THISNODE))
-		pol = &default_policy;
+	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
+		pol = get_task_policy(current);
 
 retry_cpuset:
 	cpuset_mems_cookie = read_mems_allowed_begin();
```