author    Mel Gorman <[email protected]>  2015-11-06 16:28:15 -0800
committer Linus Torvalds <[email protected]>  2015-11-06 17:50:42 -0800
commit    46e700abc44ce215acb4341d9702ce3972eda571 (patch)
tree      6730677333bd4a69a014cf87fc6e40207b8cab1c
parent    c9ab0c4fbeb0202bac3548378a977e1536ebe3ca (diff)
mm, page_alloc: remove unnecessary taking of a seqlock when cpusets are disabled
There is a seqcounter that protects against spurious allocation failures
when a task is changing the allowed nodes in a cpuset. There is no need
to check the seqcounter until a cpuset exists.

Signed-off-by: Mel Gorman <[email protected]>
Acked-by: Christoph Lameter <[email protected]>
Acked-by: David Rientjes <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Vitaly Wool <[email protected]>
Cc: Rik van Riel <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
-rw-r--r--  include/linux/cpuset.h  |  6
1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 5a1311942358..85a868ccb493 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -104,6 +104,9 @@ extern void cpuset_print_current_mems_allowed(void);
*/
static inline unsigned int read_mems_allowed_begin(void)
{
+ if (!cpusets_enabled())
+ return 0;
+
return read_seqcount_begin(&current->mems_allowed_seq);
}
@@ -115,6 +118,9 @@ static inline unsigned int read_mems_allowed_begin(void)
*/
static inline bool read_mems_allowed_retry(unsigned int seq)
{
+ if (!cpusets_enabled())
+ return false;
+
return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
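
For context: the reader side of this seqcount is a retry loop around an allocation attempt, so with the short-circuit above the common !cpusets_enabled() case no longer touches current->mems_allowed_seq at all. The following is a minimal userspace sketch of that begin/retry pattern, not kernel code; the toy seqcount, the cpusets_enabled_flag variable and the stubbed allocation attempt are illustrative assumptions, and only the shape of the two helpers mirrors the patched functions.

/*
 * Userspace sketch of the seqcount reader pattern behind
 * read_mems_allowed_begin()/read_mems_allowed_retry(), plus the
 * short-circuit added by this patch.  All names are illustrative.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint mems_allowed_seq;   /* toy seqcount; writers bump it around updates */
static bool cpusets_enabled_flag;      /* stands in for cpusets_enabled() */

static unsigned int read_mems_allowed_begin(void)
{
	unsigned int seq;

	if (!cpusets_enabled_flag)
		return 0;

	/* A writer holds an odd count while updating; wait it out. */
	do {
		seq = atomic_load_explicit(&mems_allowed_seq, memory_order_acquire);
	} while (seq & 1);
	return seq;
}

static bool read_mems_allowed_retry(unsigned int seq)
{
	if (!cpusets_enabled_flag)
		return false;

	/* Retry if a writer ran (or is running) since begin(). */
	return atomic_load_explicit(&mems_allowed_seq, memory_order_acquire) != seq;
}

int main(void)
{
	unsigned int cookie;
	bool ok;

	/* Reader-side retry loop, in the spirit of the page allocator's use. */
	do {
		cookie = read_mems_allowed_begin();
		ok = true;   /* stand-in for the allocation attempt */
	} while (!ok && read_mems_allowed_retry(cookie));

	printf("done, cookie=%u\n", cookie);
	return 0;
}

The chosen fallback values matter: when cpusets are disabled nothing ever writes the task's mems_allowed_seq, so returning 0 from begin() and false from retry() can never cause a missed retry; it only skips a seqcount read that could not observe a change anyway.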