author     Vlastimil Babka <[email protected]>     2020-04-01 21:04:30 -0700
committer  Linus Torvalds <[email protected]>      2020-04-02 09:35:26 -0700
commit     667c790169e2b9ebd33188c9285d30ee96844a02 (patch)
tree       532e1f45de49a7e52d8761f9cf6dfb83206186b5
parent     3202fa62fb43087387c65bfa9c100feffac74aa6 (diff)
revert "topology: add support for node_to_mem_node() to determine the fallback node"
This reverts commit ad2c8144418c6a81cefe65379fd47bbe8344cef2.
The function node_to_mem_node() was introduced by that commit for use in SLUB
on systems with memoryless nodes, but it turned out to be unreliable on some
architectures/configurations, and a simpler solution than fixing it up exists.
Thus commit 0715e6c516f1 ("mm, slub: prevent kmalloc_node crashes and
memory leaks") removed the only user of node_to_mem_node() and we can
revert the commit that introduced the function.
Signed-off-by: Vlastimil Babka <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Reviewed-by: Srikar Dronamraju <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Bharata B Rao <[email protected]>
Cc: Christopher Lameter <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Kirill Tkhai <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Michael Ellerman <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Nathan Lynch <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: PUVICHAKRAVARTHY RAMACHANDRAN <[email protected]>
Cc: Sachin Sant <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
-rw-r--r--  include/linux/topology.h | 17
-rw-r--r--  mm/page_alloc.c          |  1
2 files changed, 0 insertions, 18 deletions
diff --git a/include/linux/topology.h b/include/linux/topology.h
index eb2fe6edd73c..608fa4aadf0e 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -130,20 +130,11 @@ static inline int numa_node_id(void)
  * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem().
  */
 DECLARE_PER_CPU(int, _numa_mem_);
-extern int _node_numa_mem_[MAX_NUMNODES];

 #ifndef set_numa_mem
 static inline void set_numa_mem(int node)
 {
 	this_cpu_write(_numa_mem_, node);
-	_node_numa_mem_[numa_node_id()] = node;
-}
-#endif
-
-#ifndef node_to_mem_node
-static inline int node_to_mem_node(int node)
-{
-	return _node_numa_mem_[node];
 }
 #endif

@@ -166,7 +157,6 @@ static inline int cpu_to_mem(int cpu)
 static inline void set_cpu_numa_mem(int cpu, int node)
 {
 	per_cpu(_numa_mem_, cpu) = node;
-	_node_numa_mem_[cpu_to_node(cpu)] = node;
 }
 #endif

@@ -180,13 +170,6 @@ static inline int numa_mem_id(void)
 }
 #endif

-#ifndef node_to_mem_node
-static inline int node_to_mem_node(int node)
-{
-	return node;
-}
-#endif
-
 #ifndef cpu_to_mem
 static inline int cpu_to_mem(int cpu)
 {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3c4eb750a199..6e7e9c1d6caa 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -95,7 +95,6 @@ DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);
  */
 DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
-int _node_numa_mem_[MAX_NUMNODES];
 #endif

 /* work_structs for global per-cpu drains */
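For context, a purely illustrative sketch (not kernel code, and not the actual SLUB fix): the removed node_to_mem_node() resolved a possibly memoryless node through the precomputed _node_numa_mem_[] table, whereas the simpler approach referenced in the message lets the caller decide at allocation time. The userspace C below contrasts the two styles; MAX_NUMNODES, the node_has_memory[] data, and the fallback policy are all invented for the example.

#include <stdio.h>
#include <stdbool.h>

#define MAX_NUMNODES 4
#define NO_NODE (-1)

/* Pretend node 1 is memoryless; which nodes lack memory is invented here. */
static const bool node_has_memory[MAX_NUMNODES] = { true, false, true, true };

/* Style 1: a precomputed fallback table, analogous to the _node_numa_mem_[]
 * array removed by this revert. Every entry must be kept correct up front. */
static int node_numa_mem[MAX_NUMNODES];

static void init_fallback_table(void)
{
	for (int node = 0; node < MAX_NUMNODES; node++) {
		int fallback = node;

		/* Toy policy: walk forward to the next node with memory. */
		while (!node_has_memory[fallback])
			fallback = (fallback + 1) % MAX_NUMNODES;
		node_numa_mem[node] = fallback;
	}
}

/* Shaped like the removed node_to_mem_node(): a plain table lookup, which is
 * only as reliable as the initialization of every possible table entry. */
static int node_to_mem_node(int node)
{
	return node_numa_mem[node];
}

/* Style 2: no precomputed state. Check the requested node when allocating
 * and drop the node preference if it has no memory, in the spirit of the
 * simpler approach the commit message refers to. */
static int effective_alloc_node(int requested)
{
	return node_has_memory[requested] ? requested : NO_NODE;
}

int main(void)
{
	init_fallback_table();
	for (int node = 0; node < MAX_NUMNODES; node++)
		printf("node %d: table lookup -> %d, on-demand check -> %d\n",
		       node, node_to_mem_node(node), effective_alloc_node(node));
	return 0;
}

The table variant silently returns whatever happens to be stored for a node that was never (or belatedly) initialized, which appears to be the kind of unreliability the commit message alludes to; the on-demand check has no such hidden state.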