author	Muchun Song <[email protected]>	2024-07-18 16:36:07 +0800
committer	Andrew Morton <[email protected]>	2024-08-07 18:33:56 -0700
commit	5161b48712dcd08ec427c450399d4d1483e21dea (patch)
tree	a7d52661b6d7c484b9b1fd49f7cb779fc05be3a8
parent	7d4df2dad312f270d62fecb0e5c8b086c6d7dcfc (diff)
mm: list_lru: fix UAF for memory cgroup
mem_cgroup_from_slab_obj() is supposed to be called under the RCU read
lock, cgroup_mutex, or some other protection that prevents the returned
memcg from being freed. Fix it by adding the missing rcu read lock.

Found by code inspection.

[[email protected]: only grab rcu lock when necessary, per Vlastimil]
Link: https://lkml.kernel.org/r/[email protected]
Link: https://lkml.kernel.org/r/[email protected]
Fixes: 0a97c01cd20b ("list_lru: allow explicit memcg and NUMA node selection")
Signed-off-by: Muchun Song <[email protected]>
Acked-by: Shakeel Butt <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Nhat Pham <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
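For illustration, a minimal sketch of the locking rule the fix enforces (not code taken from the patch; the wrapper name lru_add_slab_obj() is invented for this example). The memcg returned by mem_cgroup_from_slab_obj() is only guaranteed to stay alive while the caller holds the RCU read lock (or cgroup_mutex, or another reference), so the lookup and the list_lru_add() call that consumes it must sit inside the same RCU read-side critical section:

#include <linux/list_lru.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>

/* Hypothetical caller; only the locking pattern matters here. */
static bool lru_add_slab_obj(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	bool ret;

	rcu_read_lock();
	/*
	 * Look up the memcg and hand it to list_lru_add() before
	 * rcu_read_unlock(); once the read-side critical section ends
	 * the memcg may be freed and using the stale pointer would be
	 * a use-after-free.
	 */
	ret = list_lru_add(lru, item, nid, mem_cgroup_from_slab_obj(item));
	rcu_read_unlock();

	return ret;
}

The patch below applies this pattern to list_lru_add_obj() and list_lru_del_obj(), taking the RCU read lock only when the lru is memcg aware.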
-rw-r--r--	mm/list_lru.c	28
1 file changed, 22 insertions, 6 deletions
diff --git a/mm/list_lru.c b/mm/list_lru.c
index a29d96929d7c..9b7ff06e9d32 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -85,6 +85,7 @@ list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
 }
 #endif /* CONFIG_MEMCG */
 
+/* The caller must ensure the memcg lifetime. */
 bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
 		  struct mem_cgroup *memcg)
 {
@@ -109,14 +110,22 @@ EXPORT_SYMBOL_GPL(list_lru_add);
 
 bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
 {
+	bool ret;
 	int nid = page_to_nid(virt_to_page(item));
-	struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
-				   mem_cgroup_from_slab_obj(item) : NULL;
 
-	return list_lru_add(lru, item, nid, memcg);
+	if (list_lru_memcg_aware(lru)) {
+		rcu_read_lock();
+		ret = list_lru_add(lru, item, nid, mem_cgroup_from_slab_obj(item));
+		rcu_read_unlock();
+	} else {
+		ret = list_lru_add(lru, item, nid, NULL);
+	}
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(list_lru_add_obj);
 
+/* The caller must ensure the memcg lifetime. */
 bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
 		  struct mem_cgroup *memcg)
 {
@@ -139,11 +148,18 @@ EXPORT_SYMBOL_GPL(list_lru_del);
 
 bool list_lru_del_obj(struct list_lru *lru, struct list_head *item)
 {
+	bool ret;
 	int nid = page_to_nid(virt_to_page(item));
-	struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
-				   mem_cgroup_from_slab_obj(item) : NULL;
 
-	return list_lru_del(lru, item, nid, memcg);
+	if (list_lru_memcg_aware(lru)) {
+		rcu_read_lock();
+		ret = list_lru_del(lru, item, nid, mem_cgroup_from_slab_obj(item));
+		rcu_read_unlock();
+	} else {
+		ret = list_lru_del(lru, item, nid, NULL);
+	}
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(list_lru_del_obj);