aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJohannes Weiner <[email protected]>2024-01-29 20:34:38 -0500
committerAndrew Morton <[email protected]>2024-02-07 21:20:35 -0800
commit2e601e1e8e4b330020a346c55ba111d49e0b188e (patch)
tree7516700287f584302bb93ce534f3b67df5405b7d
parent4c2da3188b848d33c26d7f0f8b14f3150331c923 (diff)
mm: zswap: fix objcg use-after-free in entry destruction
In the per-memcg LRU universe, LRU removal uses entry->objcg to determine which list count needs to be decreased. Drop the objcg reference after updating the LRU, to fix a possible use-after-free. Link: https://lkml.kernel.org/r/[email protected] Fixes: a65b0e7607cc ("zswap: make shrinking memcg-aware") Signed-off-by: Johannes Weiner <[email protected]> Acked-by: Yosry Ahmed <[email protected]> Reviewed-by: Nhat Pham <[email protected]> Reviewed-by: Chengming Zhou <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
-rw-r--r--mm/zswap.c8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c
index ca25b676048e..0a94b197ed32 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -536,10 +536,6 @@ static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
*/
static void zswap_free_entry(struct zswap_entry *entry)
{
- if (entry->objcg) {
- obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
- obj_cgroup_put(entry->objcg);
- }
if (!entry->length)
atomic_dec(&zswap_same_filled_pages);
else {
@@ -548,6 +544,10 @@ static void zswap_free_entry(struct zswap_entry *entry)
atomic_dec(&entry->pool->nr_stored);
zswap_pool_put(entry->pool);
}
+ if (entry->objcg) {
+ obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
+ obj_cgroup_put(entry->objcg);
+ }
zswap_entry_cache_free(entry);
atomic_dec(&zswap_stored_pages);
zswap_update_total_size();