 include/linux/slab.h            | 29 ++++++++++
 mm/slab.h                       |  7 +++
 mm/slub.c                       | 53 ++++++++++++++++++
 net/ipv4/inet_connection_sock.c |  5 ++-
 4 files changed, 92 insertions(+), 2 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index eb2bf4629157..3be2a5ed4936 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -547,6 +547,35 @@ void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
gfp_t gfpflags) __assume_slab_alignment __malloc;
#define kmem_cache_alloc_lru(...) alloc_hooks(kmem_cache_alloc_lru_noprof(__VA_ARGS__))
+/**
+ * kmem_cache_charge - memcg charge an already allocated slab object
+ * @objp: address of the slab object to memcg charge
+ * @gfpflags: describe the allocation context
+ *
+ * kmem_cache_charge allows charging a slab object to the current memcg,
+ * primarily in cases where charging at allocation time might not be possible
+ * because the target memcg is not known (e.g. in softirq context).
+ *
+ * The objp should be a pointer returned by the slab allocator functions like
+ * kmalloc (with __GFP_ACCOUNT in flags) or kmem_cache_alloc. The memcg charge
+ * behavior can be controlled through the gfpflags parameter, which affects how
+ * the necessary internal metadata is allocated. Passing __GFP_NOFAIL requests
+ * overcharging instead of failure, but it is not applied to the internal
+ * metadata allocation.
+ *
+ * There are several cases where it will return true even if the charging was
+ * not done. More specifically:
+ *
+ * 1. For !CONFIG_MEMCG or cgroup_disable=memory systems.
+ * 2. Already charged slab objects.
+ * 3. For slab objects from KMALLOC_NORMAL caches, i.e. allocated by kmalloc()
+ *    without __GFP_ACCOUNT.
+ * 4. If allocating the internal metadata has failed.
+ *
+ * Return: true if charge was successful, otherwise false.
+ */
+bool kmem_cache_charge(void *objp, gfp_t gfpflags);
void kmem_cache_free(struct kmem_cache *s, void *objp);
kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
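A minimal caller-side sketch of the new API (illustrative only; foo_cache, struct foo, and both functions are hypothetical and not part of this patch): the object is allocated in a context where the target memcg is unknown, and charged later from process context.

/* Hypothetical usage sketch, not part of this patch. */
static struct foo *foo_alloc_in_softirq(void)
{
	/*
	 * Softirq context: the eventual owner's memcg is not known yet,
	 * so no memcg charge happens at allocation time.
	 */
	return kmem_cache_alloc(foo_cache, GFP_ATOMIC);
}

static int foo_hand_to_task(struct foo *f)
{
	/*
	 * Process context: charge the already allocated object to the
	 * current task's memcg.
	 */
	if (!kmem_cache_charge(f, GFP_KERNEL))
		return -ENOMEM;
	return 0;
}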
diff --git a/mm/slab.h b/mm/slab.h
index dcdb56b8e7f5..9f907e930609 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -443,6 +443,13 @@ static inline bool is_kmalloc_cache(struct kmem_cache *s)
return (s->flags & SLAB_KMALLOC);
}
+static inline bool is_kmalloc_normal(struct kmem_cache *s)
+{
+ if (!is_kmalloc_cache(s))
+ return false;
+ return !(s->flags & (SLAB_CACHE_DMA|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT));
+}
+
/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
SLAB_CACHE_DMA32 | SLAB_PANIC | \
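For orientation, a hedged sketch of how this predicate classifies allocations (my_cache is a hypothetical non-kmalloc cache): only plain kmalloc() requests are served from KMALLOC_NORMAL caches, while __GFP_ACCOUNT requests are routed to kmalloc caches carrying SLAB_ACCOUNT.

/* Illustration only, not part of this patch; my_cache is hypothetical. */
void *a = kmalloc(64, GFP_KERNEL);                 /* KMALLOC_NORMAL -> true  */
void *b = kmalloc(64, GFP_KERNEL | __GFP_ACCOUNT); /* SLAB_ACCOUNT   -> false */
void *c = kmem_cache_alloc(my_cache, GFP_KERNEL);  /* !SLAB_KMALLOC  -> false */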
diff --git a/mm/slub.c b/mm/slub.c
index 95977f25a760..aa512de974e7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2185,6 +2185,45 @@ void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
__memcg_slab_free_hook(s, slab, p, objects, obj_exts);
}
+
+static __fastpath_inline
+bool memcg_slab_post_charge(void *p, gfp_t flags)
+{
+ struct slabobj_ext *slab_exts;
+ struct kmem_cache *s;
+ struct folio *folio;
+ struct slab *slab;
+ unsigned long off;
+
+ folio = virt_to_folio(p);
+ if (!folio_test_slab(folio)) {
+ return folio_memcg_kmem(folio) ||
+ (__memcg_kmem_charge_page(folio_page(folio, 0), flags,
+ folio_order(folio)) == 0);
+ }
+
+ slab = folio_slab(folio);
+ s = slab->slab_cache;
+
+ /*
+ * Ignore KMALLOC_NORMAL cache to avoid possible circular dependency
+ * of slab_obj_exts being allocated from the same slab and thus the slab
+ * becoming effectively unfreeable.
+ */
+ if (is_kmalloc_normal(s))
+ return true;
+
+ /* Ignore already charged objects. */
+ slab_exts = slab_obj_exts(slab);
+ if (slab_exts) {
+ off = obj_to_index(s, slab, p);
+ if (unlikely(slab_exts[off].objcg))
+ return true;
+ }
+
+ return __memcg_slab_post_alloc_hook(s, NULL, flags, 1, &p);
+}
+
#else /* CONFIG_MEMCG */
static inline bool memcg_slab_post_alloc_hook(struct kmem_cache *s,
struct list_lru *lru,
@@ -2198,6 +2237,11 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
void **p, int objects)
{
}
+
+static inline bool memcg_slab_post_charge(void *p, gfp_t flags)
+{
+ return true;
+}
#endif /* CONFIG_MEMCG */
#ifdef CONFIG_SLUB_RCU_DEBUG
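Note that memcg_slab_post_charge() also covers large kmalloc allocations, which are folio-backed rather than slab-backed and are charged through __memcg_kmem_charge_page(). A hedged caller-side sketch (the error handling is illustrative, not part of this patch):

/* Illustration only, not part of this patch. */
void *buf = kmalloc(64 * 1024, GFP_KERNEL);	/* served by the page allocator */
if (buf && !kmem_cache_charge(buf, GFP_KERNEL))
	pr_warn("post-charge of large kmalloc allocation failed\n");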
@@ -4105,6 +4149,15 @@ void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
}
EXPORT_SYMBOL(kmem_cache_alloc_lru_noprof);
+bool kmem_cache_charge(void *objp, gfp_t gfpflags)
+{
+ if (!memcg_kmem_online())
+ return true;
+
+ return memcg_slab_post_charge(objp, gfpflags);
+}
+EXPORT_SYMBOL(kmem_cache_charge);
+
/**
* kmem_cache_alloc_node - Allocate an object on the specified node
* @s: The cache to allocate from.
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 64d07b842e73..e25381bf32d0 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -714,6 +714,7 @@ struct sock *inet_csk_accept(struct sock *sk, struct proto_accept_arg *arg)
out:
release_sock(sk);
if (newsk && mem_cgroup_sockets_enabled) {
+ gfp_t gfp = GFP_KERNEL | __GFP_NOFAIL;
int amt = 0;
/* atomically get the memory usage, set and charge the
@@ -731,8 +732,8 @@ out:
}
if (amt)
- mem_cgroup_charge_skmem(newsk->sk_memcg, amt,
- GFP_KERNEL | __GFP_NOFAIL);
+ mem_cgroup_charge_skmem(newsk->sk_memcg, amt, gfp);
+ kmem_cache_charge(newsk, gfp);
release_sock(newsk);
}