Diffstat (limited to 'mm/percpu.c')
-rw-r--r--	mm/percpu.c	118
1 file changed, 47 insertions(+), 71 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 4e11fc1e6def..474e3683b74d 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1392,9 +1392,9 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
 		panic("%s: Failed to allocate %zu bytes\n", __func__,
 		      alloc_size);
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef NEED_PCPUOBJ_EXT
 	/* first chunk is free to use */
-	chunk->obj_cgroups = NULL;
+	chunk->obj_exts = NULL;
 #endif
 	pcpu_init_md_blocks(chunk);
 
@@ -1463,12 +1463,12 @@ static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
 	if (!chunk->md_blocks)
 		goto md_blocks_fail;
 
-#ifdef CONFIG_MEMCG_KMEM
-	if (!mem_cgroup_kmem_disabled()) {
-		chunk->obj_cgroups =
+#ifdef NEED_PCPUOBJ_EXT
+	if (need_pcpuobj_ext()) {
+		chunk->obj_exts =
 			pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) *
-					sizeof(struct obj_cgroup *), gfp);
-		if (!chunk->obj_cgroups)
+					sizeof(struct pcpuobj_ext), gfp);
+		if (!chunk->obj_exts)
 			goto objcg_fail;
 	}
 #endif
@@ -1480,7 +1480,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
 
 	return chunk;
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef NEED_PCPUOBJ_EXT
 objcg_fail:
 	pcpu_mem_free(chunk->md_blocks);
 #endif
@@ -1498,8 +1498,8 @@ static void pcpu_free_chunk(struct pcpu_chunk *chunk)
 {
 	if (!chunk)
 		return;
-#ifdef CONFIG_MEMCG_KMEM
-	pcpu_mem_free(chunk->obj_cgroups);
+#ifdef NEED_PCPUOBJ_EXT
+	pcpu_mem_free(chunk->obj_exts);
 #endif
 	pcpu_mem_free(chunk->md_blocks);
 	pcpu_mem_free(chunk->bound_map);
@@ -1646,9 +1646,9 @@ static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
 	if (!objcg)
 		return;
 
-	if (likely(chunk && chunk->obj_cgroups)) {
+	if (likely(chunk && chunk->obj_exts)) {
 		obj_cgroup_get(objcg);
-		chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg;
+		chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].cgroup = objcg;
 
 		rcu_read_lock();
 		mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
@@ -1663,13 +1663,13 @@ static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
 {
 	struct obj_cgroup *objcg;
 
-	if (unlikely(!chunk->obj_cgroups))
+	if (unlikely(!chunk->obj_exts))
 		return;
 
-	objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT];
+	objcg = chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].cgroup;
 	if (!objcg)
 		return;
-	chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL;
+	chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].cgroup = NULL;
 
 	obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size));
 
@@ -1699,6 +1699,32 @@ static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
 }
 #endif /* CONFIG_MEMCG_KMEM */
 
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+static void pcpu_alloc_tag_alloc_hook(struct pcpu_chunk *chunk, int off,
+				      size_t size)
+{
+	if (mem_alloc_profiling_enabled() && likely(chunk->obj_exts)) {
+		alloc_tag_add(&chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].tag,
+			      current->alloc_tag, size);
+	}
+}
+
+static void pcpu_alloc_tag_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
+{
+	if (mem_alloc_profiling_enabled() && likely(chunk->obj_exts))
+		alloc_tag_sub(&chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].tag, size);
+}
+#else
+static void pcpu_alloc_tag_alloc_hook(struct pcpu_chunk *chunk, int off,
+				      size_t size)
+{
+}
+
+static void pcpu_alloc_tag_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
+{
+}
+#endif
+
 /**
  * pcpu_alloc - the percpu allocator
  * @size: size of area to allocate in bytes
@@ -1714,7 +1740,7 @@ static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
+void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved,
 				 gfp_t gfp)
 {
 	gfp_t pcpu_gfp;
@@ -1881,6 +1907,8 @@ area_found:
 
 	pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);
 
+	pcpu_alloc_tag_alloc_hook(chunk, off, size);
+
 	return ptr;
 
 fail_unlock:
@@ -1909,61 +1937,7 @@ fail:
 
 	return NULL;
 }
-
-/**
- * __alloc_percpu_gfp - allocate dynamic percpu area
- * @size: size of area to allocate in bytes
- * @align: alignment of area (max PAGE_SIZE)
- * @gfp: allocation flags
- *
- * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
- * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
- * be called from any context but is a lot more likely to fail. If @gfp
- * has __GFP_NOWARN then no warning will be triggered on invalid or failed
- * allocation requests.
- *
- * RETURNS:
- * Percpu pointer to the allocated area on success, NULL on failure.
- */
-void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
-{
-	return pcpu_alloc(size, align, false, gfp);
-}
-EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
-
-/**
- * __alloc_percpu - allocate dynamic percpu area
- * @size: size of area to allocate in bytes
- * @align: alignment of area (max PAGE_SIZE)
- *
- * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
- */
-void __percpu *__alloc_percpu(size_t size, size_t align)
-{
-	return pcpu_alloc(size, align, false, GFP_KERNEL);
-}
-EXPORT_SYMBOL_GPL(__alloc_percpu);
-
-/**
- * __alloc_reserved_percpu - allocate reserved percpu area
- * @size: size of area to allocate in bytes
- * @align: alignment of area (max PAGE_SIZE)
- *
- * Allocate zero-filled percpu area of @size bytes aligned at @align
- * from reserved percpu area if arch has set it up; otherwise,
- * allocation is served from the same dynamic area.  Might sleep.
- * Might trigger writeouts.
- *
- * CONTEXT:
- * Does GFP_KERNEL allocation.
- *
- * RETURNS:
- * Percpu pointer to the allocated area on success, NULL on failure.
- */
-void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
-{
-	return pcpu_alloc(size, align, true, GFP_KERNEL);
-}
+EXPORT_SYMBOL_GPL(pcpu_alloc_noprof);
 
 /**
  * pcpu_balance_free - manage the amount of free chunks
@@ -2302,6 +2276,8 @@ void free_percpu(void __percpu *ptr)
 	spin_lock_irqsave(&pcpu_lock, flags);
 	size = pcpu_free_area(chunk, off);
 
+	pcpu_alloc_tag_free_hook(chunk, off, size);
+
 	pcpu_memcg_free_hook(chunk, off, size);
 
 	/*
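
The hunks above lean on companion definitions that live outside mm/percpu.c and are therefore not shown here (the diffstat is limited to mm/percpu.c). Below is a minimal sketch of what they presumably look like, inferred from how the diff uses NEED_PCPUOBJ_EXT, struct pcpuobj_ext, and need_pcpuobj_ext(); the exact layout in mm/percpu-internal.h may differ:

/*
 * Sketch of the companion definitions assumed by the hunks above
 * (presumably mm/percpu-internal.h; not part of this diff).
 */
#if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MEM_ALLOC_PROFILING)
#define NEED_PCPUOBJ_EXT
#endif

/*
 * One entry per PCPU_MIN_ALLOC_SIZE unit of a chunk, generalizing the
 * old obj_cgroups[] array so several consumers can share one allocation.
 */
struct pcpuobj_ext {
#ifdef CONFIG_MEMCG_KMEM
	struct obj_cgroup	*cgroup;	/* memcg the object was charged to */
#endif
#ifdef CONFIG_MEM_ALLOC_PROFILING
	union codetag_ref	tag;		/* reference to the allocation site */
#endif
};

#ifdef NEED_PCPUOBJ_EXT
/* The obj_exts array is only worth allocating if some consumer is active. */
static inline bool need_pcpuobj_ext(void)
{
	if (IS_ENABLED(CONFIG_MEM_ALLOC_PROFILING))
		return true;
	if (!mem_cgroup_kmem_disabled())
		return true;
	return false;
}
#endif

Note how this keeps the behavior of the replaced CONFIG_MEMCG_KMEM-only code as a special case: with profiling disabled, need_pcpuobj_ext() reduces to the old !mem_cgroup_kmem_disabled() check.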
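Likewise, renaming pcpu_alloc() to pcpu_alloc_noprof() and deleting the three exported __alloc_percpu*() wrappers only works if callers are redirected through alloc_hooks()-style macros, presumably in include/linux/percpu.h, along these lines (a sketch, not the verbatim upstream change):

#define __alloc_percpu_gfp(_size, _align, _gfp)			\
	alloc_hooks(pcpu_alloc_noprof(_size, _align, false, _gfp))
#define __alloc_percpu(_size, _align)					\
	alloc_hooks(pcpu_alloc_noprof(_size, _align, false, GFP_KERNEL))
#define __alloc_reserved_percpu(_size, _align)				\
	alloc_hooks(pcpu_alloc_noprof(_size, _align, true, GFP_KERNEL))

This also explains why pcpu_alloc_noprof() gains an EXPORT_SYMBOL_GPL() while the old exports disappear: modules now expand the macros at each call site, so the current->alloc_tag consumed by pcpu_alloc_tag_alloc_hook() identifies the real caller rather than a shared wrapper function.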