Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--	mm/memcontrol.c	108
1 file changed, 54 insertions, 54 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 781605e92015..2ed5f2a0879d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -776,24 +776,6 @@ void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
 	rcu_read_unlock();
 }
 
-/*
- * mod_objcg_mlstate() may be called with irq enabled, so
- * mod_memcg_lruvec_state() should be used.
- */
-static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
-				     struct pglist_data *pgdat,
-				     enum node_stat_item idx, int nr)
-{
-	struct mem_cgroup *memcg;
-	struct lruvec *lruvec;
-
-	rcu_read_lock();
-	memcg = obj_cgroup_memcg(objcg);
-	lruvec = mem_cgroup_lruvec(memcg, pgdat);
-	mod_memcg_lruvec_state(lruvec, idx, nr);
-	rcu_read_unlock();
-}
-
 /**
  * __count_memcg_events - account VM events in a cgroup
  * @memcg: the memory cgroup
@@ -2137,41 +2119,6 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
 }
 #endif
 
-/*
- * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
- * sequence used in this case to access content from object stock is slow.
- * To optimize for user context access, there are now two object stocks for
- * task context and interrupt context access respectively.
- *
- * The task context object stock can be accessed by disabling preemption only
- * which is cheap in non-preempt kernel. The interrupt context object stock
- * can only be accessed after disabling interrupt. User context code can
- * access interrupt object stock, but not vice versa.
- */
-static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
-{
-	struct memcg_stock_pcp *stock;
-
-	if (likely(in_task())) {
-		*pflags = 0UL;
-		preempt_disable();
-		stock = this_cpu_ptr(&memcg_stock);
-		return &stock->task_obj;
-	}
-
-	local_irq_save(*pflags);
-	stock = this_cpu_ptr(&memcg_stock);
-	return &stock->irq_obj;
-}
-
-static inline void put_obj_stock(unsigned long flags)
-{
-	if (likely(in_task()))
-		preempt_enable();
-	else
-		local_irq_restore(flags);
-}
-
 /**
  * consume_stock: Try to consume stocked charge on this cpu.
  * @memcg: memcg to consume from.
@@ -2816,6 +2763,59 @@ retry:
  */
 #define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
 
+/*
+ * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
+ * sequence used in this case to access content from object stock is slow.
+ * To optimize for user context access, there are now two object stocks for
+ * task context and interrupt context access respectively.
+ *
+ * The task context object stock can be accessed by disabling preemption only
+ * which is cheap in non-preempt kernel. The interrupt context object stock
+ * can only be accessed after disabling interrupt. User context code can
+ * access interrupt object stock, but not vice versa.
+ */
+static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
+{
+	struct memcg_stock_pcp *stock;
+
+	if (likely(in_task())) {
+		*pflags = 0UL;
+		preempt_disable();
+		stock = this_cpu_ptr(&memcg_stock);
+		return &stock->task_obj;
+	}
+
+	local_irq_save(*pflags);
+	stock = this_cpu_ptr(&memcg_stock);
+	return &stock->irq_obj;
+}
+
+static inline void put_obj_stock(unsigned long flags)
+{
+	if (likely(in_task()))
+		preempt_enable();
+	else
+		local_irq_restore(flags);
+}
+
+/*
+ * mod_objcg_mlstate() may be called with irq enabled, so
+ * mod_memcg_lruvec_state() should be used.
+ */
+static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
+				     struct pglist_data *pgdat,
+				     enum node_stat_item idx, int nr)
+{
+	struct mem_cgroup *memcg;
+	struct lruvec *lruvec;
+
+	rcu_read_lock();
+	memcg = obj_cgroup_memcg(objcg);
+	lruvec = mem_cgroup_lruvec(memcg, pgdat);
+	mod_memcg_lruvec_state(lruvec, idx, nr);
+	rcu_read_unlock();
+}
+
 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
 				 gfp_t gfp, bool new_page)
 {
@@ -5558,7 +5558,7 @@ static int mem_cgroup_move_account(struct page *page,
 
 	VM_BUG_ON(from == to);
 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
-	VM_BUG_ON(compound && !folio_test_multi(folio));
+	VM_BUG_ON(compound && !folio_test_large(folio));
 
 	/*
 	 * Prevent mem_cgroup_migrate() from looking at
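
For context on the moved hunk: get_obj_stock() returns a pointer to the per-cpu object stock with either preemption (task context) or interrupts (irq context) disabled, and put_obj_stock() undoes exactly that, so every stock access must be bracketed by the pair. A minimal sketch of a caller follows; the function name and the fields hinted at in the comment are illustrative assumptions, not part of this commit:

/*
 * Illustrative sketch only (not from this diff): access the per-cpu
 * obj_stock between get_obj_stock() and put_obj_stock(). get_obj_stock()
 * picks the matching task_obj/irq_obj instance and returns with
 * preemption or interrupts disabled; put_obj_stock() re-enables them.
 */
static void sketch_obj_uncharge(struct obj_cgroup *objcg, unsigned int nr_bytes)
{
	unsigned long flags;
	struct obj_stock *stock = get_obj_stock(&flags);

	/* ... read or update the stock here, e.g. its cached objcg
	 * and stocked byte count (field names are assumptions) ... */

	put_obj_stock(flags);
}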