Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r-- | include/linux/mmzone.h | 69
1 file changed, 40 insertions, 29 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index f6f884970511..8379432f4f2f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -88,12 +88,10 @@ static inline bool is_migrate_movable(int mt)
 
 extern int page_group_by_mobility_disabled;
 
-#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
-#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)
+#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)
 
 #define get_pageblock_migratetype(page)					\
-	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
-			PB_migrate_end, MIGRATETYPE_MASK)
+	get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)
 
 struct free_area {
 	struct list_head	free_list[MIGRATE_TYPES];
@@ -155,10 +153,6 @@ enum zone_stat_item {
 	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
 	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
 	NR_PAGETABLE,		/* used for pagetables */
-	NR_KERNEL_STACK_KB,	/* measured in KiB */
-#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
-	NR_KERNEL_SCS_KB,	/* measured in KiB */
-#endif
 	/* Second 128 byte cacheline */
 	NR_BOUNCE,
 #if IS_ENABLED(CONFIG_ZSMALLOC)
@@ -174,14 +168,20 @@ enum node_stat_item {
 	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
 	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
 	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
-	NR_SLAB_RECLAIMABLE,
-	NR_SLAB_UNRECLAIMABLE,
+	NR_SLAB_RECLAIMABLE_B,
+	NR_SLAB_UNRECLAIMABLE_B,
 	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
 	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
 	WORKINGSET_NODES,
-	WORKINGSET_REFAULT,
-	WORKINGSET_ACTIVATE,
-	WORKINGSET_RESTORE,
+	WORKINGSET_REFAULT_BASE,
+	WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE,
+	WORKINGSET_REFAULT_FILE,
+	WORKINGSET_ACTIVATE_BASE,
+	WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE,
+	WORKINGSET_ACTIVATE_FILE,
+	WORKINGSET_RESTORE_BASE,
+	WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE,
+	WORKINGSET_RESTORE_FILE,
 	WORKINGSET_NODERECLAIM,
 	NR_ANON_MAPPED,	/* Mapped anonymous pages */
 	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
@@ -203,10 +203,34 @@ enum node_stat_item {
 	NR_KERNEL_MISC_RECLAIMABLE,	/* reclaimable non-slab kernel pages */
 	NR_FOLL_PIN_ACQUIRED,	/* via: pin_user_page(), gup flag: FOLL_PIN */
 	NR_FOLL_PIN_RELEASED,	/* pages returned via unpin_user_page() */
+	NR_KERNEL_STACK_KB,	/* measured in KiB */
+#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
+	NR_KERNEL_SCS_KB,	/* measured in KiB */
+#endif
 	NR_VM_NODE_STAT_ITEMS
 };
 
 /*
+ * Returns true if the value is measured in bytes (most vmstat values are
+ * measured in pages). This defines the API part, the internal representation
+ * might be different.
+ */
+static __always_inline bool vmstat_item_in_bytes(int idx)
+{
+	/*
+	 * Global and per-node slab counters track slab pages.
+	 * It's expected that changes are multiples of PAGE_SIZE.
+	 * Internally values are stored in pages.
+	 *
+	 * Per-memcg and per-lruvec counters track memory, consumed
+	 * by individual slab objects. These counters are actually
+	 * byte-precise.
+	 */
+	return (idx == NR_SLAB_RECLAIMABLE_B ||
+		idx == NR_SLAB_UNRECLAIMABLE_B);
+}
+
+/*
  * We do arithmetic on the LRU lists in various places in the code,
  * so it is important to keep the active lists LRU_ACTIVE higher in
  * the array than the corresponding inactive lists, and to keep
@@ -259,8 +283,8 @@ struct lruvec {
 	unsigned long			file_cost;
 	/* Non-resident age, driven by LRU movement */
 	atomic_long_t			nonresident_age;
-	/* Refaults at the time of last reclaim cycle */
-	unsigned long			refaults;
+	/* Refaults at the time of last reclaim cycle, anon=0, file=1 */
+	unsigned long			refaults[2];
 	/* Various lruvec state flags (enum lruvec_flags) */
 	unsigned long			flags;
 #ifdef CONFIG_MEMCG
@@ -512,6 +536,7 @@ struct zone {
 	 * On compaction failure, 1<<compact_defer_shift compactions
 	 * are skipped before trying again. The number attempted since
 	 * last failure is tracked with compact_considered.
+	 * compact_order_failed is the minimum compaction failed order.
 	 */
 	unsigned int		compact_considered;
 	unsigned int		compact_defer_shift;
@@ -819,18 +844,6 @@ static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
 
 extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);
 
-#ifdef CONFIG_HAVE_MEMORY_PRESENT
-void memory_present(int nid, unsigned long start, unsigned long end);
-#else
-static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
-#endif
-
-#if defined(CONFIG_SPARSEMEM)
-void memblocks_present(void);
-#else
-static inline void memblocks_present(void) {}
-#endif
-
 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
 int local_memory_node(int node_id);
 #else
@@ -1387,8 +1400,6 @@ struct mminit_pfnnid_cache {
 #define early_pfn_valid(pfn)	(1)
 #endif
 
-void memory_present(int nid, unsigned long start, unsigned long end);
-
 /*
  * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
  * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
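Below is a small, self-contained userspace sketch (not kernel code) of the two counter conventions the hunks above introduce: the byte-counted slab items with the new _B suffix, which are the only indices for which vmstat_item_in_bytes() returns true, and the anon/file split of the workingset counters, where the _ANON name aliases the _BASE entry so a counter can be selected with plain "BASE + type" arithmetic, mirroring the new refaults[2] layout (anon = 0, file = 1). The trimmed-down enum, the demo_lruvec struct and main() are illustrative stand-ins, not the in-kernel definitions.

/*
 * Illustrative userspace sketch only; it mirrors a few names from the
 * header above, but demo_lruvec and main() are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

enum node_stat_item {
	NR_SLAB_RECLAIMABLE_B,		/* byte-counted slab items */
	NR_SLAB_UNRECLAIMABLE_B,
	WORKINGSET_REFAULT_BASE,	/* anon/file pair, _ANON aliases _BASE */
	WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE,
	WORKINGSET_REFAULT_FILE,
	NR_VM_NODE_STAT_ITEMS
};

/* Same shape as the header's helper: only the slab counters are in bytes. */
static inline bool vmstat_item_in_bytes(int idx)
{
	return (idx == NR_SLAB_RECLAIMABLE_B ||
		idx == NR_SLAB_UNRECLAIMABLE_B);
}

/* Hypothetical stand-in for struct lruvec's new refaults[2] field. */
struct demo_lruvec {
	unsigned long refaults[2];	/* anon = 0, file = 1 */
};

int main(void)
{
	struct demo_lruvec lruvec = { .refaults = { 0, 0 } };
	int file = 1;			/* 0 = anon, 1 = file */

	/*
	 * Because _ANON aliases _BASE and _FILE immediately follows it,
	 * "BASE + file" picks the per-type counter without branching.
	 */
	int idx = WORKINGSET_REFAULT_BASE + file;

	lruvec.refaults[file]++;	/* per-type refault snapshot */

	printf("refault index %d, counted in bytes: %s\n",
	       idx, vmstat_item_in_bytes(idx) ? "yes" : "no");
	printf("file refaults: %lu\n", lruvec.refaults[file]);
	return 0;
}

The point of the _BASE/_ANON aliasing is that accounting sites can index the anon or file variant of a workingset counter arithmetically; how the kernel actually updates these counters lives outside this header and is not shown here.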