Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c | 14 +++++++-------
1 file changed, 7 insertions, 7 deletions
diff --git a/mm/rmap.c b/mm/rmap.c
index f5d43edad529..3746a5531018 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1780,7 +1780,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 				set_huge_pte_at(mm, address, pvmw.pte, pteval,
 						hsz);
 			} else {
-				dec_mm_counter(mm, mm_counter(&folio->page));
+				dec_mm_counter(mm, mm_counter(folio));
 				set_pte_at(mm, address, pvmw.pte, pteval);
 			}
 
@@ -1795,7 +1795,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			 * migration) will not expect userfaults on already
 			 * copied pages.
 			 */
-			dec_mm_counter(mm, mm_counter(&folio->page));
+			dec_mm_counter(mm, mm_counter(folio));
 		} else if (folio_test_anon(folio)) {
 			swp_entry_t entry = page_swap_entry(subpage);
 			pte_t swp_pte;
@@ -1903,7 +1903,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			 *
 			 * See Documentation/mm/mmu_notifier.rst
 			 */
-			dec_mm_counter(mm, mm_counter_file(&folio->page));
+			dec_mm_counter(mm, mm_counter_file(folio));
 		}
 discard:
 		if (unlikely(folio_test_hugetlb(folio)))
@@ -2169,7 +2169,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 				swp_pte = pte_swp_mkuffd_wp(swp_pte);
 			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
 			trace_set_migration_pte(pvmw.address, pte_val(swp_pte),
-						compound_order(&folio->page));
+						folio_order(folio));
 			/*
 			 * No need to invalidate here it will synchronize on
 			 * against the special swap migration pte.
@@ -2181,7 +2181,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 				set_huge_pte_at(mm, address, pvmw.pte, pteval,
 						hsz);
 			} else {
-				dec_mm_counter(mm, mm_counter(&folio->page));
+				dec_mm_counter(mm, mm_counter(folio));
 				set_pte_at(mm, address, pvmw.pte, pteval);
 			}
 
@@ -2196,7 +2196,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 			 * migration) will not expect userfaults on already
 			 * copied pages.
 			 */
-			dec_mm_counter(mm, mm_counter(&folio->page));
+			dec_mm_counter(mm, mm_counter(folio));
 		} else {
 			swp_entry_t entry;
 			pte_t swp_pte;
@@ -2261,7 +2261,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 			else
 				set_pte_at(mm, address, pvmw.pte, swp_pte);
 			trace_set_migration_pte(address, pte_val(swp_pte),
-						compound_order(&folio->page));
+						folio_order(folio));
 			/*
 			 * No need to invalidate here it will synchronize on
 			 * against the special swap migration pte.
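
For reference, every call site above moves from the page-based helpers to their folio-native counterparts: mm_counter() and mm_counter_file() take a struct folio * directly after this conversion, and folio_order(folio) replaces compound_order(&folio->page). A rough sketch of what the folio-based counter helpers look like, adapted from include/linux/mm.h (the exact bodies may differ between kernel versions):

/* Pick the mm counter for a file-backed folio: shmem/tmpfs folios
 * are swap-backed and accounted under MM_SHMEMPAGES. */
static inline int mm_counter_file(struct folio *folio)
{
	if (folio_test_swapbacked(folio))
		return MM_SHMEMPAGES;
	return MM_FILEPAGES;
}

/* Anonymous folios are accounted under MM_ANONPAGES; everything
 * else falls through to the file-backed accounting above. */
static inline int mm_counter(struct folio *folio)
{
	if (folio_test_anon(folio))
		return MM_ANONPAGES;
	return mm_counter_file(folio);
}

Since try_to_unmap_one() and try_to_migrate_one() already operate on a folio, passing it straight through avoids the &folio->page detour; folio_order() likewise reads the order from the folio itself rather than re-deriving it from the head page.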