Diffstat (limited to 'arch/sparc/include/asm/pgtable_64.h')
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h  |  27
1 file changed, 19 insertions(+), 8 deletions(-)
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 314b66851348..8a598528ec1f 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -12,6 +12,7 @@
 * the SpitFire page tables.
*/
+#include <asm-generic/5level-fixup.h>
#include <linux/compiler.h>
#include <linux/const.h>
#include <asm/types.h>
@@ -375,7 +376,10 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
#define pgprot_noncached pgprot_noncached
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-static inline unsigned long __pte_huge_mask(void)
+extern pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+				struct page *page, int writable);
+#define arch_make_huge_pte arch_make_huge_pte
+static inline unsigned long __pte_default_huge_mask(void)
{
	unsigned long mask;
@@ -395,12 +399,14 @@ static inline unsigned long __pte_huge_mask(void)
static inline pte_t pte_mkhuge(pte_t pte)
{
-	return __pte(pte_val(pte) | _PAGE_PMD_HUGE | __pte_huge_mask());
+	return __pte(pte_val(pte) | __pte_default_huge_mask());
}
-static inline bool is_hugetlb_pte(pte_t pte)
+static inline bool is_default_hugetlb_pte(pte_t pte)
{
-	return !!(pte_val(pte) & __pte_huge_mask());
+	unsigned long mask = __pte_default_huge_mask();
+
+	return (pte_val(pte) & mask) == mask;
}
static inline bool is_hugetlb_pmd(pmd_t pmd)
@@ -873,12 +879,17 @@ static inline unsigned long pud_pfn(pud_t pud)
#define pte_offset_map pte_index
#define pte_unmap(pte) do { } while (0)
+/* We cannot include <linux/mm_types.h> at this point yet: */
+extern struct mm_struct init_mm;
+
/* Actual page table PTE updates. */
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
-		   pte_t *ptep, pte_t orig, int fullmm);
+		   pte_t *ptep, pte_t orig, int fullmm,
+		   unsigned int hugepage_shift);
static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
-				pte_t *ptep, pte_t orig, int fullmm)
+				pte_t *ptep, pte_t orig, int fullmm,
+				unsigned int hugepage_shift)
{
	/* It is more efficient to let flush_tlb_kernel_range()
	 * handle init_mm tlb flushes.
@@ -887,7 +898,7 @@ static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
	 * and SUN4V pte layout, so this inline test is fine.
	 */
	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
-		tlb_batch_add(mm, vaddr, ptep, orig, fullmm);
+		tlb_batch_add(mm, vaddr, ptep, orig, fullmm, hugepage_shift);
}
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
@@ -906,7 +917,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
	pte_t orig = *ptep;
	*ptep = pte;
-	maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm);
+	maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm, PAGE_SHIFT);
}
#define set_pte_at(mm,addr,ptep,pte) \
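
A note on the is_default_hugetlb_pte() hunk above: the old test treated a PTE as huge if any bit of the huge mask was set, while the new test requires every bit of the mask to match, so a PTE that encodes a different hugepage size in the same field no longer counts as a "default" huge PTE. The stand-alone C sketch below illustrates the difference between the two predicates; the FAKE_SZ_* constants are invented for the example and are not the real sparc64 _PAGE_SZHUGE_4U/_PAGE_SZHUGE_4V encodings.

/*
 * Stand-alone illustration of the is_default_hugetlb_pte() change.
 * The constants are invented for the example; they are NOT the real
 * sparc64 page-size bit encodings.
 */
#include <stdbool.h>
#include <stdio.h>

#define FAKE_SZ_DEFAULT_HUGE	0x5UL	/* pretend "default" hugepage size bits */
#define FAKE_SZ_OTHER_HUGE	0x4UL	/* pretend non-default size; shares a bit */

/* Old-style check: any bit of the mask present counts as a huge PTE. */
static bool old_is_hugetlb_pte(unsigned long pte)
{
	return !!(pte & FAKE_SZ_DEFAULT_HUGE);
}

/* New-style check: every bit of the mask must match. */
static bool new_is_default_hugetlb_pte(unsigned long pte)
{
	unsigned long mask = FAKE_SZ_DEFAULT_HUGE;

	return (pte & mask) == mask;
}

int main(void)
{
	unsigned long pte = FAKE_SZ_OTHER_HUGE;

	/* Prints "old=1 new=0": only the old test calls this PTE huge. */
	printf("old=%d new=%d\n",
	       old_is_hugetlb_pte(pte), new_is_default_hugetlb_pte(pte));
	return 0;
}

With these values the old predicate reports the non-default PTE as huge while the new one does not, which is the behavioural change that the rename from is_hugetlb_pte() to is_default_hugetlb_pte() signals.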