Diffstat (limited to 'arch/s390/mm/pgtable.c')
-rw-r--r--	arch/s390/mm/pgtable.c | 159 ++++++++++++++++++++++++++++++------------
1 file changed, 115 insertions(+), 44 deletions(-)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 301e466e4263..f2cc7da473e4 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -347,18 +347,27 @@ static inline void pmdp_idte_local(struct mm_struct *mm,
 			    mm->context.asce, IDTE_LOCAL);
 	else
 		__pmdp_idte(addr, pmdp, 0, 0, IDTE_LOCAL);
+	if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
+		gmap_pmdp_idte_local(mm, addr);
 }
 
 static inline void pmdp_idte_global(struct mm_struct *mm,
 				    unsigned long addr, pmd_t *pmdp)
 {
-	if (MACHINE_HAS_TLB_GUEST)
+	if (MACHINE_HAS_TLB_GUEST) {
 		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
 			    mm->context.asce, IDTE_GLOBAL);
-	else if (MACHINE_HAS_IDTE)
+		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
+			gmap_pmdp_idte_global(mm, addr);
+	} else if (MACHINE_HAS_IDTE) {
 		__pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
-	else
+		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
+			gmap_pmdp_idte_global(mm, addr);
+	} else {
 		__pmdp_csp(pmdp);
+		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
+			gmap_pmdp_csp(mm, addr);
+	}
 }
 
 static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
@@ -392,6 +401,8 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
 			  cpumask_of(smp_processor_id()))) {
 		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
 		mm->context.flush_mm = 1;
+		if (mm_has_pgste(mm))
+			gmap_pmdp_invalidate(mm, addr);
 	} else {
 		pmdp_idte_global(mm, addr, pmdp);
 	}
@@ -399,6 +410,24 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
 	return old;
 }
 
+static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
+{
+	pgd_t *pgd;
+	p4d_t *p4d;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	pgd = pgd_offset(mm, addr);
+	p4d = p4d_alloc(mm, pgd, addr);
+	if (!p4d)
+		return NULL;
+	pud = pud_alloc(mm, p4d, addr);
+	if (!pud)
+		return NULL;
+	pmd = pmd_alloc(mm, pud, addr);
+	return pmd;
+}
+
 pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
 		       pmd_t *pmdp, pmd_t new)
 {
@@ -693,40 +722,14 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 /*
  * Test and reset if a guest page is dirty
  */
-bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
+bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr,
+		       pte_t *ptep)
 {
-	spinlock_t *ptl;
-	pgd_t *pgd;
-	p4d_t *p4d;
-	pud_t *pud;
-	pmd_t *pmd;
 	pgste_t pgste;
-	pte_t *ptep;
 	pte_t pte;
 	bool dirty;
 	int nodat;
 
-	pgd = pgd_offset(mm, addr);
-	p4d = p4d_alloc(mm, pgd, addr);
-	if (!p4d)
-		return false;
-	pud = pud_alloc(mm, p4d, addr);
-	if (!pud)
-		return false;
-	pmd = pmd_alloc(mm, pud, addr);
-	if (!pmd)
-		return false;
-	/* We can't run guests backed by huge pages, but userspace can
-	 * still set them up and then try to migrate them without any
-	 * migration support.
-	 */
-	if (pmd_large(*pmd))
-		return true;
-
-	ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl);
-	if (unlikely(!ptep))
-		return false;
-
 	pgste = pgste_get_lock(ptep);
 	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
 	pgste_val(pgste) &= ~PGSTE_UC_BIT;
@@ -742,21 +745,43 @@ bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
 		*ptep = pte;
 	}
 	pgste_set_unlock(ptep, pgste);
-
-	spin_unlock(ptl);
 	return dirty;
 }
-EXPORT_SYMBOL_GPL(test_and_clear_guest_dirty);
+EXPORT_SYMBOL_GPL(ptep_test_and_clear_uc);
 
 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 			  unsigned char key, bool nq)
 {
-	unsigned long keyul;
+	unsigned long keyul, paddr;
 	spinlock_t *ptl;
 	pgste_t old, new;
+	pmd_t *pmdp;
 	pte_t *ptep;
 
-	ptep = get_locked_pte(mm, addr, &ptl);
+	pmdp = pmd_alloc_map(mm, addr);
+	if (unlikely(!pmdp))
+		return -EFAULT;
+
+	ptl = pmd_lock(mm, pmdp);
+	if (!pmd_present(*pmdp)) {
+		spin_unlock(ptl);
+		return -EFAULT;
+	}
+
+	if (pmd_large(*pmdp)) {
+		paddr = pmd_val(*pmdp) & HPAGE_MASK;
+		paddr |= addr & ~HPAGE_MASK;
+		/*
+		 * Huge pmds need quiescing operations, they are
+		 * always mapped.
+		 */
+		page_set_storage_key(paddr, key, 1);
+		spin_unlock(ptl);
+		return 0;
+	}
+	spin_unlock(ptl);
+
+	ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
 	if (unlikely(!ptep))
 		return -EFAULT;
 
@@ -767,14 +792,14 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 	pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
 	pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
 	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
-		unsigned long address, bits, skey;
+		unsigned long bits, skey;
 
-		address = pte_val(*ptep) & PAGE_MASK;
-		skey = (unsigned long) page_get_storage_key(address);
+		paddr = pte_val(*ptep) & PAGE_MASK;
+		skey = (unsigned long) page_get_storage_key(paddr);
 		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
 		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
 		/* Set storage key ACC and FP */
-		page_set_storage_key(address, skey, !nq);
+		page_set_storage_key(paddr, skey, !nq);
 		/* Merge host changed & referenced into pgste */
 		pgste_val(new) |= bits << 52;
 	}
@@ -830,11 +855,32 @@ EXPORT_SYMBOL(cond_set_guest_storage_key);
 
 int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
 {
 	spinlock_t *ptl;
+	unsigned long paddr;
 	pgste_t old, new;
+	pmd_t *pmdp;
 	pte_t *ptep;
 	int cc = 0;
 
-	ptep = get_locked_pte(mm, addr, &ptl);
+	pmdp = pmd_alloc_map(mm, addr);
+	if (unlikely(!pmdp))
+		return -EFAULT;
+
+	ptl = pmd_lock(mm, pmdp);
+	if (!pmd_present(*pmdp)) {
+		spin_unlock(ptl);
+		return -EFAULT;
+	}
+
+	if (pmd_large(*pmdp)) {
+		paddr = pmd_val(*pmdp) & HPAGE_MASK;
+		paddr |= addr & ~HPAGE_MASK;
+		cc = page_reset_referenced(paddr);
+		spin_unlock(ptl);
+		return cc;
+	}
+	spin_unlock(ptl);
+
+	ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
 	if (unlikely(!ptep))
 		return -EFAULT;
 
@@ -843,7 +889,8 @@ int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
 	pgste_val(new) &= ~PGSTE_GR_BIT;
 
 	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
-		cc = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
+		paddr = pte_val(*ptep) & PAGE_MASK;
+		cc = page_reset_referenced(paddr);
 		/* Merge real referenced bit into host-set */
 		pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
 	}
@@ -862,18 +909,42 @@ EXPORT_SYMBOL(reset_guest_reference_bit);
 
 int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 			  unsigned char *key)
 {
+	unsigned long paddr;
 	spinlock_t *ptl;
 	pgste_t pgste;
+	pmd_t *pmdp;
 	pte_t *ptep;
 
-	ptep = get_locked_pte(mm, addr, &ptl);
+	pmdp = pmd_alloc_map(mm, addr);
+	if (unlikely(!pmdp))
+		return -EFAULT;
+
+	ptl = pmd_lock(mm, pmdp);
+	if (!pmd_present(*pmdp)) {
+		/* Not yet mapped memory has a zero key */
+		spin_unlock(ptl);
+		*key = 0;
+		return 0;
+	}
+
+	if (pmd_large(*pmdp)) {
+		paddr = pmd_val(*pmdp) & HPAGE_MASK;
+		paddr |= addr & ~HPAGE_MASK;
+		*key = page_get_storage_key(paddr);
+		spin_unlock(ptl);
+		return 0;
+	}
+	spin_unlock(ptl);
+
+	ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
 	if (unlikely(!ptep))
 		return -EFAULT;
 
 	pgste = pgste_get_lock(ptep);
 	*key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
+	paddr = pte_val(*ptep) & PAGE_MASK;
 	if (!(pte_val(*ptep) & _PAGE_INVALID))
-		*key = page_get_storage_key(pte_val(*ptep) & PAGE_MASK);
+		*key = page_get_storage_key(paddr);
 	/* Reflect guest's logical view, not physical */
 	*key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
 	pgste_set_unlock(ptep, pgste);
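Note on the repeated huge-pmd branch: each of the three key functions above recomputes the physical address that the storage-key instructions should target by combining the 1 MB segment origin from the pmd with the page offset of the virtual address. A minimal user-space sketch of that arithmetic, assuming s390's 1 MB segments (HPAGE_SHIFT == 20); hpage_paddr() and the sample values are hypothetical, not part of the patch:

	#include <stdio.h>

	/* Assumption: s390 huge pages are 1 MB segments, so HPAGE_SHIFT is 20. */
	#define HPAGE_SHIFT	20
	#define HPAGE_MASK	(~((1UL << HPAGE_SHIFT) - 1))

	/*
	 * hpage_paddr() is a hypothetical stand-in for the open-coded pattern
	 * in set_guest_storage_key(), reset_guest_reference_bit() and
	 * get_guest_storage_key(): take the physical segment origin from the
	 * huge pmd, then splice in the byte offset of the virtual address.
	 */
	static unsigned long hpage_paddr(unsigned long pmd_origin, unsigned long addr)
	{
		unsigned long paddr = pmd_origin & HPAGE_MASK;	/* segment origin */

		paddr |= addr & ~HPAGE_MASK;	/* offset within the segment */
		return paddr;
	}

	int main(void)
	{
		/* Hypothetical values: segment at 0x40000000, access at +0x12345. */
		printf("%#lx\n", hpage_paddr(0x40000000UL, 0x80012345UL));
		return 0;
	}

Keeping the offset bits matters because page_set_storage_key(), page_get_storage_key() and page_reset_referenced() act on the physical frame that backs the access, not on the segment as a whole.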
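The dirty-sync helper also changes calling convention: test_and_clear_guest_dirty() walked the page tables itself, whereas ptep_test_and_clear_uc() expects the caller to hand in an already mapped pte, so that a huge-page-aware walker (the gmap code in this series) can sit in front of it. A hedged sketch of what a 4 KB-only caller could look like, assuming the usual get_locked_pte()/pte_unmap_unlock() helpers; sync_dirty_one() is illustrative, not the actual gmap call site:

	/*
	 * Sketch only (kernel context, <linux/mm.h>): drive
	 * ptep_test_and_clear_uc() for a single 4 KB page by resolving and
	 * locking the pte first, as the removed walk in
	 * test_and_clear_guest_dirty() used to do.
	 */
	static bool sync_dirty_one(struct mm_struct *mm, unsigned long addr)
	{
		spinlock_t *ptl;
		pte_t *ptep;
		bool dirty;

		ptep = get_locked_pte(mm, addr, &ptl);
		if (unlikely(!ptep))
			return false;
		dirty = ptep_test_and_clear_uc(mm, addr, ptep);
		pte_unmap_unlock(ptep, ptl);
		return dirty;
	}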