Diffstat (limited to 'arch/powerpc/kernel/iommu.c')
-rw-r--r--  arch/powerpc/kernel/iommu.c  97
1 file changed, 65 insertions(+), 32 deletions(-)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 0a67ce9f827e..9704f3f76e63 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -633,11 +633,54 @@ static void iommu_table_clear(struct iommu_table *tbl)
 #endif
 }
 
+static void iommu_table_reserve_pages(struct iommu_table *tbl,
+		unsigned long res_start, unsigned long res_end)
+{
+	int i;
+
+	WARN_ON_ONCE(res_end < res_start);
+	/*
+	 * Reserve page 0 so it will not be used for any mappings.
+	 * This avoids buggy drivers that consider page 0 to be invalid
+	 * to crash the machine or even lose data.
+	 */
+	if (tbl->it_offset == 0)
+		set_bit(0, tbl->it_map);
+
+	tbl->it_reserved_start = res_start;
+	tbl->it_reserved_end = res_end;
+
+	/* Check if res_start..res_end isn't empty and overlaps the table */
+	if (res_start && res_end &&
+			(tbl->it_offset + tbl->it_size < res_start ||
+			 res_end < tbl->it_offset))
+		return;
+
+	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
+		set_bit(i - tbl->it_offset, tbl->it_map);
+}
+
+static void iommu_table_release_pages(struct iommu_table *tbl)
+{
+	int i;
+
+	/*
+	 * In case we have reserved the first bit, we should not emit
+	 * the warning below.
+	 */
+	if (tbl->it_offset == 0)
+		clear_bit(0, tbl->it_map);
+
+	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
+		clear_bit(i - tbl->it_offset, tbl->it_map);
+}
+
 /*
  * Build a iommu_table structure.  This contains a bit map which
  * is used to manage allocation of the tce space.
  */
-struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
+struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
+		unsigned long res_start, unsigned long res_end)
 {
 	unsigned long sz;
 	static int welcomed = 0;
@@ -656,13 +699,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 	tbl->it_map = page_address(page);
 	memset(tbl->it_map, 0, sz);
 
-	/*
-	 * Reserve page 0 so it will not be used for any mappings.
-	 * This avoids buggy drivers that consider page 0 to be invalid
-	 * to crash the machine or even lose data.
-	 */
-	if (tbl->it_offset == 0)
-		set_bit(0, tbl->it_map);
+	iommu_table_reserve_pages(tbl, res_start, res_end);
 
 	/* We only split the IOMMU table if we have 1GB or more of space */
 	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
@@ -714,12 +751,7 @@ static void iommu_table_free(struct kref *kref)
 		return;
 	}
 
-	/*
-	 * In case we have reserved the first bit, we should not emit
-	 * the warning below.
-	 */
-	if (tbl->it_offset == 0)
-		clear_bit(0, tbl->it_map);
+	iommu_table_release_pages(tbl);
 
 	/* verify that table contains no entries */
 	if (!bitmap_empty(tbl->it_map, tbl->it_size))
@@ -981,29 +1013,32 @@ int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
 }
 EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
 
-long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl,
+extern long iommu_tce_xchg_no_kill(struct mm_struct *mm,
+		struct iommu_table *tbl,
 		unsigned long entry, unsigned long *hpa,
 		enum dma_data_direction *direction)
 {
 	long ret;
 	unsigned long size = 0;
 
-	ret = tbl->it_ops->exchange(tbl, entry, hpa, direction);
-
+	ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, false);
 	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
 			(*direction == DMA_BIDIRECTIONAL)) &&
 			!mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
 					&size))
 		SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));
 
-	/* if (unlikely(ret))
-		pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n",
-			__func__, hwaddr, entry << tbl->it_page_shift,
-				hwaddr, ret); */
-
 	return ret;
 }
-EXPORT_SYMBOL_GPL(iommu_tce_xchg);
+EXPORT_SYMBOL_GPL(iommu_tce_xchg_no_kill);
+
+void iommu_tce_kill(struct iommu_table *tbl,
+		unsigned long entry, unsigned long pages)
+{
+	if (tbl->it_ops->tce_kill)
+		tbl->it_ops->tce_kill(tbl, entry, pages, false);
+}
+EXPORT_SYMBOL_GPL(iommu_tce_kill);
 
 int iommu_take_ownership(struct iommu_table *tbl)
 {
@@ -1017,22 +1052,21 @@ int iommu_take_ownership(struct iommu_table *tbl)
 	 * requires exchange() callback defined so if it is not
 	 * implemented, we disallow taking ownership over the table.
 	 */
-	if (!tbl->it_ops->exchange)
+	if (!tbl->it_ops->xchg_no_kill)
 		return -EINVAL;
 
 	spin_lock_irqsave(&tbl->large_pool.lock, flags);
 	for (i = 0; i < tbl->nr_pools; i++)
 		spin_lock(&tbl->pools[i].lock);
 
-	if (tbl->it_offset == 0)
-		clear_bit(0, tbl->it_map);
+	iommu_table_release_pages(tbl);
 
 	if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
 		pr_err("iommu_tce: it_map is not empty");
 		ret = -EBUSY;
-		/* Restore bit#0 set by iommu_init_table() */
-		if (tbl->it_offset == 0)
-			set_bit(0, tbl->it_map);
+		/* Undo iommu_table_release_pages, i.e. restore bit#0, etc */
+		iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
+				tbl->it_reserved_end);
	} else {
 		memset(tbl->it_map, 0xff, sz);
 	}
@@ -1055,9 +1089,8 @@ void iommu_release_ownership(struct iommu_table *tbl)
 
 	memset(tbl->it_map, 0, sz);
 
-	/* Restore bit#0 set by iommu_init_table() */
-	if (tbl->it_offset == 0)
-		set_bit(0, tbl->it_map);
+	iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
+			tbl->it_reserved_end);
 
 	for (i = 0; i < tbl->nr_pools; i++)
 		spin_unlock(&tbl->pools[i].lock);
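Taken together, the patch reworks two things: iommu_init_table() now takes a reserved TCE range that iommu_table_reserve_pages() marks as in use (passing 0, 0 reserves nothing beyond the page-0 guard, since the marking loop runs from res_start to res_end), and TCE invalidation is split out of the update path so a batch of iommu_tce_xchg_no_kill() calls can be followed by a single iommu_tce_kill(). The following is a minimal caller sketch, assuming powerpc kernel context; example_map_one() and its argument values are hypothetical, and only iommu_init_table(), iommu_tce_xchg_no_kill() and iommu_tce_kill() come from this patch:

#include <linux/mm_types.h>		/* struct mm_struct */
#include <linux/dma-direction.h>	/* enum dma_data_direction */
#include <asm/iommu.h>			/* struct iommu_table and the helpers above */

/* Hypothetical caller illustrating the reworked calling convention. */
static long example_map_one(struct iommu_table *tbl, int nid,
		struct mm_struct *mm, unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
	long ret;

	/* The reserved range is now claimed at init time; (0, 0) skips it. */
	tbl = iommu_init_table(tbl, nid, 0, 0);

	/*
	 * Update the TCE without flushing, then invalidate once. A caller
	 * with many entries would loop over iommu_tce_xchg_no_kill() and
	 * issue one iommu_tce_kill() covering the whole updated range,
	 * paying for a single flush instead of one per entry.
	 */
	ret = iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
	if (!ret)
		iommu_tce_kill(tbl, entry, 1);

	return ret;
}

Note that the hard-coded false passed to the xchg_no_kill and tce_kill callbacks in the patch is the callbacks' realmode flag, so these exported wrappers always take the virtual-mode path of the platform hooks.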