diff options
Diffstat (limited to 'drivers/iommu/amd/iommu.c')
-rw-r--r--  drivers/iommu/amd/iommu.c | 99
1 file changed, 53 insertions(+), 46 deletions(-)
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 9ea40960978b..c3b58a8389b9 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -1182,11 +1182,11 @@ static int iommu_completion_wait(struct amd_iommu *iommu)  	if (!iommu->need_sync)  		return 0; -	raw_spin_lock_irqsave(&iommu->lock, flags); - -	data = ++iommu->cmd_sem_val; +	data = atomic64_add_return(1, &iommu->cmd_sem_val);  	build_completion_wait(&cmd, iommu, data); +	raw_spin_lock_irqsave(&iommu->lock, flags); +  	ret = __iommu_queue_command_sync(iommu, &cmd, false);  	if (ret)  		goto out_unlock; @@ -1273,6 +1273,9 @@ static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)  	u32 devid;  	u16 last_bdf = iommu->pci_seg->last_bdf; +	if (iommu->irtcachedis_enabled) +		return; +  	for (devid = 0; devid <= last_bdf; devid++)  		iommu_flush_irt(iommu, devid); @@ -2313,6 +2316,8 @@ static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)  		return amdr_ivrs_remap_support;  	case IOMMU_CAP_ENFORCE_CACHE_COHERENCY:  		return true; +	case IOMMU_CAP_DEFERRED_FLUSH: +		return true;  	default:  		break;  	} @@ -2822,6 +2827,32 @@ EXPORT_SYMBOL(amd_iommu_device_info);  static struct irq_chip amd_ir_chip;  static DEFINE_SPINLOCK(iommu_table_lock); +static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid) +{ +	int ret; +	u64 data; +	unsigned long flags; +	struct iommu_cmd cmd, cmd2; + +	if (iommu->irtcachedis_enabled) +		return; + +	build_inv_irt(&cmd, devid); +	data = atomic64_add_return(1, &iommu->cmd_sem_val); +	build_completion_wait(&cmd2, iommu, data); + +	raw_spin_lock_irqsave(&iommu->lock, flags); +	ret = __iommu_queue_command_sync(iommu, &cmd, true); +	if (ret) +		goto out; +	ret = __iommu_queue_command_sync(iommu, &cmd2, false); +	if (ret) +		goto out; +	wait_on_sem(iommu, data); +out: +	raw_spin_unlock_irqrestore(&iommu->lock, flags); +} +  static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,  			      struct 
irq_remap_table *table)  { @@ -3021,7 +3052,7 @@ out:  }  static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index, -			  struct irte_ga *irte, struct amd_ir_data *data) +			  struct irte_ga *irte)  {  	struct irq_remap_table *table;  	struct irte_ga *entry; @@ -3046,13 +3077,9 @@ static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,  	old = entry->irte;  	WARN_ON(!try_cmpxchg128(&entry->irte, &old, irte->irte)); -	if (data) -		data->ref = entry; -  	raw_spin_unlock_irqrestore(&table->lock, flags); -	iommu_flush_irt(iommu, devid); -	iommu_completion_wait(iommu); +	iommu_flush_irt_and_complete(iommu, devid);  	return 0;  } @@ -3071,8 +3098,7 @@ static int modify_irte(struct amd_iommu *iommu,  	table->table[index] = irte->val;  	raw_spin_unlock_irqrestore(&table->lock, flags); -	iommu_flush_irt(iommu, devid); -	iommu_completion_wait(iommu); +	iommu_flush_irt_and_complete(iommu, devid);  	return 0;  } @@ -3090,8 +3116,7 @@ static void free_irte(struct amd_iommu *iommu, u16 devid, int index)  	iommu->irte_ops->clear_allocated(table, index);  	raw_spin_unlock_irqrestore(&table->lock, flags); -	iommu_flush_irt(iommu, devid); -	iommu_completion_wait(iommu); +	iommu_flush_irt_and_complete(iommu, devid);  }  static void irte_prepare(void *entry, @@ -3137,7 +3162,7 @@ static void irte_ga_activate(struct amd_iommu *iommu, void *entry, u16 devid, u1  	struct irte_ga *irte = (struct irte_ga *) entry;  	irte->lo.fields_remap.valid = 1; -	modify_irte_ga(iommu, devid, index, irte, NULL); +	modify_irte_ga(iommu, devid, index, irte);  }  static void irte_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) @@ -3153,7 +3178,7 @@ static void irte_ga_deactivate(struct amd_iommu *iommu, void *entry, u16 devid,  	struct irte_ga *irte = (struct irte_ga *) entry;  	irte->lo.fields_remap.valid = 0; -	modify_irte_ga(iommu, devid, index, irte, NULL); +	modify_irte_ga(iommu, devid, index, irte);  }  static void irte_set_affinity(struct 
amd_iommu *iommu, void *entry, u16 devid, u16 index, @@ -3177,7 +3202,7 @@ static void irte_ga_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid  					APICID_TO_IRTE_DEST_LO(dest_apicid);  		irte->hi.fields.destination =  					APICID_TO_IRTE_DEST_HI(dest_apicid); -		modify_irte_ga(iommu, devid, index, irte, NULL); +		modify_irte_ga(iommu, devid, index, irte);  	}  } @@ -3527,7 +3552,7 @@ int amd_iommu_activate_guest_mode(void *data)  	entry->lo.fields_vapic.ga_tag      = ir_data->ga_tag;  	return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, -			      ir_data->irq_2_irte.index, entry, ir_data); +			      ir_data->irq_2_irte.index, entry);  }  EXPORT_SYMBOL(amd_iommu_activate_guest_mode); @@ -3557,7 +3582,7 @@ int amd_iommu_deactivate_guest_mode(void *data)  				APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);  	return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, -			      ir_data->irq_2_irte.index, entry, ir_data); +			      ir_data->irq_2_irte.index, entry);  }  EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode); @@ -3719,44 +3744,26 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu)  int amd_iommu_update_ga(int cpu, bool is_run, void *data)  { -	unsigned long flags; -	struct amd_iommu *iommu; -	struct irq_remap_table *table;  	struct amd_ir_data *ir_data = (struct amd_ir_data *)data; -	int devid = ir_data->irq_2_irte.devid;  	struct irte_ga *entry = (struct irte_ga *) ir_data->entry; -	struct irte_ga *ref = (struct irte_ga *) ir_data->ref;  	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || -	    !ref || !entry || !entry->lo.fields_vapic.guest_mode) +	    !entry || !entry->lo.fields_vapic.guest_mode)  		return 0; -	iommu = ir_data->iommu; -	if (!iommu) +	if (!ir_data->iommu)  		return -ENODEV; -	table = get_irq_table(iommu, devid); -	if (!table) -		return -ENODEV; - -	raw_spin_lock_irqsave(&table->lock, flags); - -	if (ref->lo.fields_vapic.guest_mode) { -		if (cpu >= 0) { -			ref->lo.fields_vapic.destination = -						
APICID_TO_IRTE_DEST_LO(cpu); -			ref->hi.fields.destination = -						APICID_TO_IRTE_DEST_HI(cpu); -		} -		ref->lo.fields_vapic.is_run = is_run; -		barrier(); +	if (cpu >= 0) { +		entry->lo.fields_vapic.destination = +					APICID_TO_IRTE_DEST_LO(cpu); +		entry->hi.fields.destination = +					APICID_TO_IRTE_DEST_HI(cpu);  	} +	entry->lo.fields_vapic.is_run = is_run; -	raw_spin_unlock_irqrestore(&table->lock, flags); - -	iommu_flush_irt(iommu, devid); -	iommu_completion_wait(iommu); -	return 0; +	return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, +			      ir_data->irq_2_irte.index, entry);  }  EXPORT_SYMBOL(amd_iommu_update_ga);  #endif  |