author    | Linus Torvalds <torvalds@linux-foundation.org> | 2023-06-29 20:51:03 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-06-29 20:51:03 -0700
commit    | d35ac6ac0e80e55bcea79af18d935f19a3e8554c (patch)
tree      | 84281d7724f5e67c2c0a599dd9ffdeccb96bce9f /drivers/iommu/amd/iommu.c
parent    | 0b26eadbf200abf6c97c6d870286c73219cdac65 (diff)
parent    | a7a334076dd725b8c3b5d64f68e3992ffcfd1d25 (diff)
Merge tag 'iommu-updates-v6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull iommu updates from Joerg Roedel:

 "Core changes:
   - iova_magazine_alloc() optimization
   - Make flush-queue an IOMMU driver capability (a sketch of the
     capability contract follows this message)
   - Consolidate the error handling around device attachment

  AMD IOMMU changes:
   - AVIC Interrupt Remapping Improvements
   - Some minor fixes and cleanups

  Intel VT-d changes from Lu Baolu:
   - Small and misc cleanups

  ARM-SMMU changes from Will Deacon:
   - Device-tree binding updates:
      - Add missing clocks for SC8280XP and SA8775 Adreno SMMUs
      - Add two new Qualcomm SMMUs in SDX75 and SM6375
   - Workarounds for Arm MMU-700 errata:
      - 1076982: Avoid use of SEV-based cmdq wakeup
      - 2812531: Terminate command batches with a CMD_SYNC
   - Enforce single-stage translation to avoid nesting-related errata
   - Set the correct level hint for range TLB invalidation on teardown

  ... and some other minor fixes and cleanups (including Freescale PAMU
  and virtio-iommu changes)"
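The flush-queue capability item above is visible in this very file: the diff below adds an IOMMU_CAP_DEFERRED_FLUSH case to amd_iommu_capable(), so the core enables queued (lazy) IOTLB invalidation only for devices whose driver declares it safe. A minimal sketch of both sides of that contract follows; enum iommu_cap and device_iommu_capable() are the real v6.5 interfaces, while example_iommu_capable() and example_want_flush_queue() are hypothetical names used only for illustration.

```c
#include <linux/device.h>
#include <linux/iommu.h>

/*
 * Driver side: advertise that IOTLB invalidations for this device may
 * be batched in a flush queue. Mirrors the IOMMU_CAP_DEFERRED_FLUSH
 * case added to amd_iommu_capable() in the diff below;
 * example_iommu_capable() is a hypothetical callback, not kernel code.
 */
static bool example_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_DEFERRED_FLUSH:
		return true;	/* deferred/queued unmap flushing is safe */
	default:
		return false;
	}
}

/*
 * Consumer side: the DMA layer can key lazy invalidation (DMA-FQ
 * domains) off the capability and fall back to strict invalidation
 * when the driver does not report it (sketch only).
 */
static bool example_want_flush_queue(struct device *dev)
{
	return device_iommu_capable(dev, IOMMU_CAP_DEFERRED_FLUSH);
}
```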
* tag 'iommu-updates-v6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (50 commits)
iommu/vt-d: Remove commented-out code
iommu/vt-d: Remove two WARN_ON in domain_context_mapping_one()
iommu/vt-d: Handle the failure case of dmar_reenable_qi()
iommu/vt-d: Remove unnecessary (void*) conversions
iommu/amd: Remove extern from function prototypes
iommu/amd: Use BIT/BIT_ULL macro to define bit fields
iommu/amd: Fix DTE_IRQ_PHYS_ADDR_MASK macro
iommu/amd: Fix compile error for unused function
iommu/amd: Improving Interrupt Remapping Table Invalidation
iommu/amd: Do not Invalidate IRT when IRTE caching is disabled
iommu/amd: Introduce Disable IRTE Caching Support
iommu/amd: Remove the unused struct amd_ir_data.ref
iommu/amd: Switch amd_iommu_update_ga() to use modify_irte_ga()
iommu/arm-smmu-v3: Set TTL invalidation hint better
iommu/arm-smmu-v3: Document nesting-related errata
iommu/arm-smmu-v3: Add explicit feature for nesting
iommu/arm-smmu-v3: Document MMU-700 erratum 2812531
iommu/arm-smmu-v3: Work around MMU-600 erratum 1076982
dt-bindings: arm-smmu: Add SDX75 SMMU compatible
dt-bindings: arm-smmu: Add SM6375 GPU SMMU
...
Diffstat (limited to 'drivers/iommu/amd/iommu.c')
-rw-r--r-- | drivers/iommu/amd/iommu.c | 99
1 file changed, 53 insertions(+), 46 deletions(-)
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 9ea40960978b..c3b58a8389b9 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -1182,11 +1182,11 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 	if (!iommu->need_sync)
 		return 0;

-	raw_spin_lock_irqsave(&iommu->lock, flags);
-
-	data = ++iommu->cmd_sem_val;
+	data = atomic64_add_return(1, &iommu->cmd_sem_val);
 	build_completion_wait(&cmd, iommu, data);

+	raw_spin_lock_irqsave(&iommu->lock, flags);
+
 	ret = __iommu_queue_command_sync(iommu, &cmd, false);
 	if (ret)
 		goto out_unlock;
@@ -1273,6 +1273,9 @@ static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
 	u32 devid;
 	u16 last_bdf = iommu->pci_seg->last_bdf;

+	if (iommu->irtcachedis_enabled)
+		return;
+
 	for (devid = 0; devid <= last_bdf; devid++)
 		iommu_flush_irt(iommu, devid);
 }
@@ -2313,6 +2316,8 @@ static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)
 		return amdr_ivrs_remap_support;
 	case IOMMU_CAP_ENFORCE_CACHE_COHERENCY:
 		return true;
+	case IOMMU_CAP_DEFERRED_FLUSH:
+		return true;
 	default:
 		break;
 	}
@@ -2822,6 +2827,32 @@ EXPORT_SYMBOL(amd_iommu_device_info);
 static struct irq_chip amd_ir_chip;
 static DEFINE_SPINLOCK(iommu_table_lock);

+static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
+{
+	int ret;
+	u64 data;
+	unsigned long flags;
+	struct iommu_cmd cmd, cmd2;
+
+	if (iommu->irtcachedis_enabled)
+		return;
+
+	build_inv_irt(&cmd, devid);
+	data = atomic64_add_return(1, &iommu->cmd_sem_val);
+	build_completion_wait(&cmd2, iommu, data);
+
+	raw_spin_lock_irqsave(&iommu->lock, flags);
+	ret = __iommu_queue_command_sync(iommu, &cmd, true);
+	if (ret)
+		goto out;
+	ret = __iommu_queue_command_sync(iommu, &cmd2, false);
+	if (ret)
+		goto out;
+	wait_on_sem(iommu, data);
+out:
+	raw_spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
 static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
 			      struct irq_remap_table *table)
 {
@@ -3021,7 +3052,7 @@ out:
 }

 static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
-			  struct irte_ga *irte, struct amd_ir_data *data)
+			  struct irte_ga *irte)
 {
 	struct irq_remap_table *table;
 	struct irte_ga *entry;
@@ -3046,13 +3077,9 @@ static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
 	old = entry->irte;
 	WARN_ON(!try_cmpxchg128(&entry->irte, &old, irte->irte));

-	if (data)
-		data->ref = entry;
-
 	raw_spin_unlock_irqrestore(&table->lock, flags);

-	iommu_flush_irt(iommu, devid);
-	iommu_completion_wait(iommu);
+	iommu_flush_irt_and_complete(iommu, devid);

 	return 0;
 }
@@ -3071,8 +3098,7 @@ static int modify_irte(struct amd_iommu *iommu,
 	table->table[index] = irte->val;
 	raw_spin_unlock_irqrestore(&table->lock, flags);

-	iommu_flush_irt(iommu, devid);
-	iommu_completion_wait(iommu);
+	iommu_flush_irt_and_complete(iommu, devid);

 	return 0;
 }
@@ -3090,8 +3116,7 @@ static void free_irte(struct amd_iommu *iommu, u16 devid, int index)
 	iommu->irte_ops->clear_allocated(table, index);
 	raw_spin_unlock_irqrestore(&table->lock, flags);

-	iommu_flush_irt(iommu, devid);
-	iommu_completion_wait(iommu);
+	iommu_flush_irt_and_complete(iommu, devid);
 }

 static void irte_prepare(void *entry,
@@ -3137,7 +3162,7 @@ static void irte_ga_activate(struct amd_iommu *iommu, void *entry, u16 devid, u1
 	struct irte_ga *irte = (struct irte_ga *) entry;

 	irte->lo.fields_remap.valid = 1;
-	modify_irte_ga(iommu, devid, index, irte, NULL);
+	modify_irte_ga(iommu, devid, index, irte);
 }

 static void irte_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
@@ -3153,7 +3178,7 @@ static void irte_ga_deactivate(struct amd_iommu *iommu, void *entry, u16 devid,
 	struct irte_ga *irte = (struct irte_ga *) entry;

 	irte->lo.fields_remap.valid = 0;
-	modify_irte_ga(iommu, devid, index, irte, NULL);
+	modify_irte_ga(iommu, devid, index, irte);
 }

 static void irte_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
@@ -3177,7 +3202,7 @@ static void irte_ga_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid
 					APICID_TO_IRTE_DEST_LO(dest_apicid);
 		irte->hi.fields.destination =
 					APICID_TO_IRTE_DEST_HI(dest_apicid);
-		modify_irte_ga(iommu, devid, index, irte, NULL);
+		modify_irte_ga(iommu, devid, index, irte);
 	}
 }

@@ -3527,7 +3552,7 @@ int amd_iommu_activate_guest_mode(void *data)
 	entry->lo.fields_vapic.ga_tag = ir_data->ga_tag;

 	return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
-			      ir_data->irq_2_irte.index, entry, ir_data);
+			      ir_data->irq_2_irte.index, entry);
 }
 EXPORT_SYMBOL(amd_iommu_activate_guest_mode);

@@ -3557,7 +3582,7 @@ int amd_iommu_deactivate_guest_mode(void *data)
 				APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);

 	return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
-			      ir_data->irq_2_irte.index, entry, ir_data);
+			      ir_data->irq_2_irte.index, entry);
 }
 EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);

@@ -3719,44 +3744,26 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu)

 int amd_iommu_update_ga(int cpu, bool is_run, void *data)
 {
-	unsigned long flags;
-	struct amd_iommu *iommu;
-	struct irq_remap_table *table;
 	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
-	int devid = ir_data->irq_2_irte.devid;
 	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
-	struct irte_ga *ref = (struct irte_ga *) ir_data->ref;

 	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
-	    !ref || !entry || !entry->lo.fields_vapic.guest_mode)
+	    !entry || !entry->lo.fields_vapic.guest_mode)
 		return 0;

-	iommu = ir_data->iommu;
-	if (!iommu)
+	if (!ir_data->iommu)
 		return -ENODEV;

-	table = get_irq_table(iommu, devid);
-	if (!table)
-		return -ENODEV;
-
-	raw_spin_lock_irqsave(&table->lock, flags);
-
-	if (ref->lo.fields_vapic.guest_mode) {
-		if (cpu >= 0) {
-			ref->lo.fields_vapic.destination =
-				APICID_TO_IRTE_DEST_LO(cpu);
-			ref->hi.fields.destination =
-				APICID_TO_IRTE_DEST_HI(cpu);
-		}
-		ref->lo.fields_vapic.is_run = is_run;
-		barrier();
+	if (cpu >= 0) {
+		entry->lo.fields_vapic.destination =
+					APICID_TO_IRTE_DEST_LO(cpu);
+		entry->hi.fields.destination =
+					APICID_TO_IRTE_DEST_HI(cpu);
 	}
+	entry->lo.fields_vapic.is_run = is_run;

-	raw_spin_unlock_irqrestore(&table->lock, flags);
-
-	iommu_flush_irt(iommu, devid);
-	iommu_completion_wait(iommu);
-	return 0;
+	return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
+			      ir_data->irq_2_irte.index, entry);
 }
 EXPORT_SYMBOL(amd_iommu_update_ga);
 #endif
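Two related changes run through the AMD hunks above. First, iommu->cmd_sem_val is now updated with atomic64_add_return(), so a completion-wait sequence number can be drawn before iommu->lock is taken. Second, the new iommu_flush_irt_and_complete() helper queues the interrupt-table invalidation and its COMPLETION_WAIT back to back inside a single critical section, where modify_irte_ga() and friends previously locked once for iommu_flush_irt() and again inside iommu_completion_wait(). Reduced to portable C11 with pthreads, the pattern looks roughly like the sketch below; every identifier in it (build_cmd, queue_cmd, wait_on_seq, ...) is an illustrative stand-in, not the kernel API.

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

struct cmd { uint64_t payload; };

static pthread_mutex_t cmd_queue_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_uint_fast64_t cmd_sem_val;	/* previously a plain u64 guarded by the lock */

/* Stand-ins for build_inv_irt()/build_completion_wait(),
 * __iommu_queue_command_sync() and wait_on_sem(). */
static void build_cmd(struct cmd *c, uint64_t payload) { c->payload = payload; }
static int  queue_cmd(const struct cmd *c) { (void)c; return 0; }
static void wait_on_seq(uint64_t seq) { (void)seq; }

static void flush_irt_and_complete(uint16_t devid)
{
	struct cmd inv, comp;
	uint64_t seq;

	/* Build both commands before taking the lock; the atomic counter
	 * is what makes the early sequence-number allocation safe. */
	build_cmd(&inv, devid);
	seq = atomic_fetch_add(&cmd_sem_val, 1) + 1;	/* ~atomic64_add_return() */
	build_cmd(&comp, seq);

	/* One critical section covers flush + completion-wait, replacing
	 * the old flush-then-separately-wait double lock round trip. */
	pthread_mutex_lock(&cmd_queue_lock);
	if (queue_cmd(&inv) == 0 && queue_cmd(&comp) == 0)
		wait_on_seq(seq);
	pthread_mutex_unlock(&cmd_queue_lock);
}
```

The helper's early return when iommu->irtcachedis_enabled is set (and the matching check added to amd_iommu_flush_irt_all()) is omitted from the sketch: with IRTE caching disabled the IOMMU fetches remapping entries from memory on each interrupt, so there is no cached entry to invalidate.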