author     Linus Torvalds <[email protected]>   2024-11-22 19:55:10 -0800
committer  Linus Torvalds <[email protected]>   2024-11-22 19:55:10 -0800
commit     ceba6f6f33f29ab838b23a567621b847e527d085 (patch)
tree       b6d72001108e6ca83bf28458470c1699ce5cb338 /drivers/iommu/intel/iommu.h
parent     eb78332b1067776ca4a474ccfd92460014e8d8e3 (diff)
parent     42f0cbb2a253bcd7d4f20e80462014622f19d88e (diff)
Merge tag 'iommu-updates-v6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux
Pull iommu updates from Joerg Roedel:
 "Core Updates:
   - Convert call-sites using iommu_domain_alloc() to more specific
     versions and remove function
   - Introduce iommu_paging_domain_alloc_flags()
   - Extend support for allocating PASID-capable domains to more drivers
   - Remove iommu_present()
   - Some smaller improvements

  New IOMMU driver for RISC-V

  Intel VT-d Updates:
   - Add domain_alloc_paging support
   - Enable user space IOPFs in non-PASID and non-svm cases
   - Small code refactoring and cleanups
   - Add domain replacement support for pasid

  AMD-Vi Updates:
   - Adapt to iommu_paging_domain_alloc_flags() interface and alloc V2
     page-tables by default
   - Replace custom domain ID allocator with IDA allocator
   - Add ops->release_domain() support
   - Other improvements to device attach and domain allocation code paths

  ARM-SMMU Updates:
   - SMMUv2:
      - Return -EPROBE_DEFER for client devices probing before their SMMU
      - Devicetree binding updates for Qualcomm MMU-500 implementations
   - SMMUv3:
      - Minor fixes and cleanup for NVIDIA's virtual command queue driver
   - IO-PGTable:
      - Fix indexing of concatenated PGDs and extend selftest coverage
      - Remove unused block-splitting support

  S390 IOMMU:
   - Implement support for blocking domain

  Mediatek IOMMU:
   - Enable 35-bit physical address support for mt8186

  OMAP IOMMU driver:
   - Adapt to recent IOMMU core changes and unbreak driver"

* tag 'iommu-updates-v6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux: (92 commits)
  iommu/tegra241-cmdqv: Fix alignment failure at max_n_shift
  iommu: Make set_dev_pasid op support domain replacement
  iommu/arm-smmu-v3: Make set_dev_pasid() op support replace
  iommu/vt-d: Add set_dev_pasid callback for nested domain
  iommu/vt-d: Make identity_domain_set_dev_pasid() to handle domain replacement
  iommu/vt-d: Make intel_svm_set_dev_pasid() support domain replacement
  iommu/vt-d: Limit intel_iommu_set_dev_pasid() for paging domain
  iommu/vt-d: Make intel_iommu_set_dev_pasid() to handle domain replacement
  iommu/vt-d: Add iommu_domain_did() to get did
  iommu/vt-d: Consolidate the struct dev_pasid_info add/remove
  iommu/vt-d: Add pasid replace helpers
  iommu/vt-d: Refactor the pasid setup helpers
  iommu/vt-d: Add a helper to flush cache for updating present pasid entry
  iommu: Pass old domain to set_dev_pasid op
  iommu/iova: Fix typo 'adderss'
  iommu: Add a kdoc to iommu_unmap()
  iommu/io-pgtable-arm-v7s: Remove split on unmap behavior
  iommu/io-pgtable-arm: Remove split on unmap behavior
  iommu/vt-d: Drain PRQs when domain removed from RID
  iommu/vt-d: Drop pasid requirement for prq initialization
  ...
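The call-site conversion mentioned in the core updates follows a simple pattern: allocate a paging domain per device instead of per bus. Below is a minimal, hedged sketch of what such a conversion might look like, assuming the iommu_paging_domain_alloc_flags() interface introduced in this cycle; my_driver_attach() and its error handling are hypothetical and only illustrate the before/after shape, not any specific driver.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iommu.h>

/* Hypothetical driver attach path, shown only to illustrate the API change. */
static int my_driver_attach(struct device *dev)
{
        struct iommu_domain *domain;
        int ret;

        /*
         * Previously: domain = iommu_domain_alloc(dev->bus);
         * The bus-based allocator is removed; allocation is now per-device
         * and takes a flags argument (0 requests a plain paging domain).
         */
        domain = iommu_paging_domain_alloc_flags(dev, 0);
        if (IS_ERR(domain))
                return PTR_ERR(domain);

        ret = iommu_attach_device(domain, dev);
        if (ret) {
                iommu_domain_free(domain);
                return ret;
        }

        return 0;
}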
Diffstat (limited to 'drivers/iommu/intel/iommu.h')
-rw-r--r--  drivers/iommu/intel/iommu.h | 56
1 file changed, 42 insertions(+), 14 deletions(-)
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index 1497f3112b12..2cca094c259d 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -22,6 +22,7 @@
#include <linux/bitfield.h>
#include <linux/xarray.h>
#include <linux/perf_event.h>
+#include <linux/pci.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
@@ -653,8 +654,6 @@ struct dmar_domain {
struct {
/* parent page table which the user domain is nested on */
struct dmar_domain *s2_domain;
- /* user page table pointer (in GPA) */
- unsigned long s1_pgtbl;
/* page table attributes */
struct iommu_hwpt_vtd_s1 s1_cfg;
/* link to parent domain siblings */
@@ -720,7 +719,7 @@ struct intel_iommu {
int msagaw; /* max sagaw of this iommu */
unsigned int irq, pr_irq, perf_irq;
u16 segment; /* PCI segment# */
- unsigned char name[13]; /* Device Name */
+ unsigned char name[16]; /* Device Name */
#ifdef CONFIG_INTEL_IOMMU
unsigned long *domain_ids; /* bitmap of domains */
@@ -730,12 +729,10 @@ struct intel_iommu {
struct iommu_flush flush;
#endif
-#ifdef CONFIG_INTEL_IOMMU_SVM
struct page_req_dsc *prq;
unsigned char prq_name[16]; /* Name for PRQ interrupt */
unsigned long prq_seq_number;
struct completion prq_complete;
-#endif
struct iopf_queue *iopf_queue;
unsigned char iopfq_name[16];
/* Synchronization between fault report and iommu device release. */
@@ -810,6 +807,13 @@ static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
return container_of(dom, struct dmar_domain, domain);
}
+/*
+ * Domain ID reserved for pasid entries programmed for first-level
+ * only and pass-through transfer modes.
+ */
+#define FLPT_DEFAULT_DID 1
+#define NUM_RESERVED_DID 2
+
/* Retrieve the domain ID which has allocated to the domain */
static inline u16
domain_id_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
@@ -820,6 +824,21 @@ domain_id_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
return info->did;
}
+static inline u16
+iommu_domain_did(struct iommu_domain *domain, struct intel_iommu *iommu)
+{
+ if (domain->type == IOMMU_DOMAIN_SVA ||
+ domain->type == IOMMU_DOMAIN_IDENTITY)
+ return FLPT_DEFAULT_DID;
+ return domain_id_iommu(to_dmar_domain(domain), iommu);
+}
+
+static inline bool dev_is_real_dma_subdevice(struct device *dev)
+{
+ return dev && dev_is_pci(dev) &&
+ pci_real_dma_dev(to_pci_dev(dev)) != to_pci_dev(dev);
+}
+
/*
* 0: readable
* 1: writable
@@ -1230,9 +1249,18 @@ void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
void device_block_translation(struct device *dev);
-int prepare_domain_attach_device(struct iommu_domain *domain,
- struct device *dev);
-void domain_update_iommu_cap(struct dmar_domain *domain);
+int paging_domain_compatible(struct iommu_domain *domain, struct device *dev);
+
+struct dev_pasid_info *
+domain_add_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid);
+void domain_remove_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid);
+
+int __domain_setup_first_level(struct intel_iommu *iommu,
+ struct device *dev, ioasid_t pasid,
+ u16 did, pgd_t *pgd, int flags,
+ struct iommu_domain *old);
int dmar_ir_support(void);
@@ -1278,18 +1306,18 @@ void intel_context_flush_present(struct device_domain_info *info,
struct context_entry *context,
u16 did, bool affect_domains);
+int intel_iommu_enable_prq(struct intel_iommu *iommu);
+int intel_iommu_finish_prq(struct intel_iommu *iommu);
+void intel_iommu_page_response(struct device *dev, struct iopf_fault *evt,
+ struct iommu_page_response *msg);
+void intel_iommu_drain_pasid_prq(struct device *dev, u32 pasid);
+
#ifdef CONFIG_INTEL_IOMMU_SVM
void intel_svm_check(struct intel_iommu *iommu);
-int intel_svm_enable_prq(struct intel_iommu *iommu);
-int intel_svm_finish_prq(struct intel_iommu *iommu);
-void intel_svm_page_response(struct device *dev, struct iopf_fault *evt,
- struct iommu_page_response *msg);
struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
struct mm_struct *mm);
-void intel_drain_pasid_prq(struct device *dev, u32 pasid);
#else
static inline void intel_svm_check(struct intel_iommu *iommu) {}
-static inline void intel_drain_pasid_prq(struct device *dev, u32 pasid) {}
static inline struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
struct mm_struct *mm)
{
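The new iommu_domain_did() helper in the diff above lets PASID-related code obtain a domain ID for any domain type, returning the reserved FLPT_DEFAULT_DID for SVA and identity domains and the allocated DID otherwise. A hedged sketch of a possible caller follows, assuming the qi_flush_piotlb() invalidation helper already declared in the Intel VT-d driver; flush_domain_piotlb() itself is hypothetical and only shows how the helper centralizes the domain-type dispatch.

#include "iommu.h"      /* drivers/iommu/intel/iommu.h, as patched above */

/* Hypothetical helper: invalidate first-level IOTLB entries for one PASID. */
static void flush_domain_piotlb(struct intel_iommu *iommu,
                                struct iommu_domain *domain, u32 pasid)
{
        /* FLPT_DEFAULT_DID for SVA/identity domains, the allocated DID otherwise. */
        u16 did = iommu_domain_did(domain, iommu);

        /* Flush all first-level mappings tagged with this DID/PASID pair. */
        qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
}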