Diffstat (limited to 'include/linux/intel-iommu.h')
-rw-r--r--   include/linux/intel-iommu.h   25
1 file changed, 18 insertions, 7 deletions
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 6d8bf4bdf240..4a16b39ae353 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -34,10 +34,13 @@
 #define VTD_STRIDE_SHIFT        (9)
 #define VTD_STRIDE_MASK         (((u64)-1) << VTD_STRIDE_SHIFT)
 
-#define DMA_PTE_READ (1)
-#define DMA_PTE_WRITE (2)
-#define DMA_PTE_LARGE_PAGE (1 << 7)
-#define DMA_PTE_SNP (1 << 11)
+#define DMA_PTE_READ		BIT_ULL(0)
+#define DMA_PTE_WRITE		BIT_ULL(1)
+#define DMA_PTE_LARGE_PAGE	BIT_ULL(7)
+#define DMA_PTE_SNP		BIT_ULL(11)
+
+#define DMA_FL_PTE_PRESENT	BIT_ULL(0)
+#define DMA_FL_PTE_XD		BIT_ULL(63)
 
 #define CONTEXT_TT_MULTI_LEVEL	0
 #define CONTEXT_TT_DEV_IOTLB	1
@@ -435,8 +438,10 @@ enum {
 
 #define VTD_FLAG_TRANS_PRE_ENABLED	(1 << 0)
 #define VTD_FLAG_IRQ_REMAP_PRE_ENABLED	(1 << 1)
+#define VTD_FLAG_SVM_CAPABLE		(1 << 2)
 
 extern int intel_iommu_sm;
+extern spinlock_t device_domain_lock;
 
 #define sm_supported(iommu)	(intel_iommu_sm && ecap_smts((iommu)->ecap))
 #define pasid_supported(iommu)	(sm_supported(iommu) &&			\
@@ -609,10 +614,11 @@ static inline void dma_clear_pte(struct dma_pte *pte)
 static inline u64 dma_pte_addr(struct dma_pte *pte)
 {
 #ifdef CONFIG_64BIT
-	return pte->val & VTD_PAGE_MASK;
+	return pte->val & VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
 #else
 	/* Must have a full atomic 64-bit read */
-	return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
+	return  __cmpxchg64(&pte->val, 0ULL, 0ULL) &
+			VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
 #endif
 }
 
@@ -645,6 +651,8 @@ extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 			  unsigned int size_order, u64 type);
 extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
 			u16 qdep, u64 addr, unsigned mask);
+void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
+		     unsigned long npages, bool ih);
 extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 
 extern int dmar_ir_support(void);
@@ -656,9 +664,10 @@ int for_each_device_domain(int (*fn)(struct device_domain_info *info,
 				     void *data), void *data);
 void iommu_flush_write_buffer(struct intel_iommu *iommu);
 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
+struct dmar_domain *find_domain(struct device *dev);
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
-int intel_svm_init(struct intel_iommu *iommu);
+extern void intel_svm_check(struct intel_iommu *iommu);
 extern int intel_svm_enable_prq(struct intel_iommu *iommu);
 extern int intel_svm_finish_prq(struct intel_iommu *iommu);
 
@@ -686,6 +695,8 @@ struct intel_svm {
 };
 
 extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev);
+#else
+static inline void intel_svm_check(struct intel_iommu *iommu) {}
 #endif
 
 #ifdef CONFIG_INTEL_IOMMU_DEBUGFS
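
Note on the dma_pte_addr() change: with first-level (scalable-mode) page tables, bit 63 of a PTE is the execute-disable flag (DMA_FL_PTE_XD) rather than part of the address, so it has to be stripped along with the low flag bits when the physical address is read back. Below is a minimal stand-alone C sketch, not kernel code, that mirrors the masking; BIT_ULL() and VTD_PAGE_MASK are re-declared here only to make the example self-contained (VTD_PAGE_SHIFT of 12 is assumed), and the sample PTE value is invented for illustration.

/*
 * Sketch: why dma_pte_addr() masks out DMA_FL_PTE_XD in addition to
 * VTD_PAGE_MASK.  Bit 0 (present) and other low bits are removed by the
 * page mask; bit 63 (execute-disable) sits above the address and would
 * otherwise leak into the returned physical address.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)		(1ULL << (n))
#define VTD_PAGE_SHIFT		12
#define VTD_PAGE_MASK		(((uint64_t)-1) << VTD_PAGE_SHIFT)

#define DMA_FL_PTE_PRESENT	BIT_ULL(0)
#define DMA_FL_PTE_XD		BIT_ULL(63)

/* Same masking as the patched dma_pte_addr(): drop the low flag bits
 * via VTD_PAGE_MASK, then the execute-disable bit at the top. */
static uint64_t pte_addr(uint64_t val)
{
	return val & VTD_PAGE_MASK & ~DMA_FL_PTE_XD;
}

int main(void)
{
	/* Hypothetical first-level PTE: present, execute-disabled,
	 * mapping physical page 0x12345000. */
	uint64_t val = 0x12345000ULL | DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD;

	printf("raw pte:  0x%016llx\n", (unsigned long long)val);
	printf("address:  0x%016llx\n", (unsigned long long)pte_addr(val));
	/* Without the ~DMA_FL_PTE_XD term the result would keep a bogus
	 * bit 63: 0x8000000012345000 instead of 0x0000000012345000. */
	return 0;
}

For the second-level-only format the page mask alone was sufficient, since bit 63 could not be set; the extra mask only matters once first-level tables (where XD is legitimate) are used for DMA translation.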