Diffstat (limited to 'drivers/iommu/intel-iommu.c')
-rw-r--r--  drivers/iommu/intel-iommu.c  361
1 file changed, 332 insertions(+), 29 deletions(-)
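
The diff below routes DMA for untrusted PCI devices (for example, devices behind an external-facing or Thunderbolt port) through swiotlb bounce pages, unless bouncing is disabled with the new intel_iommu=nobounce option. As a minimal stand-alone C sketch of that gating decision, mirroring the device_needs_bounce() macro added in the patch; the struct and helper names here are simplified stand-ins, not the kernel API:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the PCI core's per-device state. */
struct fake_pci_dev {
	bool untrusted;		/* set by the PCI core for external-facing devices */
};

static bool intel_no_bounce;	/* would be set by intel_iommu=nobounce */

/* Mirrors device_needs_bounce(): bounce only untrusted PCI devices,
 * and only when bouncing was not disabled on the kernel command line. */
static bool device_needs_bounce(const struct fake_pci_dev *pdev)
{
	return !intel_no_bounce && pdev && pdev->untrusted;
}

int main(void)
{
	struct fake_pci_dev external_nic = { .untrusted = true };
	struct fake_pci_dev internal_nvme = { .untrusted = false };

	printf("external NIC bounces:  %d\n", device_needs_bounce(&external_nic));
	printf("internal NVMe bounces: %d\n", device_needs_bounce(&internal_nvme));
	return 0;
}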
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index c4e0e4a9ee9e..3f974919d3bd 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -41,9 +41,11 @@
 #include <linux/dma-direct.h>
 #include <linux/crash_dump.h>
 #include <linux/numa.h>
+#include <linux/swiotlb.h>
 #include <asm/irq_remapping.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
+#include <trace/events/intel_iommu.h>

 #include "irq_remapping.h"
 #include "intel-pasid.h"
@@ -346,6 +348,8 @@ static int domain_detach_iommu(struct dmar_domain *domain,
 static bool device_is_rmrr_locked(struct device *dev);
 static int intel_iommu_attach_device(struct iommu_domain *domain,
 				     struct device *dev);
+static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
+					    dma_addr_t iova);

 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
 int dmar_disabled = 0;
@@ -362,6 +366,7 @@ static int dmar_forcedac;
 static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
 static int iommu_identity_mapping;
+static int intel_no_bounce;

 #define IDENTMAP_ALL		1
 #define IDENTMAP_GFX		2
@@ -375,6 +380,9 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
 static DEFINE_SPINLOCK(device_domain_lock);
 static LIST_HEAD(device_domain_list);

+#define device_needs_bounce(d) (!intel_no_bounce && dev_is_pci(d) &&	\
+				to_pci_dev(d)->untrusted)
+
 /*
  * Iterate over elements in device_domain_list and call the specified
  * callback @fn against each element.
@@ -457,6 +465,9 @@ static int __init intel_iommu_setup(char *str)
 			printk(KERN_INFO
 				"Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
 			intel_iommu_tboot_noforce = 1;
+		} else if (!strncmp(str, "nobounce", 8)) {
+			pr_info("Intel-IOMMU: No bounce buffer. This could expose security risks of DMA attacks\n");
+			intel_no_bounce = 1;
 		}

 		str += strcspn(str, ",");
@@ -3296,7 +3307,7 @@ static int __init init_dmars(void)
 		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
 	}

-	if (iommu_pass_through)
+	if (iommu_default_passthrough())
 		iommu_identity_mapping |= IDENTMAP_ALL;

 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
@@ -3534,6 +3545,9 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,

 	start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
 	start_paddr += paddr & ~PAGE_MASK;
+
+	trace_map_single(dev, start_paddr, paddr, size << VTD_PAGE_SHIFT);
+
 	return start_paddr;

 error:
@@ -3589,10 +3603,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
 	if (dev_is_pci(dev))
 		pdev = to_pci_dev(dev);

-	dev_dbg(dev, "Device unmapping: pfn %lx-%lx\n", start_pfn, last_pfn);
-
 	freelist = domain_unmap(domain, start_pfn, last_pfn);
-
 	if (intel_iommu_strict || (pdev && pdev->untrusted) ||
 			!has_iova_flush_queue(&domain->iovad)) {
 		iommu_flush_iotlb_psi(iommu, domain, start_pfn,
@@ -3608,6 +3619,8 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
 		 * cpu used up by the iotlb flush operation...
 		 */
 	}
+
+	trace_unmap_single(dev, dev_addr, size);
 }

 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
@@ -3698,6 +3711,8 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	}

 	intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
+
+	trace_unmap_sg(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
 }

 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
@@ -3754,6 +3769,9 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
 		return 0;
 	}

+	trace_map_sg(dev, iova_pfn << PAGE_SHIFT,
+		     sg_phys(sglist), size << VTD_PAGE_SHIFT);
+
 	return nelems;
 }

@@ -3767,6 +3785,254 @@ static const struct dma_map_ops intel_dma_ops = {
 	.map_resource = intel_map_resource,
 	.unmap_resource = intel_unmap_resource,
 	.dma_supported = dma_direct_supported,
+	.mmap = dma_common_mmap,
+	.get_sgtable = dma_common_get_sgtable,
+};
+
+static void
+bounce_sync_single(struct device *dev, dma_addr_t addr, size_t size,
+		   enum dma_data_direction dir, enum dma_sync_target target)
+{
+	struct dmar_domain *domain;
+	phys_addr_t tlb_addr;
+
+	domain = find_domain(dev);
+	if (WARN_ON(!domain))
+		return;
+
+	tlb_addr = intel_iommu_iova_to_phys(&domain->domain, addr);
+	if (is_swiotlb_buffer(tlb_addr))
+		swiotlb_tbl_sync_single(dev, tlb_addr, size, dir, target);
+}
+
+static dma_addr_t
+bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
+		  enum dma_data_direction dir, unsigned long attrs,
+		  u64 dma_mask)
+{
+	size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
+	struct dmar_domain *domain;
+	struct intel_iommu *iommu;
+	unsigned long iova_pfn;
+	unsigned long nrpages;
+	phys_addr_t tlb_addr;
+	int prot = 0;
+	int ret;
+
+	domain = find_domain(dev);
+	if (WARN_ON(dir == DMA_NONE || !domain))
+		return DMA_MAPPING_ERROR;
+
+	iommu = domain_get_iommu(domain);
+	if (WARN_ON(!iommu))
+		return DMA_MAPPING_ERROR;
+
+	nrpages = aligned_nrpages(0, size);
+	iova_pfn = intel_alloc_iova(dev, domain,
+				    dma_to_mm_pfn(nrpages), dma_mask);
+	if (!iova_pfn)
+		return DMA_MAPPING_ERROR;
+
+	/*
+	 * Check if DMAR supports zero-length reads on write only
+	 * mappings..
+	 */
+	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
+			!cap_zlr(iommu->cap))
+		prot |= DMA_PTE_READ;
+	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+		prot |= DMA_PTE_WRITE;
+
+	/*
+	 * If both the physical buffer start address and size are
+	 * page aligned, we don't need to use a bounce page.
+	 */
+	if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) {
+		tlb_addr = swiotlb_tbl_map_single(dev,
+				__phys_to_dma(dev, io_tlb_start),
+				paddr, size, aligned_size, dir, attrs);
+		if (tlb_addr == DMA_MAPPING_ERROR) {
+			goto swiotlb_error;
+		} else {
+			/* Cleanup the padding area. */
+			void *padding_start = phys_to_virt(tlb_addr);
+			size_t padding_size = aligned_size;
+
+			if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+			    (dir == DMA_TO_DEVICE ||
+			     dir == DMA_BIDIRECTIONAL)) {
+				padding_start += size;
+				padding_size -= size;
+			}
+
+			memset(padding_start, 0, padding_size);
+		}
+	} else {
+		tlb_addr = paddr;
+	}
+
+	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
+				 tlb_addr >> VTD_PAGE_SHIFT, nrpages, prot);
+	if (ret)
+		goto mapping_error;
+
+	trace_bounce_map_single(dev, iova_pfn << PAGE_SHIFT, paddr, size);
+
+	return (phys_addr_t)iova_pfn << PAGE_SHIFT;
+
+mapping_error:
+	if (is_swiotlb_buffer(tlb_addr))
+		swiotlb_tbl_unmap_single(dev, tlb_addr, size,
+					 aligned_size, dir, attrs);
+swiotlb_error:
+	free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
+	dev_err(dev, "Device bounce map: %zx@%llx dir %d --- failed\n",
+		size, (unsigned long long)paddr, dir);
+
+	return DMA_MAPPING_ERROR;
+}
+
+static void
+bounce_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
+		    enum dma_data_direction dir, unsigned long attrs)
+{
+	size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
+	struct dmar_domain *domain;
+	phys_addr_t tlb_addr;
+
+	domain = find_domain(dev);
+	if (WARN_ON(!domain))
+		return;
+
+	tlb_addr = intel_iommu_iova_to_phys(&domain->domain, dev_addr);
+	if (WARN_ON(!tlb_addr))
+		return;
+
+	intel_unmap(dev, dev_addr, size);
+	if (is_swiotlb_buffer(tlb_addr))
+		swiotlb_tbl_unmap_single(dev, tlb_addr, size,
+					 aligned_size, dir, attrs);
+
+	trace_bounce_unmap_single(dev, dev_addr, size);
+}
+
+static dma_addr_t
+bounce_map_page(struct device *dev, struct page *page, unsigned long offset,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	return bounce_map_single(dev, page_to_phys(page) + offset,
+				 size, dir, attrs, *dev->dma_mask);
+}
+
+static dma_addr_t
+bounce_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
+		    enum dma_data_direction dir, unsigned long attrs)
+{
+	return bounce_map_single(dev, phys_addr, size,
+				 dir, attrs, *dev->dma_mask);
+}
+
+static void
+bounce_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size,
+		  enum dma_data_direction dir, unsigned long attrs)
+{
+	bounce_unmap_single(dev, dev_addr, size, dir, attrs);
+}
+
+static void
+bounce_unmap_resource(struct device *dev, dma_addr_t dev_addr, size_t size,
+		      enum dma_data_direction dir, unsigned long attrs)
+{
+	bounce_unmap_single(dev, dev_addr, size, dir, attrs);
+}
+
+static void
+bounce_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems,
+		enum dma_data_direction dir, unsigned long attrs)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sglist, sg, nelems, i)
+		bounce_unmap_page(dev, sg->dma_address,
+				  sg_dma_len(sg), dir, attrs);
+}
+
+static int
+bounce_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
+	      enum dma_data_direction dir, unsigned long attrs)
+{
+	int i;
+	struct scatterlist *sg;
+
+	for_each_sg(sglist, sg, nelems, i) {
+		sg->dma_address = bounce_map_page(dev, sg_page(sg),
+						  sg->offset, sg->length,
+						  dir, attrs);
+		if (sg->dma_address == DMA_MAPPING_ERROR)
+			goto out_unmap;
+		sg_dma_len(sg) = sg->length;
+	}
+
+	return nelems;
+
+out_unmap:
+	bounce_unmap_sg(dev, sglist, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
+	return 0;
+}
+
+static void
+bounce_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+			   size_t size, enum dma_data_direction dir)
+{
+	bounce_sync_single(dev, addr, size, dir, SYNC_FOR_CPU);
+}
+
+static void
+bounce_sync_single_for_device(struct device *dev, dma_addr_t addr,
+			      size_t size, enum dma_data_direction dir)
+{
+	bounce_sync_single(dev, addr, size, dir, SYNC_FOR_DEVICE);
+}
+
+static void
+bounce_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
+		       int nelems, enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sglist, sg, nelems, i)
+		bounce_sync_single(dev, sg_dma_address(sg),
+				   sg_dma_len(sg), dir, SYNC_FOR_CPU);
+}
+
+static void
+bounce_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
+			  int nelems, enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sglist, sg, nelems, i)
+		bounce_sync_single(dev, sg_dma_address(sg),
+				   sg_dma_len(sg), dir, SYNC_FOR_DEVICE);
+}
+
+static const struct dma_map_ops bounce_dma_ops = {
+	.alloc			= intel_alloc_coherent,
+	.free			= intel_free_coherent,
+	.map_sg			= bounce_map_sg,
+	.unmap_sg		= bounce_unmap_sg,
+	.map_page		= bounce_map_page,
+	.unmap_page		= bounce_unmap_page,
+	.sync_single_for_cpu	= bounce_sync_single_for_cpu,
+	.sync_single_for_device	= bounce_sync_single_for_device,
+	.sync_sg_for_cpu	= bounce_sync_sg_for_cpu,
+	.sync_sg_for_device	= bounce_sync_sg_for_device,
+	.map_resource		= bounce_map_resource,
+	.unmap_resource		= bounce_unmap_resource,
+	.dma_supported		= dma_direct_supported,
 };

 static inline int iommu_domain_cache_init(void)
@@ -4569,22 +4835,20 @@ const struct attribute_group *intel_iommu_groups[] = {
 	NULL,
 };

-static int __init platform_optin_force_iommu(void)
+static inline bool has_untrusted_dev(void)
 {
 	struct pci_dev *pdev = NULL;
-	bool has_untrusted_dev = false;

-	if (!dmar_platform_optin() || no_platform_optin)
-		return 0;
+	for_each_pci_dev(pdev)
+		if (pdev->untrusted)
+			return true;

-	for_each_pci_dev(pdev) {
-		if (pdev->untrusted) {
-			has_untrusted_dev = true;
-			break;
-		}
-	}
+	return false;
+}

-	if (!has_untrusted_dev)
+static int __init platform_optin_force_iommu(void)
+{
+	if (!dmar_platform_optin() || no_platform_optin || !has_untrusted_dev())
 		return 0;

 	if (no_iommu || dmar_disabled)
@@ -4598,9 +4862,6 @@ static int __init platform_optin_force_iommu(void)
 		iommu_identity_mapping |= IDENTMAP_ALL;

 	dmar_disabled = 0;
-#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
-	swiotlb = 0;
-#endif
 	no_iommu = 0;

 	return 1;
@@ -4740,7 +5001,14 @@ int __init intel_iommu_init(void)
 	up_write(&dmar_global_lock);

 #if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
-	swiotlb = 0;
+	/*
+	 * If the system has no untrusted device or the user has decided
+	 * to disable the bounce page mechanisms, we don't need swiotlb.
+	 * Mark this and the pre-allocated bounce pages will be released
+	 * later.
+	 */
+	if (!has_untrusted_dev() || intel_no_bounce)
+		swiotlb = 0;
 #endif
 	dma_ops = &intel_dma_ops;

@@ -5204,7 +5472,8 @@ static int intel_iommu_map(struct iommu_domain *domain,
 }

 static size_t intel_iommu_unmap(struct iommu_domain *domain,
-				unsigned long iova, size_t size)
+				unsigned long iova, size_t size,
+				struct iommu_iotlb_gather *gather)
 {
 	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
 	struct page *freelist = NULL;
@@ -5360,6 +5629,11 @@ static int intel_iommu_add_device(struct device *dev)
 		}
 	}

+	if (device_needs_bounce(dev)) {
+		dev_info(dev, "Use Intel IOMMU bounce page dma_ops\n");
+		set_dma_ops(dev, &bounce_dma_ops);
+	}
+
 	return 0;
 }

@@ -5377,6 +5651,9 @@ static void intel_iommu_remove_device(struct device *dev)
 	iommu_group_remove_device(dev);

 	iommu_device_unlink(&iommu->iommu, dev);
+
+	if (device_needs_bounce(dev))
+		set_dma_ops(dev, NULL);
 }

 static void intel_iommu_get_resv_regions(struct device *device,
@@ -5690,20 +5967,46 @@ const struct iommu_ops intel_iommu_ops = {
 	.pgsize_bitmap		= INTEL_IOMMU_PGSIZES,
 };

-static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
+static void quirk_iommu_igfx(struct pci_dev *dev)
 {
-	/* G4x/GM45 integrated gfx dmar support is totally busted. */
 	pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
 	dmar_map_gfx = 0;
 }

-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
+/* G4x/GM45 integrated gfx dmar support is totally busted. */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx);
+
+/* Broadwell igfx malfunctions with dmar */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160E, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1602, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160A, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160D, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1616, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161B, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161E, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1612, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161A, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161D, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1626, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162B, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162E, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1622, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162A, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162D, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1636, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163B, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163E, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163A, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);

 static void quirk_iommu_rwbf(struct pci_dev *dev)
 {
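
At the core of bounce_map_single() above is a page-alignment test: a buffer whose start address and length are both multiples of VTD_PAGE_SIZE can be mapped in place, while anything else is copied through a swiotlb slot and padded out to whole pages so the device can never reach unrelated data sharing those pages. A small stand-alone sketch of that arithmetic, assuming the driver's 4 KiB VTD_PAGE_SIZE; the helper names here are illustrative, not kernel API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SIZE 4096ull	/* VT-d page size assumed here */

/* Round a size up to whole VT-d pages, as ALIGN(size, VTD_PAGE_SIZE) does. */
static uint64_t bounce_aligned_size(uint64_t size)
{
	return (size + VTD_PAGE_SIZE - 1) & ~(VTD_PAGE_SIZE - 1);
}

/* Mirrors !IS_ALIGNED(paddr | size, VTD_PAGE_SIZE): bounce unless both the
 * start address and the length fall on page boundaries. */
static bool needs_bounce_page(uint64_t paddr, uint64_t size)
{
	return ((paddr | size) & (VTD_PAGE_SIZE - 1)) != 0;
}

int main(void)
{
	const struct { uint64_t paddr, size; } cases[] = {
		{ 0x1000, 0x2000 },	/* aligned start and size: mapped in place */
		{ 0x1200, 0x2000 },	/* unaligned start: bounced */
		{ 0x1000, 0x1800 },	/* unaligned size: bounced, padded to 0x2000 */
	};

	for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("paddr=%#llx size=%#llx -> bounce=%d aligned=%#llx\n",
		       (unsigned long long)cases[i].paddr,
		       (unsigned long long)cases[i].size,
		       needs_bounce_page(cases[i].paddr, cases[i].size),
		       (unsigned long long)bounce_aligned_size(cases[i].size));
	return 0;
}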