Diffstat (limited to 'drivers/pci')
 drivers/pci/hotplug/pci_hotplug_core.c |   2
 drivers/pci/intel-iommu.c              | 109
 drivers/pci/msi.c                      |  64
 drivers/pci/msi.h                      |  10
 drivers/pci/pci.c                      |  15
 drivers/pci/pcie/aer/ecrc.c            |   2
 drivers/pci/quirks.c                   |   5
 drivers/pci/slot.c                     |   4
 8 files changed, 158 insertions, 53 deletions
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 844580489d4d..5c5043f239cf 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -555,6 +555,8 @@ static struct hotplug_slot *get_slot_from_name (const char *name)
* @slot: pointer to the &struct hotplug_slot to register
* @devnr: device number
* @name: name registered with kobject core
+ * @owner: caller module owner
+ * @mod_name: caller module name
*
* Registers a hotplug slot with the pci hotplug subsystem, which will allow
userspace interaction with the slot.
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 53075424a434..ebc9b8dca881 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -2117,6 +2117,47 @@ static int domain_add_dev_info(struct dmar_domain *domain,
return 0;
}
+static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
+{
+ if (iommu_identity_mapping == 2)
+ return IS_GFX_DEVICE(pdev);
+
+ /*
+ * We want to start off with all devices in the 1:1 domain, and
+ * take them out later if we find they can't access all of memory.
+ *
+ * However, we can't do this for PCI devices behind bridges,
+ * because all PCI devices behind the same bridge will end up
+ * with the same source-id on their transactions.
+ *
+ * Practically speaking, we can't change things around for these
+ * devices at run-time, because we can't be sure there'll be no
+ * DMA transactions in flight for any of their siblings.
+ *
+ * So PCI devices (unless they're on the root bus) as well as
+ * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
+ * the 1:1 domain, just in _case_ one of their siblings turns out
+ * not to be able to map all of memory.
+ */
+ if (!pdev->is_pcie) {
+ if (!pci_is_root_bus(pdev->bus))
+ return 0;
+ if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
+ return 0;
+ } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+ return 0;
+
+ /*
+ * At boot time, we don't yet know if devices will be 64-bit capable.
+ * Assume that they will -- if they turn out not to be, then we can
+ * take them out of the 1:1 domain later.
+ */
+ if (!startup)
+ return pdev->dma_mask > DMA_BIT_MASK(32);
+
+ return 1;
+}
+
static int iommu_prepare_static_identity_mapping(void)
{
struct pci_dev *pdev = NULL;
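The bridge check in the new helper leans on the layout of pdev->class: the
24-bit class code packs base class, sub-class and prog-if as
(base << 16 | sub << 8 | prog-if), so shifting right by 8 leaves the 16-bit
base/sub pair. A minimal sketch of the test, assuming only the standard
PCI_CLASS_BRIDGE_PCI value (0x0604) from pci_ids.h:

	u32 class = 0x060400;	/* PCI-to-PCI bridge, prog-if 00 */
	bool is_p2p_bridge = (class >> 8) == PCI_CLASS_BRIDGE_PCI;	/* true */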
@@ -2127,16 +2168,18 @@ static int iommu_prepare_static_identity_mapping(void)
return -EFAULT;
for_each_pci_dev(pdev) {
- printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
- pci_name(pdev));
+ if (iommu_should_identity_map(pdev, 1)) {
+ printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
+ pci_name(pdev));
- ret = domain_context_mapping(si_domain, pdev,
- CONTEXT_TT_MULTI_LEVEL);
- if (ret)
- return ret;
- ret = domain_add_dev_info(si_domain, pdev);
- if (ret)
- return ret;
+ ret = domain_context_mapping(si_domain, pdev,
+ CONTEXT_TT_MULTI_LEVEL);
+ if (ret)
+ return ret;
+ ret = domain_add_dev_info(si_domain, pdev);
+ if (ret)
+ return ret;
+ }
}
return 0;
@@ -2291,6 +2334,10 @@ int __init init_dmars(void)
* identity mapping if iommu_identity_mapping is set.
*/
if (!iommu_pass_through) {
+#ifdef CONFIG_DMAR_BROKEN_GFX_WA
+ if (!iommu_identity_mapping)
+ iommu_identity_mapping = 2;
+#endif
if (iommu_identity_mapping)
iommu_prepare_static_identity_mapping();
/*
@@ -2368,15 +2415,15 @@ error:
return ret;
}
+/* Returns the number of VTD pages, but aligned up to the MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
size_t size)
{
host_addr &= ~PAGE_MASK;
- host_addr += size + PAGE_SIZE - 1;
-
- return host_addr >> VTD_PAGE_SHIFT;
+ return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
+/* This takes a number of _MM_ pages, not VTD pages */
static struct iova *intel_alloc_iova(struct device *dev,
struct dmar_domain *domain,
unsigned long nrpages, uint64_t dma_mask)
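The rewritten aligned_nrpages() rounds the span up to the MM page size before
converting to VTD pages, which matters on configurations where PAGE_SIZE
exceeds the 4KiB VTD page (e.g. IA64). A quick userspace check of the
rounding, assuming 4KiB pages (PAGE_SHIFT == VTD_PAGE_SHIFT == 12):

	#include <assert.h>
	#include <stddef.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)
	#define VTD_PAGE_SHIFT	12

	static unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
	{
		host_addr &= ~PAGE_MASK;	/* keep the in-page offset */
		return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
	}

	int main(void)
	{
		/* 0x2000 bytes starting 0x234 into a page span three pages */
		assert(aligned_nrpages(0x1234, 0x2000) == 3);
		return 0;
	}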
@@ -2443,16 +2490,24 @@ static int iommu_dummy(struct pci_dev *pdev)
}
/* Check if the pdev needs to go through the non-identity map and unmap process. */
-static int iommu_no_mapping(struct pci_dev *pdev)
+static int iommu_no_mapping(struct device *dev)
{
+ struct pci_dev *pdev;
int found;
+ if (unlikely(dev->bus != &pci_bus_type))
+ return 1;
+
+ pdev = to_pci_dev(dev);
+ if (iommu_dummy(pdev))
+ return 1;
+
if (!iommu_identity_mapping)
- return iommu_dummy(pdev);
+ return 0;
found = identity_mapping(pdev);
if (found) {
- if (pdev->dma_mask > DMA_BIT_MASK(32))
+ if (iommu_should_identity_map(pdev, 0))
return 1;
else {
/*
@@ -2469,9 +2524,12 @@ static int iommu_no_mapping(struct pci_dev *pdev)
* In case of a detached 64 bit DMA device from vm, the device
* is put into si_domain for identity mapping.
*/
- if (pdev->dma_mask > DMA_BIT_MASK(32)) {
+ if (iommu_should_identity_map(pdev, 0)) {
int ret;
ret = domain_add_dev_info(si_domain, pdev);
+ if (ret)
+ return 0;
+ ret = domain_context_mapping(si_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
if (!ret) {
printk(KERN_INFO "64bit %s uses identity mapping\n",
pci_name(pdev));
@@ -2480,7 +2538,7 @@ static int iommu_no_mapping(struct pci_dev *pdev)
}
}
- return iommu_dummy(pdev);
+ return 0;
}
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
@@ -2496,7 +2554,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
BUG_ON(dir == DMA_NONE);
- if (iommu_no_mapping(pdev))
+ if (iommu_no_mapping(hwdev))
return paddr;
domain = get_valid_domain_for_dev(pdev);
@@ -2506,7 +2564,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
iommu = domain_get_iommu(domain);
size = aligned_nrpages(paddr, size);
- iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+ iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+ pdev->dma_mask);
if (!iova)
goto error;
@@ -2635,7 +2694,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
struct iova *iova;
struct intel_iommu *iommu;
- if (iommu_no_mapping(pdev))
+ if (iommu_no_mapping(dev))
return;
domain = find_domain(pdev);
@@ -2726,7 +2785,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
struct iova *iova;
struct intel_iommu *iommu;
- if (iommu_no_mapping(pdev))
+ if (iommu_no_mapping(hwdev))
return;
domain = find_domain(pdev);
@@ -2785,7 +2844,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
struct intel_iommu *iommu;
BUG_ON(dir == DMA_NONE);
- if (iommu_no_mapping(pdev))
+ if (iommu_no_mapping(hwdev))
return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
domain = get_valid_domain_for_dev(pdev);
@@ -2797,7 +2856,8 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
for_each_sg(sglist, sg, nelems, i)
size += aligned_nrpages(sg->offset, sg->length);
- iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+ iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+ pdev->dma_mask);
if (!iova) {
sglist->dma_length = 0;
return 0;
@@ -3540,6 +3600,9 @@ static void intel_iommu_unmap_range(struct iommu_domain *domain,
{
struct dmar_domain *dmar_domain = domain->priv;
+ if (!size)
+ return;
+
dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
(iova + size - 1) >> VTD_PAGE_SHIFT);
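The new !size guard in intel_iommu_unmap_range() avoids an unsigned
underflow; a minimal illustration of what the old end-pfn computation would
do for a zero-length request:

	/* with iova == 0 and size == 0:
	 * (iova + size - 1) >> VTD_PAGE_SHIFT == ~0UL >> 12,
	 * i.e. dma_pte_clear_range() would be asked to clear up to the
	 * highest representable pfn instead of doing nothing. */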
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index d9f06fbfa0bf..d986afb7032b 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -127,17 +127,23 @@ static inline __attribute_const__ u32 msi_enabled_mask(u16 control)
* reliably as devices without an INTx disable bit will then generate a
* level IRQ which will never be cleared.
*/
-static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+static u32 __msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
u32 mask_bits = desc->masked;
if (!desc->msi_attrib.maskbit)
- return;
+ return 0;
mask_bits &= ~mask;
mask_bits |= flag;
pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits);
- desc->masked = mask_bits;
+
+ return mask_bits;
+}
+
+static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+{
+ desc->masked = __msi_mask_irq(desc, mask, flag);
}
/*
@@ -147,15 +153,21 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
* file. This saves a few milliseconds when initialising devices with lots
* of MSI-X interrupts.
*/
-static void msix_mask_irq(struct msi_desc *desc, u32 flag)
+static u32 __msix_mask_irq(struct msi_desc *desc, u32 flag)
{
u32 mask_bits = desc->masked;
unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
+ PCI_MSIX_ENTRY_VECTOR_CTRL;
mask_bits &= ~1;
mask_bits |= flag;
writel(mask_bits, desc->mask_base + offset);
- desc->masked = mask_bits;
+
+ return mask_bits;
+}
+
+static void msix_mask_irq(struct msi_desc *desc, u32 flag)
+{
+ desc->masked = __msix_mask_irq(desc, flag);
}
static void msi_set_mask_bit(unsigned irq, u32 flag)
@@ -188,9 +200,9 @@ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
void __iomem *base = entry->mask_base +
entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
- msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
- msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
- msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
+ msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
+ msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
+ msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
} else {
struct pci_dev *dev = entry->dev;
int pos = entry->msi_attrib.pos;
@@ -225,11 +237,9 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
base = entry->mask_base +
entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
- writel(msg->address_lo,
- base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
- writel(msg->address_hi,
- base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
- writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
+ writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
+ writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
+ writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
} else {
struct pci_dev *dev = entry->dev;
int pos = entry->msi_attrib.pos;
@@ -385,6 +395,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
/* Configure MSI capability structure */
ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
if (ret) {
+ msi_mask_irq(entry, mask, ~mask);
msi_free_irqs(dev);
return ret;
}
@@ -439,8 +450,14 @@ static int msix_capability_init(struct pci_dev *dev,
for (i = 0; i < nvec; i++) {
entry = alloc_msi_entry(dev);
- if (!entry)
- break;
+ if (!entry) {
+ if (!i)
+ iounmap(base);
+ else
+ msi_free_irqs(dev);
+ /* Not enough memory. Don't try again */
+ return -ENOMEM;
+ }
j = entries[i].entry;
entry->msi_attrib.is_msix = 1;
@@ -487,7 +504,7 @@ static int msix_capability_init(struct pci_dev *dev,
set_irq_msi(entry->irq, entry);
j = entries[i].entry;
entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
+ PCI_MSIX_ENTRY_VECTOR_CTRL);
msix_mask_irq(entry, 1);
i++;
}
@@ -611,9 +628,11 @@ void pci_msi_shutdown(struct pci_dev *dev)
pci_intx_for_msi(dev, 1);
dev->msi_enabled = 0;
+ /* Return the device with MSI unmasked, as in its initial state */
pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &ctrl);
mask = msi_capable_mask(ctrl);
- msi_mask_irq(desc, mask, ~mask);
+ /* Keep cached state to be restored */
+ __msi_mask_irq(desc, mask, ~mask);
/* Restore dev->irq to its default pin-assertion irq */
dev->irq = desc->msi_attrib.default_irq;
@@ -653,7 +672,6 @@ static int msi_free_irqs(struct pci_dev* dev)
list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
if (entry->msi_attrib.is_msix) {
- msix_mask_irq(entry, 1);
if (list_is_last(&entry->list, &dev->msi_list))
iounmap(entry->mask_base);
}
@@ -741,9 +759,17 @@ static void msix_free_all_irqs(struct pci_dev *dev)
void pci_msix_shutdown(struct pci_dev* dev)
{
+ struct msi_desc *entry;
+
if (!pci_msi_enable || !dev || !dev->msix_enabled)
return;
+ /* Return the device with MSI-X masked, as in its initial state */
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ /* Keep cached states to be restored */
+ __msix_mask_irq(entry, 1);
+ }
+
msix_set_enable(dev, 0);
pci_intx_for_msi(dev, 1);
dev->msix_enabled = 0;
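Taken together, the __msi_mask_irq()/__msix_mask_irq() split lets the
shutdown paths program the hardware mask bits without overwriting the cached
entry->masked value that the restore path writes back on resume. A hedged
sketch of the intended sequence (the restore step is an assumption drawn
from the comments above, not part of this patch):

	/* suspend: mask in hardware, keep the cached state */
	pci_msix_shutdown(dev);		/* __msix_mask_irq(entry, 1) */

	/* resume: rewrite the cached pre-shutdown state */
	pci_restore_msi_state(dev);	/* msix_mask_irq(entry, entry->masked) */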
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index a0662842550b..de27c1cb5a2b 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -6,11 +6,11 @@
#ifndef MSI_H
#define MSI_H
-#define PCI_MSIX_ENTRY_SIZE 16
-#define PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET 0
-#define PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET 4
-#define PCI_MSIX_ENTRY_DATA_OFFSET 8
-#define PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET 12
+#define PCI_MSIX_ENTRY_SIZE 16
+#define PCI_MSIX_ENTRY_LOWER_ADDR 0
+#define PCI_MSIX_ENTRY_UPPER_ADDR 4
+#define PCI_MSIX_ENTRY_DATA 8
+#define PCI_MSIX_ENTRY_VECTOR_CTRL 12
#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
#define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO)
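The renamed constants still describe the 16-byte MSI-X table entry layout;
an illustrative access pattern matching the readl() calls in the msi.c hunks
above (desc stands in for a struct msi_desc *):

	void __iomem *entry = desc->mask_base +
			      desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
	u32 addr_lo = readl(entry + PCI_MSIX_ENTRY_LOWER_ADDR);	/* bytes  0-3  */
	u32 addr_hi = readl(entry + PCI_MSIX_ENTRY_UPPER_ADDR);	/* bytes  4-7  */
	u32 data    = readl(entry + PCI_MSIX_ENTRY_DATA);	/* bytes  8-11 */
	u32 ctrl    = readl(entry + PCI_MSIX_ENTRY_VECTOR_CTRL);	/* bytes 12-15 */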
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6c93af5ced18..dbd0f947f497 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1517,11 +1517,20 @@ void pci_enable_ari(struct pci_dev *dev)
*
* Perform INTx swizzling for a device behind one level of bridge. This is
* required by section 9.1 of the PCI-to-PCI bridge specification for devices
- * behind bridges on add-in cards.
+ * behind bridges on add-in cards. For devices with ARI enabled, the slot
+ * number is always 0 (see the Implementation Note in section 2.2.8.1 of
+ * the PCI Express Base Specification, Revision 2.1).
*/
u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
{
- return (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1;
+ int slot;
+
+ if (pci_ari_enabled(dev->bus))
+ slot = 0;
+ else
+ slot = PCI_SLOT(dev->devfn);
+
+ return (((pin - 1) + slot) % 4) + 1;
}
int
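A worked example of the swizzle (illustrative): INTB (pin 2) on a device in
slot 3 maps to ((2 - 1) + 3) % 4 + 1 == 1, i.e. INTA at the parent bridge.
With ARI enabled the slot is forced to 0, so the pin passes through
unchanged: ((pin - 1) + 0) % 4 + 1 == pin.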
@@ -2171,7 +2180,7 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
u16 ctrl;
struct pci_dev *pdev;
- if (dev->subordinate)
+ if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
return -ENOTTY;
list_for_each_entry(pdev, &dev->bus->devices, bus_list)
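The strengthened guard reflects how the reset is performed: the function
toggles the secondary-bus-reset bit in the parent bridge's config space, so
it must bail out when there is no parent bridge. A minimal sketch of that
mechanism, assuming the surrounding code in pci_parent_bus_reset():

	struct pci_dev *bridge = dev->bus->self;	/* NULL on the root bus */
	u16 ctrl;

	pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &ctrl);
	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;		/* assert reset */
	pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, ctrl);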
diff --git a/drivers/pci/pcie/aer/ecrc.c b/drivers/pci/pcie/aer/ecrc.c
index ece97df4df6d..a928d8ab6bda 100644
--- a/drivers/pci/pcie/aer/ecrc.c
+++ b/drivers/pci/pcie/aer/ecrc.c
@@ -106,7 +106,7 @@ void pcie_set_ecrc_checking(struct pci_dev *dev)
disable_ecrc_checking(dev);
break;
case ECRC_POLICY_ON:
- enable_ecrc_checking(dev);;
+ enable_ecrc_checking(dev);
break;
default:
return;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 56552d74abea..06b965623962 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1058,6 +1058,11 @@ static void __devinit quirk_no_ata_d3(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID, quirk_no_ata_d3);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID, quirk_no_ata_d3);
+/* ALi loses some register settings that we cannot then restore */
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, quirk_no_ata_d3);
+/* VIA comes back fine but we need to keep it alive or ACPI GTM failures
+   occur during mode detection */
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_no_ata_d3);
/* This was originally an Alpha specific thing, but it really fits here.
* The i82375 PCI/EISA bridge appears as non-classified. Fix that.
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index eddb0748b0ea..8c02b6c53bdb 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -311,7 +311,7 @@ EXPORT_SYMBOL_GPL(pci_destroy_slot);
#include <linux/pci_hotplug.h>
/**
* pci_hp_create_link - create symbolic link to the hotplug driver module.
- * @slot: struct pci_slot
+ * @pci_slot: struct pci_slot
*
* Helper function for pci_hotplug_core.c to create symbolic link to
* the hotplug driver module.
@@ -334,7 +334,7 @@ EXPORT_SYMBOL_GPL(pci_hp_create_module_link);
/**
* pci_hp_remove_link - remove symbolic link to the hotplug driver module.
- * @slot: struct pci_slot
+ * @pci_slot: struct pci_slot
*
* Helper function for pci_hotplug_core.c to remove symbolic link to
* the hotplug driver module.