Diffstat (limited to 'drivers/cxl/pci.c')
 drivers/cxl/pci.c | 134 +++++++++++++++++++++++++++---------------------------
 1 file changed, 67 insertions(+), 67 deletions(-)
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 44a21ab7add5..2ff361e756d6 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright(c) 2020 Intel Corporation. All rights reserved. */
+#include <asm-generic/unaligned.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/moduleparam.h>
 #include <linux/module.h>
@@ -85,25 +86,28 @@ static int cxl_pci_mbox_wait_for_doorbell(struct cxl_dev_state *cxlds)
	 status & CXLMDEV_DEV_FATAL ? " fatal" : "",                    \
	 status & CXLMDEV_FW_HALT ? " firmware-halt" : "")
 
+/*
+ * Threaded irq dev_id's must be globally unique. cxl_dev_id provides a unique
+ * wrapper object for each irq within the same cxlds.
+ */
 struct cxl_dev_id {
	struct cxl_dev_state *cxlds;
 };
 
 static int cxl_request_irq(struct cxl_dev_state *cxlds, int irq,
-			   irq_handler_t handler, irq_handler_t thread_fn)
+			   irq_handler_t thread_fn)
 {
	struct device *dev = cxlds->dev;
	struct cxl_dev_id *dev_id;
 
-	/* dev_id must be globally unique and must contain the cxlds */
	dev_id = devm_kzalloc(dev, sizeof(*dev_id), GFP_KERNEL);
	if (!dev_id)
		return -ENOMEM;
	dev_id->cxlds = cxlds;
 
-	return devm_request_threaded_irq(dev, irq, handler, thread_fn,
-					 IRQF_SHARED | IRQF_ONESHOT,
-					 NULL, dev_id);
+	return devm_request_threaded_irq(dev, irq, NULL, thread_fn,
+					 IRQF_SHARED | IRQF_ONESHOT, NULL,
+					 dev_id);
 }
 
 static bool cxl_mbox_background_complete(struct cxl_dev_state *cxlds)
@@ -128,10 +132,10 @@ static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
	reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
	opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
	if (opcode == CXL_MBOX_OP_SANITIZE) {
+		mutex_lock(&mds->mbox_mutex);
		if (mds->security.sanitize_node)
-			sysfs_notify_dirent(mds->security.sanitize_node);
-
-		dev_dbg(cxlds->dev, "Sanitization operation ended\n");
+			mod_delayed_work(system_wq, &mds->security.poll_dwork, 0);
+		mutex_unlock(&mds->mbox_mutex);
	} else {
		/* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
		rcuwait_wake_up(&mds->mbox_wait);
@@ -152,18 +156,16 @@ static void cxl_mbox_sanitize_work(struct work_struct *work)
	mutex_lock(&mds->mbox_mutex);
	if (cxl_mbox_background_complete(cxlds)) {
		mds->security.poll_tmo_secs = 0;
-		put_device(cxlds->dev);
-
		if (mds->security.sanitize_node)
			sysfs_notify_dirent(mds->security.sanitize_node);
+		mds->security.sanitize_active = false;
 
		dev_dbg(cxlds->dev, "Sanitization operation ended\n");
	} else {
		int timeout = mds->security.poll_tmo_secs + 10;
 
		mds->security.poll_tmo_secs = min(15 * 60, timeout);
-		queue_delayed_work(system_wq, &mds->security.poll_dwork,
-				   timeout * HZ);
+		schedule_delayed_work(&mds->security.poll_dwork, timeout * HZ);
	}
	mutex_unlock(&mds->mbox_mutex);
 }
@@ -295,18 +297,15 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds,
	 * and allow userspace to poll(2) for completion.
	 */
	if (mbox_cmd->opcode == CXL_MBOX_OP_SANITIZE) {
-		if (mds->security.poll) {
-			/* hold the device throughout */
-			get_device(cxlds->dev);
-
-			/* give first timeout a second */
-			timeout = 1;
-			mds->security.poll_tmo_secs = timeout;
-			queue_delayed_work(system_wq,
-					   &mds->security.poll_dwork,
-					   timeout * HZ);
-		}
-
+		if (mds->security.sanitize_active)
+			return -EBUSY;
+
+		/* give first timeout a second */
+		timeout = 1;
+		mds->security.poll_tmo_secs = timeout;
+		mds->security.sanitize_active = true;
+		schedule_delayed_work(&mds->security.poll_dwork,
+				      timeout * HZ);
		dev_dbg(dev, "Sanitization operation started\n");
		goto success;
	}
@@ -383,13 +382,15 @@ static int cxl_pci_mbox_send(struct cxl_memdev_state *mds,
	return rc;
 }
 
-static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
+static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds, bool irq_avail)
 {
	struct cxl_dev_state *cxlds = &mds->cxlds;
	const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
	struct device *dev = cxlds->dev;
	unsigned long timeout;
+	int irq, msgnum;
	u64 md_status;
+	u32 ctrl;
 
	timeout = jiffies + mbox_ready_timeout * HZ;
	do {
@@ -437,33 +438,26 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
	dev_dbg(dev, "Mailbox payload sized %zu", mds->payload_size);
 
	rcuwait_init(&mds->mbox_wait);
+	INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
 
-	if (cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ) {
-		u32 ctrl;
-		int irq, msgnum;
-		struct pci_dev *pdev = to_pci_dev(cxlds->dev);
-
-		msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
-		irq = pci_irq_vector(pdev, msgnum);
-		if (irq < 0)
-			goto mbox_poll;
-
-		if (cxl_request_irq(cxlds, irq, cxl_pci_mbox_irq, NULL))
-			goto mbox_poll;
+	/* background command interrupts are optional */
+	if (!(cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ) || !irq_avail)
+		return 0;
 
-		/* enable background command mbox irq support */
-		ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
-		ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ;
-		writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
+	msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
+	irq = pci_irq_vector(to_pci_dev(cxlds->dev), msgnum);
+	if (irq < 0)
+		return 0;
 
+	if (cxl_request_irq(cxlds, irq, cxl_pci_mbox_irq))
		return 0;
-	}
 
-mbox_poll:
-	mds->security.poll = true;
-	INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
+	dev_dbg(cxlds->dev, "Mailbox interrupts enabled\n");
+	/* enable background command mbox irq support */
+	ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
+	ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ;
+	writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
 
-	dev_dbg(cxlds->dev, "Mailbox interrupts are unsupported");
	return 0;
 }
 
@@ -484,7 +478,7 @@ static int cxl_rcrb_get_comp_regs(struct pci_dev *pdev,
	resource_size_t component_reg_phys;
 
	*map = (struct cxl_register_map) {
-		.dev = &pdev->dev,
+		.host = &pdev->dev,
		.resource = CXL_RESOURCE_NONE,
	};
 
@@ -594,7 +588,7 @@ static int cxl_mem_alloc_event_buf(struct cxl_memdev_state *mds)
	return devm_add_action_or_reset(mds->cxlds.dev, free_event_buf, buf);
 }
 
-static int cxl_alloc_irq_vectors(struct pci_dev *pdev)
+static bool cxl_alloc_irq_vectors(struct pci_dev *pdev)
 {
	int nvecs;
 
@@ -611,9 +605,9 @@ static int cxl_alloc_irq_vectors(struct pci_dev *pdev)
				      PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (nvecs < 1) {
		dev_dbg(&pdev->dev, "Failed to alloc irq vectors: %d\n", nvecs);
-		return -ENXIO;
+		return false;
	}
-	return 0;
+	return true;
 }
 
 static irqreturn_t cxl_event_thread(int irq, void *id)
@@ -653,7 +647,7 @@ static int cxl_event_req_irq(struct cxl_dev_state *cxlds, u8 setting)
	if (irq < 0)
		return irq;
 
-	return cxl_request_irq(cxlds, irq, NULL, cxl_event_thread);
+	return cxl_request_irq(cxlds, irq, cxl_event_thread);
 }
 
 static int cxl_event_get_int_policy(struct cxl_memdev_state *mds,
@@ -749,7 +743,7 @@ static bool cxl_event_int_is_fw(u8 setting)
 }
 
 static int cxl_event_config(struct pci_host_bridge *host_bridge,
-			    struct cxl_memdev_state *mds)
+			    struct cxl_memdev_state *mds, bool irq_avail)
 {
	struct cxl_event_interrupt_policy policy;
	int rc;
@@ -761,6 +755,11 @@ static int cxl_event_config(struct pci_host_bridge *host_bridge,
	if (!host_bridge->native_cxl_error)
		return 0;
 
+	if (!irq_avail) {
+		dev_info(mds->cxlds.dev, "No interrupt support, disable event processing.\n");
+		return 0;
+	}
+
	rc = cxl_mem_alloc_event_buf(mds);
	if (rc)
		return rc;
@@ -795,6 +794,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	struct cxl_register_map map;
	struct cxl_memdev *cxlmd;
	int i, rc, pmu_count;
+	bool irq_avail;
 
	/*
	 * Double check the anonymous union trickery in struct cxl_regs
@@ -834,16 +834,14 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	 * If the component registers can't be found, the cxl_pci driver may
	 * still be useful for management functions so don't return an error.
	 */
-	cxlds->component_reg_phys = CXL_RESOURCE_NONE;
-	rc = cxl_pci_setup_regs(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
+	rc = cxl_pci_setup_regs(pdev, CXL_REGLOC_RBI_COMPONENT,
+				&cxlds->reg_map);
	if (rc)
		dev_warn(&pdev->dev, "No component registers (%d)\n", rc);
-	else if (!map.component_map.ras.valid)
+	else if (!cxlds->reg_map.component_map.ras.valid)
		dev_dbg(&pdev->dev, "RAS registers not found\n");
 
-	cxlds->component_reg_phys = map.resource;
-
-	rc = cxl_map_component_regs(&map, &cxlds->regs.component,
+	rc = cxl_map_component_regs(&cxlds->reg_map, &cxlds->regs.component,
				    BIT(CXL_CM_CAP_CAP_ID_RAS));
	if (rc)
		dev_dbg(&pdev->dev, "Failed to map RAS capability.\n");
@@ -854,11 +852,9 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	else
		dev_warn(&pdev->dev, "Media not active (%d)\n", rc);
 
-	rc = cxl_alloc_irq_vectors(pdev);
-	if (rc)
-		return rc;
+	irq_avail = cxl_alloc_irq_vectors(pdev);
 
-	rc = cxl_pci_setup_mailbox(mds);
+	rc = cxl_pci_setup_mailbox(mds, irq_avail);
	if (rc)
		return rc;
 
@@ -882,11 +878,15 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	if (rc)
		return rc;
 
-	cxlmd = devm_cxl_add_memdev(cxlds);
+	cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);
 
-	rc = cxl_memdev_setup_fw_upload(mds);
+	rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
+	if (rc)
+		return rc;
+
+	rc = devm_cxl_sanitize_setup_notifier(&pdev->dev, cxlmd);
	if (rc)
		return rc;
 
@@ -900,7 +900,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
			break;
		}
 
-		rc = cxl_map_pmu_regs(pdev, &pmu_regs, &map);
+		rc = cxl_map_pmu_regs(&map, &pmu_regs);
		if (rc) {
			dev_dbg(&pdev->dev, "Could not map PMU regs\n");
			break;
@@ -913,7 +913,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
		}
	}
 
-	rc = cxl_event_config(host_bridge, mds);
+	rc = cxl_event_config(host_bridge, mds, irq_avail);
	if (rc)
		return rc;
 
@@ -974,6 +974,6 @@ static struct pci_driver cxl_pci_driver = {
	},
 };
 
-MODULE_LICENSE("GPL v2");
 module_pci_driver(cxl_pci_driver);
+MODULE_LICENSE("GPL v2");
 MODULE_IMPORT_NS(CXL);
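
Note on the cxl_request_irq() change: with the primary-handler argument dropped,
every caller registers a threaded-only handler. A minimal standalone sketch of
that pattern follows (my_state, my_irq_id, my_irq_thread and my_request_irq are
hypothetical names, not driver code). Passing NULL as the hardirq handler makes
the IRQ core substitute its default primary handler, IRQF_ONESHOT keeps the line
masked until the thread finishes, and the devm-allocated wrapper gives each
registration the globally unique dev_id that IRQF_SHARED requires:

#include <linux/device.h>
#include <linux/interrupt.h>

struct my_state;			/* hypothetical per-device state */

struct my_irq_id {
	struct my_state *state;		/* unique wrapper, one per irq */
};

static irqreturn_t my_irq_thread(int irq, void *id)
{
	/* struct my_irq_id *dev_id = id; ...real work runs here... */
	return IRQ_HANDLED;
}

static int my_request_irq(struct device *dev, struct my_state *state, int irq)
{
	struct my_irq_id *dev_id;

	dev_id = devm_kzalloc(dev, sizeof(*dev_id), GFP_KERNEL);
	if (!dev_id)
		return -ENOMEM;
	dev_id->state = state;

	/* NULL primary handler: all handling happens in the thread */
	return devm_request_threaded_irq(dev, irq, NULL, my_irq_thread,
					 IRQF_SHARED | IRQF_ONESHOT, NULL,
					 dev_id);
}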
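
The sanitize rework replaces the get_device()/put_device() pinning and the
notify-from-irq path with one state machine serialized by mbox_mutex: submission
refuses a second sanitize with -EBUSY while sanitize_active is set, the first
poll lands one second after submit, each rearm backs off by ten more seconds
with the stored interval capped at 15 minutes (a cadence of roughly 1 s, 11 s,
21 s, ... leveling off near 15 minutes), and only the poll work clears the flag
and notifies sysfs once the device reports completion. Condensed from the hunks
above (locking and surrounding code elided, so this is not compilable on its
own):

	/* submit: __cxl_pci_mbox_send_cmd() */
	if (mds->security.sanitize_active)
		return -EBUSY;
	mds->security.poll_tmo_secs = 1;
	mds->security.sanitize_active = true;
	schedule_delayed_work(&mds->security.poll_dwork, 1 * HZ);

	/* poll: cxl_mbox_sanitize_work() */
	if (cxl_mbox_background_complete(cxlds)) {
		mds->security.poll_tmo_secs = 0;
		if (mds->security.sanitize_node)
			sysfs_notify_dirent(mds->security.sanitize_node);
		mds->security.sanitize_active = false;
	} else {
		int timeout = mds->security.poll_tmo_secs + 10;

		mds->security.poll_tmo_secs = min(15 * 60, timeout);
		schedule_delayed_work(&mds->security.poll_dwork, timeout * HZ);
	}

	/* irq: cxl_pci_mbox_irq() pulls the next poll forward to now */
	if (mds->security.sanitize_node)
		mod_delayed_work(system_wq, &mds->security.poll_dwork, 0);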
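
Likewise, cxl_alloc_irq_vectors() returning bool lets cxl_pci_probe() treat
MSI/MSI-X allocation failure as a degraded mode rather than a fatal error: the
mailbox falls back to polling and event processing is skipped. Condensed from
the probe hunks above (again a fragment, not compilable on its own):

	bool irq_avail = cxl_alloc_irq_vectors(pdev);	/* false: no vectors */

	rc = cxl_pci_setup_mailbox(mds, irq_avail);	/* polls if !irq_avail */
	if (rc)
		return rc;

	rc = cxl_event_config(host_bridge, mds, irq_avail); /* no-op if !irq_avail */
	if (rc)
		return rc;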