Diffstat (limited to 'drivers/nvme/host/pci.c')
-rw-r--r--	drivers/nvme/host/pci.c | 100
1 file changed, 61 insertions, 39 deletions
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 0f093f14d348..4cb9b156cab7 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -310,6 +310,11 @@ static int nvme_init_iod(struct request *rq, unsigned size,
 	iod->npages = -1;
 	iod->nents = 0;
 	iod->length = size;
+
+	if (!(rq->cmd_flags & REQ_DONTPREP)) {
+		rq->retries = 0;
+		rq->cmd_flags |= REQ_DONTPREP;
+	}
 	return 0;
 }
 
@@ -520,8 +525,8 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
 			goto out_unmap;
 	}
 
-	cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
-	cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
+	cmnd->rw.dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+	cmnd->rw.dptr.prp2 = cpu_to_le64(iod->first_dma);
 	if (blk_integrity_rq(req))
 		cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
 	return BLK_MQ_RQ_QUEUE_OK;
@@ -623,6 +628,7 @@ static void nvme_complete_rq(struct request *req)
 
 	if (unlikely(req->errors)) {
 		if (nvme_req_needs_retry(req, req->errors)) {
+			req->retries++;
 			nvme_requeue_req(req);
 			return;
 		}
@@ -901,7 +907,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 		 req->tag, nvmeq->qid);
 	abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
-			BLK_MQ_REQ_NOWAIT);
+			BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
 	if (IS_ERR(abort_req)) {
 		atomic_inc(&dev->ctrl.abort_limit);
 		return BLK_EH_RESET_TIMER;
 	}
@@ -919,22 +925,6 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	return BLK_EH_RESET_TIMER;
 }
 
-static void nvme_cancel_io(struct request *req, void *data, bool reserved)
-{
-	int status;
-
-	if (!blk_mq_request_started(req))
-		return;
-
-	dev_dbg_ratelimited(((struct nvme_dev *) data)->ctrl.device,
-				"Cancelling I/O %d", req->tag);
-
-	status = NVME_SC_ABORT_REQ;
-	if (blk_queue_dying(req->q))
-		status |= NVME_SC_DNR;
-	blk_mq_complete_request(req, status);
-}
-
 static void nvme_free_queue(struct nvme_queue *nvmeq)
 {
 	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
@@ -1394,21 +1384,13 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	int result, i, vecs, nr_io_queues, size;
 
-	nr_io_queues = num_possible_cpus();
+	nr_io_queues = num_online_cpus();
 	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
 	if (result < 0)
 		return result;
 
-	/*
-	 * Degraded controllers might return an error when setting the queue
-	 * count. We still want to be able to bring them online and offer
-	 * access to the admin queue, as that might be only way to fix them up.
-	 */
-	if (result > 0) {
-		dev_err(dev->ctrl.device,
-			"Could not set queue count (%d)\n", result);
+	if (nr_io_queues == 0)
 		return 0;
-	}
 
 	if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
 		result = nvme_cmb_qdepth(dev, nr_io_queues,
@@ -1536,7 +1518,7 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
 	cmd.delete_queue.opcode = opcode;
 	cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
 
-	req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT);
+	req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
@@ -1551,12 +1533,12 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
 
 static void nvme_disable_io_queues(struct nvme_dev *dev)
 {
-	int pass;
+	int pass, queues = dev->online_queues - 1;
 	unsigned long timeout;
 	u8 opcode = nvme_admin_delete_sq;
 
 	for (pass = 0; pass < 2; pass++) {
-		int sent = 0, i = dev->queue_count - 1;
+		int sent = 0, i = queues;
 
 		reinit_completion(&dev->ioq_wait);
 retry:
@@ -1679,9 +1661,14 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 
 static void nvme_dev_unmap(struct nvme_dev *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
+	int bars;
+
 	if (dev->bar)
 		iounmap(dev->bar);
-	pci_release_regions(to_pci_dev(dev->dev));
+
+	bars = pci_select_bars(pdev, IORESOURCE_MEM);
+	pci_release_selected_regions(pdev, bars);
 }
 
 static void nvme_pci_disable(struct nvme_dev *dev)
@@ -1722,8 +1709,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 	}
 	nvme_pci_disable(dev);
 
-	blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_io, dev);
-	blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_io, dev);
+	blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
+	blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
 	mutex_unlock(&dev->shutdown_lock);
 }
 
@@ -1857,7 +1844,7 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work)
 
 	nvme_kill_queues(&dev->ctrl);
 	if (pci_get_drvdata(pdev))
-		pci_stop_and_remove_bus_device_locked(pdev);
+		device_release_driver(&pdev->dev);
 	nvme_put_ctrl(&dev->ctrl);
 }
 
@@ -1897,6 +1884,7 @@ static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
 }
 
 static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
+	.name			= "pcie",
 	.module			= THIS_MODULE,
 	.reg_read32		= nvme_pci_reg_read32,
 	.reg_write32		= nvme_pci_reg_write32,
@@ -1924,7 +1912,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
 
 	return 0;
   release:
-	pci_release_regions(pdev);
+	pci_release_selected_regions(pdev, bars);
 	return -ENODEV;
 }
 
@@ -1935,7 +1923,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	node = dev_to_node(&pdev->dev);
 	if (node == NUMA_NO_NODE)
-		set_dev_node(&pdev->dev, 0);
+		set_dev_node(&pdev->dev, first_memory_node);
 
 	dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
 	if (!dev)
@@ -2017,6 +2005,10 @@ static void nvme_remove(struct pci_dev *pdev)
 	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
 
 	pci_set_drvdata(pdev, NULL);
+
+	if (!pci_device_is_present(pdev))
+		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
+
 	flush_work(&dev->reset_work);
 	nvme_uninit_ctrl(&dev->ctrl);
 	nvme_dev_disable(dev, true);
@@ -2028,6 +2020,24 @@ static void nvme_remove(struct pci_dev *pdev)
 	nvme_put_ctrl(&dev->ctrl);
 }
 
+static int nvme_pci_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+	int ret = 0;
+
+	if (numvfs == 0) {
+		if (pci_vfs_assigned(pdev)) {
+			dev_warn(&pdev->dev,
+				"Cannot disable SR-IOV VFs while assigned\n");
+			return -EPERM;
+		}
+		pci_disable_sriov(pdev);
+		return 0;
+	}
+
+	ret = pci_enable_sriov(pdev, numvfs);
+	return ret ? ret : numvfs;
+}
+
 #ifdef CONFIG_PM_SLEEP
 static int nvme_suspend(struct device *dev)
 {
@@ -2060,14 +2070,17 @@ static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
 	 * shutdown the controller to quiesce. The controller will be restarted
 	 * after the slot reset through driver's slot_reset callback.
 	 */
-	dev_warn(dev->ctrl.device, "error detected: state:%d\n", state);
 	switch (state) {
 	case pci_channel_io_normal:
 		return PCI_ERS_RESULT_CAN_RECOVER;
 	case pci_channel_io_frozen:
+		dev_warn(dev->ctrl.device,
+			"frozen state error detected, reset controller\n");
 		nvme_dev_disable(dev, false);
 		return PCI_ERS_RESULT_NEED_RESET;
 	case pci_channel_io_perm_failure:
+		dev_warn(dev->ctrl.device,
+			"failure state error detected, request disconnect\n");
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
 	return PCI_ERS_RESULT_NEED_RESET;
@@ -2102,8 +2115,16 @@ static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_VDEVICE(INTEL, 0x0953),
 		.driver_data = NVME_QUIRK_STRIPE_SIZE |
 				NVME_QUIRK_DISCARD_ZEROES, },
+	{ PCI_VDEVICE(INTEL, 0x0a53),
+		.driver_data = NVME_QUIRK_STRIPE_SIZE |
+				NVME_QUIRK_DISCARD_ZEROES, },
+	{ PCI_VDEVICE(INTEL, 0x0a54),
+		.driver_data = NVME_QUIRK_STRIPE_SIZE |
+				NVME_QUIRK_DISCARD_ZEROES, },
 	{ PCI_VDEVICE(INTEL, 0x5845),	/* Qemu emulated controller */
 		.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
+	{ PCI_DEVICE(0x1c58, 0x0003),	/* HGST adapter */
+		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
 	{ 0, }
@@ -2119,6 +2140,7 @@ static struct pci_driver nvme_driver = {
 	.driver		= {
 		.pm	= &nvme_dev_pm_ops,
 	},
+	.sriov_configure = nvme_pci_sriov_configure,
 	.err_handler	= &nvme_err_handler,
 };
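
Note: the new nvme_pci_sriov_configure() callback is wired into pci_driver.sriov_configure, which the PCI core invokes when userspace writes a VF count to the device's sriov_numvfs sysfs attribute. The snippet below is a simplified sketch of that calling contract only; the helper name is hypothetical, and the real handler is sriov_numvfs_store() in drivers/pci/pci-sysfs.c, which adds locking and further checks.

/*
 * Sketch of how the PCI core exercises a driver's .sriov_configure hook
 * (hypothetical helper; not the kernel's actual sysfs handler).
 */
static int sketch_set_numvfs(struct pci_dev *pdev, int numvfs)
{
	int ret;

	if (!pdev->driver || !pdev->driver->sriov_configure)
		return -ENOENT;

	/*
	 * On success the hook returns the number of VFs now enabled
	 * (numvfs, as nvme_pci_sriov_configure does); on failure it
	 * returns a negative errno.
	 */
	ret = pdev->driver->sriov_configure(pdev, numvfs);
	if (ret < 0)
		return ret;

	return ret == numvfs ? 0 : -EBUSY;
}

Disabling follows the same path with numvfs == 0, which is why the callback refuses to tear down VFs that are still assigned to a guest.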