Diffstat (limited to 'drivers/scsi/hisi_sas')
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas.h       |  11
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c  | 152
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v1_hw.c |  10
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v2_hw.c |  10
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 191
5 files changed, 299 insertions, 75 deletions
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 6f8a52a1b808..fb7c52c119df 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -207,6 +207,7 @@ struct hisi_sas_cq {
 	int rd_point;
 	int id;
 	int irq_no;
+	spinlock_t poll_lock;
 };

 struct hisi_sas_dq {
@@ -344,7 +345,7 @@ struct hisi_sas_hw {
 			    int delay_ms, int timeout_ms);
 	void (*debugfs_snapshot_regs)(struct hisi_hba *hisi_hba);
 	int complete_hdr_size;
-	struct scsi_host_template *sht;
+	const struct scsi_host_template *sht;
 };

 #define HISI_SAS_MAX_DEBUGFS_DUMP (50)
@@ -484,6 +485,8 @@ struct hisi_hba {
 	struct dentry *debugfs_dump_dentry;
 	struct dentry *debugfs_bist_dentry;
 	struct dentry *debugfs_fifo_dentry;
+
+	int iopoll_q_cnt;
 };

 /* Generic HW DMA host memory structures */
@@ -653,16 +656,18 @@ extern void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
 extern void hisi_sas_phy_bcast(struct hisi_sas_phy *phy);
 extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
 				    struct sas_task *task,
-				    struct hisi_sas_slot *slot);
+				    struct hisi_sas_slot *slot,
+				    bool need_lock);
 extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba);
 extern void hisi_sas_rst_work_handler(struct work_struct *work);
 extern void hisi_sas_sync_rst_work_handler(struct work_struct *work);
-extern void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba);
 extern void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no);
 extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
 				enum hisi_sas_phy_event event);
 extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba);
 extern u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max);
+extern void hisi_sas_sync_cqs(struct hisi_hba *hisi_hba);
+extern void hisi_sas_sync_poll_cqs(struct hisi_hba *hisi_hba);
 extern void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba);
 extern void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba);
 #endif
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 8c038ccf1c09..412431c901a7 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -205,7 +205,7 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
 }

 void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
-			     struct hisi_sas_slot *slot)
+			     struct hisi_sas_slot *slot, bool need_lock)
 {
 	int device_id = slot->device_id;
 	struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id];
@@ -239,9 +239,13 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
 		}
 	}

-	spin_lock(&sas_dev->lock);
-	list_del_init(&slot->entry);
-	spin_unlock(&sas_dev->lock);
+	if (need_lock) {
+		spin_lock(&sas_dev->lock);
+		list_del_init(&slot->entry);
+		spin_unlock(&sas_dev->lock);
+	} else {
+		list_del_init(&slot->entry);
+	}

 	memset(slot, 0, offsetof(struct hisi_sas_slot, buf));

@@ -529,10 +533,21 @@ static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
 			dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
 			dq = &hisi_hba->dq[dq_index];
 		} else {
-			struct Scsi_Host *shost = hisi_hba->shost;
-			struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
-			int queue = qmap->mq_map[raw_smp_processor_id()];
+			int queue;
+
+			if (hisi_hba->iopoll_q_cnt) {
+				/*
+				 * Use interrupt queue (queue 0) to deliver and complete
+				 * internal IOs of libsas or libata when there is at least
+				 * one iopoll queue
+				 */
+				queue = 0;
+			} else {
+				struct Scsi_Host *shost = hisi_hba->shost;
+				struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+
+				queue = qmap->mq_map[raw_smp_processor_id()];
+			}
 			dq = &hisi_hba->dq[queue];
 		}
 		break;
@@ -672,6 +687,55 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
 	return sas_dev;
 }

+static void hisi_sas_sync_poll_cq(struct hisi_sas_cq *cq)
+{
+	/* make sure CQ entries being processed are processed to completion */
+	spin_lock(&cq->poll_lock);
+	spin_unlock(&cq->poll_lock);
+}
+
+static bool hisi_sas_queue_is_poll(struct hisi_sas_cq *cq)
+{
+	struct hisi_hba *hisi_hba = cq->hisi_hba;
+
+	if (cq->id < hisi_hba->queue_count - hisi_hba->iopoll_q_cnt)
+		return false;
+	return true;
+}
+
+static void hisi_sas_sync_cq(struct hisi_sas_cq *cq)
+{
+	if (hisi_sas_queue_is_poll(cq))
+		hisi_sas_sync_poll_cq(cq);
+	else
+		synchronize_irq(cq->irq_no);
+}
+
+void hisi_sas_sync_poll_cqs(struct hisi_hba *hisi_hba)
+{
+	int i;
+
+	for (i = 0; i < hisi_hba->queue_count; i++) {
+		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+
+		if (hisi_sas_queue_is_poll(cq))
+			hisi_sas_sync_poll_cq(cq);
+	}
+}
+EXPORT_SYMBOL_GPL(hisi_sas_sync_poll_cqs);
+
+void hisi_sas_sync_cqs(struct hisi_hba *hisi_hba)
+{
+	int i;
+
+	for (i = 0; i < hisi_hba->queue_count; i++) {
+		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+
+		hisi_sas_sync_cq(cq);
+	}
+}
+EXPORT_SYMBOL_GPL(hisi_sas_sync_cqs);
+
 static void hisi_sas_tmf_aborted(struct sas_task *task)
 {
 	struct hisi_sas_slot *slot = task->lldd_task;
@@ -683,10 +747,10 @@ static void hisi_sas_tmf_aborted(struct sas_task *task)
 		struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];

 		/*
-		 * sync irq to avoid free'ing task
+		 * sync irq or poll queue to avoid free'ing task
 		 * before using task in IO completion
 		 */
-		synchronize_irq(cq->irq_no);
+		hisi_sas_sync_cq(cq);
 		slot->task = NULL;
 	}
 }
@@ -1021,7 +1085,7 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
 }

 static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
-				     struct hisi_sas_slot *slot)
+				     struct hisi_sas_slot *slot, bool need_lock)
 {
 	if (task) {
 		unsigned long flags;
@@ -1038,7 +1102,7 @@ static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task
 		spin_unlock_irqrestore(&task->task_state_lock, flags);
 	}

-	hisi_sas_slot_task_free(hisi_hba, task, slot);
+	hisi_sas_slot_task_free(hisi_hba, task, slot, need_lock);
 }

 static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
@@ -1047,8 +1111,11 @@ static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
 	struct hisi_sas_slot *slot, *slot2;
 	struct hisi_sas_device *sas_dev = device->lldd_dev;

+	spin_lock(&sas_dev->lock);
 	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
-		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
+		hisi_sas_do_release_task(hisi_hba, slot->task, slot, false);
+
+	spin_unlock(&sas_dev->lock);
 }

 void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
@@ -1453,13 +1520,41 @@ void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
 }
 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);

+static void hisi_sas_async_init_wait_phyup(void *data, async_cookie_t cookie)
+{
+	struct hisi_sas_phy *phy = data;
+	struct hisi_hba *hisi_hba = phy->hisi_hba;
+	struct device *dev = hisi_hba->dev;
+	DECLARE_COMPLETION_ONSTACK(completion);
+	int phy_no = phy->sas_phy.id;
+
+	phy->reset_completion = &completion;
+	hisi_sas_phy_enable(hisi_hba, phy_no, 1);
+	if (!wait_for_completion_timeout(&completion,
+					 HISI_SAS_WAIT_PHYUP_TIMEOUT))
+		dev_warn(dev, "phy%d wait phyup timed out\n", phy_no);
+
+	phy->reset_completion = NULL;
+}
+
 void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
 {
 	struct Scsi_Host *shost = hisi_hba->shost;
+	ASYNC_DOMAIN_EXCLUSIVE(async);
+	int phy_no;

 	/* Init and wait for PHYs to come up and all libsas event finished. */
-	hisi_hba->hw->phys_init(hisi_hba);
-	msleep(1000);
+	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
+		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+
+		if (!(hisi_hba->phy_state & BIT(phy_no)))
+			continue;
+
+		async_schedule_domain(hisi_sas_async_init_wait_phyup,
+				      phy, &async);
+	}
+
+	async_synchronize_full_domain(&async);
 	hisi_sas_refresh_port_id(hisi_hba);
 	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
@@ -1540,11 +1635,11 @@ static int hisi_sas_abort_task(struct sas_task *task)
 		if (slot) {
 			/*
-			 * sync irq to avoid free'ing task
+			 * sync irq or poll queue to avoid free'ing task
 			 * before using task in IO completion
 			 */
 			cq = &hisi_hba->cq[slot->dlvry_queue];
-			synchronize_irq(cq->irq_no);
+			hisi_sas_sync_cq(cq);
 		}
 		spin_unlock_irqrestore(&task->task_state_lock, flags);
 		rc = TMF_RESP_FUNC_COMPLETE;
@@ -1574,7 +1669,7 @@ static int hisi_sas_abort_task(struct sas_task *task)
 		 */
 		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
 			if (task->lldd_task)
-				hisi_sas_do_release_task(hisi_hba, task, slot);
+				hisi_sas_do_release_task(hisi_hba, task, slot, true);
 		}
 	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
 		   task->task_proto & SAS_PROTOCOL_STP) {
@@ -1594,7 +1689,7 @@ static int hisi_sas_abort_task(struct sas_task *task)
 		 */
 		if ((sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR) &&
 		    qc && qc->scsicmd) {
-			hisi_sas_do_release_task(hisi_hba, task, slot);
+			hisi_sas_do_release_task(hisi_hba, task, slot, true);
 			rc = TMF_RESP_FUNC_COMPLETE;
 		} else {
 			rc = hisi_sas_softreset_ata_disk(device);
@@ -1611,10 +1706,10 @@ static int hisi_sas_abort_task(struct sas_task *task)
 		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
 		    task->lldd_task) {
 			/*
-			 * sync irq to avoid free'ing task
+			 * sync irq or poll queue to avoid free'ing task
 			 * before using task in IO completion
 			 */
-			synchronize_irq(cq->irq_no);
+			hisi_sas_sync_cq(cq);
 			slot->task = NULL;
 		}
 	}
@@ -1885,10 +1980,10 @@ static bool hisi_sas_internal_abort_timeout(struct sas_task *task,
 		struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];

 		/*
-		 * sync irq to avoid free'ing task
+		 * sync irq or poll queue to avoid free'ing task
 		 * before using task in IO completion
 		 */
-		synchronize_irq(cq->irq_no);
+		hisi_sas_sync_cq(cq);
 		slot->task = NULL;
 	}

@@ -1992,18 +2087,6 @@ void hisi_sas_phy_bcast(struct hisi_sas_phy *phy)
 }
 EXPORT_SYMBOL_GPL(hisi_sas_phy_bcast);

-void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba)
-{
-	int i;
-
-	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
-		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
-
-		synchronize_irq(cq->irq_no);
-	}
-}
-EXPORT_SYMBOL_GPL(hisi_sas_sync_irqs);
-
 int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type)
 {
 	struct hisi_hba *hisi_hba = shost_priv(shost);
@@ -2101,6 +2184,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
 		/* Completion queue structure */
 		cq->id = i;
 		cq->hisi_hba = hisi_hba;
+		spin_lock_init(&cq->poll_lock);

 		/* Delivery queue structure */
 		spin_lock_init(&dq->lock);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index d643c5a49aa9..0aa8c9c88535 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1258,7 +1258,11 @@ static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
 		slot_err_v1_hw(hisi_hba, task, slot);

 		if (unlikely(slot->abort)) {
-			sas_task_abort(task);
+			if (dev_is_sata(device) && task->ata_task.use_ncq)
+				sas_ata_device_link_abort(device, true);
+			else
+				sas_task_abort(task);
+
 			return;
 		}
 		goto out;
@@ -1306,7 +1310,7 @@ static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
 	}

 out:
-	hisi_sas_slot_task_free(hisi_hba, task, slot);
+	hisi_sas_slot_task_free(hisi_hba, task, slot, true);

 	if (task->task_done)
 		task->task_done(task);
@@ -1735,7 +1739,7 @@ static struct attribute *host_v1_hw_attrs[] = {

 ATTRIBUTE_GROUPS(host_v1_hw);

-static struct scsi_host_template sht_v1_hw = {
+static const struct scsi_host_template sht_v1_hw = {
 	.name = DRV_NAME,
 	.proc_name = DRV_NAME,
 	.module = THIS_MODULE,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index cded42f4ca44..cd78e4c983aa 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -2404,7 +2404,11 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
 			 error_info[2], error_info[3]);

 		if (unlikely(slot->abort)) {
-			sas_task_abort(task);
+			if (dev_is_sata(device) && task->ata_task.use_ncq)
+				sas_ata_device_link_abort(device, true);
+			else
+				sas_task_abort(task);
+
 			return;
 		}
 		goto out;
@@ -2462,7 +2466,7 @@ out:
 	}
 	task->task_state_flags |= SAS_TASK_STATE_DONE;
 	spin_unlock_irqrestore(&task->task_state_lock, flags);
-	hisi_sas_slot_task_free(hisi_hba, task, slot);
+	hisi_sas_slot_task_free(hisi_hba, task, slot, true);

 	if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
 		spin_lock_irqsave(&device->done_lock, flags);
@@ -3551,7 +3555,7 @@ static void map_queues_v2_hw(struct Scsi_Host *shost)
 	}
 }

-static struct scsi_host_template sht_v2_hw = {
+static const struct scsi_host_template sht_v2_hw = {
 	.name = DRV_NAME,
 	.proc_name = DRV_NAME,
 	.module = THIS_MODULE,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index a63279f55d09..12d588454f5d 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -552,6 +552,11 @@ static int prot_mask;
 module_param(prot_mask, int, 0444);
 MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 ");

+/* the index of iopoll queues are bigger than interrupt queues' */
+static int experimental_iopoll_q_cnt;
+module_param(experimental_iopoll_q_cnt, int, 0444);
+MODULE_PARM_DESC(experimental_iopoll_q_cnt, "number of queues to be used as poll mode, def=0");
+
 static void debugfs_work_handler_v3_hw(struct work_struct *work);
 static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba);

@@ -599,6 +604,27 @@ static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
 	readl_poll_timeout_atomic(regs, val, cond, delay_us, timeout_us);\
 })

+static void interrupt_enable_v3_hw(struct hisi_hba *hisi_hba)
+{
+	int i;
+
+	for (i = 0; i < hisi_hba->queue_count; i++)
+		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0);
+
+	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
+	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
+	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffc220ff);
+	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0x155555);
+
+	for (i = 0; i < hisi_hba->n_phy; i++) {
+		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xf2057fff);
+		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe);
+		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
+		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0);
+		hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
+	}
+}
+
 static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
 {
 	int i, j;
@@ -619,20 +645,14 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff);
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff);
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
-	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
-	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
-	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffc220ff);
 	hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
 	hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0);
 	hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0);
-	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0x155555);
 	hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0);
 	hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0);
-	for (i = 0; i < hisi_hba->queue_count; i++)
-		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0);
-
 	hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);

+	interrupt_enable_v3_hw(hisi_hba);
 	for (i = 0; i < hisi_hba->n_phy; i++) {
 		enum sas_linkrate max;
 		struct hisi_sas_phy *phy = &hisi_hba->phy[i];
@@ -655,13 +675,8 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
 		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
 		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
 		hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
-		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xf2057fff);
-		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe);
 		hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
-		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
 		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
-		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0);
-		hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
 		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1);
 		hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120);
 		hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01);
@@ -883,6 +898,7 @@ static void dereg_device_v3_hw(struct hisi_hba *hisi_hba,
 	cfg_abt_set_query_iptt = hisi_sas_read32(hisi_hba,
 		CFG_ABT_SET_QUERY_IPTT);
+	spin_lock(&sas_dev->lock);
 	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) {
 		cfg_abt_set_query_iptt &= ~CFG_SET_ABORTED_IPTT_MSK;
 		cfg_abt_set_query_iptt |= (1 << CFG_SET_ABORTED_EN_OFF) |
@@ -890,6 +906,7 @@ static void dereg_device_v3_hw(struct hisi_hba *hisi_hba,
 		hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT,
 			cfg_abt_set_query_iptt);
 	}
+	spin_unlock(&sas_dev->lock);
 	cfg_abt_set_query_iptt &= ~(1 << CFG_SET_ABORTED_EN_OFF);
 	hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT,
 		cfg_abt_set_query_iptt);
@@ -2320,7 +2337,11 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
 			 error_info[0], error_info[1],
 			 error_info[2], error_info[3]);

 		if (unlikely(slot->abort)) {
-			sas_task_abort(task);
+			if (dev_is_sata(device) && task->ata_task.use_ncq)
+				sas_ata_device_link_abort(device, true);
+			else
+				sas_task_abort(task);
+
 			return;
 		}
 		goto out;
@@ -2374,7 +2395,7 @@ out:
 	}
 	task->task_state_flags |= SAS_TASK_STATE_DONE;
 	spin_unlock_irqrestore(&task->task_state_lock, flags);
-	hisi_sas_slot_task_free(hisi_hba, task, slot);
+	hisi_sas_slot_task_free(hisi_hba, task, slot, true);

 	if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
 		spin_lock_irqsave(&device->done_lock, flags);
@@ -2391,23 +2412,25 @@ out:
 		task->task_done(task);
 }

-static irqreturn_t cq_thread_v3_hw(int irq_no, void *p)
+static int complete_v3_hw(struct hisi_sas_cq *cq)
 {
-	struct hisi_sas_cq *cq = p;
-	struct hisi_hba *hisi_hba = cq->hisi_hba;
-	struct hisi_sas_slot *slot;
 	struct hisi_sas_complete_v3_hdr *complete_queue;
-	u32 rd_point = cq->rd_point, wr_point;
+	struct hisi_hba *hisi_hba = cq->hisi_hba;
+	u32 rd_point, wr_point;
 	int queue = cq->id;
+	int completed;
+
+	rd_point = cq->rd_point;

 	complete_queue = hisi_hba->complete_hdr[queue];
 	wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
 				   (0x14 * queue));
+	completed = (wr_point + HISI_SAS_QUEUE_SLOTS - rd_point) % HISI_SAS_QUEUE_SLOTS;

 	while (rd_point != wr_point) {
 		struct hisi_sas_complete_v3_hdr *complete_hdr;
 		struct device *dev = hisi_hba->dev;
+		struct hisi_sas_slot *slot;
 		u32 dw0, dw1, dw3;
 		int iptt;
@@ -2451,6 +2474,28 @@ static irqreturn_t cq_thread_v3_hw(int irq_no, void *p)
 	cq->rd_point = rd_point;
 	hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);

+	return completed;
+}
+
+static int queue_complete_v3_hw(struct Scsi_Host *shost, unsigned int queue)
+{
+	struct hisi_hba *hisi_hba = shost_priv(shost);
+	struct hisi_sas_cq *cq = &hisi_hba->cq[queue];
+	int completed;
+
+	spin_lock(&cq->poll_lock);
+	completed = complete_v3_hw(cq);
+	spin_unlock(&cq->poll_lock);
+
+	return completed;
+}
+
+static irqreturn_t cq_thread_v3_hw(int irq_no, void *p)
+{
+	struct hisi_sas_cq *cq = p;
+
+	complete_v3_hw(cq);
+
 	return IRQ_HANDLED;
 }
@@ -2474,8 +2519,9 @@ static void hisi_sas_v3_free_vectors(void *data)

 static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba)
 {
-	int vectors;
-	int max_msi = HISI_SAS_MSI_COUNT_V3_HW, min_msi;
+	/* Allocate all MSI vectors to avoid re-insertion issue */
+	int max_msi = HISI_SAS_MSI_COUNT_V3_HW;
+	int vectors, min_msi;
 	struct Scsi_Host *shost = hisi_hba->shost;
 	struct pci_dev *pdev = hisi_hba->pci_dev;
 	struct irq_affinity desc = {
@@ -2492,8 +2538,8 @@ static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba)
 		return -ENOENT;

-	hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW;
-	shost->nr_hw_queues = hisi_hba->cq_nvecs;
+	hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW - hisi_hba->iopoll_q_cnt;
+	shost->nr_hw_queues = hisi_hba->cq_nvecs + hisi_hba->iopoll_q_cnt;

 	return devm_add_action(&pdev->dev, hisi_sas_v3_free_vectors, pdev);
 }
@@ -2625,7 +2671,7 @@ static int disable_host_v3_hw(struct hisi_hba *hisi_hba)
 	u32 status, reg_val;
 	int rc;

-	interrupt_disable_v3_hw(hisi_hba);
+	hisi_sas_sync_poll_cqs(hisi_hba);
 	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);

 	hisi_sas_stop_phys(hisi_hba);
@@ -2655,6 +2701,7 @@ static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
 	struct device *dev = hisi_hba->dev;
 	int rc;

+	interrupt_disable_v3_hw(hisi_hba);
 	rc = disable_host_v3_hw(hisi_hba);
 	if (rc) {
 		dev_err(dev, "soft reset: disable host failed rc=%d\n", rc);
@@ -2823,6 +2870,18 @@ static ssize_t intr_coal_count_v3_hw_store(struct device *dev,
 }
 static DEVICE_ATTR_RW(intr_coal_count_v3_hw);

+static ssize_t iopoll_q_cnt_v3_hw_show(struct device *dev,
+				       struct device_attribute
+				       *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct hisi_hba *hisi_hba = shost_priv(shost);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 hisi_hba->iopoll_q_cnt);
+}
+static DEVICE_ATTR_RO(iopoll_q_cnt_v3_hw);
+
 static int slave_configure_v3_hw(struct scsi_device *sdev)
 {
 	struct Scsi_Host *shost = dev_to_shost(&sdev->sdev_gendev);
@@ -2852,6 +2911,7 @@ static struct attribute *host_v3_hw_attrs[] = {
 	&dev_attr_intr_conv_v3_hw.attr,
 	&dev_attr_intr_coal_ticks_v3_hw.attr,
 	&dev_attr_intr_coal_count_v3_hw.attr,
+	&dev_attr_iopoll_q_cnt_v3_hw.attr,
 	NULL
 };
@@ -3038,7 +3098,7 @@ static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba)

 	wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000);

-	hisi_sas_sync_irqs(hisi_hba);
+	hisi_sas_sync_cqs(hisi_hba);
 }

 static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba)
@@ -3210,12 +3270,34 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
 static void hisi_sas_map_queues(struct Scsi_Host *shost)
 {
 	struct hisi_hba *hisi_hba = shost_priv(shost);
-	struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+	struct blk_mq_queue_map *qmap;
+	int i, qoff;
+
+	for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
+		qmap = &shost->tag_set.map[i];
+		if (i == HCTX_TYPE_DEFAULT) {
+			qmap->nr_queues = hisi_hba->cq_nvecs;
+		} else if (i == HCTX_TYPE_POLL) {
+			qmap->nr_queues = hisi_hba->iopoll_q_cnt;
+		} else {
+			qmap->nr_queues = 0;
+			continue;
+		}

-	blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev, BASE_VECTORS_V3_HW);
+		/* At least one interrupt hardware queue */
+		if (!qmap->nr_queues)
+			WARN_ON(i == HCTX_TYPE_DEFAULT);
+		qmap->queue_offset = qoff;
+		if (i == HCTX_TYPE_POLL)
+			blk_mq_map_queues(qmap);
+		else
+			blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev,
+					      BASE_VECTORS_V3_HW);
+		qoff += qmap->nr_queues;
+	}
 }

-static struct scsi_host_template sht_v3_hw = {
+static const struct scsi_host_template sht_v3_hw = {
 	.name = DRV_NAME,
 	.proc_name = DRV_NAME,
 	.module = THIS_MODULE,
@@ -3244,6 +3326,7 @@ static struct scsi_host_template sht_v3_hw = {
 	.tag_alloc_policy = BLK_TAG_ALLOC_RR,
 	.host_reset = hisi_sas_host_reset,
 	.host_tagset = 1,
+	.mq_poll = queue_complete_v3_hw,
 };

 static const struct hisi_sas_hw hisi_sas_v3_hw = {
@@ -3303,6 +3386,13 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
 	if (hisi_sas_get_fw_info(hisi_hba) < 0)
 		goto err_out;

+	if (experimental_iopoll_q_cnt < 0 ||
+	    experimental_iopoll_q_cnt >= hisi_hba->queue_count)
+		dev_err(dev, "iopoll queue count %d cannot exceed or equal 16, using default 0\n",
+			experimental_iopoll_q_cnt);
+	else
+		hisi_hba->iopoll_q_cnt = experimental_iopoll_q_cnt;
+
 	if (hisi_sas_alloc(hisi_hba)) {
 		hisi_sas_free(hisi_hba);
 		goto err_out;
@@ -4858,6 +4948,10 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	shost->max_cmd_len = 16;
 	shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
 	shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
+	if (hisi_hba->iopoll_q_cnt)
+		shost->nr_maps = 3;
+	else
+		shost->nr_maps = 1;

 	sha->sas_ha_name = DRV_NAME;
 	sha->dev = dev;
@@ -4976,6 +5070,7 @@ static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
 	set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
 	hisi_sas_controller_reset_prepare(hisi_hba);
+	interrupt_disable_v3_hw(hisi_hba);
 	rc = disable_host_v3_hw(hisi_hba);
 	if (rc)
 		dev_err(dev, "FLR: disable host failed rc=%d\n", rc);
@@ -5005,6 +5100,21 @@ enum {
 	hip08,
 };

+static void enable_host_v3_hw(struct hisi_hba *hisi_hba)
+{
+	u32 reg_val;
+
+	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
+			 (u32)((1ULL << hisi_hba->queue_count) - 1));
+
+	phys_init_v3_hw(hisi_hba);
+	reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
+				  AM_CTRL_GLOBAL);
+	reg_val &= ~AM_CTRL_SHUTDOWN_REQ_MSK;
+	hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
+			 AM_CTRL_GLOBAL, reg_val);
+}
+
 static int _suspend_v3_hw(struct device *device)
 {
 	struct pci_dev *pdev = to_pci_dev(device);
@@ -5027,14 +5137,20 @@ static int _suspend_v3_hw(struct device *device)
 	scsi_block_requests(shost);
 	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
 	flush_workqueue(hisi_hba->wq);
+	interrupt_disable_v3_hw(hisi_hba);
+
+#ifdef CONFIG_PM
+	if (atomic_read(&device->power.usage_count)) {
+		dev_err(dev, "PM suspend: host status cannot be suspended\n");
+		rc = -EBUSY;
+		goto err_out;
+	}
+#endif
 	rc = disable_host_v3_hw(hisi_hba);
 	if (rc) {
 		dev_err(dev, "PM suspend: disable host failed rc=%d\n", rc);
-		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
-		clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
-		scsi_unblock_requests(shost);
-		return rc;
+		goto err_out_recover_host;
 	}

 	hisi_sas_init_mem(hisi_hba);
@@ -5045,6 +5161,17 @@ static int _suspend_v3_hw(struct device *device)
 	dev_warn(dev, "end of suspending controller\n");

 	return 0;
+
+err_out_recover_host:
+	enable_host_v3_hw(hisi_hba);
+#ifdef CONFIG_PM
+err_out:
+#endif
+	interrupt_enable_v3_hw(hisi_hba);
+	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+	clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
+	scsi_unblock_requests(shost);
+	return rc;
 }

 static int _resume_v3_hw(struct device *device)