Diffstat (limited to 'drivers/scsi')
41 files changed, 1101 insertions, 1024 deletions
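Several hunks below (3w-sas.h, mvumi.h) replace trailing one-element arrays with C99 flexible array members, which is why the matching allocation, copy, and SG-length arithmetic in 3w-sas.c and mvumi.c drops its "- 1" / "+ 4" fudge factors. A minimal sketch of the pattern, using a hypothetical struct rather than the drivers' own types:

#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for TW_Ioctl_Buf_Apache / mvumi_msg_frame. */
struct ioctl_buf {
	unsigned int header_len;
	char data_buffer[];	/* was: char data_buffer[1]; */
};

int main(void)
{
	size_t payload_len = 512;

	/*
	 * One-element array idiom:  malloc(sizeof(*buf) + payload_len - 1)
	 * Flexible array member:    malloc(sizeof(*buf) + payload_len)
	 * (kernel code would typically size this with struct_size())
	 */
	struct ioctl_buf *buf = malloc(sizeof(*buf) + payload_len);

	if (!buf)
		return 1;
	buf->header_len = payload_len;
	memset(buf->data_buffer, 0, payload_len);
	free(buf);
	return 0;
}

The same size change is what drives the dma_alloc_coherent()/copy_from_user() adjustments in twl_chrdev_ioctl() and the frame_len/max_sg computations in mvumi_send_command() and mvumi_io_attach() in the hunks that follow.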
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c index 3ebe66151dcb..f41c93454f0c 100644 --- a/drivers/scsi/3w-sas.c +++ b/drivers/scsi/3w-sas.c @@ -690,7 +690,7 @@ static void twl_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm newcommand->request_id__lunl = cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id)); if (length) { - newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1); + newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache)); newcommand->sg_list[0].length = TW_CPU_TO_SGL(length); } newcommand->sgl_entries__lunh = @@ -702,7 +702,7 @@ static void twl_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) { /* Load the sg list */ sgl = (TW_SG_Entry_ISO *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry_ISO)/4) + pae + (sizeof(dma_addr_t) > 4 ? 1 : 0)); - sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1); + sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache)); sgl->length = TW_CPU_TO_SGL(length); oldcommand->size += pae; oldcommand->size += sizeof(dma_addr_t) > 4 ? 1 : 0; @@ -748,7 +748,7 @@ static long twl_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511; /* Now allocate ioctl buf memory */ - cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL); + cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted + sizeof(TW_Ioctl_Buf_Apache), &dma_handle, GFP_KERNEL); if (!cpu_addr) { retval = -ENOMEM; goto out2; @@ -757,7 +757,7 @@ static long twl_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr; /* Now copy down the entire ioctl */ - if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1)) + if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache))) goto out3; /* See which ioctl we are doing */ @@ -815,11 +815,11 @@ static long twl_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long } /* Now copy the entire response to userspace */ - if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0) + if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length) == 0) retval = 0; out3: /* Now free ioctl buf memory */ - dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle); + dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted + sizeof(TW_Ioctl_Buf_Apache), cpu_addr, dma_handle); out2: mutex_unlock(&tw_dev->ioctl_lock); out: diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h index b0508039a280..096dec29e2ac 100644 --- a/drivers/scsi/3w-sas.h +++ b/drivers/scsi/3w-sas.h @@ -335,7 +335,7 @@ typedef struct TAG_TW_Ioctl_Apache { TW_Ioctl_Driver_Command driver_command; char padding[488]; TW_Command_Full firmware_command; - char data_buffer[1]; + char data_buffer[]; } TW_Ioctl_Buf_Apache; /* GetParam descriptor */ @@ -344,7 +344,7 @@ typedef struct { unsigned short parameter_id; unsigned short parameter_size_bytes; unsigned short actual_parameter_size_bytes; - unsigned char data[1]; + unsigned char data[]; } TW_Param_Apache; /* Compatibility 
information structure */ diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 1ccce706167a..4a7e835c24cd 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -239,7 +239,17 @@ static struct sas_internal *dev_to_sas_internal(struct domain_device *dev) return to_sas_internal(dev->port->ha->core.shost->transportt); } -static int sas_get_ata_command_set(struct domain_device *dev); +static int sas_get_ata_command_set(struct domain_device *dev) +{ + struct ata_taskfile tf; + + if (dev->dev_type == SAS_SATA_PENDING) + return ATA_DEV_UNKNOWN; + + ata_tf_from_fis(dev->frame_rcvd, &tf); + + return ata_dev_classify(&tf); +} int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy) { @@ -637,20 +647,6 @@ void sas_ata_task_abort(struct sas_task *task) complete(waiting); } -static int sas_get_ata_command_set(struct domain_device *dev) -{ - struct dev_to_host_fis *fis = - (struct dev_to_host_fis *) dev->frame_rcvd; - struct ata_taskfile tf; - - if (dev->dev_type == SAS_SATA_PENDING) - return ATA_DEV_UNKNOWN; - - ata_tf_from_fis((const u8 *)fis, &tf); - - return ata_dev_classify(&tf); -} - void sas_probe_sata(struct asd_sas_port *port) { struct domain_device *dev, *n; @@ -679,6 +675,68 @@ void sas_probe_sata(struct asd_sas_port *port) } +int sas_ata_add_dev(struct domain_device *parent, struct ex_phy *phy, + struct domain_device *child, int phy_id) +{ + struct sas_rphy *rphy; + int ret; + + if (child->linkrate > parent->min_linkrate) { + struct sas_phy *cphy = child->phy; + enum sas_linkrate min_prate = cphy->minimum_linkrate, + parent_min_lrate = parent->min_linkrate, + min_linkrate = (min_prate > parent_min_lrate) ? + parent_min_lrate : 0; + struct sas_phy_linkrates rates = { + .maximum_linkrate = parent->min_linkrate, + .minimum_linkrate = min_linkrate, + }; + + pr_notice("ex %016llx phy%02d SATA device linkrate > min pathway connection rate, attempting to lower device linkrate\n", + SAS_ADDR(child->sas_addr), phy_id); + ret = sas_smp_phy_control(parent, phy_id, + PHY_FUNC_LINK_RESET, &rates); + if (ret) { + pr_err("ex %016llx phy%02d SATA device could not set linkrate (%d)\n", + SAS_ADDR(child->sas_addr), phy_id, ret); + return ret; + } + pr_notice("ex %016llx phy%02d SATA device set linkrate successfully\n", + SAS_ADDR(child->sas_addr), phy_id); + child->linkrate = child->min_linkrate; + } + ret = sas_get_ata_info(child, phy); + if (ret) + return ret; + + sas_init_dev(child); + ret = sas_ata_init(child); + if (ret) + return ret; + + rphy = sas_end_device_alloc(phy->port); + if (!rphy) + return -ENOMEM; + + rphy->identify.phy_identifier = phy_id; + child->rphy = rphy; + get_device(&rphy->dev); + + list_add_tail(&child->disco_list_node, &parent->port->disco_list); + + ret = sas_discover_sata(child); + if (ret) { + pr_notice("sas_discover_sata() for device %16llx at %016llx:%02d returned 0x%x\n", + SAS_ADDR(child->sas_addr), + SAS_ADDR(parent->sas_addr), phy_id, ret); + sas_rphy_free(child->rphy); + list_del(&child->disco_list_node); + return ret; + } + + return 0; +} + static void sas_ata_flush_pm_eh(struct asd_sas_port *port, const char *func) { struct domain_device *dev, *n; diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index d5bc1314c341..72fdb2e5d047 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -455,14 +455,8 @@ static void sas_discover_domain(struct work_struct *work) break; case SAS_SATA_DEV: case SAS_SATA_PM: -#ifdef CONFIG_SCSI_SAS_ATA error = 
sas_discover_sata(dev); break; -#else - pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n"); - fallthrough; -#endif - /* Fall through - only for the #else condition above. */ default: error = -ENXIO; pr_err("unhandled device %d\n", dev->dev_type); diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index a04cad620e93..dc670304f181 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -751,13 +751,46 @@ static void sas_ex_get_linkrate(struct domain_device *parent, child->pathways = min(child->pathways, parent->pathways); } +static int sas_ex_add_dev(struct domain_device *parent, struct ex_phy *phy, + struct domain_device *child, int phy_id) +{ + struct sas_rphy *rphy; + int res; + + child->dev_type = SAS_END_DEVICE; + rphy = sas_end_device_alloc(phy->port); + if (!rphy) + return -ENOMEM; + + child->tproto = phy->attached_tproto; + sas_init_dev(child); + + child->rphy = rphy; + get_device(&rphy->dev); + rphy->identify.phy_identifier = phy_id; + sas_fill_in_rphy(child, rphy); + + list_add_tail(&child->disco_list_node, &parent->port->disco_list); + + res = sas_notify_lldd_dev_found(child); + if (res) { + pr_notice("notify lldd for device %016llx at %016llx:%02d returned 0x%x\n", + SAS_ADDR(child->sas_addr), + SAS_ADDR(parent->sas_addr), phy_id, res); + sas_rphy_free(child->rphy); + list_del(&child->disco_list_node); + return res; + } + + return 0; +} + static struct domain_device *sas_ex_discover_end_dev( struct domain_device *parent, int phy_id) { struct expander_device *parent_ex = &parent->ex_dev; struct ex_phy *phy = &parent_ex->ex_phy[phy_id]; struct domain_device *child = NULL; - struct sas_rphy *rphy; int res; if (phy->attached_sata_host || phy->attached_sata_ps) @@ -785,99 +818,23 @@ static struct domain_device *sas_ex_discover_end_dev( sas_ex_get_linkrate(parent, child, phy); sas_device_set_phy(child, phy->port); -#ifdef CONFIG_SCSI_SAS_ATA if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) { - if (child->linkrate > parent->min_linkrate) { - struct sas_phy *cphy = child->phy; - enum sas_linkrate min_prate = cphy->minimum_linkrate, - parent_min_lrate = parent->min_linkrate, - min_linkrate = (min_prate > parent_min_lrate) ? 
- parent_min_lrate : 0; - struct sas_phy_linkrates rates = { - .maximum_linkrate = parent->min_linkrate, - .minimum_linkrate = min_linkrate, - }; - int ret; - - pr_notice("ex %016llx phy%02d SATA device linkrate > min pathway connection rate, attempting to lower device linkrate\n", - SAS_ADDR(child->sas_addr), phy_id); - ret = sas_smp_phy_control(parent, phy_id, - PHY_FUNC_LINK_RESET, &rates); - if (ret) { - pr_err("ex %016llx phy%02d SATA device could not set linkrate (%d)\n", - SAS_ADDR(child->sas_addr), phy_id, ret); - goto out_free; - } - pr_notice("ex %016llx phy%02d SATA device set linkrate successfully\n", - SAS_ADDR(child->sas_addr), phy_id); - child->linkrate = child->min_linkrate; - } - res = sas_get_ata_info(child, phy); - if (res) - goto out_free; - - sas_init_dev(child); - res = sas_ata_init(child); - if (res) - goto out_free; - rphy = sas_end_device_alloc(phy->port); - if (!rphy) - goto out_free; - rphy->identify.phy_identifier = phy_id; - - child->rphy = rphy; - get_device(&rphy->dev); - - list_add_tail(&child->disco_list_node, &parent->port->disco_list); - - res = sas_discover_sata(child); - if (res) { - pr_notice("sas_discover_sata() for device %16llx at %016llx:%02d returned 0x%x\n", - SAS_ADDR(child->sas_addr), - SAS_ADDR(parent->sas_addr), phy_id, res); - goto out_list_del; - } - } else -#endif - if (phy->attached_tproto & SAS_PROTOCOL_SSP) { - child->dev_type = SAS_END_DEVICE; - rphy = sas_end_device_alloc(phy->port); - /* FIXME: error handling */ - if (unlikely(!rphy)) - goto out_free; - child->tproto = phy->attached_tproto; - sas_init_dev(child); - - child->rphy = rphy; - get_device(&rphy->dev); - rphy->identify.phy_identifier = phy_id; - sas_fill_in_rphy(child, rphy); - - list_add_tail(&child->disco_list_node, &parent->port->disco_list); - - res = sas_discover_end_dev(child); - if (res) { - pr_notice("sas_discover_end_dev() for device %016llx at %016llx:%02d returned 0x%x\n", - SAS_ADDR(child->sas_addr), - SAS_ADDR(parent->sas_addr), phy_id, res); - goto out_list_del; - } + res = sas_ata_add_dev(parent, phy, child, phy_id); + } else if (phy->attached_tproto & SAS_PROTOCOL_SSP) { + res = sas_ex_add_dev(parent, phy, child, phy_id); } else { pr_notice("target proto 0x%x at %016llx:0x%x not handled\n", phy->attached_tproto, SAS_ADDR(parent->sas_addr), phy_id); - goto out_free; + res = -ENODEV; } + if (res) + goto out_free; + list_add_tail(&child->siblings, &parent_ex->children); return child; - out_list_del: - sas_rphy_free(child->rphy); - list_del(&child->disco_list_node); - spin_lock_irq(&parent->port->dev_list_lock); - list_del(&child->dev_list_node); - spin_unlock_irq(&parent->port->dev_list_lock); out_free: sas_port_delete(phy->port); out_err: diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 9ad233b40a9e..cf55f8e3bd9f 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* @@ -1592,8 +1592,6 @@ struct lpfc_hba { struct timer_list cpuhp_poll_timer; struct list_head poll_list; /* slowpath eq polling list */ #define LPFC_POLL_HB 1 /* slowpath heartbeat */ -#define LPFC_POLL_FASTPATH 0 /* called from fastpath */ -#define LPFC_POLL_SLOWPATH 1 /* called from slowpath */ char os_host_name[MAXHOSTNAMELEN]; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 77e1b2911cb4..76c3434f8976 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -1905,8 +1905,7 @@ lpfc_xcvr_data_show(struct device *dev, struct device_attribute *attr, goto out_free_rdp; } - strncpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_NAME], 16); - chbuf[16] = 0; + strscpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_NAME], 16); len = scnprintf(buf, PAGE_SIZE - len, "VendorName:\t%s\n", chbuf); len += scnprintf(buf + len, PAGE_SIZE - len, @@ -1914,17 +1913,13 @@ lpfc_xcvr_data_show(struct device *dev, struct device_attribute *attr, (uint8_t)rdp_context->page_a0[SSF_VENDOR_OUI], (uint8_t)rdp_context->page_a0[SSF_VENDOR_OUI + 1], (uint8_t)rdp_context->page_a0[SSF_VENDOR_OUI + 2]); - strncpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_PN], 16); - chbuf[16] = 0; + strscpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_PN], 16); len += scnprintf(buf + len, PAGE_SIZE - len, "VendorPN:\t%s\n", chbuf); - strncpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_SN], 16); - chbuf[16] = 0; + strscpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_SN], 16); len += scnprintf(buf + len, PAGE_SIZE - len, "VendorSN:\t%s\n", chbuf); - strncpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_REV], 4); - chbuf[4] = 0; + strscpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_REV], 4); len += scnprintf(buf + len, PAGE_SIZE - len, "VendorRev:\t%s\n", chbuf); - strncpy(chbuf, &rdp_context->page_a0[SSF_DATE_CODE], 8); - chbuf[8] = 0; + strscpy(chbuf, &rdp_context->page_a0[SSF_DATE_CODE], 8); len += scnprintf(buf + len, PAGE_SIZE - len, "DateCode:\t%s\n", chbuf); len += scnprintf(buf + len, PAGE_SIZE - len, "Identifier:\t%xh\n", (uint8_t)rdp_context->page_a0[SSF_IDENTIFIER]); @@ -1941,33 +1936,25 @@ lpfc_xcvr_data_show(struct device *dev, struct device_attribute *attr, &rdp_context->page_a0[SSF_TRANSCEIVER_CODE_B7]; len += scnprintf(buf + len, PAGE_SIZE - len, "Speeds: \t"); - if (*(uint8_t *)trasn_code_byte7 == 0) { - len += scnprintf(buf + len, PAGE_SIZE - len, - "Unknown\n"); - } else { - if (trasn_code_byte7->fc_sp_100MB) - len += scnprintf(buf + len, PAGE_SIZE - len, - "1 "); - if (trasn_code_byte7->fc_sp_200mb) - len += scnprintf(buf + len, PAGE_SIZE - len, - "2 "); - if (trasn_code_byte7->fc_sp_400MB) - len += scnprintf(buf + len, PAGE_SIZE - len, - "4 "); - if (trasn_code_byte7->fc_sp_800MB) - len += scnprintf(buf + len, PAGE_SIZE - len, - "8 "); - if (trasn_code_byte7->fc_sp_1600MB) - len += scnprintf(buf + len, PAGE_SIZE - len, - "16 "); - if (trasn_code_byte7->fc_sp_3200MB) - len += scnprintf(buf + len, PAGE_SIZE - len, - "32 "); - if (trasn_code_byte7->speed_chk_ecc) - len += scnprintf(buf + len, PAGE_SIZE - len, - "64 "); - len 
+= scnprintf(buf + len, PAGE_SIZE - len, "GB\n"); - } + if (*(uint8_t *)trasn_code_byte7 == 0) { + len += scnprintf(buf + len, PAGE_SIZE - len, "Unknown\n"); + } else { + if (trasn_code_byte7->fc_sp_100MB) + len += scnprintf(buf + len, PAGE_SIZE - len, "1 "); + if (trasn_code_byte7->fc_sp_200mb) + len += scnprintf(buf + len, PAGE_SIZE - len, "2 "); + if (trasn_code_byte7->fc_sp_400MB) + len += scnprintf(buf + len, PAGE_SIZE - len, "4 "); + if (trasn_code_byte7->fc_sp_800MB) + len += scnprintf(buf + len, PAGE_SIZE - len, "8 "); + if (trasn_code_byte7->fc_sp_1600MB) + len += scnprintf(buf + len, PAGE_SIZE - len, "16 "); + if (trasn_code_byte7->fc_sp_3200MB) + len += scnprintf(buf + len, PAGE_SIZE - len, "32 "); + if (trasn_code_byte7->speed_chk_ecc) + len += scnprintf(buf + len, PAGE_SIZE - len, "64 "); + len += scnprintf(buf + len, PAGE_SIZE - len, "GB\n"); + } temperature = (rdp_context->page_a2[SFF_TEMPERATURE_B1] << 8 | rdp_context->page_a2[SFF_TEMPERATURE_B0]); vcc = (rdp_context->page_a2[SFF_VCC_B1] << 8 | diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 8928f016d09e..976fd5ee7f7e 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -253,7 +253,6 @@ int lpfc_read_object(struct lpfc_hba *phba, char *s, uint32_t *datap, uint32_t len); void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba); -int lpfc_sli4_poll_eq(struct lpfc_queue *q, uint8_t path); void lpfc_sli4_poll_hbtimer(struct timer_list *t); void lpfc_sli4_start_polling(struct lpfc_queue *q); void lpfc_sli4_stop_polling(struct lpfc_queue *q); @@ -684,6 +683,7 @@ int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, union lpfc_vmid_io_tag *tag); void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport); int lpfc_issue_els_qfpa(struct lpfc_vport *vport); +void lpfc_reinit_vmid(struct lpfc_vport *vport); void lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp); diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 919741bbe267..569639dc8b2c 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -1123,6 +1123,9 @@ stop_rr_fcf_flogi: if (sp->cmn.priority_tagging) vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA | LPFC_VMID_TYPE_PRIO); + /* reinitialize the VMID datastructure before returning */ + if (lpfc_is_vmid_enabled(phba)) + lpfc_reinit_vmid(vport); /* * Address a timing race with dev_loss. 
If dev_loss is active on @@ -2373,15 +2376,30 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* PRLI failed */ lpfc_printf_vlog(vport, mode, loglevel, "2754 PRLI failure DID:%06X Status:x%x/x%x, " - "data: x%x\n", + "data: x%x x%x\n", ndlp->nlp_DID, ulp_status, - ulp_word4, ndlp->fc4_prli_sent); + ulp_word4, ndlp->nlp_state, + ndlp->fc4_prli_sent); /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ if (!lpfc_error_lost_link(ulp_status, ulp_word4)) lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_PRLI); + /* The following condition catches an inflight transition + * mismatch typically caused by an RSCN. Skip any + * processing to allow recovery. + */ + if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE && + ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, + "2784 PRLI cmpl: state mismatch " + "DID x%06x nstate x%x nflag x%x\n", + ndlp->nlp_DID, ndlp->nlp_state, + ndlp->nlp_flag); + goto out; + } + /* * For P2P topology, retain the node so that PLOGI can be * attempted on it again. @@ -4673,6 +4691,15 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* the nameserver fails */ maxretry = 0; delay = 100; + } else if (cmd == ELS_CMD_PRLI && + ndlp->nlp_state != NLP_STE_PRLI_ISSUE) { + /* State-command disagreement. The PRLI was + * failed with an invalid rpi meaning there + * some unexpected state change. Don't retry. + */ + maxretry = 0; + retry = 0; + break; } retry = 1; break; diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 80375d73b732..a6df0a5b4006 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -1129,21 +1129,6 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove) struct lpfc_nodelist *ndlp, *next_ndlp; list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { - if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { - /* It's possible the FLOGI to the fabric node never - * successfully completed and never registered with the - * transport. In this case there is no way to clean up - * the node. - */ - if (ndlp->nlp_DID == Fabric_DID) { - if (ndlp->nlp_prev_state == - NLP_STE_UNUSED_NODE && - !ndlp->fc4_xpt_flags) - lpfc_nlp_put(ndlp); - } - continue; - } - if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) || ((vport->port_type == LPFC_NPIV_PORT) && ((ndlp->nlp_DID == NameServer_DID) || diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index fb3504dbb899..58fa39c403a0 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2009-2016 Emulex. All rights reserved. 
* * EMULEX and SLI are trademarks of Emulex. * @@ -4201,6 +4201,8 @@ struct lpfc_acqe_fc_la { #define LPFC_FC_LA_TYPE_MDS_LOOPBACK 0x5 #define LPFC_FC_LA_TYPE_UNEXP_WWPN 0x6 #define LPFC_FC_LA_TYPE_TRUNKING_EVENT 0x7 +#define LPFC_FC_LA_TYPE_ACTIVATE_FAIL 0x8 +#define LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT 0x9 #define lpfc_acqe_fc_la_port_type_SHIFT 6 #define lpfc_acqe_fc_la_port_type_MASK 0x00000003 #define lpfc_acqe_fc_la_port_type_WORD word0 @@ -4242,6 +4244,9 @@ struct lpfc_acqe_fc_la { #define lpfc_acqe_fc_la_fault_SHIFT 0 #define lpfc_acqe_fc_la_fault_MASK 0x000000FF #define lpfc_acqe_fc_la_fault_WORD word1 +#define lpfc_acqe_fc_la_link_status_SHIFT 8 +#define lpfc_acqe_fc_la_link_status_MASK 0x0000007F +#define lpfc_acqe_fc_la_link_status_WORD word1 #define lpfc_acqe_fc_la_trunk_fault_SHIFT 0 #define lpfc_acqe_fc_la_trunk_fault_MASK 0x0000000F #define lpfc_acqe_fc_la_trunk_fault_WORD word1 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 25ba20e42825..faaaeae25d44 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -5189,16 +5189,25 @@ static void lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, struct lpfc_acqe_link *acqe_link) { - switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { - case LPFC_ASYNC_LINK_FAULT_NONE: - case LPFC_ASYNC_LINK_FAULT_LOCAL: - case LPFC_ASYNC_LINK_FAULT_REMOTE: - case LPFC_ASYNC_LINK_FAULT_LR_LRR: + switch (bf_get(lpfc_acqe_fc_la_att_type, acqe_link)) { + case LPFC_FC_LA_TYPE_LINK_DOWN: + case LPFC_FC_LA_TYPE_TRUNKING_EVENT: + case LPFC_FC_LA_TYPE_ACTIVATE_FAIL: + case LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT: break; default: - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, - "0398 Unknown link fault code: x%x\n", - bf_get(lpfc_acqe_link_fault, acqe_link)); + switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { + case LPFC_ASYNC_LINK_FAULT_NONE: + case LPFC_ASYNC_LINK_FAULT_LOCAL: + case LPFC_ASYNC_LINK_FAULT_REMOTE: + case LPFC_ASYNC_LINK_FAULT_LR_LRR: + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "0398 Unknown link fault code: x%x\n", + bf_get(lpfc_acqe_link_fault, acqe_link)); + break; + } break; } } @@ -6281,6 +6290,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) LPFC_MBOXQ_t *pmb; MAILBOX_t *mb; struct lpfc_mbx_read_top *la; + char *log_level; int rc; if (bf_get(lpfc_trailer_type, acqe_fc) != @@ -6312,25 +6322,70 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); phba->sli4_hba.link_state.fault = bf_get(lpfc_acqe_link_fault, acqe_fc); + phba->sli4_hba.link_state.link_status = + bf_get(lpfc_acqe_fc_la_link_status, acqe_fc); - if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == - LPFC_FC_LA_TYPE_LINK_DOWN) - phba->sli4_hba.link_state.logical_speed = 0; - else if (!phba->sli4_hba.conf_trunk) - phba->sli4_hba.link_state.logical_speed = + /* + * Only select attention types need logical speed modification to what + * was previously set. 
+ */ + if (phba->sli4_hba.link_state.status >= LPFC_FC_LA_TYPE_LINK_UP && + phba->sli4_hba.link_state.status < LPFC_FC_LA_TYPE_ACTIVATE_FAIL) { + if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) == + LPFC_FC_LA_TYPE_LINK_DOWN) + phba->sli4_hba.link_state.logical_speed = 0; + else if (!phba->sli4_hba.conf_trunk) + phba->sli4_hba.link_state.logical_speed = bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; + } lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "2896 Async FC event - Speed:%dGBaud Topology:x%x " "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" - "%dMbps Fault:%d\n", + "%dMbps Fault:x%x Link Status:x%x\n", phba->sli4_hba.link_state.speed, phba->sli4_hba.link_state.topology, phba->sli4_hba.link_state.status, phba->sli4_hba.link_state.type, phba->sli4_hba.link_state.number, phba->sli4_hba.link_state.logical_speed, - phba->sli4_hba.link_state.fault); + phba->sli4_hba.link_state.fault, + phba->sli4_hba.link_state.link_status); + + /* + * The following attention types are informational only, providing + * further details about link status. Overwrite the value of + * link_state.status appropriately. No further action is required. + */ + if (phba->sli4_hba.link_state.status >= LPFC_FC_LA_TYPE_ACTIVATE_FAIL) { + switch (phba->sli4_hba.link_state.status) { + case LPFC_FC_LA_TYPE_ACTIVATE_FAIL: + log_level = KERN_WARNING; + phba->sli4_hba.link_state.status = + LPFC_FC_LA_TYPE_LINK_DOWN; + break; + case LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT: + /* + * During bb credit recovery establishment, receiving + * this attention type is normal. Link Up attention + * type is expected to occur before this informational + * attention type so keep the Link Up status. + */ + log_level = KERN_INFO; + phba->sli4_hba.link_state.status = + LPFC_FC_LA_TYPE_LINK_UP; + break; + default: + log_level = KERN_INFO; + break; + } + lpfc_log_msg(phba, log_level, LOG_SLI, + "2992 Async FC event - Informational Link " + "Attention Type x%x\n", + bf_get(lpfc_acqe_fc_la_att_type, acqe_fc)); + return; + } + pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, @@ -13917,6 +13972,13 @@ fcponly: if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; + rc = dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "6400 Can't set dma maximum segment size\n"); + return rc; + } + /* * Check whether the adapter supports an embedded copy of the * FCP CMD IU within the WQE for FCP_Ixxx commands. In order diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 7a1563564df7..e989f130434e 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* @@ -1689,7 +1689,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct lpfc_pde6 *pde6 = NULL; struct lpfc_pde7 *pde7 = NULL; dma_addr_t dataphysaddr, protphysaddr; - unsigned short curr_data = 0, curr_prot = 0; + unsigned short curr_prot = 0; unsigned int split_offset; unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; unsigned int protgrp_blks, protgrp_bytes; @@ -1858,7 +1858,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, bpl->tus.w = le32_to_cpu(bpl->tus.w); num_bde++; - curr_data++; if (split_offset) break; @@ -2119,7 +2118,7 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct scatterlist *sgpe = NULL; /* s/g prot entry */ struct sli4_sge_diseed *diseed = NULL; dma_addr_t dataphysaddr, protphysaddr; - unsigned short curr_data = 0, curr_prot = 0; + unsigned short curr_prot = 0; unsigned int split_offset; unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; unsigned int protgrp_blks, protgrp_bytes; @@ -2364,7 +2363,6 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, dma_offset += dma_len; num_sge++; - curr_data++; if (split_offset) { sgl++; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 182aaae60386..edbd81c3b643 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -11270,6 +11270,30 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) } } +inline void lpfc_sli4_poll_eq(struct lpfc_queue *eq) +{ + struct lpfc_hba *phba = eq->phba; + + /* + * Unlocking an irq is one of the entry point to check + * for re-schedule, but we are good for io submission + * path as midlayer does a get_cpu to glue us in. Flush + * out the invalidate queue so we can see the updated + * value for flag. + */ + smp_rmb(); + + if (READ_ONCE(eq->mode) == LPFC_EQ_POLL) + /* We will not likely get the completion for the caller + * during this iteration but i guess that's fine. + * Future io's coming on this eq should be able to + * pick it up. As for the case of single io's, they + * will be handled through a sched from polling timer + * function which is currently triggered every 1msec. + */ + lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM); +} + /** * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb * @phba: Pointer to HBA context object. 
@@ -11309,7 +11333,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); spin_unlock_irqrestore(&pring->ring_lock, iflags); - lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH); + lpfc_sli4_poll_eq(eq); } else { /* For now, SLI2/3 will still use hbalock */ spin_lock_irqsave(&phba->hbalock, iflags); @@ -15625,12 +15649,11 @@ void lpfc_sli4_poll_hbtimer(struct timer_list *t) { struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer); struct lpfc_queue *eq; - int i = 0; rcu_read_lock(); list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list) - i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH); + lpfc_sli4_poll_eq(eq); if (!list_empty(&phba->poll_list)) mod_timer(&phba->cpuhp_poll_timer, jiffies + msecs_to_jiffies(LPFC_POLL_HB)); @@ -15638,33 +15661,6 @@ void lpfc_sli4_poll_hbtimer(struct timer_list *t) rcu_read_unlock(); } -inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path) -{ - struct lpfc_hba *phba = eq->phba; - int i = 0; - - /* - * Unlocking an irq is one of the entry point to check - * for re-schedule, but we are good for io submission - * path as midlayer does a get_cpu to glue us in. Flush - * out the invalidate queue so we can see the updated - * value for flag. - */ - smp_rmb(); - - if (READ_ONCE(eq->mode) == LPFC_EQ_POLL) - /* We will not likely get the completion for the caller - * during this iteration but i guess that's fine. - * Future io's coming on this eq should be able to - * pick it up. As for the case of single io's, they - * will be handled through a sched from polling timer - * function which is currently triggered every 1msec. - */ - i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM); - - return i; -} - static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq) { struct lpfc_hba *phba = eq->phba; @@ -20819,6 +20815,7 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, struct lpfc_mbx_wr_object *wr_object; LPFC_MBOXQ_t *mbox; int rc = 0, i = 0; + int mbox_status = 0; uint32_t shdr_status, shdr_add_status, shdr_add_status_2; uint32_t shdr_change_status = 0, shdr_csf = 0; uint32_t mbox_tmo; @@ -20864,11 +20861,15 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, wr_object->u.request.bde_count = i; bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written); if (!phba->sli4_hba.intr_enable) - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + mbox_status = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); else { mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); - rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); + mbox_status = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); } + + /* The mbox status needs to be maintained to detect MBOX_TIMEOUT. */ + rc = mbox_status; + /* The IOCTL status is embedded in the mailbox subheader. 
*/ shdr_status = bf_get(lpfc_mbox_hdr_status, &wr_object->header.cfg_shdr.response); @@ -20883,10 +20884,6 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, &wr_object->u.response); } - if (!phba->sli4_hba.intr_enable) - mempool_free(mbox, phba->mbox_mem_pool); - else if (rc != MBX_TIMEOUT) - mempool_free(mbox, phba->mbox_mem_pool); if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3025 Write Object mailbox failed with " @@ -20904,6 +20901,12 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status, shdr_add_status_2, shdr_change_status, shdr_csf); + + if (!phba->sli4_hba.intr_enable) + mempool_free(mbox, phba->mbox_mem_pool); + else if (mbox_status != MBX_TIMEOUT) + mempool_free(mbox, phba->mbox_mem_pool); + return rc; } @@ -21276,7 +21279,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); spin_unlock_irqrestore(&pring->ring_lock, iflags); - lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH); + lpfc_sli4_poll_eq(qp->hba_eq); return 0; } @@ -21298,7 +21301,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); spin_unlock_irqrestore(&pring->ring_lock, iflags); - lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH); + lpfc_sli4_poll_eq(qp->hba_eq); return 0; } @@ -21328,7 +21331,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); spin_unlock_irqrestore(&pring->ring_lock, iflags); - lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH); + lpfc_sli4_poll_eq(qp->hba_eq); return 0; } return WQE_ERROR; diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index f927c2a25d54..3b62c4032c31 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2009-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -291,8 +291,9 @@ struct lpfc_sli4_link { uint8_t type; uint8_t number; uint8_t fault; - uint32_t logical_speed; + uint8_t link_status; uint16_t topology; + uint32_t logical_speed; }; struct lpfc_fcf_rec { diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 41a1128f8651..0238208cdd11 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -20,7 +20,7 @@ * included with this package. 
* *******************************************************************/ -#define LPFC_DRIVER_VERSION "14.2.0.9" +#define LPFC_DRIVER_VERSION "14.2.0.10" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ @@ -32,6 +32,6 @@ #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ LPFC_DRIVER_VERSION -#define LPFC_COPYRIGHT "Copyright (C) 2017-2022 Broadcom. All Rights " \ +#define LPFC_COPYRIGHT "Copyright (C) 2017-2023 Broadcom. All Rights " \ "Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \ "and/or its subsidiaries." diff --git a/drivers/scsi/lpfc/lpfc_vmid.c b/drivers/scsi/lpfc/lpfc_vmid.c index ed1d7f7b88a3..cf8ba840d0ea 100644 --- a/drivers/scsi/lpfc/lpfc_vmid.c +++ b/drivers/scsi/lpfc/lpfc_vmid.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -284,3 +284,42 @@ int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, } return rc; } + +/* + * lpfc_reinit_vmid - reinitializes the vmid data structure + * @vport: pointer to vport data structure + * + * This routine reinitializes the vmid post flogi completion + * + * Return codes + * None + */ +void +lpfc_reinit_vmid(struct lpfc_vport *vport) +{ + u32 bucket, i, cpu; + struct lpfc_vmid *cur; + struct lpfc_vmid *vmp = NULL; + struct hlist_node *tmp; + + write_lock(&vport->vmid_lock); + vport->cur_vmid_cnt = 0; + + for (i = 0; i < vport->max_vmid; i++) { + vmp = &vport->vmid[i]; + vmp->flag = LPFC_VMID_SLOT_FREE; + memset(vmp->host_vmid, 0, sizeof(vmp->host_vmid)); + vmp->io_rd_cnt = 0; + vmp->io_wr_cnt = 0; + + if (vmp->last_io_time) + for_each_possible_cpu(cpu) + *per_cpu_ptr(vmp->last_io_time, cpu) = 0; + } + + /* for all elements in the hash table */ + if (!hash_empty(vport->hash_table)) + hash_for_each_safe(vport->hash_table, bucket, tmp, cur, hnode) + hash_del(&cur->hnode); + write_unlock(&vport->vmid_lock); +} diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 4d171f5c213f..6c7559cf1a4b 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -534,7 +534,7 @@ disable_vport(struct fc_vport *fc_vport) { struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; struct lpfc_hba *phba = vport->phba; - struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL; + struct lpfc_nodelist *ndlp = NULL; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); /* Can't disable during an outstanding delete. 
*/ @@ -546,17 +546,7 @@ disable_vport(struct fc_vport *fc_vport) (void)lpfc_send_npiv_logo(vport, ndlp); lpfc_sli_host_down(vport); - - /* Mark all nodes for discovery so we can remove them by - * calling lpfc_cleanup_rpis(vport, 1) - */ - list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { - if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) - continue; - lpfc_disc_state_machine(vport, ndlp, NULL, - NLP_EVT_DEVICE_RECOVERY); - } - lpfc_cleanup_rpis(vport, 1); + lpfc_cleanup_rpis(vport, 0); lpfc_stop_vport_timers(vport); lpfc_unreg_all_rpis(vport); diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c index b3dcb8918618..60c65586f30e 100644 --- a/drivers/scsi/mvumi.c +++ b/drivers/scsi/mvumi.c @@ -1841,7 +1841,7 @@ static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba, cmd->frame->request_id = mhba->io_seq++; cmd->request_id = cmd->frame->request_id; mhba->tag_cmd[cmd->frame->tag] = cmd; - frame_len = sizeof(*ib_frame) - 4 + + frame_len = sizeof(*ib_frame) + ib_frame->sg_counts * sizeof(struct mvumi_sgl); if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) { struct mvumi_dyn_list_entry *dle; @@ -2387,7 +2387,7 @@ static int mvumi_io_attach(struct mvumi_hba *mhba) struct Scsi_Host *host = mhba->shost; struct scsi_device *sdev = NULL; int ret; - unsigned int max_sg = (mhba->ib_max_size + 4 - + unsigned int max_sg = (mhba->ib_max_size - sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl); host->irq = mhba->pdev->irq; diff --git a/drivers/scsi/mvumi.h b/drivers/scsi/mvumi.h index a88c58787b68..1306a4abf19a 100644 --- a/drivers/scsi/mvumi.h +++ b/drivers/scsi/mvumi.h @@ -279,7 +279,7 @@ struct mvumi_msg_frame { u16 request_id; u16 reserved1; u8 cdb[MAX_COMMAND_SIZE]; - u32 payload[1]; + u32 payload[]; }; /* @@ -294,7 +294,7 @@ struct mvumi_rsp_frame { u8 req_status; u8 rsp_flag; /* Indicates the type of Data_Payload.*/ u16 request_id; - u32 payload[1]; + u32 payload[]; }; struct mvumi_ob_data { @@ -380,7 +380,7 @@ struct mvumi_hs_header { u8 page_code; u8 checksum; u16 frame_length; - u32 frame_content[1]; + u32 frame_content[]; }; /* diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c index 73f036bed128..5c26a13ffbd2 100644 --- a/drivers/scsi/pm8001/pm8001_ctl.c +++ b/drivers/scsi/pm8001/pm8001_ctl.c @@ -61,10 +61,10 @@ static ssize_t pm8001_ctl_mpi_interface_rev_show(struct device *cdev, struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; if (pm8001_ha->chip_id == chip_8001) { - return snprintf(buf, PAGE_SIZE, "%d\n", + return sysfs_emit(buf, "%d\n", pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev); } else { - return snprintf(buf, PAGE_SIZE, "%d\n", + return sysfs_emit(buf, "%d\n", pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev); } } @@ -86,7 +86,7 @@ static ssize_t controller_fatal_error_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%d\n", + return sysfs_emit(buf, "%d\n", pm8001_ha->controller_fatal_error); } static DEVICE_ATTR_RO(controller_fatal_error); @@ -107,13 +107,13 @@ static ssize_t pm8001_ctl_fw_version_show(struct device *cdev, struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; if (pm8001_ha->chip_id == chip_8001) { - return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n", + return sysfs_emit(buf, "%02x.%02x.%02x.%02x\n", (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 24), (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 16), 
(u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 8), (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev)); } else { - return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n", + return sysfs_emit(buf, "%02x.%02x.%02x.%02x\n", (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 24), (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 16), (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 8), @@ -138,7 +138,7 @@ static ssize_t pm8001_ctl_ila_version_show(struct device *cdev, struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; if (pm8001_ha->chip_id != chip_8001) { - return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n", + return sysfs_emit(buf, "%02x.%02x.%02x.%02x\n", (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version >> 24), (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version >> 16), (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version >> 8), @@ -164,7 +164,7 @@ static ssize_t pm8001_ctl_inactive_fw_version_show(struct device *cdev, struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; if (pm8001_ha->chip_id != chip_8001) { - return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n", + return sysfs_emit(buf, "%02x.%02x.%02x.%02x\n", (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version >> 24), (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version >> 16), (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version >> 8), @@ -191,10 +191,10 @@ static ssize_t pm8001_ctl_max_out_io_show(struct device *cdev, struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; if (pm8001_ha->chip_id == chip_8001) { - return snprintf(buf, PAGE_SIZE, "%d\n", + return sysfs_emit(buf, "%d\n", pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io); } else { - return snprintf(buf, PAGE_SIZE, "%d\n", + return sysfs_emit(buf, "%d\n", pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io); } } @@ -215,13 +215,11 @@ static ssize_t pm8001_ctl_max_devices_show(struct device *cdev, struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; if (pm8001_ha->chip_id == chip_8001) { - return snprintf(buf, PAGE_SIZE, "%04d\n", - (u16)(pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl >> 16) - ); + return sysfs_emit(buf, "%04d\n", + (u16)(pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl >> 16)); } else { - return snprintf(buf, PAGE_SIZE, "%04d\n", - (u16)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl >> 16) - ); + return sysfs_emit(buf, "%04d\n", + (u16)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl >> 16)); } } static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL); @@ -242,13 +240,11 @@ static ssize_t pm8001_ctl_max_sg_list_show(struct device *cdev, struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; if (pm8001_ha->chip_id == chip_8001) { - return snprintf(buf, PAGE_SIZE, "%04d\n", - pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl & 0x0000FFFF - ); + return sysfs_emit(buf, "%04d\n", + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl & 0x0000FFFF); } else { - return snprintf(buf, PAGE_SIZE, "%04d\n", - pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl & 0x0000FFFF - ); + return sysfs_emit(buf, "%04d\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl & 0x0000FFFF); } } static DEVICE_ATTR(max_sg_list, S_IRUGO, pm8001_ctl_max_sg_list_show, NULL); @@ -315,7 +311,7 @@ static ssize_t pm8001_ctl_host_sas_address_show(struct device *cdev, struct Scsi_Host *shost = class_to_shost(cdev); struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "0x%016llx\n", + return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(*(__be64 *)pm8001_ha->sas_addr)); } static DEVICE_ATTR(host_sas_address, S_IRUGO, @@ 
-336,7 +332,7 @@ static ssize_t pm8001_ctl_logging_level_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%08xh\n", pm8001_ha->logging_level); + return sysfs_emit(buf, "%08xh\n", pm8001_ha->logging_level); } static ssize_t pm8001_ctl_logging_level_store(struct device *cdev, @@ -517,7 +513,7 @@ static ssize_t event_log_size_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%d\n", + return sysfs_emit(buf, "%d\n", pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size); } static DEVICE_ATTR_RO(event_log_size); @@ -604,7 +600,7 @@ static ssize_t non_fatal_count_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%08x", + return sysfs_emit(buf, "%08x\n", pm8001_ha->non_fatal_count); } diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index b67ad30d56e6..70cfc94c3d43 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -2732,7 +2732,7 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport) spin_lock_irqsave(host->host_lock, flags); /* Confirm port has not reappeared before clearing pointers. */ if (rport->port_state != FC_PORTSTATE_ONLINE) { - fcport->rport = fcport->drport = NULL; + fcport->rport = NULL; *((fc_port_t **)rport->dd_data) = NULL; } spin_unlock_irqrestore(host->host_lock, flags); @@ -3171,8 +3171,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) set_bit(VPORT_DELETE, &vha->dpc_flags); - while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) || - test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) + while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags)) msleep(1000); diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index cd75b179410d..dba7bba788d7 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -278,8 +278,8 @@ qla2x00_process_els(struct bsg_job *bsg_job) const char *type; int req_sg_cnt, rsp_sg_cnt; int rval = (DID_ERROR << 16); - uint16_t nextlid = 0; uint32_t els_cmd = 0; + int qla_port_allocated = 0; if (bsg_request->msgcode == FC_BSG_RPT_ELS) { rport = fc_bsg_to_rport(bsg_job); @@ -329,9 +329,9 @@ qla2x00_process_els(struct bsg_job *bsg_job) /* make sure the rport is logged in, * if not perform fabric login */ - if (qla2x00_fabric_login(vha, fcport, &nextlid)) { + if (atomic_read(&fcport->state) != FCS_ONLINE) { ql_dbg(ql_dbg_user, vha, 0x7003, - "Failed to login port %06X for ELS passthru.\n", + "Port %06X is not online for ELS passthru.\n", fcport->d_id.b24); rval = -EIO; goto done; @@ -348,6 +348,7 @@ qla2x00_process_els(struct bsg_job *bsg_job) goto done; } + qla_port_allocated = 1; /* Initialize all required fields of fcport */ fcport->vha = vha; fcport->d_id.b.al_pa = @@ -432,7 +433,7 @@ done_unmap_sg: goto done_free_fcport; done_free_fcport: - if (bsg_request->msgcode != FC_BSG_RPT_ELS) + if (qla_port_allocated) qla2x00_free_fcport(fcport); done: return rval; diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index a26a373be9da..ec0e987b71fa 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -384,6 +384,13 @@ struct els_reject { struct req_que; struct qla_tgt_sess; +struct qla_buf_dsc { + u16 tag; +#define TAG_FREED 0xffff + void *buf; + dma_addr_t buf_dma; +}; + /* 
* SCSI Request Block */ @@ -392,14 +399,16 @@ struct srb_cmd { uint32_t request_sense_length; uint32_t fw_sense_length; uint8_t *request_sense_ptr; - struct ct6_dsd *ct6_ctx; struct crc_context *crc_ctx; + struct ct6_dsd ct6_ctx; + struct qla_buf_dsc buf_dsc; }; /* * SRB flag definitions */ #define SRB_DMA_VALID BIT_0 /* Command sent to ISP */ +#define SRB_GOT_BUF BIT_1 #define SRB_FCP_CMND_DMA_VALID BIT_12 /* DIF: DSD List valid */ #define SRB_CRC_CTX_DMA_VALID BIT_2 /* DIF: context DMA valid */ #define SRB_CRC_PROT_DMA_VALID BIT_4 /* DIF: prot DMA valid */ @@ -660,7 +669,7 @@ enum { struct iocb_resource { u8 res_type; - u8 pad; + u8 exch_cnt; u16 iocb_cnt; }; @@ -2485,7 +2494,6 @@ struct ct_sns_desc { enum discovery_state { DSC_DELETED, - DSC_GNN_ID, DSC_GNL, DSC_LOGIN_PEND, DSC_LOGIN_FAILED, @@ -2596,7 +2604,7 @@ typedef struct fc_port { int login_retry; - struct fc_rport *rport, *drport; + struct fc_rport *rport; u32 supported_classes; uint8_t fc4_type; @@ -2699,7 +2707,6 @@ extern const char *const port_state_str[5]; static const char *const port_dstate_str[] = { [DSC_DELETED] = "DELETED", - [DSC_GNN_ID] = "GNN_ID", [DSC_GNL] = "GNL", [DSC_LOGIN_PEND] = "LOGIN_PEND", [DSC_LOGIN_FAILED] = "LOGIN_FAILED", @@ -3462,6 +3469,7 @@ struct qla_msix_entry { int have_irq; int in_use; uint32_t vector; + uint32_t vector_base0; uint16_t entry; char name[30]; void *handle; @@ -3479,7 +3487,6 @@ enum qla_work_type { QLA_EVT_ASYNC_ADISC, QLA_EVT_UEVENT, QLA_EVT_AENFX, - QLA_EVT_GPNID, QLA_EVT_UNMAP, QLA_EVT_NEW_SESS, QLA_EVT_GPDB, @@ -3493,7 +3500,6 @@ enum qla_work_type { QLA_EVT_GPNFT, QLA_EVT_GPNFT_DONE, QLA_EVT_GNNFT_DONE, - QLA_EVT_GNNID, QLA_EVT_GFPNID, QLA_EVT_SP_RETRY, QLA_EVT_IIDMA, @@ -3536,15 +3542,12 @@ struct qla_work_evt { } iosb; struct { port_id_t id; - } gpnid; - struct { - port_id_t id; u8 port_name[8]; u8 node_name[8]; void *pla; u8 fc4_type; } new_sess; - struct { /*Get PDB, Get Speed, update fcport, gnl, gidpn */ + struct { /*Get PDB, Get Speed, update fcport, gnl */ fc_port_t *fcport; u8 opt; } fcport; @@ -3721,10 +3724,27 @@ struct qla_fw_resources { u16 iocbs_limit; u16 iocbs_qp_limit; u16 iocbs_used; + u16 exch_total; + u16 exch_limit; + u16 exch_used; + u16 pad; }; #define QLA_IOCB_PCT_LIMIT 95 +struct qla_buf_pool { + u16 num_bufs; + u16 num_active; + u16 max_used; + u16 num_alloc; + u16 prev_max; + u16 pad; + uint32_t take_snapshot:1; + unsigned long *buf_map; + void **buf_array; + dma_addr_t *dma_array; +}; + /*Queue pair data structure */ struct qla_qpair { spinlock_t qp_lock; @@ -3778,6 +3798,7 @@ struct qla_qpair { struct qla_tgt_counters tgt_counters; uint16_t cpuid; struct qla_fw_resources fwres ____cacheline_aligned; + struct qla_buf_pool buf_pool; u32 cmd_cnt; u32 cmd_completion_cnt; u32 prev_completion_cnt; @@ -3938,7 +3959,6 @@ struct qlt_hw_data { __le32 __iomem *atio_q_out; const struct qla_tgt_func_tmpl *tgt_ops; - struct qla_tgt_vp_map *tgt_vp_map; int saved_set; __le16 saved_exchange_count; @@ -4106,6 +4126,7 @@ struct qla_hw_data { struct req_que **req_q_map; struct rsp_que **rsp_q_map; struct qla_qpair **queue_pair_map; + struct qla_qpair **qp_cpu_map; unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)]; unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)]; unsigned long qpair_qid_map[(QLA_MAX_QUEUES / 8) @@ -4762,6 +4783,7 @@ struct qla_hw_data { spinlock_t sadb_lock; /* protects list */ struct els_reject elsrej; u8 edif_post_stop_cnt_down; + struct qla_vp_map *vp_map; }; #define RX_ELS_SIZE (roundup(sizeof(struct 
enode) + ELS_MAX_PAYLOAD, SMP_CACHE_BYTES)) @@ -4857,6 +4879,7 @@ typedef struct scsi_qla_host { #define LOOP_READY 5 #define LOOP_DEAD 6 + unsigned long buf_expired; unsigned long relogin_jif; unsigned long dpc_flags; #define RESET_MARKER_NEEDED 0 /* Send marker to ISP. */ @@ -4872,7 +4895,6 @@ typedef struct scsi_qla_host { #define ISP_ABORT_RETRY 10 /* ISP aborted. */ #define BEACON_BLINK_NEEDED 11 #define REGISTER_FDMI_NEEDED 12 -#define FCPORT_UPDATE_NEEDED 13 #define VP_DPC_NEEDED 14 /* wake up for VP dpc handling */ #define UNLOADING 15 #define NPIV_CONFIG_NEEDED 16 @@ -5022,7 +5044,6 @@ typedef struct scsi_qla_host { uint8_t n2n_port_name[WWN_SIZE]; uint16_t n2n_id; __le16 dport_data[4]; - struct list_head gpnid_list; struct fab_scan scan; uint8_t scm_fabric_connection_flags; @@ -5064,7 +5085,7 @@ struct qla27xx_image_status { #define SET_AL_PA 2 #define RESET_VP_IDX 3 #define RESET_AL_PA 4 -struct qla_tgt_vp_map { +struct qla_vp_map { uint8_t idx; scsi_qla_host_t *vha; }; diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index 777808af5634..1925cc6897b6 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -235,7 +235,7 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused) uint16_t mb[MAX_IOCB_MB_REG]; int rc; struct qla_hw_data *ha = vha->hw; - u16 iocbs_used, i; + u16 iocbs_used, i, exch_used; rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG); if (rc != QLA_SUCCESS) { @@ -263,13 +263,19 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused) if (ql2xenforce_iocb_limit) { /* lock is not require. It's an estimate. */ iocbs_used = ha->base_qpair->fwres.iocbs_used; + exch_used = ha->base_qpair->fwres.exch_used; for (i = 0; i < ha->max_qpairs; i++) { - if (ha->queue_pair_map[i]) + if (ha->queue_pair_map[i]) { iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used; + exch_used += ha->queue_pair_map[i]->fwres.exch_used; + } } seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n", iocbs_used, ha->base_qpair->fwres.iocbs_limit); + + seq_printf(s, "estimate exchange used[%d] high water limit [%d] n", + exch_used, ha->base_qpair->fwres.exch_limit); } return 0; diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c index e4240aae5f9e..ec0e20255bd3 100644 --- a/drivers/scsi/qla2xxx/qla_edif.c +++ b/drivers/scsi/qla2xxx/qla_edif.c @@ -480,6 +480,49 @@ void qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport) } /** + * qla_delete_n2n_sess_and_wait: search for N2N session, tear it down and + * wait for tear down to complete. In N2N topology, there is only one + * session being active in tracking the remote device. + * @vha: host adapter pointer + * return code: 0 - found the session and completed the tear down. + * 1 - timeout occurred. Caller to use link bounce to reset. 
+ */ +static int qla_delete_n2n_sess_and_wait(scsi_qla_host_t *vha) +{ + struct fc_port *fcport; + int rc = -EIO; + ulong expire = jiffies + 23 * HZ; + + if (!N2N_TOPO(vha->hw)) + return 0; + + fcport = NULL; + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (!fcport->n2n_flag) + continue; + + ql_dbg(ql_dbg_disc, fcport->vha, 0x2016, + "%s reset sess at app start \n", __func__); + + qla_edif_sa_ctl_init(vha, fcport); + qlt_schedule_sess_for_deletion(fcport); + + while (time_before_eq(jiffies, expire)) { + if (fcport->disc_state != DSC_DELETE_PEND) { + rc = 0; + break; + } + msleep(1); + } + + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + break; + } + + return rc; +} + +/** * qla_edif_app_start: application has announce its present * @vha: host adapter pointer * @bsg_job: user request @@ -518,18 +561,17 @@ qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job) fcport->n2n_link_reset_cnt = 0; if (vha->hw->flags.n2n_fw_acc_sec) { - list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) - qla_edif_sa_ctl_init(vha, fcport); - + bool link_bounce = false; /* * While authentication app was not running, remote device * could still try to login with this local port. Let's - * clear the state and try again. + * reset the session, reconnect and re-authenticate. */ - qla2x00_wait_for_sess_deletion(vha); + if (qla_delete_n2n_sess_and_wait(vha)) + link_bounce = true; - /* bounce the link to get the other guy to relogin */ - if (!vha->hw->flags.n2n_bigger) { + /* bounce the link to start login */ + if (!vha->hw->flags.n2n_bigger || link_bounce) { set_bit(N2N_LINK_RESET, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } @@ -925,7 +967,9 @@ qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job) if (!(fcport->flags & FCF_FCSP_DEVICE)) continue; - tdid = app_req.remote_pid; + tdid.b.domain = app_req.remote_pid.domain; + tdid.b.area = app_req.remote_pid.area; + tdid.b.al_pa = app_req.remote_pid.al_pa; ql_dbg(ql_dbg_edif, vha, 0x2058, "APP request entry - portid=%06x.\n", tdid.b24); @@ -2989,9 +3033,10 @@ qla28xx_start_scsi_edif(srb_t *sp) tot_dsds = nseg; req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); - sp->iores.res_type = RESOURCE_INI; + sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; + sp->iores.exch_cnt = 1; sp->iores.iocb_cnt = req_cnt; - if (qla_get_iocbs(sp->qpair, &sp->iores)) + if (qla_get_fw_resources(sp->qpair, &sp->iores)) goto queuing_error; if (req->cnt < (req_cnt + 2)) { @@ -3006,26 +3051,16 @@ qla28xx_start_scsi_edif(srb_t *sp) goto queuing_error; } - ctx = sp->u.scmd.ct6_ctx = - mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); - if (!ctx) { - ql_log(ql_log_fatal, vha, 0x3010, - "Failed to allocate ctx for cmd=%p.\n", cmd); - goto queuing_error; - } - - memset(ctx, 0, sizeof(struct ct6_dsd)); - ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool, - GFP_ATOMIC, &ctx->fcp_cmnd_dma); - if (!ctx->fcp_cmnd) { + if (qla_get_buf(vha, sp->qpair, &sp->u.scmd.buf_dsc)) { ql_log(ql_log_fatal, vha, 0x3011, - "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd); + "Failed to allocate buf for fcp_cmnd for cmd=%p.\n", cmd); goto queuing_error; } - /* Initialize the DSD list and dma handle */ - INIT_LIST_HEAD(&ctx->dsd_list); - ctx->dsd_use_cnt = 0; + sp->flags |= SRB_GOT_BUF; + ctx = &sp->u.scmd.ct6_ctx; + ctx->fcp_cmnd = sp->u.scmd.buf_dsc.buf; + ctx->fcp_cmnd_dma = sp->u.scmd.buf_dsc.buf_dma; if (cmd->cmd_len > 16) { additional_cdb_len = cmd->cmd_len - 16; @@ -3144,7 +3179,6 @@ no_dsds: cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len); 
put_unaligned_le64(ctx->fcp_cmnd_dma, &cmd_pkt->fcp_cmnd_dseg_address); - sp->flags |= SRB_FCP_CMND_DMA_VALID; cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); /* Set total data segment count. */ cmd_pkt->entry_count = (uint8_t)req_cnt; @@ -3176,16 +3210,12 @@ no_dsds: return QLA_SUCCESS; queuing_error_fcp_cmnd: - dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma); queuing_error: if (tot_dsds) scsi_dma_unmap(cmd); - if (sp->u.scmd.ct6_ctx) { - mempool_free(sp->u.scmd.ct6_ctx, ha->ctx_mempool); - sp->u.scmd.ct6_ctx = NULL; - } - qla_put_iocbs(sp->qpair, &sp->iores); + qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc); + qla_put_fw_resources(sp->qpair, &sp->iores); spin_unlock_irqrestore(lock, flags); return QLA_FUNCTION_FAILED; diff --git a/drivers/scsi/qla2xxx/qla_edif.h b/drivers/scsi/qla2xxx/qla_edif.h index 7cdb89ccdc6e..aa566cdb77e5 100644 --- a/drivers/scsi/qla2xxx/qla_edif.h +++ b/drivers/scsi/qla2xxx/qla_edif.h @@ -145,4 +145,6 @@ struct enode { (qla_ini_mode_enabled(_s->vha) && (_s->disc_state == DSC_DELETE_PEND || \ _s->disc_state == DSC_DELETED)) +#define EDIF_CAP(_ha) (ql2xsecenable && IS_QLA28XX(_ha)) + #endif /* __QLA_EDIF_H */ diff --git a/drivers/scsi/qla2xxx/qla_edif_bsg.h b/drivers/scsi/qla2xxx/qla_edif_bsg.h index 0931f4e4e127..514c265ba86e 100644 --- a/drivers/scsi/qla2xxx/qla_edif_bsg.h +++ b/drivers/scsi/qla2xxx/qla_edif_bsg.h @@ -89,7 +89,20 @@ struct app_plogi_reply { struct app_pinfo_req { struct app_id app_info; uint8_t num_ports; - port_id_t remote_pid; + struct { +#ifdef __BIG_ENDIAN + uint8_t domain; + uint8_t area; + uint8_t al_pa; +#elif defined(__LITTLE_ENDIAN) + uint8_t al_pa; + uint8_t area; + uint8_t domain; +#else +#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!" +#endif + uint8_t rsvd_1; + } remote_pid; uint8_t version; uint8_t pad[VND_CMD_PAD_SIZE]; uint8_t reserved[VND_CMD_APP_RESERVED_SIZE]; diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index e3256e721be1..9142df876c73 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -257,6 +257,7 @@ struct edif_sa_ctl *qla_edif_find_sa_ctl_by_index(fc_port_t *fcport, /* * Global Functions in qla_mid.c source file. */ +extern void qla_update_vp_map(struct scsi_qla_host *, int); extern struct scsi_host_template qla2xxx_driver_template; extern struct scsi_transport_template *qla2xxx_transport_vport_template; extern void qla2x00_timer(struct timer_list *); @@ -292,6 +293,7 @@ extern void qla2x00_alert_all_vps(struct rsp_que *, uint16_t *); extern void qla2x00_async_event(scsi_qla_host_t *, struct rsp_que *, uint16_t *); extern int qla2x00_vp_abort_isp(scsi_qla_host_t *); +void qla_adjust_buf(struct scsi_qla_host *); /* * Global Function Prototypes in qla_iocb.c source file. 
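The hunks above copy the application-supplied remote_pid into the driver's port_id_t one byte at a time (domain, area, al_pa) and give the BSG structure an explicitly endian-ordered layout. The standalone sketch below illustrates why: it uses simplified stand-ins for port_id_t and the BSG field (the real definitions live in qla_def.h and qla_edif_bsg.h; struct and field names here are illustrative only), assumes a little-endian host, and shows that the per-field copy yields the expected 24-bit N_Port ID without relying on the two structures sharing an identical layout.

/* Illustrative only: simplified stand-ins, little-endian host assumed. */
#include <stdint.h>
#include <stdio.h>

struct app_pid {                /* wire layout handed in by the auth application */
	uint8_t al_pa;
	uint8_t area;
	uint8_t domain;
	uint8_t rsvd_1;
};

typedef union {                 /* simplified stand-in for the driver's port_id_t */
	uint32_t b24 : 24;
	struct {
		uint8_t al_pa;
		uint8_t area;
		uint8_t domain;
		uint8_t rsvd_1;
	} b;
} port_id_t;

int main(void)
{
	struct app_pid in = { .al_pa = 0x03, .area = 0x02, .domain = 0x01 };
	port_id_t tdid = { .b24 = 0 };

	/* copy each byte explicitly, as qla_edif_app_getfcinfo() now does */
	tdid.b.domain = in.domain;
	tdid.b.area   = in.area;
	tdid.b.al_pa  = in.al_pa;

	printf("N_Port ID %06x\n", (unsigned int)tdid.b24);  /* prints 010203 */
	return 0;
}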
@@ -721,10 +723,6 @@ extern int qla2x00_chk_ms_status(scsi_qla_host_t *, ms_iocb_entry_t *, struct ct_sns_rsp *, const char *); extern void qla2x00_async_iocb_timeout(void *data); -extern int qla24xx_post_gpnid_work(struct scsi_qla_host *, port_id_t *); -extern int qla24xx_async_gpnid(scsi_qla_host_t *, port_id_t *); -void qla24xx_handle_gpnid_event(scsi_qla_host_t *, struct event_arg *); - int qla24xx_post_gpsc_work(struct scsi_qla_host *, fc_port_t *); int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *); void qla24xx_handle_gpsc_event(scsi_qla_host_t *, struct event_arg *); @@ -734,9 +732,6 @@ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool); int qla24xx_async_gpnft(scsi_qla_host_t *, u8, srb_t *); void qla24xx_async_gpnft_done(scsi_qla_host_t *, srb_t *); void qla24xx_async_gnnft_done(scsi_qla_host_t *, srb_t *); -int qla24xx_async_gnnid(scsi_qla_host_t *, fc_port_t *); -void qla24xx_handle_gnnid_event(scsi_qla_host_t *, struct event_arg *); -int qla24xx_post_gnnid_work(struct scsi_qla_host *, fc_port_t *); int qla24xx_post_gfpnid_work(struct scsi_qla_host *, fc_port_t *); int qla24xx_async_gfpnid(scsi_qla_host_t *, fc_port_t *); void qla24xx_handle_gfpnid_event(scsi_qla_host_t *, struct event_arg *); @@ -962,7 +957,7 @@ extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *, uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **); void qla24xx_delete_sess_fn(struct work_struct *); void qlt_unknown_atio_work_fn(struct work_struct *); -void qlt_update_host_map(struct scsi_qla_host *, port_id_t); +void qla_update_host_map(struct scsi_qla_host *, port_id_t); void qla_remove_hostmap(struct qla_hw_data *ha); void qlt_clr_qp_table(struct scsi_qla_host *vha); void qlt_set_mode(struct scsi_qla_host *); @@ -975,6 +970,8 @@ extern void qla_nvme_abort_set_option (struct abort_entry_24xx *abt, srb_t *sp); extern void qla_nvme_abort_process_comp_status (struct abort_entry_24xx *abt, srb_t *sp); +struct scsi_qla_host *qla_find_host_by_vp_idx(struct scsi_qla_host *vha, + uint16_t vp_idx); /* nvme.c */ void qla_nvme_unregister_remote_port(struct fc_port *fcport); @@ -1019,5 +1016,8 @@ int qla2xxx_enable_port(struct Scsi_Host *shost); uint64_t qla2x00_get_num_tgts(scsi_qla_host_t *vha); uint64_t qla2x00_count_set_bits(u32 num); - +int qla_create_buf_pool(struct scsi_qla_host *, struct qla_qpair *); +void qla_free_buf_pool(struct qla_qpair *); +int qla_get_buf(struct scsi_qla_host *, struct qla_qpair *, struct qla_buf_dsc *); +void qla_put_buf(struct qla_qpair *, struct qla_buf_dsc *); #endif /* _QLA_GBL_H */ diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 64ab070b8716..4738f8935f7f 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -2949,22 +2949,6 @@ done: return rval; } -int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id) -{ - struct qla_work_evt *e; - - if (test_bit(UNLOADING, &vha->dpc_flags) || - (vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags))) - return 0; - - e = qla2x00_alloc_work(vha, QLA_EVT_GPNID); - if (!e) - return QLA_FUNCTION_FAILED; - - e->u.gpnid.id = *id; - return qla2x00_post_work(vha, e); -} - void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp) { struct srb_iocb *c = &sp->u.iocb_cmd; @@ -2997,287 +2981,6 @@ void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp) kref_put(&sp->cmd_kref, qla2x00_sp_release); } -void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea) -{ - fc_port_t *fcport, *conflict, *t; - u16 data[2]; - 
- ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %d port_id: %06x\n", - __func__, __LINE__, ea->id.b24); - - if (ea->rc) { - /* cable is disconnected */ - list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) { - if (fcport->d_id.b24 == ea->id.b24) - fcport->scan_state = QLA_FCPORT_SCAN; - - qlt_schedule_sess_for_deletion(fcport); - } - } else { - /* cable is connected */ - fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1); - if (fcport) { - list_for_each_entry_safe(conflict, t, &vha->vp_fcports, - list) { - if ((conflict->d_id.b24 == ea->id.b24) && - (fcport != conflict)) - /* - * 2 fcports with conflict Nport ID or - * an existing fcport is having nport ID - * conflict with new fcport. - */ - - conflict->scan_state = QLA_FCPORT_SCAN; - - qlt_schedule_sess_for_deletion(conflict); - } - - fcport->scan_needed = 0; - fcport->rscn_gen++; - fcport->scan_state = QLA_FCPORT_FOUND; - fcport->flags |= FCF_FABRIC_DEVICE; - if (fcport->login_retry == 0) { - fcport->login_retry = - vha->hw->login_retry_count; - ql_dbg(ql_dbg_disc, vha, 0xffff, - "Port login retry %8phN, lid 0x%04x cnt=%d.\n", - fcport->port_name, fcport->loop_id, - fcport->login_retry); - } - switch (fcport->disc_state) { - case DSC_LOGIN_COMPLETE: - /* recheck session is still intact. */ - ql_dbg(ql_dbg_disc, vha, 0x210d, - "%s %d %8phC revalidate session with ADISC\n", - __func__, __LINE__, fcport->port_name); - data[0] = data[1] = 0; - qla2x00_post_async_adisc_work(vha, fcport, - data); - break; - case DSC_DELETED: - ql_dbg(ql_dbg_disc, vha, 0x210d, - "%s %d %8phC login\n", __func__, __LINE__, - fcport->port_name); - fcport->d_id = ea->id; - qla24xx_fcport_handle_login(vha, fcport); - break; - case DSC_DELETE_PEND: - fcport->d_id = ea->id; - break; - default: - fcport->d_id = ea->id; - break; - } - } else { - list_for_each_entry_safe(conflict, t, &vha->vp_fcports, - list) { - if (conflict->d_id.b24 == ea->id.b24) { - /* 2 fcports with conflict Nport ID or - * an existing fcport is having nport ID - * conflict with new fcport. - */ - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %d %8phC DS %d\n", - __func__, __LINE__, - conflict->port_name, - conflict->disc_state); - - conflict->scan_state = QLA_FCPORT_SCAN; - qlt_schedule_sess_for_deletion(conflict); - } - } - - /* create new fcport */ - ql_dbg(ql_dbg_disc, vha, 0x2065, - "%s %d %8phC post new sess\n", - __func__, __LINE__, ea->port_name); - qla24xx_post_newsess_work(vha, &ea->id, - ea->port_name, NULL, NULL, 0); - } - } -} - -static void qla2x00_async_gpnid_sp_done(srb_t *sp, int res) -{ - struct scsi_qla_host *vha = sp->vha; - struct ct_sns_req *ct_req = - (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; - struct ct_sns_rsp *ct_rsp = - (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp; - struct event_arg ea; - struct qla_work_evt *e; - unsigned long flags; - - if (res) - ql_dbg(ql_dbg_disc, vha, 0x2066, - "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n", - sp->name, res, sp->gen1, &ct_req->req.port_id.port_id, - ct_rsp->rsp.gpn_id.port_name); - else - ql_dbg(ql_dbg_disc, vha, 0x2066, - "Async done-%s good rscn gen %d ID %3phC. 
%8phC\n", - sp->name, sp->gen1, &ct_req->req.port_id.port_id, - ct_rsp->rsp.gpn_id.port_name); - - memset(&ea, 0, sizeof(ea)); - memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE); - ea.sp = sp; - ea.id = be_to_port_id(ct_req->req.port_id.port_id); - ea.rc = res; - - spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); - list_del(&sp->elem); - spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); - - if (res) { - if (res == QLA_FUNCTION_TIMEOUT) { - qla24xx_post_gpnid_work(sp->vha, &ea.id); - /* ref: INIT */ - kref_put(&sp->cmd_kref, qla2x00_sp_release); - return; - } - } else if (sp->gen1) { - /* There was another RSCN for this Nport ID */ - qla24xx_post_gpnid_work(sp->vha, &ea.id); - /* ref: INIT */ - kref_put(&sp->cmd_kref, qla2x00_sp_release); - return; - } - - qla24xx_handle_gpnid_event(vha, &ea); - - e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP); - if (!e) { - /* please ignore kernel warning. otherwise, we have mem leak. */ - dma_free_coherent(&vha->hw->pdev->dev, - sp->u.iocb_cmd.u.ctarg.req_allocated_size, - sp->u.iocb_cmd.u.ctarg.req, - sp->u.iocb_cmd.u.ctarg.req_dma); - sp->u.iocb_cmd.u.ctarg.req = NULL; - - dma_free_coherent(&vha->hw->pdev->dev, - sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, - sp->u.iocb_cmd.u.ctarg.rsp, - sp->u.iocb_cmd.u.ctarg.rsp_dma); - sp->u.iocb_cmd.u.ctarg.rsp = NULL; - - /* ref: INIT */ - kref_put(&sp->cmd_kref, qla2x00_sp_release); - return; - } - - e->u.iosb.sp = sp; - qla2x00_post_work(vha, e); -} - -/* Get WWPN with Nport ID. */ -int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) -{ - int rval = QLA_FUNCTION_FAILED; - struct ct_sns_req *ct_req; - srb_t *sp, *tsp; - struct ct_sns_pkt *ct_sns; - unsigned long flags; - - if (!vha->flags.online) - goto done; - - /* ref: INIT */ - sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); - if (!sp) - goto done; - - sp->type = SRB_CT_PTHRU_CMD; - sp->name = "gpnid"; - sp->u.iocb_cmd.u.ctarg.id = *id; - sp->gen1 = 0; - qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, - qla2x00_async_gpnid_sp_done); - - spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); - list_for_each_entry(tsp, &vha->gpnid_list, elem) { - if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) { - tsp->gen1++; - spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); - /* ref: INIT */ - kref_put(&sp->cmd_kref, qla2x00_sp_release); - goto done; - } - } - list_add_tail(&sp->elem, &vha->gpnid_list); - spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); - - sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, - sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, - GFP_KERNEL); - sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); - if (!sp->u.iocb_cmd.u.ctarg.req) { - ql_log(ql_log_warn, vha, 0xd041, - "Failed to allocate ct_sns request.\n"); - goto done_free_sp; - } - - sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, - sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, - GFP_KERNEL); - sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); - if (!sp->u.iocb_cmd.u.ctarg.rsp) { - ql_log(ql_log_warn, vha, 0xd042, - "Failed to allocate ct_sns request.\n"); - goto done_free_sp; - } - - ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; - memset(ct_sns, 0, sizeof(*ct_sns)); - - ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; - /* CT_IU preamble */ - ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE); - - /* GPN_ID req */ - ct_req->req.port_id.port_id = port_id_to_be_id(*id); - - sp->u.iocb_cmd.u.ctarg.req_size = 
GPN_ID_REQ_SIZE; - sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE; - sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; - - ql_dbg(ql_dbg_disc, vha, 0x2067, - "Async-%s hdl=%x ID %3phC.\n", sp->name, - sp->handle, &ct_req->req.port_id.port_id); - - rval = qla2x00_start_sp(sp); - if (rval != QLA_SUCCESS) - goto done_free_sp; - - return rval; - -done_free_sp: - spin_lock_irqsave(&vha->hw->vport_slock, flags); - list_del(&sp->elem); - spin_unlock_irqrestore(&vha->hw->vport_slock, flags); - - if (sp->u.iocb_cmd.u.ctarg.req) { - dma_free_coherent(&vha->hw->pdev->dev, - sizeof(struct ct_sns_pkt), - sp->u.iocb_cmd.u.ctarg.req, - sp->u.iocb_cmd.u.ctarg.req_dma); - sp->u.iocb_cmd.u.ctarg.req = NULL; - } - if (sp->u.iocb_cmd.u.ctarg.rsp) { - dma_free_coherent(&vha->hw->pdev->dev, - sizeof(struct ct_sns_pkt), - sp->u.iocb_cmd.u.ctarg.rsp, - sp->u.iocb_cmd.u.ctarg.rsp_dma); - sp->u.iocb_cmd.u.ctarg.rsp = NULL; - } - /* ref: INIT */ - kref_put(&sp->cmd_kref, qla2x00_sp_release); -done: - return rval; -} - - void qla24xx_async_gffid_sp_done(srb_t *sp, int res) { struct scsi_qla_host *vha = sp->vha; @@ -4190,116 +3893,6 @@ void qla_scan_work_fn(struct work_struct *work) spin_unlock_irqrestore(&vha->work_lock, flags); } -/* GNN_ID */ -void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea) -{ - qla24xx_post_gnl_work(vha, ea->fcport); -} - -static void qla2x00_async_gnnid_sp_done(srb_t *sp, int res) -{ - struct scsi_qla_host *vha = sp->vha; - fc_port_t *fcport = sp->fcport; - u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name; - struct event_arg ea; - u64 wwnn; - - fcport->flags &= ~FCF_ASYNC_SENT; - wwnn = wwn_to_u64(node_name); - if (wwnn) - memcpy(fcport->node_name, node_name, WWN_SIZE); - - memset(&ea, 0, sizeof(ea)); - ea.fcport = fcport; - ea.sp = sp; - ea.rc = res; - - ql_dbg(ql_dbg_disc, vha, 0x204f, - "Async done-%s res %x, WWPN %8phC %8phC\n", - sp->name, res, fcport->port_name, fcport->node_name); - - qla24xx_handle_gnnid_event(vha, &ea); - - /* ref: INIT */ - kref_put(&sp->cmd_kref, qla2x00_sp_release); -} - -int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport) -{ - int rval = QLA_FUNCTION_FAILED; - struct ct_sns_req *ct_req; - srb_t *sp; - - if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) - return rval; - - qla2x00_set_fcport_disc_state(fcport, DSC_GNN_ID); - /* ref: INIT */ - sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); - if (!sp) - goto done; - - fcport->flags |= FCF_ASYNC_SENT; - sp->type = SRB_CT_PTHRU_CMD; - sp->name = "gnnid"; - sp->gen1 = fcport->rscn_gen; - sp->gen2 = fcport->login_gen; - qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, - qla2x00_async_gnnid_sp_done); - - /* CT_IU preamble */ - ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD, - GNN_ID_RSP_SIZE); - - /* GNN_ID req */ - ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id); - - - /* req & rsp use the same buffer */ - sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns; - sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma; - sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns; - sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma; - sp->u.iocb_cmd.u.ctarg.req_size = GNN_ID_REQ_SIZE; - sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE; - sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; - - ql_dbg(ql_dbg_disc, vha, 0xffff, - "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n", - sp->name, fcport->port_name, - sp->handle, fcport->loop_id, fcport->d_id.b24); - - rval = qla2x00_start_sp(sp); - if (rval != QLA_SUCCESS) 
- goto done_free_sp; - return rval; - -done_free_sp: - /* ref: INIT */ - kref_put(&sp->cmd_kref, qla2x00_sp_release); - fcport->flags &= ~FCF_ASYNC_SENT; -done: - return rval; -} - -int qla24xx_post_gnnid_work(struct scsi_qla_host *vha, fc_port_t *fcport) -{ - struct qla_work_evt *e; - int ls; - - ls = atomic_read(&vha->loop_state); - if (((ls != LOOP_READY) && (ls != LOOP_UP)) || - test_bit(UNLOADING, &vha->dpc_flags)) - return 0; - - e = qla2x00_alloc_work(vha, QLA_EVT_GNNID); - if (!e) - return QLA_FUNCTION_FAILED; - - e->u.fcport.fcport = fcport; - return qla2x00_post_work(vha, e); -} - /* GPFN_ID */ void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea) { diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 8d9ecabb1aac..c5e73d5a26b1 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -128,12 +128,14 @@ static void qla24xx_abort_iocb_timeout(void *data) sp->cmd_sp)) { qpair->req->outstanding_cmds[handle] = NULL; cmdsp_found = 1; + qla_put_fw_resources(qpair, &sp->cmd_sp->iores); } /* removing the abort */ if (qpair->req->outstanding_cmds[handle] == sp) { qpair->req->outstanding_cmds[handle] = NULL; sp_found = 1; + qla_put_fw_resources(qpair, &sp->iores); break; } } @@ -388,6 +390,12 @@ done_free_sp: fcport->flags &= ~FCF_ASYNC_SENT; done: fcport->flags &= ~FCF_ASYNC_ACTIVE; + + /* + * async login failed. Could be due to iocb/exchange resource + * being low. Set state DELETED for re-login process to start again. + */ + qla2x00_set_fcport_disc_state(fcport, DSC_DELETED); return rval; } @@ -1710,12 +1718,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) } break; default: - if (wwn == 0) { - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %d %8phC post GNNID\n", - __func__, __LINE__, fcport->port_name); - qla24xx_post_gnnid_work(vha, fcport); - } else if (fcport->loop_id == FC_NO_LOOP_ID) { + if (fcport->loop_id == FC_NO_LOOP_ID) { ql_dbg(ql_dbg_disc, vha, 0x20bd, "%s %d %8phC post gnl\n", __func__, __LINE__, fcport->port_name); @@ -2000,6 +2003,7 @@ qla2x00_tmf_iocb_timeout(void *data) for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { if (sp->qpair->req->outstanding_cmds[h] == sp) { sp->qpair->req->outstanding_cmds[h] = NULL; + qla_put_fw_resources(sp->qpair, &sp->iores); break; } } @@ -2073,7 +2077,6 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, done_free_sp: /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); - fcport->flags &= ~FCF_ASYNC_SENT; done: return rval; } @@ -2315,7 +2318,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) ea->fcport->login_pause = 1; ql_dbg(ql_dbg_disc, vha, 0x20ed, - "%s %d %8phC NPortId %06x inuse with loopid 0x%x. 
post gidpn\n", + "%s %d %8phC NPortId %06x inuse with loopid 0x%x.\n", __func__, __LINE__, ea->fcport->port_name, ea->fcport->d_id.b24, lid); } else { @@ -3943,6 +3946,12 @@ void qla_init_iocb_limit(scsi_qla_host_t *vha) ha->base_qpair->fwres.iocbs_limit = limit; ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps; ha->base_qpair->fwres.iocbs_used = 0; + + ha->base_qpair->fwres.exch_total = ha->orig_fw_xcb_count; + ha->base_qpair->fwres.exch_limit = (ha->orig_fw_xcb_count * + QLA_IOCB_PCT_LIMIT) / 100; + ha->base_qpair->fwres.exch_used = 0; + for (i = 0; i < ha->max_qpairs; i++) { if (ha->queue_pair_map[i]) { ha->queue_pair_map[i]->fwres.iocbs_total = @@ -3951,6 +3960,10 @@ void qla_init_iocb_limit(scsi_qla_host_t *vha) ha->queue_pair_map[i]->fwres.iocbs_qp_limit = limit / num_qps; ha->queue_pair_map[i]->fwres.iocbs_used = 0; + ha->queue_pair_map[i]->fwres.exch_total = ha->orig_fw_xcb_count; + ha->queue_pair_map[i]->fwres.exch_limit = + (ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100; + ha->queue_pair_map[i]->fwres.exch_used = 0; } } } @@ -4809,9 +4822,9 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) spin_lock_irqsave(&ha->hardware_lock, flags); if (vha->hw->flags.edif_enabled) { if (topo != 2) - qlt_update_host_map(vha, id); + qla_update_host_map(vha, id); } else if (!(topo == 2 && ha->flags.n2n_bigger)) - qlt_update_host_map(vha, id); + qla_update_host_map(vha, id); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (!vha->flags.init_done) @@ -5206,27 +5219,6 @@ qla2x00_nvram_config(scsi_qla_host_t *vha) return (rval); } -static void -qla2x00_rport_del(void *data) -{ - fc_port_t *fcport = data; - struct fc_rport *rport; - unsigned long flags; - - spin_lock_irqsave(fcport->vha->host->host_lock, flags); - rport = fcport->drport ? fcport->drport : fcport->rport; - fcport->drport = NULL; - spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); - if (rport) { - ql_dbg(ql_dbg_disc, fcport->vha, 0x210b, - "%s %8phN. rport %p roles %x\n", - __func__, fcport->port_name, rport, - rport->roles); - - fc_remote_port_delete(rport); - } -} - void qla2x00_set_fcport_state(fc_port_t *fcport, int state) { int old_state; @@ -6743,33 +6735,6 @@ int qla2x00_perform_loop_resync(scsi_qla_host_t *ha) return rval; } -void -qla2x00_update_fcports(scsi_qla_host_t *base_vha) -{ - fc_port_t *fcport; - struct scsi_qla_host *vha, *tvp; - struct qla_hw_data *ha = base_vha->hw; - unsigned long flags; - - spin_lock_irqsave(&ha->vport_slock, flags); - /* Go with deferred removal of rport references. */ - list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list) { - atomic_inc(&vha->vref_count); - list_for_each_entry(fcport, &vha->vp_fcports, list) { - if (fcport->drport && - atomic_read(&fcport->state) != FCS_UNCONFIGURED) { - spin_unlock_irqrestore(&ha->vport_slock, flags); - qla2x00_rport_del(fcport); - - spin_lock_irqsave(&ha->vport_slock, flags); - } - } - atomic_dec(&vha->vref_count); - wake_up(&vha->vref_waitq); - } - spin_unlock_irqrestore(&ha->vport_slock, flags); -} - /* Assumes idc_lock always held on entry */ void qla83xx_reset_ownership(scsi_qla_host_t *vha) @@ -9461,8 +9426,6 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, qpair->req = ha->req_q_map[req_id]; qpair->rsp->req = qpair->req; qpair->rsp->qpair = qpair; - /* init qpair to this cpu. Will adjust at run time. 
*/ - qla_cpu_update(qpair, raw_smp_processor_id()); if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { if (ha->fw_attributes & BIT_4) @@ -9477,6 +9440,13 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, goto fail_mempool; } + if (qla_create_buf_pool(vha, qpair)) { + ql_log(ql_log_warn, vha, 0xd036, + "Failed to initialize buf pool for qpair %d\n", + qpair->id); + goto fail_bufpool; + } + /* Mark as online */ qpair->online = 1; @@ -9492,7 +9462,10 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, } return qpair; +fail_bufpool: + mempool_destroy(qpair->srb_mempool); fail_mempool: + qla25xx_delete_req_que(vha, qpair->req); fail_req: qla25xx_delete_rsp_que(vha, qpair->rsp); fail_rsp: @@ -9518,6 +9491,8 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair) qpair->delete_in_progress = 1; + qla_free_buf_pool(qpair); + ret = qla25xx_delete_req_que(vha, qpair->req); if (ret != QLA_SUCCESS) goto fail; diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 5185dc5daf80..cce6e425c121 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -380,24 +380,26 @@ qla2xxx_get_fc4_priority(struct scsi_qla_host *vha) enum { RESOURCE_NONE, - RESOURCE_INI, + RESOURCE_IOCB = BIT_0, + RESOURCE_EXCH = BIT_1, /* exchange */ + RESOURCE_FORCE = BIT_2, }; static inline int -qla_get_iocbs(struct qla_qpair *qp, struct iocb_resource *iores) +qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores) { u16 iocbs_used, i; + u16 exch_used; struct qla_hw_data *ha = qp->vha->hw; if (!ql2xenforce_iocb_limit) { iores->res_type = RESOURCE_NONE; return 0; } + if (iores->res_type & RESOURCE_FORCE) + goto force; - if ((iores->iocb_cnt + qp->fwres.iocbs_used) < qp->fwres.iocbs_qp_limit) { - qp->fwres.iocbs_used += iores->iocb_cnt; - return 0; - } else { + if ((iores->iocb_cnt + qp->fwres.iocbs_used) >= qp->fwres.iocbs_qp_limit) { /* no need to acquire qpair lock. 
It's just rough calculation */ iocbs_used = ha->base_qpair->fwres.iocbs_used; for (i = 0; i < ha->max_qpairs; i++) { @@ -405,30 +407,49 @@ qla_get_iocbs(struct qla_qpair *qp, struct iocb_resource *iores) iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used; } - if ((iores->iocb_cnt + iocbs_used) < qp->fwres.iocbs_limit) { - qp->fwres.iocbs_used += iores->iocb_cnt; - return 0; - } else { + if ((iores->iocb_cnt + iocbs_used) >= qp->fwres.iocbs_limit) { + iores->res_type = RESOURCE_NONE; + return -ENOSPC; + } + } + + if (iores->res_type & RESOURCE_EXCH) { + exch_used = ha->base_qpair->fwres.exch_used; + for (i = 0; i < ha->max_qpairs; i++) { + if (ha->queue_pair_map[i]) + exch_used += ha->queue_pair_map[i]->fwres.exch_used; + } + + if ((exch_used + iores->exch_cnt) >= qp->fwres.exch_limit) { iores->res_type = RESOURCE_NONE; return -ENOSPC; } } +force: + qp->fwres.iocbs_used += iores->iocb_cnt; + qp->fwres.exch_used += iores->exch_cnt; + return 0; } static inline void -qla_put_iocbs(struct qla_qpair *qp, struct iocb_resource *iores) +qla_put_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores) { - switch (iores->res_type) { - case RESOURCE_NONE: - break; - default: + if (iores->res_type & RESOURCE_IOCB) { if (qp->fwres.iocbs_used >= iores->iocb_cnt) { qp->fwres.iocbs_used -= iores->iocb_cnt; } else { - // should not happen + /* should not happen */ qp->fwres.iocbs_used = 0; } - break; + } + + if (iores->res_type & RESOURCE_EXCH) { + if (qp->fwres.exch_used >= iores->exch_cnt) { + qp->fwres.exch_used -= iores->exch_cnt; + } else { + /* should not happen */ + qp->fwres.exch_used = 0; + } } iores->res_type = RESOURCE_NONE; } @@ -494,3 +515,58 @@ fcport_is_bigger(fc_port_t *fcport) { return !fcport_is_smaller(fcport); } + +static inline struct qla_qpair * +qla_mapq_nvme_select_qpair(struct qla_hw_data *ha, struct qla_qpair *qpair) +{ + int cpuid = smp_processor_id(); + + if (qpair->cpuid != cpuid && + ha->qp_cpu_map[cpuid]) { + qpair = ha->qp_cpu_map[cpuid]; + } + return qpair; +} + +static inline void +qla_mapq_init_qp_cpu_map(struct qla_hw_data *ha, + struct qla_msix_entry *msix, + struct qla_qpair *qpair) +{ + const struct cpumask *mask; + unsigned int cpu; + + if (!ha->qp_cpu_map) + return; + mask = pci_irq_get_affinity(ha->pdev, msix->vector_base0); + qpair->cpuid = cpumask_first(mask); + for_each_cpu(cpu, mask) { + ha->qp_cpu_map[cpu] = qpair; + } + msix->cpuid = qpair->cpuid; +} + +static inline void +qla_mapq_free_qp_cpu_map(struct qla_hw_data *ha) +{ + if (ha->qp_cpu_map) { + kfree(ha->qp_cpu_map); + ha->qp_cpu_map = NULL; + } +} + +static inline int qla_mapq_alloc_qp_cpu_map(struct qla_hw_data *ha) +{ + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + if (!ha->qp_cpu_map) { + ha->qp_cpu_map = kcalloc(NR_CPUS, sizeof(struct qla_qpair *), + GFP_KERNEL); + if (!ha->qp_cpu_map) { + ql_log(ql_log_fatal, vha, 0x0180, + "Unable to allocate memory for qp_cpu_map ptrs.\n"); + return -1; + } + } + return 0; +} diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 42ce4e1fe744..b9b3e6f80ea9 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -623,7 +623,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, } cur_seg = scsi_sglist(cmd); - ctx = sp->u.scmd.ct6_ctx; + ctx = &sp->u.scmd.ct6_ctx; while (tot_dsds) { avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ? 
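The qla_get_fw_resources()/qla_put_fw_resources() pair above extends the existing IOCB accounting to firmware exchanges: each I/O reserves exch_cnt exchanges against exch_limit, which qla_init_iocb_limit() sets to QLA_IOCB_PCT_LIMIT (95%) of orig_fw_xcb_count, while RESOURCE_FORCE lets management-type IOCBs bypass the check. A minimal standalone sketch of the admission arithmetic follows, using an example exchange count rather than a value read from firmware.

#include <stdio.h>

#define QLA_IOCB_PCT_LIMIT 95	/* same high-water percentage as the driver */

int main(void)
{
	unsigned int exch_total = 4096;	/* example orig_fw_xcb_count, not a real reading */
	unsigned int exch_limit = exch_total * QLA_IOCB_PCT_LIMIT / 100;	/* 3891 */
	unsigned int exch_used = 3890;	/* rough sum of fwres.exch_used over all qpairs */
	unsigned int exch_cnt = 1;	/* one exchange per SCSI/NVMe command */

	if (exch_used + exch_cnt >= exch_limit)
		printf("reject: %u + %u >= %u, return -ENOSPC\n",
		       exch_used, exch_cnt, exch_limit);
	else
		printf("accept: %u of %u exchanges now reserved\n",
		       exch_used + exch_cnt, exch_limit);
	return 0;
}

At the boundary, 3890 exchanges in use plus one requested meets the 3891 limit, so the request is refused with -ENOSPC and the submission path falls through to its queuing_error handling.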
@@ -1589,9 +1589,10 @@ qla24xx_start_scsi(srb_t *sp) tot_dsds = nseg; req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); - sp->iores.res_type = RESOURCE_INI; + sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; + sp->iores.exch_cnt = 1; sp->iores.iocb_cnt = req_cnt; - if (qla_get_iocbs(sp->qpair, &sp->iores)) + if (qla_get_fw_resources(sp->qpair, &sp->iores)) goto queuing_error; if (req->cnt < (req_cnt + 2)) { @@ -1678,7 +1679,7 @@ queuing_error: if (tot_dsds) scsi_dma_unmap(cmd); - qla_put_iocbs(sp->qpair, &sp->iores); + qla_put_fw_resources(sp->qpair, &sp->iores); spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_FUNCTION_FAILED; @@ -1793,9 +1794,10 @@ qla24xx_dif_start_scsi(srb_t *sp) tot_prot_dsds = nseg; tot_dsds += nseg; - sp->iores.res_type = RESOURCE_INI; + sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; + sp->iores.exch_cnt = 1; sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds); - if (qla_get_iocbs(sp->qpair, &sp->iores)) + if (qla_get_fw_resources(sp->qpair, &sp->iores)) goto queuing_error; if (req->cnt < (req_cnt + 2)) { @@ -1883,7 +1885,7 @@ queuing_error: } /* Cleanup will be performed by the caller (queuecommand) */ - qla_put_iocbs(sp->qpair, &sp->iores); + qla_put_fw_resources(sp->qpair, &sp->iores); spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_FUNCTION_FAILED; @@ -1952,9 +1954,10 @@ qla2xxx_start_scsi_mq(srb_t *sp) tot_dsds = nseg; req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); - sp->iores.res_type = RESOURCE_INI; + sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; + sp->iores.exch_cnt = 1; sp->iores.iocb_cnt = req_cnt; - if (qla_get_iocbs(sp->qpair, &sp->iores)) + if (qla_get_fw_resources(sp->qpair, &sp->iores)) goto queuing_error; if (req->cnt < (req_cnt + 2)) { @@ -2041,7 +2044,7 @@ queuing_error: if (tot_dsds) scsi_dma_unmap(cmd); - qla_put_iocbs(sp->qpair, &sp->iores); + qla_put_fw_resources(sp->qpair, &sp->iores); spin_unlock_irqrestore(&qpair->qp_lock, flags); return QLA_FUNCTION_FAILED; @@ -2171,9 +2174,10 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp) tot_prot_dsds = nseg; tot_dsds += nseg; - sp->iores.res_type = RESOURCE_INI; + sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; + sp->iores.exch_cnt = 1; sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds); - if (qla_get_iocbs(sp->qpair, &sp->iores)) + if (qla_get_fw_resources(sp->qpair, &sp->iores)) goto queuing_error; if (req->cnt < (req_cnt + 2)) { @@ -2260,7 +2264,7 @@ queuing_error: } /* Cleanup will be performed by the caller (queuecommand) */ - qla_put_iocbs(sp->qpair, &sp->iores); + qla_put_fw_resources(sp->qpair, &sp->iores); spin_unlock_irqrestore(&qpair->qp_lock, flags); return QLA_FUNCTION_FAILED; @@ -2916,7 +2920,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) conflict_fcport->conflict = fcport; fcport->login_pause = 1; ql_dbg(ql_dbg_disc, vha, 0x20ed, - "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n", + "%s %d %8phC pid %06x inuse with lid %#x.\n", __func__, __LINE__, fcport->port_name, fcport->d_id.b24, lid); @@ -3455,13 +3459,7 @@ sufficient_dsds: goto queuing_error; } - ctx = sp->u.scmd.ct6_ctx = - mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); - if (!ctx) { - ql_log(ql_log_fatal, vha, 0x3010, - "Failed to allocate ctx for cmd=%p.\n", cmd); - goto queuing_error; - } + ctx = &sp->u.scmd.ct6_ctx; memset(ctx, 0, sizeof(struct ct6_dsd)); ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool, @@ -3813,6 +3811,65 @@ qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio) logio->vp_index = sp->fcport->vha->vp_idx; } +static int 
qla_get_iocbs_resource(struct srb *sp) +{ + bool get_exch; + bool push_it_through = false; + + if (!ql2xenforce_iocb_limit) { + sp->iores.res_type = RESOURCE_NONE; + return 0; + } + sp->iores.res_type = RESOURCE_NONE; + + switch (sp->type) { + case SRB_TM_CMD: + case SRB_PRLI_CMD: + case SRB_ADISC_CMD: + push_it_through = true; + fallthrough; + case SRB_LOGIN_CMD: + case SRB_ELS_CMD_RPT: + case SRB_ELS_CMD_HST: + case SRB_ELS_CMD_HST_NOLOGIN: + case SRB_CT_CMD: + case SRB_NVME_LS: + case SRB_ELS_DCMD: + get_exch = true; + break; + + case SRB_FXIOCB_DCMD: + case SRB_FXIOCB_BCMD: + sp->iores.res_type = RESOURCE_NONE; + return 0; + + case SRB_SA_UPDATE: + case SRB_SA_REPLACE: + case SRB_MB_IOCB: + case SRB_ABT_CMD: + case SRB_NACK_PLOGI: + case SRB_NACK_PRLI: + case SRB_NACK_LOGO: + case SRB_LOGOUT_CMD: + case SRB_CTRL_VP: + push_it_through = true; + fallthrough; + default: + get_exch = false; + } + + sp->iores.res_type |= RESOURCE_IOCB; + sp->iores.iocb_cnt = 1; + if (get_exch) { + sp->iores.res_type |= RESOURCE_EXCH; + sp->iores.exch_cnt = 1; + } + if (push_it_through) + sp->iores.res_type |= RESOURCE_FORCE; + + return qla_get_fw_resources(sp->qpair, &sp->iores); +} + int qla2x00_start_sp(srb_t *sp) { @@ -3827,6 +3884,12 @@ qla2x00_start_sp(srb_t *sp) return -EIO; spin_lock_irqsave(qp->qp_lock_ptr, flags); + rval = qla_get_iocbs_resource(sp); + if (rval) { + spin_unlock_irqrestore(qp->qp_lock_ptr, flags); + return -EAGAIN; + } + pkt = __qla2x00_alloc_iocbs(sp->qpair, sp); if (!pkt) { rval = EAGAIN; @@ -3927,6 +3990,8 @@ qla2x00_start_sp(srb_t *sp) wmb(); qla2x00_start_iocbs(vha, qp->req); done: + if (rval) + qla_put_fw_resources(sp->qpair, &sp->iores); spin_unlock_irqrestore(qp->qp_lock_ptr, flags); return rval; } diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index e19fde304e5c..46e8b38603f0 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -3112,6 +3112,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, } bsg_reply->reply_payload_rcv_len = 0; + qla_put_fw_resources(sp->qpair, &sp->iores); done: /* Return the vendor specific reply to API */ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval; @@ -3197,7 +3198,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) } return; } - qla_put_iocbs(sp->qpair, &sp->iores); + qla_put_fw_resources(sp->qpair, &sp->iores); if (sp->cmd_type != TYPE_SRB) { req->outstanding_cmds[handle] = NULL; @@ -3362,8 +3363,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", resid, scsi_bufflen(cp)); - vha->interface_err_cnt++; - res = DID_ERROR << 16 | lscsi_status; goto check_scsi_status; } @@ -3618,7 +3617,6 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) default: sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (sp) { - qla_put_iocbs(sp->qpair, &sp->iores); sp->done(sp, res); return 0; } @@ -3771,7 +3769,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) { rsp->qpair->rcv_intr = 1; - qla_cpu_update(rsp->qpair, smp_processor_id()); } #define __update_rsp_in(_is_shadow_hba, _rsp, _rsp_in) \ @@ -4379,6 +4376,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) for (i = 0; i < ha->msix_count; i++) { qentry = &ha->msix_entries[i]; qentry->vector = pci_irq_vector(ha->pdev, i); + qentry->vector_base0 = i; qentry->entry = i; 
qentry->have_irq = 0; qentry->in_use = 0; @@ -4606,5 +4604,6 @@ int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair, } msix->have_irq = 1; msix->handle = qpair; + qla_mapq_init_qp_cpu_map(ha, msix, qpair); return ret; } diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 359595a64664..254fd4c64262 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -4010,7 +4010,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, rptid_entry->port_id[2], rptid_entry->port_id[1], rptid_entry->port_id[0]); ha->current_topology = ISP_CFG_NL; - qlt_update_host_map(vha, id); + qla_update_host_map(vha, id); } else if (rptid_entry->format == 1) { /* fabric */ @@ -4126,7 +4126,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, WWN_SIZE); } - qlt_update_host_map(vha, id); + qla_update_host_map(vha, id); } set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); @@ -4153,7 +4153,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, if (!found) return; - qlt_update_host_map(vp, id); + qla_update_host_map(vp, id); /* * Cannot configure here as we are still sitting on the @@ -4184,7 +4184,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, ha->flags.n2n_ae = 1; spin_lock_irqsave(&ha->vport_slock, flags); - qlt_update_vp_map(vha, SET_AL_PA); + qla_update_vp_map(vha, SET_AL_PA); spin_unlock_irqrestore(&ha->vport_slock, flags); list_for_each_entry(fcport, &vha->vp_fcports, list) { diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index 16a9f22bb860..c6ca39b8e23d 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -52,7 +52,7 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha) spin_unlock_irqrestore(&ha->vport_slock, flags); spin_lock_irqsave(&ha->hardware_lock, flags); - qlt_update_vp_map(vha, SET_VP_IDX); + qla_update_vp_map(vha, SET_VP_IDX); spin_unlock_irqrestore(&ha->hardware_lock, flags); mutex_unlock(&ha->vport_lock); @@ -80,7 +80,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) spin_lock_irqsave(&ha->vport_slock, flags); if (atomic_read(&vha->vref_count) == 0) { list_del(&vha->list); - qlt_update_vp_map(vha, RESET_VP_IDX); + qla_update_vp_map(vha, RESET_VP_IDX); bailout = 1; } spin_unlock_irqrestore(&ha->vport_slock, flags); @@ -95,7 +95,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) "vha->vref_count=%u timeout\n", vha->vref_count.counter); spin_lock_irqsave(&ha->vport_slock, flags); list_del(&vha->list); - qlt_update_vp_map(vha, RESET_VP_IDX); + qla_update_vp_map(vha, RESET_VP_IDX); spin_unlock_irqrestore(&ha->vport_slock, flags); } @@ -187,7 +187,7 @@ qla24xx_disable_vp(scsi_qla_host_t *vha) /* Remove port id from vp target map */ spin_lock_irqsave(&vha->hw->hardware_lock, flags); - qlt_update_vp_map(vha, RESET_AL_PA); + qla_update_vp_map(vha, RESET_AL_PA); spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); qla2x00_mark_vp_devices_dead(vha); @@ -384,15 +384,6 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha) } } - if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) { - ql_dbg(ql_dbg_dpc, vha, 0x4016, - "FCPort update scheduled.\n"); - qla2x00_update_fcports(vha); - clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags); - ql_dbg(ql_dbg_dpc, vha, 0x4017, - "FCPort update end.\n"); - } - if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) && !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) && atomic_read(&vha->loop_state) != LOOP_DOWN) { @@ -1014,3 +1005,288 @@ done: kref_put(&sp->cmd_kref, qla2x00_sp_release); return rval; } + +struct scsi_qla_host 
*qla_find_host_by_vp_idx(struct scsi_qla_host *vha, uint16_t vp_idx) +{ + struct qla_hw_data *ha = vha->hw; + + if (vha->vp_idx == vp_idx) + return vha; + + BUG_ON(ha->vp_map == NULL); + if (likely(test_bit(vp_idx, ha->vp_idx_map))) + return ha->vp_map[vp_idx].vha; + + return NULL; +} + +/* vport_slock to be held by the caller */ +void +qla_update_vp_map(struct scsi_qla_host *vha, int cmd) +{ + void *slot; + u32 key; + int rc; + + if (!vha->hw->vp_map) + return; + + key = vha->d_id.b24; + + switch (cmd) { + case SET_VP_IDX: + vha->hw->vp_map[vha->vp_idx].vha = vha; + break; + case SET_AL_PA: + slot = btree_lookup32(&vha->hw->host_map, key); + if (!slot) { + ql_dbg(ql_dbg_disc, vha, 0xf018, + "Save vha in host_map %p %06x\n", vha, key); + rc = btree_insert32(&vha->hw->host_map, + key, vha, GFP_ATOMIC); + if (rc) + ql_log(ql_log_info, vha, 0xd03e, + "Unable to insert s_id into host_map: %06x\n", + key); + return; + } + ql_dbg(ql_dbg_disc, vha, 0xf019, + "replace existing vha in host_map %p %06x\n", vha, key); + btree_update32(&vha->hw->host_map, key, vha); + break; + case RESET_VP_IDX: + vha->hw->vp_map[vha->vp_idx].vha = NULL; + break; + case RESET_AL_PA: + ql_dbg(ql_dbg_disc, vha, 0xf01a, + "clear vha in host_map %p %06x\n", vha, key); + slot = btree_lookup32(&vha->hw->host_map, key); + if (slot) + btree_remove32(&vha->hw->host_map, key); + vha->d_id.b24 = 0; + break; + } +} + +void qla_update_host_map(struct scsi_qla_host *vha, port_id_t id) +{ + + if (!vha->d_id.b24) { + vha->d_id = id; + qla_update_vp_map(vha, SET_AL_PA); + } else if (vha->d_id.b24 != id.b24) { + qla_update_vp_map(vha, RESET_AL_PA); + vha->d_id = id; + qla_update_vp_map(vha, SET_AL_PA); + } +} + +int qla_create_buf_pool(struct scsi_qla_host *vha, struct qla_qpair *qp) +{ + int sz; + + qp->buf_pool.num_bufs = qp->req->length; + + sz = BITS_TO_LONGS(qp->req->length); + qp->buf_pool.buf_map = kcalloc(sz, sizeof(long), GFP_KERNEL); + if (!qp->buf_pool.buf_map) { + ql_log(ql_log_warn, vha, 0x0186, + "Failed to allocate buf_map(%ld).\n", sz * sizeof(unsigned long)); + return -ENOMEM; + } + sz = qp->req->length * sizeof(void *); + qp->buf_pool.buf_array = kcalloc(qp->req->length, sizeof(void *), GFP_KERNEL); + if (!qp->buf_pool.buf_array) { + ql_log(ql_log_warn, vha, 0x0186, + "Failed to allocate buf_array(%d).\n", sz); + kfree(qp->buf_pool.buf_map); + return -ENOMEM; + } + sz = qp->req->length * sizeof(dma_addr_t); + qp->buf_pool.dma_array = kcalloc(qp->req->length, sizeof(dma_addr_t), GFP_KERNEL); + if (!qp->buf_pool.dma_array) { + ql_log(ql_log_warn, vha, 0x0186, + "Failed to allocate dma_array(%d).\n", sz); + kfree(qp->buf_pool.buf_map); + kfree(qp->buf_pool.buf_array); + return -ENOMEM; + } + set_bit(0, qp->buf_pool.buf_map); + return 0; +} + +void qla_free_buf_pool(struct qla_qpair *qp) +{ + int i; + struct qla_hw_data *ha = qp->vha->hw; + + for (i = 0; i < qp->buf_pool.num_bufs; i++) { + if (qp->buf_pool.buf_array[i] && qp->buf_pool.dma_array[i]) + dma_pool_free(ha->fcp_cmnd_dma_pool, qp->buf_pool.buf_array[i], + qp->buf_pool.dma_array[i]); + qp->buf_pool.buf_array[i] = NULL; + qp->buf_pool.dma_array[i] = 0; + } + + kfree(qp->buf_pool.dma_array); + kfree(qp->buf_pool.buf_array); + kfree(qp->buf_pool.buf_map); +} + +/* it is assume qp->qp_lock is held at this point */ +int qla_get_buf(struct scsi_qla_host *vha, struct qla_qpair *qp, struct qla_buf_dsc *dsc) +{ + u16 tag, i = 0; + void *buf; + dma_addr_t buf_dma; + struct qla_hw_data *ha = vha->hw; + + dsc->tag = TAG_FREED; +again: + tag = 
find_first_zero_bit(qp->buf_pool.buf_map, qp->buf_pool.num_bufs); + if (tag >= qp->buf_pool.num_bufs) { + ql_dbg(ql_dbg_io, vha, 0x00e2, + "qp(%d) ran out of buf resource.\n", qp->id); + return -EIO; + } + if (tag == 0) { + set_bit(0, qp->buf_pool.buf_map); + i++; + if (i == 5) { + ql_dbg(ql_dbg_io, vha, 0x00e3, + "qp(%d) unable to get tag.\n", qp->id); + return -EIO; + } + goto again; + } + + if (!qp->buf_pool.buf_array[tag]) { + buf = dma_pool_zalloc(ha->fcp_cmnd_dma_pool, GFP_ATOMIC, &buf_dma); + if (!buf) { + ql_log(ql_log_fatal, vha, 0x13b1, + "Failed to allocate buf.\n"); + return -ENOMEM; + } + + dsc->buf = qp->buf_pool.buf_array[tag] = buf; + dsc->buf_dma = qp->buf_pool.dma_array[tag] = buf_dma; + qp->buf_pool.num_alloc++; + } else { + dsc->buf = qp->buf_pool.buf_array[tag]; + dsc->buf_dma = qp->buf_pool.dma_array[tag]; + memset(dsc->buf, 0, FCP_CMND_DMA_POOL_SIZE); + } + + qp->buf_pool.num_active++; + if (qp->buf_pool.num_active > qp->buf_pool.max_used) + qp->buf_pool.max_used = qp->buf_pool.num_active; + + dsc->tag = tag; + set_bit(tag, qp->buf_pool.buf_map); + return 0; +} + +void qla_trim_buf(struct qla_qpair *qp, u16 trim) +{ + int i, j; + struct qla_hw_data *ha = qp->vha->hw; + + if (!trim) + return; + + for (i = 0; i < trim; i++) { + j = qp->buf_pool.num_alloc - 1; + if (test_bit(j, qp->buf_pool.buf_map)) { + ql_dbg(ql_dbg_io + ql_dbg_verbose, qp->vha, 0x300b, + "QP id(%d): trim active buf[%d]. Remain %d bufs\n", + qp->id, j, qp->buf_pool.num_alloc); + return; + } + + if (qp->buf_pool.buf_array[j]) { + dma_pool_free(ha->fcp_cmnd_dma_pool, qp->buf_pool.buf_array[j], + qp->buf_pool.dma_array[j]); + qp->buf_pool.buf_array[j] = NULL; + qp->buf_pool.dma_array[j] = 0; + } + qp->buf_pool.num_alloc--; + if (!qp->buf_pool.num_alloc) + break; + } + ql_dbg(ql_dbg_io + ql_dbg_verbose, qp->vha, 0x3010, + "QP id(%d): trimmed %d bufs. Remain %d bufs\n", + qp->id, trim, qp->buf_pool.num_alloc); +} + +void __qla_adjust_buf(struct qla_qpair *qp) +{ + u32 trim; + + qp->buf_pool.take_snapshot = 0; + qp->buf_pool.prev_max = qp->buf_pool.max_used; + qp->buf_pool.max_used = qp->buf_pool.num_active; + + if (qp->buf_pool.prev_max > qp->buf_pool.max_used && + qp->buf_pool.num_alloc > qp->buf_pool.max_used) { + /* down trend */ + trim = qp->buf_pool.num_alloc - qp->buf_pool.max_used; + trim = (trim * 10) / 100; + trim = trim ? 
trim : 1; + qla_trim_buf(qp, trim); + } else if (!qp->buf_pool.prev_max && !qp->buf_pool.max_used) { + /* 2 periods of no io */ + qla_trim_buf(qp, qp->buf_pool.num_alloc); + } +} + +/* it is assume qp->qp_lock is held at this point */ +void qla_put_buf(struct qla_qpair *qp, struct qla_buf_dsc *dsc) +{ + if (dsc->tag == TAG_FREED) + return; + lockdep_assert_held(qp->qp_lock_ptr); + + clear_bit(dsc->tag, qp->buf_pool.buf_map); + qp->buf_pool.num_active--; + dsc->tag = TAG_FREED; + + if (qp->buf_pool.take_snapshot) + __qla_adjust_buf(qp); +} + +#define EXPIRE (60 * HZ) +void qla_adjust_buf(struct scsi_qla_host *vha) +{ + unsigned long flags; + int i; + struct qla_qpair *qp; + + if (vha->vp_idx) + return; + + if (!vha->buf_expired) { + vha->buf_expired = jiffies + EXPIRE; + return; + } + if (time_before(jiffies, vha->buf_expired)) + return; + + vha->buf_expired = jiffies + EXPIRE; + + for (i = 0; i < vha->hw->num_qpairs; i++) { + qp = vha->hw->queue_pair_map[i]; + if (!qp) + continue; + if (!qp->buf_pool.num_alloc) + continue; + + if (qp->buf_pool.take_snapshot) { + /* no io has gone through in the last EXPIRE period */ + spin_lock_irqsave(qp->qp_lock_ptr, flags); + __qla_adjust_buf(qp); + spin_unlock_irqrestore(qp->qp_lock_ptr, flags); + } else { + qp->buf_pool.take_snapshot = 1; + } + } +} diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index 02fdeb0d31ec..648e8f798606 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -170,18 +170,6 @@ out: qla2xxx_rel_qpair_sp(sp->qpair, sp); } -static void qla_nvme_ls_unmap(struct srb *sp, struct nvmefc_ls_req *fd) -{ - if (sp->flags & SRB_DMA_VALID) { - struct srb_iocb *nvme = &sp->u.iocb_cmd; - struct qla_hw_data *ha = sp->fcport->vha->hw; - - dma_unmap_single(&ha->pdev->dev, nvme->u.nvme.cmd_dma, - fd->rqstlen, DMA_TO_DEVICE); - sp->flags &= ~SRB_DMA_VALID; - } -} - static void qla_nvme_release_ls_cmd_kref(struct kref *kref) { struct srb *sp = container_of(kref, struct srb, cmd_kref); @@ -199,7 +187,6 @@ static void qla_nvme_release_ls_cmd_kref(struct kref *kref) fd = priv->fd; - qla_nvme_ls_unmap(sp, fd); fd->done(fd, priv->comp_status); out: qla2x00_rel_sp(sp); @@ -365,13 +352,10 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport, nvme->u.nvme.rsp_len = fd->rsplen; nvme->u.nvme.rsp_dma = fd->rspdma; nvme->u.nvme.timeout_sec = fd->timeout; - nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr, - fd->rqstlen, DMA_TO_DEVICE); + nvme->u.nvme.cmd_dma = fd->rqstdma; dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma, fd->rqstlen, DMA_TO_DEVICE); - sp->flags |= SRB_DMA_VALID; - rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x700e, @@ -379,7 +363,6 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport, wake_up(&sp->nvme_ls_waitq); sp->priv = NULL; priv->sp = NULL; - qla_nvme_ls_unmap(sp, fd); qla2x00_rel_sp(sp); return rval; } @@ -445,13 +428,24 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp) goto queuing_error; } req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); + + sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; + sp->iores.exch_cnt = 1; + sp->iores.iocb_cnt = req_cnt; + if (qla_get_fw_resources(sp->qpair, &sp->iores)) { + rval = -EBUSY; + goto queuing_error; + } + if (req->cnt < (req_cnt + 2)) { if (IS_SHADOW_REG_CAPABLE(ha)) { cnt = *req->out_ptr; } else { cnt = rd_reg_dword_relaxed(req->req_q_out); - if (qla2x00_check_reg16_for_disconnect(vha, cnt)) + if (qla2x00_check_reg16_for_disconnect(vha, cnt)) 
{
+				rval = -EBUSY;
+				goto queuing_error;
+			}
+		}
 
 		if (req->ring_index < cnt)
@@ -600,6 +594,8 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
 		qla24xx_process_response_queue(vha, rsp);
 
 queuing_error:
+	if (rval)
+		qla_put_fw_resources(sp->qpair, &sp->iores);
 	spin_unlock_irqrestore(&qpair->qp_lock, flags);
 
 	return rval;
@@ -613,6 +609,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
 	fc_port_t *fcport;
 	struct srb_iocb *nvme;
 	struct scsi_qla_host *vha;
+	struct qla_hw_data *ha;
 	int rval;
 	srb_t *sp;
 	struct qla_qpair *qpair = hw_queue_handle;
@@ -633,6 +630,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
 		return -ENODEV;
 
 	vha = fcport->vha;
+	ha = vha->hw;
 
 	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
 		return -EBUSY;
@@ -647,6 +645,8 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
 	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
 		return -EBUSY;
 
+	qpair = qla_mapq_nvme_select_qpair(ha, qpair);
+
 	/* Alloc SRB structure */
 	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
 	if (!sp)
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 7fb28c207ee5..545167627e48 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -472,6 +472,11 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
 			    "Unable to allocate memory for queue pair ptrs.\n");
 			goto fail_qpair_map;
 		}
+		if (qla_mapq_alloc_qp_cpu_map(ha) != 0) {
+			kfree(ha->queue_pair_map);
+			ha->queue_pair_map = NULL;
+			goto fail_qpair_map;
+		}
 	}
 
 	/*
@@ -546,6 +551,7 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
 		ha->base_qpair = NULL;
 	}
 
+	qla_mapq_free_qp_cpu_map(ha);
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
 		if (!test_bit(cnt, ha->req_qid_map))
@@ -733,15 +739,17 @@ void qla2x00_sp_free_dma(srb_t *sp)
 	}
 
 	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
-		struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;
+		struct ct6_dsd *ctx1 = &sp->u.scmd.ct6_ctx;
 
 		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
 		    ctx1->fcp_cmnd_dma);
 		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
 		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
 		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
-		mempool_free(ctx1, ha->ctx_mempool);
 	}
+
+	if (sp->flags & SRB_GOT_BUF)
+		qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc);
 }
 
 void qla2x00_sp_compl(srb_t *sp, int res)
@@ -817,14 +825,13 @@ void qla2xxx_qpair_sp_free_dma(srb_t *sp)
 	}
 
 	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
-		struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;
+		struct ct6_dsd *ctx1 = &sp->u.scmd.ct6_ctx;
 
 		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
 		    ctx1->fcp_cmnd_dma);
 		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
 		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
 		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
-		mempool_free(ctx1, ha->ctx_mempool);
 		sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
 	}
 
@@ -834,6 +841,9 @@ void qla2xxx_qpair_sp_free_dma(srb_t *sp)
 		dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
 		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
 	}
+
+	if (sp->flags & SRB_GOT_BUF)
+		qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc);
 }
 
 void qla2xxx_qpair_sp_compl(srb_t *sp, int res)
@@ -4118,10 +4128,16 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
 	char name[16];
 	int rc;
 
+	if (QLA_TGT_MODE_ENABLED() || EDIF_CAP(ha)) {
+		ha->vp_map = kcalloc(MAX_MULTI_ID_FABRIC, sizeof(struct qla_vp_map), GFP_KERNEL);
+		if (!ha->vp_map)
+			goto fail;
+	}
+
 	ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
 	    &ha->init_cb_dma, GFP_KERNEL);
 	if (!ha->init_cb)
-		goto fail;
+		goto fail_free_vp_map;
 
 	rc = btree_init32(&ha->host_map);
 	if (rc)
@@ -4540,6 +4556,8 @@ fail_free_init_cb:
 	    ha->init_cb_dma);
 	ha->init_cb = NULL;
 	ha->init_cb_dma = 0;
+fail_free_vp_map:
+	kfree(ha->vp_map);
 fail:
 	ql_log(ql_log_fatal, NULL, 0x0030,
 	    "Memory allocation failure.\n");
@@ -4981,6 +4999,9 @@ qla2x00_mem_free(struct qla_hw_data *ha)
 	ha->sf_init_cb = NULL;
 	ha->sf_init_cb_dma = 0;
 	ha->loop_id_map = NULL;
+
+	kfree(ha->vp_map);
+	ha->vp_map = NULL;
 }
 
 struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
@@ -5016,7 +5037,6 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
 	INIT_LIST_HEAD(&vha->plogi_ack_list);
 	INIT_LIST_HEAD(&vha->qp_list);
 	INIT_LIST_HEAD(&vha->gnl.fcports);
-	INIT_LIST_HEAD(&vha->gpnid_list);
 
 	INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn);
 	INIT_LIST_HEAD(&vha->purex_list.head);
@@ -5461,9 +5481,6 @@ qla2x00_do_work(struct scsi_qla_host *vha)
 		case QLA_EVT_AENFX:
 			qlafx00_process_aen(vha, e);
 			break;
-		case QLA_EVT_GPNID:
-			qla24xx_async_gpnid(vha, &e->u.gpnid.id);
-			break;
 		case QLA_EVT_UNMAP:
 			qla24xx_sp_unmap(vha, e->u.iosb.sp);
 			break;
@@ -5506,9 +5523,6 @@ qla2x00_do_work(struct scsi_qla_host *vha)
 		case QLA_EVT_GNNFT_DONE:
 			qla24xx_async_gnnft_done(vha, e->u.iosb.sp);
 			break;
-		case QLA_EVT_GNNID:
-			qla24xx_async_gnnid(vha, e->u.fcport.fcport);
-			break;
 		case QLA_EVT_GFPNID:
 			qla24xx_async_gfpnid(vha, e->u.fcport.fcport);
 			break;
@@ -7025,11 +7039,6 @@ qla2x00_do_dpc(void *data)
 			}
 		}
 
-		if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
-			&base_vha->dpc_flags)) {
-			qla2x00_update_fcports(base_vha);
-		}
-
 		if (IS_QLAFX00(ha))
 			goto loop_resync_check;
 
@@ -7094,9 +7103,12 @@ qla2x00_do_dpc(void *data)
 			}
 		}
 loop_resync_check:
-		if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
+		if (!qla2x00_reset_active(base_vha) &&
+		    test_and_clear_bit(LOOP_RESYNC_NEEDED,
 			&base_vha->dpc_flags)) {
-
+			/*
+			 * Allow abort_isp to complete before moving on to scanning.
+			 */
 			ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
 			    "Loop resync scheduled.\n");
 
@@ -7447,7 +7459,7 @@ qla2x00_timer(struct timer_list *t)
 
 		/* if the loop has been down for 4 minutes, reinit adapter */
 		if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
-			if (!(vha->device_flags & DFLG_NO_CABLE)) {
+			if (!(vha->device_flags & DFLG_NO_CABLE) && !vha->vp_idx) {
 				ql_log(ql_log_warn, vha, 0x6009,
 				    "Loop down - aborting ISP.\n");
 
@@ -7516,13 +7528,13 @@ qla2x00_timer(struct timer_list *t)
 		set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
 		start_dpc++;
 	}
+	qla_adjust_buf(vha);
 
 	/* borrowing w to signify dpc will run */
 	w = 0;
 	/* Schedule the DPC routine if needed */
 	if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
 	    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
-	    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) ||
 	    start_dpc ||
 	    test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
 	    test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
@@ -7533,13 +7545,10 @@ qla2x00_timer(struct timer_list *t)
 	    test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags))) {
 		ql_dbg(ql_dbg_timer, vha, 0x600b,
 		    "isp_abort_needed=%d loop_resync_needed=%d "
-		    "fcport_update_needed=%d start_dpc=%d "
-		    "reset_marker_needed=%d",
+		    "start_dpc=%d reset_marker_needed=%d",
 		    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags),
 		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags),
-		    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags),
-		    start_dpc,
-		    test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags));
+		    start_dpc, test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags));
 		ql_dbg(ql_dbg_timer, vha, 0x600c,
 		    "beacon_blink_needed=%d isp_unrecoverable=%d "
 		    "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 548f22705ddc..dbd6660c0bf8 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -198,22 +198,6 @@ struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *vha,
 	return host;
 }
 
-static inline
-struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
-	uint16_t vp_idx)
-{
-	struct qla_hw_data *ha = vha->hw;
-
-	if (vha->vp_idx == vp_idx)
-		return vha;
-
-	BUG_ON(ha->tgt.tgt_vp_map == NULL);
-	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
-		return ha->tgt.tgt_vp_map[vp_idx].vha;
-
-	return NULL;
-}
-
 static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
 {
 	unsigned long flags;
@@ -371,7 +355,7 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
 
 		if ((entry->u.isp24.vp_index != 0xFF) &&
 		    (entry->u.isp24.nport_handle != cpu_to_le16(0xFFFF))) {
-			host = qlt_find_host_by_vp_idx(vha,
+			host = qla_find_host_by_vp_idx(vha,
 				entry->u.isp24.vp_index);
 			if (unlikely(!host)) {
 				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
@@ -395,7 +379,7 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
 	{
 		struct abts_recv_from_24xx *entry =
 			(struct abts_recv_from_24xx *)atio;
-		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+		struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
 			entry->vp_index);
 		unsigned long flags;
 
@@ -438,7 +422,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
 	case CTIO_TYPE7:
 	{
 		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
-		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+		struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
 		    entry->vp_index);
 		if (unlikely(!host)) {
 			ql_dbg(ql_dbg_tgt, vha, 0xe041,
@@ -457,7 +441,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
 		struct imm_ntfy_from_isp *entry =
 		    (struct imm_ntfy_from_isp *)pkt;
 
-		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
+		host = qla_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
 		if (unlikely(!host)) {
 			ql_dbg(ql_dbg_tgt, vha, 0xe042,
 			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
@@ -475,7 +459,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
 		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
 
 		if (0xFF != entry->u.isp24.vp_index) {
-			host = qlt_find_host_by_vp_idx(vha,
+			host = qla_find_host_by_vp_idx(vha,
 				entry->u.isp24.vp_index);
 			if (unlikely(!host)) {
 				ql_dbg(ql_dbg_tgt, vha, 0xe043,
@@ -495,7 +479,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
 	{
 		struct abts_recv_from_24xx *entry =
 		    (struct abts_recv_from_24xx *)pkt;
-		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+		struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
 		    entry->vp_index);
 		if (unlikely(!host)) {
 			ql_dbg(ql_dbg_tgt, vha, 0xe044,
@@ -512,7 +496,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
 	{
 		struct abts_resp_to_24xx *entry =
 		    (struct abts_resp_to_24xx *)pkt;
-		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+		struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
 		    entry->vp_index);
 		if (unlikely(!host)) {
 			ql_dbg(ql_dbg_tgt, vha, 0xe045,
@@ -7145,7 +7129,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
 
 	qlt_clear_mode(base_vha);
 
-	qlt_update_vp_map(base_vha, SET_VP_IDX);
+	qla_update_vp_map(base_vha, SET_VP_IDX);
 }
 
 irqreturn_t
@@ -7224,17 +7208,10 @@ qlt_mem_alloc(struct qla_hw_data *ha)
 	if (!QLA_TGT_MODE_ENABLED())
 		return 0;
 
-	ha->tgt.tgt_vp_map = kcalloc(MAX_MULTI_ID_FABRIC,
-	    sizeof(struct qla_tgt_vp_map),
-	    GFP_KERNEL);
-	if (!ha->tgt.tgt_vp_map)
-		return -ENOMEM;
-
 	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
 	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
 	    &ha->tgt.atio_dma, GFP_KERNEL);
 	if (!ha->tgt.atio_ring) {
-		kfree(ha->tgt.tgt_vp_map);
 		return -ENOMEM;
 	}
 	return 0;
@@ -7253,70 +7230,6 @@ qlt_mem_free(struct qla_hw_data *ha)
 	}
 	ha->tgt.atio_ring = NULL;
 	ha->tgt.atio_dma = 0;
-	kfree(ha->tgt.tgt_vp_map);
-	ha->tgt.tgt_vp_map = NULL;
-}
-
-/* vport_slock to be held by the caller */
-void
-qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
-{
-	void *slot;
-	u32 key;
-	int rc;
-
-	key = vha->d_id.b24;
-
-	switch (cmd) {
-	case SET_VP_IDX:
-		if (!QLA_TGT_MODE_ENABLED())
-			return;
-		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
-		break;
-	case SET_AL_PA:
-		slot = btree_lookup32(&vha->hw->host_map, key);
-		if (!slot) {
-			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
-			    "Save vha in host_map %p %06x\n", vha, key);
-			rc = btree_insert32(&vha->hw->host_map,
-				key, vha, GFP_ATOMIC);
-			if (rc)
-				ql_log(ql_log_info, vha, 0xd03e,
-				    "Unable to insert s_id into host_map: %06x\n",
-				    key);
-			return;
-		}
-		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
-		    "replace existing vha in host_map %p %06x\n", vha, key);
-		btree_update32(&vha->hw->host_map, key, vha);
-		break;
-	case RESET_VP_IDX:
-		if (!QLA_TGT_MODE_ENABLED())
-			return;
-		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
-		break;
-	case RESET_AL_PA:
-		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
-		    "clear vha in host_map %p %06x\n", vha, key);
-		slot = btree_lookup32(&vha->hw->host_map, key);
-		if (slot)
-			btree_remove32(&vha->hw->host_map, key);
-		vha->d_id.b24 = 0;
-		break;
-	}
-}
-
-void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
-{
-
-	if (!vha->d_id.b24) {
-		vha->d_id = id;
-		qlt_update_vp_map(vha, SET_AL_PA);
-	} else if (vha->d_id.b24 != id.b24) {
-		qlt_update_vp_map(vha, RESET_AL_PA);
-		vha->d_id = id;
-		qlt_update_vp_map(vha, SET_AL_PA);
-	}
 }
 
 static int __init qlt_parse_ini_mode(void)
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 7df86578214f..354fca2e7feb 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -1017,7 +1017,6 @@ extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
 extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int);
 extern int __init qlt_init(void);
 extern void qlt_exit(void);
-extern void qlt_update_vp_map(struct scsi_qla_host *, int);
 extern void qlt_free_session_done(struct work_struct *);
 /*
  * This macro is used during early initializations when host->active_mode
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 03f3e2cd62b5..42d69d89834f 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "10.02.07.900-k"
+#define QLA2XXX_VERSION      "10.02.08.200-k"
 
 #define QLA_DRIVER_MAJOR_VER	10
 #define QLA_DRIVER_MINOR_VER	2
-#define QLA_DRIVER_PATCH_VER	7
-#define QLA_DRIVER_BETA_VER	900
+#define QLA_DRIVER_PATCH_VER	8
+#define QLA_DRIVER_BETA_VER	200
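
Note: the qla_target.c hunks above drop the target-mode-only qlt_update_vp_map()/qlt_update_host_map() pair and the qlt_find_host_by_vp_idx() helper; callers move to qla_find_host_by_vp_idx() and qla_update_vp_map(), whose new definitions are outside the hunks shown here. As a hedged, self-contained sketch (not the driver's actual replacement code; all example_* names are hypothetical), the host-map bookkeeping that the removed qlt_update_vp_map() performed with the kernel's 32-bit btree API looks roughly like this:

/*
 * Hedged illustration only -- mirrors the btree_head32 usage visible in the
 * removed qlt_update_vp_map() above: a 24-bit FC port ID (d_id.b24) keys a
 * map that resolves to the owning host structure.
 */
#include <linux/btree.h>
#include <linux/gfp.h>
#include <linux/types.h>

struct example_host_map {
	struct btree_head32 head;	/* keyed by the 24-bit port ID */
};

static int example_map_init(struct example_host_map *map)
{
	return btree_init32(&map->head);
}

/* Insert on first sight of the port ID, update in place on a duplicate. */
static int example_map_set(struct example_host_map *map, u32 key, void *host)
{
	if (!btree_lookup32(&map->head, key))
		return btree_insert32(&map->head, key, host, GFP_ATOMIC);
	return btree_update32(&map->head, key, host);
}

/* Drop the mapping if present; a miss is simply ignored. */
static void example_map_clear(struct example_host_map *map, u32 key)
{
	if (btree_lookup32(&map->head, key))
		btree_remove32(&map->head, key);
}

GFP_ATOMIC mirrors the removed code, which the comment says runs with vport_slock held; a caller that is allowed to sleep could pass GFP_KERNEL instead.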