Diffstat (limited to 'drivers/net/ethernet/broadcom')
24 files changed, 1213 insertions, 868 deletions
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 9b017d9c58e9..90e54d5488dc 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -596,6 +596,7 @@ static void b44_timer(unsigned long __opaque) static void b44_tx(struct b44 *bp) { u32 cur, cons; + unsigned bytes_compl = 0, pkts_compl = 0; cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK; cur /= sizeof(struct dma_desc); @@ -612,9 +613,14 @@ static void b44_tx(struct b44 *bp) skb->len, DMA_TO_DEVICE); rp->skb = NULL; + + bytes_compl += skb->len; + pkts_compl++; + dev_kfree_skb_irq(skb); } + netdev_completed_queue(bp->dev, pkts_compl, bytes_compl); bp->tx_cons = cons; if (netif_queue_stopped(bp->dev) && TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH) @@ -1018,6 +1024,8 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev) if (bp->flags & B44_FLAG_REORDER_BUG) br32(bp, B44_DMATX_PTR); + netdev_sent_queue(dev, skb->len); + if (TX_BUFFS_AVAIL(bp) < 1) netif_stop_queue(dev); @@ -1416,6 +1424,8 @@ static void b44_init_hw(struct b44 *bp, int reset_kind) val = br32(bp, B44_ENET_CTRL); bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE)); + + netdev_reset_queue(bp->dev); } static int b44_open(struct net_device *dev) @@ -2101,7 +2111,7 @@ static int b44_get_invariants(struct b44 *bp) * valid PHY address. */ bp->phy_addr &= 0x1F; - memcpy(bp->dev->dev_addr, addr, 6); + memcpy(bp->dev->dev_addr, addr, ETH_ALEN); if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){ pr_err("Invalid MAC address found in EEPROM\n"); @@ -2183,8 +2193,7 @@ static int b44_init_one(struct ssb_device *sdev, goto err_out_free_dev; } - if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) || - dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) { + if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) { dev_err(sdev->dev, "Required 30BIT DMA mask unsupported by the system\n"); goto err_out_powerdown; diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 249468f95365..e2aa09ce6af7 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -149,6 +149,8 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac, dma_desc->ctl0 = cpu_to_le32(ctl0); dma_desc->ctl1 = cpu_to_le32(ctl1); + netdev_sent_queue(net_dev, skb->len); + wmb(); /* Increase ring->end to point empty slot. We tell hardware the first @@ -178,6 +180,7 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring) struct device *dma_dev = bgmac->core->dma_dev; int empty_slot; bool freed = false; + unsigned bytes_compl = 0, pkts_compl = 0; /* The last slot that hardware didn't consume yet */ empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS); @@ -195,6 +198,9 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring) slot->skb->len, DMA_TO_DEVICE); slot->dma_addr = 0; + bytes_compl += slot->skb->len; + pkts_compl++; + /* Free memory! 
:) */ dev_kfree_skb(slot->skb); slot->skb = NULL; @@ -208,6 +214,8 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring) freed = true; } + netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl); + if (freed && netif_queue_stopped(bgmac->net_dev)) netif_wake_queue(bgmac->net_dev); } @@ -244,31 +252,59 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac, struct bgmac_slot_info *slot) { struct device *dma_dev = bgmac->core->dma_dev; + struct sk_buff *skb; + dma_addr_t dma_addr; struct bgmac_rx_header *rx; /* Alloc skb */ - slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE); - if (!slot->skb) + skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE); + if (!skb) return -ENOMEM; /* Poison - if everything goes fine, hardware will overwrite it */ - rx = (struct bgmac_rx_header *)slot->skb->data; + rx = (struct bgmac_rx_header *)skb->data; rx->len = cpu_to_le16(0xdead); rx->flags = cpu_to_le16(0xbeef); /* Map skb for the DMA */ - slot->dma_addr = dma_map_single(dma_dev, slot->skb->data, - BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(dma_dev, slot->dma_addr)) { + dma_addr = dma_map_single(dma_dev, skb->data, + BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(dma_dev, dma_addr)) { bgmac_err(bgmac, "DMA mapping error\n"); + dev_kfree_skb(skb); return -ENOMEM; } + + /* Update the slot */ + slot->skb = skb; + slot->dma_addr = dma_addr; + if (slot->dma_addr & 0xC0000000) bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); return 0; } +static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac, + struct bgmac_dma_ring *ring, int desc_idx) +{ + struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx; + u32 ctl0 = 0, ctl1 = 0; + + if (desc_idx == ring->num_slots - 1) + ctl0 |= BGMAC_DESC_CTL0_EOT; + ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN; + /* Is there any BGMAC device that requires extension? */ + /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) & + * B43_DMA64_DCTL1_ADDREXT_MASK; + */ + + dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr)); + dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr)); + dma_desc->ctl0 = cpu_to_le32(ctl0); + dma_desc->ctl1 = cpu_to_le32(ctl1); +} + static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring, int weight) { @@ -287,7 +323,6 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring, struct device *dma_dev = bgmac->core->dma_dev; struct bgmac_slot_info *slot = &ring->slots[ring->start]; struct sk_buff *skb = slot->skb; - struct sk_buff *new_skb; struct bgmac_rx_header *rx; u16 len, flags; @@ -300,38 +335,51 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring, len = le16_to_cpu(rx->len); flags = le16_to_cpu(rx->flags); - /* Check for poison and drop or pass the packet */ - if (len == 0xdead && flags == 0xbeef) { - bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n", - ring->start); - } else { + do { + dma_addr_t old_dma_addr = slot->dma_addr; + int err; + + /* Check for poison and drop or pass the packet */ + if (len == 0xdead && flags == 0xbeef) { + bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n", + ring->start); + dma_sync_single_for_device(dma_dev, + slot->dma_addr, + BGMAC_RX_BUF_SIZE, + DMA_FROM_DEVICE); + break; + } + /* Omit CRC. 
*/ len -= ETH_FCS_LEN; - new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len); - if (new_skb) { - skb_put(new_skb, len); - skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET, - new_skb->data, - len); - skb_checksum_none_assert(skb); - new_skb->protocol = - eth_type_trans(new_skb, bgmac->net_dev); - netif_receive_skb(new_skb); - handled++; - } else { - bgmac->net_dev->stats.rx_dropped++; - bgmac_err(bgmac, "Allocation of skb for copying packet failed!\n"); + /* Prepare new skb as replacement */ + err = bgmac_dma_rx_skb_for_slot(bgmac, slot); + if (err) { + /* Poison the old skb */ + rx->len = cpu_to_le16(0xdead); + rx->flags = cpu_to_le16(0xbeef); + + dma_sync_single_for_device(dma_dev, + slot->dma_addr, + BGMAC_RX_BUF_SIZE, + DMA_FROM_DEVICE); + break; } + bgmac_dma_rx_setup_desc(bgmac, ring, ring->start); - /* Poison the old skb */ - rx->len = cpu_to_le16(0xdead); - rx->flags = cpu_to_le16(0xbeef); - } + /* Unmap old skb, we'll pass it to the netfif */ + dma_unmap_single(dma_dev, old_dma_addr, + BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE); + + skb_put(skb, BGMAC_RX_FRAME_OFFSET + len); + skb_pull(skb, BGMAC_RX_FRAME_OFFSET); - /* Make it back accessible to the hardware */ - dma_sync_single_for_device(dma_dev, slot->dma_addr, - BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE); + skb_checksum_none_assert(skb); + skb->protocol = eth_type_trans(skb, bgmac->net_dev); + netif_receive_skb(skb); + handled++; + } while (0); if (++ring->start >= BGMAC_RX_RING_SLOTS) ring->start = 0; @@ -495,8 +543,6 @@ err_dma_free: static void bgmac_dma_init(struct bgmac *bgmac) { struct bgmac_dma_ring *ring; - struct bgmac_dma_desc *dma_desc; - u32 ctl0, ctl1; int i; for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) { @@ -529,23 +575,8 @@ static void bgmac_dma_init(struct bgmac *bgmac) if (ring->unaligned) bgmac_dma_rx_enable(bgmac, ring); - for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots; - j++, dma_desc++) { - ctl0 = ctl1 = 0; - - if (j == ring->num_slots - 1) - ctl0 |= BGMAC_DESC_CTL0_EOT; - ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN; - /* Is there any BGMAC device that requires extension? 
*/ - /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) & - * B43_DMA64_DCTL1_ADDREXT_MASK; - */ - - dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[j].dma_addr)); - dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[j].dma_addr)); - dma_desc->ctl0 = cpu_to_le32(ctl0); - dma_desc->ctl1 = cpu_to_le32(ctl1); - } + for (j = 0; j < ring->num_slots; j++) + bgmac_dma_rx_setup_desc(bgmac, ring, j); bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX, ring->index_base + @@ -988,6 +1019,8 @@ static void bgmac_chip_reset(struct bgmac *bgmac) bgmac_miiconfig(bgmac); bgmac_phy_init(bgmac); + netdev_reset_queue(bgmac->net_dev); + bgmac->int_status = 0; } diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index e838a3f74b69..d9980ad00b4b 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -5761,8 +5761,8 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) if (!skb) return -ENOMEM; packet = skb_put(skb, pkt_size); - memcpy(packet, bp->dev->dev_addr, 6); - memset(packet + 6, 0x0, 8); + memcpy(packet, bp->dev->dev_addr, ETH_ALEN); + memset(packet + ETH_ALEN, 0x0, 8); for (i = 14; i < pkt_size; i++) packet[i] = (unsigned char) (i & 0xff); @@ -8413,7 +8413,6 @@ err_out_release: err_out_disable: pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); err_out: return rc; @@ -8514,7 +8513,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, dev); - memcpy(dev->dev_addr, bp->mac_addr, 6); + memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN); dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN | @@ -8546,7 +8545,6 @@ error: pci_iounmap(pdev, bp->regview); pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); err_free: free_netdev(dev); return rc; @@ -8578,7 +8576,6 @@ bnx2_remove_one(struct pci_dev *pdev) pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); } static int diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 97b3d32a98bd..ec6119089b82 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -520,10 +520,12 @@ struct bnx2x_fastpath { #define BNX2X_FP_STATE_IDLE 0 #define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */ #define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */ -#define BNX2X_FP_STATE_NAPI_YIELD (1 << 2) /* NAPI yielded this FP */ -#define BNX2X_FP_STATE_POLL_YIELD (1 << 3) /* poll yielded this FP */ +#define BNX2X_FP_STATE_DISABLED (1 << 2) +#define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */ +#define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */ +#define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL) #define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD) -#define BNX2X_FP_LOCKED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL) +#define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED) #define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD) /* protect state */ spinlock_t lock; @@ -613,7 +615,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) { bool rc = true; - spin_lock(&fp->lock); + spin_lock_bh(&fp->lock); if (fp->state & BNX2X_FP_LOCKED) { WARN_ON(fp->state & BNX2X_FP_STATE_NAPI); fp->state |= BNX2X_FP_STATE_NAPI_YIELD; @@ -622,7 +624,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) /* we don't 
care if someone yielded */ fp->state = BNX2X_FP_STATE_NAPI; } - spin_unlock(&fp->lock); + spin_unlock_bh(&fp->lock); return rc; } @@ -631,14 +633,16 @@ static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) { bool rc = false; - spin_lock(&fp->lock); + spin_lock_bh(&fp->lock); WARN_ON(fp->state & (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD)); if (fp->state & BNX2X_FP_STATE_POLL_YIELD) rc = true; - fp->state = BNX2X_FP_STATE_IDLE; - spin_unlock(&fp->lock); + + /* state ==> idle, unless currently disabled */ + fp->state &= BNX2X_FP_STATE_DISABLED; + spin_unlock_bh(&fp->lock); return rc; } @@ -669,7 +673,9 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) if (fp->state & BNX2X_FP_STATE_POLL_YIELD) rc = true; - fp->state = BNX2X_FP_STATE_IDLE; + + /* state ==> idle, unless currently disabled */ + fp->state &= BNX2X_FP_STATE_DISABLED; spin_unlock_bh(&fp->lock); return rc; } @@ -677,9 +683,23 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) /* true if a socket is polling, even if it did not get the lock */ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) { - WARN_ON(!(fp->state & BNX2X_FP_LOCKED)); + WARN_ON(!(fp->state & BNX2X_FP_OWNED)); return fp->state & BNX2X_FP_USER_PEND; } + +/* false if fp is currently owned */ +static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp) +{ + int rc = true; + + spin_lock_bh(&fp->lock); + if (fp->state & BNX2X_FP_OWNED) + rc = false; + fp->state |= BNX2X_FP_STATE_DISABLED; + spin_unlock_bh(&fp->lock); + + return rc; +} #else static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) { @@ -709,6 +729,10 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) { return false; } +static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp) +{ + return true; +} #endif /* CONFIG_NET_RX_BUSY_POLL */ /* Use 2500 as a mini-jumbo MTU for FCoE */ @@ -1197,8 +1221,9 @@ union cdu_context { /* TM (timers) host DB constants */ #define TM_ILT_PAGE_SZ_HW 0 #define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */ -/* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */ -#define TM_CONN_NUM 1024 +#define TM_CONN_NUM (BNX2X_FIRST_VF_CID + \ + BNX2X_VF_CIDS + \ + CNIC_ISCSI_CID_MAX) #define TM_ILT_SZ (8 * TM_CONN_NUM) #define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ) @@ -1249,7 +1274,10 @@ struct bnx2x_slowpath { * Therefore, if they would have been defined in the same union, * data can get corrupted. 
*/ - struct afex_vif_list_ramrod_data func_afex_rdata; + union { + struct afex_vif_list_ramrod_data viflist_data; + struct function_update_data func_update; + } func_afex_rdata; /* used by dmae command executer */ struct dmae_command dmae[MAX_DMAE_C]; @@ -1375,7 +1403,6 @@ enum { BNX2X_SP_RTNL_RX_MODE, BNX2X_SP_RTNL_HYPERVISOR_VLAN, BNX2X_SP_RTNL_TX_STOP, - BNX2X_SP_RTNL_TX_RESUME, }; struct bnx2x_prev_path_list { @@ -1527,7 +1554,6 @@ struct bnx2x { #define PCI_32BIT_FLAG (1 << 1) #define ONE_PORT_FLAG (1 << 2) #define NO_WOL_FLAG (1 << 3) -#define USING_DAC_FLAG (1 << 4) #define USING_MSIX_FLAG (1 << 5) #define USING_MSI_FLAG (1 << 6) #define DISABLE_MSI_FLAG (1 << 7) @@ -1546,6 +1572,7 @@ struct bnx2x { #define IS_VF_FLAG (1 << 22) #define INTERRUPTS_ENABLED_FLAG (1 << 23) #define BC_SUPPORTS_RMMOD_CMD (1 << 24) +#define HAS_PHYS_PORT_ID (1 << 25) #define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG) @@ -1621,7 +1648,7 @@ struct bnx2x { u16 rx_ticks_int; u16 rx_ticks; /* Maximal coalescing timeout in us */ -#define BNX2X_MAX_COALESCE_TOUT (0xf0*12) +#define BNX2X_MAX_COALESCE_TOUT (0xff*BNX2X_BTR) u32 lin_cnt; @@ -1876,6 +1903,8 @@ struct bnx2x { u32 dump_preset_idx; bool stats_started; struct semaphore stats_sema; + + u8 phys_port_id[ETH_ALEN]; }; /* Tx queues may be less or equal to Rx queues */ @@ -2072,7 +2101,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, u8 src_type, u8 dst_type); -int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae); +int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, + u32 *comp); /* FLR related routines */ u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp); @@ -2231,7 +2261,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, #define BNX2X_NUM_TESTS_SF 7 #define BNX2X_NUM_TESTS_MF 3 #define BNX2X_NUM_TESTS(bp) (IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \ - BNX2X_NUM_TESTS_SF) + IS_VF(bp) ? 0 : BNX2X_NUM_TESTS_SF) #define BNX2X_PHY_LOOPBACK 0 #define BNX2X_MAC_LOOPBACK 1 @@ -2491,11 +2521,11 @@ enum { #define NUM_MACS 8 -enum bnx2x_pci_bus_speed { - BNX2X_PCI_LINK_SPEED_2500 = 2500, - BNX2X_PCI_LINK_SPEED_5000 = 5000, - BNX2X_PCI_LINK_SPEED_8000 = 8000 -}; - void bnx2x_set_local_cmng(struct bnx2x *bp); + +#define MCPR_SCRATCH_BASE(bp) \ + (CHIP_IS_E1x(bp) ? 
MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) + +#define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX)) + #endif /* bnx2x.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 61726af1de6e..bf811565ee24 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -160,6 +160,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, struct sk_buff *skb = tx_buf->skb; u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; int nbd; + u16 split_bd_len = 0; /* prefetch skb end pointer to speedup dev_kfree_skb() */ prefetch(&skb->end); @@ -167,10 +168,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", txdata->txq_index, idx, tx_buf, skb); - /* unmap first bd */ tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd; - dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), - BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE); nbd = le16_to_cpu(tx_start_bd->nbd) - 1; #ifdef BNX2X_STOP_ON_ERROR @@ -188,12 +186,19 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, --nbd; bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); - /* ...and the TSO split header bd since they have no mapping */ + /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */ if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { + tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; + split_bd_len = BD_UNMAP_LEN(tx_data_bd); --nbd; bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); } + /* unmap first bd */ + dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), + BD_UNMAP_LEN(tx_start_bd) + split_bd_len, + DMA_TO_DEVICE); + /* now free frags */ while (nbd > 0) { @@ -681,6 +686,7 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp, } } #endif + skb_record_rx_queue(skb, fp->rx_queue); napi_gro_receive(&fp->napi, skb); } @@ -1789,26 +1795,22 @@ static void bnx2x_napi_disable_cnic(struct bnx2x *bp) { int i; - local_bh_disable(); for_each_rx_queue_cnic(bp, i) { napi_disable(&bnx2x_fp(bp, i, napi)); - while (!bnx2x_fp_lock_napi(&bp->fp[i])) - mdelay(1); + while (!bnx2x_fp_ll_disable(&bp->fp[i])) + usleep_range(1000, 2000); } - local_bh_enable(); } static void bnx2x_napi_disable(struct bnx2x *bp) { int i; - local_bh_disable(); for_each_eth_queue(bp, i) { napi_disable(&bnx2x_fp(bp, i, napi)); - while (!bnx2x_fp_lock_napi(&bp->fp[i])) - mdelay(1); + while (!bnx2x_fp_ll_disable(&bp->fp[i])) + usleep_range(1000, 2000); } - local_bh_enable(); } void bnx2x_netif_start(struct bnx2x *bp) @@ -1831,7 +1833,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) bnx2x_napi_disable_cnic(bp); } -u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) +u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv) { struct bnx2x *bp = netdev_priv(dev); @@ -2481,8 +2484,7 @@ load_error_cnic2: load_error_cnic1: bnx2x_napi_disable_cnic(bp); /* Update the number of queues without the cnic queues */ - rc = bnx2x_set_real_num_queues(bp, 0); - if (rc) + if (bnx2x_set_real_num_queues(bp, 0)) BNX2X_ERR("Unable to set real_num_queues not including cnic\n"); load_error_cnic0: BNX2X_ERR("CNIC-related load failed\n"); @@ -2545,10 +2547,6 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) } } - /* Allocated memory for FW statistics */ - if (bnx2x_alloc_fw_stats_mem(bp)) - LOAD_ERROR_EXIT(bp, load_error0); - /* need to be done after alloc 
mem, since it's self adjusting to amount * of memory available for RSS queues */ @@ -2558,6 +2556,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) LOAD_ERROR_EXIT(bp, load_error0); } + /* Allocated memory for FW statistics */ + if (bnx2x_alloc_fw_stats_mem(bp)) + LOAD_ERROR_EXIT(bp, load_error0); + /* request pf to initialize status blocks */ if (IS_VF(bp)) { rc = bnx2x_vfpf_init(bp); @@ -2812,8 +2814,8 @@ load_error1: if (IS_PF(bp)) bnx2x_clear_pf_load(bp); load_error0: - bnx2x_free_fp_mem(bp); bnx2x_free_fw_stats_mem(bp); + bnx2x_free_fp_mem(bp); bnx2x_free_mem(bp); return rc; @@ -2959,6 +2961,10 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) bp->port.pmf = 0; + /* clear pending work in rtnl task */ + bp->sp_rtnl_state = 0; + smp_mb(); + /* Free SKBs, SGEs, TPA pool and driver internals */ bnx2x_free_skbs(bp); if (CNIC_LOADED(bp)) @@ -3256,14 +3262,16 @@ static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) if (prot == IPPROTO_TCP) rc |= XMIT_CSUM_TCP; - if (skb_is_gso_v6(skb)) { - rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP); - if (rc & XMIT_CSUM_ENC) - rc |= XMIT_GSO_ENC_V6; - } else if (skb_is_gso(skb)) { - rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP); - if (rc & XMIT_CSUM_ENC) - rc |= XMIT_GSO_ENC_V4; + if (skb_is_gso(skb)) { + if (skb_is_gso_v6(skb)) { + rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP); + if (rc & XMIT_CSUM_ENC) + rc |= XMIT_GSO_ENC_V6; + } else { + rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP); + if (rc & XMIT_CSUM_ENC) + rc |= XMIT_GSO_ENC_V4; + } } return rc; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index da8fcaa74495..41f3ca5ad972 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -524,7 +524,8 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac); int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos); /* select_queue callback */ -u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb); +u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv); static inline void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index fcf2761d8828..fdace204b054 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -778,11 +778,6 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) /* ets may affect cmng configuration: reinit it in hw */ bnx2x_set_local_cmng(bp); - - set_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state); - - schedule_delayed_work(&bp->sp_rtnl_task, 0); - return; case BNX2X_DCBX_STATE_TX_RELEASED: DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_RELEASED\n"); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 324de5f05332..32d0f1435fb4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -639,6 +639,9 @@ static int bnx2x_get_regs_len(struct net_device *dev) struct bnx2x *bp = netdev_priv(dev); int regdump_len = 0; + if (IS_VF(bp)) + return 0; + regdump_len = __bnx2x_get_regs_len(bp); regdump_len *= 4; regdump_len += sizeof(struct dump_header); @@ -891,17 +894,8 @@ static void bnx2x_get_regs(struct net_device *dev, * will re-enable parity attentions right after the dump. 
*/ - /* Disable parity on path 0 */ - bnx2x_pretend_func(bp, 0); bnx2x_disable_blocks_parity(bp); - /* Disable parity on path 1 */ - bnx2x_pretend_func(bp, 1); - bnx2x_disable_blocks_parity(bp); - - /* Return to current function */ - bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); - dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1; dump_hdr.preset = DUMP_ALL_PRESETS; dump_hdr.version = BNX2X_DUMP_VERSION; @@ -928,18 +922,9 @@ static void bnx2x_get_regs(struct net_device *dev, /* Actually read the registers */ __bnx2x_get_regs(bp, p); - /* Re-enable parity attentions on path 0 */ - bnx2x_pretend_func(bp, 0); + /* Re-enable parity attentions */ bnx2x_clear_blocks_parity(bp); bnx2x_enable_blocks_parity(bp); - - /* Re-enable parity attentions on path 1 */ - bnx2x_pretend_func(bp, 1); - bnx2x_clear_blocks_parity(bp); - bnx2x_enable_blocks_parity(bp); - - /* Return to current function */ - bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); } static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset) @@ -993,17 +978,8 @@ static int bnx2x_get_dump_data(struct net_device *dev, * will re-enable parity attentions right after the dump. */ - /* Disable parity on path 0 */ - bnx2x_pretend_func(bp, 0); bnx2x_disable_blocks_parity(bp); - /* Disable parity on path 1 */ - bnx2x_pretend_func(bp, 1); - bnx2x_disable_blocks_parity(bp); - - /* Return to current function */ - bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); - dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1; dump_hdr.preset = bp->dump_preset_idx; dump_hdr.version = BNX2X_DUMP_VERSION; @@ -1032,19 +1008,10 @@ static int bnx2x_get_dump_data(struct net_device *dev, /* Actually read the registers */ __bnx2x_get_preset_regs(bp, p, dump_hdr.preset); - /* Re-enable parity attentions on path 0 */ - bnx2x_pretend_func(bp, 0); - bnx2x_clear_blocks_parity(bp); - bnx2x_enable_blocks_parity(bp); - - /* Re-enable parity attentions on path 1 */ - bnx2x_pretend_func(bp, 1); + /* Re-enable parity attentions */ bnx2x_clear_blocks_parity(bp); bnx2x_enable_blocks_parity(bp); - /* Return to current function */ - bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); - return 0; } @@ -2900,9 +2867,16 @@ static void bnx2x_self_test(struct net_device *dev, memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp)); + if (bnx2x_test_nvram(bp) != 0) { + if (!IS_MF(bp)) + buf[4] = 1; + else + buf[0] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + if (!netif_running(dev)) { - DP(BNX2X_MSG_ETHTOOL, - "Can't perform self-test when interface is down\n"); + DP(BNX2X_MSG_ETHTOOL, "Interface is down\n"); return; } @@ -2964,13 +2938,7 @@ static void bnx2x_self_test(struct net_device *dev, /* wait until link state is restored */ bnx2x_wait_for_link(bp, link_up, is_serdes); } - if (bnx2x_test_nvram(bp) != 0) { - if (!IS_MF(bp)) - buf[4] = 1; - else - buf[0] = 1; - etest->flags |= ETH_TEST_FL_FAILED; - } + if (bnx2x_test_intr(bp) != 0) { if (!IS_MF(bp)) buf[5] = 1; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 32767f6aa33f..cf1df8b62e2c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -172,6 +172,7 @@ struct shared_hw_cfg { /* NVRAM Offset */ #define SHARED_HW_CFG_LED_MAC4 0x000c0000 #define SHARED_HW_CFG_LED_PHY8 0x000d0000 #define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000 + #define SHARED_HW_CFG_LED_EXTPHY2 0x000f0000 #define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h 
b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h index 76df015f486a..c2dfea7968f4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h @@ -640,23 +640,35 @@ static const struct { * [30] MCP Latched ump_tx_parity * [31] MCP Latched scpad_parity */ -#define MISC_AEU_ENABLE_MCP_PRTY_BITS \ +#define MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS \ (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ - AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY) + +#define MISC_AEU_ENABLE_MCP_PRTY_BITS \ + (MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS | \ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) /* Below registers control the MCP parity attention output. When * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are * enabled, when cleared - disabled. */ -static const u32 mcp_attn_ctl_regs[] = { - MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0, - MISC_REG_AEU_ENABLE4_NIG_0, - MISC_REG_AEU_ENABLE4_PXP_0, - MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0, - MISC_REG_AEU_ENABLE4_NIG_1, - MISC_REG_AEU_ENABLE4_PXP_1 +static const struct { + u32 addr; + u32 bits; +} mcp_attn_ctl_regs[] = { + { MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0, + MISC_AEU_ENABLE_MCP_PRTY_BITS }, + { MISC_REG_AEU_ENABLE4_NIG_0, + MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }, + { MISC_REG_AEU_ENABLE4_PXP_0, + MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }, + { MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0, + MISC_AEU_ENABLE_MCP_PRTY_BITS }, + { MISC_REG_AEU_ENABLE4_NIG_1, + MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }, + { MISC_REG_AEU_ENABLE4_PXP_1, + MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS } }; static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable) @@ -665,14 +677,14 @@ static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable) u32 reg_val; for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) { - reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]); + reg_val = REG_RD(bp, mcp_attn_ctl_regs[i].addr); if (enable) - reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS; + reg_val |= mcp_attn_ctl_regs[i].bits; else - reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS; + reg_val &= ~mcp_attn_ctl_regs[i].bits; - REG_WR(bp, mcp_attn_ctl_regs[i], reg_val); + REG_WR(bp, mcp_attn_ctl_regs[i].addr, reg_val); } } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index d60a2ea3da19..11fc79585491 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -175,6 +175,7 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy, #define EDC_MODE_LINEAR 0x0022 #define EDC_MODE_LIMITING 0x0044 #define EDC_MODE_PASSIVE_DAC 0x0055 +#define EDC_MODE_ACTIVE_DAC 0x0066 /* ETS defines*/ #define DCBX_INVALID_COS (0xFF) @@ -3121,7 +3122,7 @@ static void bnx2x_bsc_module_sel(struct link_params *params) } static int bnx2x_bsc_read(struct link_params *params, - struct bnx2x_phy *phy, + struct bnx2x *bp, u8 sl_devid, u16 sl_addr, u8 lc_addr, @@ -3130,7 +3131,6 @@ static int bnx2x_bsc_read(struct link_params *params, { u32 val, i; int rc = 0; - struct bnx2x *bp = params->bp; if (xfer_cnt > 16) { DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. 
Max is 16 bytes\n", @@ -3684,6 +3684,41 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy, bnx2x_update_link_attr(params, vars->link_attr_sync); } +static void bnx2x_disable_kr2(struct link_params *params, + struct link_vars *vars, + struct bnx2x_phy *phy) +{ + struct bnx2x *bp = params->bp; + int i; + static struct bnx2x_reg_set reg_set[] = { + /* Step 1 - Program the TX/RX alignment markers */ + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000} + }; + DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n"); + + for (i = 0; i < ARRAY_SIZE(reg_set); i++) + bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, + reg_set[i].val); + vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; + bnx2x_update_link_attr(params, vars->link_attr_sync); + + vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT; +} + static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy, struct link_params *params) { @@ -3715,7 +3750,6 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { u16 lane, i, cl72_ctrl, an_adv = 0; - u16 ucode_ver; struct bnx2x *bp = params->bp; static struct bnx2x_reg_set reg_set[] = { {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, @@ -3806,15 +3840,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, /* Advertise pause */ bnx2x_ext_phy_set_pause(params, phy, vars); - /* Set KR Autoneg Work-Around flag for Warpcore version older than D108 - */ - bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_UC_INFO_B1_VERSION, &ucode_ver); - if (ucode_ver < 0xd108) { - DP(NETIF_MSG_LINK, "Enable AN KR work-around. WC ver:0x%x\n", - ucode_ver); - vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; - } + vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0x100); @@ -3838,6 +3864,21 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, bnx2x_set_aer_mmd(params, phy); bnx2x_warpcore_enable_AN_KR2(phy, params, vars); + } else { + /* Enable Auto-Detect to support 1G over CL37 as well */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10); + + /* Force cl48 sync_status LOW to avoid getting stuck in CL73 + * parallel-detect loop when CL73 and CL37 are enabled. 
+ */ + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI, 0x0800); + bnx2x_set_aer_mmd(params, phy); + + bnx2x_disable_kr2(params, vars, phy); } /* Enable Autoneg: only on the main lane */ @@ -4347,20 +4388,14 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; u32 serdes_net_if; u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0; - u16 lane = bnx2x_get_warpcore_lane(phy, params); vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1; if (!vars->turn_to_run_wc_rt) return; - /* Return if there is no link partner */ - if (!(bnx2x_warpcore_get_sigdet(phy, params))) { - DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n"); - return; - } - if (vars->rx_tx_asic_rst) { + u16 lane = bnx2x_get_warpcore_lane(phy, params); serdes_net_if = (REG_RD(bp, params->shmem_base + offsetof(struct shmem_region, dev_info. port_hw_config[params->port].default_cfg)) & @@ -4375,14 +4410,8 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy, /*10G KR*/ lnkup_kr = (gp_status1 >> (12+lane)) & 0x1; - DP(NETIF_MSG_LINK, - "gp_status1 0x%x\n", gp_status1); - if (lnkup_kr || lnkup) { - vars->rx_tx_asic_rst = 0; - DP(NETIF_MSG_LINK, - "link up, rx_tx_asic_rst 0x%x\n", - vars->rx_tx_asic_rst); + vars->rx_tx_asic_rst = 0; } else { /* Reset the lane to see if link comes up.*/ bnx2x_warpcore_reset_lane(bp, phy, 1); @@ -4507,10 +4536,14 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, * enabled transmitter to avoid current leakage in case * no module is connected */ - if (bnx2x_is_sfp_module_plugged(phy, params)) - bnx2x_sfp_module_detection(phy, params); - else - bnx2x_sfp_e3_set_transmitter(params, phy, 1); + if ((params->loopback_mode == LOOPBACK_NONE) || + (params->loopback_mode == LOOPBACK_EXT)) { + if (bnx2x_is_sfp_module_plugged(phy, params)) + bnx2x_sfp_module_detection(phy, params); + else + bnx2x_sfp_e3_set_transmitter(params, + phy, 1); + } bnx2x_warpcore_config_sfi(phy, params); break; @@ -5757,6 +5790,11 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy, rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed, duplex); + /* In case of KR link down, start up the recovering procedure */ + if ((!link_up) && (phy->media_type == ETH_PHY_KR) && + (!(phy->flags & FLAGS_WC_DUAL_MODE))) + vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; + DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n", vars->duplex, vars->flow_ctrl, vars->link_status); return rc; @@ -6345,9 +6383,15 @@ int bnx2x_set_led(struct link_params *params, * intended override. */ break; - } else + } else { + u32 nig_led_mode = ((params->hw_led_mode << + SHARED_HW_CFG_LED_MODE_SHIFT) == + SHARED_HW_CFG_LED_EXTPHY2) ? 
+ (SHARED_HW_CFG_LED_PHY1 >> + SHARED_HW_CFG_LED_MODE_SHIFT) : hw_led_mode; REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, - hw_led_mode); + nig_led_mode); + } REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0); /* Set blinking rate to ~15.9Hz */ @@ -6507,6 +6551,11 @@ static int bnx2x_link_initialize(struct link_params *params, params->phy[INT_PHY].config_init(phy, params, vars); } + /* Re-read this value in case it was changed inside config_init due to + * limitations of optic module + */ + vars->line_speed = params->phy[INT_PHY].req_line_speed; + /* Init external phy*/ if (non_ext_phy) { if (params->phy[INT_PHY].supported & @@ -7886,7 +7935,7 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy, usleep_range(1000, 2000); bnx2x_warpcore_power_module(params, 1); } - rc = bnx2x_bsc_read(params, phy, dev_addr, addr32, 0, byte_cnt, + rc = bnx2x_bsc_read(params, bp, dev_addr, addr32, 0, byte_cnt, data_array); } while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT)); @@ -8080,18 +8129,24 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, if (copper_module_type & SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) { DP(NETIF_MSG_LINK, "Active Copper cable detected\n"); - check_limiting_mode = 1; - } else if (copper_module_type & - SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) + *edc_mode = EDC_MODE_ACTIVE_DAC; + else + check_limiting_mode = 1; + } else { + *edc_mode = EDC_MODE_PASSIVE_DAC; + /* Even in case PASSIVE_DAC indication is not set, + * treat it as a passive DAC cable, since some cables + * don't have this indication. + */ + if (copper_module_type & + SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { DP(NETIF_MSG_LINK, "Passive Copper cable detected\n"); - *edc_mode = - EDC_MODE_PASSIVE_DAC; - } else { - DP(NETIF_MSG_LINK, - "Unknown copper-cable-type 0x%x !!!\n", - copper_module_type); - return -EINVAL; + } else { + DP(NETIF_MSG_LINK, + "Unknown copper-cable-type\n"); + } } break; } @@ -8555,6 +8610,7 @@ static void bnx2x_warpcore_set_limiting_mode(struct link_params *params, mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT; break; case EDC_MODE_PASSIVE_DAC: + case EDC_MODE_ACTIVE_DAC: mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC; break; default: @@ -9730,32 +9786,41 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, an_1000_val); - /* set 100 speed advertisement */ - if ((phy->req_line_speed == SPEED_AUTO_NEG) && - (phy->speed_cap_mask & - (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | - PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))) { - an_10_100_val |= (1<<7); - /* Enable autoneg and restart autoneg for legacy speeds */ - autoneg_val |= (1<<9 | 1<<12); - - if (phy->req_duplex == DUPLEX_FULL) + /* Set 10/100 speed advertisement */ + if (phy->req_line_speed == SPEED_AUTO_NEG) { + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) { + /* Enable autoneg and restart autoneg for legacy speeds + */ + autoneg_val |= (1<<9 | 1<<12); an_10_100_val |= (1<<8); - DP(NETIF_MSG_LINK, "Advertising 100M\n"); - } - /* set 10 speed advertisement */ - if (((phy->req_line_speed == SPEED_AUTO_NEG) && - (phy->speed_cap_mask & - (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | - PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) && - (phy->supported & - (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full)))) { - an_10_100_val |= (1<<5); - autoneg_val |= (1<<9 | 1<<12); - if (phy->req_duplex == DUPLEX_FULL) + DP(NETIF_MSG_LINK, "Advertising 100M-FD\n"); + } + + 
if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) { + /* Enable autoneg and restart autoneg for legacy speeds + */ + autoneg_val |= (1<<9 | 1<<12); + an_10_100_val |= (1<<7); + DP(NETIF_MSG_LINK, "Advertising 100M-HD\n"); + } + + if ((phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) && + (phy->supported & SUPPORTED_10baseT_Full)) { an_10_100_val |= (1<<6); - DP(NETIF_MSG_LINK, "Advertising 10M\n"); + autoneg_val |= (1<<9 | 1<<12); + DP(NETIF_MSG_LINK, "Advertising 10M-FD\n"); + } + + if ((phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) && + (phy->supported & SUPPORTED_10baseT_Half)) { + an_10_100_val |= (1<<5); + autoneg_val |= (1<<9 | 1<<12); + DP(NETIF_MSG_LINK, "Advertising 10M-HD\n"); + } } /* Only 10/100 are allowed to work in FORCE mode */ @@ -10609,10 +10674,18 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, 0x40); } else { + /* EXTPHY2 LED mode indicate that the 100M/1G/10G LED + * sources are all wired through LED1, rather than only + * 10G in other modes. + */ + val = ((params->hw_led_mode << + SHARED_HW_CFG_LED_MODE_SHIFT) == + SHARED_HW_CFG_LED_EXTPHY2) ? 0x98 : 0x80; + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, - 0x80); + val); /* Tell LED3 to blink on source */ bnx2x_cl45_read(bp, phy, @@ -10768,9 +10841,9 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, (1<<11)); if (((phy->req_line_speed == SPEED_AUTO_NEG) && - (phy->speed_cap_mask & - PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || - (phy->req_line_speed == SPEED_1000)) { + (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || + (phy->req_line_speed == SPEED_1000)) { an_1000_val |= (1<<8); autoneg_val |= (1<<9 | 1<<12); if (phy->req_duplex == DUPLEX_FULL) @@ -10786,30 +10859,32 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, 0x09, &an_1000_val); - /* Set 100 speed advertisement */ - if (((phy->req_line_speed == SPEED_AUTO_NEG) && - (phy->speed_cap_mask & - (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | - PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) { - an_10_100_val |= (1<<7); - /* Enable autoneg and restart autoneg for legacy speeds */ - autoneg_val |= (1<<9 | 1<<12); - - if (phy->req_duplex == DUPLEX_FULL) - an_10_100_val |= (1<<8); - DP(NETIF_MSG_LINK, "Advertising 100M\n"); - } - - /* Set 10 speed advertisement */ - if (((phy->req_line_speed == SPEED_AUTO_NEG) && - (phy->speed_cap_mask & - (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | - PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) { - an_10_100_val |= (1<<5); - autoneg_val |= (1<<9 | 1<<12); - if (phy->req_duplex == DUPLEX_FULL) + /* Advertise 10/100 link speed */ + if (phy->req_line_speed == SPEED_AUTO_NEG) { + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) { + an_10_100_val |= (1<<5); + autoneg_val |= (1<<9 | 1<<12); + DP(NETIF_MSG_LINK, "Advertising 10M-HD\n"); + } + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) { an_10_100_val |= (1<<6); - DP(NETIF_MSG_LINK, "Advertising 10M\n"); + autoneg_val |= (1<<9 | 1<<12); + DP(NETIF_MSG_LINK, "Advertising 10M-FD\n"); + } + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) { + an_10_100_val |= (1<<7); + autoneg_val |= (1<<9 | 1<<12); + DP(NETIF_MSG_LINK, "Advertising 100M-HD\n"); + } + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) { + an_10_100_val |= (1<<8); + autoneg_val |= (1<<9 | 1<<12); + DP(NETIF_MSG_LINK, "Advertising 100M-FD\n"); + } } /* Only 10/100 are allowed to work in FORCE mode */ @@ -13285,6 
+13360,10 @@ static u8 bnx2x_analyze_link_error(struct link_params *params, DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up, old_status, status); + /* Do not touch the link in case physical link down */ + if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) + return 1; + /* a. Update shmem->link_status accordingly * b. Update link_vars->link_up */ @@ -13432,43 +13511,6 @@ static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy, } } } -static void bnx2x_disable_kr2(struct link_params *params, - struct link_vars *vars, - struct bnx2x_phy *phy) -{ - struct bnx2x *bp = params->bp; - int i; - static struct bnx2x_reg_set reg_set[] = { - /* Step 1 - Program the TX/RX alignment markers */ - {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690}, - {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647}, - {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0}, - {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690}, - {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647}, - {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0}, - {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c}, - {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000}, - {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000}, - {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002}, - {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000}, - {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7}, - {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7}, - {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002}, - {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000} - }; - DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n"); - - for (i = 0; i < ARRAY_SIZE(reg_set); i++) - bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, - reg_set[i].val); - vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; - bnx2x_update_link_attr(params, vars->link_attr_sync); - - vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT; - /* Restart AN on leading lane */ - bnx2x_warpcore_restart_AN_KR(phy, params); -} - static void bnx2x_kr2_recovery(struct link_params *params, struct link_vars *vars, struct bnx2x_phy *phy) @@ -13530,7 +13572,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params, */ not_kr2_device = (((base_page & 0x8000) == 0) || (((base_page & 0x8000) && - ((next_page & 0xe0) == 0x2)))); + ((next_page & 0xe0) == 0x20)))); /* In case KR2 is already disabled, check if we need to re-enable it */ if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { @@ -13546,6 +13588,8 @@ static void bnx2x_check_kr2_wa(struct link_params *params, /* Disable KR2 on both lanes */ DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page); bnx2x_disable_kr2(params, vars, phy); + /* Restart AN on leading lane */ + bnx2x_warpcore_restart_AN_KR(phy, params); return; } } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index a6704b555042..8b3107b2fcc1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -503,9 +503,9 @@ void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, } /* issue a dmae command over the init-channel and wait for completion */ -int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae) +int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, + u32 *comp) { - u32 *wb_comp = bnx2x_sp(bp, wb_comp); int cnt = CHIP_REV_IS_SLOW(bp) ? 
(400000) : 4000; int rc = 0; @@ -518,14 +518,14 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae) spin_lock_bh(&bp->dmae_lock); /* reset completion */ - *wb_comp = 0; + *comp = 0; /* post the command on the channel used for initializations */ bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); /* wait for completion */ udelay(5); - while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { + while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { if (!cnt || (bp->recovery_state != BNX2X_RECOVERY_DONE && @@ -537,7 +537,7 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae) cnt--; udelay(50); } - if (*wb_comp & DMAE_PCI_ERR_FLAG) { + if (*comp & DMAE_PCI_ERR_FLAG) { BNX2X_ERR("DMAE PCI error!\n"); rc = DMAE_PCI_ERROR; } @@ -574,10 +574,12 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, dmae.len = len32; /* issue the command and wait for completion */ - rc = bnx2x_issue_dmae_with_comp(bp, &dmae); + rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); if (rc) { BNX2X_ERR("DMAE returned failure %d\n", rc); +#ifdef BNX2X_STOP_ON_ERROR bnx2x_panic(); +#endif } } @@ -611,10 +613,12 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) dmae.len = len32; /* issue the command and wait for completion */ - rc = bnx2x_issue_dmae_with_comp(bp, &dmae); + rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); if (rc) { BNX2X_ERR("DMAE returned failure %d\n", rc); +#ifdef BNX2X_STOP_ON_ERROR bnx2x_panic(); +#endif } } @@ -751,6 +755,10 @@ static int bnx2x_mc_assert(struct bnx2x *bp) return rc; } +#define MCPR_TRACE_BUFFER_SIZE (0x800) +#define SCRATCH_BUFFER_SIZE(bp) \ + (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000)) + void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) { u32 addr, val; @@ -775,7 +783,17 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) trace_shmem_base = bp->common.shmem_base; else trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr); - addr = trace_shmem_base - 0x800; + + /* sanity */ + if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE || + trace_shmem_base >= MCPR_SCRATCH_BASE(bp) + + SCRATCH_BUFFER_SIZE(bp)) { + BNX2X_ERR("Unable to dump trace buffer (mark %x)\n", + trace_shmem_base); + return; + } + + addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE; /* validate TRCB signature */ mark = REG_RD(bp, addr); @@ -787,14 +805,17 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) /* read cyclic buffer pointer */ addr += 4; mark = REG_RD(bp, addr); - mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) - + ((mark + 0x3) & ~0x3) - 0x08000000; + mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000; + if (mark >= trace_shmem_base || mark < addr + 4) { + BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n"); + return; + } printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark); printk("%s", lvl); /* dump buffer after the mark */ - for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) { + for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) { for (word = 0; word < 8; word++) data[word] = htonl(REG_RD(bp, offset + 4*word)); data[8] = 0x0; @@ -4280,65 +4301,60 @@ static void _print_next_block(int idx, const char *blk) pr_cont("%s%s", idx ? 
", " : "", blk); } -static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig, - int par_num, bool print) +static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig, + int *par_num, bool print) { - int i = 0; - u32 cur_bit = 0; + u32 cur_bit; + bool res; + int i; + + res = false; + for (i = 0; sig; i++) { - cur_bit = ((u32)0x1 << i); + cur_bit = (0x1UL << i); if (sig & cur_bit) { - switch (cur_bit) { - case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "BRB"); + res |= true; /* Each bit is real error! */ + + if (print) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: + _print_next_block((*par_num)++, "BRB"); _print_parity(bp, BRB1_REG_BRB1_PRTY_STS); - } - break; - case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "PARSER"); + break; + case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: + _print_next_block((*par_num)++, + "PARSER"); _print_parity(bp, PRS_REG_PRS_PRTY_STS); - } - break; - case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "TSDM"); + break; + case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: + _print_next_block((*par_num)++, "TSDM"); _print_parity(bp, TSDM_REG_TSDM_PRTY_STS); - } - break; - case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, + break; + case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: + _print_next_block((*par_num)++, "SEARCHER"); _print_parity(bp, SRC_REG_SRC_PRTY_STS); - } - break; - case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "TCM"); - _print_parity(bp, - TCM_REG_TCM_PRTY_STS); - } - break; - case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "TSEMI"); + break; + case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: + _print_next_block((*par_num)++, "TCM"); + _print_parity(bp, TCM_REG_TCM_PRTY_STS); + break; + case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: + _print_next_block((*par_num)++, + "TSEMI"); _print_parity(bp, TSEM_REG_TSEM_PRTY_STS_0); _print_parity(bp, TSEM_REG_TSEM_PRTY_STS_1); - } - break; - case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "XPB"); + break; + case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: + _print_next_block((*par_num)++, "XPB"); _print_parity(bp, GRCBASE_XPB + PB_REG_PB_PRTY_STS); + break; } - break; } /* Clear the bit */ @@ -4346,53 +4362,59 @@ static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig, } } - return par_num; + return res; } -static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, - int par_num, bool *global, +static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, + int *par_num, bool *global, bool print) { - int i = 0; - u32 cur_bit = 0; + u32 cur_bit; + bool res; + int i; + + res = false; + for (i = 0; sig; i++) { - cur_bit = ((u32)0x1 << i); + cur_bit = (0x1UL << i); if (sig & cur_bit) { + res |= true; /* Each bit is real error! 
*/ switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: if (print) { - _print_next_block(par_num++, "PBF"); + _print_next_block((*par_num)++, "PBF"); _print_parity(bp, PBF_REG_PBF_PRTY_STS); } break; case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: if (print) { - _print_next_block(par_num++, "QM"); + _print_next_block((*par_num)++, "QM"); _print_parity(bp, QM_REG_QM_PRTY_STS); } break; case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: if (print) { - _print_next_block(par_num++, "TM"); + _print_next_block((*par_num)++, "TM"); _print_parity(bp, TM_REG_TM_PRTY_STS); } break; case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: if (print) { - _print_next_block(par_num++, "XSDM"); + _print_next_block((*par_num)++, "XSDM"); _print_parity(bp, XSDM_REG_XSDM_PRTY_STS); } break; case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: if (print) { - _print_next_block(par_num++, "XCM"); + _print_next_block((*par_num)++, "XCM"); _print_parity(bp, XCM_REG_XCM_PRTY_STS); } break; case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: if (print) { - _print_next_block(par_num++, "XSEMI"); + _print_next_block((*par_num)++, + "XSEMI"); _print_parity(bp, XSEM_REG_XSEM_PRTY_STS_0); _print_parity(bp, @@ -4401,7 +4423,7 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, break; case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: if (print) { - _print_next_block(par_num++, + _print_next_block((*par_num)++, "DOORBELLQ"); _print_parity(bp, DORQ_REG_DORQ_PRTY_STS); @@ -4409,7 +4431,7 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, break; case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: if (print) { - _print_next_block(par_num++, "NIG"); + _print_next_block((*par_num)++, "NIG"); if (CHIP_IS_E1x(bp)) { _print_parity(bp, NIG_REG_NIG_PRTY_STS); @@ -4423,32 +4445,34 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, break; case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: if (print) - _print_next_block(par_num++, + _print_next_block((*par_num)++, "VAUX PCI CORE"); *global = true; break; case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: if (print) { - _print_next_block(par_num++, "DEBUG"); + _print_next_block((*par_num)++, + "DEBUG"); _print_parity(bp, DBG_REG_DBG_PRTY_STS); } break; case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: if (print) { - _print_next_block(par_num++, "USDM"); + _print_next_block((*par_num)++, "USDM"); _print_parity(bp, USDM_REG_USDM_PRTY_STS); } break; case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: if (print) { - _print_next_block(par_num++, "UCM"); + _print_next_block((*par_num)++, "UCM"); _print_parity(bp, UCM_REG_UCM_PRTY_STS); } break; case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: if (print) { - _print_next_block(par_num++, "USEMI"); + _print_next_block((*par_num)++, + "USEMI"); _print_parity(bp, USEM_REG_USEM_PRTY_STS_0); _print_parity(bp, @@ -4457,21 +4481,21 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, break; case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: if (print) { - _print_next_block(par_num++, "UPB"); + _print_next_block((*par_num)++, "UPB"); _print_parity(bp, GRCBASE_UPB + PB_REG_PB_PRTY_STS); } break; case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: if (print) { - _print_next_block(par_num++, "CSDM"); + _print_next_block((*par_num)++, "CSDM"); _print_parity(bp, CSDM_REG_CSDM_PRTY_STS); } break; case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: if (print) { - _print_next_block(par_num++, "CCM"); + _print_next_block((*par_num)++, "CCM"); _print_parity(bp, CCM_REG_CCM_PRTY_STS); } break; @@ -4482,80 +4506,73 @@ static int 
bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, } } - return par_num; + return res; } -static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig, - int par_num, bool print) +static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig, + int *par_num, bool print) { - int i = 0; - u32 cur_bit = 0; + u32 cur_bit; + bool res; + int i; + + res = false; + for (i = 0; sig; i++) { - cur_bit = ((u32)0x1 << i); + cur_bit = (0x1UL << i); if (sig & cur_bit) { - switch (cur_bit) { - case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "CSEMI"); + res |= true; /* Each bit is real error! */ + if (print) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: + _print_next_block((*par_num)++, + "CSEMI"); _print_parity(bp, CSEM_REG_CSEM_PRTY_STS_0); _print_parity(bp, CSEM_REG_CSEM_PRTY_STS_1); - } - break; - case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "PXP"); + break; + case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: + _print_next_block((*par_num)++, "PXP"); _print_parity(bp, PXP_REG_PXP_PRTY_STS); _print_parity(bp, PXP2_REG_PXP2_PRTY_STS_0); _print_parity(bp, PXP2_REG_PXP2_PRTY_STS_1); - } - break; - case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: - if (print) - _print_next_block(par_num++, - "PXPPCICLOCKCLIENT"); - break; - case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "CFC"); + break; + case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: + _print_next_block((*par_num)++, + "PXPPCICLOCKCLIENT"); + break; + case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: + _print_next_block((*par_num)++, "CFC"); _print_parity(bp, CFC_REG_CFC_PRTY_STS); - } - break; - case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "CDU"); + break; + case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: + _print_next_block((*par_num)++, "CDU"); _print_parity(bp, CDU_REG_CDU_PRTY_STS); - } - break; - case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "DMAE"); + break; + case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: + _print_next_block((*par_num)++, "DMAE"); _print_parity(bp, DMAE_REG_DMAE_PRTY_STS); - } - break; - case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "IGU"); + break; + case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: + _print_next_block((*par_num)++, "IGU"); if (CHIP_IS_E1x(bp)) _print_parity(bp, HC_REG_HC_PRTY_STS); else _print_parity(bp, IGU_REG_IGU_PRTY_STS); - } - break; - case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "MISC"); + break; + case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: + _print_next_block((*par_num)++, "MISC"); _print_parity(bp, MISC_REG_MISC_PRTY_STS); + break; } - break; } /* Clear the bit */ @@ -4563,40 +4580,49 @@ static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig, } } - return par_num; + return res; } -static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, - bool *global, bool print) +static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig, + int *par_num, bool *global, + bool print) { - int i = 0; - u32 cur_bit = 0; + bool res = false; + u32 cur_bit; + int i; + for (i = 0; sig; i++) { - cur_bit = ((u32)0x1 << i); + cur_bit = (0x1UL << i); if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: if (print) - _print_next_block(par_num++, "MCP ROM"); + _print_next_block((*par_num)++, + "MCP ROM"); *global = true; 
+ res |= true; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: if (print) - _print_next_block(par_num++, + _print_next_block((*par_num)++, "MCP UMP RX"); *global = true; + res |= true; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: if (print) - _print_next_block(par_num++, + _print_next_block((*par_num)++, "MCP UMP TX"); *global = true; + res |= true; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: if (print) - _print_next_block(par_num++, + _print_next_block((*par_num)++, "MCP SCPAD"); - *global = true; + /* clear latched SCPAD PATIRY from MCP */ + REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, + 1UL << 10); break; } @@ -4605,45 +4631,50 @@ static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, } } - return par_num; + return res; } -static int bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig, - int par_num, bool print) +static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig, + int *par_num, bool print) { - int i = 0; - u32 cur_bit = 0; + u32 cur_bit; + bool res; + int i; + + res = false; + for (i = 0; sig; i++) { - cur_bit = ((u32)0x1 << i); + cur_bit = (0x1UL << i); if (sig & cur_bit) { - switch (cur_bit) { - case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "PGLUE_B"); + res |= true; /* Each bit is real error! */ + if (print) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: + _print_next_block((*par_num)++, + "PGLUE_B"); _print_parity(bp, - PGLUE_B_REG_PGLUE_B_PRTY_STS); - } - break; - case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: - if (print) { - _print_next_block(par_num++, "ATC"); + PGLUE_B_REG_PGLUE_B_PRTY_STS); + break; + case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: + _print_next_block((*par_num)++, "ATC"); _print_parity(bp, ATC_REG_ATC_PRTY_STS); + break; } - break; } - /* Clear the bit */ sig &= ~cur_bit; } } - return par_num; + return res; } static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, u32 *sig) { + bool res = false; + if ((sig[0] & HW_PRTY_ASSERT_SET_0) || (sig[1] & HW_PRTY_ASSERT_SET_1) || (sig[2] & HW_PRTY_ASSERT_SET_2) || @@ -4660,23 +4691,22 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, if (print) netdev_err(bp->dev, "Parity errors detected in blocks: "); - par_num = bnx2x_check_blocks_with_parity0(bp, - sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print); - par_num = bnx2x_check_blocks_with_parity1(bp, - sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print); - par_num = bnx2x_check_blocks_with_parity2(bp, - sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print); - par_num = bnx2x_check_blocks_with_parity3( - sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print); - par_num = bnx2x_check_blocks_with_parity4(bp, - sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print); + res |= bnx2x_check_blocks_with_parity0(bp, + sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print); + res |= bnx2x_check_blocks_with_parity1(bp, + sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print); + res |= bnx2x_check_blocks_with_parity2(bp, + sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print); + res |= bnx2x_check_blocks_with_parity3(bp, + sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print); + res |= bnx2x_check_blocks_with_parity4(bp, + sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print); if (print) pr_cont("\n"); + } - return true; - } else - return false; + return res; } /** @@ -4703,6 +4733,14 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print) attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); + /* 
Since MCP attentions can't be disabled inside the block, we need to + * read AEU registers to see whether they're currently disabled + */ + attn.sig[3] &= ((REG_RD(bp, + !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 + : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) & + MISC_AEU_ENABLE_MCP_PRTY_BITS) | + ~MISC_AEU_ENABLE_MCP_PRTY_BITS); if (!CHIP_IS_E1x(bp)) attn.sig[4] = REG_RD(bp, @@ -5197,18 +5235,18 @@ static void bnx2x_eq_int(struct bnx2x *bp) case EVENT_RING_OPCODE_STOP_TRAFFIC: DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n"); + bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_TX_STOP)) break; - bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); goto next_spqe; case EVENT_RING_OPCODE_START_TRAFFIC: DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n"); + bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_TX_START)) break; - bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); goto next_spqe; case EVENT_RING_OPCODE_FUNCTION_UPDATE: @@ -5447,26 +5485,24 @@ static void bnx2x_timer(unsigned long data) if (IS_PF(bp) && !BP_NOMCP(bp)) { int mb_idx = BP_FW_MB_IDX(bp); - u32 drv_pulse; - u32 mcp_pulse; + u16 drv_pulse; + u16 mcp_pulse; ++bp->fw_drv_pulse_wr_seq; bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; - /* TBD - add SYSTEM_TIME */ drv_pulse = bp->fw_drv_pulse_wr_seq; bnx2x_drv_pulse(bp); mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & MCP_PULSE_SEQ_MASK); /* The delta between driver pulse and mcp response - * should be 1 (before mcp response) or 0 (after mcp response) + * should not get too big. If the MFW is more than 5 pulses + * behind, we should worry about it enough to generate an error + * log. */ - if ((drv_pulse != mcp_pulse) && - (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { - /* someone lost a heartbeat... */ - BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n", + if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5) + BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n", drv_pulse, mcp_pulse); - } } if (bp->state == BNX2X_STATE_OPEN) @@ -7120,7 +7156,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) int port = BP_PORT(bp); int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; u32 low, high; - u32 val; + u32 val, reg; DP(NETIF_MSG_HW, "starting port init port %d\n", port); @@ -7265,6 +7301,17 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) val |= CHIP_IS_E1(bp) ? 0 : 0x10; REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); + /* SCPAD_PARITY should NOT trigger close the gates */ + reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0; + REG_WR(bp, reg, + REG_RD(bp, reg) & + ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY); + + reg = port ? 
MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0; + REG_WR(bp, reg, + REG_RD(bp, reg) & + ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY); + bnx2x_init_block(bp, BLOCK_NIG, init_phase); if (!CHIP_IS_E1x(bp)) { @@ -9309,6 +9356,10 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global) bnx2x_process_kill_chip_reset(bp, global); barrier(); + /* clear errors in PGB */ + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f); + /* Recover after reset: */ /* MCP */ if (global && bnx2x_reset_mcp_comp(bp, val)) @@ -9663,11 +9714,10 @@ sp_rtnl_not_reset: &bp->sp_rtnl_state)) bnx2x_pf_set_vfs_vlan(bp); - if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) + if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) { bnx2x_dcbx_stop_hw_tx(bp); - - if (test_and_clear_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state)) bnx2x_dcbx_resume_hw_tx(bp); + } /* work which needs rtnl lock not-taken (as it takes the lock itself and * can be called from other contexts as well) @@ -9873,7 +9923,7 @@ static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp) static bool bnx2x_prev_is_path_marked(struct bnx2x *bp) { struct bnx2x_prev_path_list *tmp_list; - int rc = false; + bool rc = false; if (down_trylock(&bnx2x_prev_sem)) return false; @@ -11143,6 +11193,14 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp) bnx2x_get_cnic_mac_hwinfo(bp); } + if (!BP_NOMCP(bp)) { + /* Read physical port identifier from shmem */ + val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); + val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); + bnx2x_set_mac_buf(bp->phys_port_id, val, val2); + bp->flags |= HAS_PHYS_PORT_ID; + } + memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr)) @@ -11389,9 +11447,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) } } - /* adjust igu_sb_cnt to MF for E1x */ - if (CHIP_IS_E1x(bp) && IS_MF(bp)) - bp->igu_sb_cnt /= E1HVN_MAX; + /* adjust igu_sb_cnt to MF for E1H */ + if (CHIP_IS_E1H(bp) && IS_MF(bp)) + bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT); /* port info */ bnx2x_get_port_hwinfo(bp); @@ -11679,9 +11737,6 @@ static int bnx2x_init_bp(struct bnx2x *bp) static int bnx2x_open(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); - bool global = false; - int other_engine = BP_PATH(bp) ? 0 : 1; - bool other_load_status, load_status; int rc; bp->stats_init = true; @@ -11697,6 +11752,10 @@ static int bnx2x_open(struct net_device *dev) * Parity recovery is only relevant for PF driver. */ if (IS_PF(bp)) { + int other_engine = BP_PATH(bp) ? 
0 : 1; + bool other_load_status, load_status; + bool global = false; + other_load_status = bnx2x_get_load_status(bp, other_engine); load_status = bnx2x_get_load_status(bp, BP_PATH(bp)); if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) || @@ -11740,7 +11799,7 @@ static int bnx2x_open(struct net_device *dev) rc = bnx2x_nic_load(bp, LOAD_OPEN); if (rc) return rc; - return bnx2x_open_epilog(bp); + return 0; } /* called with rtnl_lock */ @@ -12038,6 +12097,20 @@ static int bnx2x_validate_addr(struct net_device *dev) return 0; } +static int bnx2x_get_phys_port_id(struct net_device *netdev, + struct netdev_phys_port_id *ppid) +{ + struct bnx2x *bp = netdev_priv(netdev); + + if (!(bp->flags & HAS_PHYS_PORT_ID)) + return -EOPNOTSUPP; + + ppid->id_len = sizeof(bp->phys_port_id); + memcpy(ppid->id, bp->phys_port_id, ppid->id_len); + + return 0; +} + static const struct net_device_ops bnx2x_netdev_ops = { .ndo_open = bnx2x_open, .ndo_stop = bnx2x_close, @@ -12067,19 +12140,15 @@ static const struct net_device_ops bnx2x_netdev_ops = { #ifdef CONFIG_NET_RX_BUSY_POLL .ndo_busy_poll = bnx2x_low_latency_recv, #endif + .ndo_get_phys_port_id = bnx2x_get_phys_port_id, }; static int bnx2x_set_coherency_mask(struct bnx2x *bp) { struct device *dev = &bp->pdev->dev; - if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { - bp->flags |= USING_DAC_FLAG; - if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) { - dev_err(dev, "dma_set_coherent_mask failed, aborting\n"); - return -EIO; - } - } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) { + if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 && + dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) { dev_err(dev, "System does not support DMA, aborting\n"); return -EIO; } @@ -12231,10 +12300,13 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX; if (!CHIP_IS_E1x(bp)) { - dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL; + dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT; dev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | + NETIF_F_GSO_IPIP | + NETIF_F_GSO_SIT | NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL; } @@ -12242,8 +12314,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX; - if (bp->flags & USING_DAC_FLAG) - dev->features |= NETIF_F_HIGHDMA; + dev->features |= NETIF_F_HIGHDMA; /* Add Loopback capability to the device */ dev->hw_features |= NETIF_F_LOOPBACK; @@ -12268,34 +12339,11 @@ err_out_release: err_out_disable: pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); err_out: return rc; } -static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width, - enum bnx2x_pci_bus_speed *speed) -{ - u32 link_speed, val = 0; - - pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val); - *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT; - - link_speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT; - - switch (link_speed) { - case 3: - *speed = BNX2X_PCI_LINK_SPEED_8000; - break; - case 2: - *speed = BNX2X_PCI_LINK_SPEED_5000; - break; - default: - *speed = BNX2X_PCI_LINK_SPEED_2500; - } -} - static int bnx2x_check_firmware(struct bnx2x *bp) { const struct firmware *firmware = bp->firmware; @@ -12606,24 +12654,24 @@ static int set_max_cos_est(int chip_id) return 
BNX2X_MULTI_TX_COS_E1X; case BCM57712: case BCM57712_MF: - case BCM57712_VF: return BNX2X_MULTI_TX_COS_E2_E3A0; case BCM57800: case BCM57800_MF: - case BCM57800_VF: case BCM57810: case BCM57810_MF: case BCM57840_4_10: case BCM57840_2_20: case BCM57840_O: case BCM57840_MFO: - case BCM57810_VF: case BCM57840_MF: - case BCM57840_VF: case BCM57811: case BCM57811_MF: - case BCM57811_VF: return BNX2X_MULTI_TX_COS_E3B0; + case BCM57712_VF: + case BCM57800_VF: + case BCM57810_VF: + case BCM57840_VF: + case BCM57811_VF: return 1; default: pr_err("Unknown board_type (%d), aborting\n", chip_id); @@ -12652,8 +12700,8 @@ static int bnx2x_init_one(struct pci_dev *pdev, { struct net_device *dev = NULL; struct bnx2x *bp; - int pcie_width; - enum bnx2x_pci_bus_speed pcie_speed; + enum pcie_link_width pcie_width; + enum pci_bus_speed pcie_speed; int rc, max_non_def_sbs; int rx_count, tx_count, rss_count, doorbell_size; int max_cos_est; @@ -12802,18 +12850,19 @@ static int bnx2x_init_one(struct pci_dev *pdev, dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); rtnl_unlock(); } - - bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed); - BNX2X_DEV_INFO("got pcie width %d and speed %d\n", - pcie_width, pcie_speed); - - BNX2X_DEV_INFO("%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n", + if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) || + pcie_speed == PCI_SPEED_UNKNOWN || + pcie_width == PCIE_LNK_WIDTH_UNKNOWN) + BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n"); + else + BNX2X_DEV_INFO( + "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n", board_info[ent->driver_data].name, (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), pcie_width, - pcie_speed == BNX2X_PCI_LINK_SPEED_2500 ? "2.5GHz" : - pcie_speed == BNX2X_PCI_LINK_SPEED_5000 ? "5.0GHz" : - pcie_speed == BNX2X_PCI_LINK_SPEED_8000 ? "8.0GHz" : + pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" : + pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" : + pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" : "Unknown", dev->base_addr, bp->pdev->irq, dev->dev_addr); @@ -12832,7 +12881,6 @@ init_one_exit: pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); return rc; } @@ -12915,7 +12963,6 @@ static void __bnx2x_remove(struct pci_dev *pdev, pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); } static void bnx2x_remove_one(struct pci_dev *pdev) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 5ecf267dc4cc..14ffb6e56e59 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -2864,6 +2864,17 @@ #define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ 0x9430 #define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE 0x9434 #define PGLUE_B_REG_INTERNAL_VFID_ENABLE 0x9438 +/* [W 7] Writing 1 to each bit in this register clears a corresponding error + * details register and enables logging new error details. Bit 0 - clears + * INCORRECT_RCV_DETAILS; Bit 1 - clears RX_ERR_DETAILS; Bit 2 - clears + * TX_ERR_WR_ADD_31_0 TX_ERR_WR_ADD_63_32 TX_ERR_WR_DETAILS + * TX_ERR_WR_DETAILS2 TX_ERR_RD_ADD_31_0 TX_ERR_RD_ADD_63_32 + * TX_ERR_RD_DETAILS TX_ERR_RD_DETAILS2 TX_ERR_WR_DETAILS_ICPL; Bit 3 - + * clears VF_LENGTH_VIOLATION_DETAILS. Bit 4 - clears + * VF_GRC_SPACE_VIOLATION_DETAILS. Bit 5 - clears RX_TCPL_ERR_DETAILS. Bit 6 + * - clears TCPL_IN_TWO_RCBS_DETAILS. 
*/ +#define PGLUE_B_REG_LATCHED_ERRORS_CLR 0x943c + /* [R 9] Interrupt register #0 read */ #define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298 /* [RC 9] Interrupt register #0 read clear */ @@ -7168,6 +7179,7 @@ Theotherbitsarereservedandshouldbezero*/ #define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca #define MDIO_WC_REG_RX2_PCI_CTRL 0x80da #define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea +#define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI 0x80fa #define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104 #define MDIO_WC_REG_XGXS_STATUS3 0x8129 #define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 9fbeee522d2c..18438a504d57 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -1217,9 +1217,6 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, ETH_VLAN_FILTER_CLASSIFY, config); } -#define list_next_entry(pos, member) \ - list_entry((pos)->member.next, typeof(*(pos)), member) - /** * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element * @@ -2041,6 +2038,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, struct bnx2x_vlan_mac_ramrod_params p; struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n; + unsigned long flags; int read_lock; int rc = 0; @@ -2049,8 +2047,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, spin_lock_bh(&exeq->lock); list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) { - if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags == - *vlan_mac_flags) { + flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags; + if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) == + BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) { rc = exeq->remove(bp, exeq->owner, exeq_pos); if (rc) { BNX2X_ERR("Failed to remove command\n"); @@ -2083,7 +2082,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, return read_lock; list_for_each_entry(pos, &o->head, link) { - if (pos->vlan_mac_flags == *vlan_mac_flags) { + flags = pos->vlan_mac_flags; + if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) == + BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) { p.user_req.vlan_mac_flags = pos->vlan_mac_flags; memcpy(&p.user_req.u, &pos->u, sizeof(pos->u)); rc = bnx2x_config_vlan_mac(bp, &p); @@ -4385,8 +4386,11 @@ int bnx2x_config_rss(struct bnx2x *bp, struct bnx2x_raw_obj *r = &o->raw; /* Do nothing if only driver cleanup was requested */ - if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) + if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { + DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n", + p->ramrod_flags); return 0; + } r->set_pending(r); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 658f4e33abf9..6a53c15c85a3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -266,6 +266,13 @@ enum { BNX2X_DONT_CONSUME_CAM_CREDIT, BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, }; +/* When looking for matching filters, some flags are not interesting */ +#define BNX2X_VLAN_MAC_CMP_MASK (1 << BNX2X_UC_LIST_MAC | \ + 1 << BNX2X_ETH_MAC | \ + 1 << BNX2X_ISCSI_ETH_MAC | \ + 1 << BNX2X_NETQ_ETH_MAC) +#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \ + ((flags) & BNX2X_VLAN_MAC_CMP_MASK) struct bnx2x_vlan_mac_ramrod_params { /* Object to run the command from */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 2604b6204abe..e7845e5be1c7 100644 --- 
a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -470,10 +470,10 @@ static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp, bnx2x_vfop_qdtor, cmd->done); return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor, cmd->block); + } else { + BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid); + return -ENOMEM; } - DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop. rc %d\n", - vf->abs_vfid, vfop->rc); - return -ENOMEM; } static void @@ -1209,6 +1209,11 @@ static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf) /* next state */ vfop->state = BNX2X_VFOP_RXMODE_DONE; + /* record the accept flags in vfdb so hypervisor can modify them + * if necessary + */ + bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) = + ramrod->rx_accept_flags; vfop->rc = bnx2x_config_rx_mode(bp, ramrod); bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); op_err: @@ -1224,39 +1229,43 @@ op_pending: return; } +static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid, + struct bnx2x_rx_mode_ramrod_params *ramrod, + struct bnx2x_virtf *vf, + unsigned long accept_flags) +{ + struct bnx2x_vf_queue *vfq = vfq_get(vf, qid); + + memset(ramrod, 0, sizeof(*ramrod)); + ramrod->cid = vfq->cid; + ramrod->cl_id = vfq_cl_id(vf, vfq); + ramrod->rx_mode_obj = &bp->rx_mode_obj; + ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid); + ramrod->rx_accept_flags = accept_flags; + ramrod->tx_accept_flags = accept_flags; + ramrod->pstate = &vf->filter_state; + ramrod->state = BNX2X_FILTER_RX_MODE_PENDING; + + set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); + set_bit(RAMROD_RX, &ramrod->ramrod_flags); + set_bit(RAMROD_TX, &ramrod->ramrod_flags); + + ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2); + ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2); +} + int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vfop_cmd *cmd, int qid, unsigned long accept_flags) { - struct bnx2x_vf_queue *vfq = vfq_get(vf, qid); struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); if (vfop) { struct bnx2x_rx_mode_ramrod_params *ramrod = &vf->op_params.rx_mode; - memset(ramrod, 0, sizeof(*ramrod)); - - /* Prepare ramrod parameters */ - ramrod->cid = vfq->cid; - ramrod->cl_id = vfq_cl_id(vf, vfq); - ramrod->rx_mode_obj = &bp->rx_mode_obj; - ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid); - - ramrod->rx_accept_flags = accept_flags; - ramrod->tx_accept_flags = accept_flags; - ramrod->pstate = &vf->filter_state; - ramrod->state = BNX2X_FILTER_RX_MODE_PENDING; - - set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); - set_bit(RAMROD_RX, &ramrod->ramrod_flags); - set_bit(RAMROD_TX, &ramrod->ramrod_flags); - - ramrod->rdata = - bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2); - ramrod->rdata_mapping = - bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2); + bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags); bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG, bnx2x_vfop_rxmode, cmd->done); @@ -1819,7 +1828,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID); if (fid & IGU_FID_ENCODE_IS_PF) current_pf = fid & IGU_FID_PF_NUM_MASK; - else if (current_pf == BP_ABS_FUNC(bp)) + else if (current_pf == BP_FUNC(bp)) bnx2x_vf_set_igu_info(bp, sb_id, (fid & IGU_FID_VF_NUM_MASK)); DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n", @@ -2018,6 +2027,8 @@ failed: void bnx2x_iov_remove_one(struct bnx2x *bp) { + int vf_idx; + /* if SRIOV is not enabled there's nothing to do */ if (!IS_SRIOV(bp)) return; @@ -2026,6 +2037,18 @@ void 
bnx2x_iov_remove_one(struct bnx2x *bp) pci_disable_sriov(bp->pdev); DP(BNX2X_MSG_IOV, "sriov disabled\n"); + /* disable access to all VFs */ + for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) { + bnx2x_pretend_func(bp, + HW_VF_HANDLE(bp, + bp->vfdb->sriov.first_vf_in_pf + + vf_idx)); + DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n", + bp->vfdb->sriov.first_vf_in_pf + vf_idx); + bnx2x_vf_enable_internal(bp, 0); + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); + } + /* free vf database */ __bnx2x_iov_free_vfdb(bp); } @@ -2802,7 +2825,7 @@ struct set_vf_state_cookie { u8 state; }; -void bnx2x_set_vf_state(void *cookie) +static void bnx2x_set_vf_state(void *cookie) { struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie; @@ -3100,6 +3123,11 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) { struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); + if (!IS_SRIOV(bp)) { + BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n"); + return -EINVAL; + } + DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n", num_vfs_param, BNX2X_NR_VIRTFN(bp)); @@ -3180,13 +3208,19 @@ int bnx2x_enable_sriov(struct bnx2x *bp) /* set local queue arrays */ vf->vfqs = &bp->vfdb->vfqs[qcount]; qcount += vf_sb_count(vf); + bnx2x_iov_static_resc(bp, vf); } - /* prepare msix vectors in VF configuration space */ + /* prepare msix vectors in VF configuration space - the value in the + * PCI configuration space should be the index of the last entry, + * namely one less than the actual size of the table + */ for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, - num_vf_queues); + num_vf_queues - 1); + DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n", + vf_idx, num_vf_queues - 1); } bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); @@ -3194,7 +3228,7 @@ int bnx2x_enable_sriov(struct bnx2x *bp) * the "acquire" messages to appear on the VF PF channel. 
*/ DP(BNX2X_MSG_IOV, "about to call enable sriov\n"); - pci_disable_sriov(bp->pdev); + bnx2x_disable_sriov(bp); rc = pci_enable_sriov(bp->pdev, req_vfs); if (rc) { BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); @@ -3222,8 +3256,9 @@ void bnx2x_disable_sriov(struct bnx2x *bp) pci_disable_sriov(bp->pdev); } -int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, struct bnx2x_virtf **vf, - struct pf_vf_bulletin_content **bulletin) +static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, + struct bnx2x_virtf **vf, + struct pf_vf_bulletin_content **bulletin) { if (bp->state != BNX2X_STATE_OPEN) { BNX2X_ERR("vf ndo called though PF is down\n"); @@ -3387,14 +3422,16 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true); if (rc) { BNX2X_ERR("failed to delete eth macs\n"); - return -EINVAL; + rc = -EINVAL; + goto out; } /* remove existing uc list macs */ rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true); if (rc) { BNX2X_ERR("failed to delete uc_list macs\n"); - return -EINVAL; + rc = -EINVAL; + goto out; } /* configure the new mac to device */ @@ -3402,6 +3439,7 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true, BNX2X_ETH_MAC, &ramrod_flags); +out: bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); } @@ -3410,10 +3448,18 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) { + struct bnx2x_queue_state_params q_params = {NULL}; + struct bnx2x_vlan_mac_ramrod_params ramrod_param; + struct bnx2x_queue_update_params *update_params; + struct pf_vf_bulletin_content *bulletin = NULL; + struct bnx2x_rx_mode_ramrod_params rx_ramrod; struct bnx2x *bp = netdev_priv(dev); - int rc, q_logical_state; + struct bnx2x_vlan_mac_obj *vlan_obj; + unsigned long vlan_mac_flags = 0; + unsigned long ramrod_flags = 0; struct bnx2x_virtf *vf = NULL; - struct pf_vf_bulletin_content *bulletin = NULL; + unsigned long accept_flags; + int rc; /* sanity and init */ rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); @@ -3431,103 +3477,119 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) /* update PF's copy of the VF's bulletin. No point in posting the vlan * to the VF since it doesn't have anything to do with it. But it useful * to store it here in case the VF is not up yet and we can only - * configure the vlan later when it does. + * configure the vlan later when it does. Treat vlan id 0 as remove the + * Host tag. */ - bulletin->valid_bitmap |= 1 << VLAN_VALID; + if (vlan > 0) + bulletin->valid_bitmap |= 1 << VLAN_VALID; + else + bulletin->valid_bitmap &= ~(1 << VLAN_VALID); bulletin->vlan = vlan; /* is vf initialized and queue set up? 
*/ - q_logical_state = - bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); - if (vf->state == VF_ENABLED && - q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { - /* configure the vlan in device on this vf's queue */ - unsigned long ramrod_flags = 0; - unsigned long vlan_mac_flags = 0; - struct bnx2x_vlan_mac_obj *vlan_obj = - &bnx2x_leading_vfq(vf, vlan_obj); - struct bnx2x_vlan_mac_ramrod_params ramrod_param; - struct bnx2x_queue_state_params q_params = {NULL}; - struct bnx2x_queue_update_params *update_params; + if (vf->state != VF_ENABLED || + bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) != + BNX2X_Q_LOGICAL_STATE_ACTIVE) + return rc; - rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); - if (rc) - return rc; - memset(&ramrod_param, 0, sizeof(ramrod_param)); + /* configure the vlan in device on this vf's queue */ + vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); + rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); + if (rc) + return rc; - /* must lock vfpf channel to protect against vf flows */ - bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); + /* must lock vfpf channel to protect against vf flows */ + bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); - /* remove existing vlans */ - __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); - rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, - &ramrod_flags); - if (rc) { - BNX2X_ERR("failed to delete vlans\n"); - return -EINVAL; - } + /* remove existing vlans */ + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, + &ramrod_flags); + if (rc) { + BNX2X_ERR("failed to delete vlans\n"); + rc = -EINVAL; + goto out; + } + + /* need to remove/add the VF's accept_any_vlan bit */ + accept_flags = bnx2x_leading_vfq(vf, accept_flags); + if (vlan) + clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); + else + set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); + + bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf, + accept_flags); + bnx2x_leading_vfq(vf, accept_flags) = accept_flags; + bnx2x_config_rx_mode(bp, &rx_ramrod); + + /* configure the new vlan to device */ + memset(&ramrod_param, 0, sizeof(ramrod_param)); + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + ramrod_param.vlan_mac_obj = vlan_obj; + ramrod_param.ramrod_flags = ramrod_flags; + set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, + &ramrod_param.user_req.vlan_mac_flags); + ramrod_param.user_req.u.vlan.vlan = vlan; + ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; + rc = bnx2x_config_vlan_mac(bp, &ramrod_param); + if (rc) { + BNX2X_ERR("failed to configure vlan\n"); + rc = -EINVAL; + goto out; + } - /* send queue update ramrod to configure default vlan and silent - * vlan removal + /* send queue update ramrod to configure default vlan and silent + * vlan removal + */ + __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + q_params.cmd = BNX2X_Q_CMD_UPDATE; + q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); + update_params = &q_params.params.update; + __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, + &update_params->update_flags); + __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, + &update_params->update_flags); + if (vlan == 0) { + /* if vlan is 0 then we want to leave the VF traffic + * untagged, and leave the incoming traffic untouched + * (i.e. do not remove any vlan tags). 
+ */ + __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, + &update_params->update_flags); + __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, + &update_params->update_flags); + } else { + /* configure default vlan to vf queue and set silent + * vlan removal (the vf remains unaware of this vlan). */ - __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); - q_params.cmd = BNX2X_Q_CMD_UPDATE; - q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); - update_params = &q_params.params.update; - __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, + __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &update_params->update_flags); - __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, + __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &update_params->update_flags); + update_params->def_vlan = vlan; + update_params->silent_removal_value = + vlan & VLAN_VID_MASK; + update_params->silent_removal_mask = VLAN_VID_MASK; + } - if (vlan == 0) { - /* if vlan is 0 then we want to leave the VF traffic - * untagged, and leave the incoming traffic untouched - * (i.e. do not remove any vlan tags). - */ - __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, - &update_params->update_flags); - __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, - &update_params->update_flags); - } else { - /* configure the new vlan to device */ - __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); - ramrod_param.vlan_mac_obj = vlan_obj; - ramrod_param.ramrod_flags = ramrod_flags; - ramrod_param.user_req.u.vlan.vlan = vlan; - ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; - rc = bnx2x_config_vlan_mac(bp, &ramrod_param); - if (rc) { - BNX2X_ERR("failed to configure vlan\n"); - return -EINVAL; - } - - /* configure default vlan to vf queue and set silent - * vlan removal (the vf remains unaware of this vlan). - */ - update_params = &q_params.params.update; - __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, - &update_params->update_flags); - __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, - &update_params->update_flags); - update_params->def_vlan = vlan; - } + /* Update the Queue state */ + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Failed to configure default VLAN\n"); + goto out; + } - /* Update the Queue state */ - rc = bnx2x_queue_state_change(bp, &q_params); - if (rc) { - BNX2X_ERR("Failed to configure default VLAN\n"); - return rc; - } - /* clear the flag indicating that this VF needs its vlan - * (will only be set if the HV configured th Vlan before vf was - * and we were called because the VF came up later - */ - vf->cfg_flags &= ~VF_CFG_VLAN; + /* clear the flag indicating that this VF needs its vlan + * (will only be set if the HV configured the Vlan before vf was + * up and we were called because the VF came up later + */ +out: + vf->cfg_flags &= ~VF_CFG_VLAN; + bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); - bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); - } - return 0; + return rc; } /* crc is the first field in the bulletin board. Compute the crc over the @@ -3634,29 +3696,6 @@ alloc_mem_err: return -ENOMEM; } -int bnx2x_open_epilog(struct bnx2x *bp) -{ - /* Enable sriov via delayed work. This must be done via delayed work - * because it causes the probe of the vf devices to be run, which invoke - * register_netdevice which must have rtnl lock taken. As we are holding - * the lock right now, that could only work if the probe would not take - * the lock. However, as the probe of the vf may be called from other - * contexts as well (such as passthrough to vm fails) it can't assume - * the lock is being held for it. 
Using delayed work here allows the - * probe code to simply take the lock (i.e. wait for it to be released - * if it is being held). We only want to do this if the number of VFs - * was set before PF driver was loaded. - */ - if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) { - smp_mb__before_clear_bit(); - set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); - schedule_delayed_work(&bp->sp_rtnl_task, 0); - } - - return 0; -} - void bnx2x_iov_channel_down(struct bnx2x *bp) { int vf_idx; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 059f0d460af2..8c213fa52174 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -74,6 +74,7 @@ struct bnx2x_vf_queue { /* VLANs object */ struct bnx2x_vlan_mac_obj vlan_obj; atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */ + unsigned long accept_flags; /* last accept flags configured */ /* Queue Slow-path State object */ struct bnx2x_queue_sp_obj sp_obj; @@ -782,7 +783,6 @@ static inline int bnx2x_vf_headroom(struct bnx2x *bp) void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp); int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs); void bnx2x_iov_channel_down(struct bnx2x *bp); -int bnx2x_open_epilog(struct bnx2x *bp); #else /* CONFIG_BNX2X_SRIOV */ @@ -842,7 +842,6 @@ static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; } static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {} static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; } static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {} -static inline int bnx2x_open_epilog(struct bnx2x *bp) {return 0; } #endif /* CONFIG_BNX2X_SRIOV */ #endif /* bnx2x_sriov.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 86436c77af03..3b75070411aa 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -196,7 +196,7 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp) } else if (bp->func_stx) { *stats_comp = 0; - bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); + bnx2x_issue_dmae_with_comp(bp, dmae, stats_comp); } } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 6cfb88732452..0756d7dabdd5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -60,6 +60,30 @@ void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv) mutex_unlock(&bp->vf2pf_mutex); } +/* Finds a TLV by type in a TLV buffer; If found, returns pointer to the TLV */ +static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list, + enum channel_tlvs req_tlv) +{ + struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list; + + do { + if (tlv->type == req_tlv) + return tlv; + + if (!tlv->length) { + BNX2X_ERR("Found TLV with length 0\n"); + return NULL; + } + + tlvs_list += tlv->length; + tlv = (struct channel_tlv *)tlvs_list; + } while (tlv->type != CHANNEL_TLV_LIST_END); + + DP(BNX2X_MSG_IOV, "TLV list does not contain %d TLV\n", req_tlv); + + return NULL; +} + /* list the types and lengths of the tlvs on the buffer */ void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list) { @@ -128,7 +152,7 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping) if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) { 
DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n"); *done = PFVF_STATUS_SUCCESS; - return 0; + return -EINVAL; } /* Write message address */ @@ -184,7 +208,7 @@ static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id) return -EINVAL; } - BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg); + DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg); *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT; @@ -196,6 +220,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) int rc = 0, attempts = 0; struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire; struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp; + struct vfpf_port_phys_id_resp_tlv *phys_port_resp; u32 vf_id; bool resources_acquired = false; @@ -219,8 +244,14 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) /* pf 2 vf bulletin board address */ req->bulletin_addr = bp->pf2vf_bulletin_mapping; + /* Request physical port identifier */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, + CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv)); + /* add list termination tlv */ - bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + bnx2x_add_tlv(bp, req, + req->first_tlv.tl.length + sizeof(struct channel_tlv), + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); /* output tlvs list */ @@ -287,6 +318,15 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) } } + /* Retrieve physical port id (if possible) */ + phys_port_resp = (struct vfpf_port_phys_id_resp_tlv *) + bnx2x_search_tlv_list(bp, resp, + CHANNEL_TLV_PHYS_PORT_ID); + if (phys_port_resp) { + memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN); + bp->flags |= HAS_PHYS_PORT_ID; + } + /* get HW info */ bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff); bp->link_params.chip_id = bp->common.chip_id; @@ -980,56 +1020,62 @@ static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf, dmae.len = len32; /* issue the command and wait for completion */ - return bnx2x_issue_dmae_with_comp(bp, &dmae); + return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); } -static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf) +static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp, + struct bnx2x_virtf *vf) { struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index); - u64 vf_addr; - dma_addr_t pf_addr; u16 length, type; - int rc; - struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp; /* prepare response */ type = mbx->first_tlv.tl.type; length = type == CHANNEL_TLV_ACQUIRE ? 
sizeof(struct pfvf_acquire_resp_tlv) : sizeof(struct pfvf_general_resp_tlv); - bnx2x_add_tlv(bp, resp, 0, type, length); - resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc); - bnx2x_add_tlv(bp, resp, length, CHANNEL_TLV_LIST_END, + bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length); + bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); +} + +static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp, + struct bnx2x_virtf *vf) +{ + struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index); + struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp; + dma_addr_t pf_addr; + u64 vf_addr; + int rc; + bnx2x_dp_tlv_list(bp, resp); DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n", mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset); + resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc); + /* send response */ vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) + mbx->first_tlv.resp_msg_offset; pf_addr = mbx->msg_mapping + offsetof(struct bnx2x_vf_mbx_msg, resp); - /* copy the response body, if there is one, before the header, as the vf - * is sensitive to the header being written + /* Copy the response buffer. The first u64 is written afterwards, as + * the vf is sensitive to the header being written */ - if (resp->hdr.tl.length > sizeof(u64)) { - length = resp->hdr.tl.length - sizeof(u64); - vf_addr += sizeof(u64); - pf_addr += sizeof(u64); - rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid, - U64_HI(vf_addr), - U64_LO(vf_addr), - length/4); - if (rc) { - BNX2X_ERR("Failed to copy response body to VF %d\n", - vf->abs_vfid); - goto mbx_error; - } - vf_addr -= sizeof(u64); - pf_addr -= sizeof(u64); + vf_addr += sizeof(u64); + pf_addr += sizeof(u64); + rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid, + U64_HI(vf_addr), + U64_LO(vf_addr), + (sizeof(union pfvf_tlvs) - sizeof(u64))/4); + if (rc) { + BNX2X_ERR("Failed to copy response body to VF %d\n", + vf->abs_vfid); + goto mbx_error; } + vf_addr -= sizeof(u64); + pf_addr -= sizeof(u64); /* ack the FW */ storm_memset_vf_mbx_ack(bp, vf->abs_vfid); @@ -1060,6 +1106,36 @@ mbx_error: bnx2x_vf_release(bp, vf, false); /* non blocking */ } +static void bnx2x_vf_mbx_resp(struct bnx2x *bp, + struct bnx2x_virtf *vf) +{ + bnx2x_vf_mbx_resp_single_tlv(bp, vf); + bnx2x_vf_mbx_resp_send_msg(bp, vf); +} + +static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp, + struct bnx2x_virtf *vf, + void *buffer, + u16 *offset) +{ + struct vfpf_port_phys_id_resp_tlv *port_id; + + if (!(bp->flags & HAS_PHYS_PORT_ID)) + return; + + bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID, + sizeof(struct vfpf_port_phys_id_resp_tlv)); + + port_id = (struct vfpf_port_phys_id_resp_tlv *) + (((u8 *)buffer) + *offset); + memcpy(port_id->id, bp->phys_port_id, ETH_ALEN); + + /* Offset should continue representing the offset to the tail + * of TLV data (outside this function scope) + */ + *offset += sizeof(struct vfpf_port_phys_id_resp_tlv); +} + static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_mbx *mbx, int vfop_status) { @@ -1067,6 +1143,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp; struct pf_vf_resc *resc = &resp->resc; u8 status = bnx2x_pfvf_status_codes(vfop_status); + u16 length; memset(resp, 0, sizeof(*resp)); @@ -1140,9 +1217,24 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, 
resc->hw_sbs[i].sb_qid); DP_CONT(BNX2X_MSG_IOV, "]\n"); + /* prepare response */ + length = sizeof(struct pfvf_acquire_resp_tlv); + bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length); + + /* Handle possible VF requests for physical port identifiers. + * 'length' should continue to indicate the offset of the first empty + * place in the buffer (i.e., where next TLV should be inserted) + */ + if (bnx2x_search_tlv_list(bp, &mbx->msg->req, + CHANNEL_TLV_PHYS_PORT_ID)) + bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length); + + bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + /* send the response */ vf->op_rc = vfop_status; - bnx2x_vf_mbx_resp(bp, vf); + bnx2x_vf_mbx_resp_send_msg(bp, vf); } static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, @@ -1506,6 +1598,8 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { unsigned long accept = 0; + struct pf_vf_bulletin_content *bulletin = + BP_VF_BULLETIN(bp, vf->index); /* covert VF-PF if mask to bnx2x accept flags */ if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST) @@ -1525,9 +1619,11 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) __set_bit(BNX2X_ACCEPT_BROADCAST, &accept); /* A packet arriving the vf's mac should be accepted - * with any vlan + * with any vlan, unless a vlan has already been + * configured. */ - __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); + if (!(bulletin->valid_bitmap & (1 << VLAN_VALID))) + __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); /* set rx-mode */ rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, @@ -1618,6 +1714,21 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, goto response; } } + /* if vlan was set by hypervisor we don't allow guest to config vlan */ + if (bulletin->valid_bitmap & 1 << VLAN_VALID) { + int i; + + /* search for vlan filters */ + for (i = 0; i < filters->n_mac_vlan_filters; i++) { + if (filters->filters[i].flags & + VFPF_Q_FILTER_VLAN_TAG_VALID) { + BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. 
Aborting request\n", + vf->abs_vfid); + vf->op_rc = -EPERM; + goto response; + } + } + } /* verify vf_qid */ if (filters->vf_qid > vf_rxq_count(vf)) @@ -1713,6 +1824,9 @@ static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf, vf_op_params->rss_result_mask = rss_tlv->rss_result_mask; /* flags handled individually for backward/forward compatability */ + vf_op_params->rss_flags = 0; + vf_op_params->ramrod_flags = 0; + if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED) __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags); if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR) @@ -1765,28 +1879,28 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, switch (mbx->first_tlv.tl.type) { case CHANNEL_TLV_ACQUIRE: bnx2x_vf_mbx_acquire(bp, vf, mbx); - break; + return; case CHANNEL_TLV_INIT: bnx2x_vf_mbx_init_vf(bp, vf, mbx); - break; + return; case CHANNEL_TLV_SETUP_Q: bnx2x_vf_mbx_setup_q(bp, vf, mbx); - break; + return; case CHANNEL_TLV_SET_Q_FILTERS: bnx2x_vf_mbx_set_q_filters(bp, vf, mbx); - break; + return; case CHANNEL_TLV_TEARDOWN_Q: bnx2x_vf_mbx_teardown_q(bp, vf, mbx); - break; + return; case CHANNEL_TLV_CLOSE: bnx2x_vf_mbx_close_vf(bp, vf, mbx); - break; + return; case CHANNEL_TLV_RELEASE: bnx2x_vf_mbx_release_vf(bp, vf, mbx); - break; + return; case CHANNEL_TLV_UPDATE_RSS: bnx2x_vf_mbx_update_rss(bp, vf, mbx); - break; + return; } } else { @@ -1802,26 +1916,24 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, for (i = 0; i < 20; i++) DP_CONT(BNX2X_MSG_IOV, "%x ", mbx->msg->req.tlv_buf_size.tlv_buffer[i]); + } - /* test whether we can respond to the VF (do we have an address - * for it?) - */ - if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) { - /* mbx_resp uses the op_rc of the VF */ - vf->op_rc = PFVF_STATUS_NOT_SUPPORTED; + /* can we respond to VF (do we have an address for it?) */ + if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) { + /* mbx_resp uses the op_rc of the VF */ + vf->op_rc = PFVF_STATUS_NOT_SUPPORTED; - /* notify the VF that we do not support this request */ - bnx2x_vf_mbx_resp(bp, vf); - } else { - /* can't send a response since this VF is unknown to us - * just ack the FW to release the mailbox and unlock - * the channel. - */ - storm_memset_vf_mbx_ack(bp, vf->abs_vfid); - mmiowb(); - bnx2x_unlock_vf_pf_channel(bp, vf, - mbx->first_tlv.tl.type); - } + /* notify the VF that we do not support this request */ + bnx2x_vf_mbx_resp(bp, vf); + } else { + /* can't send a response since this VF is unknown to us + * just ack the FW to release the mailbox and unlock + * the channel. 
+ */ + storm_memset_vf_mbx_ack(bp, vf->abs_vfid); + /* Firmware ack should be written before unlocking channel */ + mmiowb(); + bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type); } } @@ -1876,6 +1988,9 @@ void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event) /* process the VF message header */ mbx->first_tlv = mbx->msg->req.first_tlv; + /* Clean response buffer to refrain from falsely seeing chains */ + memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs)); + /* dispatch the request (will prepare the response) */ bnx2x_vf_mbx_request(bp, vf, mbx); goto mbx_done; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h index 1179fe06d0c7..208568bc7a71 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h @@ -188,6 +188,12 @@ struct pfvf_acquire_resp_tlv { } resc; }; +struct vfpf_port_phys_id_resp_tlv { + struct channel_tlv tl; + u8 id[ETH_ALEN]; + u8 padding[2]; +}; + #define VFPF_INIT_FLG_STATS_COALESCE (1 << 0) /* when set the VFs queues * stats will be coalesced on * the leading RSS queue @@ -398,6 +404,7 @@ enum channel_tlvs { CHANNEL_TLV_PF_SET_MAC, CHANNEL_TLV_PF_SET_VLAN, CHANNEL_TLV_UPDATE_RSS, + CHANNEL_TLV_PHYS_PORT_ID, CHANNEL_TLV_MAX }; diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 99394bd49a13..f58a8b80302d 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -393,7 +393,7 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type, csk->vlan_id = path_resp->vlan_id; - memcpy(csk->ha, path_resp->mac_addr, 6); + memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN); if (test_bit(SK_F_IPV6, &csk->flags)) memcpy(&csk->src_ip[0], &path_resp->src.v6_addr, sizeof(struct in6_addr)); @@ -5572,7 +5572,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS) cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS; - memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6); + memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN); cp->cnic_ops = &cnic_bnx2x_ops; cp->start_hw = cnic_start_bnx2x_hw; diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h index 0658b43e148c..ebbfe25acaa6 100644 --- a/drivers/net/ethernet/broadcom/cnic_if.h +++ b/drivers/net/ethernet/broadcom/cnic_if.h @@ -353,8 +353,8 @@ struct cnic_ulp_ops { atomic_t ref_count; }; -extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops); +int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops); -extern int cnic_unregister_driver(int ulp_type); +int cnic_unregister_driver(int ulp_type); #endif diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 12d961c4ebca..15a66e4b1f57 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) #define DRV_MODULE_NAME "tg3" #define TG3_MAJ_NUM 3 -#define TG3_MIN_NUM 133 +#define TG3_MIN_NUM 134 #define DRV_MODULE_VERSION \ __stringify(TG3_MAJ_NUM) "." 
__stringify(TG3_MIN_NUM) -#define DRV_MODULE_RELDATE "Jul 29, 2013" +#define DRV_MODULE_RELDATE "Sep 16, 2013" #define RESET_KIND_SHUTDOWN 0 #define RESET_KIND_INIT 1 @@ -337,6 +337,11 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)}, {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, @@ -1326,6 +1331,12 @@ static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable) return err; } +static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val) +{ + return tg3_writephy(tp, MII_TG3_MISC_SHDW, + reg | val | MII_TG3_MISC_SHDW_WREN); +} + static int tg3_bmcr_reset(struct tg3 *tp) { u32 phy_control; @@ -1364,7 +1375,7 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) spin_lock_bh(&tp->lock); - if (tg3_readphy(tp, reg, &val)) + if (__tg3_readphy(tp, mii_id, reg, &val)) val = -EIO; spin_unlock_bh(&tp->lock); @@ -1379,7 +1390,7 @@ static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) spin_lock_bh(&tp->lock); - if (tg3_writephy(tp, reg, val)) + if (__tg3_writephy(tp, mii_id, reg, val)) ret = -EIO; spin_unlock_bh(&tp->lock); @@ -1397,7 +1408,7 @@ static void tg3_mdio_config_5785(struct tg3 *tp) u32 val; struct phy_device *phydev; - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { case PHY_ID_BCM50610: case PHY_ID_BCM50610M: @@ -1502,6 +1513,13 @@ static int tg3_mdio_init(struct tg3 *tp) TG3_CPMU_PHY_STRAP_IS_SERDES; if (is_serdes) tp->phy_addr += 7; + } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) { + int addr; + + addr = ssb_gige_get_phyaddr(tp->pdev); + if (addr < 0) + return addr; + tp->phy_addr = addr; } else tp->phy_addr = TG3_PHY_MII_ADDR; @@ -1522,7 +1540,7 @@ static int tg3_mdio_init(struct tg3 *tp) tp->mdio_bus->read = &tg3_mdio_read; tp->mdio_bus->write = &tg3_mdio_write; tp->mdio_bus->reset = &tg3_mdio_reset; - tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR); + tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr); tp->mdio_bus->irq = &tp->mdio_irq[0]; for (i = 0; i < PHY_MAX_ADDR; i++) @@ -1543,7 +1561,7 @@ static int tg3_mdio_init(struct tg3 *tp) return i; } - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; if (!phydev || !phydev->drv) { dev_warn(&tp->pdev->dev, "No PHY devices\n"); @@ -1953,7 +1971,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) u32 old_tx_mode = tp->tx_mode; if (tg3_flag(tp, USE_PHYLIB)) - autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg; + autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg; else autoneg = tp->link_config.autoneg; @@ -1989,7 +2007,7 @@ static void tg3_adjust_link(struct net_device *dev) u8 oldflowctrl, linkmesg = 0; u32 mac_mode, lcl_adv, rmt_adv; struct tg3 *tp = netdev_priv(dev); - struct phy_device *phydev = 
tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr]; spin_lock_bh(&tp->lock); @@ -2078,7 +2096,7 @@ static int tg3_phy_init(struct tg3 *tp) /* Bring the PHY back to a known state. */ tg3_bmcr_reset(tp); - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; /* Attach the MAC to the PHY. */ phydev = phy_connect(tp->dev, dev_name(&phydev->dev), @@ -2105,7 +2123,7 @@ static int tg3_phy_init(struct tg3 *tp) SUPPORTED_Asym_Pause); break; default: - phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); + phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]); return -EINVAL; } @@ -2123,7 +2141,7 @@ static void tg3_phy_start(struct tg3 *tp) if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return; - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; @@ -2143,13 +2161,13 @@ static void tg3_phy_stop(struct tg3 *tp) if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return; - phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); + phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]); } static void tg3_phy_fini(struct tg3 *tp) { if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { - phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); + phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]); tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED; } } @@ -2218,25 +2236,21 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) return; } - reg = MII_TG3_MISC_SHDW_WREN | - MII_TG3_MISC_SHDW_SCR5_SEL | - MII_TG3_MISC_SHDW_SCR5_LPED | + reg = MII_TG3_MISC_SHDW_SCR5_LPED | MII_TG3_MISC_SHDW_SCR5_DLPTLM | MII_TG3_MISC_SHDW_SCR5_SDTL | MII_TG3_MISC_SHDW_SCR5_C125OE; if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable) reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD; - tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); + tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg); - reg = MII_TG3_MISC_SHDW_WREN | - MII_TG3_MISC_SHDW_APD_SEL | - MII_TG3_MISC_SHDW_APD_WKTM_84MS; + reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS; if (enable) reg |= MII_TG3_MISC_SHDW_APD_ENABLE; - tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); + tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg); } static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable) @@ -4027,7 +4041,7 @@ static int tg3_power_down_prepare(struct tg3 *tp) struct phy_device *phydev; u32 phyid, advertising; - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; @@ -6848,12 +6862,6 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) pci_unmap_single(tp->pdev, dma_addr, skb_size, PCI_DMA_FROMDEVICE); - skb = build_skb(data, frag_size); - if (!skb) { - tg3_frag_free(frag_size != 0, data); - goto drop_it_no_recycle; - } - skb_reserve(skb, TG3_RX_OFFSET(tp)); /* Ensure that the update to the data happens * after the usage of the old DMA mapping. 
*/ @@ -6861,6 +6869,12 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) ri->data = NULL; + skb = build_skb(data, frag_size); + if (!skb) { + tg3_frag_free(frag_size != 0, data); + goto drop_it_no_recycle; + } + skb_reserve(skb, TG3_RX_OFFSET(tp)); } else { tg3_recycle_rx(tnapi, tpr, opaque_key, desc_idx, *post_ptr); @@ -7608,7 +7622,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) { u32 base = (u32) mapping & 0xffffffff; - return (base > 0xffffdcc0) && (base + len + 8 < base); + return base + len + 8 < base; } /* Test for TSO DMA buffers that cross into regions which are within MSS bytes @@ -8918,6 +8932,9 @@ static int tg3_chip_reset(struct tg3 *tp) void (*write_op)(struct tg3 *, u32, u32); int i, err; + if (!pci_device_is_present(tp->pdev)) + return -ENODEV; + tg3_nvram_lock(tp); tg3_ape_lock(tp, TG3_APE_LOCK_GRC); @@ -9196,10 +9213,7 @@ static int tg3_halt(struct tg3 *tp, int kind, bool silent) memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); } - if (err) - return err; - - return 0; + return err; } static int tg3_set_mac_addr(struct net_device *dev, void *p) @@ -10618,10 +10632,8 @@ static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir) static ssize_t tg3_show_temp(struct device *dev, struct device_attribute *devattr, char *buf) { - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *netdev = pci_get_drvdata(pdev); - struct tg3 *tp = netdev_priv(netdev); struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + struct tg3 *tp = dev_get_drvdata(dev); u32 temperature; spin_lock_bh(&tp->lock); @@ -10639,29 +10651,25 @@ static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL, static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL, TG3_TEMP_MAX_OFFSET); -static struct attribute *tg3_attributes[] = { +static struct attribute *tg3_attrs[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_crit.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, NULL }; - -static const struct attribute_group tg3_group = { - .attrs = tg3_attributes, -}; +ATTRIBUTE_GROUPS(tg3); static void tg3_hwmon_close(struct tg3 *tp) { if (tp->hwmon_dev) { hwmon_device_unregister(tp->hwmon_dev); tp->hwmon_dev = NULL; - sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group); } } static void tg3_hwmon_open(struct tg3 *tp) { - int i, err; + int i; u32 size = 0; struct pci_dev *pdev = tp->pdev; struct tg3_ocir ocirs[TG3_SD_NUM_RECS]; @@ -10679,18 +10687,11 @@ static void tg3_hwmon_open(struct tg3 *tp) if (!size) return; - /* Register hwmon sysfs hooks */ - err = sysfs_create_group(&pdev->dev.kobj, &tg3_group); - if (err) { - dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n"); - return; - } - - tp->hwmon_dev = hwmon_device_register(&pdev->dev); + tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3", + tp, tg3_groups); if (IS_ERR(tp->hwmon_dev)) { tp->hwmon_dev = NULL; dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n"); - sysfs_remove_group(&pdev->dev.kobj, &tg3_group); } } @@ -11035,7 +11036,18 @@ static int tg3_request_irq(struct tg3 *tp, int irq_num) name = tp->dev->name; else { name = &tnapi->irq_lbl[0]; - snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num); + if (tnapi->tx_buffers && tnapi->rx_rcb) + snprintf(name, IFNAMSIZ, + "%s-txrx-%d", tp->dev->name, irq_num); + else if (tnapi->tx_buffers) + snprintf(name, IFNAMSIZ, + "%s-tx-%d", tp->dev->name, irq_num); + else if (tnapi->rx_rcb) + snprintf(name, IFNAMSIZ, + "%s-rx-%d", tp->dev->name, irq_num); + else + 
snprintf(name, IFNAMSIZ, + "%s-%d", tp->dev->name, irq_num); name[IFNAMSIZ-1] = 0; } @@ -11572,10 +11584,11 @@ static int tg3_close(struct net_device *dev) memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev)); memset(&tp->estats_prev, 0, sizeof(tp->estats_prev)); - tg3_power_down_prepare(tp); - - tg3_carrier_off(tp); + if (pci_device_is_present(tp->pdev)) { + tg3_power_down_prepare(tp); + tg3_carrier_off(tp); + } return 0; } @@ -11907,7 +11920,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) struct phy_device *phydev; if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; return phy_ethtool_gset(phydev, cmd); } @@ -11974,7 +11987,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) struct phy_device *phydev; if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; return phy_ethtool_sset(phydev, cmd); } @@ -12093,12 +12106,10 @@ static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC); - spin_lock_bh(&tp->lock); if (device_may_wakeup(dp)) tg3_flag_set(tp, WOL_ENABLE); else tg3_flag_clear(tp, WOL_ENABLE); - spin_unlock_bh(&tp->lock); return 0; } @@ -12131,7 +12142,7 @@ static int tg3_nway_reset(struct net_device *dev) if (tg3_flag(tp, USE_PHYLIB)) { if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; - r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); + r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]); } else { u32 bmcr; @@ -12247,7 +12258,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam u32 newadv; struct phy_device *phydev; - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; if (!(phydev->supported & SUPPORTED_Pause) || (!(phydev->supported & SUPPORTED_Asym_Pause) && @@ -13194,8 +13205,8 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) return -ENOMEM; tx_data = skb_put(skb, tx_len); - memcpy(tx_data, tp->dev->dev_addr, 6); - memset(tx_data + 6, 0x0, 8); + memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN); + memset(tx_data + ETH_ALEN, 0x0, 8); tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN); @@ -13598,16 +13609,9 @@ static int tg3_hwtstamp_ioctl(struct net_device *dev, if (stmpconf.flags) return -EINVAL; - switch (stmpconf.tx_type) { - case HWTSTAMP_TX_ON: - tg3_flag_set(tp, TX_TSTAMP_EN); - break; - case HWTSTAMP_TX_OFF: - tg3_flag_clear(tp, TX_TSTAMP_EN); - break; - default: + if (stmpconf.tx_type != HWTSTAMP_TX_ON && + stmpconf.tx_type != HWTSTAMP_TX_OFF) return -ERANGE; - } switch (stmpconf.rx_filter) { case HWTSTAMP_FILTER_NONE: @@ -13669,6 +13673,11 @@ static int tg3_hwtstamp_ioctl(struct net_device *dev, tw32(TG3_RX_PTP_CTL, tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); + if (stmpconf.tx_type == HWTSTAMP_TX_ON) + tg3_flag_set(tp, TX_TSTAMP_EN); + else + tg3_flag_clear(tp, TX_TSTAMP_EN); + return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 
-EFAULT : 0; } @@ -13683,7 +13692,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) struct phy_device *phydev; if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) return -EAGAIN; - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; return phy_mii_ioctl(phydev, ifr, cmd); } @@ -14921,6 +14930,12 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp) tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1) tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | LED_CTRL_MODE_PHY_2); + + if (tg3_flag(tp, 5717_PLUS) || + tg3_asic_rev(tp) == ASIC_REV_5762) + tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE | + LED_CTRL_BLINK_RATE_MASK; + break; case SHASTA_EXT_LED_MAC: @@ -15759,9 +15774,12 @@ static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg) tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) reg = TG3PCI_GEN2_PRODID_ASICREV; else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 || @@ -16485,6 +16503,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) /* Clear this out for sanity. */ tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); + /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */ + tw32(TG3PCI_REG_BASE_ADDR, 0); + pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &pci_state_reg); if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && @@ -16632,8 +16653,8 @@ static int tg3_get_macaddr_sparc(struct tg3 *tp) int len; addr = of_get_property(dp, "local-mac-address", &len); - if (addr && len == 6) { - memcpy(dev->dev_addr, addr, 6); + if (addr && len == ETH_ALEN) { + memcpy(dev->dev_addr, addr, ETH_ALEN); return 0; } return -ENODEV; @@ -16643,7 +16664,7 @@ static int tg3_get_default_macaddr_sparc(struct tg3 *tp) { struct net_device *dev = tp->dev; - memcpy(dev->dev_addr, idprom->id_ethaddr, 6); + memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN); return 0; } #endif @@ -17052,10 +17073,6 @@ static int tg3_test_dma(struct tg3 *tp) tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); -#if 0 - /* Unneeded, already done by tg3_get_invariants. */ - tg3_switch_clocks(tp); -#endif if (tg3_asic_rev(tp) != ASIC_REV_5700 && tg3_asic_rev(tp) != ASIC_REV_5701) @@ -17083,20 +17100,6 @@ static int tg3_test_dma(struct tg3 *tp) break; } -#if 0 - /* validate data reached card RAM correctly. */ - for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { - u32 val; - tg3_read_mem(tp, 0x2100 + (i*4), &val); - if (le32_to_cpu(val) != p[i]) { - dev_err(&tp->pdev->dev, - "%s: Buffer corrupted on device! " - "(%d != %d)\n", __func__, val, i); - /* ret = -ENODEV here? */ - } - p[i] = 0; - } -#endif /* Now read it back. 
*/ ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false); if (ret) { @@ -17362,8 +17365,10 @@ static int tg3_init_one(struct pci_dev *pdev, tg3_flag_set(tp, FLUSH_POSTED_WRITES); if (ssb_gige_one_dma_at_once(pdev)) tg3_flag_set(tp, ONE_DMA_AT_ONCE); - if (ssb_gige_have_roboswitch(pdev)) + if (ssb_gige_have_roboswitch(pdev)) { + tg3_flag_set(tp, USE_PHYLIB); tg3_flag_set(tp, ROBOSWITCH); + } if (ssb_gige_is_rgmii(pdev)) tg3_flag_set(tp, RGMII_MODE); } @@ -17409,9 +17414,12 @@ static int tg3_init_one(struct pci_dev *pdev, tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) { + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) { tg3_flag_set(tp, ENABLE_APE); tp->aperegs = pci_ioremap_bar(pdev, BAR_2); if (!tp->aperegs) { @@ -17628,7 +17636,7 @@ static int tg3_init_one(struct pci_dev *pdev, if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { struct phy_device *phydev; - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", phydev->drv->name, dev_name(&phydev->dev)); @@ -17685,7 +17693,6 @@ err_out_free_res: err_out_disable_pdev: if (pci_is_enabled(pdev)) pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); return err; } @@ -17717,7 +17724,6 @@ static void tg3_remove_one(struct pci_dev *pdev) free_netdev(dev); pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); } } @@ -17727,10 +17733,12 @@ static int tg3_suspend(struct device *device) struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); struct tg3 *tp = netdev_priv(dev); - int err; + int err = 0; + + rtnl_lock(); if (!netif_running(dev)) - return 0; + goto unlock; tg3_reset_task_cancel(tp); tg3_phy_stop(tp); @@ -17772,6 +17780,8 @@ out: tg3_phy_start(tp); } +unlock: + rtnl_unlock(); return err; } @@ -17780,10 +17790,12 @@ static int tg3_resume(struct device *device) struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); struct tg3 *tp = netdev_priv(dev); - int err; + int err = 0; + + rtnl_lock(); if (!netif_running(dev)) - return 0; + goto unlock; netif_device_attach(dev); @@ -17807,6 +17819,8 @@ out: if (!err) tg3_phy_start(tp); +unlock: + rtnl_unlock(); return err; } #endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 70257808aa37..5c3835aa1e1b 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -68,6 +68,9 @@ #define TG3PCI_DEVICE_TIGON3_5762 0x1687 #define TG3PCI_DEVICE_TIGON3_5725 0x1643 #define TG3PCI_DEVICE_TIGON3_5727 0x16f3 +#define TG3PCI_DEVICE_TIGON3_57764 0x1642 +#define TG3PCI_DEVICE_TIGON3_57767 0x1683 +#define TG3PCI_DEVICE_TIGON3_57787 0x1641 /* 0x04 --> 0x2c unused */ #define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM #define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644 |