Diffstat (limited to 'drivers/net/ethernet/qlogic/qede/qede_main.c')
 drivers/net/ethernet/qlogic/qede/qede_main.c | 662 ++++++++++++++++-------
 1 file changed, 548 insertions(+), 114 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 518af329502d..8114541f327c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -24,7 +24,12 @@
 #include <linux/netdev_features.h>
 #include <linux/udp.h>
 #include <linux/tcp.h>
+#ifdef CONFIG_QEDE_VXLAN
 #include <net/vxlan.h>
+#endif
+#ifdef CONFIG_QEDE_GENEVE
+#include <net/geneve.h>
+#endif
 #include <linux/ip.h>
 #include <net/ipv6.h>
 #include <net/tcp.h>
@@ -58,6 +63,7 @@ static const struct qed_eth_ops *qed_ops;
 #define CHIP_NUM_57980S_100		0x1644
 #define CHIP_NUM_57980S_50		0x1654
 #define CHIP_NUM_57980S_25		0x1656
+#define CHIP_NUM_57980S_IOV		0x1664
 
 #ifndef PCI_DEVICE_ID_NX2_57980E
 #define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
@@ -66,15 +72,22 @@ static const struct qed_eth_ops *qed_ops;
 #define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
 #define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
 #define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
+#define PCI_DEVICE_ID_57980S_IOV	CHIP_NUM_57980S_IOV
 #endif
 
+enum qede_pci_private {
+	QEDE_PRIVATE_PF,
+	QEDE_PRIVATE_VF
+};
+
 static const struct pci_device_id qede_pci_tbl[] = {
-	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), 0 },
-	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), 0 },
-	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), 0 },
-	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), 0 },
-	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), 0 },
-	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), 0 },
+	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
+	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
+	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
+	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
+	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
+	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
+	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
 	{ 0 }
 };
 
@@ -89,17 +102,87 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
 				struct qede_rx_queue *rxq);
 static void qede_link_update(void *dev, struct qed_link_output *link);
 
+#ifdef CONFIG_QED_SRIOV
+static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos)
+{
+	struct qede_dev *edev = netdev_priv(ndev);
+
+	if (vlan > 4095) {
+		DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
+		return -EINVAL;
+	}
+
+	DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
+		   vlan, vf);
+
+	return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
+}
+
+static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
+{
+	struct qede_dev *edev = netdev_priv(ndev);
+
+	DP_VERBOSE(edev, QED_MSG_IOV,
+		   "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
+		   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);
+
+	if (!is_valid_ether_addr(mac)) {
+		DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
+		return -EINVAL;
+	}
+
+	return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
+}
+
+static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
+{
+	struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
+	struct qed_dev_info *qed_info = &edev->dev_info.common;
+	int rc;
+
+	DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
+
+	rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
+
+	/* Enable/Disable Tx switching for PF */
+	if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
+	    qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
+		struct qed_update_vport_params params;
+
+		memset(&params, 0, sizeof(params));
+		params.vport_id = 0;
+		params.update_tx_switching_flg = 1;
+		params.tx_switching_flg = num_vfs_param ? 1 : 0;
+		edev->ops->vport_update(edev->cdev, &params);
+	}
+
+	return rc;
+}
+#endif
+
 static struct pci_driver qede_pci_driver = {
 	.name = "qede",
 	.id_table = qede_pci_tbl,
 	.probe = qede_probe,
 	.remove = qede_remove,
+#ifdef CONFIG_QED_SRIOV
+	.sriov_configure = qede_sriov_configure,
+#endif
 };
 
+static void qede_force_mac(void *dev, u8 *mac)
+{
+	struct qede_dev *edev = dev;
+
+	ether_addr_copy(edev->ndev->dev_addr, mac);
+	ether_addr_copy(edev->primary_mac, mac);
+}
+
 static struct qed_eth_cb_ops qede_ll_ops = {
 	{
 		.link_update = qede_link_update,
 	},
+	.force_mac = qede_force_mac,
 };
 
 static int qede_netdev_event(struct notifier_block *this, unsigned long event,
@@ -141,19 +224,10 @@ static int __init qede_init(void)
 {
 	int ret;
-	u32 qed_ver;
 
 	pr_notice("qede_init: %s\n", version);
 
-	qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
-	if (qed_ver != QEDE_ETH_INTERFACE_VERSION) {
-		pr_notice("Version mismatch [%08x != %08x]\n",
-			  qed_ver,
-			  QEDE_ETH_INTERFACE_VERSION);
-		return -EINVAL;
-	}
-
-	qed_ops = qed_get_eth_ops(QEDE_ETH_INTERFACE_VERSION);
+	qed_ops = qed_get_eth_ops();
 	if (!qed_ops) {
 		pr_notice("Failed to get qed ethtool operations\n");
 		return -EINVAL;
@@ -319,6 +393,9 @@ static u32 qede_xmit_type(struct qede_dev *edev,
 	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
 		*ipv6_ext = 1;
 
+	if (skb->encapsulation)
+		rc |= XMIT_ENC;
+
 	if (skb_is_gso(skb))
 		rc |= XMIT_LSO;
 
@@ -380,6 +457,16 @@ static int map_frag_to_bd(struct qede_dev *edev,
 	return 0;
 }
 
+static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
+{
+	if (is_encap_pkt)
+		return (skb_inner_transport_header(skb) +
+			inner_tcp_hdrlen(skb) - skb->data);
+	else
+		return (skb_transport_header(skb) +
+			tcp_hdrlen(skb) - skb->data);
+}
+
 /* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
 #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
 static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
@@ -390,8 +477,7 @@ static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
 	if (xmit_type & XMIT_LSO) {
 		int hlen;
 
-		hlen = skb_transport_header(skb) +
-		       tcp_hdrlen(skb) - skb->data;
+		hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
 
 		/* linear payload would require its own BD */
 		if (skb_headlen(skb) > hlen)
@@ -421,7 +507,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 	u8 xmit_type;
 	u16 idx;
 	u16 hlen;
-	bool data_split;
+	bool data_split = false;
 
 	/* Get tx-queue context and netdev index */
 	txq_index = skb_get_queue_mapping(skb);
@@ -499,7 +585,18 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 		first_bd->data.bd_flags.bitfields |=
 			1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
 
-		first_bd->data.bitfields |= cpu_to_le16(temp);
+		if (xmit_type & XMIT_ENC) {
+			first_bd->data.bd_flags.bitfields |=
+				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+		} else {
+			/* In cases when OS doesn't indicate for inner offloads
+			 * when packet is tunnelled, we need to override the HW
+			 * tunnel configuration so that packets are treated as
+			 * regular non tunnelled packets and no inner offloads
+			 * are done by the hardware.
+			 */
+			first_bd->data.bitfields |= cpu_to_le16(temp);
+		}
 
 		/* If the packet is IPv6 with extension header, indicate that
 		 * to FW and pass few params, since the device cracker doesn't
@@ -515,10 +612,15 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 		third_bd->data.lso_mss =
 			cpu_to_le16(skb_shinfo(skb)->gso_size);
 
-		first_bd->data.bd_flags.bitfields |=
-			1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
-		hlen = skb_transport_header(skb) +
-		       tcp_hdrlen(skb) - skb->data;
+		if (unlikely(xmit_type & XMIT_ENC)) {
+			first_bd->data.bd_flags.bitfields |=
+				1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
+			hlen = qede_get_skb_hlen(skb, true);
+		} else {
+			first_bd->data.bd_flags.bitfields |=
+				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+			hlen = qede_get_skb_hlen(skb, false);
+		}
 
 		/* @@@TBD - if will not be removed need to check */
 		third_bd->data.bitfields |=
@@ -644,7 +746,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 	return NETDEV_TX_OK;
 }
 
-static int qede_txq_has_work(struct qede_tx_queue *txq)
+int qede_txq_has_work(struct qede_tx_queue *txq)
 {
 	u16 hw_bd_cons;
 
@@ -727,7 +829,7 @@ static int qede_tx_int(struct qede_dev *edev,
 	return 0;
 }
 
-static bool qede_has_rx_work(struct qede_rx_queue *rxq)
+bool qede_has_rx_work(struct qede_rx_queue *rxq)
 {
 	u16 hw_comp_cons, sw_comp_cons;
 
@@ -750,6 +852,12 @@ static bool qede_has_tx_work(struct qede_fastpath *fp)
 	return false;
 }
 
+static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
+{
+	qed_chain_consume(&rxq->rx_bd_ring);
+	rxq->sw_rx_cons++;
+}
+
 /* This function reuses the buffer(from an offset) from
  * consumer index to producer index in the bd ring
  */
@@ -773,6 +881,21 @@ static inline void qede_reuse_page(struct qede_dev *edev,
 	curr_cons->data = NULL;
 }
 
+/* In case of allocation failures reuse buffers
+ * from consumer index to produce buffers for firmware
+ */
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
+			     struct qede_dev *edev, u8 count)
+{
+	struct sw_rx_data *curr_cons;
+
+	for (; count > 0; count--) {
+		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+		qede_reuse_page(edev, rxq, curr_cons);
+		qede_rx_bd_ring_consume(rxq);
+	}
+}
+
 static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
 					 struct qede_rx_queue *rxq,
 					 struct sw_rx_data *curr_cons)
@@ -781,8 +904,14 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
 	curr_cons->page_offset += rxq->rx_buf_seg_size;
 
 	if (curr_cons->page_offset == PAGE_SIZE) {
-		if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+		if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+			/* Since we failed to allocate new buffer
+			 * current buffer can be used again.
+			 */
+			curr_cons->page_offset -= rxq->rx_buf_seg_size;
+
 			return -ENOMEM;
+		}
 
 		dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
 			       PAGE_SIZE, DMA_FROM_DEVICE);
@@ -852,6 +981,9 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
 
 	if (csum_flag & QEDE_CSUM_UNNECESSARY)
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY)
+		skb->csum_level = 1;
 }
 
 static inline void qede_skb_receive(struct qede_dev *edev,
@@ -901,7 +1033,10 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
 			   len_on_bd);
 
 	if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
-		tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+		/* Incr page ref count to reuse on allocation failure
+		 * so that it doesn't get freed while freeing SKB.
+		 */
+		atomic_inc(&current_bd->data->_count);
 		goto out;
 	}
 
@@ -915,6 +1050,8 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
 	return 0;
 
 out:
+	tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+	qede_recycle_rx_bd_ring(rxq, edev, 1);
 	return -ENOMEM;
 }
 
@@ -966,8 +1103,9 @@ static void qede_tpa_start(struct qede_dev *edev,
 	tpa_info->skb = netdev_alloc_skb(edev->ndev,
 					 le16_to_cpu(cqe->len_on_first_bd));
 	if (unlikely(!tpa_info->skb)) {
+		DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
 		tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
-		return;
+		goto cons_buf;
 	}
 
 	skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
@@ -990,6 +1128,7 @@ static void qede_tpa_start(struct qede_dev *edev,
 	/* This is needed in order to enable forwarding support */
 	qede_set_gro_params(edev, tpa_info->skb, cqe);
 
+cons_buf: /* We still need to handle bd_len_list to consume buffers */
 	if (likely(cqe->ext_bd_len_list[0]))
 		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
 				   le16_to_cpu(cqe->ext_bd_len_list[0]));
@@ -1007,7 +1146,6 @@ static void qede_gro_ip_csum(struct sk_buff *skb)
 	const struct iphdr *iph = ip_hdr(skb);
 	struct tcphdr *th;
 
-	skb_set_network_header(skb, 0);
 	skb_set_transport_header(skb, sizeof(struct iphdr));
 	th = tcp_hdr(skb);
 
@@ -1022,7 +1160,6 @@ static void qede_gro_ipv6_csum(struct sk_buff *skb)
 	struct ipv6hdr *iph = ipv6_hdr(skb);
 	struct tcphdr *th;
 
-	skb_set_network_header(skb, 0);
 	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
 	th = tcp_hdr(skb);
 
@@ -1037,8 +1174,21 @@ static void qede_gro_receive(struct qede_dev *edev,
 			     struct sk_buff *skb,
 			     u16 vlan_tag)
 {
+	/* FW can send a single MTU sized packet from gro flow
+	 * due to aggregation timeout/last segment etc. which
+	 * is not expected to be a gro packet. If a skb has zero
+	 * frags then simply push it in the stack as non gso skb.
+	 */
+	if (unlikely(!skb->data_len)) {
+		skb_shinfo(skb)->gso_type = 0;
+		skb_shinfo(skb)->gso_size = 0;
+		goto send_skb;
+	}
+
 #ifdef CONFIG_INET
 	if (skb_shinfo(skb)->gso_size) {
+		skb_set_network_header(skb, 0);
+
 		switch (skb->protocol) {
 		case htons(ETH_P_IP):
 			qede_gro_ip_csum(skb);
@@ -1053,6 +1203,8 @@ static void qede_gro_receive(struct qede_dev *edev,
 		}
 	}
 #endif
+
+send_skb:
 	skb_record_rx_queue(skb, fp->rss_id);
 	qede_skb_receive(edev, fp, skb, vlan_tag);
 }
@@ -1141,13 +1293,47 @@ err:
 	tpa_info->skb = NULL;
 }
 
-static u8 qede_check_csum(u16 flag)
+static bool qede_tunn_exist(u16 flag)
+{
+	return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
+			  PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
+}
+
+static u8 qede_check_tunn_csum(u16 flag)
+{
+	u16 csum_flag = 0;
+	u8 tcsum = 0;
+
+	if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
+		    PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
+		csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
+			     PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
+
+	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
+		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+		tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
+	}
+
+	csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
+		     PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
+		     PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+	if (csum_flag & flag)
+		return QEDE_CSUM_ERROR;
+
+	return QEDE_CSUM_UNNECESSARY | tcsum;
+}
+
+static u8 qede_check_notunn_csum(u16 flag)
 {
 	u16 csum_flag = 0;
 	u8 csum = 0;
 
-	if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
-	     PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
+	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
 		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
 			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
 		csum = QEDE_CSUM_UNNECESSARY;
@@ -1162,6 +1348,14 @@ static u8 qede_check_csum(u16 flag)
 	return csum;
 }
 
+static u8 qede_check_csum(u16 flag)
+{
+	if (!qede_tunn_exist(flag))
+		return qede_check_notunn_csum(flag);
+	else
+		return qede_check_tunn_csum(flag);
+}
+
 static int qede_rx_int(struct qede_fastpath *fp, int budget)
 {
 	struct qede_dev *edev = fp->edev;
@@ -1244,17 +1438,17 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 				  "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
 				  sw_comp_cons, parse_flag);
 			rxq->rx_hw_errors++;
-			qede_reuse_page(edev, rxq, sw_rx_data);
-			goto next_rx;
+			qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
+			goto next_cqe;
 		}
 
 		skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
 		if (unlikely(!skb)) {
 			DP_NOTICE(edev,
 				  "Build_skb failed, dropping incoming packet\n");
-			qede_reuse_page(edev, rxq, sw_rx_data);
+			qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
 			rxq->rx_alloc_errors++;
-			goto next_rx;
+			goto next_cqe;
 		}
 
 		/* Copy data into SKB */
@@ -1288,11 +1482,22 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 			if (unlikely(qede_realloc_rx_buffer(edev, rxq,
 							    sw_rx_data))) {
 				DP_ERR(edev, "Failed to allocate rx buffer\n");
+				/* Incr page ref count to reuse on allocation
+				 * failure so that it doesn't get freed while
+				 * freeing SKB.
+				 */
+
+				atomic_inc(&sw_rx_data->data->_count);
 				rxq->rx_alloc_errors++;
+				qede_recycle_rx_bd_ring(rxq, edev,
+							fp_cqe->bd_num);
+				dev_kfree_skb_any(skb);
 				goto next_cqe;
 			}
 		}
 
+		qede_rx_bd_ring_consume(rxq);
+
 		if (fp_cqe->bd_num != 1) {
 			u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
 			u8 num_frags;
@@ -1303,18 +1508,27 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 			     num_frags--) {
 				u16 cur_size = pkt_len > rxq->rx_buf_size ?
 						rxq->rx_buf_size : pkt_len;
+				if (unlikely(!cur_size)) {
+					DP_ERR(edev,
+					       "Still got %d BDs for mapping jumbo, but length became 0\n",
+					       num_frags);
+					qede_recycle_rx_bd_ring(rxq, edev,
+								num_frags);
+					dev_kfree_skb_any(skb);
+					goto next_cqe;
+				}
 
-				WARN_ONCE(!cur_size,
-					  "Still got %d BDs for mapping jumbo, but length became 0\n",
-					  num_frags);
-
-				if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+				if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+					qede_recycle_rx_bd_ring(rxq, edev,
+								num_frags);
+					dev_kfree_skb_any(skb);
 					goto next_cqe;
+				}
 
-				rxq->sw_rx_cons++;
 				sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
 				sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
-				qed_chain_consume(&rxq->rx_bd_ring);
+				qede_rx_bd_ring_consume(rxq);
+
 				dma_unmap_page(&edev->pdev->dev,
 					       sw_rx_data->mapping,
 					       PAGE_SIZE, DMA_FROM_DEVICE);
@@ -1330,7 +1544,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 				pkt_len -= cur_size;
 			}
 
-			if (pkt_len)
+			if (unlikely(pkt_len))
 				DP_ERR(edev,
 				       "Mapped all BDs of jumbo, but still have %d bytes\n",
 				       pkt_len);
@@ -1349,10 +1563,6 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 		skb_record_rx_queue(skb, fp->rss_id);
 
 		qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
-
-		qed_chain_consume(&rxq->rx_bd_ring);
-next_rx:
-		rxq->sw_rx_cons++;
 next_rx_only:
 		rx_pkt++;
 
@@ -1506,16 +1716,25 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
 	edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
 
 	edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
-	edev->stats.rx_127_byte_packets = stats.rx_127_byte_packets;
-	edev->stats.rx_255_byte_packets = stats.rx_255_byte_packets;
-	edev->stats.rx_511_byte_packets = stats.rx_511_byte_packets;
-	edev->stats.rx_1023_byte_packets = stats.rx_1023_byte_packets;
-	edev->stats.rx_1518_byte_packets = stats.rx_1518_byte_packets;
-	edev->stats.rx_1522_byte_packets = stats.rx_1522_byte_packets;
-	edev->stats.rx_2047_byte_packets = stats.rx_2047_byte_packets;
-	edev->stats.rx_4095_byte_packets = stats.rx_4095_byte_packets;
-	edev->stats.rx_9216_byte_packets = stats.rx_9216_byte_packets;
-	edev->stats.rx_16383_byte_packets = stats.rx_16383_byte_packets;
+	edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets;
+	edev->stats.rx_128_to_255_byte_packets =
+				stats.rx_128_to_255_byte_packets;
+	edev->stats.rx_256_to_511_byte_packets =
+				stats.rx_256_to_511_byte_packets;
+	edev->stats.rx_512_to_1023_byte_packets =
+				stats.rx_512_to_1023_byte_packets;
+	edev->stats.rx_1024_to_1518_byte_packets =
+				stats.rx_1024_to_1518_byte_packets;
+	edev->stats.rx_1519_to_1522_byte_packets =
+				stats.rx_1519_to_1522_byte_packets;
+	edev->stats.rx_1519_to_2047_byte_packets =
+				stats.rx_1519_to_2047_byte_packets;
+	edev->stats.rx_2048_to_4095_byte_packets =
+				stats.rx_2048_to_4095_byte_packets;
+	edev->stats.rx_4096_to_9216_byte_packets =
+				stats.rx_4096_to_9216_byte_packets;
+	edev->stats.rx_9217_to_16383_byte_packets =
+				stats.rx_9217_to_16383_byte_packets;
 	edev->stats.rx_crc_errors = stats.rx_crc_errors;
 	edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
 	edev->stats.rx_pause_frames = stats.rx_pause_frames;
@@ -1589,6 +1808,49 @@ static struct rtnl_link_stats64 *qede_get_stats64(
 	return stats;
 }
 
+#ifdef CONFIG_QED_SRIOV
+static int qede_get_vf_config(struct net_device *dev, int vfidx,
+			      struct ifla_vf_info *ivi)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+
+	if (!edev->ops)
+		return -EINVAL;
+
+	return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
+}
+
+static int qede_set_vf_rate(struct net_device *dev, int vfidx,
+			    int min_tx_rate, int max_tx_rate)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+
+	return edev->ops->iov->set_rate(edev->cdev, vfidx, max_tx_rate,
+					max_tx_rate);
+}
+
+static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+
+	if (!edev->ops)
+		return -EINVAL;
+
+	return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
+}
+
+static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
+				  int link_state)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+
+	if (!edev->ops)
+		return -EINVAL;
+
+	return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
+}
+#endif
+
 static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
 {
 	struct qed_update_vport_params params;
@@ -1830,6 +2092,76 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
 	edev->accept_any_vlan = false;
 }
 
+#ifdef CONFIG_QEDE_VXLAN
+static void qede_add_vxlan_port(struct net_device *dev,
+				sa_family_t sa_family, __be16 port)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	u16 t_port = ntohs(port);
+
+	if (edev->vxlan_dst_port)
+		return;
+
+	edev->vxlan_dst_port = t_port;
+
+	DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d", t_port);
+
+	set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+	schedule_delayed_work(&edev->sp_task, 0);
+}
+
+static void qede_del_vxlan_port(struct net_device *dev,
+				sa_family_t sa_family, __be16 port)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	u16 t_port = ntohs(port);
+
+	if (t_port != edev->vxlan_dst_port)
+		return;
+
+	edev->vxlan_dst_port = 0;
+
+	DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d", t_port);
+
+	set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+	schedule_delayed_work(&edev->sp_task, 0);
+}
+#endif
+
+#ifdef CONFIG_QEDE_GENEVE
+static void qede_add_geneve_port(struct net_device *dev,
+				 sa_family_t sa_family, __be16 port)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	u16 t_port = ntohs(port);
+
+	if (edev->geneve_dst_port)
+		return;
+
+	edev->geneve_dst_port = t_port;
+
+	DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d", t_port);
+	set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+	schedule_delayed_work(&edev->sp_task, 0);
+}
+
+static void qede_del_geneve_port(struct net_device *dev,
+				 sa_family_t sa_family, __be16 port)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	u16 t_port = ntohs(port);
+
+	if (t_port != edev->geneve_dst_port)
+		return;
+
+	edev->geneve_dst_port = 0;
+
+	DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d", t_port);
+	set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+	schedule_delayed_work(&edev->sp_task, 0);
+}
+#endif
+
 static const struct net_device_ops qede_netdev_ops = {
 	.ndo_open = qede_open,
 	.ndo_stop = qede_close,
@@ -1838,9 +2170,27 @@ static const struct net_device_ops qede_netdev_ops = {
 	.ndo_set_mac_address = qede_set_mac_addr,
 	.ndo_validate_addr = eth_validate_addr,
 	.ndo_change_mtu = qede_change_mtu,
+#ifdef CONFIG_QED_SRIOV
+	.ndo_set_vf_mac = qede_set_vf_mac,
+	.ndo_set_vf_vlan = qede_set_vf_vlan,
+#endif
 	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
 	.ndo_get_stats64 = qede_get_stats64,
+#ifdef CONFIG_QED_SRIOV
+	.ndo_set_vf_link_state = qede_set_vf_link_state,
+	.ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
+	.ndo_get_vf_config = qede_get_vf_config,
+	.ndo_set_vf_rate = qede_set_vf_rate,
+#endif
+#ifdef CONFIG_QEDE_VXLAN
+	.ndo_add_vxlan_port = qede_add_vxlan_port,
+	.ndo_del_vxlan_port = qede_del_vxlan_port,
+#endif
+#ifdef CONFIG_QEDE_GENEVE
+	.ndo_add_geneve_port = qede_add_geneve_port,
+	.ndo_del_geneve_port = qede_del_geneve_port,
+#endif
 };
 
 /* -------------------------------------------------------------------------
@@ -1875,8 +2225,6 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
 	edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
 	edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
 
-	DP_INFO(edev, "Allocated netdev with 64 tx queues and 64 rx queues\n");
-
 	SET_NETDEV_DEV(ndev, &pdev->dev);
 
 	memset(&edev->stats, 0, sizeof(edev->stats));
@@ -1913,6 +2261,14 @@ static void qede_init_ndev(struct qede_dev *edev)
 		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 		      NETIF_F_TSO | NETIF_F_TSO6;
 
+	/* Encap features*/
+	hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
+		       NETIF_F_TSO_ECN;
+	ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+				NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
+				NETIF_F_TSO6 | NETIF_F_GSO_GRE |
+				NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM;
+
 	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
 			      NETIF_F_HIGHDMA;
 	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
@@ -2013,6 +2369,8 @@ static void qede_sp_task(struct work_struct *work)
 {
 	struct qede_dev *edev = container_of(work, struct qede_dev,
 					     sp_task.work);
+	struct qed_dev *cdev = edev->cdev;
+
 	mutex_lock(&edev->qede_lock);
 
 	if (edev->state == QEDE_STATE_OPEN) {
@@ -2020,6 +2378,24 @@ static void qede_sp_task(struct work_struct *work)
 			qede_config_rx_mode(edev->ndev);
 	}
 
+	if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
+		struct qed_tunn_params tunn_params;
+
+		memset(&tunn_params, 0, sizeof(tunn_params));
+		tunn_params.update_vxlan_port = 1;
+		tunn_params.vxlan_port = edev->vxlan_dst_port;
+		qed_ops->tunn_config(cdev, &tunn_params);
+	}
+
+	if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) {
+		struct qed_tunn_params tunn_params;
+
+		memset(&tunn_params, 0, sizeof(tunn_params));
+		tunn_params.update_geneve_port = 1;
+		tunn_params.geneve_port = edev->geneve_dst_port;
+		qed_ops->tunn_config(cdev, &tunn_params);
+	}
+
 	mutex_unlock(&edev->qede_lock);
 }
 
@@ -2027,9 +2403,9 @@ static void qede_update_pf_params(struct qed_dev *cdev)
 {
 	struct qed_pf_params pf_params;
 
-	/* 16 rx + 16 tx */
+	/* 64 rx + 64 tx */
 	memset(&pf_params, 0, sizeof(struct qed_pf_params));
-	pf_params.eth_pf_params.num_cons = 32;
+	pf_params.eth_pf_params.num_cons = 128;
 	qed_ops->common->update_pf_params(cdev, &pf_params);
 }
 
@@ -2038,8 +2414,9 @@ enum qede_probe_mode {
 };
 
 static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
-			enum qede_probe_mode mode)
+			bool is_vf, enum qede_probe_mode mode)
 {
+	struct qed_probe_params probe_params;
 	struct qed_slowpath_params params;
 	struct qed_dev_eth_info dev_info;
 	struct qede_dev *edev;
@@ -2049,8 +2426,12 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
 	if (unlikely(dp_level & QED_LEVEL_INFO))
 		pr_notice("Starting qede probe\n");
 
-	cdev = qed_ops->common->probe(pdev, QED_PROTOCOL_ETH,
-				      dp_module, dp_level);
+	memset(&probe_params, 0, sizeof(probe_params));
+	probe_params.protocol = QED_PROTOCOL_ETH;
+	probe_params.dp_module = dp_module;
+	probe_params.dp_level = dp_level;
+	probe_params.is_vf = is_vf;
+	cdev = qed_ops->common->probe(pdev, &probe_params);
 	if (!cdev) {
 		rc = -ENODEV;
 		goto err0;
@@ -2084,6 +2465,9 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
 		goto err2;
 	}
 
+	if (is_vf)
+		edev->flags |= QEDE_FLAG_IS_VF;
+
 	qede_init_ndev(edev);
 
 	rc = register_netdev(edev->ndev);
@@ -2115,12 +2499,24 @@ err0:
 
 static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
+	bool is_vf = false;
 	u32 dp_module = 0;
 	u8 dp_level = 0;
 
+	switch ((enum qede_pci_private)id->driver_data) {
+	case QEDE_PRIVATE_VF:
+		if (debug & QED_LOG_VERBOSE_MASK)
+			dev_err(&pdev->dev, "Probing a VF\n");
+		is_vf = true;
+		break;
+	default:
+		if (debug & QED_LOG_VERBOSE_MASK)
+			dev_err(&pdev->dev, "Probing a PF\n");
+	}
+
 	qede_config_debug(debug, &dp_module, &dp_level);
 
-	return __qede_probe(pdev, dp_module, dp_level,
+	return __qede_probe(pdev, dp_module, dp_level, is_vf,
 			    QEDE_PROBE_NORMAL);
 }
 
@@ -2257,7 +2653,7 @@ static void qede_free_sge_mem(struct qede_dev *edev,
 		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
 		struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
 
-		if (replace_buf) {
+		if (replace_buf->data) {
 			dma_unmap_page(&edev->pdev->dev,
 				       dma_unmap_addr(replace_buf, mapping),
 				       PAGE_SIZE, DMA_FROM_DEVICE);
@@ -2377,7 +2773,7 @@ err:
 static int qede_alloc_mem_rxq(struct qede_dev *edev,
 			      struct qede_rx_queue *rxq)
 {
-	int i, rc, size, num_allocated;
+	int i, rc, size;
 
 	rxq->num_rx_buffers = edev->q_num_rx_buffers;
 
@@ -2394,6 +2790,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
 	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
 	if (!rxq->sw_rx_ring) {
 		DP_ERR(edev, "Rx buffers ring allocation failed\n");
+		rc = -ENOMEM;
 		goto err;
 	}
 
@@ -2421,26 +2818,16 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
 	/* Allocate buffers for the Rx ring */
 	for (i = 0; i < rxq->num_rx_buffers; i++) {
 		rc = qede_alloc_rx_buffer(edev, rxq);
-		if (rc)
-			break;
-	}
-	num_allocated = i;
-	if (!num_allocated) {
-		DP_ERR(edev, "Rx buffers allocation failed\n");
-		goto err;
-	} else if (num_allocated < rxq->num_rx_buffers) {
-		DP_NOTICE(edev,
-			  "Allocated less buffers than desired (%d allocated)\n",
-			  num_allocated);
+		if (rc) {
+			DP_ERR(edev,
+			       "Rx buffers allocation failed at index %d\n", i);
+			goto err;
+		}
 	}
 
-	qede_alloc_sge_mem(edev, rxq);
-
-	return 0;
-
+	rc = qede_alloc_sge_mem(edev, rxq);
 err:
-	qede_free_mem_rxq(edev, rxq);
-	return -ENOMEM;
+	return rc;
 }
 
 static void qede_free_mem_txq(struct qede_dev *edev,
@@ -2523,10 +2910,8 @@ static int qede_alloc_mem_fp(struct qede_dev *edev,
 	}
 
 	return 0;
-
 err:
-	qede_free_mem_fp(edev, fp);
-	return -ENOMEM;
+	return rc;
 }
 
 static void qede_free_mem_load(struct qede_dev *edev)
@@ -2549,22 +2934,13 @@ static int qede_alloc_mem_load(struct qede_dev *edev)
 		struct qede_fastpath *fp = &edev->fp_array[rss_id];
 
 		rc = qede_alloc_mem_fp(edev, fp);
-		if (rc)
-			break;
-	}
-
-	if (rss_id != QEDE_RSS_CNT(edev)) {
-		/* Failed allocating memory for all the queues */
-		if (!rss_id) {
+		if (rc) {
 			DP_ERR(edev,
-			       "Failed to allocate memory for the leading queue\n");
-			rc = -ENOMEM;
-		} else {
-			DP_NOTICE(edev,
-				  "Failed to allocate memory for all of RSS queues\n Desired: %d queues, allocated: %d queues\n",
-				  QEDE_RSS_CNT(edev), rss_id);
+			       "Failed to allocate memory for fastpath - rss id = %d\n",
+			       rss_id);
+			qede_free_mem_load(edev);
+			return rc;
 		}
-		edev->num_rss = rss_id;
 	}
 
 	return 0;
@@ -2835,10 +3211,11 @@ static int qede_start_queues(struct qede_dev *edev)
 	int rc, tc, i;
 	int vlan_removal_en = 1;
 	struct qed_dev *cdev = edev->cdev;
-	struct qed_update_vport_rss_params *rss_params = &edev->rss_params;
 	struct qed_update_vport_params vport_update_params;
 	struct qed_queue_start_common_params q_params;
+	struct qed_dev_info *qed_info = &edev->dev_info.common;
 	struct qed_start_vport_params start = {0};
+	bool reset_rss_indir = false;
 
 	if (!edev->num_rss) {
 		DP_ERR(edev,
@@ -2930,19 +3307,59 @@ static int qede_start_queues(struct qede_dev *edev)
 	vport_update_params.update_vport_active_flg = 1;
 	vport_update_params.vport_active_flg = 1;
 
+	if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
+	    qed_info->tx_switching) {
+		vport_update_params.update_tx_switching_flg = 1;
+		vport_update_params.tx_switching_flg = 1;
+	}
+
 	/* Fill struct with RSS params */
 	if (QEDE_RSS_CNT(edev) > 1) {
 		vport_update_params.update_rss_flg = 1;
-		for (i = 0; i < 128; i++)
-			rss_params->rss_ind_table[i] =
-			ethtool_rxfh_indir_default(i, QEDE_RSS_CNT(edev));
-		netdev_rss_key_fill(rss_params->rss_key,
-				    sizeof(rss_params->rss_key));
+
+		/* Need to validate current RSS config uses valid entries */
+		for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+			if (edev->rss_params.rss_ind_table[i] >=
+			    edev->num_rss) {
+				reset_rss_indir = true;
+				break;
+			}
+		}
+
+		if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) ||
+		    reset_rss_indir) {
+			u16 val;
+
+			for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+				u16 indir_val;
+
+				val = QEDE_RSS_CNT(edev);
+				indir_val = ethtool_rxfh_indir_default(i, val);
+				edev->rss_params.rss_ind_table[i] = indir_val;
+			}
+			edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
+		}
+
+		if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
+			netdev_rss_key_fill(edev->rss_params.rss_key,
+					    sizeof(edev->rss_params.rss_key));
+			edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
+		}
+
+		if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
+			edev->rss_params.rss_caps = QED_RSS_IPV4 |
+						    QED_RSS_IPV6 |
+						    QED_RSS_IPV4_TCP |
+						    QED_RSS_IPV6_TCP;
+			edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
+		}
+
+		memcpy(&vport_update_params.rss_params, &edev->rss_params,
+		       sizeof(vport_update_params.rss_params));
 	} else {
-		memset(rss_params, 0, sizeof(*rss_params));
+		memset(&vport_update_params.rss_params, 0,
+		       sizeof(vport_update_params.rss_params));
 	}
-	memcpy(&vport_update_params.rss_params, rss_params,
-	       sizeof(*rss_params));
 
 	rc = edev->ops->vport_update(cdev, &vport_update_params);
 	if (rc) {
@@ -3124,12 +3541,24 @@ void qede_reload(struct qede_dev *edev,
 static int qede_open(struct net_device *ndev)
 {
 	struct qede_dev *edev = netdev_priv(ndev);
+	int rc;
 
 	netif_carrier_off(ndev);
 
 	edev->ops->common->set_power_state(edev->cdev, PCI_D0);
 
-	return qede_load(edev, QEDE_LOAD_NORMAL);
+	rc = qede_load(edev, QEDE_LOAD_NORMAL);
+
+	if (rc)
+		return rc;
+
+#ifdef CONFIG_QEDE_VXLAN
+	vxlan_get_rx_port(ndev);
+#endif
+#ifdef CONFIG_QEDE_GENEVE
+	geneve_get_rx_port(ndev);
+#endif
+	return 0;
 }
 
 static int qede_close(struct net_device *ndev)
@@ -3180,6 +3609,11 @@ static int qede_set_mac_addr(struct net_device *ndev, void *p)
 		return -EFAULT;
 	}
 
+	if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
+		DP_NOTICE(edev, "qed prevents setting MAC\n");
+		return -EINVAL;
+	}
+
 	ether_addr_copy(ndev->dev_addr, addr->sa_data);
 
 	if (!netif_running(ndev)) {
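A note on the PF/VF split introduced at the top of the patch: the new qede_pci_tbl entries stash an enum qede_pci_private value in the PCI match table's driver_data, so qede_probe() can tell a VF PCI function (device ID 0x1664) from a PF without re-matching device IDs. A minimal userspace sketch of the same dispatch pattern (struct id_entry and probe() are illustrative stand-ins, not kernel API):

#include <stdio.h>

enum qede_pci_private {
	QEDE_PRIVATE_PF,
	QEDE_PRIVATE_VF
};

struct id_entry {
	unsigned short device;
	unsigned long driver_data;	/* plays the role of pci_device_id.driver_data */
};

static const struct id_entry tbl[] = {
	{ 0x1656, QEDE_PRIVATE_PF },	/* 57980S_25 */
	{ 0x1664, QEDE_PRIVATE_VF },	/* 57980S_IOV */
};

static void probe(const struct id_entry *id)
{
	/* mirror the switch in qede_probe(): decide PF vs VF from the
	 * match entry, not from the raw device ID
	 */
	switch ((enum qede_pci_private)id->driver_data) {
	case QEDE_PRIVATE_VF:
		printf("0x%04x: probing a VF\n", id->device);
		break;
	default:
		printf("0x%04x: probing a PF\n", id->device);
	}
}

int main(void)
{
	probe(&tbl[0]);
	probe(&tbl[1]);
	return 0;
}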
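On qede_get_skb_hlen(): for LSO the first BD must cover everything up to and including the TCP header, and for encapsulated traffic that means the *inner* TCP header, so the headers region grows by the whole outer stack. A worked example of the arithmetic for a VXLAN-encapsulated TSO frame, assuming IPv4 with no options at both levels (the sizes are textbook values, not taken from the patch):

#include <stdio.h>

int main(void)
{
	unsigned int eth = 14, ipv4 = 20, udp = 8, vxlan = 8, tcp = 20;
	unsigned int plain_hlen, encap_hlen;

	/* non-tunnelled: skb_transport_header() + tcp_hdrlen() - skb->data */
	plain_hlen = eth + ipv4 + tcp;

	/* tunnelled: skb_inner_transport_header() + inner_tcp_hdrlen()
	 * - skb->data, i.e. the outer headers are part of hlen too
	 */
	encap_hlen = eth + ipv4 + udp + vxlan + eth + ipv4 + tcp;

	printf("plain hlen = %u bytes\n", plain_hlen);	/* 54 */
	printf("encap hlen = %u bytes\n", encap_hlen);	/* 104 */
	return 0;
}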
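The atomic_inc(&page->_count) calls in the error paths deserve a comment: once reallocation fails, the same page is about to be referenced both by the SKB being freed (dev_kfree_skb_any()) and by the Rx ring it is recycled back onto, so each owner needs its own reference or the ring would end up holding a freed page. A userspace refcount sketch of that ownership rule (struct page_buf and put_page_buf() are illustrative, assuming a plain free-on-last-put model):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct page_buf {
	atomic_int refcount;
	/* page payload would live here */
};

static void put_page_buf(struct page_buf *p)
{
	/* fetch_sub returns the old value: 1 means we were the last owner */
	if (atomic_fetch_sub(&p->refcount, 1) == 1) {
		printf("page freed\n");
		free(p);
	}
}

int main(void)
{
	struct page_buf *p = malloc(sizeof(*p));

	if (!p)
		return 1;
	atomic_init(&p->refcount, 1);	/* reference held by the SKB frag */

	/* realloc failed: the Rx ring keeps the page too, so take an
	 * extra reference first - the analog of atomic_inc(&page->_count)
	 */
	atomic_fetch_add(&p->refcount, 1);

	put_page_buf(p);	/* dev_kfree_skb_any(): page survives */
	put_page_buf(p);	/* ring finally releases it: freed now */
	return 0;
}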
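The qede_check_tunn_csum() logic follows one scheme throughout: an "error" bit is only armed into the test mask when the matching "checksum was calculated" bit is set, while IP-header error bits are armed unconditionally; the whole word is then tested once. A sketch of that scheme with hypothetical bit positions (the real values come from the PARSING_AND_ERR_FLAGS_* firmware HSI definitions, which are not shown in this diff):

#include <stdint.h>
#include <stdio.h>

#define L4_WAS_CALCULATED	(1u << 0)	/* hypothetical positions */
#define L4_ERROR		(1u << 1)
#define TUNN_L4_WAS_CALCULATED	(1u << 2)
#define TUNN_L4_ERROR		(1u << 3)
#define IP_HDR_ERROR		(1u << 4)	/* always fatal */

#define CSUM_UNNECESSARY	0x1
#define TUNN_CSUM_UNNECESSARY	0x2
#define CSUM_ERROR		0x80

static uint8_t check_tunn_csum(uint16_t flag)
{
	uint16_t csum_flag = 0;
	uint8_t tcsum = 0;

	if (flag & TUNN_L4_WAS_CALCULATED)	/* outer L4 was verified... */
		csum_flag |= TUNN_L4_ERROR;	/* ...so its error bit counts */

	if (flag & L4_WAS_CALCULATED) {		/* inner L4 was verified */
		csum_flag |= L4_ERROR;
		tcsum = TUNN_CSUM_UNNECESSARY;
	}

	csum_flag |= IP_HDR_ERROR;		/* header errors always count */

	if (csum_flag & flag)
		return CSUM_ERROR;

	return CSUM_UNNECESSARY | tcsum;
}

int main(void)
{
	/* both checksums verified, no errors -> good, inner level valid */
	printf("0x%02x\n", check_tunn_csum(L4_WAS_CALCULATED |
					   TUNN_L4_WAS_CALCULATED));
	/* inner checksum verified but bad -> error */
	printf("0x%02x\n", check_tunn_csum(L4_WAS_CALCULATED | L4_ERROR));
	return 0;
}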
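The VXLAN/GENEVE ndo_add/del_*_port callbacks never touch the device directly: they record the UDP port, set a QEDE_SP_*_PORT_CONFIG bit, and schedule sp_task, which later applies the change via tunn_config() under qede_lock. That indirection matters because the notifiers can run in contexts where a slow hardware configuration call is not allowed. A minimal userspace sketch of this record-flag-apply pattern (struct tunnel_ctx, add_vxlan_port() and sp_task() are illustrative names, not driver API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct tunnel_ctx {
	unsigned short vxlan_dst_port;	/* last port announced by the stack */
	atomic_bool port_config_pending;
};

static void add_vxlan_port(struct tunnel_ctx *ctx, unsigned short port)
{
	if (ctx->vxlan_dst_port)	/* one port supported; keep the first */
		return;
	ctx->vxlan_dst_port = port;
	atomic_store(&ctx->port_config_pending, true);	/* like set_bit() */
}

static void sp_task(struct tunnel_ctx *ctx)
{
	/* exchange(false) is the analog of test_and_clear_bit():
	 * true means work was pending and is now claimed by us
	 */
	if (atomic_exchange(&ctx->port_config_pending, false))
		printf("apply vxlan dst port %u to HW\n", ctx->vxlan_dst_port);
}

int main(void)
{
	struct tunnel_ctx ctx = { .vxlan_dst_port = 0 };

	atomic_init(&ctx.port_config_pending, false);
	add_vxlan_port(&ctx, 4789);	/* IANA-assigned VXLAN port */
	sp_task(&ctx);			/* worker applies the change once */
	sp_task(&ctx);			/* second run: flag clear, no-op */
	return 0;
}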
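Finally, the RSS rework in qede_start_queues(): the indirection table is now only re-filled when it was never initialized or when an entry points at a queue index that no longer exists (e.g. after the queue count shrank). The default fill uses ethtool_rxfh_indir_default(), which in the kernel reduces to index modulo the ring count, i.e. a round-robin spread. A runnable sketch of both the fill and the validation pass, with the table size mirroring QED_RSS_IND_TABLE_SIZE (128) and the queue counts chosen for illustration:

#include <stdbool.h>
#include <stdio.h>

#define RSS_IND_TABLE_SIZE 128	/* mirrors QED_RSS_IND_TABLE_SIZE */

int main(void)
{
	unsigned short ind_table[RSS_IND_TABLE_SIZE];
	unsigned int num_rss = 6;	/* active Rx queues, e.g. 6 */
	bool reset_rss_indir = false;
	int i;

	/* default fill: ethtool_rxfh_indir_default(i, n) == i % n */
	for (i = 0; i < RSS_IND_TABLE_SIZE; i++)
		ind_table[i] = i % num_rss;

	/* validation pass from the patch: after the queue count shrinks,
	 * any entry pointing past the last queue forces a re-fill
	 */
	num_rss = 4;
	for (i = 0; i < RSS_IND_TABLE_SIZE; i++) {
		if (ind_table[i] >= num_rss) {
			reset_rss_indir = true;
			break;
		}
	}
	printf("reset needed: %s\n", reset_rss_indir ? "yes" : "no");
	return 0;
}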