Diffstat (limited to 'drivers/net/ethernet/hisilicon')
31 files changed, 1770 insertions, 895 deletions
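A change that recurs throughout the diff below is the tree-wide removal of dma_zalloc_coherent(): dma_alloc_coherent() now zeroes the buffer it returns, so callers switch to it with otherwise unchanged arguments. A minimal sketch of the resulting allocation pattern — all demo_* names here are hypothetical, for illustration only, not from this patch:

	#include <linux/dma-mapping.h>

	struct demo_desc {
		__le64 addr;
		__le32 len;
		__le32 flags;
	};

	struct demo_ring {
		struct demo_desc *desc;	/* descriptor array, pre-zeroed */
		dma_addr_t desc_dma;
		int desc_num;
	};

	static int demo_alloc_desc(struct device *dev, struct demo_ring *ring)
	{
		int size = ring->desc_num * sizeof(struct demo_desc);

		/* dma_alloc_coherent() returns zeroed memory; no memset needed */
		ring->desc = dma_alloc_coherent(dev, size, &ring->desc_dma,
						GFP_KERNEL);
		if (!ring->desc)
			return -ENOMEM;

		return 0;
	}
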
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index 471805ea363b..e5d853b7b454 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -1006,8 +1006,8 @@ static int hix5hd2_init_hw_desc_queue(struct hix5hd2_priv *priv) for (i = 0; i < QUEUE_NUMS; i++) { size = priv->pool[i].count * sizeof(struct hix5hd2_desc); - virt_addr = dma_zalloc_coherent(dev, size, &phys_addr, - GFP_KERNEL); + virt_addr = dma_alloc_coherent(dev, size, &phys_addr, + GFP_KERNEL); if (virt_addr == NULL) goto error_free_pool; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index ad1779fc410e..a78bfafd212c 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -147,12 +147,10 @@ static void hns_ae_put_handle(struct hnae_handle *handle) struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); int i; - vf_cb->mac_cb = NULL; - - kfree(vf_cb); - for (i = 0; i < handle->q_num; i++) hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0; + + kfree(vf_cb); } static int hns_ae_wait_flow_down(struct hnae_handle *handle) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 3b9e74be5fbd..ac55db065f16 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -3081,6 +3081,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) dsaf_dev = dev_get_drvdata(&pdev->dev); if (!dsaf_dev) { dev_err(&pdev->dev, "dsaf_dev is NULL\n"); + put_device(&pdev->dev); return -ENODEV; } @@ -3088,6 +3089,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n", dsaf_dev->ae_dev.name); + put_device(&pdev->dev); return -ENODEV; } @@ -3126,6 +3128,9 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1); dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit); } + + put_device(&pdev->dev); + return 0; } EXPORT_SYMBOL(hns_dsaf_roce_reset); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index 0942e4916d9d..3d07c8a7639d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c @@ -83,8 +83,9 @@ static int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index) else ppe_num = HNS_PPE_DEBUG_NW_ENGINE_NUM; - ppe_common = devm_kzalloc(dsaf_dev->dev, sizeof(*ppe_common) + - ppe_num * sizeof(struct hns_ppe_cb), GFP_KERNEL); + ppe_common = devm_kzalloc(dsaf_dev->dev, + struct_size(ppe_common, ppe_cb, ppe_num), + GFP_KERNEL); if (!ppe_common) return -ENOMEM; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 5d64519b9b1d..6bf346c11b25 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -788,8 +788,9 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, int ring_num = hns_rcb_get_ring_num(dsaf_dev); rcb_common = - devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) + - ring_num * sizeof(struct ring_pair_cb), GFP_KERNEL); + devm_kzalloc(dsaf_dev->dev, + struct_size(rcb_common, ring_pair_cb, 
ring_num), + GFP_KERNEL); if (!rcb_common) { dev_err(dsaf_dev->dev, "rcb common devm_kzalloc fail!\n"); return -ENOMEM; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 5748d3f722f6..60e7d7ae3787 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1170,6 +1170,13 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h) if (!h->phy_dev) return 0; + ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support); + linkmode_and(phy_dev->supported, phy_dev->supported, supported); + linkmode_copy(phy_dev->advertising, phy_dev->supported); + + if (h->phy_if == PHY_INTERFACE_MODE_XGMII) + phy_dev->autoneg = false; + if (h->phy_if != PHY_INTERFACE_MODE_XGMII) { phy_dev->dev_flags = 0; @@ -1181,16 +1188,6 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h) if (unlikely(ret)) return -ENODEV; - ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support); - linkmode_and(phy_dev->supported, phy_dev->supported, supported); - linkmode_copy(phy_dev->advertising, phy_dev->supported); - - if (h->phy_if == PHY_INTERFACE_MODE_XGMII) - phy_dev->autoneg = false; - - if (h->phy_if == PHY_INTERFACE_MODE_SGMII) - phy_stop(phy_dev); - return 0; } @@ -2421,6 +2418,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev) out_notify_fail: (void)cancel_work_sync(&priv->service_task); out_read_prop_fail: + /* safe for ACPI FW */ + of_node_put(to_of_node(priv->fwnode)); free_netdev(ndev); return ret; } @@ -2450,6 +2449,9 @@ static int hns_nic_dev_remove(struct platform_device *pdev) set_bit(NIC_STATE_REMOVING, &priv->state); (void)cancel_work_sync(&priv->service_task); + /* safe for ACPI FW */ + of_node_put(to_of_node(priv->fwnode)); + free_netdev(ndev); return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 8e9b95871d30..ce15d2350db9 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -1157,16 +1157,18 @@ static int hns_get_regs_len(struct net_device *net_dev) */ static int hns_nic_nway_reset(struct net_device *netdev) { - int ret = 0; struct phy_device *phy = netdev->phydev; - if (netif_running(netdev)) { - /* if autoneg is disabled, don't restart auto-negotiation */ - if (phy && phy->autoneg == AUTONEG_ENABLE) - ret = genphy_restart_aneg(phy); - } + if (!netif_running(netdev)) + return 0; - return ret; + if (!phy) + return -EOPNOTSUPP; + + if (phy->autoneg != AUTONEG_ENABLE) + return -EINVAL; + + return genphy_restart_aneg(phy); } static u32 diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 691d12174902..299b277bc7ae 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -21,6 +21,7 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_SET_MACVLAN, /* (VF -> PF) set unicast filter */ HCLGE_MBX_API_NEGOTIATE, /* (VF -> PF) negotiate API version */ HCLGE_MBX_GET_QINFO, /* (VF -> PF) get queue config */ + HCLGE_MBX_GET_QDEPTH, /* (VF -> PF) get queue depth */ HCLGE_MBX_GET_TCINFO, /* (VF -> PF) get TC config */ HCLGE_MBX_GET_RETA, /* (VF -> PF) get RETA */ HCLGE_MBX_GET_RSS_KEY, /* (VF -> PF) get RSS key */ @@ -40,6 +41,10 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_SET_ALIVE, /* (VF -> PF) set alive state */ HCLGE_MBX_SET_MTU, /* (VF -> PF) set mtu */ HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue 
id in pf */ + HCLGE_MBX_LINK_STAT_MODE, /* (PF -> VF) link mode has changed */ + HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */ + + HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */ }; /* below are per-VF mac-vlan subcodes */ @@ -60,7 +65,7 @@ enum hclge_mbx_vlan_cfg_subcode { }; #define HCLGE_MBX_MAX_MSG_SIZE 16 -#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8 +#define HCLGE_MBX_MAX_RESP_DATA_SIZE 16 #define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM 3 #define HCLGE_MBX_RING_NODE_VARIABLE_NUM 3 diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 781e5dee3c70..17ab4f4af6ad 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -32,6 +32,9 @@ static bool hnae3_client_match(enum hnae3_client_type client_type, void hnae3_set_client_init_flag(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev, int inited) { + if (!client || !ae_dev) + return; + switch (client->type) { case HNAE3_CLIENT_KNIC: hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited); @@ -109,6 +112,9 @@ int hnae3_register_client(struct hnae3_client *client) struct hnae3_ae_dev *ae_dev; int ret = 0; + if (!client) + return -ENODEV; + mutex_lock(&hnae3_common_lock); /* one system should only have one client for every type */ list_for_each_entry(client_tmp, &hnae3_client_list, node) { @@ -141,6 +147,9 @@ void hnae3_unregister_client(struct hnae3_client *client) { struct hnae3_ae_dev *ae_dev; + if (!client) + return; + mutex_lock(&hnae3_common_lock); /* un-initialize the client on every matched port */ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { @@ -163,6 +172,9 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) struct hnae3_client *client; int ret = 0; + if (!ae_algo) + return; + mutex_lock(&hnae3_common_lock); list_add_tail(&ae_algo->node, &hnae3_ae_algo_list); @@ -173,8 +185,12 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) if (!id) continue; - /* ae_dev init should set flag */ + if (!ae_algo->ops) { + dev_err(&ae_dev->pdev->dev, "ae_algo ops are null\n"); + continue; + } ae_dev->ops = ae_algo->ops; + ret = ae_algo->ops->init_ae_dev(ae_dev); if (ret) { dev_err(&ae_dev->pdev->dev, @@ -182,6 +198,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) continue; } + /* ae_dev init should set flag */ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); /* check the client list for the match with this ae_dev type and @@ -209,6 +226,9 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) struct hnae3_ae_dev *ae_dev; struct hnae3_client *client; + if (!ae_algo) + return; + mutex_lock(&hnae3_common_lock); /* Check if there are matched ae_dev */ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { @@ -238,13 +258,16 @@ EXPORT_SYMBOL(hnae3_unregister_ae_algo); * @ae_dev: the AE device * NOTE: the duplicated name will not be checked */ -void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) +int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) { const struct pci_device_id *id; struct hnae3_ae_algo *ae_algo; struct hnae3_client *client; int ret = 0; + if (!ae_dev) + return -ENODEV; + mutex_lock(&hnae3_common_lock); list_add_tail(&ae_dev->node, &hnae3_ae_dev_list); @@ -255,14 +278,13 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) if (!id) continue; - ae_dev->ops = ae_algo->ops; - - if (!ae_dev->ops) { - dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n"); + if (!ae_algo->ops) { + dev_err(&ae_dev->pdev->dev, "ae_algo ops are 
null\n"); + ret = -EOPNOTSUPP; goto out_err; } + ae_dev->ops = ae_algo->ops; - /* ae_dev init should set flag */ ret = ae_dev->ops->init_ae_dev(ae_dev); if (ret) { dev_err(&ae_dev->pdev->dev, @@ -270,6 +292,7 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) goto out_err; } + /* ae_dev init should set flag */ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); break; } @@ -285,8 +308,15 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) ret); } + mutex_unlock(&hnae3_common_lock); + + return 0; + out_err: + list_del(&ae_dev->node); mutex_unlock(&hnae3_common_lock); + + return ret; } EXPORT_SYMBOL(hnae3_register_ae_dev); @@ -299,6 +329,9 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) struct hnae3_ae_algo *ae_algo; struct hnae3_client *client; + if (!ae_dev) + return; + mutex_lock(&hnae3_common_lock); /* Check if there are matched ae_algo */ list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 36eab37d8a40..66d7a8b80e76 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -87,7 +87,8 @@ struct hnae3_queue { struct hnae3_handle *handle; int tqp_index; /* index in a handle */ u32 buf_size; /* size for hnae_desc->addr, preset by AE */ - u16 desc_num; /* total number of desc */ + u16 tx_desc_num;/* total number of tx desc */ + u16 rx_desc_num;/* total number of rx desc */ }; /*hnae3 loop mode*/ @@ -124,6 +125,7 @@ enum hnae3_reset_notify_type { HNAE3_DOWN_CLIENT, HNAE3_INIT_CLIENT, HNAE3_UNINIT_CLIENT, + HNAE3_RESTORE_CLIENT, }; enum hnae3_reset_type { @@ -432,7 +434,8 @@ struct hnae3_ae_ops { struct ethtool_channels *ch); void (*get_tqps_and_rss_info)(struct hnae3_handle *h, u16 *alloc_tqps, u16 *max_rss_size); - int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num); + int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num, + bool rxfh_configured); void (*get_flowctrl_adv)(struct hnae3_handle *handle, u32 *flowctrl_adv); int (*set_led_id)(struct hnae3_handle *handle, @@ -459,9 +462,11 @@ struct hnae3_ae_ops { bool (*get_hw_reset_stat)(struct hnae3_handle *handle); bool (*ae_dev_resetting)(struct hnae3_handle *handle); unsigned long (*ae_dev_reset_cnt)(struct hnae3_handle *handle); - int (*set_gro_en)(struct hnae3_handle *handle, int enable); + int (*set_gro_en)(struct hnae3_handle *handle, bool enable); u16 (*get_global_queue_id)(struct hnae3_handle *handle, u16 queue_id); void (*set_timer_task)(struct hnae3_handle *handle, bool enable); + int (*mac_connect_phy)(struct hnae3_handle *handle); + void (*mac_disconnect_phy)(struct hnae3_handle *handle); }; struct hnae3_dcb_ops { @@ -475,7 +480,6 @@ struct hnae3_dcb_ops { u8 (*getdcbx)(struct hnae3_handle *); u8 (*setdcbx)(struct hnae3_handle *, u8); - int (*map_update)(struct hnae3_handle *); int (*setup_tc)(struct hnae3_handle *, u8, u8 *); }; @@ -500,8 +504,10 @@ struct hnae3_tc_info { struct hnae3_knic_private_info { struct net_device *netdev; /* Set by KNIC client when init instance */ u16 rss_size; /* Allocated RSS queues */ + u16 req_rss_size; u16 rx_buf_len; - u16 num_desc; + u16 num_tx_desc; + u16 num_rx_desc; u8 num_tc; /* Total number of enabled TCs */ u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */ @@ -533,7 +539,9 @@ struct hnae3_roce_private_info { struct hnae3_unic_private_info { struct net_device *netdev; u16 rx_buf_len; - u16 num_desc; + u16 num_tx_desc; + u16 num_rx_desc; + u16 num_tqps; /* total 
number of tqps in this handle */ struct hnae3_queue **tqp; /* array base of all TQPs of this instance */ }; @@ -585,7 +593,7 @@ struct hnae3_handle { #define hnae3_get_bit(origin, shift) \ hnae3_get_field((origin), (0x1 << (shift)), (shift)) -void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev); +int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev); void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev); void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 07cd58798083..3cb43b1f1c2e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -21,6 +21,8 @@ #include "hnae3.h" #include "hns3_enet.h" +#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift))) + static void hns3_clear_all_ring(struct hnae3_handle *h); static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h); static void hns3_remove_hw_addr(struct net_device *netdev); @@ -348,6 +350,8 @@ static int hns3_nic_net_up(struct net_device *netdev) return ret; } + clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); + /* enable the vectors */ for (i = 0; i < priv->vector_num; i++) hns3_vector_enable(&priv->tqp_vector[i]); @@ -361,11 +365,10 @@ static int hns3_nic_net_up(struct net_device *netdev) if (ret) goto out_start_err; - clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); - return 0; out_start_err: + set_bit(HNS3_NIC_STATE_DOWN, &priv->state); while (j--) hns3_tqp_disable(h->kinfo.tqp[j]); @@ -377,6 +380,29 @@ out_start_err: return ret; } +static void hns3_config_xps(struct hns3_nic_priv *priv) +{ + int i; + + for (i = 0; i < priv->vector_num; i++) { + struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i]; + struct hns3_enet_ring *ring = tqp_vector->tx_group.ring; + + while (ring) { + int ret; + + ret = netif_set_xps_queue(priv->netdev, + &tqp_vector->affinity_mask, + ring->tqp->tqp_index); + if (ret) + netdev_warn(priv->netdev, + "set xps queue failed: %d", ret); + + ring = ring->next; + } + } +} + static int hns3_nic_net_open(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); @@ -409,6 +435,7 @@ static int hns3_nic_net_open(struct net_device *netdev) if (h->ae_algo->ops->set_timer_task) h->ae_algo->ops->set_timer_task(priv->ae_handle, true); + hns3_config_xps(priv); return 0; } @@ -506,7 +533,7 @@ static u8 hns3_get_netdev_flags(struct net_device *netdev) u8 flags = 0; if (netdev->flags & IFF_PROMISC) { - flags = HNAE3_USER_UPE | HNAE3_USER_MPE; + flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE; } else { flags |= HNAE3_VLAN_FLTR; if (netdev->flags & IFF_ALLMULTI) @@ -541,13 +568,13 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev) } } - hns3_update_promisc_mode(netdev, new_flags); /* User mode Promisc mode enable and vlan filtering is disabled to * let all packets in. 
MAC-VLAN Table overflow Promisc enabled and * vlan fitering is enabled */ hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR); h->netdev_flags = new_flags; + hns3_update_promisc_mode(netdev, new_flags); } int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags) @@ -594,7 +621,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, return 0; ret = skb_cow_head(skb, 0); - if (ret) + if (unlikely(ret)) return ret; l3.hdr = skb_network_header(skb); @@ -633,7 +660,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, /* normal or tunnel packet*/ l4_offset = l4.hdr - skb->data; - hdr_len = (l4.tcp->doff * 4) + l4_offset; + hdr_len = (l4.tcp->doff << 2) + l4_offset; /* remove payload length from inner pseudo checksum when tso*/ l4_paylen = skb->len - l4_offset; @@ -642,8 +669,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, /* find the txbd field values */ *paylen = skb->len - hdr_len; - hnae3_set_bit(*type_cs_vlan_tso, - HNS3_TXD_TSO_B, 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1); /* get MSS for TSO */ *mss = skb_shinfo(skb)->gso_size; @@ -654,11 +680,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, u8 *il4_proto) { - union { - struct iphdr *v4; - struct ipv6hdr *v6; - unsigned char *hdr; - } l3; + union l3_hdr_info l3; unsigned char *l4_hdr; unsigned char *exthdr; u8 l4_proto_tmp; @@ -711,17 +733,8 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, u8 il4_proto, u32 *type_cs_vlan_tso, u32 *ol_type_vlan_len_msec) { - union { - struct iphdr *v4; - struct ipv6hdr *v6; - unsigned char *hdr; - } l3; - union { - struct tcphdr *tcp; - struct udphdr *udp; - struct gre_base_hdr *gre; - unsigned char *hdr; - } l4; + union l3_hdr_info l3; + union l4_hdr_info l4; unsigned char *l2_hdr; u8 l4_proto = ol4_proto; u32 ol2_len; @@ -735,21 +748,19 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, /* compute L2 header size for normal packet, defined in 2 Bytes */ l2_len = l3.hdr - skb->data; - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, l2_len >> 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1); /* tunnel packet*/ if (skb->encapsulation) { /* compute OL2 header size, defined in 2 Bytes */ ol2_len = l2_len; - hnae3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, ol2_len >> 1); + hns3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_L2LEN_S, ol2_len >> 1); /* compute OL3 header size, defined in 4 Bytes */ ol3_len = l4.hdr - l3.hdr; - hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M, - HNS3_TXD_L3LEN_S, ol3_len >> 2); + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, + ol3_len >> 2); /* MAC in UDP, MAC in GRE (0x6558)*/ if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) { @@ -758,17 +769,16 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, /* compute OL4 header size, defined in 4 Bytes. */ ol4_len = l2_hdr - l4.hdr; - hnae3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, - ol4_len >> 2); + hns3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_L4LEN_S, ol4_len >> 2); /* switch IP header ptr from outer to inner header */ l3.hdr = skb_inner_network_header(skb); /* compute inner l2 header size, defined in 2 Bytes. 
*/ l2_len = l3.hdr - l2_hdr; - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, l2_len >> 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, + l2_len >> 1); } else { /* skb packet types not supported by hardware, * txbd len fild doesn't be filled. @@ -784,24 +794,21 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, /* compute inner(/normal) L3 header size, defined in 4 Bytes */ l3_len = l4.hdr - l3.hdr; - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M, - HNS3_TXD_L3LEN_S, l3_len >> 2); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2); /* compute inner(/normal) L4 header size, defined in 4 Bytes */ switch (l4_proto) { case IPPROTO_TCP: - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, l4.tcp->doff); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, + l4.tcp->doff); break; case IPPROTO_SCTP: - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, - (sizeof(struct sctphdr) >> 2)); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, + (sizeof(struct sctphdr) >> 2)); break; case IPPROTO_UDP: - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, - (sizeof(struct udphdr) >> 2)); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, + (sizeof(struct udphdr) >> 2)); break; default: /* skb packet types not supported by hardware, @@ -820,12 +827,7 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, static bool hns3_tunnel_csum_bug(struct sk_buff *skb) { #define IANA_VXLAN_PORT 4789 - union { - struct tcphdr *tcp; - struct udphdr *udp; - struct gre_base_hdr *gre; - unsigned char *hdr; - } l4; + union l4_hdr_info l4; l4.hdr = skb_transport_header(skb); @@ -841,11 +843,7 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, u8 il4_proto, u32 *type_cs_vlan_tso, u32 *ol_type_vlan_len_msec) { - union { - struct iphdr *v4; - struct ipv6hdr *v6; - unsigned char *hdr; - } l3; + union l3_hdr_info l3; u32 l4_proto = ol4_proto; l3.hdr = skb_network_header(skb); @@ -855,34 +853,30 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, /* define outer network header type.*/ if (skb->protocol == htons(ETH_P_IP)) { if (skb_is_gso(skb)) - hnae3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_OL3T_M, - HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV4_CSUM); + hns3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_CSUM); else - hnae3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_OL3T_M, - HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV4_NO_CSUM); + hns3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_NO_CSUM); } else if (skb->protocol == htons(ETH_P_IPV6)) { - hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M, - HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6); + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV6); } /* define tunnel type(OL4).*/ switch (l4_proto) { case IPPROTO_UDP: - hnae3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_TUNTYPE_M, - HNS3_TXD_TUNTYPE_S, - HNS3_TUN_MAC_IN_UDP); + hns3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_TUNTYPE_S, + HNS3_TUN_MAC_IN_UDP); break; case IPPROTO_GRE: - hnae3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_TUNTYPE_M, - HNS3_TXD_TUNTYPE_S, - HNS3_TUN_NVGRE); + hns3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_TUNTYPE_S, + HNS3_TUN_NVGRE); break; default: /* drop the skb tunnel packet if hardware don't support, @@ -903,43 +897,37 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, } if (l3.v4->version == 4) { - 
hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, - HNS3_TXD_L3T_S, HNS3_L3T_IPV4); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, + HNS3_L3T_IPV4); /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. */ if (skb_is_gso(skb)) - hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); } else if (l3.v6->version == 6) { - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, - HNS3_TXD_L3T_S, HNS3_L3T_IPV6); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, + HNS3_L3T_IPV6); } switch (l4_proto) { case IPPROTO_TCP: - hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); - hnae3_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_TCP); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, + HNS3_L4T_TCP); break; case IPPROTO_UDP: if (hns3_tunnel_csum_bug(skb)) break; - hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); - hnae3_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_UDP); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, + HNS3_L4T_UDP); break; case IPPROTO_SCTP: - hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); - hnae3_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_SCTP); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, + HNS3_L4T_SCTP); break; default: /* drop the skb tunnel packet if hardware don't support, @@ -961,11 +949,8 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) { /* Config bd buffer end */ - hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M, - HNS3_TXD_BDTYPE_S, 0); - hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); - hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); - hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); + hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); + hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); } static int hns3_fill_desc_vtags(struct sk_buff *skb, @@ -998,10 +983,10 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb, * and use inner_vtag in one tag case. 
*/ if (skb->protocol == htons(ETH_P_8021Q)) { - hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1); + hns3_set_field(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1); *out_vtag = vlan_tag; } else { - hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1); + hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1); *inner_vtag = vlan_tag; } } else if (skb->protocol == htons(ETH_P_8021Q)) { @@ -1009,7 +994,7 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb, int rc; rc = skb_cow_head(skb, 0); - if (rc < 0) + if (unlikely(rc < 0)) return rc; vhdr = (struct vlan_ethhdr *)skb->data; vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7) @@ -1026,26 +1011,21 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; struct hns3_desc *desc = &ring->desc[ring->next_to_use]; struct device *dev = ring_to_dev(ring); - u32 ol_type_vlan_len_msec = 0; u16 bdtp_fe_sc_vld_ra_ri = 0; struct skb_frag_struct *frag; unsigned int frag_buf_num; - u32 type_cs_vlan_tso = 0; - struct sk_buff *skb; - u16 inner_vtag = 0; - u16 out_vtag = 0; - unsigned int k; - int sizeoflast; - u32 paylen = 0; + int k, sizeoflast; dma_addr_t dma; - u16 mss = 0; - u8 ol4_proto; - u8 il4_proto; - int ret; if (type == DESC_TYPE_SKB) { - skb = (struct sk_buff *)priv; - paylen = skb->len; + struct sk_buff *skb = (struct sk_buff *)priv; + u32 ol_type_vlan_len_msec = 0; + u32 type_cs_vlan_tso = 0; + u32 paylen = skb->len; + u16 inner_vtag = 0; + u16 out_vtag = 0; + u16 mss = 0; + int ret; ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso, &ol_type_vlan_len_msec, @@ -1054,10 +1034,12 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, return ret; if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 ol4_proto, il4_proto; + skb_reset_mac_len(skb); ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); - if (ret) + if (unlikely(ret)) return ret; hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto, &type_cs_vlan_tso, @@ -1065,12 +1047,12 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto, &type_cs_vlan_tso, &ol_type_vlan_len_msec); - if (ret) + if (unlikely(ret)) return ret; ret = hns3_set_tso(skb, &paylen, &mss, &type_cs_vlan_tso); - if (ret) + if (unlikely(ret)) return ret; } @@ -1090,15 +1072,15 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); } - if (dma_mapping_error(ring->dev, dma)) { + if (unlikely(dma_mapping_error(ring->dev, dma))) { ring->stats.sw_err_cnt++; return -ENOMEM; } desc_cb->length = size; - frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; - sizeoflast = size % HNS3_MAX_BD_SIZE; + frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET; + sizeoflast = size & HNS3_TX_LAST_SIZE_M; sizeoflast = sizeoflast ? 
sizeoflast : HNS3_MAX_BD_SIZE; /* When frag size is bigger than hardware limit, split this frag */ @@ -1133,6 +1115,7 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum, struct hns3_enet_ring *ring) { struct sk_buff *skb = *out_skb; + struct sk_buff *new_skb = NULL; struct skb_frag_struct *frag; int bdnum_for_frag; int frag_num; @@ -1141,21 +1124,34 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum, int i; size = skb_headlen(skb); - buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; + buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET; frag_num = skb_shinfo(skb)->nr_frags; for (i = 0; i < frag_num; i++) { frag = &skb_shinfo(skb)->frags[i]; size = skb_frag_size(frag); - bdnum_for_frag = - (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; - if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG) + bdnum_for_frag = (size + HNS3_MAX_BD_SIZE - 1) >> + HNS3_MAX_BD_SIZE_OFFSET; + if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)) return -ENOMEM; buf_num += bdnum_for_frag; } - if (buf_num > ring_space(ring)) + if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) { + buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) >> + HNS3_MAX_BD_SIZE_OFFSET; + if (ring_space(ring) < buf_num) + return -EBUSY; + /* manual split the send packet */ + new_skb = skb_copy(skb, GFP_ATOMIC); + if (!new_skb) + return -ENOMEM; + dev_kfree_skb_any(skb); + *out_skb = new_skb; + } + + if (unlikely(ring_space(ring) < buf_num)) return -EBUSY; *bnum = buf_num; @@ -1166,11 +1162,24 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum, struct hns3_enet_ring *ring) { struct sk_buff *skb = *out_skb; + struct sk_buff *new_skb = NULL; int buf_num; /* No. of segments (plus a header) */ buf_num = skb_shinfo(skb)->nr_frags + 1; + if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) { + buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; + if (ring_space(ring) < buf_num) + return -EBUSY; + /* manual split the send packet */ + new_skb = skb_copy(skb, GFP_ATOMIC); + if (!new_skb) + return -ENOMEM; + dev_kfree_skb_any(skb); + *out_skb = new_skb; + } + if (unlikely(ring_space(ring) < buf_num)) return -EBUSY; @@ -1252,9 +1261,9 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) next_to_use_head = ring->next_to_use; - ret = priv->ops.fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0, - DESC_TYPE_SKB); - if (ret) + ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0, + DESC_TYPE_SKB); + if (unlikely(ret)) goto head_fill_err; next_to_use_frag = ring->next_to_use; @@ -1263,11 +1272,11 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) frag = &skb_shinfo(skb)->frags[i - 1]; size = skb_frag_size(frag); - ret = priv->ops.fill_desc(ring, frag, size, - seg_num - 1 == i ? 1 : 0, - DESC_TYPE_PAGE); + ret = hns3_fill_desc(ring, frag, size, + seg_num - 1 == i ? 
1 : 0, + DESC_TYPE_PAGE); - if (ret) + if (unlikely(ret)) goto frag_fill_err; } @@ -1344,6 +1353,7 @@ static int hns3_nic_set_features(struct net_device *netdev, netdev_features_t changed = netdev->features ^ features; struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = priv->ae_handle; + bool enable; int ret; if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) { @@ -1354,38 +1364,29 @@ static int hns3_nic_set_features(struct net_device *netdev, } if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) { - if (features & NETIF_F_GRO_HW) - ret = h->ae_algo->ops->set_gro_en(h, true); - else - ret = h->ae_algo->ops->set_gro_en(h, false); + enable = !!(features & NETIF_F_GRO_HW); + ret = h->ae_algo->ops->set_gro_en(h, enable); if (ret) return ret; } if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) && h->ae_algo->ops->enable_vlan_filter) { - if (features & NETIF_F_HW_VLAN_CTAG_FILTER) - h->ae_algo->ops->enable_vlan_filter(h, true); - else - h->ae_algo->ops->enable_vlan_filter(h, false); + enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER); + h->ae_algo->ops->enable_vlan_filter(h, enable); } if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && h->ae_algo->ops->enable_hw_strip_rxvtag) { - if (features & NETIF_F_HW_VLAN_CTAG_RX) - ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true); - else - ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false); - + enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); + ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable); if (ret) return ret; } if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) { - if (features & NETIF_F_NTUPLE) - h->ae_algo->ops->enable_fd(h, true); - else - h->ae_algo->ops->enable_fd(h, false); + enable = !!(features & NETIF_F_NTUPLE); + h->ae_algo->ops->enable_fd(h, enable); } netdev->features = features; @@ -1399,7 +1400,12 @@ static void hns3_nic_get_stats64(struct net_device *netdev, int queue_num = priv->ae_handle->kinfo.num_tqps; struct hnae3_handle *handle = priv->ae_handle; struct hns3_enet_ring *ring; + u64 rx_length_errors = 0; + u64 rx_crc_errors = 0; + u64 rx_multicast = 0; unsigned int start; + u64 tx_errors = 0; + u64 rx_errors = 0; unsigned int idx; u64 tx_bytes = 0; u64 rx_bytes = 0; @@ -1420,8 +1426,8 @@ static void hns3_nic_get_stats64(struct net_device *netdev, start = u64_stats_fetch_begin_irq(&ring->syncp); tx_bytes += ring->stats.tx_bytes; tx_pkts += ring->stats.tx_pkts; - tx_drop += ring->stats.tx_busy; tx_drop += ring->stats.sw_err_cnt; + tx_errors += ring->stats.sw_err_cnt; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); /* fetch the rx stats */ @@ -1431,8 +1437,13 @@ static void hns3_nic_get_stats64(struct net_device *netdev, rx_bytes += ring->stats.rx_bytes; rx_pkts += ring->stats.rx_pkts; rx_drop += ring->stats.non_vld_descs; - rx_drop += ring->stats.err_pkt_len; rx_drop += ring->stats.l2_err; + rx_errors += ring->stats.non_vld_descs; + rx_errors += ring->stats.l2_err; + rx_crc_errors += ring->stats.l2_err; + rx_crc_errors += ring->stats.l3l4_csum_err; + rx_multicast += ring->stats.rx_multicast; + rx_length_errors += ring->stats.err_pkt_len; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); } @@ -1441,15 +1452,15 @@ static void hns3_nic_get_stats64(struct net_device *netdev, stats->rx_bytes = rx_bytes; stats->rx_packets = rx_pkts; - stats->rx_errors = netdev->stats.rx_errors; - stats->multicast = netdev->stats.multicast; - stats->rx_length_errors = netdev->stats.rx_length_errors; - stats->rx_crc_errors = netdev->stats.rx_crc_errors; + stats->rx_errors = rx_errors; + 
stats->multicast = rx_multicast; + stats->rx_length_errors = rx_length_errors; + stats->rx_crc_errors = rx_crc_errors; stats->rx_missed_errors = netdev->stats.rx_missed_errors; - stats->tx_errors = netdev->stats.tx_errors; - stats->rx_dropped = rx_drop + netdev->stats.rx_dropped; - stats->tx_dropped = tx_drop + netdev->stats.tx_dropped; + stats->tx_errors = tx_errors; + stats->rx_dropped = rx_drop; + stats->tx_dropped = tx_drop; stats->collisions = netdev->stats.collisions; stats->rx_over_errors = netdev->stats.rx_over_errors; stats->rx_frame_errors = netdev->stats.rx_frame_errors; @@ -1472,8 +1483,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data) u8 tc = mqprio_qopt->qopt.num_tc; u16 mode = mqprio_qopt->mode; u8 hw = mqprio_qopt->qopt.hw; - bool if_running; - int ret; if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS && mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0))) @@ -1485,24 +1494,8 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data) if (!netdev) return -EINVAL; - if_running = netif_running(netdev); - if (if_running) { - hns3_nic_net_stop(netdev); - msleep(100); - } - - ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? + return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP; - if (ret) - goto out; - - ret = hns3_nic_set_real_num_queue(netdev); - -out: - if (if_running) - hns3_nic_net_open(netdev); - - return ret; } static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, @@ -1755,9 +1748,13 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hns3_get_dev_capability(pdev, ae_dev); pci_set_drvdata(pdev, ae_dev); - hnae3_register_ae_dev(ae_dev); + ret = hnae3_register_ae_dev(ae_dev); + if (ret) { + devm_kfree(&pdev->dev, ae_dev); + pci_set_drvdata(pdev, NULL); + } - return 0; + return ret; } /* hns3_remove - Device removal routine @@ -1771,6 +1768,7 @@ static void hns3_remove(struct pci_dev *pdev) hns3_disable_sriov(pdev); hnae3_unregister_ae_dev(ae_dev); + pci_set_drvdata(pdev, NULL); } /** @@ -1935,8 +1933,7 @@ static void hns3_set_default_feature(struct net_device *netdev) NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC; if (pdev->revision >= 0x21) { - netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | - NETIF_F_GRO_HW; + netdev->hw_features |= NETIF_F_GRO_HW; netdev->features |= NETIF_F_GRO_HW; if (!(h->flags & HNAE3_SUPPORT_VF)) { @@ -2041,9 +2038,8 @@ static int hns3_alloc_desc(struct hns3_enet_ring *ring) { int size = ring->desc_num * sizeof(ring->desc[0]); - ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size, - &ring->desc_dma_addr, - GFP_KERNEL); + ring->desc = dma_alloc_coherent(ring_to_dev(ring), size, + &ring->desc_dma_addr, GFP_KERNEL); if (!ring->desc) return -ENOMEM; @@ -2322,13 +2318,12 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, } /* check if hardware has done checksum */ - if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B)) + if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B))) return; - if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) || - hnae3_get_bit(l234info, HNS3_RXD_L4E_B) || - hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) || - hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) { + if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) || + BIT(HNS3_RXD_OL3E_B) || + BIT(HNS3_RXD_OL4E_B)))) { u64_stats_update_begin(&ring->syncp); ring->stats.l3l4_csum_err++; u64_stats_update_end(&ring->syncp); @@ -2336,11 +2331,6 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct 
sk_buff *skb, return; } - l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, - HNS3_RXD_L3ID_S); - l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, - HNS3_RXD_L4ID_S); - ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S); switch (ol4_type) { @@ -2349,6 +2339,11 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, skb->csum_level = 1; /* fall through */ case HNS3_OL4_TYPE_NO_TUN: + l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, + HNS3_RXD_L3ID_S); + l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, + HNS3_RXD_L4ID_S); + /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ if ((l3_type == HNS3_L3_TYPE_IPV4 || l3_type == HNS3_L3_TYPE_IPV6) && @@ -2473,11 +2468,11 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc, bd_base_info = le32_to_cpu(desc->rx.bd_base_info); } - while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) { + while (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { desc = &ring->desc[ring->next_to_clean]; desc_cb = &ring->desc_cb[ring->next_to_clean]; bd_base_info = le32_to_cpu(desc->rx.bd_base_info); - if (!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)) + if (!(bd_base_info & BIT(HNS3_RXD_VLD_B))) return -ENXIO; if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) { @@ -2573,6 +2568,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, struct sk_buff **out_skb) { struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + enum hns3_pkt_l2t_type l2_frame_type; struct sk_buff *skb = ring->skb; struct hns3_desc_cb *desc_cb; struct hns3_desc *desc; @@ -2590,7 +2586,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, bd_base_info = le32_to_cpu(desc->rx.bd_base_info); /* Check valid BD */ - if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) + if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) return -ENXIO; if (!skb) @@ -2653,7 +2649,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, vlan_tag); } - if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { + if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) { u64_stats_update_begin(&ring->syncp); ring->stats.non_vld_descs++; u64_stats_update_end(&ring->syncp); @@ -2663,25 +2659,26 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, } if (unlikely((!desc->rx.pkt_len) || - hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) { + (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | + BIT(HNS3_RXD_L2E_B))))) { u64_stats_update_begin(&ring->syncp); - ring->stats.err_pkt_len++; + if (l234info & BIT(HNS3_RXD_L2E_B)) + ring->stats.l2_err++; + else + ring->stats.err_pkt_len++; u64_stats_update_end(&ring->syncp); dev_kfree_skb_any(skb); return -EFAULT; } - if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) { - u64_stats_update_begin(&ring->syncp); - ring->stats.l2_err++; - u64_stats_update_end(&ring->syncp); - - dev_kfree_skb_any(skb); - return -EFAULT; - } + l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M, + HNS3_RXD_DMAC_S); u64_stats_update_begin(&ring->syncp); + if (l2_frame_type == HNS3_L2_TYPE_MULTICAST) + ring->stats.rx_multicast++; + ring->stats.rx_pkts++; ring->stats.rx_bytes += skb->len; u64_stats_update_end(&ring->syncp); @@ -2770,7 +2767,7 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) u32 time_passed_ms; u16 new_int_gl; - if (!ring_group->coal.int_gl || !tqp_vector->last_jiffies) + if (!tqp_vector->last_jiffies) return false; if (ring_group->total_packets == 0) { @@ -2873,7 +2870,7 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) } if 
(tx_group->coal.gl_adapt_enable) { - tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group); + tx_update = hns3_get_new_int_gl(tx_group); if (tx_update) hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_group->coal.int_gl); @@ -3176,25 +3173,23 @@ static void hns3_clear_ring_group(struct hns3_enet_ring_group *group) group->count = 0; } -static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) +static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) { struct hnae3_ring_chain_node vector_ring_chain; struct hnae3_handle *h = priv->ae_handle; struct hns3_enet_tqp_vector *tqp_vector; - int i, ret; + int i; for (i = 0; i < priv->vector_num; i++) { tqp_vector = &priv->tqp_vector[i]; - ret = hns3_get_vector_ring_chain(tqp_vector, - &vector_ring_chain); - if (ret) - return ret; + if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring) + continue; + + hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain); - ret = h->ae_algo->ops->unmap_ring_from_vector(h, + h->ae_algo->ops->unmap_ring_from_vector(h, tqp_vector->vector_irq, &vector_ring_chain); - if (ret) - return ret; hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); @@ -3206,13 +3201,10 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED; } - priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED; hns3_clear_ring_group(&tqp_vector->rx_group); hns3_clear_ring_group(&tqp_vector->tx_group); netif_napi_del(&priv->tqp_vector[i].napi); } - - return 0; } static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) @@ -3241,16 +3233,19 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, int queue_num = priv->ae_handle->kinfo.num_tqps; struct pci_dev *pdev = priv->ae_handle->pdev; struct hns3_enet_ring *ring; + int desc_num; ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL); if (!ring) return -ENOMEM; if (ring_type == HNAE3_RING_TYPE_TX) { + desc_num = priv->ae_handle->kinfo.num_tx_desc; ring_data[q->tqp_index].ring = ring; ring_data[q->tqp_index].queue_index = q->tqp_index; ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET; } else { + desc_num = priv->ae_handle->kinfo.num_rx_desc; ring_data[q->tqp_index + queue_num].ring = ring; ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index; ring->io_base = q->io_base; @@ -3264,7 +3259,7 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, ring->dev = priv->dev; ring->desc_dma_addr = 0; ring->buf_size = q->buf_size; - ring->desc_num = q->desc_num; + ring->desc_num = desc_num; ring->next_to_use = 0; ring->next_to_clean = 0; @@ -3376,6 +3371,11 @@ static void hns3_fini_ring(struct hns3_enet_ring *ring) ring->desc_cb = NULL; ring->next_to_clean = 0; ring->next_to_use = 0; + ring->pending_buf = 0; + if (ring->skb) { + dev_kfree_skb_any(ring->skb); + ring->skb = NULL; + } } static int hns3_buf_size2type(u32 buf_size) @@ -3516,6 +3516,25 @@ static int hns3_init_mac_addr(struct net_device *netdev, bool init) return ret; } +static int hns3_init_phy(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + int ret = 0; + + if (h->ae_algo->ops->mac_connect_phy) + ret = h->ae_algo->ops->mac_connect_phy(h); + + return ret; +} + +static void hns3_uninit_phy(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo->ops->mac_disconnect_phy) + h->ae_algo->ops->mac_disconnect_phy(h); +} + static int hns3_restore_fd_rules(struct net_device *netdev) { 
struct hnae3_handle *h = hns3_get_handle(netdev); @@ -3539,7 +3558,6 @@ static void hns3_nic_set_priv_ops(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); - priv->ops.fill_desc = hns3_fill_desc; if ((netdev->features & NETIF_F_TSO) || (netdev->features & NETIF_F_TSO6)) priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; @@ -3582,6 +3600,7 @@ static int hns3_client_init(struct hnae3_handle *handle) priv->netdev = netdev; priv->ae_handle = handle; priv->tx_timeout_count = 0; + set_bit(HNS3_NIC_STATE_DOWN, &priv->state); handle->kinfo.netdev = netdev; handle->priv = (void *)priv; @@ -3624,6 +3643,10 @@ static int hns3_client_init(struct hnae3_handle *handle) goto out_init_ring_data; } + ret = hns3_init_phy(netdev); + if (ret) + goto out_init_phy; + ret = register_netdev(netdev); if (ret) { dev_err(priv->dev, "probe register netdev fail!\n"); @@ -3633,7 +3656,7 @@ static int hns3_client_init(struct hnae3_handle *handle) ret = hns3_client_start(handle); if (ret) { dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); - goto out_reg_netdev_fail; + goto out_client_start; } hns3_dcbnl_setup(handle); @@ -3647,9 +3670,14 @@ static int hns3_client_init(struct hnae3_handle *handle) return ret; +out_client_start: + unregister_netdev(netdev); out_reg_netdev_fail: + hns3_uninit_phy(netdev); +out_init_phy: + hns3_uninit_all_ring(priv); out_init_ring_data: - (void)hns3_nic_uninit_vector_data(priv); + hns3_nic_uninit_vector_data(priv); out_init_vector_data: hns3_nic_dealloc_vector_data(priv); out_alloc_vector_data: @@ -3682,9 +3710,9 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) hns3_force_clear_all_rx_ring(handle); - ret = hns3_nic_uninit_vector_data(priv); - if (ret) - netdev_err(netdev, "uninit vector error\n"); + hns3_uninit_phy(netdev); + + hns3_nic_uninit_vector_data(priv); ret = hns3_nic_dealloc_vector_data(priv); if (ret) @@ -3726,8 +3754,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct net_device *ndev = kinfo->netdev; - bool if_running; - int ret; if (tc > HNAE3_MAX_TC) return -EINVAL; @@ -3735,25 +3761,7 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) if (!ndev) return -ENODEV; - if_running = netif_running(ndev); - - if (if_running) { - (void)hns3_nic_net_stop(ndev); - msleep(100); - } - - ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ? - kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP; - if (ret) - goto err_out; - - ret = hns3_nic_set_real_num_queue(ndev); - -err_out: - if (if_running) - (void)hns3_nic_net_open(ndev); - - return ret; + return hns3_nic_set_real_num_queue(ndev); } static int hns3_recover_hw_addr(struct net_device *ndev) @@ -4014,41 +4022,18 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) { struct net_device *netdev = handle->kinfo.netdev; struct hns3_nic_priv *priv = netdev_priv(netdev); - bool vlan_filter_enable; int ret; - ret = hns3_init_mac_addr(netdev, false); - if (ret) - return ret; - - ret = hns3_recover_hw_addr(netdev); - if (ret) - return ret; - - ret = hns3_update_promisc_mode(netdev, handle->netdev_flags); - if (ret) - return ret; - - vlan_filter_enable = netdev->flags & IFF_PROMISC ? 
false : true; - hns3_enable_vlan_filter(netdev, vlan_filter_enable); - - /* Hardware table is only clear when pf resets */ - if (!(handle->flags & HNAE3_SUPPORT_VF)) { - ret = hns3_restore_vlan(netdev); - if (ret) - return ret; - } + /* Carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); - ret = hns3_restore_fd_rules(netdev); + ret = hns3_get_ring_config(priv); if (ret) return ret; - /* Carrier off reporting is important to ethtool even BEFORE open */ - netif_carrier_off(netdev); - ret = hns3_nic_alloc_vector_data(priv); if (ret) - return ret; + goto err_put_ring; hns3_restore_coal(priv); @@ -4069,10 +4054,44 @@ err_uninit_vector: priv->ring_data = NULL; err_dealloc_vector: hns3_nic_dealloc_vector_data(priv); +err_put_ring: + hns3_put_ring_config(priv); + priv->ring_data = NULL; return ret; } +static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle) +{ + struct net_device *netdev = handle->kinfo.netdev; + bool vlan_filter_enable; + int ret; + + ret = hns3_init_mac_addr(netdev, false); + if (ret) + return ret; + + ret = hns3_recover_hw_addr(netdev); + if (ret) + return ret; + + ret = hns3_update_promisc_mode(netdev, handle->netdev_flags); + if (ret) + return ret; + + vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true; + hns3_enable_vlan_filter(netdev, vlan_filter_enable); + + /* Hardware table is only clear when pf resets */ + if (!(handle->flags & HNAE3_SUPPORT_VF)) { + ret = hns3_restore_vlan(netdev); + if (ret) + return ret; + } + + return hns3_restore_fd_rules(netdev); +} + static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) { struct net_device *netdev = handle->kinfo.netdev; @@ -4086,11 +4105,7 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) hns3_force_clear_all_rx_ring(handle); - ret = hns3_nic_uninit_vector_data(priv); - if (ret) { - netdev_err(netdev, "uninit vector error\n"); - return ret; - } + hns3_nic_uninit_vector_data(priv); hns3_store_coal(priv); @@ -4102,6 +4117,9 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) if (ret) netdev_err(netdev, "uninit ring error\n"); + hns3_put_ring_config(priv); + priv->ring_data = NULL; + clear_bit(HNS3_NIC_STATE_INITED, &priv->state); return ret; @@ -4125,6 +4143,9 @@ static int hns3_reset_notify(struct hnae3_handle *handle, case HNAE3_UNINIT_CLIENT: ret = hns3_reset_notify_uninit_enet(handle); break; + case HNAE3_RESTORE_CLIENT: + ret = hns3_reset_notify_restore_enet(handle); + break; default: break; } @@ -4132,57 +4153,12 @@ static int hns3_reset_notify(struct hnae3_handle *handle, return ret; } -static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = hns3_get_handle(netdev); - int ret; - - ret = h->ae_algo->ops->set_channels(h, new_tqp_num); - if (ret) - return ret; - - ret = hns3_get_ring_config(priv); - if (ret) - return ret; - - ret = hns3_nic_alloc_vector_data(priv); - if (ret) - goto err_alloc_vector; - - hns3_restore_coal(priv); - - ret = hns3_nic_init_vector_data(priv); - if (ret) - goto err_uninit_vector; - - ret = hns3_init_all_ring(priv); - if (ret) - goto err_put_ring; - - return 0; - -err_put_ring: - hns3_put_ring_config(priv); -err_uninit_vector: - hns3_nic_uninit_vector_data(priv); -err_alloc_vector: - hns3_nic_dealloc_vector_data(priv); - return ret; -} - -static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num) -{ - return (new_tqp_num / num_tc) * num_tc; -} - int 
hns3_set_channels(struct net_device *netdev, struct ethtool_channels *ch) { - struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_knic_private_info *kinfo = &h->kinfo; - bool if_running = netif_running(netdev); + bool rxfh_configured = netif_is_rxfh_configured(netdev); u32 new_tqp_num = ch->combined_count; u16 org_tqp_num; int ret; @@ -4191,39 +4167,29 @@ int hns3_set_channels(struct net_device *netdev, return -EINVAL; if (new_tqp_num > hns3_get_max_available_channels(h) || - new_tqp_num < kinfo->num_tc) { + new_tqp_num < 1) { dev_err(&netdev->dev, - "Change tqps fail, the tqp range is from %d to %d", - kinfo->num_tc, + "Change tqps fail, the tqp range is from 1 to %d", hns3_get_max_available_channels(h)); return -EINVAL; } - new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num); - if (kinfo->num_tqps == new_tqp_num) + if (kinfo->rss_size == new_tqp_num) return 0; - if (if_running) - hns3_nic_net_stop(netdev); - - ret = hns3_nic_uninit_vector_data(priv); - if (ret) { - dev_err(&netdev->dev, - "Unbind vector with tqp fail, nothing is changed"); - goto open_netdev; - } - - hns3_store_coal(priv); - - hns3_nic_dealloc_vector_data(priv); + ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT); + if (ret) + return ret; - hns3_uninit_all_ring(priv); - hns3_put_ring_config(priv); + ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT); + if (ret) + return ret; org_tqp_num = h->kinfo.num_tqps; - ret = hns3_modify_tqp_num(netdev, new_tqp_num); + ret = h->ae_algo->ops->set_channels(h, new_tqp_num, rxfh_configured); if (ret) { - ret = hns3_modify_tqp_num(netdev, org_tqp_num); + ret = h->ae_algo->ops->set_channels(h, org_tqp_num, + rxfh_configured); if (ret) { /* If revert to old tqp failed, fatal error occurred */ dev_err(&netdev->dev, @@ -4233,12 +4199,11 @@ int hns3_set_channels(struct net_device *netdev, dev_info(&netdev->dev, "Change tqp num fail, Revert to old tqp num"); } + ret = hns3_reset_notify(h, HNAE3_INIT_CLIENT); + if (ret) + return ret; -open_netdev: - if (if_running) - hns3_nic_net_open(netdev); - - return ret; + return hns3_reset_notify(h, HNAE3_UP_CLIENT); } static const struct hnae3_client_ops client_ops = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index e55995e93bb0..1db0bd41d209 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -74,7 +74,7 @@ enum hns3_nic_state { #define HNS3_RING_NAME_LEN 16 #define HNS3_BUFFER_SIZE_2048 2048 #define HNS3_RING_MAX_PENDING 32768 -#define HNS3_RING_MIN_PENDING 8 +#define HNS3_RING_MIN_PENDING 24 #define HNS3_RING_BD_MULTIPLE 8 /* max frame size of mac */ #define HNS3_MAC_MAX_FRAME 9728 @@ -184,6 +184,8 @@ enum hns3_nic_state { #define HNS3_TXD_MSS_S 0 #define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S) +#define HNS3_TX_LAST_SIZE_M 0xffff + #define HNS3_VECTOR_TX_IRQ BIT_ULL(0) #define HNS3_VECTOR_RX_IRQ BIT_ULL(1) @@ -191,6 +193,7 @@ enum hns3_nic_state { #define HNS3_VECTOR_INITED 1 #define HNS3_MAX_BD_SIZE 65535 +#define HNS3_MAX_BD_SIZE_OFFSET 16 #define HNS3_MAX_BD_PER_FRAG 8 #define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS @@ -202,6 +205,13 @@ enum hns3_nic_state { #define HNS3_RING_EN_B 0 +enum hns3_pkt_l2t_type { + HNS3_L2_TYPE_UNICAST, + HNS3_L2_TYPE_MULTICAST, + HNS3_L2_TYPE_BROADCAST, + HNS3_L2_TYPE_INVALID, +}; + enum hns3_pkt_l3t_type { HNS3_L3T_NONE, HNS3_L3T_IPV6, @@ -376,6 +386,7 @@ struct ring_stats { u64 err_bd_num; u64 l2_err; u64 l3l4_csum_err; + 
u64 rx_multicast; }; }; }; @@ -412,7 +423,6 @@ struct hns3_enet_ring { unsigned char *va; /* first buffer address for current packet */ u32 flag; /* ring attribute */ - int irq_init_flag; int numa_node; cpumask_t affinity_mask; @@ -434,11 +444,8 @@ struct hns3_nic_ring_data { }; struct hns3_nic_ops { - int (*fill_desc)(struct hns3_enet_ring *ring, void *priv, - int size, int frag_end, enum hns_desc_type type); int (*maybe_stop_tx)(struct sk_buff **out_skb, int *bnum, struct hns3_enet_ring *ring); - void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum); }; enum hns3_flow_level_range { @@ -567,6 +574,7 @@ union l3_hdr_info { union l4_hdr_info { struct tcphdr *tcp; struct udphdr *udp; + struct gre_base_hdr *gre; unsigned char *hdr; }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index e678b6939da3..359d4731fb2d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -47,6 +47,7 @@ static const struct hns3_stats hns3_rxq_stats[] = { HNS3_TQP_STAT("err_bd_num", err_bd_num), HNS3_TQP_STAT("l2_err", l2_err), HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err), + HNS3_TQP_STAT("multicast", rx_multicast), }; #define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats) @@ -116,7 +117,7 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode) ret = hns3_lp_setup(ndev, loop_mode, true); usleep_range(10000, 20000); - return 0; + return ret; } static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode) @@ -333,10 +334,10 @@ static void hns3_self_test(struct net_device *ndev, continue; data[test_index] = hns3_lp_up(ndev, loop_type); - if (!data[test_index]) { + if (!data[test_index]) data[test_index] = hns3_lp_run_test(ndev, loop_type); - hns3_lp_down(ndev, loop_type); - } + + hns3_lp_down(ndev, loop_type); if (data[test_index]) eth_test->flags |= ETH_TEST_FL_FAILED; @@ -620,12 +621,11 @@ static int hns3_get_link_ksettings(struct net_device *netdev, hns3_get_ksettings(h, cmd); break; case HNAE3_MEDIA_TYPE_COPPER: - if (!netdev->phydev) - return -EOPNOTSUPP; - cmd->base.port = PORT_TP; - phy_ethtool_ksettings_get(netdev->phydev, cmd); - + if (!netdev->phydev) + hns3_get_ksettings(h, cmd); + else + phy_ethtool_ksettings_get(netdev->phydev, cmd); break; default: @@ -748,15 +748,19 @@ static int hns3_get_rxnfc(struct net_device *netdev, } static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, - u32 new_desc_num) + u32 tx_desc_num, u32 rx_desc_num) { struct hnae3_handle *h = priv->ae_handle; int i; - h->kinfo.num_desc = new_desc_num; + h->kinfo.num_tx_desc = tx_desc_num; + h->kinfo.num_rx_desc = rx_desc_num; - for (i = 0; i < h->kinfo.num_tqps * 2; i++) - priv->ring_data[i].ring->desc_num = new_desc_num; + for (i = 0; i < h->kinfo.num_tqps; i++) { + priv->ring_data[i].ring->desc_num = tx_desc_num; + priv->ring_data[i + h->kinfo.num_tqps].ring->desc_num = + rx_desc_num; + } return hns3_init_all_ring(priv); } @@ -767,7 +771,9 @@ static int hns3_set_ringparam(struct net_device *ndev, struct hns3_nic_priv *priv = netdev_priv(ndev); struct hnae3_handle *h = priv->ae_handle; bool if_running = netif_running(ndev); - u32 old_desc_num, new_desc_num; + u32 old_tx_desc_num, new_tx_desc_num; + u32 old_rx_desc_num, new_rx_desc_num; + int queue_num = h->kinfo.num_tqps; int ret; if (hns3_nic_resetting(ndev)) @@ -776,43 +782,41 @@ static int hns3_set_ringparam(struct net_device *ndev, if (param->rx_mini_pending || param->rx_jumbo_pending) return -EINVAL; 
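The remainder of this hns3_set_ringparam() hunk, below, drops the old rule that Tx and Rx depths must be equal and validates each queue depth independently; the hardware still requires buffer-descriptor counts in multiples of eight, so requested values are rounded up with ALIGN(). A minimal sketch of that validation, assuming the limits used in this patch (the demo_* names are hypothetical):

	#define DEMO_RING_MAX_PENDING	32768
	#define DEMO_RING_MIN_PENDING	24
	#define DEMO_RING_BD_MULTIPLE	8

	/* Range-check a requested ring depth and round it up to a
	 * hardware-acceptable multiple of eight BDs.
	 */
	static int demo_check_ring_depth(u32 requested, u32 *actual)
	{
		if (requested < DEMO_RING_MIN_PENDING ||
		    requested > DEMO_RING_MAX_PENDING)
			return -EINVAL;

		*actual = ALIGN(requested, DEMO_RING_BD_MULTIPLE);
		return 0;
	}
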
- if (param->tx_pending != param->rx_pending) { - netdev_err(ndev, - "Descriptors of tx and rx must be equal"); - return -EINVAL; - } - if (param->tx_pending > HNS3_RING_MAX_PENDING || - param->tx_pending < HNS3_RING_MIN_PENDING) { - netdev_err(ndev, - "Descriptors requested (Tx/Rx: %d) out of range [%d-%d]\n", - param->tx_pending, HNS3_RING_MIN_PENDING, - HNS3_RING_MAX_PENDING); + param->tx_pending < HNS3_RING_MIN_PENDING || + param->rx_pending > HNS3_RING_MAX_PENDING || + param->rx_pending < HNS3_RING_MIN_PENDING) { + netdev_err(ndev, "Queue depth out of range [%d-%d]\n", + HNS3_RING_MIN_PENDING, HNS3_RING_MAX_PENDING); return -EINVAL; } - new_desc_num = param->tx_pending; - /* Hardware requires that its descriptors must be multiple of eight */ - new_desc_num = ALIGN(new_desc_num, HNS3_RING_BD_MULTIPLE); - old_desc_num = h->kinfo.num_desc; - if (old_desc_num == new_desc_num) + new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE); + new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE); + old_tx_desc_num = priv->ring_data[0].ring->desc_num; + old_rx_desc_num = priv->ring_data[queue_num].ring->desc_num; + if (old_tx_desc_num == new_tx_desc_num && + old_rx_desc_num == new_rx_desc_num) return 0; netdev_info(ndev, - "Changing descriptor count from %d to %d.\n", - old_desc_num, new_desc_num); + "Changing Tx/Rx ring depth from %d/%d to %d/%d\n", + old_tx_desc_num, old_rx_desc_num, + new_tx_desc_num, new_rx_desc_num); if (if_running) - dev_close(ndev); + ndev->netdev_ops->ndo_stop(ndev); ret = hns3_uninit_all_ring(priv); if (ret) return ret; - ret = hns3_change_all_ring_bd_num(priv, new_desc_num); + ret = hns3_change_all_ring_bd_num(priv, new_tx_desc_num, + new_rx_desc_num); if (ret) { - ret = hns3_change_all_ring_bd_num(priv, old_desc_num); + ret = hns3_change_all_ring_bd_num(priv, old_tx_desc_num, + old_rx_desc_num); if (ret) { netdev_err(ndev, "Revert to old bd num fail, ret=%d.\n", ret); @@ -821,7 +825,7 @@ static int hns3_set_ringparam(struct net_device *ndev, } if (if_running) - ret = dev_open(ndev, NULL); + ret = ndev->netdev_ops->ndo_open(ndev); return ret; } @@ -1114,6 +1118,8 @@ static const struct ethtool_ops hns3vf_ethtool_ops = { .get_channels = hns3_get_channels, .get_coalesce = hns3_get_coalesce, .set_coalesce = hns3_set_coalesce, + .get_regs_len = hns3_get_regs_len, + .get_regs = hns3_get_regs, .get_link = hns3_get_link, }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 8af0cef5609b..3a093a92eac5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -39,9 +39,8 @@ static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring) { int size = ring->desc_num * sizeof(struct hclge_desc); - ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring), - size, &ring->desc_dma_addr, - GFP_KERNEL); + ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size, + &ring->desc_dma_addr, GFP_KERNEL); if (!ring->desc) return -ENOMEM; @@ -171,8 +170,12 @@ static bool hclge_is_special_opcode(u16 opcode) /* these commands have several descriptors, * and use the first one to save opcode and return value */ - u16 spec_opcode[3] = {HCLGE_OPC_STATS_64_BIT, - HCLGE_OPC_STATS_32_BIT, HCLGE_OPC_STATS_MAC}; + u16 spec_opcode[] = {HCLGE_OPC_STATS_64_BIT, + HCLGE_OPC_STATS_32_BIT, + HCLGE_OPC_STATS_MAC, + HCLGE_OPC_STATS_MAC_ALL, + HCLGE_OPC_QUERY_32_BIT_REG, + HCLGE_OPC_QUERY_64_BIT_REG}; int i; for (i = 0; i < 
ARRAY_SIZE(spec_opcode); i++) { @@ -183,6 +186,38 @@ static bool hclge_is_special_opcode(u16 opcode) return false; } +static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc, + int num, int ntc) +{ + u16 opcode, desc_ret; + int handle; + int retval; + + opcode = le16_to_cpu(desc[0].opcode); + for (handle = 0; handle < num; handle++) { + desc[handle] = hw->cmq.csq.desc[ntc]; + ntc++; + if (ntc >= hw->cmq.csq.desc_num) + ntc = 0; + } + if (likely(!hclge_is_special_opcode(opcode))) + desc_ret = le16_to_cpu(desc[num - 1].retval); + else + desc_ret = le16_to_cpu(desc[0].retval); + + if (desc_ret == HCLGE_CMD_EXEC_SUCCESS) + retval = 0; + else if (desc_ret == HCLGE_CMD_NO_AUTH) + retval = -EPERM; + else if (desc_ret == HCLGE_CMD_NOT_SUPPORTED) + retval = -EOPNOTSUPP; + else + retval = -EIO; + hw->cmq.last_status = desc_ret; + + return retval; +} + /** * hclge_cmd_send - send command to command queue * @hw: pointer to the hw struct @@ -200,7 +235,6 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) u32 timeout = 0; int handle = 0; int retval = 0; - u16 opcode, desc_ret; int ntc; spin_lock_bh(&hw->cmq.csq.lock); @@ -216,12 +250,11 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) * which will be use for hardware to write back */ ntc = hw->cmq.csq.next_to_use; - opcode = le16_to_cpu(desc[0].opcode); while (handle < num) { desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use]; *desc_to_use = desc[handle]; (hw->cmq.csq.next_to_use)++; - if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num) + if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num) hw->cmq.csq.next_to_use = 0; handle++; } @@ -247,27 +280,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) if (!complete) { retval = -EAGAIN; } else { - handle = 0; - while (handle < num) { - /* Get the result of hardware write back */ - desc_to_use = &hw->cmq.csq.desc[ntc]; - desc[handle] = *desc_to_use; - - if (likely(!hclge_is_special_opcode(opcode))) - desc_ret = le16_to_cpu(desc[handle].retval); - else - desc_ret = le16_to_cpu(desc[0].retval); - - if (desc_ret == HCLGE_CMD_EXEC_SUCCESS) - retval = 0; - else - retval = -EIO; - hw->cmq.last_status = desc_ret; - ntc++; - handle++; - if (ntc == hw->cmq.csq.desc_num) - ntc = 0; - } + retval = hclge_cmd_check_retval(hw, desc, num, ntc); } /* Clean the command send queue */ @@ -377,6 +390,20 @@ int hclge_cmd_init(struct hclge_dev *hdev) return 0; } +static void hclge_cmd_uninit_regs(struct hclge_hw *hw) +{ + hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0); +} + static void hclge_destroy_queue(struct hclge_cmq_ring *ring) { spin_lock(&ring->lock); @@ -389,3 +416,15 @@ void hclge_destroy_cmd_queue(struct hclge_hw *hw) hclge_destroy_queue(&hw->cmq.csq); hclge_destroy_queue(&hw->cmq.crq); } + +void hclge_cmd_uninit(struct hclge_dev *hdev) +{ + spin_lock_bh(&hdev->hw.cmq.csq.lock); + spin_lock(&hdev->hw.cmq.crq.lock); + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + hclge_cmd_uninit_regs(&hdev->hw); + spin_unlock(&hdev->hw.cmq.crq.lock); + 
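/*
 * Illustrative sketch, not part of the patch: hclge_cmd_check_retval()
 * above folds the firmware's per-descriptor return code into one errno,
 * now distinguishing -EPERM and -EOPNOTSUPP from a generic -EIO. The
 * enum values mirror hclge_cmd.h; the rest is assumed scaffolding.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

enum cmd_return_status {
        CMD_EXEC_SUCCESS = 0,
        CMD_NO_AUTH = 1,
        CMD_NOT_SUPPORTED = 2,
        CMD_QUEUE_FULL = 3,
};

/* Translate a device return code into a kernel-style negative errno. */
static int cmd_ret_to_errno(uint16_t desc_ret)
{
        switch (desc_ret) {
        case CMD_EXEC_SUCCESS:
                return 0;
        case CMD_NO_AUTH:
                return -EPERM;
        case CMD_NOT_SUPPORTED:
                return -EOPNOTSUPP;
        default:
                return -EIO;    /* QUEUE_FULL and anything unknown */
        }
}

int main(void)
{
        printf("%d\n", cmd_ret_to_errno(CMD_NO_AUTH)); /* prints -EPERM */
        return 0;
}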
spin_unlock_bh(&hdev->hw.cmq.csq.lock); + + hclge_destroy_cmd_queue(&hdev->hw); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index f23042b24c09..3714733c96d9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -39,7 +39,7 @@ struct hclge_cmq_ring { enum hclge_cmd_return_status { HCLGE_CMD_EXEC_SUCCESS = 0, HCLGE_CMD_NO_AUTH = 1, - HCLGE_CMD_NOT_EXEC = 2, + HCLGE_CMD_NOT_SUPPORTED = 2, HCLGE_CMD_QUEUE_FULL = 3, }; @@ -82,6 +82,8 @@ enum hclge_opcode_type { HCLGE_OPC_STATS_64_BIT = 0x0030, HCLGE_OPC_STATS_32_BIT = 0x0031, HCLGE_OPC_STATS_MAC = 0x0032, + HCLGE_OPC_QUERY_MAC_REG_NUM = 0x0033, + HCLGE_OPC_STATS_MAC_ALL = 0x0034, HCLGE_OPC_QUERY_REG_NUM = 0x0040, HCLGE_OPC_QUERY_32_BIT_REG = 0x0041, @@ -310,16 +312,16 @@ struct hclge_ctrl_vector_chain_cmd { u8 rsv; }; -#define HCLGE_TC_NUM 8 +#define HCLGE_MAX_TC_NUM 8 #define HCLGE_TC0_PRI_BUF_EN_B 15 /* Bit 15 indicate enable or not */ #define HCLGE_BUF_UNIT_S 7 /* Buf size is united by 128 bytes */ struct hclge_tx_buff_alloc_cmd { - __le16 tx_pkt_buff[HCLGE_TC_NUM]; + __le16 tx_pkt_buff[HCLGE_MAX_TC_NUM]; u8 tx_buff_rsv[8]; }; struct hclge_rx_priv_buff_cmd { - __le16 buf_num[HCLGE_TC_NUM]; + __le16 buf_num[HCLGE_MAX_TC_NUM]; __le16 shared_buf; u8 rsv[6]; }; @@ -365,7 +367,6 @@ struct hclge_priv_buf { u32 enable; /* Enable TC private buffer or not */ }; -#define HCLGE_MAX_TC_NUM 8 struct hclge_shared_buf { struct hclge_waterline self; struct hclge_tc_thrd tc_thrd[HCLGE_MAX_TC_NUM]; @@ -692,7 +693,9 @@ struct hclge_mac_vlan_remove_cmd { struct hclge_vlan_filter_ctrl_cmd { u8 vlan_type; u8 vlan_fe; - u8 rsv[22]; + u8 rsv1[2]; + u8 vf_id; + u8 rsv2[19]; }; struct hclge_vlan_filter_pf_cfg_cmd { @@ -974,6 +977,6 @@ enum hclge_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw, enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw, struct hclge_desc *desc); -void hclge_destroy_cmd_queue(struct hclge_hw *hw); +void hclge_cmd_uninit(struct hclge_dev *hdev); int hclge_cmd_queue_init(struct hclge_dev *hdev); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index f6323b2501dc..1161361a973b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -93,13 +93,11 @@ static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc, } } - for (i = 0; i < hdev->num_alloc_vport; i++) { - if (num_tc > hdev->vport[i].alloc_tqps) { - dev_err(&hdev->pdev->dev, - "allocated tqp(%u) checking failed, %u > tqp(%u)\n", - i, num_tc, hdev->vport[i].alloc_tqps); - return -EINVAL; - } + if (num_tc > hdev->vport[0].alloc_tqps) { + dev_err(&hdev->pdev->dev, + "allocated tqp checking failed, %u > tqp(%u)\n", + num_tc, hdev->vport[0].alloc_tqps); + return -EINVAL; } return 0; @@ -156,21 +154,15 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, return 0; } -static int hclge_map_update(struct hnae3_handle *h) +static int hclge_map_update(struct hclge_dev *hdev) { - struct hclge_vport *vport = hclge_get_vport(h); - struct hclge_dev *hdev = vport->back; int ret; - ret = hclge_tm_map_cfg(hdev); + ret = hclge_tm_schd_setup_hw(hdev); if (ret) return ret; - ret = hclge_tm_schd_mode_hw(hdev); - if (ret) - return ret; - - ret = hclge_pause_setup_hw(hdev); + ret = hclge_pause_setup_hw(hdev, false); if (ret) return ret; @@ -222,19 
+214,51 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets) if (ret) return ret; + if (map_changed) { + ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) + return ret; + + ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); + if (ret) + return ret; + } + hclge_tm_schd_info_update(hdev, num_tc); ret = hclge_ieee_ets_to_tm_info(hdev, ets); if (ret) - return ret; + goto err_out; if (map_changed) { + ret = hclge_map_update(hdev); + if (ret) + goto err_out; + ret = hclge_client_setup_tc(hdev); if (ret) + goto err_out; + + ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT); + if (ret) + return ret; + + ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT); + if (ret) return ret; } return hclge_tm_dwrr_cfg(hdev); + +err_out: + if (!map_changed) + return ret; + + if (hclge_notify_client(hdev, HNAE3_INIT_CLIENT)) + return ret; + + hclge_notify_client(hdev, HNAE3_UP_CLIENT); + return ret; } static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) @@ -283,6 +307,9 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE) return -EINVAL; + if (pfc->pfc_en == hdev->tm_info.pfc_en) + return 0; + prio_tc = hdev->tm_info.prio_tc; pfc_map = 0; @@ -295,12 +322,10 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) } } - if (pfc_map == hdev->tm_info.hw_pfc_map) - return 0; - hdev->tm_info.hw_pfc_map = pfc_map; + hdev->tm_info.pfc_en = pfc->pfc_en; - return hclge_pause_setup_hw(hdev); + return hclge_pause_setup_hw(hdev, false); } /* DCBX configuration */ @@ -345,12 +370,24 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc) if (ret) return -EINVAL; + ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) + return ret; + + ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); + if (ret) + return ret; + hclge_tm_schd_info_update(hdev, tc); hclge_tm_prio_tc_info_update(hdev, prio_tc); - ret = hclge_tm_init_hw(hdev); + ret = hclge_tm_init_hw(hdev, false); if (ret) - return ret; + goto err_out; + + ret = hclge_client_setup_tc(hdev); + if (ret) + goto err_out; hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE; @@ -359,7 +396,18 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc) else hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE; - return 0; + ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT); + if (ret) + return ret; + + return hclge_notify_client(hdev, HNAE3_UP_CLIENT); + +err_out: + if (hclge_notify_client(hdev, HNAE3_INIT_CLIENT)) + return ret; + + hclge_notify_client(hdev, HNAE3_UP_CLIENT); + return ret; } static const struct hnae3_dcb_ops hns3_dcb_ops = { @@ -369,7 +417,6 @@ static const struct hnae3_dcb_ops hns3_dcb_ops = { .ieee_setpfc = hclge_ieee_setpfc, .getdcbx = hclge_getdcbx, .setdcbx = hclge_setdcbx, - .map_update = hclge_map_update, .setup_tc = hclge_setup_tc, }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 26d80504c730..1192cf6f2321 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -691,7 +691,7 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) dev_info(&hdev->pdev->dev, "dump qos buf cfg\n"); tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data; - for (i = 0; i < HCLGE_TC_NUM; i++) + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i, tx_buf_cmd->tx_pkt_buff[i]); @@ -703,7 +703,7 
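/*
 * Illustrative sketch, not part of the patch: the setets/setup_tc paths
 * above now stop and tear down the client (DOWN, UNINIT) before touching
 * the TC mapping, then rebuild it (INIT, UP), and on a mid-sequence
 * failure still try to bring the client back while preserving the first
 * error. A generic userspace model of that unwind; all names are assumed.
 */
#include <stdio.h>

enum notify_type { NOTIFY_DOWN, NOTIFY_UNINIT, NOTIFY_INIT, NOTIFY_UP };

static int notify_client(enum notify_type type)
{
        printf("notify %d\n", type);    /* stand-in, always succeeds */
        return 0;
}

static int apply_tc_map(int inject_fail)
{
        return inject_fail ? -1 : 0;    /* stand-in for the hw update */
}

static int reconfigure_tc(int inject_fail)
{
        int ret;

        ret = notify_client(NOTIFY_DOWN);
        if (ret)
                return ret;
        ret = notify_client(NOTIFY_UNINIT);
        if (ret)
                return ret;

        ret = apply_tc_map(inject_fail);
        if (ret)
                goto err_out;

        ret = notify_client(NOTIFY_INIT);
        if (ret)
                return ret;
        return notify_client(NOTIFY_UP);

err_out:
        /* best-effort recovery, but report the original error */
        if (notify_client(NOTIFY_INIT))
                return ret;
        notify_client(NOTIFY_UP);
        return ret;
}

int main(void)
{
        return reconfigure_tc(0);
}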
@@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) dev_info(&hdev->pdev->dev, "\n"); rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data; - for (i = 0; i < HCLGE_TC_NUM; i++) + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i, rx_buf_cmd->buf_num[i]); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index d0f654123b9b..1feceff1477c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -80,7 +80,7 @@ static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = { { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err" }, { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err" }, { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err" }, - { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_erre" }, + { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err" }, { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err" }, { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err" }, { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err" }, @@ -219,6 +219,12 @@ static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = { { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err" }, { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err" }, { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err" }, + { .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err" }, + { .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err" }, + { .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err" }, + { .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err" }, + { .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err" }, + { .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err" }, { /* sentinel */ } }; @@ -277,6 +283,45 @@ static const struct hclge_hw_error hclge_ssu_com_err_int[] = { { /* sentinel */ } }; +#define HCLGE_SSU_MEM_ECC_ERR(x) \ + { .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err" } + +static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = { + HCLGE_SSU_MEM_ECC_ERR(0), + HCLGE_SSU_MEM_ECC_ERR(1), + HCLGE_SSU_MEM_ECC_ERR(2), + HCLGE_SSU_MEM_ECC_ERR(3), + HCLGE_SSU_MEM_ECC_ERR(4), + HCLGE_SSU_MEM_ECC_ERR(5), + HCLGE_SSU_MEM_ECC_ERR(6), + HCLGE_SSU_MEM_ECC_ERR(7), + HCLGE_SSU_MEM_ECC_ERR(8), + HCLGE_SSU_MEM_ECC_ERR(9), + HCLGE_SSU_MEM_ECC_ERR(10), + HCLGE_SSU_MEM_ECC_ERR(11), + HCLGE_SSU_MEM_ECC_ERR(12), + HCLGE_SSU_MEM_ECC_ERR(13), + HCLGE_SSU_MEM_ECC_ERR(14), + HCLGE_SSU_MEM_ECC_ERR(15), + HCLGE_SSU_MEM_ECC_ERR(16), + HCLGE_SSU_MEM_ECC_ERR(17), + HCLGE_SSU_MEM_ECC_ERR(18), + HCLGE_SSU_MEM_ECC_ERR(19), + HCLGE_SSU_MEM_ECC_ERR(20), + HCLGE_SSU_MEM_ECC_ERR(21), + HCLGE_SSU_MEM_ECC_ERR(22), + HCLGE_SSU_MEM_ECC_ERR(23), + HCLGE_SSU_MEM_ECC_ERR(24), + HCLGE_SSU_MEM_ECC_ERR(25), + HCLGE_SSU_MEM_ECC_ERR(26), + HCLGE_SSU_MEM_ECC_ERR(27), + HCLGE_SSU_MEM_ECC_ERR(28), + HCLGE_SSU_MEM_ECC_ERR(29), + HCLGE_SSU_MEM_ECC_ERR(30), + HCLGE_SSU_MEM_ECC_ERR(31), + { /* sentinel */ } +}; + static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = { { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" }, { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port" }, @@ -835,13 +880,15 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev, desc_data = (__le32 *)&desc[2]; status = le32_to_cpu(*(desc_data + 2)); if (status) { - dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_0 ssu_ecc_mbit_int[31:0]\n"); + hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0", + 
&hclge_ssu_mem_ecc_err_int[0], status); HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); } status = le32_to_cpu(*(desc_data + 3)) & BIT(0); if (status) { - dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_ecc_mbit_int[32]\n"); + dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n", + status); HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); } @@ -997,6 +1044,13 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev, hclge_log_error(dev, "IGU_EGU_TNL_INT_STS", &hclge_igu_egu_tnl_int[0], status); + /* log PPU(RCB) errors */ + desc_data = (__le32 *)&desc[3]; + status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK; + if (status) + hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0", + &hclge_ppu_pf_abnormal_int[0], status); + /* clear all PF RAS errors */ hclge_cmd_reuse_desc(&desc[0], false); desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); @@ -1094,10 +1148,10 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev) return 0; } -static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) +static enum hnae3_reset_type +hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) { - enum hnae3_reset_type reset_type = HNAE3_FUNC_RESET; - struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + enum hnae3_reset_type reset_type = HNAE3_NONE_RESET; struct device *dev = &hdev->pdev->dev; struct hclge_desc desc[2]; unsigned int status; @@ -1110,17 +1164,20 @@ static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) if (ret) { dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret); /* reset everything for now */ - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); - return ret; + return HNAE3_GLOBAL_RESET; } status = le32_to_cpu(desc[0].data[0]); - if (status & HCLGE_ROCEE_RERR_INT_MASK) + if (status & HCLGE_ROCEE_RERR_INT_MASK) { dev_warn(dev, "ROCEE RAS AXI rresp error\n"); + reset_type = HNAE3_FUNC_RESET; + } - if (status & HCLGE_ROCEE_BERR_INT_MASK) + if (status & HCLGE_ROCEE_BERR_INT_MASK) { dev_warn(dev, "ROCEE RAS AXI bresp error\n"); + reset_type = HNAE3_FUNC_RESET; + } if (status & HCLGE_ROCEE_ECC_INT_MASK) { dev_warn(dev, "ROCEE RAS 2bit ECC error\n"); @@ -1132,9 +1189,9 @@ static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) if (ret) { dev_err(dev, "failed(%d) to process ovf error\n", ret); /* reset everything for now */ - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); - return ret; + return HNAE3_GLOBAL_RESET; } + reset_type = HNAE3_FUNC_RESET; } /* clear error status */ @@ -1143,12 +1200,10 @@ static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) if (ret) { dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret); /* reset everything for now */ - reset_type = HNAE3_GLOBAL_RESET; + return HNAE3_GLOBAL_RESET; } - HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type); - - return ret; + return reset_type; } static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en) @@ -1178,15 +1233,18 @@ static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en) return ret; } -static int hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev) +static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev) { + enum hnae3_reset_type reset_type = HNAE3_NONE_RESET; struct hclge_dev *hdev = ae_dev->priv; if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || hdev->pdev->revision < 0x21) - return HNAE3_NONE_RESET; + return; - return hclge_log_and_clear_rocee_ras_error(hdev); + reset_type = hclge_log_and_clear_rocee_ras_error(hdev); + if (reset_type != 
HNAE3_NONE_RESET) + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type); } static const struct hclge_hw_blk hw_blk[] = { @@ -1332,14 +1390,13 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev, set_bit(HNAE3_GLOBAL_RESET, reset_requests); } - /* log PPU(RCB) errors */ + /* log PPU(RCB) MPF errors */ desc_data = (__le32 *)&desc[5]; status = le32_to_cpu(*(desc_data + 2)) & HCLGE_PPU_MPF_INT_ST2_MSIX_MASK; if (status) { - dev_warn(dev, - "PPU_MPF_ABNORMAL_INT_ST2[28:29], err_status(0x%x)\n", - status); + hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2", + &hclge_ppu_mpf_abnormal_int_st2[0], status); set_bit(HNAE3_CORE_RESET, reset_requests); } @@ -1386,7 +1443,7 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev, hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0", &hclge_ppp_pf_abnormal_int[0], status); - /* PPU(RCB) PF errors */ + /* log PPU(RCB) PF errors */ desc_data = (__le32 *)&desc[3]; status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK; if (status) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h index 51a7d4eb066a..fc068280d391 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h @@ -45,8 +45,8 @@ #define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF #define HCLGE_NCSI_ERR_INT_EN 0x3 #define HCLGE_NCSI_ERR_INT_TYPE 0x9 -#define HCLGE_MAC_COMMON_ERR_INT_EN GENMASK(7, 0) -#define HCLGE_MAC_COMMON_ERR_INT_EN_MASK GENMASK(7, 0) +#define HCLGE_MAC_COMMON_ERR_INT_EN 0x107FF +#define HCLGE_MAC_COMMON_ERR_INT_EN_MASK 0x107FF #define HCLGE_PPU_MPF_ABNORMAL_INT0_EN GENMASK(31, 0) #define HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK GENMASK(31, 0) #define HCLGE_PPU_MPF_ABNORMAL_INT1_EN GENMASK(31, 0) @@ -79,6 +79,7 @@ #define HCLGE_PPP_MPF_INT_ST3_MASK GENMASK(5, 0) #define HCLGE_PPU_MPF_INT_ST3_MASK GENMASK(7, 0) #define HCLGE_PPU_MPF_INT_ST2_MSIX_MASK GENMASK(29, 28) +#define HCLGE_PPU_PF_INT_RAS_MASK 0x18 #define HCLGE_PPU_PF_INT_MSIX_MASK 0x27 #define HCLGE_QCN_FIFO_INT_MASK GENMASK(17, 0) #define HCLGE_QCN_ECC_INT_MASK GENMASK(21, 0) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index f7637c08bb3a..deda606c51e7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -118,6 +118,12 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = { HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)}, {"mac_rx_mac_pause_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)}, + {"mac_tx_control_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)}, + {"mac_rx_control_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)}, + {"mac_tx_pfc_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)}, {"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)}, {"mac_tx_pfc_pri1_pkt_num", @@ -134,6 +140,8 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = { HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)}, {"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)}, + {"mac_rx_pfc_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)}, {"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)}, {"mac_rx_pfc_pri1_pkt_num", @@ -287,10 +295,17 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = { }, }; -static int hclge_mac_update_stats(struct hclge_dev *hdev) +static const u8 
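/*
 * Illustrative sketch, not part of the patch: the hclge_hw_error tables
 * above pair one interrupt-status bit with one message, and
 * hclge_log_error() walks such a table against the latched status word.
 * A standalone model of that decode loop; the two sample entries are
 * invented, not the real register layout.
 */
#include <stdint.h>
#include <stdio.h>

struct hw_error {
        uint32_t int_msk;
        const char *msg;        /* NULL marks the sentinel */
};

static const struct hw_error sample_tbl[] = {
        { 1u << 0, "mem0_ecc_mbit_err" },
        { 1u << 1, "mem1_ecc_mbit_err" },
        { 0, NULL } /* sentinel */
};

/* Print one line per error bit set in status. */
static void log_errors(const char *reg, const struct hw_error *tbl,
                       uint32_t status)
{
        for (; tbl->msg; tbl++)
                if (status & tbl->int_msk)
                        printf("%s %s found [error status=0x%x]\n",
                               reg, tbl->msg, status);
}

int main(void)
{
        log_errors("SSU_ECC_MULTI_BIT_INT_0", sample_tbl, 0x3);
        return 0;
}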
hclge_hash_key[] = { + 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, + 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, + 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, + 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, + 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA +}; + +static int hclge_mac_update_stats_defective(struct hclge_dev *hdev) { #define HCLGE_MAC_CMD_NUM 21 -#define HCLGE_RTN_DATA_NUM 4 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats); struct hclge_desc desc[HCLGE_MAC_CMD_NUM]; @@ -308,15 +323,18 @@ static int hclge_mac_update_stats(struct hclge_dev *hdev) } for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) { + /* for special opcode 0032, only the first desc has the head */ if (unlikely(i == 0)) { desc_data = (__le64 *)(&desc[i].data[0]); - n = HCLGE_RTN_DATA_NUM - 2; + n = HCLGE_RD_FIRST_STATS_NUM; } else { desc_data = (__le64 *)(&desc[i]); - n = HCLGE_RTN_DATA_NUM; + n = HCLGE_RD_OTHER_STATS_NUM; } + for (k = 0; k < n; k++) { - *data++ += le64_to_cpu(*desc_data); + *data += le64_to_cpu(*desc_data); + data++; desc_data++; } } @@ -324,6 +342,85 @@ static int hclge_mac_update_stats(struct hclge_dev *hdev) return 0; } +static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num) +{ + u64 *data = (u64 *)(&hdev->hw_stats.mac_stats); + struct hclge_desc *desc; + __le64 *desc_data; + u16 i, k, n; + int ret; + + desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true); + ret = hclge_cmd_send(&hdev->hw, desc, desc_num); + if (ret) { + kfree(desc); + return ret; + } + + for (i = 0; i < desc_num; i++) { + /* for special opcode 0034, only the first desc has the head */ + if (i == 0) { + desc_data = (__le64 *)(&desc[i].data[0]); + n = HCLGE_RD_FIRST_STATS_NUM; + } else { + desc_data = (__le64 *)(&desc[i]); + n = HCLGE_RD_OTHER_STATS_NUM; + } + + for (k = 0; k < n; k++) { + *data += le64_to_cpu(*desc_data); + data++; + desc_data++; + } + } + + kfree(desc); + + return 0; +} + +static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num) +{ + struct hclge_desc desc; + __le32 *desc_data; + u32 reg_num; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + return ret; + + desc_data = (__le32 *)(&desc.data[0]); + reg_num = le32_to_cpu(*desc_data); + + *desc_num = 1 + ((reg_num - 3) >> 2) + + (u32)(((reg_num - 3) & 0x3) ? 
1 : 0); + + return 0; +} + +static int hclge_mac_update_stats(struct hclge_dev *hdev) +{ + u32 desc_num; + int ret; + + ret = hclge_mac_query_reg_num(hdev, &desc_num); + + /* The firmware supports the new statistics acquisition method */ + if (!ret) + ret = hclge_mac_update_stats_complete(hdev, desc_num); + else if (ret == -EOPNOTSUPP) + ret = hclge_mac_update_stats_defective(hdev); + else + dev_err(&hdev->pdev->dev, "query mac reg num fail!\n"); + + return ret; +} + static int hclge_tqps_update_stats(struct hnae3_handle *handle) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; @@ -461,26 +558,6 @@ static u8 *hclge_comm_get_strings(u32 stringset, return (u8 *)buff; } -static void hclge_update_netstat(struct hclge_hw_stats *hw_stats, - struct net_device_stats *net_stats) -{ - net_stats->tx_dropped = 0; - net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num; - net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num; - net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num; - - net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num; - net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num; - - net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num; - net_stats->rx_length_errors = - hw_stats->mac_stats.mac_rx_undersize_pkt_num; - net_stats->rx_length_errors += - hw_stats->mac_stats.mac_rx_oversize_pkt_num; - net_stats->rx_over_errors = - hw_stats->mac_stats.mac_rx_oversize_pkt_num; -} - static void hclge_update_stats_for_all(struct hclge_dev *hdev) { struct hnae3_handle *handle; @@ -500,8 +577,6 @@ static void hclge_update_stats_for_all(struct hclge_dev *hdev) if (status) dev_err(&hdev->pdev->dev, "Update MAC stats fail, status = %d.\n", status); - - hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats); } static void hclge_update_stats(struct hnae3_handle *handle, @@ -509,7 +584,6 @@ static void hclge_update_stats(struct hnae3_handle *handle, { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - struct hclge_hw_stats *hw_stats = &hdev->hw_stats; int status; if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) @@ -527,8 +601,6 @@ static void hclge_update_stats(struct hnae3_handle *handle, "Update TQPS stats fail, status = %d.\n", status); - hclge_update_netstat(hw_stats, net_stats); - clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state); } @@ -767,37 +839,67 @@ static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, unsigned long *supported = hdev->hw.mac.supported; if (speed_ability & HCLGE_SUPPORT_1G_BIT) - set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, - supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + supported); if (speed_ability & HCLGE_SUPPORT_10G_BIT) - set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, - supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + supported); if (speed_ability & HCLGE_SUPPORT_25G_BIT) - set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, - supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, + supported); if (speed_ability & HCLGE_SUPPORT_50G_BIT) - set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, - supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, + supported); if (speed_ability & HCLGE_SUPPORT_100G_BIT) - set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, - supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + supported); + + linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported); + 
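/*
 * Illustrative sketch, not part of the patch: hclge_mac_update_stats()
 * above probes the firmware first and only falls back to the fixed
 * 21-descriptor layout on -EOPNOTSUPP. The helper reproduces the
 * descriptor-count arithmetic: past the first descriptor (command head
 * plus three counters, hence the "- 3"), counters pack four 64-bit
 * values per descriptor, so this is a ceiling division by four.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t stats_desc_num(uint32_t reg_num)
{
        /* 1 + ceil((reg_num - 3) / 4), written as the driver writes it */
        return 1 + ((reg_num - 3) >> 2) + (((reg_num - 3) & 0x3) ? 1 : 0);
}

int main(void)
{
        printf("%u\n", stats_desc_num(83));     /* 1 + 80/4 = 21 */
        return 0;
}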
linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); +} + +static void hclge_parse_copper_link_mode(struct hclge_dev *hdev, + u8 speed_ability) +{ + unsigned long *supported = hdev->hw.mac.supported; + + /* default to support all speed for GE port */ + if (!speed_ability) + speed_ability = HCLGE_SUPPORT_GE; + + if (speed_ability & HCLGE_SUPPORT_1G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + supported); + + if (speed_ability & HCLGE_SUPPORT_100M_BIT) { + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + supported); + } + + if (speed_ability & HCLGE_SUPPORT_10M_BIT) { + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported); + } - set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported); - set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); } static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability) { u8 media_type = hdev->hw.mac.media_type; - if (media_type != HNAE3_MEDIA_TYPE_FIBER) - return; - - hclge_parse_fiber_link_mode(hdev, speed_ability); + if (media_type == HNAE3_MEDIA_TYPE_FIBER) + hclge_parse_fiber_link_mode(hdev, speed_ability); + else if (media_type == HNAE3_MEDIA_TYPE_COPPER) + hclge_parse_copper_link_mode(hdev, speed_ability); } static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) @@ -931,12 +1033,16 @@ static int hclge_configure(struct hclge_dev *hdev) ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); hdev->hw.mac.media_type = cfg.media_type; hdev->hw.mac.phy_addr = cfg.phy_addr; - hdev->num_desc = cfg.tqp_desc_num; + hdev->num_tx_desc = cfg.tqp_desc_num; + hdev->num_rx_desc = cfg.tqp_desc_num; hdev->tm_info.num_pg = 1; hdev->tc_max = cfg.tc_num; hdev->tm_info.hw_pfc_map = 0; hdev->wanted_umv_size = cfg.umv_space; + if (hnae3_dev_fd_supported(hdev)) + hdev->fd_en = true; + ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); if (ret) { dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret); @@ -1035,7 +1141,8 @@ static int hclge_alloc_tqps(struct hclge_dev *hdev) tqp->q.ae_algo = &ae_algo; tqp->q.buf_size = hdev->rx_buf_len; - tqp->q.desc_num = hdev->num_desc; + tqp->q.tx_desc_num = hdev->num_tx_desc; + tqp->q.rx_desc_num = hdev->num_rx_desc; tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET + i * HCLGE_TQP_REG_SIZE; @@ -1068,64 +1175,51 @@ static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, return ret; } -static int hclge_assign_tqp(struct hclge_vport *vport) +static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps) { struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; struct hclge_dev *hdev = vport->back; int i, alloced; for (i = 0, alloced = 0; i < hdev->num_tqps && - alloced < kinfo->num_tqps; i++) { + alloced < num_tqps; i++) { if (!hdev->htqp[i].alloced) { hdev->htqp[i].q.handle = &vport->nic; hdev->htqp[i].q.tqp_index = alloced; - hdev->htqp[i].q.desc_num = kinfo->num_desc; + hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc; + hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc; kinfo->tqp[alloced] = &hdev->htqp[i].q; hdev->htqp[i].alloced = true; alloced++; } } - vport->alloc_tqps = kinfo->num_tqps; + vport->alloc_tqps = alloced; + kinfo->rss_size = min_t(u16, hdev->rss_size_max, + vport->alloc_tqps / hdev->tm_info.num_tc); 
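/*
 * Illustrative sketch, not part of the patch: after hclge_assign_tqp()
 * above, rss_size becomes min(rss_size_max, alloc_tqps / num_tc), i.e.
 * the queues actually allocated are split evenly across the enabled TCs
 * and capped by the RSS table. Sample values below are made up.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t rss_size(uint16_t rss_size_max, uint16_t alloc_tqps,
                         uint16_t num_tc)
{
        uint16_t per_tc = alloc_tqps / num_tc;  /* num_tc must be nonzero */

        return per_tc < rss_size_max ? per_tc : rss_size_max;
}

int main(void)
{
        /* 16 queues over 4 TCs, table capped at 8: rss_size is 4 */
        printf("%u\n", rss_size(8, 16, 4));
        return 0;
}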
return 0; } -static int hclge_knic_setup(struct hclge_vport *vport, - u16 num_tqps, u16 num_desc) +static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps, + u16 num_tx_desc, u16 num_rx_desc) + { struct hnae3_handle *nic = &vport->nic; struct hnae3_knic_private_info *kinfo = &nic->kinfo; struct hclge_dev *hdev = vport->back; - int i, ret; + int ret; - kinfo->num_desc = num_desc; - kinfo->rx_buf_len = hdev->rx_buf_len; - kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc); - kinfo->rss_size - = min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc); - kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc; + kinfo->num_tx_desc = num_tx_desc; + kinfo->num_rx_desc = num_rx_desc; - for (i = 0; i < HNAE3_MAX_TC; i++) { - if (hdev->hw_tc_map & BIT(i)) { - kinfo->tc_info[i].enable = true; - kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; - kinfo->tc_info[i].tqp_count = kinfo->rss_size; - kinfo->tc_info[i].tc = i; - } else { - /* Set to default queue if TC is disable */ - kinfo->tc_info[i].enable = false; - kinfo->tc_info[i].tqp_offset = 0; - kinfo->tc_info[i].tqp_count = 1; - kinfo->tc_info[i].tc = 0; - } - } + kinfo->rx_buf_len = hdev->rx_buf_len; - kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, + kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps, sizeof(struct hnae3_queue *), GFP_KERNEL); if (!kinfo->tqp) return -ENOMEM; - ret = hclge_assign_tqp(vport); + ret = hclge_assign_tqp(vport, num_tqps); if (ret) dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); @@ -1140,7 +1234,7 @@ static int hclge_map_tqp_to_vport(struct hclge_dev *hdev, u16 i; kinfo = &nic->kinfo; - for (i = 0; i < kinfo->num_tqps; i++) { + for (i = 0; i < vport->alloc_tqps; i++) { struct hclge_tqp *q = container_of(kinfo->tqp[i], struct hclge_tqp, q); bool is_pf; @@ -1191,7 +1285,9 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) nic->numa_node_mask = hdev->numa_node_mask; if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) { - ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc); + ret = hclge_knic_setup(vport, num_tqps, + hdev->num_tx_desc, hdev->num_rx_desc); + if (ret) { dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret); @@ -1241,6 +1337,9 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) vport->back = hdev; vport->vport_id = i; vport->mps = HCLGE_MAC_DEFAULT_FRAME; + INIT_LIST_HEAD(&vport->vlan_list); + INIT_LIST_HEAD(&vport->uc_mac_list); + INIT_LIST_HEAD(&vport->mc_mac_list); if (i == 0) ret = hclge_vport_setup(vport, tqp_main_vport); @@ -1273,7 +1372,7 @@ static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, req = (struct hclge_tx_buff_alloc_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0); - for (i = 0; i < HCLGE_TC_NUM; i++) { + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size; req->tx_pkt_buff[i] = @@ -1448,13 +1547,14 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev, for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; - if (total_size < hdev->tx_buf_size) - return -ENOMEM; + if (hdev->hw_tc_map & BIT(i)) { + if (total_size < hdev->tx_buf_size) + return -ENOMEM; - if (hdev->hw_tc_map & BIT(i)) priv->tx_buf_size = hdev->tx_buf_size; - else + } else { priv->tx_buf_size = 0; + } total_size -= priv->tx_buf_size; } @@ -1462,66 +1562,15 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev, return 0; } -/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs - * @hdev: pointer to struct hclge_dev 
- * @buf_alloc: pointer to buffer calculation data - * @return: 0: calculate sucessful, negative: fail - */ -static int hclge_rx_buffer_calc(struct hclge_dev *hdev, - struct hclge_pkt_buf_alloc *buf_alloc) +static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max, + struct hclge_pkt_buf_alloc *buf_alloc) { - u32 rx_all = hdev->pkt_buf_size, aligned_mps; - int no_pfc_priv_num, pfc_priv_num; - struct hclge_priv_buf *priv; + u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); + u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); int i; - aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); - rx_all -= hclge_get_tx_buff_alloced(buf_alloc); - - /* When DCB is not supported, rx private - * buffer is not allocated. - */ - if (!hnae3_dev_dcb_supported(hdev)) { - if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) - return -ENOMEM; - - return 0; - } - - /* step 1, try to alloc private buffer for all enabled tc */ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - priv = &buf_alloc->priv_buf[i]; - if (hdev->hw_tc_map & BIT(i)) { - priv->enable = 1; - if (hdev->tm_info.hw_pfc_map & BIT(i)) { - priv->wl.low = aligned_mps; - priv->wl.high = - roundup(priv->wl.low + aligned_mps, - HCLGE_BUF_SIZE_UNIT); - priv->buf_size = priv->wl.high + - hdev->dv_buf_size; - } else { - priv->wl.low = 0; - priv->wl.high = 2 * aligned_mps; - priv->buf_size = priv->wl.high + - hdev->dv_buf_size; - } - } else { - priv->enable = 0; - priv->wl.low = 0; - priv->wl.high = 0; - priv->buf_size = 0; - } - } - - if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) - return 0; - - /* step 2, try to decrease the buffer size of - * no pfc TC's private buffer - */ - for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - priv = &buf_alloc->priv_buf[i]; + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; priv->enable = 0; priv->wl.low = 0; @@ -1534,28 +1583,30 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev, priv->enable = 1; if (hdev->tm_info.hw_pfc_map & BIT(i)) { - priv->wl.low = 256; - priv->wl.high = priv->wl.low + aligned_mps; - priv->buf_size = priv->wl.high + hdev->dv_buf_size; + priv->wl.low = max ? aligned_mps : 256; + priv->wl.high = roundup(priv->wl.low + aligned_mps, + HCLGE_BUF_SIZE_UNIT); } else { priv->wl.low = 0; - priv->wl.high = aligned_mps; - priv->buf_size = priv->wl.high + hdev->dv_buf_size; + priv->wl.high = max ? 
(aligned_mps * 2) : aligned_mps; } + + priv->buf_size = priv->wl.high + hdev->dv_buf_size; } - if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) - return 0; + return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); +} - /* step 3, try to reduce the number of pfc disabled TCs, - * which have private buffer - */ - /* get the total no pfc enable TC number, which have private buffer */ - no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); +static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); + int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); + int i; /* let the last to be cleared first */ for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { - priv = &buf_alloc->priv_buf[i]; + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; if (hdev->hw_tc_map & BIT(i) && !(hdev->tm_info.hw_pfc_map & BIT(i))) { @@ -1572,17 +1623,19 @@ break; } - if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) - return 0; + return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); +} - /* step 4, try to reduce the number of pfc enabled TCs - * which have private buffer. - */ - pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); +static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); + int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); + int i; /* let the last to be cleared first */ for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { - priv = &buf_alloc->priv_buf[i]; + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; if (hdev->hw_tc_map & BIT(i) && hdev->tm_info.hw_pfc_map & BIT(i)) { @@ -1598,7 +1651,40 @@ pfc_priv_num == 0) break; } - if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) + + return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); +} + +/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs + * @hdev: pointer to struct hclge_dev + * @buf_alloc: pointer to buffer calculation data + * @return: 0: calculate successful, negative: fail + */ +static int hclge_rx_buffer_calc(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + /* When DCB is not supported, rx private buffer is not allocated.
*/ + if (!hnae3_dev_dcb_supported(hdev)) { + u32 rx_all = hdev->pkt_buf_size; + + rx_all -= hclge_get_tx_buff_alloced(buf_alloc); + if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) + return -ENOMEM; + + return 0; + } + + if (hclge_rx_buf_calc_all(hdev, true, buf_alloc)) + return 0; + + /* try to decrease the buffer size */ + if (hclge_rx_buf_calc_all(hdev, false, buf_alloc)) + return 0; + + if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc)) + return 0; + + if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc)) return 0; return -ENOMEM; @@ -2122,7 +2208,9 @@ static int hclge_get_mac_phy_link(struct hclge_dev *hdev) static void hclge_update_link_status(struct hclge_dev *hdev) { + struct hnae3_client *rclient = hdev->roce_client; struct hnae3_client *client = hdev->nic_client; + struct hnae3_handle *rhandle; struct hnae3_handle *handle; int state; int i; @@ -2134,6 +2222,10 @@ static void hclge_update_link_status(struct hclge_dev *hdev) for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { handle = &hdev->vport[i].nic; client->ops->link_status_change(handle, state); + rhandle = &hdev->vport[i].roce; + if (rclient && rclient->ops->link_status_change) + rclient->ops->link_status_change(rhandle, + state); } hdev->hw.mac.link = state; } @@ -2418,8 +2510,8 @@ static void hclge_misc_irq_uninit(struct hclge_dev *hdev) hclge_free_vector(hdev, 0); } -static int hclge_notify_client(struct hclge_dev *hdev, - enum hnae3_reset_notify_type type) +int hclge_notify_client(struct hclge_dev *hdev, + enum hnae3_reset_notify_type type) { struct hnae3_client *client = hdev->nic_client; u16 i; @@ -2548,7 +2640,7 @@ static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset) return hclge_cmd_send(&hdev->hw, &desc, 1); } -int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) +static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) { int i; @@ -2883,6 +2975,10 @@ static void hclge_reset(struct hclge_dev *hdev) if (ret) goto err_reset_lock; + ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT); + if (ret) + goto err_reset_lock; + hclge_clear_reset_cause(hdev); ret = hclge_reset_prepare_up(hdev); @@ -3597,8 +3693,11 @@ void hclge_rss_indir_init_cfg(struct hclge_dev *hdev) static void hclge_rss_init_cfg(struct hclge_dev *hdev) { + int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; struct hclge_vport *vport = hdev->vport; - int i; + + if (hdev->pdev->revision >= 0x21) + rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE; for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { vport[i].rss_tuple_sets.ipv4_tcp_en = @@ -3618,9 +3717,10 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev) vport[i].rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; - vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; + vport[i].rss_algo = rss_algo; - netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE); + memcpy(vport[i].rss_hash_key, hclge_hash_key, + HCLGE_RSS_KEY_SIZE); } hclge_rss_indir_init_cfg(hdev); @@ -3788,8 +3888,16 @@ static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; struct hclge_promisc_param param; + bool en_bc_pmc = true; - hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, true, + /* For revision 0x20, if broadcast promisc enabled, vlan filter is + * always bypassed. So broadcast promisc should be disabled until + * user enables promisc mode + */ + if (handle->pdev->revision == 0x20) + en_bc_pmc = handle->netdev_flags & HNAE3_BPE ?
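/*
 * Illustrative sketch, not part of the patch: the refactored
 * hclge_rx_buffer_calc() above tries progressively cheaper layouts
 * until one fits the packet buffer: generous waterlines, then minimal
 * waterlines, then dropping private buffers of non-PFC TCs, then of
 * PFC TCs. Modeled as a strategy cascade with a stubbed fit test.
 */
#include <stdbool.h>
#include <stdio.h>

struct buf_ctx {
        int budget;     /* stand-in for the remaining packet buffer */
        int need;       /* cost of the layout last attempted */
};

static bool try_layout(struct buf_ctx *c, int need)
{
        c->need = need;
        return c->need <= c->budget;    /* stand-in for hclge_is_rx_buf_ok */
}

static int rx_buffer_calc(struct buf_ctx *c)
{
        if (try_layout(c, 400)) /* max waterlines */
                return 0;
        if (try_layout(c, 300)) /* minimal waterlines */
                return 0;
        if (try_layout(c, 200)) /* drop non-PFC private buffers */
                return 0;
        if (try_layout(c, 100)) /* drop PFC private buffers too */
                return 0;
        return -1;              /* -ENOMEM in the driver */
}

int main(void)
{
        struct buf_ctx c = { .budget = 250, .need = 0 };

        printf("ret %d, fitted need %d\n", rx_buffer_calc(&c), c.need);
        return 0;
}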
true : false; + + hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc, vport->vport_id); return hclge_cmd_set_promisc_mode(hdev, &param); } @@ -3898,7 +4006,6 @@ static int hclge_init_fd_config(struct hclge_dev *hdev) return -EOPNOTSUPP; } - hdev->fd_cfg.fd_en = true; hdev->fd_cfg.proto_support = TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW | UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW; @@ -4656,7 +4763,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle, if (!hnae3_dev_fd_supported(hdev)) return -EOPNOTSUPP; - if (!hdev->fd_cfg.fd_en) { + if (!hdev->fd_en) { dev_warn(&hdev->pdev->dev, "Please enable flow director first\n"); return -EOPNOTSUPP; @@ -4809,7 +4916,7 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle) return 0; /* if fd is disabled, should not restore it when reset */ - if (!hdev->fd_cfg.fd_en) + if (!hdev->fd_en) return 0; hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { @@ -5095,7 +5202,7 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - hdev->fd_cfg.fd_en = enable; + hdev->fd_en = enable; if (!enable) hclge_del_all_fd_entries(handle, false); else @@ -5174,8 +5281,15 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en, { #define HCLGE_SERDES_RETRY_MS 10 #define HCLGE_SERDES_RETRY_NUM 100 + +#define HCLGE_MAC_LINK_STATUS_MS 20 +#define HCLGE_MAC_LINK_STATUS_NUM 10 +#define HCLGE_MAC_LINK_STATUS_DOWN 0 +#define HCLGE_MAC_LINK_STATUS_UP 1 + struct hclge_serdes_lb_cmd *req; struct hclge_desc desc; + int mac_link_ret = 0; int ret, i = 0; u8 loop_mode_b; @@ -5198,8 +5312,10 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en, if (en) { req->enable = loop_mode_b; req->mask = loop_mode_b; + mac_link_ret = HCLGE_MAC_LINK_STATUS_UP; } else { req->mask = loop_mode_b; + mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN; } ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -5231,7 +5347,19 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en, } hclge_cfg_mac_mode(hdev, en); - return 0; + + i = 0; + do { + /* serdes internal loopback, independent of the network cable */ + msleep(HCLGE_MAC_LINK_STATUS_MS); + ret = hclge_get_mac_link_status(hdev); + if (ret == mac_link_ret) + return 0; + } while (++i < HCLGE_MAC_LINK_STATUS_NUM); + + dev_err(&hdev->pdev->dev, "config mac mode timeout\n"); + + return -EBUSY; } static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, @@ -5258,6 +5386,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle, enum hnae3_loop loop_mode, bool en) { struct hclge_vport *vport = hclge_get_vport(handle); + struct hnae3_knic_private_info *kinfo; struct hclge_dev *hdev = vport->back; int i, ret; @@ -5276,7 +5405,11 @@ static int hclge_set_loopback(struct hnae3_handle *handle, break; } - for (i = 0; i < vport->alloc_tqps; i++) { + if (ret) + return ret; + + kinfo = &vport->nic.kinfo; + for (i = 0; i < kinfo->num_tqps; i++) { ret = hclge_tqp_enable(hdev, i, 0, en); if (ret) return ret; @@ -5288,11 +5421,13 @@ static int hclge_set_loopback(struct hnae3_handle *handle, static void hclge_reset_tqp_stats(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); + struct hnae3_knic_private_info *kinfo; struct hnae3_queue *queue; struct hclge_tqp *tqp; int i; - for (i = 0; i < vport->alloc_tqps; i++) { + kinfo = &vport->nic.kinfo; + for (i = 0; i < kinfo->num_tqps; i++) { queue =
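/*
 * Illustrative sketch, not part of the patch: after toggling serdes
 * loopback, the code above no longer returns immediately but polls the
 * MAC link state every 20 ms, up to 10 tries, and reports -EBUSY on
 * timeout. A generic poll-until helper; callback and timings assumed.
 */
#include <stdio.h>
#include <time.h>

static void sleep_ms(long ms)
{
        struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };

        nanosleep(&ts, NULL);
}

/* Return 0 once get_status() reports want, -1 after tries attempts. */
static int poll_status(int (*get_status)(void), int want, int tries,
                       long interval_ms)
{
        int i;

        for (i = 0; i < tries; i++) {
                sleep_ms(interval_ms);
                if (get_status() == want)
                        return 0;
        }
        return -1;      /* the driver returns -EBUSY here */
}

static int fake_link_up(void)
{
        return 1;
}

int main(void)
{
        printf("%d\n", poll_status(fake_link_up, 1, 10, 20));
        return 0;
}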
handle->kinfo.tqp[i]; tqp = container_of(queue, struct hclge_tqp, q); memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); @@ -5493,13 +5628,19 @@ static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) } static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, - const u8 *addr) + const u8 *addr, bool is_mc) { const unsigned char *mac_addr = addr; u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | (mac_addr[0]) | (mac_addr[1] << 8); u32 low_val = mac_addr[4] | (mac_addr[5] << 8); + hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + if (is_mc) { + hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); + hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + } + new_req->mac_addr_hi32 = cpu_to_le32(high_val); new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); } @@ -5730,9 +5871,12 @@ static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) if (is_free) { if (vport->used_umv_num > hdev->priv_umv_size) hdev->share_umv_size++; - vport->used_umv_num--; + + if (vport->used_umv_num > 0) + vport->used_umv_num--; } else { - if (vport->used_umv_num >= hdev->priv_umv_size) + if (vport->used_umv_num >= hdev->priv_umv_size && + hdev->share_umv_size > 0) hdev->share_umv_size--; vport->used_umv_num++; } @@ -5770,14 +5914,13 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, } memset(&req, 0, sizeof(req)); - hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, HCLGE_MAC_EPORT_VFID_S, vport->vport_id); req.egress_port = cpu_to_le16(egress_port); - hclge_prepare_mac_addr(&req, addr); + hclge_prepare_mac_addr(&req, addr, false); /* Lookup the mac address in the mac_vlan table, and add * it if the entry is inexistent. 
Repeated unicast entry @@ -5835,9 +5978,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, } memset(&req, 0, sizeof(req)); - hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hclge_prepare_mac_addr(&req, addr); + hclge_prepare_mac_addr(&req, addr, false); ret = hclge_remove_mac_vlan_tbl(vport, &req); if (!ret) hclge_update_umv_space(vport, true); @@ -5869,11 +6011,8 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, return -EINVAL; } memset(&req, 0, sizeof(req)); - hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); - hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hclge_prepare_mac_addr(&req, addr); + hclge_prepare_mac_addr(&req, addr, true); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (!status) { /* This mac addr exist, update VFID for it */ @@ -5919,11 +6058,8 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, } memset(&req, 0, sizeof(req)); - hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); - hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hclge_prepare_mac_addr(&req, addr); + hclge_prepare_mac_addr(&req, addr, true); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (!status) { /* This mac addr exist, remove this handle's VFID for it */ @@ -5949,6 +6085,103 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, return status; } +void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, + enum HCLGE_MAC_ADDR_TYPE mac_type) +{ + struct hclge_vport_mac_addr_cfg *mac_cfg; + struct list_head *list; + + if (!vport->vport_id) + return; + + mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL); + if (!mac_cfg) + return; + + mac_cfg->hd_tbl_status = true; + memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN); + + list = (mac_type == HCLGE_MAC_ADDR_UC) ? + &vport->uc_mac_list : &vport->mc_mac_list; + + list_add_tail(&mac_cfg->node, list); +} + +void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, + bool is_write_tbl, + enum HCLGE_MAC_ADDR_TYPE mac_type) +{ + struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp; + struct list_head *list; + bool uc_flag, mc_flag; + + list = (mac_type == HCLGE_MAC_ADDR_UC) ? + &vport->uc_mac_list : &vport->mc_mac_list; + + uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC; + mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC; + + list_for_each_entry_safe(mac_cfg, tmp, list, node) { + if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) { + if (uc_flag && mac_cfg->hd_tbl_status) + hclge_rm_uc_addr_common(vport, mac_addr); + + if (mc_flag && mac_cfg->hd_tbl_status) + hclge_rm_mc_addr_common(vport, mac_addr); + + list_del(&mac_cfg->node); + kfree(mac_cfg); + break; + } + } +} + +void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, + enum HCLGE_MAC_ADDR_TYPE mac_type) +{ + struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp; + struct list_head *list; + + list = (mac_type == HCLGE_MAC_ADDR_UC) ? 
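/*
 * Illustrative sketch, not part of the patch: the vport MAC bookkeeping
 * above keeps a per-vport list of programmed addresses so they can be
 * replayed or purged later. One caveat worth flagging: the patch
 * compares binary MACs with strncmp(), which stops at the first 0x00
 * byte, so two different addresses sharing a leading zero byte would
 * compare equal; memcmp() (ether_addr_equal() in-kernel) is the robust
 * comparison and is what this model uses.
 */
#include <stdlib.h>
#include <string.h>

#define ETH_ALEN 6

struct mac_node {
        unsigned char mac[ETH_ALEN];
        struct mac_node *next;
};

static int mac_list_add(struct mac_node **head, const unsigned char *mac)
{
        struct mac_node *n = calloc(1, sizeof(*n));

        if (!n)
                return -1;
        memcpy(n->mac, mac, ETH_ALEN);
        n->next = *head;
        *head = n;
        return 0;
}

static void mac_list_del(struct mac_node **head, const unsigned char *mac)
{
        struct mac_node **pp = head;
        struct mac_node *n;

        while ((n = *pp)) {
                if (!memcmp(n->mac, mac, ETH_ALEN)) {   /* not strncmp */
                        *pp = n->next;
                        free(n);
                        return;
                }
                pp = &n->next;
        }
}

int main(void)
{
        static const unsigned char mac[ETH_ALEN] = {
                0x00, 0x5e, 0x00, 0x53, 0x00, 0x01
        };
        struct mac_node *head = NULL;

        mac_list_add(&head, mac);
        mac_list_del(&head, mac);
        return 0;
}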
+ &vport->uc_mac_list : &vport->mc_mac_list; + + list_for_each_entry_safe(mac_cfg, tmp, list, node) { + if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status) + hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr); + + if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status) + hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr); + + mac_cfg->hd_tbl_status = false; + if (is_del_list) { + list_del(&mac_cfg->node); + kfree(mac_cfg); + } + } +} + +void hclge_uninit_vport_mac_table(struct hclge_dev *hdev) +{ + struct hclge_vport_mac_addr_cfg *mac, *tmp; + struct hclge_vport *vport; + int i; + + mutex_lock(&hdev->vport_cfg_mutex); + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) { + list_del(&mac->node); + kfree(mac); + } + + list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) { + list_del(&mac->node); + kfree(mac); + } + } + mutex_unlock(&hdev->vport_cfg_mutex); +} + static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, u16 cmdq_resp, u8 resp_code) { @@ -6104,7 +6337,7 @@ static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, } static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, - u8 fe_type, bool filter_en) + u8 fe_type, bool filter_en, u8 vf_id) { struct hclge_vlan_filter_ctrl_cmd *req; struct hclge_desc desc; @@ -6115,6 +6348,7 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; req->vlan_type = vlan_type; req->vlan_fe = filter_en ? fe_type : 0; + req->vf_id = vf_id; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) @@ -6143,12 +6377,13 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) if (hdev->pdev->revision >= 0x21) { hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, - HCLGE_FILTER_FE_EGRESS, enable); + HCLGE_FILTER_FE_EGRESS, enable, 0); hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, - HCLGE_FILTER_FE_INGRESS, enable); + HCLGE_FILTER_FE_INGRESS, enable, 0); } else { hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, - HCLGE_FILTER_FE_EGRESS_V1_B, enable); + HCLGE_FILTER_FE_EGRESS_V1_B, enable, + 0); } if (enable) handle->netdev_flags |= HNAE3_VLAN_FLTR; @@ -6456,19 +6691,27 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) int i; if (hdev->pdev->revision >= 0x21) { - ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, - HCLGE_FILTER_FE_EGRESS, true); - if (ret) - return ret; + /* for revision 0x21, vf vlan filter is per function */ + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + ret = hclge_set_vlan_filter_ctrl(hdev, + HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS, + true, + vport->vport_id); + if (ret) + return ret; + } ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, - HCLGE_FILTER_FE_INGRESS, true); + HCLGE_FILTER_FE_INGRESS, true, + 0); if (ret) return ret; } else { ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, HCLGE_FILTER_FE_EGRESS_V1_B, - true); + true, 0); if (ret) return ret; } @@ -6522,6 +6765,84 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); } +void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id) +{ + struct hclge_vport_vlan_cfg *vlan; + + /* vlan 0 is reserved */ + if (!vlan_id) + return; + + vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); + if (!vlan) + return; + + vlan->hd_tbl_status = true; + vlan->vlan_id = 
vlan_id; + + list_add_tail(&vlan->node, &vport->vlan_list); +} + +void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, + bool is_write_tbl) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + if (vlan->vlan_id == vlan_id) { + if (is_write_tbl && vlan->hd_tbl_status) + hclge_set_vlan_filter_hw(hdev, + htons(ETH_P_8021Q), + vport->vport_id, + vlan_id, 0, + true); + + list_del(&vlan->node); + kfree(vlan); + break; + } + } +} + +void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + if (vlan->hd_tbl_status) + hclge_set_vlan_filter_hw(hdev, + htons(ETH_P_8021Q), + vport->vport_id, + vlan->vlan_id, 0, + true); + + vlan->hd_tbl_status = false; + if (is_del_list) { + list_del(&vlan->node); + kfree(vlan); + } + } +} + +void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_vport *vport; + int i; + + mutex_lock(&hdev->vport_cfg_mutex); + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + list_del(&vlan->node); + kfree(vlan); + } + } + mutex_unlock(&hdev->vport_cfg_mutex); +} + int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) { struct hclge_vport *vport = hclge_get_vport(handle); @@ -6959,16 +7280,6 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle, *tp_mdix = ETH_TP_MDI; } -static int hclge_init_instance_hw(struct hclge_dev *hdev) -{ - return hclge_mac_connect_phy(hdev); -} - -static void hclge_uninit_instance_hw(struct hclge_dev *hdev) -{ - hclge_mac_disconnect_phy(hdev); -} - static int hclge_init_client_instance(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev) { @@ -6988,13 +7299,6 @@ static int hclge_init_client_instance(struct hnae3_client *client, if (ret) goto clear_nic; - ret = hclge_init_instance_hw(hdev); - if (ret) { - client->ops->uninit_instance(&vport->nic, - 0); - goto clear_nic; - } - hnae3_set_client_init_flag(client, ae_dev, 1); if (hdev->roce_client && @@ -7079,7 +7383,6 @@ static void hclge_uninit_client_instance(struct hnae3_client *client, if (client->type == HNAE3_CLIENT_ROCE) return; if (hdev->nic_client && client->ops->uninit_instance) { - hclge_uninit_instance_hw(hdev); client->ops->uninit_instance(&vport->nic, 0); hdev->nic_client = NULL; vport->nic.client = NULL; @@ -7222,6 +7525,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; mutex_init(&hdev->vport_lock); + mutex_init(&hdev->vport_cfg_mutex); ret = hclge_pci_init(hdev); if (ret) { @@ -7298,7 +7602,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) ret = hclge_init_umv_space(hdev); if (ret) { dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret); - goto err_msi_irq_uninit; + goto err_mdiobus_unreg; } ret = hclge_mac_init(hdev); @@ -7383,7 +7687,7 @@ err_msi_irq_uninit: err_msi_uninit: pci_free_irq_vectors(pdev); err_cmd_uninit: - hclge_destroy_cmd_queue(&hdev->hw); + hclge_cmd_uninit(hdev); err_pci_uninit: pcim_iounmap(pdev, hdev->hw.io_base); pci_clear_master(pdev); @@ -7456,7 +7760,7 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } - ret = hclge_tm_init_hw(hdev); + ret = hclge_tm_init_hw(hdev, true); if (ret) { 
dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret); return ret; @@ -7510,10 +7814,13 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) synchronize_irq(hdev->misc_vector.vector_irq); hclge_hw_error_set_state(hdev, false); - hclge_destroy_cmd_queue(&hdev->hw); + hclge_cmd_uninit(hdev); hclge_misc_irq_uninit(hdev); hclge_pci_uninit(hdev); mutex_destroy(&hdev->vport_lock); + hclge_uninit_vport_mac_table(hdev); + hclge_uninit_vport_vlan_table(hdev); + mutex_destroy(&hdev->vport_cfg_mutex); ae_dev->priv = NULL; } @@ -7523,18 +7830,17 @@ static u32 hclge_get_max_channels(struct hnae3_handle *handle) struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); + return min_t(u32, hdev->rss_size_max, + vport->alloc_tqps / kinfo->num_tc); } static void hclge_get_channels(struct hnae3_handle *handle, struct ethtool_channels *ch) { - struct hclge_vport *vport = hclge_get_vport(handle); - ch->max_combined = hclge_get_max_channels(handle); ch->other_count = 1; ch->max_other = 1; - ch->combined_count = vport->alloc_tqps; + ch->combined_count = handle->kinfo.rss_size; } static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, @@ -7547,26 +7853,8 @@ static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, *max_rss_size = hdev->rss_size_max; } -static void hclge_release_tqp(struct hclge_vport *vport) -{ - struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; - struct hclge_dev *hdev = vport->back; - int i; - - for (i = 0; i < kinfo->num_tqps; i++) { - struct hclge_tqp *tqp = - container_of(kinfo->tqp[i], struct hclge_tqp, q); - - tqp->q.handle = NULL; - tqp->q.tqp_index = 0; - tqp->alloced = false; - } - - devm_kfree(&hdev->pdev->dev, kinfo->tqp); - kinfo->tqp = NULL; -} - -static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) +static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, + bool rxfh_configured) { struct hclge_vport *vport = hclge_get_vport(handle); struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; @@ -7580,24 +7868,11 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) u32 *rss_indir; int ret, i; - /* Free old tqps, and reallocate with new tqp number when nic setup */ - hclge_release_tqp(vport); - - ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc); - if (ret) { - dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret); - return ret; - } - - ret = hclge_map_tqp_to_vport(hdev, vport); - if (ret) { - dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret); - return ret; - } + kinfo->req_rss_size = new_tqps_num; - ret = hclge_tm_schd_init(hdev); + ret = hclge_tm_vport_map_update(hdev); if (ret) { - dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret); + dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret); return ret; } @@ -7618,6 +7893,10 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) if (ret) return ret; + /* RSS indirection table has been configuared by user */ + if (rxfh_configured) + goto out; + /* Reinitializes the rss indirect table according to the new RSS size */ rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); if (!rss_indir) @@ -7633,6 +7912,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) kfree(rss_indir); +out: if (!ret) dev_info(&hdev->pdev->dev, "Channels changed, rss_size from %d to %d, tqps from %d to %d", @@ -7927,7 +8207,7 @@ 
static void hclge_get_link_mode(struct hnae3_handle *handle, } } -static int hclge_gro_en(struct hnae3_handle *handle, int enable) +static int hclge_gro_en(struct hnae3_handle *handle, bool enable) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; @@ -8012,6 +8292,8 @@ static const struct hnae3_ae_ops hclge_ops = { .set_gro_en = hclge_gro_en, .get_global_queue_id = hclge_covert_handle_qid_global, .set_timer_task = hclge_set_timer_task, + .mac_connect_phy = hclge_mac_connect_phy, + .mac_disconnect_phy = hclge_mac_disconnect_phy, }; static struct hnae3_ae_algo ae_algo = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 6615b85a1c52..b57ac4beb313 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -16,6 +16,9 @@ #define HCLGE_MAX_PF_NUM 8 +#define HCLGE_RD_FIRST_STATS_NUM 2 +#define HCLGE_RD_OTHER_STATS_NUM 4 + #define HCLGE_INVALID_VPORT 0xffff #define HCLGE_PF_CFG_BLOCK_SIZE 32 @@ -185,6 +188,10 @@ enum HLCGE_PORT_TYPE { #define HCLGE_SUPPORT_25G_BIT BIT(2) #define HCLGE_SUPPORT_50G_BIT BIT(3) #define HCLGE_SUPPORT_100G_BIT BIT(4) +#define HCLGE_SUPPORT_100M_BIT BIT(6) +#define HCLGE_SUPPORT_10M_BIT BIT(7) +#define HCLGE_SUPPORT_GE \ + (HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT) enum HCLGE_DEV_STATE { HCLGE_STATE_REINITING, @@ -322,6 +329,7 @@ struct hclge_tm_info { struct hclge_tc_info tc_info[HNAE3_MAX_TC]; enum hclge_fc_mode fc_mode; u8 hw_pfc_map; /* Allow for packet drop or not on this TC */ + u8 pfc_en; /* PFC enabled or not for user priority */ }; struct hclge_comm_stats_str { @@ -415,6 +423,10 @@ struct hclge_mac_stats { u64 mac_rx_fcs_err_pkt_num; u64 mac_rx_send_app_good_pkt_num; u64 mac_rx_send_app_bad_pkt_num; + u64 mac_tx_pfc_pause_pkt_num; + u64 mac_rx_pfc_pause_pkt_num; + u64 mac_tx_ctrl_pkt_num; + u64 mac_rx_ctrl_pkt_num; }; #define HCLGE_STATS_TIMER_INTERVAL (60 * 5) @@ -575,7 +587,6 @@ struct hclge_fd_key_cfg { struct hclge_fd_cfg { u8 fd_mode; - u8 fd_en; u16 max_key_length; u32 proto_support; u32 rule_num[2]; /* rule entry number */ @@ -621,6 +632,23 @@ struct hclge_fd_ad_data { u16 rule_id; }; +struct hclge_vport_mac_addr_cfg { + struct list_head node; + int hd_tbl_status; + u8 mac_addr[ETH_ALEN]; +}; + +enum HCLGE_MAC_ADDR_TYPE { + HCLGE_MAC_ADDR_UC, + HCLGE_MAC_ADDR_MC +}; + +struct hclge_vport_vlan_cfg { + struct list_head node; + int hd_tbl_status; + u16 vlan_id; +}; + /* For each bit of TCAM entry, it uses a pair of 'x' and * 'y' to indicate which value to match, like below: * ---------------------------------- @@ -678,7 +706,8 @@ struct hclge_dev { u16 num_alloc_vport; /* Num vports this driver supports */ u32 numa_node_mask; u16 rx_buf_len; - u16 num_desc; + u16 num_tx_desc; /* desc num of per tx queue */ + u16 num_rx_desc; /* desc num of per rx queue */ u8 hw_tc_map; u8 tc_num_last_time; enum hclge_fc_mode fc_mode_last_time; @@ -750,6 +779,7 @@ struct hclge_dev { struct hclge_fd_cfg fd_cfg; struct hlist_head fd_rule_list; u16 hclge_fd_rule_num; + u8 fd_en; u16 wanted_umv_size; /* max available unicast mac vlan space */ @@ -759,6 +789,8 @@ struct hclge_dev { /* unicast mac vlan space shared by PF and its VFs */ u16 share_umv_size; struct mutex umv_mutex; /* protect share_umv_size */ + + struct mutex vport_cfg_mutex; /* Protect stored vf table */ }; /* VPort level vlan tag configuration for TX direction */ @@ -826,6 +858,10 @@ 
struct hclge_vport { unsigned long state; unsigned long last_active_jiffies; u32 mps; /* Max packet size */ + + struct list_head uc_mac_list; /* Store VF unicast table */ + struct list_head mc_mac_list; /* Store VF multicast table */ + struct list_head vlan_list; /* Store VF vlan table */ }; void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, @@ -878,4 +914,19 @@ void hclge_vport_stop(struct hclge_vport *vport); int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu); int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf); u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id); +int hclge_notify_client(struct hclge_dev *hdev, + enum hnae3_reset_notify_type type); +void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, + enum HCLGE_MAC_ADDR_TYPE mac_type); +void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, + bool is_write_tbl, + enum HCLGE_MAC_ADDR_TYPE mac_type); +void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, + enum HCLGE_MAC_ADDR_TYPE mac_type); +void hclge_uninit_vport_mac_table(struct hclge_dev *hdev); +void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id); +void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, + bool is_write_tbl); +void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list); +void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index a1de451a85df..306a23e486de 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -203,12 +203,11 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en, static int hclge_set_vf_promisc_mode(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *req) { - bool en_uc = req->msg[1] ? true : false; - bool en_mc = req->msg[2] ? true : false; + bool en_bc = req->msg[1] ? 
true : false; struct hclge_promisc_param param; - /* always enable broadcast promisc bit */ - hclge_promisc_param_init(&param, en_uc, en_mc, true, vport->vport_id); + /* vf is not allowed to enable unicast/multicast promisc mode */ + hclge_promisc_param_init(&param, false, false, en_bc, vport->vport_id); return hclge_cmd_set_promisc_mode(vport->back, &param); } @@ -225,12 +224,24 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, hclge_rm_uc_addr_common(vport, old_addr); status = hclge_add_uc_addr_common(vport, mac_addr); - if (status) + if (status) { hclge_add_uc_addr_common(vport, old_addr); + } else { + hclge_rm_vport_mac_table(vport, mac_addr, + false, HCLGE_MAC_ADDR_UC); + hclge_add_vport_mac_table(vport, mac_addr, + HCLGE_MAC_ADDR_UC); + } } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_ADD) { status = hclge_add_uc_addr_common(vport, mac_addr); + if (!status) + hclge_add_vport_mac_table(vport, mac_addr, + HCLGE_MAC_ADDR_UC); } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE) { status = hclge_rm_uc_addr_common(vport, mac_addr); + if (!status) + hclge_rm_vport_mac_table(vport, mac_addr, + false, HCLGE_MAC_ADDR_UC); } else { dev_err(&hdev->pdev->dev, "failed to set unicast mac addr, unknown subcode %d\n", @@ -256,8 +267,14 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) { status = hclge_add_mc_addr_common(vport, mac_addr); + if (!status) + hclge_add_vport_mac_table(vport, mac_addr, + HCLGE_MAC_ADDR_MC); } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) { status = hclge_rm_mc_addr_common(vport, mac_addr); + if (!status) + hclge_rm_vport_mac_table(vport, mac_addr, + false, HCLGE_MAC_ADDR_MC); } else { dev_err(&hdev->pdev->dev, "failed to set mcast mac addr, unknown subcode %d\n", @@ -288,6 +305,9 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, memcpy(&proto, &mbx_req->msg[5], sizeof(proto)); status = hclge_set_vlan_filter(handle, cpu_to_be16(proto), vlan, is_kill); + if (!status) + is_kill ? hclge_rm_vport_vlan_table(vport, vlan, false) + : hclge_add_vport_vlan_table(vport, vlan); } else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) { struct hnae3_handle *handle = &vport->nic; bool en = mbx_req->msg[2] ? 
true : false; @@ -320,10 +340,14 @@ static int hclge_get_vf_tcinfo(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req, bool gen_resp) { - struct hclge_dev *hdev = vport->back; - int ret; + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + u8 vf_tc_map = 0; + int i, ret; - ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &hdev->hw_tc_map, + for (i = 0; i < kinfo->num_tc; i++) + vf_tc_map |= BIT(i); + + ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &vf_tc_map, sizeof(u8)); return ret; @@ -333,35 +357,52 @@ static int hclge_get_vf_queue_info(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req, bool gen_resp) { -#define HCLGE_TQPS_RSS_INFO_LEN 8 +#define HCLGE_TQPS_RSS_INFO_LEN 6 u8 resp_data[HCLGE_TQPS_RSS_INFO_LEN]; struct hclge_dev *hdev = vport->back; /* get the queue related info */ memcpy(&resp_data[0], &vport->alloc_tqps, sizeof(u16)); memcpy(&resp_data[2], &vport->nic.kinfo.rss_size, sizeof(u16)); - memcpy(&resp_data[4], &hdev->num_desc, sizeof(u16)); - memcpy(&resp_data[6], &hdev->rx_buf_len, sizeof(u16)); + memcpy(&resp_data[4], &hdev->rx_buf_len, sizeof(u16)); return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, HCLGE_TQPS_RSS_INFO_LEN); } +static int hclge_get_vf_queue_depth(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + bool gen_resp) +{ +#define HCLGE_TQPS_DEPTH_INFO_LEN 4 + u8 resp_data[HCLGE_TQPS_DEPTH_INFO_LEN]; + struct hclge_dev *hdev = vport->back; + + /* get the queue depth info */ + memcpy(&resp_data[0], &hdev->num_tx_desc, sizeof(u16)); + memcpy(&resp_data[2], &hdev->num_rx_desc, sizeof(u16)); + return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, + HCLGE_TQPS_DEPTH_INFO_LEN); +} + static int hclge_get_link_info(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req) { struct hclge_dev *hdev = vport->back; u16 link_status; - u8 msg_data[8]; + u8 msg_data[10]; + u16 media_type; u8 dest_vfid; u16 duplex; /* mac.link can only be 0 or 1 */ link_status = (u16)hdev->hw.mac.link; duplex = hdev->hw.mac.duplex; + media_type = hdev->hw.mac.media_type; memcpy(&msg_data[0], &link_status, sizeof(u16)); memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32)); memcpy(&msg_data[6], &duplex, sizeof(u16)); + memcpy(&msg_data[8], &media_type, sizeof(u16)); dest_vfid = mbx_req->mbx_src_vfid; /* send this requested info to VF */ @@ -369,6 +410,29 @@ static int hclge_get_link_info(struct hclge_vport *vport, HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid); } +static void hclge_get_link_mode(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ +#define HCLGE_SUPPORTED 1 + struct hclge_dev *hdev = vport->back; + unsigned long advertising; + unsigned long supported; + unsigned long send_data; + u8 msg_data[10]; + u8 dest_vfid; + + advertising = hdev->hw.mac.advertising[0]; + supported = hdev->hw.mac.supported[0]; + dest_vfid = mbx_req->mbx_src_vfid; + msg_data[0] = mbx_req->msg[2]; + + send_data = msg_data[0] == HCLGE_SUPPORTED ? 
supported : advertising; + + memcpy(&msg_data[2], &send_data, sizeof(unsigned long)); + hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), + HCLGE_MBX_LINK_STAT_MODE, dest_vfid); +} + static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req) { @@ -426,6 +490,24 @@ static int hclge_get_queue_id_in_pf(struct hclge_vport *vport, return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, 2); } +static int hclge_get_rss_key(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ +#define HCLGE_RSS_MBX_RESP_LEN 8 + u8 resp_data[HCLGE_RSS_MBX_RESP_LEN]; + struct hclge_dev *hdev = vport->back; + u8 index; + + index = mbx_req->msg[2]; + + memcpy(&resp_data[0], + &hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN], + HCLGE_RSS_MBX_RESP_LEN); + + return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, + HCLGE_RSS_MBX_RESP_LEN); +} + static bool hclge_cmd_crq_empty(struct hclge_hw *hw) { u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG); @@ -517,6 +599,14 @@ void hclge_mbx_handler(struct hclge_dev *hdev) "PF failed(%d) to get Q info for VF\n", ret); break; + case HCLGE_MBX_GET_QDEPTH: + ret = hclge_get_vf_queue_depth(vport, req, true); + if (ret) + dev_err(&hdev->pdev->dev, + "PF failed(%d) to get Q depth for VF\n", + ret); + break; + case HCLGE_MBX_GET_TCINFO: ret = hclge_get_vf_tcinfo(vport, req, true); if (ret) @@ -553,6 +643,25 @@ void hclge_mbx_handler(struct hclge_dev *hdev) "PF failed(%d) to get qid for VF\n", ret); break; + case HCLGE_MBX_GET_RSS_KEY: + ret = hclge_get_rss_key(vport, req); + if (ret) + dev_err(&hdev->pdev->dev, + "PF fail(%d) to get rss key for VF\n", + ret); + break; + case HCLGE_MBX_GET_LINK_MODE: + hclge_get_link_mode(vport, req); + break; + case HCLGE_MBX_GET_VF_FLR_STATUS: + mutex_lock(&hdev->vport_cfg_mutex); + hclge_rm_vport_all_mac_table(vport, true, + HCLGE_MAC_ADDR_UC); + hclge_rm_vport_all_mac_table(vport, true, + HCLGE_MAC_ADDR_MC); + hclge_rm_vport_all_vlan_table(vport, true); + mutex_unlock(&hdev->vport_cfg_mutex); + break; default: dev_err(&hdev->pdev->dev, "un-supported mailbox message, code = %d\n", diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index dabb8437f8dc..48eda2c6fdae 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -8,12 +8,6 @@ #include "hclge_main.h" #include "hclge_mdio.h" -#define HCLGE_PHY_SUPPORTED_FEATURES (SUPPORTED_Autoneg | \ - SUPPORTED_TP | \ - PHY_10BT_FEATURES | \ - PHY_100BT_FEATURES | \ - SUPPORTED_1000baseT_Full) - enum hclge_mdio_c22_op_seq { HCLGE_MDIO_C22_WRITE = 1, HCLGE_MDIO_C22_READ = 2 @@ -195,8 +189,10 @@ static void hclge_mac_adjust_link(struct net_device *netdev) netdev_err(netdev, "failed to configure flow control.\n"); } -int hclge_mac_connect_phy(struct hclge_dev *hdev) +int hclge_mac_connect_phy(struct hnae3_handle *handle) { + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; struct net_device *netdev = hdev->vport[0].nic.netdev; struct phy_device *phydev = hdev->hw.mac.phydev; __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; @@ -215,22 +211,17 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev) return ret; } - linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask); - linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, mask); - linkmode_set_bit_array(phy_10_100_features_array, - ARRAY_SIZE(phy_10_100_features_array), - mask); - 
linkmode_set_bit_array(phy_gbit_features_array, - ARRAY_SIZE(phy_gbit_features_array), - mask); + linkmode_copy(mask, hdev->hw.mac.supported); linkmode_and(phydev->supported, phydev->supported, mask); - phy_support_asym_pause(phydev); + linkmode_copy(phydev->advertising, phydev->supported); return 0; } -void hclge_mac_disconnect_phy(struct hclge_dev *hdev) +void hclge_mac_disconnect_phy(struct hnae3_handle *handle) { + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; struct phy_device *phydev = hdev->hw.mac.phydev; if (!phydev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h index 5fbf7dddb5d9..ef095d9c566f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h @@ -5,8 +5,8 @@ #define __HCLGE_MDIO_H int hclge_mac_mdio_config(struct hclge_dev *hdev); -int hclge_mac_connect_phy(struct hclge_dev *hdev); -void hclge_mac_disconnect_phy(struct hclge_dev *hdev); +int hclge_mac_connect_phy(struct hnae3_handle *handle); +void hclge_mac_disconnect_phy(struct hnae3_handle *handle); void hclge_mac_start_phy(struct hclge_dev *hdev); void hclge_mac_stop_phy(struct hclge_dev *hdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 00458da67503..aafc69f4bfdd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -517,20 +517,39 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) { struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; struct hclge_dev *hdev = vport->back; + u16 max_rss_size; u8 i; - vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit; - kinfo->num_tc = - min_t(u16, kinfo->num_tqps, hdev->tm_info.num_tc); - kinfo->rss_size - = min_t(u16, hdev->rss_size_max, - kinfo->num_tqps / kinfo->num_tc); - vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id; + /* TC configuration is shared by PF/VF in one port, only allow + * one tc for VF for simplicity. VF's vport_id is non zero. + */ + kinfo->num_tc = vport->vport_id ? 1 : + min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc); + vport->qs_offset = (vport->vport_id ? hdev->tm_info.num_tc : 0) + + (vport->vport_id ? 
(vport->vport_id - 1) : 0); + + max_rss_size = min_t(u16, hdev->rss_size_max, + vport->alloc_tqps / kinfo->num_tc); + + if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && + kinfo->req_rss_size <= max_rss_size) { + dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n", + kinfo->rss_size, kinfo->req_rss_size); + kinfo->rss_size = kinfo->req_rss_size; + } else if (kinfo->rss_size > max_rss_size || + (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) { + dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n", + kinfo->rss_size, max_rss_size); + kinfo->rss_size = max_rss_size; + } + + kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size; vport->dwrr = 100; /* 100 percent as init */ vport->alloc_rss_size = kinfo->rss_size; + vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit; - for (i = 0; i < kinfo->num_tc; i++) { - if (hdev->hw_tc_map & BIT(i)) { + for (i = 0; i < HNAE3_MAX_TC; i++) { + if (hdev->hw_tc_map & BIT(i) && i < kinfo->num_tc) { kinfo->tc_info[i].enable = true; kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; kinfo->tc_info[i].tqp_count = kinfo->rss_size; @@ -753,13 +772,17 @@ static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev) if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { /* Cfg qs -> pri mapping, one by one mapping */ - for (k = 0; k < hdev->num_alloc_vport; k++) - for (i = 0; i < hdev->tm_info.num_tc; i++) { + for (k = 0; k < hdev->num_alloc_vport; k++) { + struct hnae3_knic_private_info *kinfo = + &vport[k].nic.kinfo; + + for (i = 0; i < kinfo->num_tc; i++) { ret = hclge_tm_qs_to_pri_map_cfg( hdev, vport[k].qs_offset + i, i); if (ret) return ret; } + } } else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) { /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */ for (k = 0; k < hdev->num_alloc_vport; k++) @@ -934,6 +957,36 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev) return 0; } +static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev) +{ +#define DEFAULT_TC_WEIGHT 1 +#define DEFAULT_TC_OFFSET 14 + + struct hclge_ets_tc_weight_cmd *ets_weight; + struct hclge_desc desc; + int i; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false); + ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + struct hclge_pg_info *pg_info; + + ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT; + + if (!(hdev->hw_tc_map & BIT(i))) + continue; + + pg_info = + &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid]; + ets_weight->tc_weight[i] = pg_info->tc_dwrr[i]; + } + + ets_weight->weight_offset = DEFAULT_TC_OFFSET; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport) { struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; @@ -983,6 +1036,19 @@ static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev) ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev); if (ret) return ret; + + if (!hnae3_dev_dcb_supported(hdev)) + return 0; + + ret = hclge_tm_ets_tc_dwrr_cfg(hdev); + if (ret == -EOPNOTSUPP) { + dev_warn(&hdev->pdev->dev, + "fw %08x doesn't support ets tc weight cmd\n", + hdev->fw_version); + ret = 0; + } + + return ret; } else { ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev); if (ret) @@ -992,7 +1058,7 @@ static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev) return 0; } -int hclge_tm_map_cfg(struct hclge_dev *hdev) +static int hclge_tm_map_cfg(struct hclge_dev *hdev) { int ret; @@ -1107,7 +1173,7 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev) return 0; } -int 
hclge_tm_schd_mode_hw(struct hclge_dev *hdev) +static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev) { int ret; @@ -1118,7 +1184,7 @@ int hclge_tm_schd_mode_hw(struct hclge_dev *hdev) return hclge_tm_lvl34_schd_mode_cfg(hdev); } -static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev) +int hclge_tm_schd_setup_hw(struct hclge_dev *hdev) { int ret; @@ -1159,7 +1225,7 @@ static int hclge_pfc_setup_hw(struct hclge_dev *hdev) HCLGE_RX_MAC_PAUSE_EN_MSK; return hclge_pfc_pause_en_cfg(hdev, enable_bitmap, - hdev->tm_info.hw_pfc_map); + hdev->tm_info.pfc_en); } /* Each Tc has a 1024 queue sets to backpress, it divides to @@ -1228,10 +1294,23 @@ static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev) return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); } -int hclge_pause_setup_hw(struct hclge_dev *hdev) +static int hclge_tm_bp_setup(struct hclge_dev *hdev) +{ + int ret = 0; + int i; + + for (i = 0; i < hdev->tm_info.num_tc; i++) { + ret = hclge_bp_setup_hw(hdev, i); + if (ret) + return ret; + } + + return ret; +} + +int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init) { int ret; - u8 i; ret = hclge_pause_param_setup_hw(hdev); if (ret) @@ -1245,18 +1324,17 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev) if (!hnae3_dev_dcb_supported(hdev)) return 0; - /* When MAC is GE Mode, hdev does not support pfc setting */ + /* GE MAC does not support PFC, when driver is initializing and MAC + * is in GE Mode, ignore the error here, otherwise initialization + * will fail. + */ ret = hclge_pfc_setup_hw(hdev); - if (ret) - dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret); - - for (i = 0; i < hdev->tm_info.num_tc; i++) { - ret = hclge_bp_setup_hw(hdev, i); - if (ret) - return ret; - } + if (init && ret == -EOPNOTSUPP) + dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n"); + else + return ret; - return 0; + return hclge_tm_bp_setup(hdev); } void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc) @@ -1294,7 +1372,7 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) hclge_tm_schd_info_init(hdev); } -int hclge_tm_init_hw(struct hclge_dev *hdev) +int hclge_tm_init_hw(struct hclge_dev *hdev, bool init) { int ret; @@ -1306,7 +1384,7 @@ int hclge_tm_init_hw(struct hclge_dev *hdev) if (ret) return ret; - ret = hclge_pause_setup_hw(hdev); + ret = hclge_pause_setup_hw(hdev, init); if (ret) return ret; @@ -1325,5 +1403,22 @@ int hclge_tm_schd_init(struct hclge_dev *hdev) if (ret) return ret; - return hclge_tm_init_hw(hdev); + return hclge_tm_init_hw(hdev, true); +} + +int hclge_tm_vport_map_update(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + + hclge_tm_vport_tc_info_update(vport); + + ret = hclge_vport_q_to_qs_map(hdev, vport); + if (ret) + return ret; + + if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) + return 0; + + return hclge_tm_bp_setup(hdev); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index b6496a439304..f60e540c7a62 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -142,13 +142,13 @@ struct hclge_port_shapping_cmd { (HCLGE_TM_SHAP_##string##_LSH)) int hclge_tm_schd_init(struct hclge_dev *hdev); -int hclge_pause_setup_hw(struct hclge_dev *hdev); -int hclge_tm_schd_mode_hw(struct hclge_dev *hdev); +int hclge_tm_vport_map_update(struct hclge_dev *hdev); +int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init); +int hclge_tm_schd_setup_hw(struct hclge_dev 
*hdev); void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc); void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc); int hclge_tm_dwrr_cfg(struct hclge_dev *hdev); -int hclge_tm_map_cfg(struct hclge_dev *hdev); -int hclge_tm_init_hw(struct hclge_dev *hdev); +int hclge_tm_init_hw(struct hclge_dev *hdev, bool init); int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx); int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr); int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index d5765c8cf3a3..9441b453d38d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -115,9 +115,8 @@ static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring) { int size = ring->desc_num * sizeof(struct hclgevf_desc); - ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring), - size, &ring->desc_dma_addr, - GFP_KERNEL); + ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size, + &ring->desc_dma_addr, GFP_KERNEL); if (!ring->desc) return -ENOMEM; @@ -363,8 +362,28 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev) return 0; } +static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw) +{ + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0); +} + void hclgevf_cmd_uninit(struct hclgevf_dev *hdev) { + spin_lock_bh(&hdev->hw.cmq.csq.lock); + spin_lock(&hdev->hw.cmq.crq.lock); + clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + hclgevf_cmd_uninit_regs(&hdev->hw); + spin_unlock(&hdev->hw.cmq.crq.lock); + spin_unlock_bh(&hdev->hw.cmq.csq.lock); hclgevf_free_cmd_desc(&hdev->hw.cmq.csq); hclgevf_free_cmd_desc(&hdev->hw.cmq.crq); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 82103d5fa815..8bc28e6f465f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -21,6 +21,14 @@ static const struct pci_device_id ae_algovf_pci_tbl[] = { {0, } }; +static const u8 hclgevf_hash_key[] = { + 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, + 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, + 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, + 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, + 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA +}; + MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG, @@ -78,7 +86,12 @@ static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, static inline struct hclgevf_dev *hclgevf_ae_get_hdev( struct hnae3_handle *handle) { - return container_of(handle, struct hclgevf_dev, nic); + if (!handle->client) + return container_of(handle, struct hclgevf_dev, nic); + else if (handle->client->type == HNAE3_CLIENT_ROCE) + return container_of(handle, struct hclgevf_dev, roce); + else + 
return container_of(handle, struct hclgevf_dev, nic); } static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) @@ -234,7 +247,7 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) { -#define HCLGEVF_TQPS_RSS_INFO_LEN 8 +#define HCLGEVF_TQPS_RSS_INFO_LEN 6 u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; int status; @@ -250,8 +263,29 @@ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16)); memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16)); - memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16)); - memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16)); + memcpy(&hdev->rx_buf_len, &resp_msg[4], sizeof(u16)); + + return 0; +} + +static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) +{ +#define HCLGEVF_TQPS_DEPTH_INFO_LEN 4 + u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN]; + int ret; + + ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QDEPTH, 0, NULL, 0, + true, resp_msg, + HCLGEVF_TQPS_DEPTH_INFO_LEN); + if (ret) { + dev_err(&hdev->pdev->dev, + "VF request to get tqp depth info from PF failed %d", + ret); + return ret; + } + + memcpy(&hdev->num_tx_desc, &resp_msg[0], sizeof(u16)); + memcpy(&hdev->num_rx_desc, &resp_msg[2], sizeof(u16)); return 0; } @@ -291,7 +325,8 @@ static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) tqp->q.ae_algo = &ae_algovf; tqp->q.buf_size = hdev->rx_buf_len; - tqp->q.desc_num = hdev->num_desc; + tqp->q.tx_desc_num = hdev->num_tx_desc; + tqp->q.rx_desc_num = hdev->num_rx_desc; tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET + i * HCLGEVF_TQP_REG_SIZE; @@ -310,7 +345,8 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev) kinfo = &nic->kinfo; kinfo->num_tc = 0; - kinfo->num_desc = hdev->num_desc; + kinfo->num_tx_desc = hdev->num_tx_desc; + kinfo->num_rx_desc = hdev->num_rx_desc; kinfo->rx_buf_len = hdev->rx_buf_len; for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) if (hdev->hw_tc_map & BIT(i)) @@ -349,20 +385,40 @@ static void hclgevf_request_link_info(struct hclgevf_dev *hdev) void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) { + struct hnae3_handle *rhandle = &hdev->roce; struct hnae3_handle *handle = &hdev->nic; + struct hnae3_client *rclient; struct hnae3_client *client; client = handle->client; + rclient = hdev->roce_client; link_state = test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 
0 : link_state; if (link_state != hdev->hw.mac.link) { client->ops->link_status_change(handle, !!link_state); + if (rclient && rclient->ops->link_status_change) + rclient->ops->link_status_change(rhandle, !!link_state); hdev->hw.mac.link = link_state; } } +void hclgevf_update_link_mode(struct hclgevf_dev *hdev) +{ +#define HCLGEVF_ADVERTISING 0 +#define HCLGEVF_SUPPORTED 1 + u8 send_msg; + u8 resp_msg; + + send_msg = HCLGEVF_ADVERTISING; + hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg, + sizeof(u8), false, &resp_msg, sizeof(u8)); + send_msg = HCLGEVF_SUPPORTED; + hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg, + sizeof(u8), false, &resp_msg, sizeof(u8)); +} + static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) { struct hnae3_handle *nic = &hdev->nic; @@ -564,12 +620,50 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) return status; } +/* for revision 0x20, vf shared the same rss config with pf */ +static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev) +{ +#define HCLGEVF_RSS_MBX_RESP_LEN 8 + + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN]; + u16 msg_num, hash_key_index; + u8 index; + int ret; + + msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) / + HCLGEVF_RSS_MBX_RESP_LEN; + for (index = 0; index < msg_num; index++) { + ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_RSS_KEY, 0, + &index, sizeof(index), + true, resp_msg, + HCLGEVF_RSS_MBX_RESP_LEN); + if (ret) { + dev_err(&hdev->pdev->dev, + "VF get rss hash key from PF failed, ret=%d", + ret); + return ret; + } + + hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index; + if (index == msg_num - 1) + memcpy(&rss_cfg->rss_hash_key[hash_key_index], + &resp_msg[0], + HCLGEVF_RSS_KEY_SIZE - hash_key_index); + else + memcpy(&rss_cfg->rss_hash_key[hash_key_index], + &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN); + } + + return 0; +} + static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, u8 *hfunc) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; - int i; + int i, ret; if (handle->pdev->revision >= 0x21) { /* Get hash algorithm */ @@ -591,6 +685,16 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, if (key) memcpy(key, rss_cfg->rss_hash_key, HCLGEVF_RSS_KEY_SIZE); + } else { + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (key) { + ret = hclgevf_get_rss_hash_key(hdev); + if (ret) + return ret; + memcpy(key, rss_cfg->rss_hash_key, + HCLGEVF_RSS_KEY_SIZE); + } } if (indir) @@ -964,33 +1068,29 @@ static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) } static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, - bool en_uc_pmc, bool en_mc_pmc) + bool en_bc_pmc) { struct hclge_mbx_vf_to_pf_cmd *req; struct hclgevf_desc desc; - int status; + int ret; req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE; - req->msg[1] = en_uc_pmc ? 1 : 0; - req->msg[2] = en_mc_pmc ? 1 : 0; + req->msg[1] = en_bc_pmc ? 
1 : 0; - status = hclgevf_cmd_send(&hdev->hw, &desc, 1); - if (status) + ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (ret) dev_err(&hdev->pdev->dev, - "Set promisc mode fail, status is %d.\n", status); + "Set promisc mode fail, status is %d.\n", ret); - return status; + return ret; } -static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, - bool en_uc_pmc, bool en_mc_pmc) +static int hclgevf_set_promisc_mode(struct hclgevf_dev *hdev, bool en_bc_pmc) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - - return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc); + return hclgevf_cmd_set_promisc_mode(hdev, en_bc_pmc); } static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id, @@ -1264,7 +1364,7 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev) if (ret) return ret; - return 0; + return hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT); } static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) @@ -1610,6 +1710,10 @@ static void hclgevf_keep_alive_task(struct work_struct *work) int ret; hdev = container_of(work, struct hclgevf_dev, keep_alive_task); + + if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) + return; + ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL, 0, false, &respmsg, sizeof(u8)); if (ret) @@ -1628,6 +1732,8 @@ static void hclgevf_service_task(struct work_struct *work) */ hclgevf_request_link_info(hdev); + hclgevf_update_link_mode(hdev); + hclgevf_deferred_task_schedule(hdev); clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); @@ -1708,12 +1814,16 @@ static int hclgevf_configure(struct hclgevf_dev *hdev) { int ret; - hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE; - /* get queue configuration from PF */ ret = hclgevf_get_queue_info(hdev); if (ret) return ret; + + /* get queue depth info from PF */ + ret = hclgevf_get_queue_depth(hdev); + if (ret) + return ret; + /* get tc configuration from PF */ return hclgevf_get_tc_info(hdev); } @@ -1788,9 +1898,9 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) rss_cfg->rss_size = hdev->rss_size_max; if (hdev->pdev->revision >= 0x21) { - rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; - netdev_rss_key_fill(rss_cfg->rss_hash_key, - HCLGEVF_RSS_KEY_SIZE); + rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE; + memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key, + HCLGEVF_RSS_KEY_SIZE); ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, rss_cfg->rss_hash_key); @@ -1862,6 +1972,8 @@ static int hclgevf_ae_start(struct hnae3_handle *handle) hclgevf_request_link_info(hdev); + hclgevf_update_link_mode(hdev); + clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); return 0; @@ -2377,6 +2489,15 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) if (ret) goto err_config; + /* vf is not allowed to enable unicast/multicast promisc mode. + * For revision 0x20, default to disable broadcast promisc mode, + * firmware makes sure broadcast packets can be accepted. + * For revision 0x21, default to enable broadcast promisc mode. 
+ */ + ret = hclgevf_set_promisc_mode(hdev, true); + if (ret) + goto err_config; + /* Initialize RSS for this VF */ ret = hclgevf_rss_init_hw(hdev); if (ret) { @@ -2461,7 +2582,8 @@ static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) struct hnae3_handle *nic = &hdev->nic; struct hnae3_knic_private_info *kinfo = &nic->kinfo; - return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); + return min_t(u32, hdev->rss_size_max, + hdev->num_tqps / kinfo->num_tc); } /** @@ -2482,7 +2604,7 @@ static void hclgevf_get_channels(struct hnae3_handle *handle, ch->max_combined = hclgevf_get_max_channels(hdev); ch->other_count = 0; ch->max_other = 0; - ch->combined_count = hdev->num_tqps; + ch->combined_count = handle->kinfo.rss_size; } static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, @@ -2522,7 +2644,7 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, hdev->hw.mac.duplex = duplex; } -static int hclgevf_gro_en(struct hnae3_handle *handle, int enable) +static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); @@ -2558,6 +2680,16 @@ static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) return hdev->reset_count; } +static void hclgevf_get_link_mode(struct hnae3_handle *handle, + unsigned long *supported, + unsigned long *advertising) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + *supported = hdev->hw.mac.supported; + *advertising = hdev->hw.mac.advertising; +} + #define MAX_SEPARATE_NUM 4 #define SEPARATOR_VALUE 0xFFFFFFFF #define REG_NUM_PER_LINE 4 @@ -2640,7 +2772,6 @@ static const struct hnae3_ae_ops hclgevf_ops = { .get_vector = hclgevf_get_vector, .put_vector = hclgevf_put_vector, .reset_queue = hclgevf_reset_tqp, - .set_promisc_mode = hclgevf_set_promisc_mode, .get_mac_addr = hclgevf_get_mac_addr, .set_mac_addr = hclgevf_set_mac_addr, .add_uc_addr = hclgevf_add_uc_addr, @@ -2677,6 +2808,7 @@ static const struct hnae3_ae_ops hclgevf_ops = { .set_mtu = hclgevf_set_mtu, .get_global_queue_id = hclgevf_get_qid_global, .set_timer_task = hclgevf_set_timer_task, + .get_link_mode = hclgevf_get_link_mode, }; static struct hnae3_ae_algo ae_algovf = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index 787bc06944e5..c128863ee7d0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -145,6 +145,8 @@ struct hclgevf_mac { int link; u8 duplex; u32 speed; + u64 supported; + u64 advertising; }; struct hclgevf_hw { @@ -237,7 +239,8 @@ struct hclgevf_dev { u16 num_alloc_vport; /* num vports this driver supports */ u32 numa_node_mask; u16 rx_buf_len; - u16 num_desc; + u16 num_tx_desc; /* desc num of per tx queue */ + u16 num_rx_desc; /* desc num of per rx queue */ u8 hw_tc_map; u16 num_msi; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index 84653f58b2d1..7dc3c9f79169 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -197,6 +197,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) break; case HCLGE_MBX_LINK_STAT_CHANGE: case HCLGE_MBX_ASSERTING_RESET: + case HCLGE_MBX_LINK_STAT_MODE: /* set this mbx event as pending. This is required as we * might lose interrupt event when mbx task is busy * handling. 
This shall be cleared when mbx task just @@ -247,6 +248,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) u8 duplex; u32 speed; u32 tail; + u8 idx; /* we can safely clear it now as we are at start of the async message * processing @@ -270,12 +272,22 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) link_status = le16_to_cpu(msg_q[1]); memcpy(&speed, &msg_q[2], sizeof(speed)); duplex = (u8)le16_to_cpu(msg_q[4]); + hdev->hw.mac.media_type = (u8)le16_to_cpu(msg_q[5]); /* update upper layer with new link status */ hclgevf_update_link_status(hdev, link_status); hclgevf_update_speed_duplex(hdev, speed, duplex); break; + case HCLGE_MBX_LINK_STAT_MODE: + idx = (u8)le16_to_cpu(msg_q[1]); + if (idx) + memcpy(&hdev->hw.mac.supported, &msg_q[2], + sizeof(unsigned long)); + else + memcpy(&hdev->hw.mac.advertising, &msg_q[2], + sizeof(unsigned long)); + break; case HCLGE_MBX_ASSERTING_RESET: /* PF has asserted reset hence VF should go in pending * state and poll for the hardware reset status till it diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index 017e08452d8c..baf5cc251f32 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c @@ -321,7 +321,7 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum) } hns_mdio_cmd_write(mdio_dev, is_c45, - MDIO_C45_WRITE_ADDR, phy_id, devad); + MDIO_C45_READ, phy_id, devad); } /* Step 5: waiting for MDIO_COMMAND_REG's mdio_start==0,*/
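
The hns_mdio hunk above fixes a copy-paste bug in the clause-45 read path: the second command re-issued MDIO_C45_WRITE_ADDR instead of starting the read, so the register address was latched twice and no data was ever fetched. Below is a minimal, self-contained C sketch of the two-step clause-45 access pattern the fix restores; mmd_cmd() and the op codes are illustrative stand-ins for hns_mdio_cmd_write() and the MDIO_C45_* constants, not the driver's code.

#include <stdint.h>
#include <stdio.h>

enum c45_op { C45_WRITE_ADDR, C45_WRITE_DATA, C45_READ };

/* stand-in for the command-register write done by hns_mdio_cmd_write() */
static void mmd_cmd(enum c45_op op, int phy, int devad, uint16_t arg)
{
	printf("phy %d devad %d: op %d arg 0x%04x\n", phy, devad, op, arg);
}

/* Clause 45 needs two commands per read: first latch the 16-bit register
 * address in the MMD, then start the read op. Reusing C45_WRITE_ADDR for
 * the second step (the bug) only latched the address again, so the data
 * register was never read back.
 */
static uint16_t c45_read(int phy, int devad, uint16_t regnum)
{
	mmd_cmd(C45_WRITE_ADDR, phy, devad, regnum);	/* step 1: address */
	mmd_cmd(C45_READ, phy, devad, 0);		/* step 2: read op */
	return 0;	/* the real driver polls, then reads the data reg */
}

int main(void)
{
	(void)c45_read(1, 3, 0x8000);	/* e.g. devad 3 (PCS) */
	return 0;
}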
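
For context on the vport MAC/VLAN tables threaded through hclge_main.c and hclge_mbx.c above: the PF now mirrors every address a VF programs into a per-vport list, guarded by vport_cfg_mutex, so the hardware entries can be flushed when the VF undergoes FLR (the HCLGE_MBX_GET_VF_FLR_STATUS case). A minimal sketch of that bookkeeping pattern, with a hand-rolled list instead of <linux/list.h> so it stands alone; the names are illustrative, not the driver's.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct mac_node {
	struct mac_node *next;
	unsigned char addr[6];
	int in_hw;		/* mirrors hd_tbl_status */
};

static struct mac_node *uc_list;	/* per-vport in the driver */

/* record an address once the hardware add has succeeded */
static void track_add(const unsigned char *addr)
{
	struct mac_node *n = calloc(1, sizeof(*n));

	if (!n)
		return;
	memcpy(n->addr, addr, sizeof(n->addr));
	n->in_hw = 1;
	n->next = uc_list;
	uc_list = n;
}

/* on VF FLR: remove entries still live in hardware, then free the list */
static void flush_all(void (*rm_hw)(const unsigned char *))
{
	struct mac_node *n, *tmp;

	for (n = uc_list; n; n = tmp) {
		tmp = n->next;
		if (n->in_hw)
			rm_hw(n->addr);
		free(n);
	}
	uc_list = NULL;
}

static void rm_hw_stub(const unsigned char *addr)
{
	printf("rm %02x:..:%02x\n", addr[0], addr[5]);
}

int main(void)
{
	const unsigned char mac[6] = { 0x02, 0, 0, 0, 0, 0x01 };

	track_add(mac);
	flush_all(rm_hw_stub);
	return 0;
}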
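
Similarly, the HCLGE_MBX_GET_RSS_KEY plumbing added above moves the 40-byte RSS hash key from PF to VF in 8-byte mailbox responses: the VF requests chunk N and copies only the remainder on the final chunk. A self-contained sketch of that loop; fetch_chunk() is a stand-in for the mailbox round trip, and the sizes match HCLGEVF_RSS_KEY_SIZE and HCLGEVF_RSS_MBX_RESP_LEN.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define KEY_SIZE 40	/* HCLGEVF_RSS_KEY_SIZE */
#define CHUNK 8		/* HCLGEVF_RSS_MBX_RESP_LEN */

static uint8_t pf_key[KEY_SIZE];	/* hdev->vport[0].rss_hash_key */

/* stand-in for the HCLGE_MBX_GET_RSS_KEY request/response */
static void fetch_chunk(uint8_t index, uint8_t resp[CHUNK])
{
	memcpy(resp, &pf_key[index * CHUNK], CHUNK);
}

int main(void)
{
	uint8_t vf_key[KEY_SIZE];
	uint8_t resp[CHUNK];
	int msg_num = (KEY_SIZE + CHUNK - 1) / CHUNK;	/* 5 round trips */
	int i;

	for (i = 0; i < msg_num; i++) {
		fetch_chunk(i, resp);
		if (i == msg_num - 1)	/* tail chunk: copy the remainder
					 * (40 divides evenly here, but the
					 * driver keeps the guard anyway)
					 */
			memcpy(&vf_key[i * CHUNK], resp,
			       KEY_SIZE - i * CHUNK);
		else
			memcpy(&vf_key[i * CHUNK], resp, CHUNK);
	}

	printf("fetched %d bytes in %d chunks\n", KEY_SIZE, msg_num);
	return 0;
}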