Diffstat (limited to 'drivers/net/ethernet/qlogic/qede')
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede.h         |   3
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c |  11
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_filter.c  | 572
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c    | 292
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_rdma.c    |  63
5 files changed, 514 insertions, 427 deletions
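Two themes run through the diff that follows: the ethtool private-flags getter is reworked from a single shifted boolean into a bitmap that also advertises SmartAN capability, and the driver gains an error-recovery path (QEDE_STATE_RECOVERY, QEDE_SP_RECOVERY, and recovery-aware probe/remove/load/unload). Below is a minimal, self-contained sketch of the bitmap-building pattern used by the new qede_get_priv_flags(); the struct and field names are simplified stand-ins for the driver's real types, not the in-tree code.

```c
/*
 * Illustrative sketch only: how the reworked priv-flags getter in this
 * diff composes a bitmap instead of returning one shifted boolean.
 * "struct fake_dev_info" and its fields are invented stand-ins for
 * edev->dev_info.common.
 */
#include <stdio.h>

enum {
	QEDE_PRI_FLAG_CMT,
	QEDE_PRI_FLAG_SMART_AN_SUPPORT,	/* MFW supports SmartAN */
	QEDE_PRI_FLAG_LEN,
};

struct fake_dev_info {
	unsigned int num_hwfns;	/* >1 on coupled-function devices */
	unsigned int smart_an;	/* non-zero when the MFW reports SmartAN */
};

static unsigned int get_priv_flags(const struct fake_dev_info *info)
{
	unsigned int flags = 0;

	if (info->num_hwfns > 1)
		flags |= 1U << QEDE_PRI_FLAG_CMT;

	if (info->smart_an)
		flags |= 1U << QEDE_PRI_FLAG_SMART_AN_SUPPORT;

	return flags;
}

int main(void)
{
	struct fake_dev_info info = { .num_hwfns = 2, .smart_an = 1 };

	printf("priv flags: 0x%x\n", get_priv_flags(&info));
	return 0;
}
```

With the strings added to qede_private_arr, both "Coupled-Function" and "SmartAN capable" would show up in `ethtool --show-priv-flags <iface>` output when the corresponding bits are set.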
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 730997b13747..63a78162cfaf 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -162,6 +162,7 @@ struct qede_rdma_dev { struct list_head entry; struct list_head rdma_event_list; struct workqueue_struct *rdma_wq; + bool exp_recovery; }; struct qede_ptp; @@ -264,6 +265,7 @@ struct qede_dev { enum QEDE_STATE { QEDE_STATE_CLOSED, QEDE_STATE_OPEN, + QEDE_STATE_RECOVERY, }; #define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo)) @@ -462,6 +464,7 @@ struct qede_fastpath { #define QEDE_CSUM_UNNECESSARY BIT(1) #define QEDE_TUNN_CSUM_UNNECESSARY BIT(2) +#define QEDE_SP_RECOVERY 0 #define QEDE_SP_RX_MODE 1 #ifdef CONFIG_RFS_ACCEL diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 16331c6c6fa7..c6238083e898 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -186,11 +186,13 @@ static const struct { enum { QEDE_PRI_FLAG_CMT, + QEDE_PRI_FLAG_SMART_AN_SUPPORT, /* MFW supports SmartAN */ QEDE_PRI_FLAG_LEN, }; static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = { "Coupled-Function", + "SmartAN capable", }; enum qede_ethtool_tests { @@ -404,8 +406,15 @@ static int qede_get_sset_count(struct net_device *dev, int stringset) static u32 qede_get_priv_flags(struct net_device *dev) { struct qede_dev *edev = netdev_priv(dev); + u32 flags = 0; - return (!!(edev->dev_info.common.num_hwfns > 1)) << QEDE_PRI_FLAG_CMT; + if (edev->dev_info.common.num_hwfns > 1) + flags |= BIT(QEDE_PRI_FLAG_CMT); + + if (edev->dev_info.common.smart_an) + flags |= BIT(QEDE_PRI_FLAG_SMART_AN_SUPPORT); + + return flags; } struct qede_link_mode_mapping { diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index b16ce7d93caf..add922b93d2c 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -1665,198 +1665,6 @@ static int qede_set_v6_tuple_to_profile(struct qede_dev *edev, return 0; } -static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev, - struct qede_arfs_tuple *t, - struct ethtool_rx_flow_spec *fs) -{ - if ((fs->h_u.tcp_ip4_spec.ip4src & - fs->m_u.tcp_ip4_spec.ip4src) != fs->h_u.tcp_ip4_spec.ip4src) { - DP_INFO(edev, "Don't support IP-masks\n"); - return -EOPNOTSUPP; - } - - if ((fs->h_u.tcp_ip4_spec.ip4dst & - fs->m_u.tcp_ip4_spec.ip4dst) != fs->h_u.tcp_ip4_spec.ip4dst) { - DP_INFO(edev, "Don't support IP-masks\n"); - return -EOPNOTSUPP; - } - - if ((fs->h_u.tcp_ip4_spec.psrc & - fs->m_u.tcp_ip4_spec.psrc) != fs->h_u.tcp_ip4_spec.psrc) { - DP_INFO(edev, "Don't support port-masks\n"); - return -EOPNOTSUPP; - } - - if ((fs->h_u.tcp_ip4_spec.pdst & - fs->m_u.tcp_ip4_spec.pdst) != fs->h_u.tcp_ip4_spec.pdst) { - DP_INFO(edev, "Don't support port-masks\n"); - return -EOPNOTSUPP; - } - - if (fs->h_u.tcp_ip4_spec.tos) { - DP_INFO(edev, "Don't support tos\n"); - return -EOPNOTSUPP; - } - - t->eth_proto = htons(ETH_P_IP); - t->src_ipv4 = fs->h_u.tcp_ip4_spec.ip4src; - t->dst_ipv4 = fs->h_u.tcp_ip4_spec.ip4dst; - t->src_port = fs->h_u.tcp_ip4_spec.psrc; - t->dst_port = fs->h_u.tcp_ip4_spec.pdst; - - return qede_set_v4_tuple_to_profile(edev, t); -} - -static int qede_flow_spec_to_tuple_tcpv4(struct qede_dev *edev, - struct qede_arfs_tuple *t, - struct ethtool_rx_flow_spec *fs) -{ - t->ip_proto = IPPROTO_TCP; - - if 
(qede_flow_spec_to_tuple_ipv4_common(edev, t, fs)) - return -EINVAL; - - return 0; -} - -static int qede_flow_spec_to_tuple_udpv4(struct qede_dev *edev, - struct qede_arfs_tuple *t, - struct ethtool_rx_flow_spec *fs) -{ - t->ip_proto = IPPROTO_UDP; - - if (qede_flow_spec_to_tuple_ipv4_common(edev, t, fs)) - return -EINVAL; - - return 0; -} - -static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev, - struct qede_arfs_tuple *t, - struct ethtool_rx_flow_spec *fs) -{ - struct in6_addr zero_addr; - - memset(&zero_addr, 0, sizeof(zero_addr)); - - if ((fs->h_u.tcp_ip6_spec.psrc & - fs->m_u.tcp_ip6_spec.psrc) != fs->h_u.tcp_ip6_spec.psrc) { - DP_INFO(edev, "Don't support port-masks\n"); - return -EOPNOTSUPP; - } - - if ((fs->h_u.tcp_ip6_spec.pdst & - fs->m_u.tcp_ip6_spec.pdst) != fs->h_u.tcp_ip6_spec.pdst) { - DP_INFO(edev, "Don't support port-masks\n"); - return -EOPNOTSUPP; - } - - if (fs->h_u.tcp_ip6_spec.tclass) { - DP_INFO(edev, "Don't support tclass\n"); - return -EOPNOTSUPP; - } - - t->eth_proto = htons(ETH_P_IPV6); - memcpy(&t->src_ipv6, &fs->h_u.tcp_ip6_spec.ip6src, - sizeof(struct in6_addr)); - memcpy(&t->dst_ipv6, &fs->h_u.tcp_ip6_spec.ip6dst, - sizeof(struct in6_addr)); - t->src_port = fs->h_u.tcp_ip6_spec.psrc; - t->dst_port = fs->h_u.tcp_ip6_spec.pdst; - - return qede_set_v6_tuple_to_profile(edev, t, &zero_addr); -} - -static int qede_flow_spec_to_tuple_tcpv6(struct qede_dev *edev, - struct qede_arfs_tuple *t, - struct ethtool_rx_flow_spec *fs) -{ - t->ip_proto = IPPROTO_TCP; - - if (qede_flow_spec_to_tuple_ipv6_common(edev, t, fs)) - return -EINVAL; - - return 0; -} - -static int qede_flow_spec_to_tuple_udpv6(struct qede_dev *edev, - struct qede_arfs_tuple *t, - struct ethtool_rx_flow_spec *fs) -{ - t->ip_proto = IPPROTO_UDP; - - if (qede_flow_spec_to_tuple_ipv6_common(edev, t, fs)) - return -EINVAL; - - return 0; -} - -static int qede_flow_spec_to_tuple(struct qede_dev *edev, - struct qede_arfs_tuple *t, - struct ethtool_rx_flow_spec *fs) -{ - memset(t, 0, sizeof(*t)); - - if (qede_flow_spec_validate_unused(edev, fs)) - return -EOPNOTSUPP; - - switch ((fs->flow_type & ~FLOW_EXT)) { - case TCP_V4_FLOW: - return qede_flow_spec_to_tuple_tcpv4(edev, t, fs); - case UDP_V4_FLOW: - return qede_flow_spec_to_tuple_udpv4(edev, t, fs); - case TCP_V6_FLOW: - return qede_flow_spec_to_tuple_tcpv6(edev, t, fs); - case UDP_V6_FLOW: - return qede_flow_spec_to_tuple_udpv6(edev, t, fs); - default: - DP_VERBOSE(edev, NETIF_MSG_IFUP, - "Can't support flow of type %08x\n", fs->flow_type); - return -EOPNOTSUPP; - } - - return 0; -} - -static int qede_flow_spec_validate(struct qede_dev *edev, - struct ethtool_rx_flow_spec *fs, - struct qede_arfs_tuple *t) -{ - if (fs->location >= QEDE_RFS_MAX_FLTR) { - DP_INFO(edev, "Location out-of-bounds\n"); - return -EINVAL; - } - - /* Check location isn't already in use */ - if (test_bit(fs->location, edev->arfs->arfs_fltr_bmap)) { - DP_INFO(edev, "Location already in use\n"); - return -EINVAL; - } - - /* Check if the filtering-mode could support the filter */ - if (edev->arfs->filter_count && - edev->arfs->mode != t->mode) { - DP_INFO(edev, - "flow_spec would require filtering mode %08x, but %08x is configured\n", - t->mode, edev->arfs->filter_count); - return -EINVAL; - } - - /* If drop requested then no need to validate other data */ - if (fs->ring_cookie == RX_CLS_FLOW_DISC) - return 0; - - if (ethtool_get_flow_spec_ring_vf(fs->ring_cookie)) - return 0; - - if (fs->ring_cookie >= QEDE_RSS_COUNT(edev)) { - DP_INFO(edev, "Queue out-of-bounds\n"); - 
return -EINVAL; - } - - return 0; -} - /* Must be called while qede lock is held */ static struct qede_arfs_fltr_node * qede_flow_find_fltr(struct qede_dev *edev, struct qede_arfs_tuple *t) @@ -1896,72 +1704,6 @@ static void qede_flow_set_destination(struct qede_dev *edev, "Configuring N-tuple for VF 0x%02x\n", n->vfid - 1); } -int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info) -{ - struct ethtool_rx_flow_spec *fsp = &info->fs; - struct qede_arfs_fltr_node *n; - struct qede_arfs_tuple t; - int min_hlen, rc; - - __qede_lock(edev); - - if (!edev->arfs) { - rc = -EPERM; - goto unlock; - } - - /* Translate the flow specification into something fittign our DB */ - rc = qede_flow_spec_to_tuple(edev, &t, fsp); - if (rc) - goto unlock; - - /* Make sure location is valid and filter isn't already set */ - rc = qede_flow_spec_validate(edev, fsp, &t); - if (rc) - goto unlock; - - if (qede_flow_find_fltr(edev, &t)) { - rc = -EINVAL; - goto unlock; - } - - n = kzalloc(sizeof(*n), GFP_KERNEL); - if (!n) { - rc = -ENOMEM; - goto unlock; - } - - min_hlen = qede_flow_get_min_header_size(&t); - n->data = kzalloc(min_hlen, GFP_KERNEL); - if (!n->data) { - kfree(n); - rc = -ENOMEM; - goto unlock; - } - - n->sw_id = fsp->location; - set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap); - n->buf_len = min_hlen; - - memcpy(&n->tuple, &t, sizeof(n->tuple)); - - qede_flow_set_destination(edev, n, fsp); - - /* Build a minimal header according to the flow */ - n->tuple.build_hdr(&n->tuple, n->data); - - rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0); - if (rc) - goto unlock; - - qede_configure_arfs_fltr(edev, n, n->rxq_id, true); - rc = qede_poll_arfs_filter_config(edev, n); -unlock: - __qede_unlock(edev); - - return rc; -} - int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie) { struct qede_arfs_fltr_node *fltr = NULL; @@ -2004,190 +1746,172 @@ unlock: } static int qede_parse_actions(struct qede_dev *edev, - struct tcf_exts *exts) + struct flow_action *flow_action) { - int rc = -EINVAL, num_act = 0, i; - const struct tc_action *a; - bool is_drop = false; + const struct flow_action_entry *act; + int i; - if (!tcf_exts_has_actions(exts)) { - DP_NOTICE(edev, "No tc actions received\n"); - return rc; + if (!flow_action_has_entries(flow_action)) { + DP_NOTICE(edev, "No actions received\n"); + return -EINVAL; } - tcf_exts_for_each_action(i, a, exts) { - num_act++; + flow_action_for_each(i, act, flow_action) { + switch (act->id) { + case FLOW_ACTION_DROP: + break; + case FLOW_ACTION_QUEUE: + if (act->queue.vf) + break; - if (is_tcf_gact_shot(a)) - is_drop = true; + if (act->queue.index >= QEDE_RSS_COUNT(edev)) { + DP_INFO(edev, "Queue out-of-bounds\n"); + return -EINVAL; + } + break; + default: + return -EINVAL; + } } - if (num_act == 1 && is_drop) - return 0; - - return rc; + return 0; } static int -qede_tc_parse_ports(struct qede_dev *edev, - struct tc_cls_flower_offload *f, - struct qede_arfs_tuple *t) -{ - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) { - struct flow_dissector_key_ports *key, *mask; - - key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_PORTS, - f->key); - mask = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_PORTS, - f->mask); - - if ((key->src && mask->src != U16_MAX) || - (key->dst && mask->dst != U16_MAX)) { +qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule, + struct qede_arfs_tuple *t) +{ + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + + 
flow_rule_match_ports(rule, &match); + if ((match.key->src && match.mask->src != U16_MAX) || + (match.key->dst && match.mask->dst != U16_MAX)) { DP_NOTICE(edev, "Do not support ports masks\n"); return -EINVAL; } - t->src_port = key->src; - t->dst_port = key->dst; + t->src_port = match.key->src; + t->dst_port = match.key->dst; } return 0; } static int -qede_tc_parse_v6_common(struct qede_dev *edev, - struct tc_cls_flower_offload *f, - struct qede_arfs_tuple *t) +qede_flow_parse_v6_common(struct qede_dev *edev, struct flow_rule *rule, + struct qede_arfs_tuple *t) { struct in6_addr zero_addr, addr; memset(&zero_addr, 0, sizeof(addr)); memset(&addr, 0xff, sizeof(addr)); - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { - struct flow_dissector_key_ipv6_addrs *key, *mask; - - key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - f->key); - mask = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { + struct flow_match_ipv6_addrs match; - if ((memcmp(&key->src, &zero_addr, sizeof(addr)) && - memcmp(&mask->src, &addr, sizeof(addr))) || - (memcmp(&key->dst, &zero_addr, sizeof(addr)) && - memcmp(&mask->dst, &addr, sizeof(addr)))) { + flow_rule_match_ipv6_addrs(rule, &match); + if ((memcmp(&match.key->src, &zero_addr, sizeof(addr)) && + memcmp(&match.mask->src, &addr, sizeof(addr))) || + (memcmp(&match.key->dst, &zero_addr, sizeof(addr)) && + memcmp(&match.mask->dst, &addr, sizeof(addr)))) { DP_NOTICE(edev, "Do not support IPv6 address prefix/mask\n"); return -EINVAL; } - memcpy(&t->src_ipv6, &key->src, sizeof(addr)); - memcpy(&t->dst_ipv6, &key->dst, sizeof(addr)); + memcpy(&t->src_ipv6, &match.key->src, sizeof(addr)); + memcpy(&t->dst_ipv6, &match.key->dst, sizeof(addr)); } - if (qede_tc_parse_ports(edev, f, t)) + if (qede_flow_parse_ports(edev, rule, t)) return -EINVAL; return qede_set_v6_tuple_to_profile(edev, t, &zero_addr); } static int -qede_tc_parse_v4_common(struct qede_dev *edev, - struct tc_cls_flower_offload *f, +qede_flow_parse_v4_common(struct qede_dev *edev, struct flow_rule *rule, struct qede_arfs_tuple *t) { - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { - struct flow_dissector_key_ipv4_addrs *key, *mask; - - key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - f->key); - mask = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + struct flow_match_ipv4_addrs match; - if ((key->src && mask->src != U32_MAX) || - (key->dst && mask->dst != U32_MAX)) { + flow_rule_match_ipv4_addrs(rule, &match); + if ((match.key->src && match.mask->src != U32_MAX) || + (match.key->dst && match.mask->dst != U32_MAX)) { DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n"); return -EINVAL; } - t->src_ipv4 = key->src; - t->dst_ipv4 = key->dst; + t->src_ipv4 = match.key->src; + t->dst_ipv4 = match.key->dst; } - if (qede_tc_parse_ports(edev, f, t)) + if (qede_flow_parse_ports(edev, rule, t)) return -EINVAL; return qede_set_v4_tuple_to_profile(edev, t); } static int -qede_tc_parse_tcp_v6(struct qede_dev *edev, - struct tc_cls_flower_offload *f, +qede_flow_parse_tcp_v6(struct qede_dev *edev, struct flow_rule *rule, struct qede_arfs_tuple *tuple) { tuple->ip_proto = IPPROTO_TCP; tuple->eth_proto = htons(ETH_P_IPV6); - return qede_tc_parse_v6_common(edev, f, tuple); + return qede_flow_parse_v6_common(edev, rule, 
tuple); } static int -qede_tc_parse_tcp_v4(struct qede_dev *edev, - struct tc_cls_flower_offload *f, +qede_flow_parse_tcp_v4(struct qede_dev *edev, struct flow_rule *rule, struct qede_arfs_tuple *tuple) { tuple->ip_proto = IPPROTO_TCP; tuple->eth_proto = htons(ETH_P_IP); - return qede_tc_parse_v4_common(edev, f, tuple); + return qede_flow_parse_v4_common(edev, rule, tuple); } static int -qede_tc_parse_udp_v6(struct qede_dev *edev, - struct tc_cls_flower_offload *f, +qede_flow_parse_udp_v6(struct qede_dev *edev, struct flow_rule *rule, struct qede_arfs_tuple *tuple) { tuple->ip_proto = IPPROTO_UDP; tuple->eth_proto = htons(ETH_P_IPV6); - return qede_tc_parse_v6_common(edev, f, tuple); + return qede_flow_parse_v6_common(edev, rule, tuple); } static int -qede_tc_parse_udp_v4(struct qede_dev *edev, - struct tc_cls_flower_offload *f, +qede_flow_parse_udp_v4(struct qede_dev *edev, struct flow_rule *rule, struct qede_arfs_tuple *tuple) { tuple->ip_proto = IPPROTO_UDP; tuple->eth_proto = htons(ETH_P_IP); - return qede_tc_parse_v4_common(edev, f, tuple); + return qede_flow_parse_v4_common(edev, rule, tuple); } static int -qede_parse_flower_attr(struct qede_dev *edev, __be16 proto, - struct tc_cls_flower_offload *f, - struct qede_arfs_tuple *tuple) +qede_parse_flow_attr(struct qede_dev *edev, __be16 proto, + struct flow_rule *rule, struct qede_arfs_tuple *tuple) { + struct flow_dissector *dissector = rule->match.dissector; int rc = -EINVAL; u8 ip_proto = 0; memset(tuple, 0, sizeof(*tuple)); - if (f->dissector->used_keys & + if (dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | BIT(FLOW_DISSECTOR_KEY_BASIC) | BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | BIT(FLOW_DISSECTOR_KEY_PORTS))) { DP_NOTICE(edev, "Unsupported key set:0x%x\n", - f->dissector->used_keys); + dissector->used_keys); return -EOPNOTSUPP; } @@ -2197,25 +1921,23 @@ qede_parse_flower_attr(struct qede_dev *edev, __be16 proto, return -EPROTONOSUPPORT; } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *key; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; - key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->key); - ip_proto = key->ip_proto; + flow_rule_match_basic(rule, &match); + ip_proto = match.key->ip_proto; } if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IP)) - rc = qede_tc_parse_tcp_v4(edev, f, tuple); + rc = qede_flow_parse_tcp_v4(edev, rule, tuple); else if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IPV6)) - rc = qede_tc_parse_tcp_v6(edev, f, tuple); + rc = qede_flow_parse_tcp_v6(edev, rule, tuple); else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IP)) - rc = qede_tc_parse_udp_v4(edev, f, tuple); + rc = qede_flow_parse_udp_v4(edev, rule, tuple); else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IPV6)) - rc = qede_tc_parse_udp_v6(edev, f, tuple); + rc = qede_flow_parse_udp_v6(edev, rule, tuple); else - DP_NOTICE(edev, "Invalid tc protocol request\n"); + DP_NOTICE(edev, "Invalid protocol request\n"); return rc; } @@ -2235,7 +1957,7 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto, } /* parse flower attribute and prepare filter */ - if (qede_parse_flower_attr(edev, proto, f, &t)) + if (qede_parse_flow_attr(edev, proto, f->rule, &t)) goto unlock; /* Validate profile mode and number of filters */ @@ -2248,7 +1970,7 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto, } /* parse tc actions and get the vf_id */ - if 
(qede_parse_actions(edev, f->exts)) + if (qede_parse_actions(edev, &f->rule->action)) goto unlock; if (qede_flow_find_fltr(edev, &t)) { @@ -2290,3 +2012,141 @@ unlock: __qede_unlock(edev); return rc; } + +static int qede_flow_spec_validate(struct qede_dev *edev, + struct flow_action *flow_action, + struct qede_arfs_tuple *t, + __u32 location) +{ + if (location >= QEDE_RFS_MAX_FLTR) { + DP_INFO(edev, "Location out-of-bounds\n"); + return -EINVAL; + } + + /* Check location isn't already in use */ + if (test_bit(location, edev->arfs->arfs_fltr_bmap)) { + DP_INFO(edev, "Location already in use\n"); + return -EINVAL; + } + + /* Check if the filtering-mode could support the filter */ + if (edev->arfs->filter_count && + edev->arfs->mode != t->mode) { + DP_INFO(edev, + "flow_spec would require filtering mode %08x, but %08x is configured\n", + t->mode, edev->arfs->filter_count); + return -EINVAL; + } + + if (qede_parse_actions(edev, flow_action)) + return -EINVAL; + + return 0; +} + +static int qede_flow_spec_to_rule(struct qede_dev *edev, + struct qede_arfs_tuple *t, + struct ethtool_rx_flow_spec *fs) +{ + struct ethtool_rx_flow_spec_input input = {}; + struct ethtool_rx_flow_rule *flow; + __be16 proto; + int err = 0; + + if (qede_flow_spec_validate_unused(edev, fs)) + return -EOPNOTSUPP; + + switch ((fs->flow_type & ~FLOW_EXT)) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + proto = htons(ETH_P_IP); + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + proto = htons(ETH_P_IPV6); + break; + default: + DP_VERBOSE(edev, NETIF_MSG_IFUP, + "Can't support flow of type %08x\n", fs->flow_type); + return -EOPNOTSUPP; + } + + input.fs = fs; + flow = ethtool_rx_flow_rule_create(&input); + if (IS_ERR(flow)) + return PTR_ERR(flow); + + if (qede_parse_flow_attr(edev, proto, flow->rule, t)) { + err = -EINVAL; + goto err_out; + } + + /* Make sure location is valid and filter isn't already set */ + err = qede_flow_spec_validate(edev, &flow->rule->action, t, + fs->location); +err_out: + ethtool_rx_flow_rule_destroy(flow); + return err; + +} + +int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info) +{ + struct ethtool_rx_flow_spec *fsp = &info->fs; + struct qede_arfs_fltr_node *n; + struct qede_arfs_tuple t; + int min_hlen, rc; + + __qede_lock(edev); + + if (!edev->arfs) { + rc = -EPERM; + goto unlock; + } + + /* Translate the flow specification into something fittign our DB */ + rc = qede_flow_spec_to_rule(edev, &t, fsp); + if (rc) + goto unlock; + + if (qede_flow_find_fltr(edev, &t)) { + rc = -EINVAL; + goto unlock; + } + + n = kzalloc(sizeof(*n), GFP_KERNEL); + if (!n) { + rc = -ENOMEM; + goto unlock; + } + + min_hlen = qede_flow_get_min_header_size(&t); + n->data = kzalloc(min_hlen, GFP_KERNEL); + if (!n->data) { + kfree(n); + rc = -ENOMEM; + goto unlock; + } + + n->sw_id = fsp->location; + set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap); + n->buf_len = min_hlen; + + memcpy(&n->tuple, &t, sizeof(n->tuple)); + + qede_flow_set_destination(edev, n, fsp); + + /* Build a minimal header according to the flow */ + n->tuple.build_hdr(&n->tuple, n->data); + + rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0); + if (rc) + goto unlock; + + qede_configure_arfs_fltr(edev, n, n->rxq_id, true); + rc = qede_poll_arfs_filter_config(edev, n); +unlock: + __qede_unlock(edev); + + return rc; +} diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 9790f26d17c4..02a97c659e29 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ 
b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -133,23 +133,12 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id); static void qede_remove(struct pci_dev *pdev); static void qede_shutdown(struct pci_dev *pdev); static void qede_link_update(void *dev, struct qed_link_output *link); +static void qede_schedule_recovery_handler(void *dev); +static void qede_recovery_handler(struct qede_dev *edev); static void qede_get_eth_tlv_data(void *edev, void *data); static void qede_get_generic_tlv_data(void *edev, struct qed_generic_tlvs *data); -/* The qede lock is used to protect driver state change and driver flows that - * are not reentrant. - */ -void __qede_lock(struct qede_dev *edev) -{ - mutex_lock(&edev->qede_lock); -} - -void __qede_unlock(struct qede_dev *edev) -{ - mutex_unlock(&edev->qede_lock); -} - #ifdef CONFIG_QED_SRIOV static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos, __be16 vlan_proto) @@ -231,6 +220,7 @@ static struct qed_eth_cb_ops qede_ll_ops = { .arfs_filter_op = qede_arfs_filter_op, #endif .link_update = qede_link_update, + .schedule_recovery_handler = qede_schedule_recovery_handler, .get_generic_tlv_data = qede_get_generic_tlv_data, .get_protocol_tlv_data = qede_get_eth_tlv_data, }, @@ -953,11 +943,57 @@ err: return -ENOMEM; } +/* The qede lock is used to protect driver state change and driver flows that + * are not reentrant. + */ +void __qede_lock(struct qede_dev *edev) +{ + mutex_lock(&edev->qede_lock); +} + +void __qede_unlock(struct qede_dev *edev) +{ + mutex_unlock(&edev->qede_lock); +} + +/* This version of the lock should be used when acquiring the RTNL lock is also + * needed in addition to the internal qede lock. + */ +void qede_lock(struct qede_dev *edev) +{ + rtnl_lock(); + __qede_lock(edev); +} + +void qede_unlock(struct qede_dev *edev) +{ + __qede_unlock(edev); + rtnl_unlock(); +} + static void qede_sp_task(struct work_struct *work) { struct qede_dev *edev = container_of(work, struct qede_dev, sp_task.work); + /* The locking scheme depends on the specific flag: + * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to + * ensure that ongoing flows are ended and new ones are not started. + * In other cases - only the internal qede lock should be acquired. + */ + + if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) { +#ifdef CONFIG_QED_SRIOV + /* SRIOV must be disabled outside the lock to avoid a deadlock. + * The recovery of the active VFs is currently not supported. 
+ */ + qede_sriov_configure(edev->pdev, 0); +#endif + qede_lock(edev); + qede_recovery_handler(edev); + qede_unlock(edev); + } + __qede_lock(edev); if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags)) @@ -1034,6 +1070,7 @@ static void qede_log_probe(struct qede_dev *edev) enum qede_probe_mode { QEDE_PROBE_NORMAL, + QEDE_PROBE_RECOVERY, }; static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, @@ -1054,6 +1091,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, probe_params.dp_module = dp_module; probe_params.dp_level = dp_level; probe_params.is_vf = is_vf; + probe_params.recov_in_prog = (mode == QEDE_PROBE_RECOVERY); cdev = qed_ops->common->probe(pdev, &probe_params); if (!cdev) { rc = -ENODEV; @@ -1081,11 +1119,20 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, if (rc) goto err2; - edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module, - dp_level); - if (!edev) { - rc = -ENOMEM; - goto err2; + if (mode != QEDE_PROBE_RECOVERY) { + edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module, + dp_level); + if (!edev) { + rc = -ENOMEM; + goto err2; + } + } else { + struct net_device *ndev = pci_get_drvdata(pdev); + + edev = netdev_priv(ndev); + edev->cdev = cdev; + memset(&edev->stats, 0, sizeof(edev->stats)); + memcpy(&edev->dev_info, &dev_info, sizeof(dev_info)); } if (is_vf) @@ -1093,28 +1140,31 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, qede_init_ndev(edev); - rc = qede_rdma_dev_add(edev); + rc = qede_rdma_dev_add(edev, (mode == QEDE_PROBE_RECOVERY)); if (rc) goto err3; - /* Prepare the lock prior to the registration of the netdev, - * as once it's registered we might reach flows requiring it - * [it's even possible to reach a flow needing it directly - * from there, although it's unlikely]. - */ - INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); - mutex_init(&edev->qede_lock); - rc = register_netdev(edev->ndev); - if (rc) { - DP_NOTICE(edev, "Cannot register net-device\n"); - goto err4; + if (mode != QEDE_PROBE_RECOVERY) { + /* Prepare the lock prior to the registration of the netdev, + * as once it's registered we might reach flows requiring it + * [it's even possible to reach a flow needing it directly + * from there, although it's unlikely]. 
+ */ + INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); + mutex_init(&edev->qede_lock); + + rc = register_netdev(edev->ndev); + if (rc) { + DP_NOTICE(edev, "Cannot register net-device\n"); + goto err4; + } } edev->ops->common->set_name(cdev, edev->ndev->name); /* PTP not supported on VFs */ if (!is_vf) - qede_ptp_enable(edev, true); + qede_ptp_enable(edev, (mode == QEDE_PROBE_NORMAL)); edev->ops->register_ops(cdev, &qede_ll_ops, edev); @@ -1129,7 +1179,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, return 0; err4: - qede_rdma_dev_remove(edev); + qede_rdma_dev_remove(edev, (mode == QEDE_PROBE_RECOVERY)); err3: free_netdev(edev->ndev); err2: @@ -1165,6 +1215,7 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id) enum qede_remove_mode { QEDE_REMOVE_NORMAL, + QEDE_REMOVE_RECOVERY, }; static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) @@ -1175,15 +1226,19 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) DP_INFO(edev, "Starting qede_remove\n"); - qede_rdma_dev_remove(edev); - unregister_netdev(ndev); - cancel_delayed_work_sync(&edev->sp_task); + qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY)); - qede_ptp_disable(edev); + if (mode != QEDE_REMOVE_RECOVERY) { + unregister_netdev(ndev); - edev->ops->common->set_power_state(cdev, PCI_D0); + cancel_delayed_work_sync(&edev->sp_task); - pci_set_drvdata(pdev, NULL); + edev->ops->common->set_power_state(cdev, PCI_D0); + + pci_set_drvdata(pdev, NULL); + } + + qede_ptp_disable(edev); /* Use global ops since we've freed edev */ qed_ops->common->slowpath_stop(cdev); @@ -1197,7 +1252,8 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) * [e.g., QED register callbacks] won't break anything when * accessing the netdevice. 
*/ - free_netdev(ndev); + if (mode != QEDE_REMOVE_RECOVERY) + free_netdev(ndev); dev_info(&pdev->dev, "Ending qede_remove successfully\n"); } @@ -1542,6 +1598,58 @@ static int qede_alloc_mem_load(struct qede_dev *edev) return 0; } +static void qede_empty_tx_queue(struct qede_dev *edev, + struct qede_tx_queue *txq) +{ + unsigned int pkts_compl = 0, bytes_compl = 0; + struct netdev_queue *netdev_txq; + int rc, len = 0; + + netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id); + + while (qed_chain_get_cons_idx(&txq->tx_pbl) != + qed_chain_get_prod_idx(&txq->tx_pbl)) { + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, + "Freeing a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n", + txq->index, qed_chain_get_cons_idx(&txq->tx_pbl), + qed_chain_get_prod_idx(&txq->tx_pbl)); + + rc = qede_free_tx_pkt(edev, txq, &len); + if (rc) { + DP_NOTICE(edev, + "Failed to free a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n", + txq->index, + qed_chain_get_cons_idx(&txq->tx_pbl), + qed_chain_get_prod_idx(&txq->tx_pbl)); + break; + } + + bytes_compl += len; + pkts_compl++; + txq->sw_tx_cons++; + } + + netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl); +} + +static void qede_empty_tx_queues(struct qede_dev *edev) +{ + int i; + + for_each_queue(i) + if (edev->fp_array[i].type & QEDE_FASTPATH_TX) { + int cos; + + for_each_cos_in_txq(edev, cos) { + struct qede_fastpath *fp; + + fp = &edev->fp_array[i]; + qede_empty_tx_queue(edev, + &fp->txq[cos]); + } + } +} + /* This function inits fp content and resets the SB, RXQ and TXQ structures */ static void qede_init_fp(struct qede_dev *edev) { @@ -2056,6 +2164,7 @@ out: enum qede_unload_mode { QEDE_UNLOAD_NORMAL, + QEDE_UNLOAD_RECOVERY, }; static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, @@ -2071,7 +2180,8 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags); - edev->state = QEDE_STATE_CLOSED; + if (mode != QEDE_UNLOAD_RECOVERY) + edev->state = QEDE_STATE_CLOSED; qede_rdma_dev_event_close(edev); @@ -2079,17 +2189,20 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, netif_tx_disable(edev->ndev); netif_carrier_off(edev->ndev); - /* Reset the link */ - memset(&link_params, 0, sizeof(link_params)); - link_params.link_up = false; - edev->ops->common->set_link(edev->cdev, &link_params); - rc = qede_stop_queues(edev); - if (rc) { - qede_sync_free_irqs(edev); - goto out; - } + if (mode != QEDE_UNLOAD_RECOVERY) { + /* Reset the link */ + memset(&link_params, 0, sizeof(link_params)); + link_params.link_up = false; + edev->ops->common->set_link(edev->cdev, &link_params); - DP_INFO(edev, "Stopped Queues\n"); + rc = qede_stop_queues(edev); + if (rc) { + qede_sync_free_irqs(edev); + goto out; + } + + DP_INFO(edev, "Stopped Queues\n"); + } qede_vlan_mark_nonconfigured(edev); edev->ops->fastpath_stop(edev->cdev); @@ -2105,18 +2218,26 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, qede_napi_disable_remove(edev); + if (mode == QEDE_UNLOAD_RECOVERY) + qede_empty_tx_queues(edev); + qede_free_mem_load(edev); qede_free_fp_array(edev); out: if (!is_locked) __qede_unlock(edev); + + if (mode != QEDE_UNLOAD_RECOVERY) + DP_NOTICE(edev, "Link is down\n"); + DP_INFO(edev, "Ending qede unload\n"); } enum qede_load_mode { QEDE_LOAD_NORMAL, QEDE_LOAD_RELOAD, + QEDE_LOAD_RECOVERY, }; static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, @@ -2296,6 +2417,77 @@ static void qede_link_update(void 
*dev, struct qed_link_output *link) } } +static void qede_schedule_recovery_handler(void *dev) +{ + struct qede_dev *edev = dev; + + if (edev->state == QEDE_STATE_RECOVERY) { + DP_NOTICE(edev, + "Avoid scheduling a recovery handling since already in recovery state\n"); + return; + } + + set_bit(QEDE_SP_RECOVERY, &edev->sp_flags); + schedule_delayed_work(&edev->sp_task, 0); + + DP_INFO(edev, "Scheduled a recovery handler\n"); +} + +static void qede_recovery_failed(struct qede_dev *edev) +{ + netdev_err(edev->ndev, "Recovery handling has failed. Power cycle is needed.\n"); + + netif_device_detach(edev->ndev); + + if (edev->cdev) + edev->ops->common->set_power_state(edev->cdev, PCI_D3hot); +} + +static void qede_recovery_handler(struct qede_dev *edev) +{ + u32 curr_state = edev->state; + int rc; + + DP_NOTICE(edev, "Starting a recovery process\n"); + + /* No need to acquire first the qede_lock since is done by qede_sp_task + * before calling this function. + */ + edev->state = QEDE_STATE_RECOVERY; + + edev->ops->common->recovery_prolog(edev->cdev); + + if (curr_state == QEDE_STATE_OPEN) + qede_unload(edev, QEDE_UNLOAD_RECOVERY, true); + + __qede_remove(edev->pdev, QEDE_REMOVE_RECOVERY); + + rc = __qede_probe(edev->pdev, edev->dp_module, edev->dp_level, + IS_VF(edev), QEDE_PROBE_RECOVERY); + if (rc) { + edev->cdev = NULL; + goto err; + } + + if (curr_state == QEDE_STATE_OPEN) { + rc = qede_load(edev, QEDE_LOAD_RECOVERY, true); + if (rc) + goto err; + + qede_config_rx_mode(edev->ndev); + udp_tunnel_get_rx_info(edev->ndev); + } + + edev->state = curr_state; + + DP_NOTICE(edev, "Recovery handling is done\n"); + + return; + +err: + qede_recovery_failed(edev); +} + static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq) { struct netdev_queue *netdev_txq; diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c index 1900bf7e67d1..ffabc2d2f082 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c +++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c @@ -50,6 +50,8 @@ static void _qede_rdma_dev_add(struct qede_dev *edev) if (!qedr_drv) return; + /* Leftovers from previous error recovery */ + edev->rdma_info.exp_recovery = false; edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev, edev->ndev); } @@ -87,21 +89,26 @@ static void qede_rdma_destroy_wq(struct qede_dev *edev) destroy_workqueue(edev->rdma_info.rdma_wq); } -int qede_rdma_dev_add(struct qede_dev *edev) +int qede_rdma_dev_add(struct qede_dev *edev, bool recovery) { - int rc = 0; + int rc; - if (qede_rdma_supported(edev)) { - rc = qede_rdma_create_wq(edev); - if (rc) - return rc; + if (!qede_rdma_supported(edev)) + return 0; - INIT_LIST_HEAD(&edev->rdma_info.entry); - mutex_lock(&qedr_dev_list_lock); - list_add_tail(&edev->rdma_info.entry, &qedr_dev_list); - _qede_rdma_dev_add(edev); - mutex_unlock(&qedr_dev_list_lock); - } + /* Cannot start qedr while recovering since it wasn't fully stopped */ + if (recovery) + return 0; + + rc = qede_rdma_create_wq(edev); + if (rc) + return rc; + + INIT_LIST_HEAD(&edev->rdma_info.entry); + mutex_lock(&qedr_dev_list_lock); + list_add_tail(&edev->rdma_info.entry, &qedr_dev_list); + _qede_rdma_dev_add(edev); + mutex_unlock(&qedr_dev_list_lock); return rc; } @@ -110,19 +117,30 @@ static void _qede_rdma_dev_remove(struct qede_dev *edev) { if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev) qedr_drv->remove(edev->rdma_info.qedr_dev); - edev->rdma_info.qedr_dev = NULL; } -void qede_rdma_dev_remove(struct qede_dev *edev) 
+void qede_rdma_dev_remove(struct qede_dev *edev, bool recovery) { if (!qede_rdma_supported(edev)) return; - qede_rdma_destroy_wq(edev); - mutex_lock(&qedr_dev_list_lock); - _qede_rdma_dev_remove(edev); - list_del(&edev->rdma_info.entry); - mutex_unlock(&qedr_dev_list_lock); + /* Cannot remove qedr while recovering since it wasn't fully stopped */ + if (!recovery) { + qede_rdma_destroy_wq(edev); + mutex_lock(&qedr_dev_list_lock); + if (!edev->rdma_info.exp_recovery) + _qede_rdma_dev_remove(edev); + edev->rdma_info.qedr_dev = NULL; + list_del(&edev->rdma_info.entry); + mutex_unlock(&qedr_dev_list_lock); + } else { + if (!edev->rdma_info.exp_recovery) { + mutex_lock(&qedr_dev_list_lock); + _qede_rdma_dev_remove(edev); + mutex_unlock(&qedr_dev_list_lock); + } + edev->rdma_info.exp_recovery = true; + } } static void _qede_rdma_dev_open(struct qede_dev *edev) @@ -204,7 +222,8 @@ void qede_rdma_unregister_driver(struct qedr_driver *drv) mutex_lock(&qedr_dev_list_lock); list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) { - if (edev->rdma_info.qedr_dev) + /* If device has experienced recovery it was already removed */ + if (edev->rdma_info.qedr_dev && !edev->rdma_info.exp_recovery) _qede_rdma_dev_remove(edev); } qedr_drv = NULL; @@ -284,6 +303,10 @@ static void qede_rdma_add_event(struct qede_dev *edev, { struct qede_rdma_event_work *event_node; + /* If a recovery was experienced avoid adding the event */ + if (edev->rdma_info.exp_recovery) + return; + if (!edev->rdma_info.qedr_dev) return; |
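During a recovery unload, the diff drains the TX rings directly instead of waiting for hardware completions (the new qede_empty_tx_queue()/qede_empty_tx_queues() helpers in qede_main.c). The snippet below is a hedged userspace model of that drain-and-account loop: a plain array stands in for the PBL chain, and the names (struct ring, drain_ring) are invented for illustration; only the control flow mirrors the driver.

```c
/*
 * Userspace model of the drain loop in qede_empty_tx_queue(): walk from
 * the consumer to the producer index, "free" each pending packet, and
 * report completed packets/bytes in one shot (the real code uses
 * qede_free_tx_pkt() and netdev_tx_completed_queue()).
 */
#include <stdio.h>

#define RING_SIZE 8

struct ring {
	unsigned int prod;		/* next slot the driver would fill   */
	unsigned int cons;		/* next slot awaiting completion     */
	unsigned int len[RING_SIZE];	/* byte length of each queued packet */
};

static void drain_ring(struct ring *r)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;

	while (r->cons != r->prod) {
		unsigned int idx = r->cons % RING_SIZE;

		/* Real driver: qede_free_tx_pkt() unmaps and frees the skb. */
		bytes_compl += r->len[idx];
		pkts_compl++;
		r->cons++;
	}

	/* Real driver: netdev_tx_completed_queue(netdev_txq, pkts, bytes). */
	printf("drained %u packets, %u bytes\n", pkts_compl, bytes_compl);
}

int main(void)
{
	struct ring r = { .prod = 3, .cons = 0, .len = { 64, 1500, 128 } };

	drain_ring(&r);
	return 0;
}
```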