Diffstat (limited to 'drivers/net/ethernet/netronome')
-rw-r--r-- drivers/net/ethernet/netronome/Kconfig | 13
-rw-r--r-- drivers/net/ethernet/netronome/nfp/Makefile | 4
-rw-r--r-- drivers/net/ethernet/netronome/nfp/ccm_mbox.c | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/crypto/crypto.h | 23
-rw-r--r-- drivers/net/ethernet/netronome/nfp/crypto/ipsec.c | 613
-rw-r--r-- drivers/net/ethernet/netronome/nfp/devlink_param.c | 8
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/conntrack.c | 24
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/lag_conf.c | 52
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/main.c | 9
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/main.h | 21
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c | 59
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfd3/dp.c | 59
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfd3/ipsec.c | 18
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h | 8
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfdk/dp.c | 55
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c | 17
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h | 8
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_app.h | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_devlink.c | 27
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_main.h | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net.h | 36
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 149
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 37
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 238
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 19
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 5
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_port.h | 14
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c | 17
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h | 59
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c | 26
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nic/dcb.c | 571
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nic/main.c | 43
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nic/main.h | 46
33 files changed, 2157 insertions(+), 127 deletions(-)
diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig
index 8844d1ac053a..d03d6e96f730 100644
--- a/drivers/net/ethernet/netronome/Kconfig
+++ b/drivers/net/ethernet/netronome/Kconfig
@@ -18,7 +18,7 @@ if NET_VENDOR_NETRONOME
config NFP
tristate "Netronome(R) NFP4000/NFP6000 NIC driver"
- depends on PCI && PCI_MSI
+ depends on PCI_MSI
depends on VXLAN || VXLAN=n
depends on TLS && TLS_DEVICE || TLS_DEVICE=n
select NET_DEVLINK
@@ -54,6 +54,17 @@ config NFP_APP_ABM_NIC
functionality.
Code will be built into the nfp.ko driver.
+config NFP_NET_IPSEC
+ bool "NFP IPsec crypto offload support"
+ depends on NFP
+ depends on XFRM_OFFLOAD
+ default y
+ help
+ Enable driver support for IPsec crypto offload on NFP NICs.
+ Say Y if you are planning to make use of IPsec crypto
+ offload. NOTE that IPsec crypto offload on NFP NICs
+ requires specific firmware to work.
+
config NFP_DEBUG
bool "Debug support for Netronome(R) NFP4000/NFP6000 NIC drivers"
depends on NFP
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 9c0861d03634..808599b8066e 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -80,4 +80,8 @@ nfp-objs += \
abm/main.o
endif
+nfp-$(CONFIG_NFP_NET_IPSEC) += crypto/ipsec.o nfd3/ipsec.o nfdk/ipsec.o
+
nfp-$(CONFIG_NFP_DEBUG) += nfp_net_debugfs.o
+
+nfp-$(CONFIG_DCB) += nic/dcb.o
diff --git a/drivers/net/ethernet/netronome/nfp/ccm_mbox.c b/drivers/net/ethernet/netronome/nfp/ccm_mbox.c
index 4247bca09807..aa8aba4ff7aa 100644
--- a/drivers/net/ethernet/netronome/nfp/ccm_mbox.c
+++ b/drivers/net/ethernet/netronome/nfp/ccm_mbox.c
@@ -503,7 +503,7 @@ nfp_ccm_mbox_msg_prepare(struct nfp_net *nn, struct sk_buff *skb,
max_len = max(max_reply_size, round_up(skb->len, 4));
if (max_len > mbox_max) {
nn_dp_warn(&nn->dp,
- "message too big for tha mailbox: %u/%u vs %u\n",
+ "message too big for the mailbox: %u/%u vs %u\n",
skb->len, max_reply_size, mbox_max);
return -EMSGSIZE;
}
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/crypto.h b/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
index bffe58bb2f27..1df73d658938 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
+++ b/drivers/net/ethernet/netronome/nfp/crypto/crypto.h
@@ -39,4 +39,27 @@ nfp_net_tls_rx_resync_req(struct net_device *netdev,
}
#endif
+/* IPsec related structures and functions */
+struct nfp_ipsec_offload {
+ u32 seq_hi;
+ u32 seq_low;
+ u32 handle;
+};
+
+#ifndef CONFIG_NFP_NET_IPSEC
+static inline void nfp_net_ipsec_init(struct nfp_net *nn)
+{
+}
+
+static inline void nfp_net_ipsec_clean(struct nfp_net *nn)
+{
+}
+#else
+void nfp_net_ipsec_init(struct nfp_net *nn);
+void nfp_net_ipsec_clean(struct nfp_net *nn);
+bool nfp_net_ipsec_tx_prep(struct nfp_net_dp *dp, struct sk_buff *skb,
+ struct nfp_ipsec_offload *offload_info);
+int nfp_net_ipsec_rx(struct nfp_meta_parsed *meta, struct sk_buff *skb);
+#endif
+
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
new file mode 100644
index 000000000000..c0dcce8ae437
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
@@ -0,0 +1,613 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc */
+/* Copyright (C) 2021 Corigine, Inc */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <asm/unaligned.h>
+#include <linux/ktime.h>
+#include <net/xfrm.h>
+
+#include "../nfpcore/nfp_dev.h"
+#include "../nfp_net_ctrl.h"
+#include "../nfp_net.h"
+#include "crypto.h"
+
+#define NFP_NET_IPSEC_MAX_SA_CNT (16 * 1024) /* Firmware supports a maximum of 16K SA offloads */
+
+/* IPsec config message cmd codes */
+enum nfp_ipsec_cfg_mssg_cmd_codes {
+ NFP_IPSEC_CFG_MSSG_ADD_SA, /* Add a new SA */
+ NFP_IPSEC_CFG_MSSG_INV_SA /* Invalidate an existing SA */
+};
+
+/* IPsec config message response codes */
+enum nfp_ipsec_cfg_mssg_rsp_codes {
+ NFP_IPSEC_CFG_MSSG_OK,
+ NFP_IPSEC_CFG_MSSG_FAILED,
+ NFP_IPSEC_CFG_MSSG_SA_VALID,
+ NFP_IPSEC_CFG_MSSG_SA_HASH_ADD_FAILED,
+ NFP_IPSEC_CFG_MSSG_SA_HASH_DEL_FAILED,
+ NFP_IPSEC_CFG_MSSG_SA_INVALID_CMD
+};
+
+/* Protocol */
+enum nfp_ipsec_sa_prot {
+ NFP_IPSEC_PROTOCOL_AH = 0,
+ NFP_IPSEC_PROTOCOL_ESP = 1
+};
+
+/* Mode */
+enum nfp_ipsec_sa_mode {
+ NFP_IPSEC_PROTMODE_TRANSPORT = 0,
+ NFP_IPSEC_PROTMODE_TUNNEL = 1
+};
+
+/* Cipher types */
+enum nfp_ipsec_sa_cipher {
+ NFP_IPSEC_CIPHER_NULL,
+ NFP_IPSEC_CIPHER_3DES,
+ NFP_IPSEC_CIPHER_AES128,
+ NFP_IPSEC_CIPHER_AES192,
+ NFP_IPSEC_CIPHER_AES256,
+ NFP_IPSEC_CIPHER_AES128_NULL,
+ NFP_IPSEC_CIPHER_AES192_NULL,
+ NFP_IPSEC_CIPHER_AES256_NULL,
+ NFP_IPSEC_CIPHER_CHACHA20
+};
+
+/* Cipher modes */
+enum nfp_ipsec_sa_cipher_mode {
+ NFP_IPSEC_CIMODE_ECB,
+ NFP_IPSEC_CIMODE_CBC,
+ NFP_IPSEC_CIMODE_CFB,
+ NFP_IPSEC_CIMODE_OFB,
+ NFP_IPSEC_CIMODE_CTR
+};
+
+/* Hash types */
+enum nfp_ipsec_sa_hash_type {
+ NFP_IPSEC_HASH_NONE,
+ NFP_IPSEC_HASH_MD5_96,
+ NFP_IPSEC_HASH_SHA1_96,
+ NFP_IPSEC_HASH_SHA256_96,
+ NFP_IPSEC_HASH_SHA384_96,
+ NFP_IPSEC_HASH_SHA512_96,
+ NFP_IPSEC_HASH_MD5_128,
+ NFP_IPSEC_HASH_SHA1_80,
+ NFP_IPSEC_HASH_SHA256_128,
+ NFP_IPSEC_HASH_SHA384_192,
+ NFP_IPSEC_HASH_SHA512_256,
+ NFP_IPSEC_HASH_GF128_128,
+ NFP_IPSEC_HASH_POLY1305_128
+};
+
+/* IPSEC_CFG_MSSG_ADD_SA */
+struct nfp_ipsec_cfg_add_sa {
+ u32 ciph_key[8]; /* Cipher Key */
+ union {
+ u32 auth_key[16]; /* Authentication Key */
+ struct nfp_ipsec_aesgcm { /* AES-GCM-ESP fields */
+ u32 salt; /* Initialized with SA */
+ u32 resv[15];
+ } aesgcm_fields;
+ };
+ struct sa_ctrl_word {
+ uint32_t hash :4; /* From nfp_ipsec_sa_hash_type */
+ uint32_t cimode :4; /* From nfp_ipsec_sa_cipher_mode */
+ uint32_t cipher :4; /* From nfp_ipsec_sa_cipher */
+ uint32_t mode :2; /* From nfp_ipsec_sa_mode */
+ uint32_t proto :2; /* From nfp_ipsec_sa_prot */
+ uint32_t dir :1; /* SA direction */
+ uint32_t resv0 :12;
+ uint32_t encap_dsbl:1; /* Encap/Decap disable */
+ uint32_t resv1 :2; /* Must be set to 0 */
+ } ctrl_word;
+ u32 spi; /* SPI Value */
+ uint32_t pmtu_limit :16; /* PMTU Limit */
+ uint32_t resv0 :5;
+ uint32_t ipv6 :1; /* Outbound IPv6 addr format */
+ uint32_t resv1 :10;
+ u32 resv2[2];
+ u32 src_ip[4]; /* Src IP addr */
+ u32 dst_ip[4]; /* Dst IP addr */
+ u32 resv3[6];
+};
+
+/* IPSEC_CFG_MSSG */
+struct nfp_ipsec_cfg_mssg {
+ union {
+ struct{
+ uint32_t cmd:16; /* One of nfp_ipsec_cfg_mssg_cmd_codes */
+ uint32_t rsp:16; /* One of nfp_ipsec_cfg_mssg_rsp_codes */
+ uint32_t sa_idx:16; /* SA table index */
+ uint32_t spare0:16;
+ struct nfp_ipsec_cfg_add_sa cfg_add_sa;
+ };
+ u32 raw[64];
+ };
+};
+
+static int nfp_net_ipsec_cfg(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry)
+{
+ unsigned int offset = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
+ struct nfp_ipsec_cfg_mssg *msg = (struct nfp_ipsec_cfg_mssg *)entry->msg;
+ int i, msg_size, ret;
+
+ ret = nfp_net_mbox_lock(nn, sizeof(*msg));
+ if (ret)
+ return ret;
+
+ msg_size = ARRAY_SIZE(msg->raw);
+ for (i = 0; i < msg_size; i++)
+ nn_writel(nn, offset + 4 * i, msg->raw[i]);
+
+ ret = nfp_net_mbox_reconfig(nn, entry->cmd);
+ if (ret < 0) {
+ nn_ctrl_bar_unlock(nn);
+ return ret;
+ }
+
+ /* For now we always read the whole message response back */
+ for (i = 0; i < msg_size; i++)
+ msg->raw[i] = nn_readl(nn, offset + 4 * i);
+
+ nn_ctrl_bar_unlock(nn);
+
+ switch (msg->rsp) {
+ case NFP_IPSEC_CFG_MSSG_OK:
+ return 0;
+ case NFP_IPSEC_CFG_MSSG_SA_INVALID_CMD:
+ return -EINVAL;
+ case NFP_IPSEC_CFG_MSSG_SA_VALID:
+ return -EEXIST;
+ case NFP_IPSEC_CFG_MSSG_FAILED:
+ case NFP_IPSEC_CFG_MSSG_SA_HASH_ADD_FAILED:
+ case NFP_IPSEC_CFG_MSSG_SA_HASH_DEL_FAILED:
+ return -EIO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int set_aes_keylen(struct nfp_ipsec_cfg_add_sa *cfg, int alg, int keylen)
+{
+ bool aes_gmac = (alg == SADB_X_EALG_NULL_AES_GMAC);
+
+ switch (keylen) {
+ case 128:
+ cfg->ctrl_word.cipher = aes_gmac ? NFP_IPSEC_CIPHER_AES128_NULL :
+ NFP_IPSEC_CIPHER_AES128;
+ break;
+ case 192:
+ cfg->ctrl_word.cipher = aes_gmac ? NFP_IPSEC_CIPHER_AES192_NULL :
+ NFP_IPSEC_CIPHER_AES192;
+ break;
+ case 256:
+ cfg->ctrl_word.cipher = aes_gmac ? NFP_IPSEC_CIPHER_AES256_NULL :
+ NFP_IPSEC_CIPHER_AES256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void set_md5hmac(struct nfp_ipsec_cfg_add_sa *cfg, int *trunc_len)
+{
+ switch (*trunc_len) {
+ case 96:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_MD5_96;
+ break;
+ case 128:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_MD5_128;
+ break;
+ default:
+ *trunc_len = 0;
+ }
+}
+
+static void set_sha1hmac(struct nfp_ipsec_cfg_add_sa *cfg, int *trunc_len)
+{
+ switch (*trunc_len) {
+ case 96:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA1_96;
+ break;
+ case 80:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA1_80;
+ break;
+ default:
+ *trunc_len = 0;
+ }
+}
+
+static void set_sha2_256hmac(struct nfp_ipsec_cfg_add_sa *cfg, int *trunc_len)
+{
+ switch (*trunc_len) {
+ case 96:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA256_96;
+ break;
+ case 128:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA256_128;
+ break;
+ default:
+ *trunc_len = 0;
+ }
+}
+
+static void set_sha2_384hmac(struct nfp_ipsec_cfg_add_sa *cfg, int *trunc_len)
+{
+ switch (*trunc_len) {
+ case 96:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA384_96;
+ break;
+ case 192:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA384_192;
+ break;
+ default:
+ *trunc_len = 0;
+ }
+}
+
+static void set_sha2_512hmac(struct nfp_ipsec_cfg_add_sa *cfg, int *trunc_len)
+{
+ switch (*trunc_len) {
+ case 96:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA512_96;
+ break;
+ case 256:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_SHA512_256;
+ break;
+ default:
+ *trunc_len = 0;
+ }
+}
+
+static int nfp_net_xfrm_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *netdev = x->xso.dev;
+ struct nfp_ipsec_cfg_mssg msg = {};
+ int i, key_len, trunc_len, err = 0;
+ struct nfp_ipsec_cfg_add_sa *cfg;
+ struct nfp_net *nn;
+ unsigned int saidx;
+
+ nn = netdev_priv(netdev);
+ cfg = &msg.cfg_add_sa;
+
+ /* General */
+ switch (x->props.mode) {
+ case XFRM_MODE_TUNNEL:
+ cfg->ctrl_word.mode = NFP_IPSEC_PROTMODE_TUNNEL;
+ break;
+ case XFRM_MODE_TRANSPORT:
+ cfg->ctrl_word.mode = NFP_IPSEC_PROTMODE_TRANSPORT;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported mode for xfrm offload");
+ return -EINVAL;
+ }
+
+ switch (x->id.proto) {
+ case IPPROTO_ESP:
+ cfg->ctrl_word.proto = NFP_IPSEC_PROTOCOL_ESP;
+ break;
+ case IPPROTO_AH:
+ cfg->ctrl_word.proto = NFP_IPSEC_PROTOCOL_AH;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for xfrm offload");
+ return -EINVAL;
+ }
+
+ if (x->props.flags & XFRM_STATE_ESN) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported XFRM_REPLAY_MODE_ESN for xfrm offload");
+ return -EINVAL;
+ }
+
+ if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload type");
+ return -EINVAL;
+ }
+
+ cfg->spi = ntohl(x->id.spi);
+
+ /* Hash/Authentication */
+ if (x->aalg)
+ trunc_len = x->aalg->alg_trunc_len;
+ else
+ trunc_len = 0;
+
+ switch (x->props.aalgo) {
+ case SADB_AALG_NONE:
+ if (x->aead) {
+ trunc_len = -1;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported authentication algorithm");
+ return -EINVAL;
+ }
+ break;
+ case SADB_X_AALG_NULL:
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_NONE;
+ trunc_len = -1;
+ break;
+ case SADB_AALG_MD5HMAC:
+ if (nn->pdev->device == PCI_DEVICE_ID_NFP3800) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported authentication algorithm");
+ return -EINVAL;
+ }
+ set_md5hmac(cfg, &trunc_len);
+ break;
+ case SADB_AALG_SHA1HMAC:
+ set_sha1hmac(cfg, &trunc_len);
+ break;
+ case SADB_X_AALG_SHA2_256HMAC:
+ set_sha2_256hmac(cfg, &trunc_len);
+ break;
+ case SADB_X_AALG_SHA2_384HMAC:
+ set_sha2_384hmac(cfg, &trunc_len);
+ break;
+ case SADB_X_AALG_SHA2_512HMAC:
+ set_sha2_512hmac(cfg, &trunc_len);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported authentication algorithm");
+ return -EINVAL;
+ }
+
+ if (!trunc_len) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported authentication algorithm trunc length");
+ return -EINVAL;
+ }
+
+ if (x->aalg) {
+ key_len = DIV_ROUND_UP(x->aalg->alg_key_len, BITS_PER_BYTE);
+ if (key_len > sizeof(cfg->auth_key)) {
+ NL_SET_ERR_MSG_MOD(extack, "Insufficient space for offloaded auth key");
+ return -EINVAL;
+ }
+ for (i = 0; i < key_len / sizeof(cfg->auth_key[0]) ; i++)
+ cfg->auth_key[i] = get_unaligned_be32(x->aalg->alg_key +
+ sizeof(cfg->auth_key[0]) * i);
+ }
+
+ /* Encryption */
+ switch (x->props.ealgo) {
+ case SADB_EALG_NONE:
+ case SADB_EALG_NULL:
+ cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CBC;
+ cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_NULL;
+ break;
+ case SADB_EALG_3DESCBC:
+ if (nn->pdev->device == PCI_DEVICE_ID_NFP3800) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported encryption algorithm for offload");
+ return -EINVAL;
+ }
+ cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CBC;
+ cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_3DES;
+ break;
+ case SADB_X_EALG_AES_GCM_ICV16:
+ case SADB_X_EALG_NULL_AES_GMAC:
+ if (!x->aead) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid AES key data");
+ return -EINVAL;
+ }
+
+ if (x->aead->alg_icv_len != 128) {
+ NL_SET_ERR_MSG_MOD(extack, "ICV must be 128bit with SADB_X_EALG_AES_GCM_ICV16");
+ return -EINVAL;
+ }
+ cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CTR;
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_GF128_128;
+
+ /* Aead->alg_key_len includes 32-bit salt */
+ if (set_aes_keylen(cfg, x->props.ealgo, x->aead->alg_key_len - 32)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported AES key length");
+ return -EINVAL;
+ }
+ break;
+ case SADB_X_EALG_AESCBC:
+ cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CBC;
+ if (!x->ealg) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid AES key data");
+ return -EINVAL;
+ }
+ if (set_aes_keylen(cfg, x->props.ealgo, x->ealg->alg_key_len) < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported AES key length");
+ return -EINVAL;
+ }
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported encryption algorithm for offload");
+ return -EINVAL;
+ }
+
+ if (x->aead) {
+ int salt_len = 4;
+
+ key_len = DIV_ROUND_UP(x->aead->alg_key_len, BITS_PER_BYTE);
+ key_len -= salt_len;
+
+ if (key_len > sizeof(cfg->ciph_key)) {
+ NL_SET_ERR_MSG_MOD(extack, "aead: Insufficient space for offloaded key");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < key_len / sizeof(cfg->ciph_key[0]) ; i++)
+ cfg->ciph_key[i] = get_unaligned_be32(x->aead->alg_key +
+ sizeof(cfg->ciph_key[0]) * i);
+
+ /* Load up the salt */
+ cfg->aesgcm_fields.salt = get_unaligned_be32(x->aead->alg_key + key_len);
+ }
+
+ if (x->ealg) {
+ key_len = DIV_ROUND_UP(x->ealg->alg_key_len, BITS_PER_BYTE);
+
+ if (key_len > sizeof(cfg->ciph_key)) {
+ NL_SET_ERR_MSG_MOD(extack, "ealg: Insufficient space for offloaded key");
+ return -EINVAL;
+ }
+ for (i = 0; i < key_len / sizeof(cfg->ciph_key[0]) ; i++)
+ cfg->ciph_key[i] = get_unaligned_be32(x->ealg->alg_key +
+ sizeof(cfg->ciph_key[0]) * i);
+ }
+
+ /* IP related info */
+ switch (x->props.family) {
+ case AF_INET:
+ cfg->ipv6 = 0;
+ cfg->src_ip[0] = ntohl(x->props.saddr.a4);
+ cfg->dst_ip[0] = ntohl(x->id.daddr.a4);
+ break;
+ case AF_INET6:
+ cfg->ipv6 = 1;
+ for (i = 0; i < 4; i++) {
+ cfg->src_ip[i] = ntohl(x->props.saddr.a6[i]);
+ cfg->dst_ip[i] = ntohl(x->id.daddr.a6[i]);
+ }
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported address family");
+ return -EINVAL;
+ }
+
+ /* Maximum the NIC IPsec code can handle. Other limits may apply. */
+ cfg->pmtu_limit = 0xffff;
+ cfg->ctrl_word.encap_dsbl = 1;
+
+ /* SA direction */
+ cfg->ctrl_word.dir = x->xso.dir;
+
+ /* Find unused SA data */
+ err = xa_alloc(&nn->xa_ipsec, &saidx, x,
+ XA_LIMIT(0, NFP_NET_IPSEC_MAX_SA_CNT - 1), GFP_KERNEL);
+ if (err < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to get sa_data number for IPsec");
+ return err;
+ }
+
+ /* Allocate saidx and commit the SA */
+ msg.cmd = NFP_IPSEC_CFG_MSSG_ADD_SA;
+ msg.sa_idx = saidx;
+ err = nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_IPSEC, &msg,
+ sizeof(msg), nfp_net_ipsec_cfg);
+ if (err) {
+ xa_erase(&nn->xa_ipsec, saidx);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to issue IPsec command");
+ return err;
+ }
+
+ /* 0 is invalid offload_handle for kernel */
+ x->xso.offload_handle = saidx + 1;
+ return 0;
+}
+
+static void nfp_net_xfrm_del_state(struct xfrm_state *x)
+{
+ struct nfp_ipsec_cfg_mssg msg = {
+ .cmd = NFP_IPSEC_CFG_MSSG_INV_SA,
+ .sa_idx = x->xso.offload_handle - 1,
+ };
+ struct net_device *netdev = x->xso.dev;
+ struct nfp_net *nn;
+ int err;
+
+ nn = netdev_priv(netdev);
+ err = nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_IPSEC, &msg,
+ sizeof(msg), nfp_net_ipsec_cfg);
+ if (err)
+ nn_warn(nn, "Failed to invalidate SA in hardware\n");
+
+ xa_erase(&nn->xa_ipsec, x->xso.offload_handle - 1);
+}
+
+static bool nfp_net_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+ if (x->props.family == AF_INET)
+ /* Offload with IPv4 options is not supported yet */
+ return ip_hdr(skb)->ihl == 5;
+
+ /* Offload with IPv6 extension headers is not supported yet */
+ return !(ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr));
+}
+
+static const struct xfrmdev_ops nfp_net_ipsec_xfrmdev_ops = {
+ .xdo_dev_state_add = nfp_net_xfrm_add_state,
+ .xdo_dev_state_delete = nfp_net_xfrm_del_state,
+ .xdo_dev_offload_ok = nfp_net_ipsec_offload_ok,
+};
+
+void nfp_net_ipsec_init(struct nfp_net *nn)
+{
+ if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_IPSEC))
+ return;
+
+ xa_init_flags(&nn->xa_ipsec, XA_FLAGS_ALLOC);
+ nn->dp.netdev->xfrmdev_ops = &nfp_net_ipsec_xfrmdev_ops;
+}
+
+void nfp_net_ipsec_clean(struct nfp_net *nn)
+{
+ if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_IPSEC))
+ return;
+
+ WARN_ON(!xa_empty(&nn->xa_ipsec));
+ xa_destroy(&nn->xa_ipsec);
+}
+
+bool nfp_net_ipsec_tx_prep(struct nfp_net_dp *dp, struct sk_buff *skb,
+ struct nfp_ipsec_offload *offload_info)
+{
+ struct xfrm_offload *xo = xfrm_offload(skb);
+ struct xfrm_state *x;
+
+ x = xfrm_input_state(skb);
+ if (!x)
+ return false;
+
+ offload_info->seq_hi = xo->seq.hi;
+ offload_info->seq_low = xo->seq.low;
+ offload_info->handle = x->xso.offload_handle;
+
+ return true;
+}
+
+int nfp_net_ipsec_rx(struct nfp_meta_parsed *meta, struct sk_buff *skb)
+{
+ struct net_device *netdev = skb->dev;
+ struct xfrm_offload *xo;
+ struct xfrm_state *x;
+ struct sec_path *sp;
+ struct nfp_net *nn;
+ u32 saidx;
+
+ nn = netdev_priv(netdev);
+
+ saidx = meta->ipsec_saidx - 1;
+ if (saidx >= NFP_NET_IPSEC_MAX_SA_CNT)
+ return -EINVAL;
+
+ sp = secpath_set(skb);
+ if (unlikely(!sp))
+ return -ENOMEM;
+
+ xa_lock(&nn->xa_ipsec);
+ x = xa_load(&nn->xa_ipsec, saidx);
+ xa_unlock(&nn->xa_ipsec);
+ if (!x)
+ return -EINVAL;
+
+ xfrm_state_hold(x);
+ sp->xvec[sp->len++] = x;
+ sp->olen++;
+ xo = xfrm_offload(skb);
+ xo->flags = CRYPTO_DONE;
+ xo->status = CRYPTO_SUCCESS;
+
+ return 0;
+}
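The new crypto/ipsec.c above relies on one small convention worth spelling out: xa_alloc() may legitimately return index 0, but the XFRM stack treats x->xso.offload_handle == 0 as "not offloaded", so the driver stores saidx + 1 in the handle and both the TX metadata path and nfp_net_xfrm_del_state() subtract 1 again. The following stand-alone user-space C sketch (not kernel code; helper names are illustrative only) shows that round trip under those assumptions.

/* Sketch of the SA-index / offload_handle convention used above. */
#include <assert.h>
#include <stdio.h>

#define MAX_SA_CNT (16 * 1024)

static unsigned long saidx_to_handle(unsigned int saidx)
{
	return (unsigned long)saidx + 1;	/* never 0, so 0 can mean "no offload" */
}

static unsigned int handle_to_saidx(unsigned long handle)
{
	return (unsigned int)(handle - 1);	/* back to the SA table index */
}

int main(void)
{
	unsigned int saidx = 0;			/* first index an allocator may hand out */
	unsigned long handle = saidx_to_handle(saidx);

	assert(handle != 0);			/* 0 stays reserved for "not offloaded" */
	assert(handle_to_saidx(handle) == saidx);
	assert(handle_to_saidx(handle) < MAX_SA_CNT);

	printf("saidx %u <-> offload_handle %lu\n", saidx, handle);
	return 0;
}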
diff --git a/drivers/net/ethernet/netronome/nfp/devlink_param.c b/drivers/net/ethernet/netronome/nfp/devlink_param.c
index db297ee4d7ad..a655f9e69a7b 100644
--- a/drivers/net/ethernet/netronome/nfp/devlink_param.c
+++ b/drivers/net/ethernet/netronome/nfp/devlink_param.c
@@ -233,8 +233,8 @@ int nfp_devlink_params_register(struct nfp_pf *pf)
if (err <= 0)
return err;
- return devlink_params_register(devlink, nfp_devlink_params,
- ARRAY_SIZE(nfp_devlink_params));
+ return devl_params_register(devlink, nfp_devlink_params,
+ ARRAY_SIZE(nfp_devlink_params));
}
void nfp_devlink_params_unregister(struct nfp_pf *pf)
@@ -245,6 +245,6 @@ void nfp_devlink_params_unregister(struct nfp_pf *pf)
if (err <= 0)
return;
- devlink_params_unregister(priv_to_devlink(pf), nfp_devlink_params,
- ARRAY_SIZE(nfp_devlink_params));
+ devl_params_unregister(priv_to_devlink(pf), nfp_devlink_params,
+ ARRAY_SIZE(nfp_devlink_params));
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index f693119541d5..d23830b5bcb8 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -1964,6 +1964,27 @@ int nfp_fl_ct_stats(struct flow_cls_offload *flow,
return 0;
}
+static bool
+nfp_fl_ct_offload_nft_supported(struct flow_cls_offload *flow)
+{
+ struct flow_rule *flow_rule = flow->rule;
+ struct flow_action *flow_action =
+ &flow_rule->action;
+ struct flow_action_entry *act;
+ int i;
+
+ flow_action_for_each(i, act, flow_action) {
+ if (act->id == FLOW_ACTION_CT_METADATA) {
+ enum ip_conntrack_info ctinfo =
+ act->ct_metadata.cookie & NFCT_INFOMASK;
+
+ return ctinfo != IP_CT_NEW;
+ }
+ }
+
+ return false;
+}
+
static int
nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offload *flow)
{
@@ -1976,6 +1997,9 @@ nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offl
extack = flow->common.extack;
switch (flow->command) {
case FLOW_CLS_REPLACE:
+ if (!nfp_fl_ct_offload_nft_supported(flow))
+ return -EOPNOTSUPP;
+
/* Netfilter can request offload multiple times for the same
* flow - protect against adding duplicates.
*/
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index e92860e20a24..88d6d992e7d0 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -154,10 +154,11 @@ nfp_fl_lag_find_group_for_master_with_lag(struct nfp_fl_lag *lag,
return NULL;
}
-int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
- struct net_device *master,
- struct nfp_fl_pre_lag *pre_act,
- struct netlink_ext_ack *extack)
+static int nfp_fl_lag_get_group_info(struct nfp_app *app,
+ struct net_device *netdev,
+ __be16 *group_id,
+ u8 *batch_ver,
+ u8 *group_inst)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_lag_group *group = NULL;
@@ -165,23 +166,52 @@ int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
mutex_lock(&priv->nfp_lag.lock);
group = nfp_fl_lag_find_group_for_master_with_lag(&priv->nfp_lag,
- master);
+ netdev);
if (!group) {
mutex_unlock(&priv->nfp_lag.lock);
- NL_SET_ERR_MSG_MOD(extack, "invalid entry: group does not exist for LAG action");
return -ENOENT;
}
- pre_act->group_id = cpu_to_be16(group->group_id);
- temp_vers = cpu_to_be32(priv->nfp_lag.batch_ver <<
- NFP_FL_PRE_LAG_VER_OFF);
- memcpy(pre_act->lag_version, &temp_vers, 3);
- pre_act->instance = group->group_inst;
+ if (group_id)
+ *group_id = cpu_to_be16(group->group_id);
+
+ if (batch_ver) {
+ temp_vers = cpu_to_be32(priv->nfp_lag.batch_ver <<
+ NFP_FL_PRE_LAG_VER_OFF);
+ memcpy(batch_ver, &temp_vers, 3);
+ }
+
+ if (group_inst)
+ *group_inst = group->group_inst;
+
mutex_unlock(&priv->nfp_lag.lock);
return 0;
}
+int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
+ struct net_device *master,
+ struct nfp_fl_pre_lag *pre_act,
+ struct netlink_ext_ack *extack)
+{
+ if (nfp_fl_lag_get_group_info(app, master, &pre_act->group_id,
+ pre_act->lag_version,
+ &pre_act->instance)) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid entry: group does not exist for LAG action");
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+void nfp_flower_lag_get_info_from_netdev(struct nfp_app *app,
+ struct net_device *netdev,
+ struct nfp_tun_neigh_lag *lag)
+{
+ nfp_fl_lag_get_group_info(app, netdev, NULL,
+ lag->lag_version, &lag->lag_instance);
+}
+
int nfp_flower_lag_get_output_id(struct nfp_app *app, struct net_device *master)
{
struct nfp_flower_priv *priv = app->priv;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 4d960a9641b3..83eaa5ae3cd4 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -76,7 +76,9 @@ nfp_flower_get_internal_port_id(struct nfp_app *app, struct net_device *netdev)
u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
struct net_device *netdev)
{
+ struct nfp_flower_priv *priv = app->priv;
int ext_port;
+ int gid;
if (nfp_netdev_is_nfp_repr(netdev)) {
return nfp_repr_get_port_id(netdev);
@@ -86,6 +88,13 @@ u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
return 0;
return nfp_flower_internal_port_get_port_id(ext_port);
+ } else if (netif_is_lag_master(netdev) &&
+ priv->flower_ext_feats & NFP_FL_FEATS_TUNNEL_NEIGH_LAG) {
+ gid = nfp_flower_lag_get_output_id(app, netdev);
+ if (gid < 0)
+ return 0;
+
+ return (NFP_FL_LAG_OUT | gid);
}
return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index cb799d18682d..40372545148e 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -52,6 +52,7 @@ struct nfp_app;
#define NFP_FL_FEATS_QOS_PPS BIT(9)
#define NFP_FL_FEATS_QOS_METER BIT(10)
#define NFP_FL_FEATS_DECAP_V2 BIT(11)
+#define NFP_FL_FEATS_TUNNEL_NEIGH_LAG BIT(12)
#define NFP_FL_FEATS_HOST_ACK BIT(31)
#define NFP_FL_ENABLE_FLOW_MERGE BIT(0)
@@ -69,7 +70,8 @@ struct nfp_app;
NFP_FL_FEATS_VLAN_QINQ | \
NFP_FL_FEATS_QOS_PPS | \
NFP_FL_FEATS_QOS_METER | \
- NFP_FL_FEATS_DECAP_V2)
+ NFP_FL_FEATS_DECAP_V2 | \
+ NFP_FL_FEATS_TUNNEL_NEIGH_LAG)
struct nfp_fl_mask_id {
struct circ_buf mask_id_free_list;
@@ -104,6 +106,16 @@ struct nfp_fl_tunnel_offloads {
};
/**
+ * struct nfp_tun_neigh_lag - lag info
+ * @lag_version: lag version
+ * @lag_instance: lag instance
+ */
+struct nfp_tun_neigh_lag {
+ u8 lag_version[3];
+ u8 lag_instance;
+};
+
+/**
* struct nfp_tun_neigh - basic neighbour data
* @dst_addr: Destination MAC address
* @src_addr: Source MAC address
@@ -133,12 +145,14 @@ struct nfp_tun_neigh_ext {
* @src_ipv4: Source IPv4 address
* @common: Neighbour/route common info
* @ext: Neighbour/route extended info
+ * @lag: lag port info
*/
struct nfp_tun_neigh_v4 {
__be32 dst_ipv4;
__be32 src_ipv4;
struct nfp_tun_neigh common;
struct nfp_tun_neigh_ext ext;
+ struct nfp_tun_neigh_lag lag;
};
/**
@@ -147,12 +161,14 @@ struct nfp_tun_neigh_v4 {
* @src_ipv6: Source IPv6 address
* @common: Neighbour/route common info
* @ext: Neighbour/route extended info
+ * @lag: lag port info
*/
struct nfp_tun_neigh_v6 {
struct in6_addr dst_ipv6;
struct in6_addr src_ipv6;
struct nfp_tun_neigh common;
struct nfp_tun_neigh_ext ext;
+ struct nfp_tun_neigh_lag lag;
};
/**
@@ -647,6 +663,9 @@ int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
struct netlink_ext_ack *extack);
int nfp_flower_lag_get_output_id(struct nfp_app *app,
struct net_device *master);
+void nfp_flower_lag_get_info_from_netdev(struct nfp_app *app,
+ struct net_device *netdev,
+ struct nfp_tun_neigh_lag *lag);
void nfp_flower_qos_init(struct nfp_app *app);
void nfp_flower_qos_cleanup(struct nfp_app *app);
int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 52f67157bd0f..060a77f2265d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -290,6 +290,11 @@ nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6))
plen -= sizeof(struct nfp_tun_neigh_ext);
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_TUNNEL_NEIGH_LAG) &&
+ (mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH ||
+ mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6))
+ plen -= sizeof(struct nfp_tun_neigh_lag);
+
skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
if (!skb)
return -ENOMEM;
@@ -455,6 +460,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
sizeof(struct nfp_tun_neigh_v4);
unsigned long cookie = (unsigned long)neigh;
struct nfp_flower_priv *priv = app->priv;
+ struct nfp_tun_neigh_lag lag_info;
struct nfp_neigh_entry *nn_entry;
u32 port_id;
u8 mtype;
@@ -463,11 +469,17 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
if (!port_id)
return;
+ if ((port_id & NFP_FL_LAG_OUT) == NFP_FL_LAG_OUT) {
+ memset(&lag_info, 0, sizeof(struct nfp_tun_neigh_lag));
+ nfp_flower_lag_get_info_from_netdev(app, netdev, &lag_info);
+ }
+
spin_lock_bh(&priv->predt_lock);
nn_entry = rhashtable_lookup_fast(&priv->neigh_table, &cookie,
neigh_table_params);
if (!nn_entry && !neigh_invalid) {
struct nfp_tun_neigh_ext *ext;
+ struct nfp_tun_neigh_lag *lag;
struct nfp_tun_neigh *common;
nn_entry = kzalloc(sizeof(*nn_entry) + neigh_size,
@@ -488,6 +500,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
payload->dst_ipv6 = flowi6->daddr;
common = &payload->common;
ext = &payload->ext;
+ lag = &payload->lag;
mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6;
} else {
struct flowi4 *flowi4 = (struct flowi4 *)flow;
@@ -498,6 +511,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
payload->dst_ipv4 = flowi4->daddr;
common = &payload->common;
ext = &payload->ext;
+ lag = &payload->lag;
mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
}
ext->host_ctx = cpu_to_be32(U32_MAX);
@@ -505,6 +519,9 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
ext->vlan_tci = cpu_to_be16(U16_MAX);
ether_addr_copy(common->src_addr, netdev->dev_addr);
neigh_ha_snapshot(common->dst_addr, neigh, netdev);
+
+ if ((port_id & NFP_FL_LAG_OUT) == NFP_FL_LAG_OUT)
+ memcpy(lag, &lag_info, sizeof(struct nfp_tun_neigh_lag));
common->port_id = cpu_to_be32(port_id);
if (rhashtable_insert_fast(&priv->neigh_table,
@@ -547,13 +564,38 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
if (nn_entry->flow)
list_del(&nn_entry->list_head);
kfree(nn_entry);
- } else if (nn_entry && !neigh_invalid && override) {
- mtype = is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
- NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
- nfp_tun_link_predt_entries(app, nn_entry);
- nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
- nn_entry->payload,
- GFP_ATOMIC);
+ } else if (nn_entry && !neigh_invalid) {
+ struct nfp_tun_neigh *common;
+ u8 dst_addr[ETH_ALEN];
+ bool is_mac_change;
+
+ if (is_ipv6) {
+ struct nfp_tun_neigh_v6 *payload;
+
+ payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload;
+ common = &payload->common;
+ mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6;
+ } else {
+ struct nfp_tun_neigh_v4 *payload;
+
+ payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload;
+ common = &payload->common;
+ mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
+ }
+
+ ether_addr_copy(dst_addr, common->dst_addr);
+ neigh_ha_snapshot(common->dst_addr, neigh, netdev);
+ is_mac_change = !ether_addr_equal(dst_addr, common->dst_addr);
+ if (override || is_mac_change) {
+ if (is_mac_change && nn_entry->flow) {
+ list_del(&nn_entry->list_head);
+ nn_entry->flow = NULL;
+ }
+ nfp_tun_link_predt_entries(app, nn_entry);
+ nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
+ nn_entry->payload,
+ GFP_ATOMIC);
+ }
}
spin_unlock_bh(&priv->predt_lock);
@@ -593,8 +635,7 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
app = app_priv->app;
- if (!nfp_netdev_is_nfp_repr(n->dev) &&
- !nfp_flower_internal_port_can_offload(app, n->dev))
+ if (!nfp_flower_get_port_id_from_netdev(app, n->dev))
return NOTIFY_DONE;
#if IS_ENABLED(CONFIG_INET)
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
index 448c1c1afaee..59fb0583cc08 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
@@ -4,6 +4,7 @@
#include <linux/bpf_trace.h>
#include <linux/netdevice.h>
#include <linux/bitfield.h>
+#include <net/xfrm.h>
#include "../nfp_app.h"
#include "../nfp_net.h"
@@ -167,28 +168,34 @@ nfp_nfd3_tx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
u64_stats_update_end(&r_vec->tx_sync);
}
-static int nfp_nfd3_prep_tx_meta(struct nfp_net_dp *dp, struct sk_buff *skb, u64 tls_handle)
+static int nfp_nfd3_prep_tx_meta(struct nfp_net_dp *dp, struct sk_buff *skb,
+ u64 tls_handle, bool *ipsec)
{
struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ struct nfp_ipsec_offload offload_info;
unsigned char *data;
bool vlan_insert;
u32 meta_id = 0;
int md_bytes;
- if (unlikely(md_dst || tls_handle)) {
- if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX))
- md_dst = NULL;
- }
+#ifdef CONFIG_NFP_NET_IPSEC
+ if (xfrm_offload(skb))
+ *ipsec = nfp_net_ipsec_tx_prep(dp, skb, &offload_info);
+#endif
+
+ if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX))
+ md_dst = NULL;
vlan_insert = skb_vlan_tag_present(skb) && (dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN_V2);
- if (!(md_dst || tls_handle || vlan_insert))
+ if (!(md_dst || tls_handle || vlan_insert || *ipsec))
return 0;
md_bytes = sizeof(meta_id) +
- !!md_dst * NFP_NET_META_PORTID_SIZE +
- !!tls_handle * NFP_NET_META_CONN_HANDLE_SIZE +
- vlan_insert * NFP_NET_META_VLAN_SIZE;
+ (!!md_dst ? NFP_NET_META_PORTID_SIZE : 0) +
+ (!!tls_handle ? NFP_NET_META_CONN_HANDLE_SIZE : 0) +
+ (vlan_insert ? NFP_NET_META_VLAN_SIZE : 0) +
+ (*ipsec ? NFP_NET_META_IPSEC_FIELD_SIZE : 0);
if (unlikely(skb_cow_head(skb, md_bytes)))
return -ENOMEM;
@@ -218,6 +225,16 @@ static int nfp_nfd3_prep_tx_meta(struct nfp_net_dp *dp, struct sk_buff *skb, u64
meta_id <<= NFP_NET_META_FIELD_SIZE;
meta_id |= NFP_NET_META_VLAN;
}
+ if (*ipsec) {
+ data -= NFP_NET_META_IPSEC_SIZE;
+ put_unaligned_be32(offload_info.seq_hi, data);
+ data -= NFP_NET_META_IPSEC_SIZE;
+ put_unaligned_be32(offload_info.seq_low, data);
+ data -= NFP_NET_META_IPSEC_SIZE;
+ put_unaligned_be32(offload_info.handle - 1, data);
+ meta_id <<= NFP_NET_META_IPSEC_FIELD_SIZE;
+ meta_id |= NFP_NET_META_IPSEC << 8 | NFP_NET_META_IPSEC << 4 | NFP_NET_META_IPSEC;
+ }
data -= sizeof(meta_id);
put_unaligned_be32(meta_id, data);
@@ -246,6 +263,7 @@ netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev)
dma_addr_t dma_addr;
unsigned int fsize;
u64 tls_handle = 0;
+ bool ipsec = false;
u16 qidx;
dp = &nn->dp;
@@ -273,7 +291,7 @@ netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
- md_bytes = nfp_nfd3_prep_tx_meta(dp, skb, tls_handle);
+ md_bytes = nfp_nfd3_prep_tx_meta(dp, skb, tls_handle, &ipsec);
if (unlikely(md_bytes < 0))
goto err_flush;
@@ -312,6 +330,8 @@ netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev)
txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
}
+ if (ipsec)
+ nfp_nfd3_ipsec_tx(txd, skb);
/* Gather DMA */
if (nr_frags > 0) {
__le64 second_half;
@@ -764,6 +784,15 @@ nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
return false;
data += sizeof(struct nfp_net_tls_resync_req);
break;
+#ifdef CONFIG_NFP_NET_IPSEC
+ case NFP_NET_META_IPSEC:
+ /* Note: an IPsec packet can have a zero saidx, so add 1
+ * so the driver can tell the packet is an IPsec packet.
+ */
+ meta->ipsec_saidx = get_unaligned_be32(data) + 1;
+ data += 4;
+ break;
+#endif
default:
return true;
}
@@ -876,12 +905,11 @@ static int nfp_nfd3_rx(struct nfp_net_rx_ring *rx_ring, int budget)
struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
struct nfp_net_tx_ring *tx_ring;
struct bpf_prog *xdp_prog;
+ int idx, pkts_polled = 0;
bool xdp_tx_cmpl = false;
unsigned int true_bufsz;
struct sk_buff *skb;
- int pkts_polled = 0;
struct xdp_buff xdp;
- int idx;
xdp_prog = READ_ONCE(dp->xdp_prog);
true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
@@ -1081,6 +1109,13 @@ static int nfp_nfd3_rx(struct nfp_net_rx_ring *rx_ring, int budget)
continue;
}
+#ifdef CONFIG_NFP_NET_IPSEC
+ if (meta.ipsec_saidx != 0 && unlikely(nfp_net_ipsec_rx(&meta, skb))) {
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, NULL, skb);
+ continue;
+ }
+#endif
+
if (meta_len_xdp)
skb_metadata_set(skb, meta_len_xdp);
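Both TX prep paths (the nfd3 one above and the nfdk one later in this patch) build the IPsec metadata the same way: three 32-bit words (seq_hi, seq_low, SA index) are pushed in front of the packet data, and a 4-bit type nibble is shifted into meta_id for each word. The sketch below is plain user-space C under stated assumptions; META_IPSEC and META_IPSEC_FIELD_SIZE are placeholder values, not the real NFP_NET_META_* definitions, which are outside this hunk.

/* Sketch of the downward-growing metadata prepend and nibble packing. */
#include <stdio.h>
#include <stdint.h>

#define META_IPSEC		9	/* placeholder type nibble */
#define META_IPSEC_FIELD_SIZE	12	/* three 4-bit nibbles for three words */

static void put_be32(uint8_t *p, uint32_t v)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

int main(void)
{
	uint8_t buf[64];
	uint8_t *data = buf + sizeof(buf);	/* packet start; metadata grows downward */
	uint32_t meta_id = 0;
	uint32_t seq_hi = 0, seq_low = 42, handle = 1;

	data -= 4; put_be32(data, seq_hi);
	data -= 4; put_be32(data, seq_low);
	data -= 4; put_be32(data, handle - 1);	/* handle converted back to SA index */

	meta_id <<= META_IPSEC_FIELD_SIZE;
	meta_id |= META_IPSEC << 8 | META_IPSEC << 4 | META_IPSEC;

	data -= 4; put_be32(data, meta_id);	/* type word goes in front of the fields */

	printf("%zu bytes of TX metadata, meta_id 0x%x\n",
	       (size_t)(buf + sizeof(buf) - data), meta_id);
	return 0;
}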
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/ipsec.c b/drivers/net/ethernet/netronome/nfp/nfd3/ipsec.c
new file mode 100644
index 000000000000..e90f8c975903
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/ipsec.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc */
+/* Copyright (C) 2021 Corigine, Inc */
+
+#include <net/xfrm.h>
+
+#include "../nfp_net.h"
+#include "nfd3.h"
+
+void nfp_nfd3_ipsec_tx(struct nfp_nfd3_tx_desc *txd, struct sk_buff *skb)
+{
+ struct xfrm_state *x = xfrm_input_state(skb);
+
+ if (x->xso.dev && (x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)) {
+ txd->flags |= NFD3_DESC_TX_CSUM | NFD3_DESC_TX_IP4_CSUM |
+ NFD3_DESC_TX_TCP_CSUM | NFD3_DESC_TX_UDP_CSUM;
+ }
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h b/drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h
index 7a0df9e6c3c4..9c1c10dcbaee 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h
@@ -103,4 +103,12 @@ void nfp_nfd3_rx_ring_fill_freelist(struct nfp_net_dp *dp,
void nfp_nfd3_xsk_tx_free(struct nfp_nfd3_tx_buf *txbuf);
int nfp_nfd3_xsk_poll(struct napi_struct *napi, int budget);
+#ifndef CONFIG_NFP_NET_IPSEC
+static inline void nfp_nfd3_ipsec_tx(struct nfp_nfd3_tx_desc *txd, struct sk_buff *skb)
+{
+}
+#else
+void nfp_nfd3_ipsec_tx(struct nfp_nfd3_tx_desc *txd, struct sk_buff *skb);
+#endif
+
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
index 2b427d8ccb2f..d60c0e991a91 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
@@ -6,6 +6,7 @@
#include <linux/overflow.h>
#include <linux/sizes.h>
#include <linux/bitfield.h>
+#include <net/xfrm.h>
#include "../nfp_app.h"
#include "../nfp_net.h"
@@ -172,25 +173,32 @@ close_block:
static int
nfp_nfdk_prep_tx_meta(struct nfp_net_dp *dp, struct nfp_app *app,
- struct sk_buff *skb)
+ struct sk_buff *skb, bool *ipsec)
{
struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ struct nfp_ipsec_offload offload_info;
unsigned char *data;
bool vlan_insert;
u32 meta_id = 0;
int md_bytes;
+#ifdef CONFIG_NFP_NET_IPSEC
+ if (xfrm_offload(skb))
+ *ipsec = nfp_net_ipsec_tx_prep(dp, skb, &offload_info);
+#endif
+
if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX))
md_dst = NULL;
vlan_insert = skb_vlan_tag_present(skb) && (dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN_V2);
- if (!(md_dst || vlan_insert))
+ if (!(md_dst || vlan_insert || *ipsec))
return 0;
md_bytes = sizeof(meta_id) +
- !!md_dst * NFP_NET_META_PORTID_SIZE +
- vlan_insert * NFP_NET_META_VLAN_SIZE;
+ (!!md_dst ? NFP_NET_META_PORTID_SIZE : 0) +
+ (vlan_insert ? NFP_NET_META_VLAN_SIZE : 0) +
+ (*ipsec ? NFP_NET_META_IPSEC_FIELD_SIZE : 0);
if (unlikely(skb_cow_head(skb, md_bytes)))
return -ENOMEM;
@@ -212,6 +220,17 @@ nfp_nfdk_prep_tx_meta(struct nfp_net_dp *dp, struct nfp_app *app,
meta_id |= NFP_NET_META_VLAN;
}
+ if (*ipsec) {
+ data -= NFP_NET_META_IPSEC_SIZE;
+ put_unaligned_be32(offload_info.seq_hi, data);
+ data -= NFP_NET_META_IPSEC_SIZE;
+ put_unaligned_be32(offload_info.seq_low, data);
+ data -= NFP_NET_META_IPSEC_SIZE;
+ put_unaligned_be32(offload_info.handle - 1, data);
+ meta_id <<= NFP_NET_META_IPSEC_FIELD_SIZE;
+ meta_id |= NFP_NET_META_IPSEC << 8 | NFP_NET_META_IPSEC << 4 | NFP_NET_META_IPSEC;
+ }
+
meta_id = FIELD_PREP(NFDK_META_LEN, md_bytes) |
FIELD_PREP(NFDK_META_FIELDS, meta_id);
@@ -243,6 +262,7 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
struct nfp_net_dp *dp;
int nr_frags, wr_idx;
dma_addr_t dma_addr;
+ bool ipsec = false;
u64 metadata;
dp = &nn->dp;
@@ -263,7 +283,7 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- metadata = nfp_nfdk_prep_tx_meta(dp, nn->app, skb);
+ metadata = nfp_nfdk_prep_tx_meta(dp, nn->app, skb, &ipsec);
if (unlikely((int)metadata < 0))
goto err_flush;
@@ -282,7 +302,7 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
dma_len = skb_headlen(skb);
if (skb_is_gso(skb))
type = NFDK_DESC_TX_TYPE_TSO;
- else if (!nr_frags && dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
+ else if (!nr_frags && dma_len <= NFDK_TX_MAX_DATA_PER_HEAD)
type = NFDK_DESC_TX_TYPE_SIMPLE;
else
type = NFDK_DESC_TX_TYPE_GATHER;
@@ -361,6 +381,9 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
(txd - 1)->dma_len_type = cpu_to_le16(dlen_type | NFDK_DESC_TX_EOP);
+ if (ipsec)
+ metadata = nfp_nfdk_ipsec_tx(metadata, skb);
+
if (!skb_is_gso(skb)) {
real_len = skb->len;
/* Metadata desc */
@@ -760,6 +783,15 @@ nfp_nfdk_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
return false;
data += sizeof(struct nfp_net_tls_resync_req);
break;
+#ifdef CONFIG_NFP_NET_IPSEC
+ case NFP_NET_META_IPSEC:
+ /* Note: an IPsec packet can have a zero saidx, so add 1
+ * so the driver can tell the packet is an IPsec packet.
+ */
+ meta->ipsec_saidx = get_unaligned_be32(data) + 1;
+ data += 4;
+ break;
+#endif
default:
return true;
}
@@ -927,7 +959,7 @@ nfp_nfdk_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
dma_len = pkt_len;
dma_addr = rxbuf->dma_addr + dma_off;
- if (dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
+ if (dma_len <= NFDK_TX_MAX_DATA_PER_HEAD)
type = NFDK_DESC_TX_TYPE_SIMPLE;
else
type = NFDK_DESC_TX_TYPE_GATHER;
@@ -1186,6 +1218,13 @@ static int nfp_nfdk_rx(struct nfp_net_rx_ring *rx_ring, int budget)
continue;
}
+#ifdef CONFIG_NFP_NET_IPSEC
+ if (meta.ipsec_saidx != 0 && unlikely(nfp_net_ipsec_rx(&meta, skb))) {
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, NULL, skb);
+ continue;
+ }
+#endif
+
if (meta_len_xdp)
skb_metadata_set(skb, meta_len_xdp);
@@ -1325,7 +1364,7 @@ nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
txbuf = &tx_ring->ktxbufs[wr_idx];
dma_len = skb_headlen(skb);
- if (dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
+ if (dma_len <= NFDK_TX_MAX_DATA_PER_HEAD)
type = NFDK_DESC_TX_TYPE_SIMPLE;
else
type = NFDK_DESC_TX_TYPE_GATHER;
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c b/drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c
new file mode 100644
index 000000000000..58d8f59eb885
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2023 Corigine, Inc */
+
+#include <net/xfrm.h>
+
+#include "../nfp_net.h"
+#include "nfdk.h"
+
+u64 nfp_nfdk_ipsec_tx(u64 flags, struct sk_buff *skb)
+{
+ struct xfrm_state *x = xfrm_input_state(skb);
+
+ if (x->xso.dev && (x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM))
+ flags |= NFDK_DESC_TX_L3_CSUM | NFDK_DESC_TX_L4_CSUM;
+
+ return flags;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h b/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h
index 0ea51d9f2325..fe55980348e9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h
@@ -125,4 +125,12 @@ nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
void nfp_nfdk_ctrl_poll(struct tasklet_struct *t);
void nfp_nfdk_rx_ring_fill_freelist(struct nfp_net_dp *dp,
struct nfp_net_rx_ring *rx_ring);
+#ifndef CONFIG_NFP_NET_IPSEC
+static inline u64 nfp_nfdk_ipsec_tx(u64 flags, struct sk_buff *skb)
+{
+ return flags;
+}
+#else
+u64 nfp_nfdk_ipsec_tx(u64 flags, struct sk_buff *skb);
+#endif
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index dd56207df246..90707346a4ef 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -445,6 +445,4 @@ int nfp_app_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
int nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app,
struct nfp_net *nn, unsigned int id);
-struct devlink_port *nfp_devlink_get_devlink_port(struct net_device *netdev);
-
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index cb08d7bf9524..bf6bae557158 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -239,10 +239,6 @@ nfp_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
char *buf = NULL;
int err;
- err = devlink_info_driver_name_put(req, "nfp");
- if (err)
- return err;
-
vendor = nfp_hwinfo_lookup(pf->hwinfo, "assembly.vendor");
part = nfp_hwinfo_lookup(pf->hwinfo, "assembly.partno");
sn = nfp_hwinfo_lookup(pf->hwinfo, "assembly.serial");
@@ -334,6 +330,8 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
int serial_len;
int ret;
+ SET_NETDEV_DEVLINK_PORT(port->netdev, &port->dl_port);
+
rtnl_lock();
ret = nfp_devlink_fill_eth_port(port, &eth_port);
rtnl_unlock();
@@ -360,24 +358,3 @@ void nfp_devlink_port_unregister(struct nfp_port *port)
{
devl_port_unregister(&port->dl_port);
}
-
-void nfp_devlink_port_type_eth_set(struct nfp_port *port)
-{
- devlink_port_type_eth_set(&port->dl_port, port->netdev);
-}
-
-void nfp_devlink_port_type_clear(struct nfp_port *port)
-{
- devlink_port_type_clear(&port->dl_port);
-}
-
-struct devlink_port *nfp_devlink_get_devlink_port(struct net_device *netdev)
-{
- struct nfp_port *port;
-
- port = nfp_port_from_netdev(netdev);
- if (!port)
- return NULL;
-
- return &port->dl_port;
-}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index afd3edfa2428..14a751bfe1fe 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -12,7 +12,6 @@
#include <linux/ethtool.h>
#include <linux/list.h>
#include <linux/types.h>
-#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/workqueue.h>
#include <net/devlink.h>
@@ -28,6 +27,7 @@ struct nfp_hwinfo;
struct nfp_mip;
struct nfp_net;
struct nfp_nsp_identify;
+struct nfp_eth_media_buf;
struct nfp_port;
struct nfp_rtsym;
struct nfp_rtsym_table;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index a101ff30a1ae..939cfce15830 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -88,6 +88,9 @@
#define NFP_NET_FL_BATCH 16 /* Add freelist in this Batch size */
#define NFP_NET_XDP_MAX_COMPLETE 2048 /* XDP bufs to reclaim in NAPI poll */
+/* MC definitions */
+#define NFP_NET_CFG_MAC_MC_MAX 1024 /* Maximum number of MC addresses per port */
+
/* Offload definitions */
#define NFP_NET_N_VXLAN_PORTS (NFP_NET_CFG_VXLAN_SZ / sizeof(__be16))
@@ -263,6 +266,10 @@ struct nfp_meta_parsed {
u8 tpid;
u16 tci;
} vlan;
+
+#ifdef CONFIG_NFP_NET_IPSEC
+ u32 ipsec_saidx;
+#endif
};
struct nfp_net_rx_hash {
@@ -472,6 +479,7 @@ struct nfp_stat_pair {
* @rx_dma_off: Offset at which DMA packets (for XDP headroom)
* @rx_offset: Offset in the RX buffers where packet data starts
* @ctrl: Local copy of the control register/word.
+ * @ctrl_w1: Local copy of the control register/word1.
* @fl_bufsz: Currently configured size of the freelist buffers
* @xdp_prog: Installed XDP program
* @tx_rings: Array of pre-allocated TX ring structures
@@ -504,6 +512,7 @@ struct nfp_net_dp {
u32 rx_dma_off;
u32 ctrl;
+ u32 ctrl_w1;
u32 fl_bufsz;
struct bpf_prog *xdp_prog;
@@ -541,6 +550,7 @@ struct nfp_net_dp {
* @id: vNIC id within the PF (0 for VFs)
* @fw_ver: Firmware version
* @cap: Capabilities advertised by the Firmware
+ * @cap_w1: Extended capabilities word advertised by the Firmware
* @max_mtu: Maximum support MTU advertised by the Firmware
* @rss_hfunc: RSS selected hash function
* @rss_cfg: RSS configuration
@@ -583,6 +593,7 @@ struct nfp_net_dp {
* @qcp_cfg: Pointer to QCP queue used for configuration notification
* @tx_bar: Pointer to mapped TX queues
* @rx_bar: Pointer to mapped FL/RX queues
+ * @xa_ipsec: IPsec xarray SA data
* @tlv_caps: Parsed TLV capabilities
* @ktls_tx_conn_cnt: Number of offloaded kTLS TX connections
* @ktls_rx_conn_cnt: Number of offloaded kTLS RX connections
@@ -606,6 +617,10 @@ struct nfp_net_dp {
* @vnic_no_name: For non-port PF vNIC make ndo_get_phys_port_name return
* -EOPNOTSUPP to keep backwards compatibility (set by app)
* @port: Pointer to nfp_port structure if vNIC is a port
+ * @mbox_amsg: Asynchronously processed messages via mailbox
+ * @mbox_amsg.lock: Protect message list
+ * @mbox_amsg.list: List of messages to process
+ * @mbox_amsg.work: Work to process messages asynchronously
* @app_priv: APP private data for this vNIC
*/
struct nfp_net {
@@ -617,6 +632,7 @@ struct nfp_net {
u32 id;
u32 cap;
+ u32 cap_w1;
u32 max_mtu;
u8 rss_hfunc;
@@ -670,6 +686,10 @@ struct nfp_net {
u8 __iomem *tx_bar;
u8 __iomem *rx_bar;
+#ifdef CONFIG_NFP_NET_IPSEC
+ struct xarray xa_ipsec;
+#endif
+
struct nfp_net_tlv_caps tlv_caps;
unsigned int ktls_tx_conn_cnt;
@@ -702,9 +722,25 @@ struct nfp_net {
struct nfp_port *port;
+ struct {
+ spinlock_t lock;
+ struct list_head list;
+ struct work_struct work;
+ } mbox_amsg;
+
void *app_priv;
};
+struct nfp_mbox_amsg_entry {
+ struct list_head list;
+ int (*cfg)(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry);
+ u32 cmd;
+ char msg[];
+};
+
+int nfp_net_sched_mbox_amsg_work(struct nfp_net *nn, u32 cmd, const void *data, size_t len,
+ int (*cb)(struct nfp_net *, struct nfp_mbox_amsg_entry *));
+
/* Functions to read/write from/to a BAR
* Performs any endian conversion necessary.
*/
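The mbox_amsg additions above back both the multicast-filter sync in nfp_net_common.c below and the IPsec SA add/delete earlier in the patch: a caller copies its message into a heap entry, appends it to the list under the lock, and the work item later drains the list and runs each entry's callback. The following stand-alone user-space C sketch models that producer/worker pattern with simplified stand-ins for struct nfp_mbox_amsg_entry and nfp_net_mbox_amsg_work(); it omits the locking and workqueue machinery.

/* Minimal sketch of the deferred mailbox-message pattern. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct amsg_entry {
	struct amsg_entry *next;
	int (*cfg)(struct amsg_entry *entry);
	unsigned int cmd;
	char msg[];			/* payload copied from the caller */
};

static struct amsg_entry *amsg_head, **amsg_tail = &amsg_head;

static int sched_amsg(unsigned int cmd, const void *data, size_t len,
		      int (*cb)(struct amsg_entry *))
{
	struct amsg_entry *e = malloc(sizeof(*e) + len);

	if (!e)
		return -1;
	memcpy(e->msg, data, len);
	e->cmd = cmd;
	e->cfg = cb;
	e->next = NULL;
	*amsg_tail = e;			/* append; the driver does this under a spinlock */
	amsg_tail = &e->next;
	return 0;
}

static void amsg_work(void)
{
	struct amsg_entry *e = amsg_head, *tmp;

	amsg_head = NULL;		/* splice the pending list out, as the work item does */
	amsg_tail = &amsg_head;
	while (e) {
		tmp = e->next;
		if (e->cfg(e))
			fprintf(stderr, "cmd %u failed\n", e->cmd);
		free(e);
		e = tmp;
	}
}

static int print_cfg(struct amsg_entry *e)
{
	printf("cmd %u: %s\n", e->cmd, e->msg);
	return 0;
}

int main(void)
{
	sched_amsg(1, "add MC address", 15, print_cfg);
	sched_amsg(2, "add IPsec SA", 13, print_cfg);
	amsg_work();
	return 0;
}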
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 27f4786ace4f..81b7ca0ad222 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -27,7 +27,6 @@
#include <linux/page_ref.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
-#include <linux/msi.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/if_vlan.h>
@@ -735,8 +734,9 @@ static unsigned int nfp_net_calc_fl_bufsz_xsk(struct nfp_net_dp *dp)
*/
static void nfp_net_vecs_init(struct nfp_net *nn)
{
+ int numa_node = dev_to_node(&nn->pdev->dev);
struct nfp_net_r_vector *r_vec;
- int r;
+ unsigned int r;
nn->lsc_handler = nfp_net_irq_lsc;
nn->exn_handler = nfp_net_irq_exn;
@@ -762,7 +762,7 @@ static void nfp_net_vecs_init(struct nfp_net *nn)
tasklet_disable(&r_vec->tasklet);
}
- cpumask_set_cpu(r, &r_vec->affinity_mask);
+ cpumask_set_cpu(cpumask_local_spread(r, numa_node), &r_vec->affinity_mask);
}
}
@@ -1007,6 +1007,7 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn)
new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
+ nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, nn->dp.ctrl_w1);
err = nfp_net_reconfig(nn, update);
if (err) {
nfp_net_clear_config_and_disable(nn);
@@ -1333,18 +1334,108 @@ err_unlock:
return err;
}
+int nfp_net_sched_mbox_amsg_work(struct nfp_net *nn, u32 cmd, const void *data, size_t len,
+ int (*cb)(struct nfp_net *, struct nfp_mbox_amsg_entry *))
+{
+ struct nfp_mbox_amsg_entry *entry;
+
+ entry = kmalloc(sizeof(*entry) + len, GFP_ATOMIC);
+ if (!entry)
+ return -ENOMEM;
+
+ memcpy(entry->msg, data, len);
+ entry->cmd = cmd;
+ entry->cfg = cb;
+
+ spin_lock_bh(&nn->mbox_amsg.lock);
+ list_add_tail(&entry->list, &nn->mbox_amsg.list);
+ spin_unlock_bh(&nn->mbox_amsg.lock);
+
+ schedule_work(&nn->mbox_amsg.work);
+
+ return 0;
+}
+
+static void nfp_net_mbox_amsg_work(struct work_struct *work)
+{
+ struct nfp_net *nn = container_of(work, struct nfp_net, mbox_amsg.work);
+ struct nfp_mbox_amsg_entry *entry, *tmp;
+ struct list_head tmp_list;
+
+ INIT_LIST_HEAD(&tmp_list);
+
+ spin_lock_bh(&nn->mbox_amsg.lock);
+ list_splice_init(&nn->mbox_amsg.list, &tmp_list);
+ spin_unlock_bh(&nn->mbox_amsg.lock);
+
+ list_for_each_entry_safe(entry, tmp, &tmp_list, list) {
+ int err = entry->cfg(nn, entry);
+
+ if (err)
+ nn_err(nn, "Config cmd %d to HW failed %d.\n", entry->cmd, err);
+
+ list_del(&entry->list);
+ kfree(entry);
+ }
+}
+
+static int nfp_net_mc_cfg(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry)
+{
+ unsigned char *addr = entry->msg;
+ int ret;
+
+ ret = nfp_net_mbox_lock(nn, NFP_NET_CFG_MULTICAST_SZ);
+ if (ret)
+ return ret;
+
+ nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MULTICAST_MAC_HI,
+ get_unaligned_be32(addr));
+ nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MULTICAST_MAC_LO,
+ get_unaligned_be16(addr + 4));
+
+ return nfp_net_mbox_reconfig_and_unlock(nn, entry->cmd);
+}
+
+static int nfp_net_mc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ if (netdev_mc_count(netdev) > NFP_NET_CFG_MAC_MC_MAX) {
+ nn_err(nn, "Requested number of MC addresses (%d) exceeds maximum (%d).\n",
+ netdev_mc_count(netdev), NFP_NET_CFG_MAC_MC_MAX);
+ return -EINVAL;
+ }
+
+ return nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD, addr,
+ NFP_NET_CFG_MULTICAST_SZ, nfp_net_mc_cfg);
+}
+
+static int nfp_net_mc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ return nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL, addr,
+ NFP_NET_CFG_MULTICAST_SZ, nfp_net_mc_cfg);
+}
+
static void nfp_net_set_rx_mode(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- u32 new_ctrl;
+ u32 new_ctrl, new_ctrl_w1;
new_ctrl = nn->dp.ctrl;
+ new_ctrl_w1 = nn->dp.ctrl_w1;
if (!netdev_mc_empty(netdev) || netdev->flags & IFF_ALLMULTI)
new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_L2MC;
else
new_ctrl &= ~NFP_NET_CFG_CTRL_L2MC;
+ if (netdev->flags & IFF_ALLMULTI)
+ new_ctrl_w1 &= ~NFP_NET_CFG_CTRL_MCAST_FILTER;
+ else
+ new_ctrl_w1 |= nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER;
+
if (netdev->flags & IFF_PROMISC) {
if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
new_ctrl |= NFP_NET_CFG_CTRL_PROMISC;
@@ -1354,13 +1445,21 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC;
}
- if (new_ctrl == nn->dp.ctrl)
+ if ((nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER) &&
+ __dev_mc_sync(netdev, nfp_net_mc_sync, nfp_net_mc_unsync))
+ netdev_err(netdev, "Sync mc address failed\n");
+
+ if (new_ctrl == nn->dp.ctrl && new_ctrl_w1 == nn->dp.ctrl_w1)
return;
- nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
+ if (new_ctrl != nn->dp.ctrl)
+ nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
+ if (new_ctrl_w1 != nn->dp.ctrl_w1)
+ nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, new_ctrl_w1);
nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);
nn->dp.ctrl = new_ctrl;
+ nn->dp.ctrl_w1 = new_ctrl_w1;
}
static void nfp_net_rss_init_itbl(struct nfp_net *nn)
@@ -1631,21 +1730,21 @@ static void nfp_net_stat64(struct net_device *netdev,
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&r_vec->rx_sync);
+ start = u64_stats_fetch_begin(&r_vec->rx_sync);
data[0] = r_vec->rx_pkts;
data[1] = r_vec->rx_bytes;
data[2] = r_vec->rx_drops;
- } while (u64_stats_fetch_retry_irq(&r_vec->rx_sync, start));
+ } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
stats->rx_packets += data[0];
stats->rx_bytes += data[1];
stats->rx_dropped += data[2];
do {
- start = u64_stats_fetch_begin_irq(&r_vec->tx_sync);
+ start = u64_stats_fetch_begin(&r_vec->tx_sync);
data[0] = r_vec->tx_pkts;
data[1] = r_vec->tx_bytes;
data[2] = r_vec->tx_errors;
- } while (u64_stats_fetch_retry_irq(&r_vec->tx_sync, start));
+ } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
stats->tx_packets += data[0];
stats->tx_bytes += data[1];
stats->tx_errors += data[2];
@@ -2013,7 +2112,6 @@ const struct net_device_ops nfp_nfd3_netdev_ops = {
.ndo_get_phys_port_name = nfp_net_get_phys_port_name,
.ndo_bpf = nfp_net_xdp,
.ndo_xsk_wakeup = nfp_net_xsk_wakeup,
- .ndo_get_devlink_port = nfp_devlink_get_devlink_port,
.ndo_bridge_getlink = nfp_net_bridge_getlink,
.ndo_bridge_setlink = nfp_net_bridge_setlink,
};
@@ -2044,7 +2142,6 @@ const struct net_device_ops nfp_nfdk_netdev_ops = {
.ndo_features_check = nfp_net_features_check,
.ndo_get_phys_port_name = nfp_net_get_phys_port_name,
.ndo_bpf = nfp_net_xdp,
- .ndo_get_devlink_port = nfp_devlink_get_devlink_port,
.ndo_bridge_getlink = nfp_net_bridge_getlink,
.ndo_bridge_setlink = nfp_net_bridge_setlink,
};
@@ -2094,7 +2191,7 @@ void nfp_net_info(struct nfp_net *nn)
nn->fw_ver.extend, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor,
nn->max_mtu);
- nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
nn->cap,
nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
@@ -2122,6 +2219,7 @@ void nfp_net_info(struct nfp_net *nn)
nn->cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ?
"RXCSUM_COMPLETE " : "",
nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
+ nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER ? "MULTICAST_FILTER " : "",
nfp_app_extra_cap(nn->app, nn));
}
@@ -2373,6 +2471,12 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
}
if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)
netdev->hw_features |= NETIF_F_RXHASH;
+
+#ifdef CONFIG_NFP_NET_IPSEC
+ if (nn->cap_w1 & NFP_NET_CFG_CTRL_IPSEC)
+ netdev->hw_features |= NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM;
+#endif
+
if (nn->cap & NFP_NET_CFG_CTRL_VXLAN) {
if (nn->cap & NFP_NET_CFG_CTRL_LSO) {
netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
@@ -2427,10 +2531,15 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
netdev->features &= ~NETIF_F_HW_VLAN_STAG_RX;
nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
+ if (nn->app && nn->app->type->id == NFP_APP_BPF_NIC)
+ netdev->xdp_features |= NETDEV_XDP_ACT_HW_OFFLOAD;
+
/* Finalise the netdev setup */
switch (nn->dp.ops->version) {
case NFP_NFD_VER_NFD3:
netdev->netdev_ops = &nfp_nfd3_netdev_ops;
+ netdev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
break;
case NFP_NFD_VER_NFDK:
netdev->netdev_ops = &nfp_nfdk_netdev_ops;
@@ -2454,6 +2563,7 @@ static int nfp_net_read_caps(struct nfp_net *nn)
{
/* Get some of the read-only fields from the BAR */
nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
+ nn->cap_w1 = nn_readl(nn, NFP_NET_CFG_CAP_WORD1);
nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
/* ABI 4.x and ctrl vNIC always use chained metadata, in other cases
@@ -2543,6 +2653,9 @@ int nfp_net_init(struct nfp_net *nn)
if (nn->cap & NFP_NET_CFG_CTRL_TXRWB)
nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXRWB;
+ if (nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER)
+ nn->dp.ctrl_w1 |= NFP_NET_CFG_CTRL_MCAST_FILTER;
+
/* Stash the re-configuration queue away. First odd queue in TX Bar */
nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
@@ -2550,6 +2663,7 @@ int nfp_net_init(struct nfp_net *nn)
nn_writel(nn, NFP_NET_CFG_CTRL, 0);
nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
+ nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, 0);
err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RING |
NFP_NET_CFG_UPDATE_GEN);
if (err)
@@ -2565,12 +2679,19 @@ int nfp_net_init(struct nfp_net *nn)
err = nfp_net_tls_init(nn);
if (err)
goto err_clean_mbox;
+
+ nfp_net_ipsec_init(nn);
}
nfp_net_vecs_init(nn);
if (!nn->dp.netdev)
return 0;
+
+ spin_lock_init(&nn->mbox_amsg.lock);
+ INIT_LIST_HEAD(&nn->mbox_amsg.list);
+ INIT_WORK(&nn->mbox_amsg.work, nfp_net_mbox_amsg_work);
+
return register_netdev(nn->dp.netdev);
err_clean_mbox:
@@ -2588,6 +2709,8 @@ void nfp_net_clean(struct nfp_net *nn)
return;
unregister_netdev(nn->dp.netdev);
+ nfp_net_ipsec_clean(nn);
nfp_ccm_mbox_clean(nn);
+ flush_work(&nn->mbox_amsg.work);
nfp_net_reconfig_wait_posted(nn);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 6714d5e8fdab..669b9dccb6a9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -48,6 +48,7 @@
#define NFP_NET_META_CSUM 6 /* checksum complete type */
#define NFP_NET_META_CONN_HANDLE 7
#define NFP_NET_META_RESYNC_INFO 8 /* RX resync info request */
+#define NFP_NET_META_IPSEC 9 /* IPsec SA index for tx and rx */
#define NFP_META_PORT_ID_CTRL ~0U
@@ -55,6 +56,8 @@
#define NFP_NET_META_VLAN_SIZE 4
#define NFP_NET_META_PORTID_SIZE 4
#define NFP_NET_META_CONN_HANDLE_SIZE 8
+#define NFP_NET_META_IPSEC_SIZE 4
+#define NFP_NET_META_IPSEC_FIELD_SIZE 12
/* Hash type pre-pended when a RSS hash was computed */
#define NFP_NET_RSS_NONE 0
#define NFP_NET_RSS_IPV4 1
@@ -257,10 +260,20 @@
#define NFP_NET_CFG_BPF_CFG_MASK 7ULL
#define NFP_NET_CFG_BPF_ADDR_MASK (~NFP_NET_CFG_BPF_CFG_MASK)
-/* 40B reserved for future use (0x0098 - 0x00c0)
+/* 3 words reserved for extended ctrl words (0x0098 - 0x00a4)
+ * 3 words reserved for extended cap words (0x00a4 - 0x00b0)
+ * Currently only one word is used; this can be extended in the future.
*/
-#define NFP_NET_CFG_RESERVED 0x0098
-#define NFP_NET_CFG_RESERVED_SZ 0x0028
+#define NFP_NET_CFG_CTRL_WORD1 0x0098
+#define NFP_NET_CFG_CTRL_PKT_TYPE (0x1 << 0) /* Pkttype offload */
+#define NFP_NET_CFG_CTRL_IPSEC (0x1 << 1) /* IPsec offload */
+#define NFP_NET_CFG_CTRL_MCAST_FILTER (0x1 << 2) /* Multicast Filter */
+
+#define NFP_NET_CFG_CAP_WORD1 0x00a4
+
+/* 16B reserved for future use (0x00b0 - 0x00c0) */
+#define NFP_NET_CFG_RESERVED 0x00b0
+#define NFP_NET_CFG_RESERVED_SZ 0x0010
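
For illustration only, outside the patch: the comment above describes three extended ctrl words followed by three cap words; a quick compile-time check of that layout (offsets copied from the defines, assuming 4-byte words, C11 static_assert):

/* Illustrative sketch only: sanity-checks the offsets described in the
 * comment above (three 4-byte ctrl words, three cap words, 16B reserved).
 */
#include <assert.h>

int main(void)
{
	static_assert(0x0098 + 3 * 4 == 0x00a4, "ctrl words end at the cap base");
	static_assert(0x00a4 + 3 * 4 == 0x00b0, "cap words end at the reserved area");
	static_assert(0x00b0 + 0x0010 == 0x00c0, "reserved area runs to 0x00c0");
	return 0;
}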
/* RSS configuration (0x0100 - 0x01ac):
* Used only when NFP_NET_CFG_CTRL_RSS is enabled
@@ -390,16 +403,19 @@
*/
#define NFP_NET_CFG_MBOX_BASE 0x1800
#define NFP_NET_CFG_MBOX_VAL_MAX_SZ 0x1F8
-
#define NFP_NET_CFG_MBOX_SIMPLE_CMD 0x0
#define NFP_NET_CFG_MBOX_SIMPLE_RET 0x4
#define NFP_NET_CFG_MBOX_SIMPLE_VAL 0x8
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD 1
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2
-
+#define NFP_NET_CFG_MBOX_CMD_IPSEC 3
#define NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET 5
#define NFP_NET_CFG_MBOX_CMD_TLV_CMSG 6
+#define NFP_NET_CFG_MBOX_CMD_DCB_UPDATE 7
+
+#define NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD 8
+#define NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL 9
/* VLAN filtering using general use mailbox
* %NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox
@@ -412,6 +428,17 @@
#define NFP_NET_CFG_VLAN_FILTER_PROTO (NFP_NET_CFG_VLAN_FILTER + 2)
#define NFP_NET_CFG_VLAN_FILTER_SZ 0x0004
+/* Multicast filtering using general use mailbox
+ * %NFP_NET_CFG_MULTICAST: Base address of Multicast filter mailbox
+ * %NFP_NET_CFG_MULTICAST_MAC_HI: High 32-bits of Multicast MAC address
+ * %NFP_NET_CFG_MULTICAST_MAC_LO: Low 16-bits of Multicast MAC address
+ * %NFP_NET_CFG_MULTICAST_SZ: Size of the Multicast filter mailbox in bytes
+ */
+#define NFP_NET_CFG_MULTICAST NFP_NET_CFG_MBOX_SIMPLE_VAL
+#define NFP_NET_CFG_MULTICAST_MAC_HI NFP_NET_CFG_MULTICAST
+#define NFP_NET_CFG_MULTICAST_MAC_LO (NFP_NET_CFG_MULTICAST + 6)
+#define NFP_NET_CFG_MULTICAST_SZ 0x0006
+
/* TLV capabilities
* %NFP_NET_CFG_TLV_TYPE: Offset of type within the TLV
* %NFP_NET_CFG_TLV_TYPE_REQUIRED: Driver must be able to parse the TLV
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 991059d6cb32..dfedb52b7e70 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -293,6 +293,194 @@ nfp_net_set_fec_link_mode(struct nfp_eth_table_port *eth_port,
}
}
+static const struct nfp_eth_media_link_mode {
+ u16 ethtool_link_mode;
+ u16 speed;
+} nfp_eth_media_table[NFP_MEDIA_LINK_MODES_NUMBER] = {
+ [NFP_MEDIA_1000BASE_CX] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .speed = NFP_SPEED_1G,
+ },
+ [NFP_MEDIA_1000BASE_KX] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .speed = NFP_SPEED_1G,
+ },
+ [NFP_MEDIA_10GBASE_KX4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_10GBASE_KR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_10GBASE_LR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_10GBASE_CX4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_10GBASE_CR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_10GBASE_SR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_10GBASE_ER] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_25GBASE_KR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_25GBASE_KR_S] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_25GBASE_CR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_25GBASE_CR_S] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_25GBASE_SR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_25GBASE_LR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_25GBASE_ER] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_40GBASE_CR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ .speed = NFP_SPEED_40G,
+ },
+ [NFP_MEDIA_40GBASE_KR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ .speed = NFP_SPEED_40G,
+ },
+ [NFP_MEDIA_40GBASE_SR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ .speed = NFP_SPEED_40G,
+ },
+ [NFP_MEDIA_40GBASE_LR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ .speed = NFP_SPEED_40G,
+ },
+ [NFP_MEDIA_50GBASE_KR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_50GBASE_SR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_50GBASE_CR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_50GBASE_LR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_50GBASE_ER] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_50GBASE_FR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_100GBASE_KR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ .speed = NFP_SPEED_100G,
+ },
+ [NFP_MEDIA_100GBASE_SR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ .speed = NFP_SPEED_100G,
+ },
+ [NFP_MEDIA_100GBASE_CR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ .speed = NFP_SPEED_100G,
+ },
+ [NFP_MEDIA_100GBASE_KP4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ .speed = NFP_SPEED_100G,
+ },
+ [NFP_MEDIA_100GBASE_CR10] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ .speed = NFP_SPEED_100G,
+ },
+};
+
+static const unsigned int nfp_eth_speed_map[NFP_SUP_SPEED_NUMBER] = {
+ [NFP_SPEED_1G] = SPEED_1000,
+ [NFP_SPEED_10G] = SPEED_10000,
+ [NFP_SPEED_25G] = SPEED_25000,
+ [NFP_SPEED_40G] = SPEED_40000,
+ [NFP_SPEED_50G] = SPEED_50000,
+ [NFP_SPEED_100G] = SPEED_100000,
+};
+
+static void nfp_add_media_link_mode(struct nfp_port *port,
+ struct nfp_eth_table_port *eth_port,
+ struct ethtool_link_ksettings *cmd)
+{
+ u64 supported_modes[2], advertised_modes[2];
+ struct nfp_eth_media_buf ethm = {
+ .eth_index = eth_port->eth_index,
+ };
+ struct nfp_cpp *cpp = port->app->cpp;
+
+ if (nfp_eth_read_media(cpp, &ethm)) {
+ bitmap_fill(port->speed_bitmap, NFP_SUP_SPEED_NUMBER);
+ return;
+ }
+
+ bitmap_zero(port->speed_bitmap, NFP_SUP_SPEED_NUMBER);
+
+ for (u32 i = 0; i < 2; i++) {
+ supported_modes[i] = le64_to_cpu(ethm.supported_modes[i]);
+ advertised_modes[i] = le64_to_cpu(ethm.advertised_modes[i]);
+ }
+
+ for (u32 i = 0; i < NFP_MEDIA_LINK_MODES_NUMBER; i++) {
+ if (i < 64) {
+ if (supported_modes[0] & BIT_ULL(i)) {
+ __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
+ cmd->link_modes.supported);
+ __set_bit(nfp_eth_media_table[i].speed,
+ port->speed_bitmap);
+ }
+
+ if (advertised_modes[0] & BIT_ULL(i))
+ __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
+ cmd->link_modes.advertising);
+ } else {
+ if (supported_modes[1] & BIT_ULL(i - 64)) {
+ __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
+ cmd->link_modes.supported);
+ __set_bit(nfp_eth_media_table[i].speed,
+ port->speed_bitmap);
+ }
+
+ if (advertised_modes[1] & BIT_ULL(i - 64))
+ __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
+ cmd->link_modes.advertising);
+ }
+ }
+}
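
For illustration only, outside the patch: nfp_add_media_link_mode() walks a 128-bit media bitmap delivered as two little-endian 64-bit words and maps each set bit through nfp_eth_media_table. A reduced userspace sketch of that walk, with a hypothetical three-entry table and a made-up bitmap:

/* Illustrative sketch only: same shape as nfp_add_media_link_mode(),
 * with a hypothetical 3-entry mode table and a made-up bitmap.
 */
#include <stdint.h>
#include <stdio.h>

struct mode {
	const char *name;		/* stands in for the ethtool link mode bit */
	unsigned int speed_mbps;	/* stands in for the NFP_SPEED_* class */
};

int main(void)
{
	static const struct mode table[3] = {
		{ "1000baseKX/Full", 1000 },
		{ "10000baseKR/Full", 10000 },
		{ "25000baseSR/Full", 25000 },
	};
	uint64_t words[2] = { 0x5, 0x0 };	/* bits 0 and 2 set */

	for (unsigned int i = 0; i < 3; i++) {
		if (words[i / 64] & (1ULL << (i % 64)))
			printf("supported: %s (%u Mb/s)\n",
			       table[i].name, table[i].speed_mbps);
	}
	return 0;
}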
+
/**
* nfp_net_get_link_ksettings - Get Link Speed settings
* @netdev: network interface device structure
@@ -311,6 +499,8 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
u16 sts;
/* Init to unknowns */
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
cmd->base.port = PORT_OTHER;
cmd->base.speed = SPEED_UNKNOWN;
@@ -321,6 +511,7 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
if (eth_port) {
ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
+ nfp_add_media_link_mode(port, eth_port, cmd);
if (eth_port->supp_aneg) {
ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
if (eth_port->aneg == NFP_ANEG_AUTO) {
@@ -395,6 +586,22 @@ nfp_net_set_link_ksettings(struct net_device *netdev,
if (cmd->base.speed != SPEED_UNKNOWN) {
u32 speed = cmd->base.speed / eth_port->lanes;
+ bool is_supported = false;
+
+ for (u32 i = 0; i < NFP_SUP_SPEED_NUMBER; i++) {
+ if (cmd->base.speed == nfp_eth_speed_map[i] &&
+ test_bit(i, port->speed_bitmap)) {
+ is_supported = true;
+ break;
+ }
+ }
+
+ if (!is_supported) {
+ netdev_err(netdev, "Speed %u is not supported.\n",
+ cmd->base.speed);
+ err = -EINVAL;
+ goto err_bad_set;
+ }
if (req_aneg) {
netdev_err(netdev, "Speed changing is not allowed when working on autoneg mode.\n");
@@ -686,7 +893,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].rx_sync);
+ start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
data[0] = nn->r_vecs[i].rx_pkts;
tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
@@ -694,10 +901,10 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
tmp[3] = nn->r_vecs[i].hw_csum_rx_error;
tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
tmp[5] = nn->r_vecs[i].hw_tls_rx;
- } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].rx_sync, start));
+ } while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));
do {
- start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].tx_sync);
+ start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
data[1] = nn->r_vecs[i].tx_pkts;
data[2] = nn->r_vecs[i].tx_busy;
tmp[6] = nn->r_vecs[i].hw_csum_tx;
@@ -707,7 +914,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
tmp[10] = nn->r_vecs[i].hw_tls_tx;
tmp[11] = nn->r_vecs[i].tls_tx_fallback;
tmp[12] = nn->r_vecs[i].tls_tx_no_fallback;
- } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].tx_sync, start));
+ } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
data += NN_RVEC_PER_Q_STATS;
@@ -1832,16 +2039,16 @@ static int
nfp_net_get_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, u8 *bytes)
{
- struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
u8 buf[NFP_EEPROM_LEN] = {};
- if (eeprom->len == 0)
- return -EINVAL;
-
if (nfp_net_get_port_mac_by_hwinfo(netdev, buf))
return -EOPNOTSUPP;
- eeprom->magic = nn->pdev->vendor | (nn->pdev->device << 16);
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = app->pdev->vendor | (app->pdev->device << 16);
memcpy(bytes, buf + eeprom->offset, eeprom->len);
return 0;
@@ -1851,18 +2058,18 @@ static int
nfp_net_set_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, u8 *bytes)
{
- struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
u8 buf[NFP_EEPROM_LEN] = {};
+ if (nfp_net_get_port_mac_by_hwinfo(netdev, buf))
+ return -EOPNOTSUPP;
+
if (eeprom->len == 0)
return -EINVAL;
- if (eeprom->magic != (nn->pdev->vendor | nn->pdev->device << 16))
+ if (eeprom->magic != (app->pdev->vendor | app->pdev->device << 16))
return -EINVAL;
- if (nfp_net_get_port_mac_by_hwinfo(netdev, buf))
- return -EOPNOTSUPP;
-
memcpy(buf + eeprom->offset, bytes, eeprom->len);
if (nfp_net_set_port_mac_by_hwinfo(netdev, buf))
return -EOPNOTSUPP;
@@ -1922,6 +2129,9 @@ const struct ethtool_ops nfp_port_ethtool_ops = {
.set_dump = nfp_app_set_dump,
.get_dump_flag = nfp_app_get_dump_flag,
.get_dump_data = nfp_app_get_dump_data,
+ .get_eeprom_len = nfp_net_get_eeprom_len,
+ .get_eeprom = nfp_net_get_eeprom,
+ .set_eeprom = nfp_net_set_eeprom,
.get_module_info = nfp_port_get_module_info,
.get_module_eeprom = nfp_port_get_module_eeprom,
.get_link_ksettings = nfp_net_get_link_ksettings,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 3bae92dc899e..cbe4972ba104 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -16,7 +16,6 @@
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
-#include <linux/msi.h>
#include <linux/random.h>
#include <linux/rtnetlink.h>
@@ -156,22 +155,17 @@ nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
nfp_net_debugfs_vnic_add(nn, pf->ddir);
- if (nn->port)
- nfp_devlink_port_type_eth_set(nn->port);
-
nfp_net_info(nn);
if (nfp_net_is_data_vnic(nn)) {
err = nfp_app_vnic_init(pf->app, nn);
if (err)
- goto err_devlink_port_type_clean;
+ goto err_debugfs_vnic_clean;
}
return 0;
-err_devlink_port_type_clean:
- if (nn->port)
- nfp_devlink_port_type_clear(nn->port);
+err_debugfs_vnic_clean:
nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
nfp_net_clean(nn);
err_devlink_port_clean:
@@ -220,8 +214,6 @@ static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
if (nfp_net_is_data_vnic(nn))
nfp_app_vnic_clean(pf->app, nn);
- if (nn->port)
- nfp_devlink_port_type_clear(nn->port);
nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
nfp_net_clean(nn);
if (nn->port)
@@ -762,11 +754,11 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_devlink_unreg;
+ devl_lock(devlink);
err = nfp_devlink_params_register(pf);
if (err)
goto err_shared_buf_unreg;
- devl_lock(devlink);
pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
/* Allocate the vnics and do basic init */
@@ -799,9 +791,9 @@ err_free_vnics:
nfp_net_pf_free_vnics(pf);
err_clean_ddir:
nfp_net_debugfs_dir_clean(&pf->ddir);
- devl_unlock(devlink);
nfp_devlink_params_unregister(pf);
err_shared_buf_unreg:
+ devl_unlock(devlink);
nfp_shared_buf_unregister(pf);
err_devlink_unreg:
cancel_work_sync(&pf->port_refresh_work);
@@ -829,9 +821,10 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
/* stop app first, to avoid double free of ctrl vNIC's ddir */
nfp_net_debugfs_dir_clean(&pf->ddir);
+ nfp_devlink_params_unregister(pf);
+
devl_unlock(devlink);
- nfp_devlink_params_unregister(pf);
nfp_shared_buf_unregister(pf);
nfp_net_pf_free_irqs(pf);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 8b77582bdfa0..3af1229a3f08 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -134,13 +134,13 @@ nfp_repr_get_host_stats64(const struct net_device *netdev,
repr_stats = per_cpu_ptr(repr->stats, i);
do {
- start = u64_stats_fetch_begin_irq(&repr_stats->syncp);
+ start = u64_stats_fetch_begin(&repr_stats->syncp);
tbytes = repr_stats->tx_bytes;
tpkts = repr_stats->tx_packets;
tdrops = repr_stats->tx_drops;
rbytes = repr_stats->rx_bytes;
rpkts = repr_stats->rx_packets;
- } while (u64_stats_fetch_retry_irq(&repr_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&repr_stats->syncp, start));
stats->tx_bytes += tbytes;
stats->tx_packets += tpkts;
@@ -275,7 +275,6 @@ const struct net_device_ops nfp_repr_netdev_ops = {
.ndo_set_features = nfp_port_set_features,
.ndo_set_mac_address = eth_mac_addr,
.ndo_get_port_parent_id = nfp_port_get_port_parent_id,
- .ndo_get_devlink_port = nfp_devlink_get_devlink_port,
};
void
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
index 6793cdf9ff11..9c04f9f0e2c9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -38,6 +38,16 @@ enum nfp_port_flags {
NFP_PORT_CHANGED = 0,
};
+enum {
+ NFP_SPEED_1G,
+ NFP_SPEED_10G,
+ NFP_SPEED_25G,
+ NFP_SPEED_40G,
+ NFP_SPEED_50G,
+ NFP_SPEED_100G,
+ NFP_SUP_SPEED_NUMBER
+};
+
/**
* struct nfp_port - structure representing NFP port
* @netdev: backpointer to associated netdev
@@ -52,6 +62,7 @@ enum nfp_port_flags {
* @eth_forced: for %NFP_PORT_PHYS_PORT port is forced UP or DOWN, don't change
* @eth_port: for %NFP_PORT_PHYS_PORT translated ETH Table port entry
* @eth_stats: for %NFP_PORT_PHYS_PORT MAC stats if available
+ * @speed_bitmap: for %NFP_PORT_PHYS_PORT supported speed bitmap
* @pf_id: for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT ID of the PCI PF (0-3)
* @vf_id: for %NFP_PORT_VF_PORT ID of the PCI VF within @pf_id
* @pf_split: for %NFP_PORT_PF_PORT %true if PCI PF has more than one vNIC
@@ -78,6 +89,7 @@ struct nfp_port {
bool eth_forced;
struct nfp_eth_table_port *eth_port;
u8 __iomem *eth_stats;
+ DECLARE_BITMAP(speed_bitmap, NFP_SUP_SPEED_NUMBER);
};
/* NFP_PORT_PF_PORT, NFP_PORT_VF_PORT */
struct {
@@ -129,8 +141,6 @@ int nfp_net_refresh_port_table_sync(struct nfp_pf *pf);
int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port);
void nfp_devlink_port_unregister(struct nfp_port *port);
-void nfp_devlink_port_type_eth_set(struct nfp_port *port);
-void nfp_devlink_port_type_clear(struct nfp_port *port);
/* Mac stats (0x0000 - 0x0200)
* all counters are 64bit.
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 730fea214b8a..7136bc48530b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -100,6 +100,7 @@ enum nfp_nsp_cmd {
SPCODE_FW_LOADED = 19, /* Is application firmware loaded */
SPCODE_VERSIONS = 21, /* Report FW versions */
SPCODE_READ_SFF_EEPROM = 22, /* Read module EEPROM */
+ SPCODE_READ_MEDIA = 23, /* Get either the supported or advertised media for a port */
};
struct nfp_nsp_dma_buf {
@@ -1100,4 +1101,20 @@ int nfp_nsp_read_module_eeprom(struct nfp_nsp *state, int eth_index,
kfree(buf);
return ret;
+};
+
+int nfp_nsp_read_media(struct nfp_nsp *state, void *buf, unsigned int size)
+{
+ struct nfp_nsp_command_buf_arg media = {
+ {
+ .code = SPCODE_READ_MEDIA,
+ .option = size,
+ },
+ .in_buf = buf,
+ .in_size = size,
+ .out_buf = buf,
+ .out_size = size,
+ };
+
+ return nfp_nsp_command_buf(state, &media);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index 992d72ac98d3..781edc451bd4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -65,6 +65,11 @@ static inline bool nfp_nsp_has_read_module_eeprom(struct nfp_nsp *state)
return nfp_nsp_get_abi_ver_minor(state) > 28;
}
+static inline bool nfp_nsp_has_read_media(struct nfp_nsp *state)
+{
+ return nfp_nsp_get_abi_ver_minor(state) > 33;
+}
+
enum nfp_eth_interface {
NFP_INTERFACE_NONE = 0,
NFP_INTERFACE_SFP = 1,
@@ -97,6 +102,50 @@ enum nfp_eth_fec {
NFP_FEC_DISABLED_BIT,
};
+/* RJ45 link modes are not used, so there is no mapping to them */
+enum nfp_ethtool_link_mode_list {
+ NFP_MEDIA_W0_RJ45_10M,
+ NFP_MEDIA_W0_RJ45_10M_HD,
+ NFP_MEDIA_W0_RJ45_100M,
+ NFP_MEDIA_W0_RJ45_100M_HD,
+ NFP_MEDIA_W0_RJ45_1G,
+ NFP_MEDIA_W0_RJ45_2P5G,
+ NFP_MEDIA_W0_RJ45_5G,
+ NFP_MEDIA_W0_RJ45_10G,
+ NFP_MEDIA_1000BASE_CX,
+ NFP_MEDIA_1000BASE_KX,
+ NFP_MEDIA_10GBASE_KX4,
+ NFP_MEDIA_10GBASE_KR,
+ NFP_MEDIA_10GBASE_CX4,
+ NFP_MEDIA_10GBASE_CR,
+ NFP_MEDIA_10GBASE_SR,
+ NFP_MEDIA_10GBASE_ER,
+ NFP_MEDIA_25GBASE_KR,
+ NFP_MEDIA_25GBASE_KR_S,
+ NFP_MEDIA_25GBASE_CR,
+ NFP_MEDIA_25GBASE_CR_S,
+ NFP_MEDIA_25GBASE_SR,
+ NFP_MEDIA_40GBASE_CR4,
+ NFP_MEDIA_40GBASE_KR4,
+ NFP_MEDIA_40GBASE_SR4,
+ NFP_MEDIA_40GBASE_LR4,
+ NFP_MEDIA_50GBASE_KR,
+ NFP_MEDIA_50GBASE_SR,
+ NFP_MEDIA_50GBASE_CR,
+ NFP_MEDIA_50GBASE_LR,
+ NFP_MEDIA_50GBASE_ER,
+ NFP_MEDIA_50GBASE_FR,
+ NFP_MEDIA_100GBASE_KR4,
+ NFP_MEDIA_100GBASE_SR4,
+ NFP_MEDIA_100GBASE_CR4,
+ NFP_MEDIA_100GBASE_KP4,
+ NFP_MEDIA_100GBASE_CR10,
+ NFP_MEDIA_10GBASE_LR,
+ NFP_MEDIA_25GBASE_LR,
+ NFP_MEDIA_25GBASE_ER,
+ NFP_MEDIA_LINK_MODES_NUMBER
+};
+
#define NFP_FEC_AUTO BIT(NFP_FEC_AUTO_BIT)
#define NFP_FEC_BASER BIT(NFP_FEC_BASER_BIT)
#define NFP_FEC_REED_SOLOMON BIT(NFP_FEC_REED_SOLOMON_BIT)
@@ -256,6 +305,16 @@ enum nfp_nsp_sensor_id {
int nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id,
long *val);
+struct nfp_eth_media_buf {
+ u8 eth_index;
+ u8 reserved[7];
+ __le64 supported_modes[2];
+ __le64 advertised_modes[2];
+};
+
+int nfp_nsp_read_media(struct nfp_nsp *state, void *buf, unsigned int size);
+int nfp_eth_read_media(struct nfp_cpp *cpp, struct nfp_eth_media_buf *ethm);
+
#define NFP_NSP_VERSION_BUFSZ 1024 /* reasonable size, not in the ABI */
enum nfp_nsp_versions {
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
index bb64efec4c46..570ac1bb2122 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -647,3 +647,29 @@ int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes)
return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_PORT, NSP_ETH_PORT_LANES,
lanes, NSP_ETH_CTRL_SET_LANES);
}
+
+int nfp_eth_read_media(struct nfp_cpp *cpp, struct nfp_eth_media_buf *ethm)
+{
+ struct nfp_nsp *nsp;
+ int ret;
+
+ nsp = nfp_nsp_open(cpp);
+ if (IS_ERR(nsp)) {
+ nfp_err(cpp, "Failed to access the NSP: %pe\n", nsp);
+ return PTR_ERR(nsp);
+ }
+
+ if (!nfp_nsp_has_read_media(nsp)) {
+ nfp_warn(cpp, "Reading media link modes not supported. Please update flash\n");
+ ret = -EOPNOTSUPP;
+ goto exit_close_nsp;
+ }
+
+ ret = nfp_nsp_read_media(nsp, ethm, sizeof(*ethm));
+ if (ret)
+ nfp_err(cpp, "Reading media link modes failed: %pe\n", ERR_PTR(ret));
+
+exit_close_nsp:
+ nfp_nsp_close(nsp);
+ return ret;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nic/dcb.c b/drivers/net/ethernet/netronome/nfp/nic/dcb.c
new file mode 100644
index 000000000000..bb498ac6bd7d
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nic/dcb.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2023 Corigine, Inc. */
+
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <net/dcbnl.h>
+
+#include "../nfp_app.h"
+#include "../nfp_net.h"
+#include "../nfp_main.h"
+#include "../nfpcore/nfp_cpp.h"
+#include "../nfpcore/nfp_nffw.h"
+#include "../nfp_net_sriov.h"
+
+#include "main.h"
+
+#define NFP_DCB_TRUST_PCP 1
+#define NFP_DCB_TRUST_DSCP 2
+#define NFP_DCB_TRUST_INVALID 0xff
+
+#define NFP_DCB_TSA_VENDOR 1
+#define NFP_DCB_TSA_STRICT 2
+#define NFP_DCB_TSA_ETS 3
+
+#define NFP_DCB_GBL_ENABLE BIT(0)
+#define NFP_DCB_QOS_ENABLE BIT(1)
+#define NFP_DCB_DISABLE 0
+#define NFP_DCB_ALL_QOS_ENABLE (NFP_DCB_GBL_ENABLE | NFP_DCB_QOS_ENABLE)
+
+#define NFP_DCB_UPDATE_MSK_SZ 4
+#define NFP_DCB_TC_RATE_MAX 0xffff
+
+#define NFP_DCB_DATA_OFF_DSCP2IDX 0
+#define NFP_DCB_DATA_OFF_PCP2IDX 64
+#define NFP_DCB_DATA_OFF_TSA 80
+#define NFP_DCB_DATA_OFF_IDX_BW_PCT 88
+#define NFP_DCB_DATA_OFF_RATE 96
+#define NFP_DCB_DATA_OFF_CAP 112
+#define NFP_DCB_DATA_OFF_ENABLE 116
+#define NFP_DCB_DATA_OFF_TRUST 120
+
+#define NFP_DCB_MSG_MSK_ENABLE BIT(31)
+#define NFP_DCB_MSG_MSK_TRUST BIT(30)
+#define NFP_DCB_MSG_MSK_TSA BIT(29)
+#define NFP_DCB_MSG_MSK_DSCP BIT(28)
+#define NFP_DCB_MSG_MSK_PCP BIT(27)
+#define NFP_DCB_MSG_MSK_RATE BIT(26)
+#define NFP_DCB_MSG_MSK_PCT BIT(25)
+
+static struct nfp_dcb *get_dcb_priv(struct nfp_net *nn)
+{
+ struct nfp_dcb *dcb = &((struct nfp_app_nic_private *)nn->app_priv)->dcb;
+
+ return dcb;
+}
+
+static u8 nfp_tsa_ieee2nfp(u8 tsa)
+{
+ switch (tsa) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ return NFP_DCB_TSA_STRICT;
+ case IEEE_8021QAZ_TSA_ETS:
+ return NFP_DCB_TSA_ETS;
+ default:
+ return NFP_DCB_TSA_VENDOR;
+ }
+}
+
+static int nfp_nic_dcbnl_ieee_getets(struct net_device *dev,
+ struct ieee_ets *ets)
+{
+ struct nfp_net *nn = netdev_priv(dev);
+ struct nfp_dcb *dcb;
+
+ dcb = get_dcb_priv(nn);
+
+ for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ ets->prio_tc[i] = dcb->prio2tc[i];
+ ets->tc_tx_bw[i] = dcb->tc_tx_pct[i];
+ ets->tc_tsa[i] = dcb->tc_tsa[i];
+ }
+
+ return 0;
+}
+
+static bool nfp_refresh_tc2idx(struct nfp_net *nn)
+{
+ u8 tc2idx[IEEE_8021QAZ_MAX_TCS];
+ bool change = false;
+ struct nfp_dcb *dcb;
+ int maxstrict = 0;
+
+ dcb = get_dcb_priv(nn);
+
+ for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ tc2idx[i] = i;
+ if (dcb->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT)
+ maxstrict = i;
+ }
+
+ if (maxstrict > 0 && dcb->tc_tsa[0] != IEEE_8021QAZ_TSA_STRICT) {
+ tc2idx[0] = maxstrict;
+ tc2idx[maxstrict] = 0;
+ }
+
+ for (unsigned int j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
+ if (dcb->tc2idx[j] != tc2idx[j]) {
+ change = true;
+ dcb->tc2idx[j] = tc2idx[j];
+ }
+ }
+
+ return change;
+}
+
+static int nfp_fill_maxrate(struct nfp_net *nn, u64 *max_rate_array)
+{
+ struct nfp_app *app = nn->app;
+ struct nfp_dcb *dcb;
+ u32 ratembps;
+
+ dcb = get_dcb_priv(nn);
+
+ for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ /* Convert bandwidth from kbps to mbps. */
+ ratembps = max_rate_array[i] / 1024;
+
+ /* Reject input values >= NFP_DCB_TC_RATE_MAX */
+ if (ratembps >= NFP_DCB_TC_RATE_MAX) {
+ nfp_warn(app->cpp, "ratembps(%d) must less than %d.",
+ ratembps, NFP_DCB_TC_RATE_MAX);
+ return -EINVAL;
+ }
+ /* An input value of 0 is mapped to NFP_DCB_TC_RATE_MAX for the firmware. */
+ if (ratembps == 0)
+ ratembps = NFP_DCB_TC_RATE_MAX;
+
+ writew((u16)ratembps, dcb->dcbcfg_tbl +
+ dcb->cfg_offset + NFP_DCB_DATA_OFF_RATE + dcb->tc2idx[i] * 2);
+ /* Rate values coming from user space need to be synced to the dcb structure. */
+ if (dcb->tc_maxrate != max_rate_array)
+ dcb->tc_maxrate[i] = max_rate_array[i];
+ }
+
+ return 0;
+}
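
For illustration only, outside the patch: nfp_fill_maxrate() converts the kbps values handed over by dcbnl into the 16-bit Mb/s register format, mapping 0 (no limit) to the firmware maximum and rejecting anything at or above it. A small userspace sketch of the same conversion with made-up inputs:

/* Illustrative sketch only: same conversion as nfp_fill_maxrate(), with
 * made-up kbps inputs.
 */
#include <stdint.h>
#include <stdio.h>

#define TC_RATE_MAX 0xffff	/* stands in for NFP_DCB_TC_RATE_MAX */

int main(void)
{
	uint64_t kbps[3] = { 0, 1024000, 102400000 };	/* no limit, ~1G, ~100G */

	for (int i = 0; i < 3; i++) {
		uint32_t mbps = kbps[i] / 1024;

		if (mbps >= TC_RATE_MAX) {
			printf("%llu kbps: rejected (>= %u Mb/s)\n",
			       (unsigned long long)kbps[i],
			       (unsigned int)TC_RATE_MAX);
			continue;
		}
		if (mbps == 0)
			mbps = TC_RATE_MAX;	/* "no limit" for the firmware */
		printf("%llu kbps -> rate register 0x%04x\n",
		       (unsigned long long)kbps[i], (unsigned int)mbps);
	}
	return 0;
}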
+
+static int update_dscp_maxrate(struct net_device *dev, u32 *update)
+{
+ struct nfp_net *nn = netdev_priv(dev);
+ struct nfp_dcb *dcb;
+ int err;
+
+ dcb = get_dcb_priv(nn);
+
+ err = nfp_fill_maxrate(nn, dcb->tc_maxrate);
+ if (err)
+ return err;
+
+ *update |= NFP_DCB_MSG_MSK_RATE;
+
+ /* We only refresh dscp in dscp trust mode. */
+ if (dcb->dscp_cnt > 0) {
+ for (unsigned int i = 0; i < NFP_NET_MAX_DSCP; i++) {
+ writeb(dcb->tc2idx[dcb->prio2tc[dcb->dscp2prio[i]]],
+ dcb->dcbcfg_tbl + dcb->cfg_offset +
+ NFP_DCB_DATA_OFF_DSCP2IDX + i);
+ }
+ *update |= NFP_DCB_MSG_MSK_DSCP;
+ }
+
+ return 0;
+}
+
+static void nfp_nic_set_trust(struct nfp_net *nn, u32 *update)
+{
+ struct nfp_dcb *dcb;
+ u8 trust;
+
+ dcb = get_dcb_priv(nn);
+
+ if (dcb->trust_status != NFP_DCB_TRUST_INVALID)
+ return;
+
+ trust = dcb->dscp_cnt > 0 ? NFP_DCB_TRUST_DSCP : NFP_DCB_TRUST_PCP;
+ writeb(trust, dcb->dcbcfg_tbl + dcb->cfg_offset +
+ NFP_DCB_DATA_OFF_TRUST);
+
+ dcb->trust_status = trust;
+ *update |= NFP_DCB_MSG_MSK_TRUST;
+}
+
+static void nfp_nic_set_enable(struct nfp_net *nn, u32 enable, u32 *update)
+{
+ struct nfp_dcb *dcb;
+ u32 value = 0;
+
+ dcb = get_dcb_priv(nn);
+
+ value = readl(dcb->dcbcfg_tbl + dcb->cfg_offset +
+ NFP_DCB_DATA_OFF_ENABLE);
+ if (value != enable) {
+ writel(enable, dcb->dcbcfg_tbl + dcb->cfg_offset +
+ NFP_DCB_DATA_OFF_ENABLE);
+ *update |= NFP_DCB_MSG_MSK_ENABLE;
+ }
+}
+
+static int dcb_ets_check(struct net_device *dev, struct ieee_ets *ets)
+{
+ struct nfp_net *nn = netdev_priv(dev);
+ struct nfp_app *app = nn->app;
+ bool ets_exists = false;
+ int sum = 0;
+
+ for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ /* For ets mode, check bw percentage sum. */
+ if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
+ ets_exists = true;
+ sum += ets->tc_tx_bw[i];
+ } else if (ets->tc_tx_bw[i]) {
+ nfp_warn(app->cpp, "ETS BW for strict/vendor TC must be 0.");
+ return -EINVAL;
+ }
+ }
+
+ if (ets_exists && sum != 100) {
+ nfp_warn(app->cpp, "Failed to validate ETS BW: sum must be 100.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
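
For illustration only, outside the patch: dcb_ets_check() accepts a configuration only if bandwidth is assigned exclusively to ETS TCs and, when any ETS TC exists, the percentages sum to exactly 100. A standalone restatement of that rule with simplified stand-in TSA constants:

/* Illustrative sketch only: restates the rule enforced by dcb_ets_check();
 * the TSA constants below are simplified stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_TCS		8
#define TSA_STRICT	0
#define TSA_ETS		2

static bool ets_config_ok(const int tsa[MAX_TCS], const int bw[MAX_TCS])
{
	bool has_ets = false;
	int sum = 0;

	for (int i = 0; i < MAX_TCS; i++) {
		if (tsa[i] == TSA_ETS) {
			has_ets = true;
			sum += bw[i];
		} else if (bw[i]) {
			return false;	/* bandwidth on a strict/vendor TC */
		}
	}
	return !has_ets || sum == 100;
}

int main(void)
{
	int tsa[MAX_TCS] = { TSA_ETS, TSA_ETS };	/* rest default to strict */
	int bw[MAX_TCS] = { 60, 40 };			/* ETS TCs sum to 100 */

	printf("config %s\n", ets_config_ok(tsa, bw) ? "accepted" : "rejected");
	return 0;
}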
+
+static void nfp_nic_fill_ets(struct nfp_net *nn)
+{
+ struct nfp_dcb *dcb;
+
+ dcb = get_dcb_priv(nn);
+
+ for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ writeb(dcb->tc2idx[dcb->prio2tc[i]],
+ dcb->dcbcfg_tbl + dcb->cfg_offset + NFP_DCB_DATA_OFF_PCP2IDX + i);
+ writeb(dcb->tc_tx_pct[i], dcb->dcbcfg_tbl +
+ dcb->cfg_offset + NFP_DCB_DATA_OFF_IDX_BW_PCT + dcb->tc2idx[i]);
+ writeb(nfp_tsa_ieee2nfp(dcb->tc_tsa[i]), dcb->dcbcfg_tbl +
+ dcb->cfg_offset + NFP_DCB_DATA_OFF_TSA + dcb->tc2idx[i]);
+ }
+}
+
+static void nfp_nic_ets_init(struct nfp_net *nn, u32 *update)
+{
+ struct nfp_dcb *dcb = get_dcb_priv(nn);
+
+ if (dcb->ets_init)
+ return;
+
+ nfp_nic_fill_ets(nn);
+ dcb->ets_init = true;
+ *update |= NFP_DCB_MSG_MSK_TSA | NFP_DCB_MSG_MSK_PCT | NFP_DCB_MSG_MSK_PCP;
+}
+
+static int nfp_nic_dcbnl_ieee_setets(struct net_device *dev,
+ struct ieee_ets *ets)
+{
+ const u32 cmd = NFP_NET_CFG_MBOX_CMD_DCB_UPDATE;
+ struct nfp_net *nn = netdev_priv(dev);
+ struct nfp_app *app = nn->app;
+ struct nfp_dcb *dcb;
+ u32 update = 0;
+ bool change;
+ int err;
+
+ err = dcb_ets_check(dev, ets);
+ if (err)
+ return err;
+
+ dcb = get_dcb_priv(nn);
+
+ for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ dcb->prio2tc[i] = ets->prio_tc[i];
+ dcb->tc_tx_pct[i] = ets->tc_tx_bw[i];
+ dcb->tc_tsa[i] = ets->tc_tsa[i];
+ }
+
+ change = nfp_refresh_tc2idx(nn);
+ nfp_nic_fill_ets(nn);
+ dcb->ets_init = true;
+ if (change || !dcb->rate_init) {
+ err = update_dscp_maxrate(dev, &update);
+ if (err) {
+ nfp_warn(app->cpp,
+ "nfp dcbnl ieee setets ERROR:%d.",
+ err);
+ return err;
+ }
+
+ dcb->rate_init = true;
+ }
+ nfp_nic_set_enable(nn, NFP_DCB_ALL_QOS_ENABLE, &update);
+ nfp_nic_set_trust(nn, &update);
+ err = nfp_net_mbox_lock(nn, NFP_DCB_UPDATE_MSK_SZ);
+ if (err)
+ return err;
+
+ nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL,
+ update | NFP_DCB_MSG_MSK_TSA | NFP_DCB_MSG_MSK_PCT |
+ NFP_DCB_MSG_MSK_PCP);
+
+ return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
+}
+
+static int nfp_nic_dcbnl_ieee_getmaxrate(struct net_device *dev,
+ struct ieee_maxrate *maxrate)
+{
+ struct nfp_net *nn = netdev_priv(dev);
+ struct nfp_dcb *dcb;
+
+ dcb = get_dcb_priv(nn);
+
+ for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ maxrate->tc_maxrate[i] = dcb->tc_maxrate[i];
+
+ return 0;
+}
+
+static int nfp_nic_dcbnl_ieee_setmaxrate(struct net_device *dev,
+ struct ieee_maxrate *maxrate)
+{
+ const u32 cmd = NFP_NET_CFG_MBOX_CMD_DCB_UPDATE;
+ struct nfp_net *nn = netdev_priv(dev);
+ struct nfp_app *app = nn->app;
+ struct nfp_dcb *dcb;
+ u32 update = 0;
+ int err;
+
+ err = nfp_fill_maxrate(nn, maxrate->tc_maxrate);
+ if (err) {
+ nfp_warn(app->cpp,
+ "nfp dcbnl ieee setmaxrate ERROR:%d.",
+ err);
+ return err;
+ }
+
+ dcb = get_dcb_priv(nn);
+
+ dcb->rate_init = true;
+ nfp_nic_set_enable(nn, NFP_DCB_ALL_QOS_ENABLE, &update);
+ nfp_nic_set_trust(nn, &update);
+ nfp_nic_ets_init(nn, &update);
+
+ err = nfp_net_mbox_lock(nn, NFP_DCB_UPDATE_MSK_SZ);
+ if (err)
+ return err;
+
+ nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL,
+ update | NFP_DCB_MSG_MSK_RATE);
+
+ return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
+}
+
+static int nfp_nic_set_trust_status(struct nfp_net *nn, u8 status)
+{
+ const u32 cmd = NFP_NET_CFG_MBOX_CMD_DCB_UPDATE;
+ struct nfp_dcb *dcb;
+ u32 update = 0;
+ int err;
+
+ dcb = get_dcb_priv(nn);
+ if (!dcb->rate_init) {
+ err = nfp_fill_maxrate(nn, dcb->tc_maxrate);
+ if (err)
+ return err;
+
+ update |= NFP_DCB_MSG_MSK_RATE;
+ dcb->rate_init = true;
+ }
+
+ err = nfp_net_mbox_lock(nn, NFP_DCB_UPDATE_MSK_SZ);
+ if (err)
+ return err;
+
+ nfp_nic_ets_init(nn, &update);
+ writeb(status, dcb->dcbcfg_tbl + dcb->cfg_offset +
+ NFP_DCB_DATA_OFF_TRUST);
+ nfp_nic_set_enable(nn, NFP_DCB_ALL_QOS_ENABLE, &update);
+ nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL,
+ update | NFP_DCB_MSG_MSK_TRUST);
+
+ err = nfp_net_mbox_reconfig_and_unlock(nn, cmd);
+ if (err)
+ return err;
+
+ dcb->trust_status = status;
+
+ return 0;
+}
+
+static int nfp_nic_set_dscp2prio(struct nfp_net *nn, u8 dscp, u8 prio)
+{
+ const u32 cmd = NFP_NET_CFG_MBOX_CMD_DCB_UPDATE;
+ struct nfp_dcb *dcb;
+ u8 idx, tc;
+ int err;
+
+ err = nfp_net_mbox_lock(nn, NFP_DCB_UPDATE_MSK_SZ);
+ if (err)
+ return err;
+
+ dcb = get_dcb_priv(nn);
+
+ tc = dcb->prio2tc[prio];
+ idx = dcb->tc2idx[tc];
+
+ writeb(idx, dcb->dcbcfg_tbl + dcb->cfg_offset +
+ NFP_DCB_DATA_OFF_DSCP2IDX + dscp);
+
+ nn_writel(nn, nn->tlv_caps.mbox_off +
+ NFP_NET_CFG_MBOX_SIMPLE_VAL, NFP_DCB_MSG_MSK_DSCP);
+
+ err = nfp_net_mbox_reconfig_and_unlock(nn, cmd);
+ if (err)
+ return err;
+
+ dcb->dscp2prio[dscp] = prio;
+
+ return 0;
+}
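
For illustration only, outside the patch: nfp_nic_set_dscp2prio() resolves a DSCP value through three tables (dscp2prio from the app entries, prio2tc from the ETS config, tc2idx for the hardware ordering) before writing the final index. A userspace sketch of that lookup chain with hypothetical table contents:

/* Illustrative sketch only: the dscp -> prio -> tc -> index chain used by
 * nfp_nic_set_dscp2prio(); all table contents here are made up.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t dscp2prio[64] = { [46] = 5 };	/* e.g. EF mapped to prio 5 */
	uint8_t prio2tc[8] = { 0, 0, 1, 1, 2, 3, 0, 0 };
	uint8_t tc2idx[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

	uint8_t dscp = 46;
	uint8_t prio = dscp2prio[dscp];
	uint8_t idx = tc2idx[prio2tc[prio]];

	printf("DSCP %d -> prio %d -> hw index %d\n", dscp, prio, idx);
	return 0;
}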
+
+static int nfp_nic_dcbnl_ieee_setapp(struct net_device *dev,
+ struct dcb_app *app)
+{
+ struct nfp_net *nn = netdev_priv(dev);
+ struct dcb_app old_app;
+ struct nfp_dcb *dcb;
+ bool is_new;
+ int err;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
+ return -EINVAL;
+
+ dcb = get_dcb_priv(nn);
+
+ /* Save the old entry info */
+ old_app.selector = IEEE_8021QAZ_APP_SEL_DSCP;
+ old_app.protocol = app->protocol;
+ old_app.priority = dcb->dscp2prio[app->protocol];
+
+ /* Check trust status */
+ if (!dcb->dscp_cnt) {
+ err = nfp_nic_set_trust_status(nn, NFP_DCB_TRUST_DSCP);
+ if (err)
+ return err;
+ }
+
+ /* Update the mapping if it changed, or if the priority is 0 (the init-stage value) */
+ if (app->priority != old_app.priority || app->priority == 0) {
+ err = nfp_nic_set_dscp2prio(nn, app->protocol, app->priority);
+ if (err)
+ return err;
+ }
+
+ /* Delete the old entry if it exists */
+ is_new = !!dcb_ieee_delapp(dev, &old_app);
+
+ /* Add new entry and update counter */
+ err = dcb_ieee_setapp(dev, app);
+ if (err)
+ return err;
+
+ if (is_new)
+ dcb->dscp_cnt++;
+
+ return 0;
+}
+
+static int nfp_nic_dcbnl_ieee_delapp(struct net_device *dev,
+ struct dcb_app *app)
+{
+ struct nfp_net *nn = netdev_priv(dev);
+ struct nfp_dcb *dcb;
+ int err;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
+ return -EINVAL;
+
+ dcb = get_dcb_priv(nn);
+
+ /* Check that the dcb_app param matches the firmware state */
+ if (app->priority != dcb->dscp2prio[app->protocol])
+ return -ENOENT;
+
+ /* Set fw dscp mapping to 0 */
+ err = nfp_nic_set_dscp2prio(nn, app->protocol, 0);
+ if (err)
+ return err;
+
+ /* Delete app from dcb list */
+ err = dcb_ieee_delapp(dev, app);
+ if (err)
+ return err;
+
+ /* Decrease dscp counter */
+ dcb->dscp_cnt--;
+
+ /* If no dscp mapping is configured, trust pcp */
+ if (dcb->dscp_cnt == 0)
+ return nfp_nic_set_trust_status(nn, NFP_DCB_TRUST_PCP);
+
+ return 0;
+}
+
+static const struct dcbnl_rtnl_ops nfp_nic_dcbnl_ops = {
+ /* ieee 802.1Qaz std */
+ .ieee_getets = nfp_nic_dcbnl_ieee_getets,
+ .ieee_setets = nfp_nic_dcbnl_ieee_setets,
+ .ieee_getmaxrate = nfp_nic_dcbnl_ieee_getmaxrate,
+ .ieee_setmaxrate = nfp_nic_dcbnl_ieee_setmaxrate,
+ .ieee_setapp = nfp_nic_dcbnl_ieee_setapp,
+ .ieee_delapp = nfp_nic_dcbnl_ieee_delapp,
+};
+
+int nfp_nic_dcb_init(struct nfp_net *nn)
+{
+ struct nfp_app *app = nn->app;
+ struct nfp_dcb *dcb;
+ int err;
+
+ dcb = get_dcb_priv(nn);
+ dcb->cfg_offset = NFP_DCB_CFG_STRIDE * nn->id;
+ dcb->dcbcfg_tbl = nfp_pf_map_rtsym(app->pf, "net.dcbcfg_tbl",
+ "_abi_dcb_cfg",
+ dcb->cfg_offset, &dcb->dcbcfg_tbl_area);
+ if (IS_ERR(dcb->dcbcfg_tbl)) {
+ if (PTR_ERR(dcb->dcbcfg_tbl) != -ENOENT) {
+ err = PTR_ERR(dcb->dcbcfg_tbl);
+ dcb->dcbcfg_tbl = NULL;
+ nfp_err(app->cpp,
+ "Failed to map dcbcfg_tbl area, min_size %u.\n",
+ dcb->cfg_offset);
+ return err;
+ }
+ dcb->dcbcfg_tbl = NULL;
+ }
+
+ if (dcb->dcbcfg_tbl) {
+ for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ dcb->prio2tc[i] = i;
+ dcb->tc2idx[i] = i;
+ dcb->tc_tx_pct[i] = 0;
+ dcb->tc_maxrate[i] = 0;
+ dcb->tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
+ }
+ dcb->trust_status = NFP_DCB_TRUST_INVALID;
+ dcb->rate_init = false;
+ dcb->ets_init = false;
+
+ nn->dp.netdev->dcbnl_ops = &nfp_nic_dcbnl_ops;
+ }
+
+ return 0;
+}
+
+void nfp_nic_dcb_clean(struct nfp_net *nn)
+{
+ struct nfp_dcb *dcb;
+
+ dcb = get_dcb_priv(nn);
+ if (dcb->dcbcfg_tbl_area)
+ nfp_cpp_area_release_free(dcb->dcbcfg_tbl_area);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nic/main.c b/drivers/net/ethernet/netronome/nfp/nic/main.c
index aea8579206ee..9dd5afe37f6e 100644
--- a/drivers/net/ethernet/netronome/nfp/nic/main.c
+++ b/drivers/net/ethernet/netronome/nfp/nic/main.c
@@ -5,6 +5,8 @@
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
+#include "../nfp_net.h"
+#include "main.h"
static int nfp_nic_init(struct nfp_app *app)
{
@@ -28,13 +30,50 @@ static void nfp_nic_sriov_disable(struct nfp_app *app)
{
}
+static int nfp_nic_vnic_init(struct nfp_app *app, struct nfp_net *nn)
+{
+ return nfp_nic_dcb_init(nn);
+}
+
+static void nfp_nic_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
+{
+ nfp_nic_dcb_clean(nn);
+}
+
+static int nfp_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
+ unsigned int id)
+{
+ struct nfp_app_nic_private *app_pri = nn->app_priv;
+ int err;
+
+ err = nfp_app_nic_vnic_alloc(app, nn, id);
+ if (err)
+ return err;
+
+ if (sizeof(*app_pri)) {
+ nn->app_priv = kzalloc(sizeof(*app_pri), GFP_KERNEL);
+ if (!nn->app_priv)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void nfp_nic_vnic_free(struct nfp_app *app, struct nfp_net *nn)
+{
+ kfree(nn->app_priv);
+}
+
const struct nfp_app_type app_nic = {
.id = NFP_APP_CORE_NIC,
.name = "nic",
.init = nfp_nic_init,
- .vnic_alloc = nfp_app_nic_vnic_alloc,
-
+ .vnic_alloc = nfp_nic_vnic_alloc,
+ .vnic_free = nfp_nic_vnic_free,
.sriov_enable = nfp_nic_sriov_enable,
.sriov_disable = nfp_nic_sriov_disable,
+
+ .vnic_init = nfp_nic_vnic_init,
+ .vnic_clean = nfp_nic_vnic_clean,
};
diff --git a/drivers/net/ethernet/netronome/nfp/nic/main.h b/drivers/net/ethernet/netronome/nfp/nic/main.h
new file mode 100644
index 000000000000..094374df42b8
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nic/main.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2023 Corigine, Inc. */
+
+#ifndef __NFP_NIC_H__
+#define __NFP_NIC_H__ 1
+
+#include <linux/netdevice.h>
+
+#ifdef CONFIG_DCB
+/* DCB feature definitions */
+#define NFP_NET_MAX_DSCP 4
+#define NFP_NET_MAX_TC IEEE_8021QAZ_MAX_TCS
+#define NFP_NET_MAX_PRIO 8
+#define NFP_DCB_CFG_STRIDE 256
+
+struct nfp_dcb {
+ u8 dscp2prio[NFP_NET_MAX_DSCP];
+ u8 prio2tc[NFP_NET_MAX_PRIO];
+ u8 tc2idx[IEEE_8021QAZ_MAX_TCS];
+ u64 tc_maxrate[IEEE_8021QAZ_MAX_TCS];
+ u8 tc_tx_pct[IEEE_8021QAZ_MAX_TCS];
+ u8 tc_tsa[IEEE_8021QAZ_MAX_TCS];
+ u8 dscp_cnt;
+ u8 trust_status;
+ bool rate_init;
+ bool ets_init;
+
+ struct nfp_cpp_area *dcbcfg_tbl_area;
+ u8 __iomem *dcbcfg_tbl;
+ u32 cfg_offset;
+};
+
+int nfp_nic_dcb_init(struct nfp_net *nn);
+void nfp_nic_dcb_clean(struct nfp_net *nn);
+#else
+static inline int nfp_nic_dcb_init(struct nfp_net *nn) { return 0; }
+static inline void nfp_nic_dcb_clean(struct nfp_net *nn) {}
+#endif
+
+struct nfp_app_nic_private {
+#ifdef CONFIG_DCB
+ struct nfp_dcb dcb;
+#endif
+};
+
+#endif