Diffstat (limited to 'drivers/net/ethernet/netronome')
21 files changed, 2393 insertions, 1443 deletions
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index 6933afa69df2..4b15f0f496aa 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -6,8 +6,10 @@ nfp-objs := \ nfpcore/nfp_cpplib.o \ nfpcore/nfp_hwinfo.o \ nfpcore/nfp_mip.o \ + nfpcore/nfp_mutex.o \ nfpcore/nfp_nffw.o \ nfpcore/nfp_nsp.o \ + nfpcore/nfp_nsp_cmds.o \ nfpcore/nfp_nsp_eth.o \ nfpcore/nfp_resource.o \ nfpcore/nfp_rtsym.o \ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c b/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c index 335beb8b8b45..97a8f00674d0 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c @@ -798,7 +798,7 @@ wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, const struct bpf_insn *insn = &meta->insn; if (insn->off < 0) /* TODO */ - return -ENOTSUPP; + return -EOPNOTSUPP; wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op, insn->src_reg * 2, br_mask, insn->off); @@ -818,7 +818,7 @@ wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u32 tmp_reg; if (insn->off < 0) /* TODO */ - return -ENOTSUPP; + return -EOPNOTSUPP; tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); if (!swap) @@ -847,7 +847,7 @@ wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 areg = insn->src_reg * 2, breg = insn->dst_reg * 2; if (insn->off < 0) /* TODO */ - return -ENOTSUPP; + return -EOPNOTSUPP; if (swap) { areg ^= breg; @@ -1132,7 +1132,7 @@ static int mem_ldx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) emit_alu(nfp_prog, reg_both(meta->insn.dst_reg * 2), reg_none(), ALU_OP_NONE, NFP_BPF_ABI_LEN); else - return -ENOTSUPP; + return -EOPNOTSUPP; return 0; } @@ -1143,7 +1143,7 @@ static int mem_ldx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) if (meta->insn.off != offsetof(struct xdp_md, data) && meta->insn.off != offsetof(struct xdp_md, data_end)) - return -ENOTSUPP; + return -EOPNOTSUPP; emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, NFP_BPF_ABI_PKT); @@ -1174,12 +1174,12 @@ static int mem_stx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) if (meta->insn.off == offsetof(struct sk_buff, mark)) return wrp_set_mark(nfp_prog, meta->insn.src_reg * 2); - return -ENOTSUPP; + return -EOPNOTSUPP; } static int mem_stx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) @@ -1192,7 +1192,7 @@ static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { if (meta->insn.off < 0) /* TODO */ - return -ENOTSUPP; + return -EOPNOTSUPP; emit_br(nfp_prog, BR_UNC, meta->insn.off, 0); return 0; @@ -1206,7 +1206,7 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) u32 tmp_reg; if (insn->off < 0) /* TODO */ - return -ENOTSUPP; + return -EOPNOTSUPP; if (imm & ~0U) { tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); @@ -1245,7 +1245,7 @@ static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) u32 tmp_reg; if (insn->off < 0) /* TODO */ - return -ENOTSUPP; + return -EOPNOTSUPP; if (!imm) { meta->skip = true; @@ -1276,7 +1276,7 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) u32 tmp_reg; if (insn->off < 0) /* TODO */ - return -ENOTSUPP; + return -EOPNOTSUPP; if (!imm) { 
emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2), @@ -1302,7 +1302,7 @@ static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) const struct bpf_insn *insn = &meta->insn; if (insn->off < 0) /* TODO */ - return -ENOTSUPP; + return -EOPNOTSUPP; emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2), ALU_OP_XOR, reg_b(insn->src_reg * 2)); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index dedac720fb29..dde35dae35c5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -48,7 +48,7 @@ #include "nfpcore/nfp.h" #include "nfpcore/nfp_cpp.h" #include "nfpcore/nfp_nffw.h" -#include "nfpcore/nfp_nsp_eth.h" +#include "nfpcore/nfp_nsp.h" #include "nfpcore/nfp6000_pcie.h" @@ -253,6 +253,7 @@ exit_release_fw: static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf) { + struct nfp_nsp_identify *nspi; struct nfp_nsp *nsp; int err; @@ -269,6 +270,12 @@ static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf) pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp); + nspi = __nfp_nsp_identify(nsp); + if (nspi) { + dev_info(&pdev->dev, "BSP: %s\n", nspi->version); + kfree(nspi); + } + err = nfp_fw_load(pdev, pf, nsp); if (err < 0) { kfree(pf->eth_tbl); @@ -385,8 +392,7 @@ static void nfp_pci_remove(struct pci_dev *pdev) { struct nfp_pf *pf = pci_get_drvdata(pdev); - if (!list_empty(&pf->ports)) - nfp_net_pci_remove(pf); + nfp_net_pci_remove(pf); nfp_pcie_sriov_disable(pdev); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index 39105d0435e9..b57de047b002 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -42,7 +42,9 @@ #include <linux/list.h> #include <linux/types.h> #include <linux/msi.h> +#include <linux/mutex.h> #include <linux/pci.h> +#include <linux/workqueue.h> struct dentry; struct pci_dev; @@ -64,8 +66,11 @@ struct nfp_eth_table; * @fw_loaded: Is the firmware loaded? * @eth_tbl: NSP ETH table * @ddir: Per-device debugfs directory - * @num_ports: Number of adapter ports + * @num_ports: Number of adapter ports app firmware supports + * @num_netdevs: Number of netdevs spawned * @ports: Linked list of port structures (struct nfp_net) + * @port_lock: Protects @ports, @num_ports, @num_netdevs + * @port_refresh_work: Work entry for taking netdevs out */ struct nfp_pf { struct pci_dev *pdev; @@ -88,7 +93,11 @@ struct nfp_pf { struct dentry *ddir; unsigned int num_ports; + unsigned int num_netdevs; + struct list_head ports; + struct work_struct port_refresh_work; + struct mutex port_lock; }; extern struct pci_driver nfp_netvf_pci_driver; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index e614a376b595..fcf81b3be830 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -50,14 +50,14 @@ #include "nfp_net_ctrl.h" -#define nn_err(nn, fmt, args...) netdev_err((nn)->netdev, fmt, ## args) -#define nn_warn(nn, fmt, args...) netdev_warn((nn)->netdev, fmt, ## args) -#define nn_info(nn, fmt, args...) netdev_info((nn)->netdev, fmt, ## args) -#define nn_dbg(nn, fmt, args...) netdev_dbg((nn)->netdev, fmt, ## args) -#define nn_warn_ratelimit(nn, fmt, args...) \ +#define nn_err(nn, fmt, args...) netdev_err((nn)->dp.netdev, fmt, ## args) +#define nn_warn(nn, fmt, args...) 
netdev_warn((nn)->dp.netdev, fmt, ## args) +#define nn_info(nn, fmt, args...) netdev_info((nn)->dp.netdev, fmt, ## args) +#define nn_dbg(nn, fmt, args...) netdev_dbg((nn)->dp.netdev, fmt, ## args) +#define nn_dp_warn(dp, fmt, args...) \ do { \ if (unlikely(net_ratelimit())) \ - netdev_warn((nn)->netdev, fmt, ## args); \ + netdev_warn((dp)->netdev, fmt, ## args); \ } while (0) /* Max time to wait for NFP to respond on updates (in seconds) */ @@ -112,6 +112,7 @@ /* Forward declarations */ struct nfp_cpp; +struct nfp_eth_table_port; struct nfp_net; struct nfp_net_r_vector; @@ -200,6 +201,7 @@ struct nfp_net_tx_buf { * @txds: Virtual address of TX ring in host memory * @dma: DMA address of the TX ring * @size: Size, in bytes, of the TX ring (needed to free) + * @is_xdp: Is this an XDP TX ring? */ struct nfp_net_tx_ring { struct nfp_net_r_vector *r_vec; @@ -220,6 +222,7 @@ struct nfp_net_tx_ring { dma_addr_t dma; unsigned int size; + bool is_xdp; } ____cacheline_aligned; /* RX and freelist descriptor format */ @@ -283,6 +286,12 @@ struct nfp_net_rx_desc { #define NFP_NET_META_FIELD_MASK GENMASK(NFP_NET_META_FIELD_SIZE - 1, 0) +struct nfp_meta_parsed { + u32 hash_type; + u32 hash; + u32 mark; +}; + struct nfp_net_rx_hash { __be32 hash_type; __be32 hash; @@ -306,17 +315,13 @@ struct nfp_net_rx_buf { * @rd_p: FL/RX ring read pointer (free running) * @idx: Ring index from Linux's perspective * @fl_qcidx: Queue Controller Peripheral (QCP) queue index for the freelist - * @rx_qcidx: Queue Controller Peripheral (QCP) queue index for the RX queue * @qcp_fl: Pointer to base of the QCP freelist queue - * @qcp_rx: Pointer to base of the QCP RX queue * @wr_ptr_add: Accumulated number of buffers to add to QCP write pointer * (used for free list batching) * @rxbufs: Array of FL/RX buffers * @rxds: Virtual address of FL/RX ring in host memory * @dma: DMA address of the FL/RX ring * @size: Size, in bytes, of the FL/RX ring (needed to free) - * @bufsz: Buffer allocation size for convenience of management routines - * (NOTE: this is in second cache line, do not use on fast path!) */ struct nfp_net_rx_ring { struct nfp_net_r_vector *r_vec; @@ -325,20 +330,17 @@ struct nfp_net_rx_ring { u32 wr_p; u32 rd_p; - u16 idx; - u16 wr_ptr_add; + u32 idx; + u32 wr_ptr_add; int fl_qcidx; - int rx_qcidx; u8 __iomem *qcp_fl; - u8 __iomem *qcp_rx; struct nfp_net_rx_buf *rxbufs; struct nfp_net_rx_desc *rxds; dma_addr_t dma; unsigned int size; - unsigned int bufsz; } ____cacheline_aligned; /** @@ -433,19 +435,76 @@ struct nfp_stat_pair { }; /** - * struct nfp_net - NFP network device structure - * @pdev: Backpointer to PCI device - * @netdev: Backpointer to net_device structure - * @is_vf: Is the driver attached to a VF? + * struct nfp_net_dp - NFP network device datapath data structure + * @dev: Backpointer to struct device + * @netdev: Backpointer to net_device structure + * @is_vf: Is the driver attached to a VF? * @bpf_offload_skip_sw: Offloaded BPF program will not be rerun by cls_bpf * @bpf_offload_xdp: Offloaded BPF program is XDP - * @ctrl: Local copy of the control register/word. - * @fl_bufsz: Currently configured size of the freelist buffers + * @chained_metadata_format: Firmware will use new metadata format + * @rx_dma_dir: Mapping direction for RX buffers + * @rx_dma_off: Offset at which DMA of packets starts (for XDP headroom) * @rx_offset: Offset in the RX buffers where packet data starts + * @ctrl: Local copy of the control register/word.
+ * @fl_bufsz: Currently configured size of the freelist buffers * @xdp_prog: Installed XDP program - * @fw_ver: Firmware version + * @tx_rings: Array of pre-allocated TX ring structures + * @rx_rings: Array of pre-allocated RX ring structures + * @ctrl_bar: Pointer to mapped control BAR + * + * @txd_cnt: Size of the TX ring in number of descriptors + * @rxd_cnt: Size of the RX ring in number of descriptors + * @num_r_vecs: Number of used ring vectors + * @num_tx_rings: Currently configured number of TX rings + * @num_stack_tx_rings: Number of TX rings used by the stack (not XDP) + * @num_rx_rings: Currently configured number of RX rings + * @mtu: Device MTU + */ +struct nfp_net_dp { + struct device *dev; + struct net_device *netdev; + + u8 is_vf:1; + u8 bpf_offload_skip_sw:1; + u8 bpf_offload_xdp:1; + u8 chained_metadata_format:1; + + u8 rx_dma_dir; + u8 rx_offset; + + u32 rx_dma_off; + + u32 ctrl; + u32 fl_bufsz; + + struct bpf_prog *xdp_prog; + + struct nfp_net_tx_ring *tx_rings; + struct nfp_net_rx_ring *rx_rings; + + u8 __iomem *ctrl_bar; + + /* Cold data follows */ + + unsigned int txd_cnt; + unsigned int rxd_cnt; + + unsigned int num_r_vecs; + + unsigned int num_tx_rings; + unsigned int num_stack_tx_rings; + unsigned int num_rx_rings; + + unsigned int mtu; +}; + +/** + * struct nfp_net - NFP network device structure + * @dp: Datapath structure + * @fw_ver: Firmware version * @cap: Capabilities advertised by the Firmware * @max_mtu: Maximum supported MTU advertised by the Firmware + * @rss_hfunc: RSS selected hash function * @rss_cfg: RSS configuration * @rss_key: RSS secret key * @rss_itbl: RSS indirection table @@ -454,17 +513,9 @@ struct nfp_stat_pair { * @rx_filter_change: Jiffies when statistics last changed * @rx_filter_stats_timer: Timer for polling filter offload statistics * @rx_filter_lock: Lock protecting timer state changes (teardown) + * @max_r_vecs: Number of allocated interrupt vectors for RX/TX * @max_tx_rings: Maximum number of TX rings supported by the Firmware * @max_rx_rings: Maximum number of RX rings supported by the Firmware - * @num_tx_rings: Currently configured number of TX rings - * @num_stack_tx_rings: Number of TX rings used by the stack (not XDP) - * @num_rx_rings: Currently configured number of RX rings - * @txd_cnt: Size of the TX ring in number of descriptors - * @rxd_cnt: Size of the RX ring in number of descriptors - * @tx_rings: Array of pre-allocated TX ring structures - * @rx_rings: Array of pre-allocated RX ring structures - * @max_r_vecs: Number of allocated interrupt vectors for RX/TX - * @num_r_vecs: Number of used ring vectors * @r_vecs: Pre-allocated array of ring vectors * @irq_entries: Pre-allocated array of MSI-X entries * @lsc_handler: Handler for Link State Change interrupt @@ -480,7 +531,8 @@ struct nfp_stat_pair { * @reconfig_sync_present: Some thread is performing synchronous reconfig * @reconfig_timer: Timer for async reading of reconfig results * @link_up: Is the link up? - * @link_status_lock: Protects @link_up and ensures atomicity with BAR reading + * @link_changed: Has link state changed since the last port refresh?
+ * @link_status_lock: Protects @link_* and ensures atomicity with BAR reading * @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter * @rx_coalesce_max_frames: RX interrupt moderation frame count parameter * @tx_coalesce_usecs: TX interrupt moderation usecs delay parameter @@ -488,36 +540,24 @@ struct nfp_stat_pair { * @vxlan_ports: VXLAN ports for RX inner csum offload communicated to HW * @vxlan_usecnt: IPv4/IPv6 VXLAN port use counts * @qcp_cfg: Pointer to QCP queue used for configuration notification - * @ctrl_bar: Pointer to mapped control BAR * @tx_bar: Pointer to mapped TX queues * @rx_bar: Pointer to mapped FL/RX queues * @debugfs_dir: Device directory in debugfs * @ethtool_dump_flag: Ethtool dump flag * @port_list: Entry on device port list + * @pdev: Backpointer to PCI device * @cpp: CPP device handle if available + * @eth_port: Translated ETH Table port entry */ struct nfp_net { - struct pci_dev *pdev; - struct net_device *netdev; - - unsigned is_vf:1; - unsigned bpf_offload_skip_sw:1; - unsigned bpf_offload_xdp:1; - - u32 ctrl; - u32 fl_bufsz; - - u32 rx_offset; - - struct bpf_prog *xdp_prog; - - struct nfp_net_tx_ring *tx_rings; - struct nfp_net_rx_ring *rx_rings; + struct nfp_net_dp dp; struct nfp_net_fw_version fw_ver; + u32 cap; u32 max_mtu; + u8 rss_hfunc; u32 rss_cfg; u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ]; u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ]; @@ -530,18 +570,10 @@ struct nfp_net { unsigned int max_tx_rings; unsigned int max_rx_rings; - unsigned int num_tx_rings; - unsigned int num_stack_tx_rings; - unsigned int num_rx_rings; - int stride_tx; int stride_rx; - int txd_cnt; - int rxd_cnt; - unsigned int max_r_vecs; - unsigned int num_r_vecs; struct nfp_net_r_vector r_vecs[NFP_NET_MAX_R_VECS]; struct msix_entry irq_entries[NFP_NET_MAX_IRQS]; @@ -557,6 +589,7 @@ struct nfp_net { u32 me_freq_mhz; bool link_up; + bool link_changed; spinlock_t link_status_lock; spinlock_t reconfig_lock; @@ -575,7 +608,6 @@ struct nfp_net { u8 __iomem *qcp_cfg; - u8 __iomem *ctrl_bar; u8 __iomem *tx_bar; u8 __iomem *rx_bar; @@ -584,14 +616,10 @@ struct nfp_net { struct list_head port_list; + struct pci_dev *pdev; struct nfp_cpp *cpp; -}; -struct nfp_net_ring_set { - unsigned int n_rings; - unsigned int mtu; - unsigned int dcnt; - void *rings; + struct nfp_eth_table_port *eth_port; }; /* Functions to read/write from/to a BAR @@ -599,42 +627,42 @@ struct nfp_net_ring_set { */ static inline u16 nn_readb(struct nfp_net *nn, int off) { - return readb(nn->ctrl_bar + off); + return readb(nn->dp.ctrl_bar + off); } static inline void nn_writeb(struct nfp_net *nn, int off, u8 val) { - writeb(val, nn->ctrl_bar + off); + writeb(val, nn->dp.ctrl_bar + off); } static inline u16 nn_readw(struct nfp_net *nn, int off) { - return readw(nn->ctrl_bar + off); + return readw(nn->dp.ctrl_bar + off); } static inline void nn_writew(struct nfp_net *nn, int off, u16 val) { - writew(val, nn->ctrl_bar + off); + writew(val, nn->dp.ctrl_bar + off); } static inline u32 nn_readl(struct nfp_net *nn, int off) { - return readl(nn->ctrl_bar + off); + return readl(nn->dp.ctrl_bar + off); } static inline void nn_writel(struct nfp_net *nn, int off, u32 val) { - writel(val, nn->ctrl_bar + off); + writel(val, nn->dp.ctrl_bar + off); } static inline u64 nn_readq(struct nfp_net *nn, int off) { - return readq(nn->ctrl_bar + off); + return readq(nn->dp.ctrl_bar + off); } static inline void nn_writeq(struct nfp_net *nn, int off, u64 val) { - writeq(val, nn->ctrl_bar + off); + writeq(val, nn->dp.ctrl_bar + off); } /* Flush posted 
PCI writes by reading something without side effects */ @@ -776,6 +804,7 @@ void nfp_net_netdev_clean(struct net_device *netdev); void nfp_net_set_ethtool_ops(struct net_device *netdev); void nfp_net_info(struct nfp_net *nn); int nfp_net_reconfig(struct nfp_net *nn, u32 update); +unsigned int nfp_net_rss_key_sz(struct nfp_net *nn); void nfp_net_rss_write_itbl(struct nfp_net *nn); void nfp_net_rss_write_key(struct nfp_net *nn); void nfp_net_coalesce_write_cfg(struct nfp_net *nn); @@ -787,9 +816,14 @@ void nfp_net_irqs_disable(struct pci_dev *pdev); void nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries, unsigned int n); -int -nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog, - struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx); + +struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn); +int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new, + struct netlink_ext_ack *extack); + +bool nfp_net_link_changed_read_clear(struct nfp_net *nn); +int nfp_net_refresh_eth_port(struct nfp_net *nn); +void nfp_net_refresh_port_table(struct nfp_net *nn); #ifdef CONFIG_NFP_DEBUG void nfp_net_debugfs_create(void); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 9179a99563af..db20376260f5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -41,6 +41,7 @@ * Chris Telfer <chris.telfer@netronome.com> */ +#include <linux/bitfield.h> #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <linux/module.h> @@ -66,6 +67,7 @@ #include <net/pkt_cls.h> #include <net/vxlan.h> +#include "nfpcore/nfp_nsp.h" #include "nfp_net_ctrl.h" #include "nfp_net.h" @@ -83,20 +85,33 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver, put_unaligned_le32(reg, fw_ver); } -static dma_addr_t -nfp_net_dma_map_rx(struct nfp_net *nn, void *frag, unsigned int bufsz, - int direction) +static dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag) { - return dma_map_single(&nn->pdev->dev, frag + NFP_NET_RX_BUF_HEADROOM, - bufsz - NFP_NET_RX_BUF_NON_DATA, direction); + return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM, + dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA, + dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC); } static void -nfp_net_dma_unmap_rx(struct nfp_net *nn, dma_addr_t dma_addr, - unsigned int bufsz, int direction) +nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr) { - dma_unmap_single(&nn->pdev->dev, dma_addr, - bufsz - NFP_NET_RX_BUF_NON_DATA, direction); + dma_sync_single_for_device(dp->dev, dma_addr, + dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA, + dp->rx_dma_dir); +} + +static void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr) +{ + dma_unmap_single_attrs(dp->dev, dma_addr, + dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA, + dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC); +} + +static void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr, + unsigned int len) +{ + dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM, + len, dp->rx_dma_dir); } /* Firmware reconfig @@ -327,19 +342,22 @@ void nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries, unsigned int n) { + struct nfp_net_dp *dp = &nn->dp; + nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS; - nn->num_r_vecs = nn->max_r_vecs; + dp->num_r_vecs = nn->max_r_vecs; memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n); - if (nn->num_rx_rings > nn->num_r_vecs || - 
nn->num_tx_rings > nn->num_r_vecs) - nn_warn(nn, "More rings (%d,%d) than vectors (%d).\n", - nn->num_rx_rings, nn->num_tx_rings, nn->num_r_vecs); + if (dp->num_rx_rings > dp->num_r_vecs || + dp->num_tx_rings > dp->num_r_vecs) + dev_warn(nn->dp.dev, "More rings (%d,%d) than vectors (%d).\n", + dp->num_rx_rings, dp->num_tx_rings, + dp->num_r_vecs); - nn->num_rx_rings = min(nn->num_r_vecs, nn->num_rx_rings); - nn->num_tx_rings = min(nn->num_r_vecs, nn->num_tx_rings); - nn->num_stack_tx_rings = nn->num_tx_rings; + dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings); + dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings); + dp->num_stack_tx_rings = dp->num_tx_rings; } /** @@ -373,6 +391,19 @@ static irqreturn_t nfp_net_irq_rxtx(int irq, void *data) return IRQ_HANDLED; } +bool nfp_net_link_changed_read_clear(struct nfp_net *nn) +{ + unsigned long flags; + bool ret; + + spin_lock_irqsave(&nn->link_status_lock, flags); + ret = nn->link_changed; + nn->link_changed = false; + spin_unlock_irqrestore(&nn->link_status_lock, flags); + + return ret; +} + /** * nfp_net_read_link_status() - Reread link status from control BAR * @nn: NFP Network structure @@ -392,13 +423,14 @@ static void nfp_net_read_link_status(struct nfp_net *nn) goto out; nn->link_up = link_up; + nn->link_changed = true; if (nn->link_up) { - netif_carrier_on(nn->netdev); - netdev_info(nn->netdev, "NIC Link is Up\n"); + netif_carrier_on(nn->dp.netdev); + netdev_info(nn->dp.netdev, "NIC Link is Up\n"); } else { - netif_carrier_off(nn->netdev); - netdev_info(nn->netdev, "NIC Link is Down\n"); + netif_carrier_off(nn->dp.netdev); + netdev_info(nn->dp.netdev, "NIC Link is Down\n"); } out: spin_unlock_irqrestore(&nn->link_status_lock, flags); @@ -446,15 +478,18 @@ static irqreturn_t nfp_net_irq_exn(int irq, void *data) * @tx_ring: TX ring structure * @r_vec: IRQ vector servicing this ring * @idx: Ring index + * @is_xdp: Is this an XDP TX ring? */ static void nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring, - struct nfp_net_r_vector *r_vec, unsigned int idx) + struct nfp_net_r_vector *r_vec, unsigned int idx, + bool is_xdp) { struct nfp_net *nn = r_vec->nfp_net; tx_ring->idx = idx; tx_ring->r_vec = r_vec; + tx_ring->is_xdp = is_xdp; tx_ring->qcidx = tx_ring->idx * nn->stride_tx; tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx); @@ -476,10 +511,7 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring, rx_ring->r_vec = r_vec; rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx; - rx_ring->rx_qcidx = rx_ring->fl_qcidx + (nn->stride_rx - 1); - rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx); - rx_ring->qcp_rx = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->rx_qcidx); } /** @@ -530,7 +562,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset, entry = &nn->irq_entries[vector_idx]; - snprintf(name, name_sz, format, netdev_name(nn->netdev)); + snprintf(name, name_sz, format, netdev_name(nn->dp.netdev)); err = request_irq(entry->vector, handler, 0, name, nn); if (err) { nn_err(nn, "Failed to request IRQ %d (err=%d).\n", @@ -617,7 +649,6 @@ static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q, /** * nfp_net_tx_tso() - Set up Tx descriptor for LSO - * @nn: NFP Net device * @r_vec: per-ring structure * @txbuf: Pointer to driver soft TX descriptor * @txd: Pointer to HW TX descriptor @@ -626,7 +657,7 @@ static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q, * Set up Tx descriptor for LSO, do nothing for non-LSO skbs. * Return error on packet header greater than maximum supported LSO header size. 
*/ -static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, +static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_net_tx_buf *txbuf, struct nfp_net_tx_desc *txd, struct sk_buff *skb) { @@ -657,7 +688,7 @@ static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, /** * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor - * @nn: NFP Net device + * @dp: NFP Net data path struct * @r_vec: per-ring structure * @txbuf: Pointer to driver soft TX descriptor * @txd: Pointer to TX descriptor @@ -666,7 +697,8 @@ static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, * This function sets the TX checksum flags in the TX descriptor based * on the configuration and the protocol of the packet to be transmitted. */ -static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, +static void nfp_net_tx_csum(struct nfp_net_dp *dp, + struct nfp_net_r_vector *r_vec, struct nfp_net_tx_buf *txbuf, struct nfp_net_tx_desc *txd, struct sk_buff *skb) { @@ -674,7 +706,7 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, struct iphdr *iph; u8 l4_hdr; - if (!(nn->ctrl & NFP_NET_CFG_CTRL_TXCSUM)) + if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM)) return; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -693,8 +725,7 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, } else if (ipv6h->version == 6) { l4_hdr = ipv6h->nexthdr; } else { - nn_warn_ratelimit(nn, "partial checksum but ipv=%x!\n", - iph->version); + nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version); return; } @@ -706,8 +737,7 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, txd->flags |= PCIE_DESC_TX_UDP_CSUM; break; default: - nn_warn_ratelimit(nn, "partial checksum but l4 proto=%x!\n", - l4_hdr); + nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr); return; } @@ -737,28 +767,31 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); const struct skb_frag_struct *frag; - struct nfp_net_r_vector *r_vec; struct nfp_net_tx_desc *txd, txdg; - struct nfp_net_tx_buf *txbuf; struct nfp_net_tx_ring *tx_ring; + struct nfp_net_r_vector *r_vec; + struct nfp_net_tx_buf *txbuf; struct netdev_queue *nd_q; + struct nfp_net_dp *dp; dma_addr_t dma_addr; unsigned int fsize; int f, nr_frags; int wr_idx; u16 qidx; + dp = &nn->dp; qidx = skb_get_queue_mapping(skb); - tx_ring = &nn->tx_rings[qidx]; + tx_ring = &dp->tx_rings[qidx]; r_vec = tx_ring->r_vec; - nd_q = netdev_get_tx_queue(nn->netdev, qidx); + nd_q = netdev_get_tx_queue(dp->netdev, qidx); nr_frags = skb_shinfo(skb)->nr_frags; if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) { - nn_warn_ratelimit(nn, "TX ring %d busy. wrp=%u rdp=%u\n", - qidx, tx_ring->wr_p, tx_ring->rd_p); + nn_dp_warn(dp, "TX ring %d busy. 
wrp=%u rdp=%u\n", + qidx, tx_ring->wr_p, tx_ring->rd_p); netif_tx_stop_queue(nd_q); + nfp_net_tx_xmit_more_flush(tx_ring); u64_stats_update_begin(&r_vec->tx_sync); r_vec->tx_busy++; u64_stats_update_end(&r_vec->tx_sync); @@ -766,9 +799,9 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) } /* Start with the head skbuf */ - dma_addr = dma_map_single(&nn->pdev->dev, skb->data, skb_headlen(skb), + dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); - if (dma_mapping_error(&nn->pdev->dev, dma_addr)) + if (dma_mapping_error(dp->dev, dma_addr)) goto err_free; wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1); @@ -792,11 +825,11 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) txd->mss = 0; txd->l4_offset = 0; - nfp_net_tx_tso(nn, r_vec, txbuf, txd, skb); + nfp_net_tx_tso(r_vec, txbuf, txd, skb); - nfp_net_tx_csum(nn, r_vec, txbuf, txd, skb); + nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb); - if (skb_vlan_tag_present(skb) && nn->ctrl & NFP_NET_CFG_CTRL_TXVLAN) { + if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) { txd->flags |= PCIE_DESC_TX_VLAN; txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb)); } @@ -810,9 +843,9 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) frag = &skb_shinfo(skb)->frags[f]; fsize = skb_frag_size(frag); - dma_addr = skb_frag_dma_map(&nn->pdev->dev, frag, 0, + dma_addr = skb_frag_dma_map(dp->dev, frag, 0, fsize, DMA_TO_DEVICE); - if (dma_mapping_error(&nn->pdev->dev, dma_addr)) + if (dma_mapping_error(dp->dev, dma_addr)) goto err_unmap; wr_idx = (wr_idx + 1) & (tx_ring->cnt - 1); @@ -851,8 +884,7 @@ err_unmap: --f; while (f >= 0) { frag = &skb_shinfo(skb)->frags[f]; - dma_unmap_page(&nn->pdev->dev, - tx_ring->txbufs[wr_idx].dma_addr, + dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr, skb_frag_size(frag), DMA_TO_DEVICE); tx_ring->txbufs[wr_idx].skb = NULL; tx_ring->txbufs[wr_idx].dma_addr = 0; @@ -861,13 +893,14 @@ err_unmap: if (wr_idx < 0) wr_idx += tx_ring->cnt; } - dma_unmap_single(&nn->pdev->dev, tx_ring->txbufs[wr_idx].dma_addr, + dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr, skb_headlen(skb), DMA_TO_DEVICE); tx_ring->txbufs[wr_idx].skb = NULL; tx_ring->txbufs[wr_idx].dma_addr = 0; tx_ring->txbufs[wr_idx].fidx = -2; err_free: - nn_warn_ratelimit(nn, "Failed to map DMA TX buffer\n"); + nn_dp_warn(dp, "Failed to map DMA TX buffer\n"); + nfp_net_tx_xmit_more_flush(tx_ring); u64_stats_update_begin(&r_vec->tx_sync); r_vec->tx_errors++; u64_stats_update_end(&r_vec->tx_sync); @@ -884,7 +917,7 @@ err_free: static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) { struct nfp_net_r_vector *r_vec = tx_ring->r_vec; - struct nfp_net *nn = r_vec->nfp_net; + struct nfp_net_dp *dp = &r_vec->nfp_net->dp; const struct skb_frag_struct *frag; struct netdev_queue *nd_q; u32 done_pkts = 0, done_bytes = 0; @@ -894,6 +927,9 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) int fidx; int idx; + if (tx_ring->wr_p == tx_ring->rd_p) + return; + /* Work out how many descriptors have been transmitted */ qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q); @@ -918,8 +954,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) if (fidx == -1) { /* unmap head */ - dma_unmap_single(&nn->pdev->dev, - tx_ring->txbufs[idx].dma_addr, + dma_unmap_single(dp->dev, tx_ring->txbufs[idx].dma_addr, skb_headlen(skb), DMA_TO_DEVICE); done_pkts += tx_ring->txbufs[idx].pkt_cnt; @@ -927,8 +962,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring 
*tx_ring) } else { /* unmap fragment */ frag = &skb_shinfo(skb)->frags[fidx]; - dma_unmap_page(&nn->pdev->dev, - tx_ring->txbufs[idx].dma_addr, + dma_unmap_page(dp->dev, tx_ring->txbufs[idx].dma_addr, skb_frag_size(frag), DMA_TO_DEVICE); } @@ -948,7 +982,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) r_vec->tx_pkts += done_pkts; u64_stats_update_end(&r_vec->tx_sync); - nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx); + nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx); netdev_tx_completed_queue(nd_q, done_pkts, done_bytes); if (nfp_net_tx_ring_should_wake(tx_ring)) { /* Make sure TX thread will see updated tx_ring->rd_p */ @@ -966,11 +1000,13 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring) { struct nfp_net_r_vector *r_vec = tx_ring->r_vec; - struct nfp_net *nn = r_vec->nfp_net; u32 done_pkts = 0, done_bytes = 0; int idx, todo; u32 qcp_rd_p; + if (tx_ring->wr_p == tx_ring->rd_p) + return; + /* Work out how many descriptors have been transmitted */ qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q); @@ -982,23 +1018,12 @@ static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring) else todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p; + done_pkts = todo; while (todo--) { idx = tx_ring->rd_p & (tx_ring->cnt - 1); tx_ring->rd_p++; - if (!tx_ring->txbufs[idx].frag) - continue; - - nfp_net_dma_unmap_rx(nn, tx_ring->txbufs[idx].dma_addr, - nn->fl_bufsz, DMA_BIDIRECTIONAL); - __free_page(virt_to_page(tx_ring->txbufs[idx].frag)); - - done_pkts++; done_bytes += tx_ring->txbufs[idx].real_len; - - tx_ring->txbufs[idx].dma_addr = 0; - tx_ring->txbufs[idx].frag = NULL; - tx_ring->txbufs[idx].fidx = -2; } tx_ring->qcp_rd_p = qcp_rd_p; @@ -1015,52 +1040,43 @@ static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring) /** * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers - * @nn: NFP Net device + * @dp: NFP Net data path struct * @tx_ring: TX ring structure * * Assumes that the device is stopped */ static void -nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring) +nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) { - struct nfp_net_r_vector *r_vec = tx_ring->r_vec; const struct skb_frag_struct *frag; - struct pci_dev *pdev = nn->pdev; struct netdev_queue *nd_q; - while (tx_ring->rd_p != tx_ring->wr_p) { + while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) { struct nfp_net_tx_buf *tx_buf; - int idx; + struct sk_buff *skb; + int idx, nr_frags; idx = tx_ring->rd_p & (tx_ring->cnt - 1); tx_buf = &tx_ring->txbufs[idx]; - if (tx_ring == r_vec->xdp_ring) { - nfp_net_dma_unmap_rx(nn, tx_buf->dma_addr, - nn->fl_bufsz, DMA_BIDIRECTIONAL); - __free_page(virt_to_page(tx_ring->txbufs[idx].frag)); - } else { - struct sk_buff *skb = tx_ring->txbufs[idx].skb; - int nr_frags = skb_shinfo(skb)->nr_frags; - - if (tx_buf->fidx == -1) { - /* unmap head */ - dma_unmap_single(&pdev->dev, tx_buf->dma_addr, - skb_headlen(skb), - DMA_TO_DEVICE); - } else { - /* unmap fragment */ - frag = &skb_shinfo(skb)->frags[tx_buf->fidx]; - dma_unmap_page(&pdev->dev, tx_buf->dma_addr, - skb_frag_size(frag), - DMA_TO_DEVICE); - } + skb = tx_ring->txbufs[idx].skb; + nr_frags = skb_shinfo(skb)->nr_frags; - /* check for last gather fragment */ - if (tx_buf->fidx == nr_frags - 1) - dev_kfree_skb_any(skb); + if (tx_buf->fidx == -1) { + /* unmap head */ + dma_unmap_single(dp->dev, tx_buf->dma_addr, + skb_headlen(skb), DMA_TO_DEVICE); + } 
else { + /* unmap fragment */ + frag = &skb_shinfo(skb)->frags[tx_buf->fidx]; + dma_unmap_page(dp->dev, tx_buf->dma_addr, + skb_frag_size(frag), DMA_TO_DEVICE); } + /* check for last gather fragment */ + if (tx_buf->fidx == nr_frags - 1) + dev_kfree_skb_any(skb); + tx_buf->dma_addr = 0; tx_buf->skb = NULL; tx_buf->fidx = -2; @@ -1075,10 +1091,10 @@ nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring) tx_ring->qcp_rd_p = 0; tx_ring->wr_ptr_add = 0; - if (tx_ring == r_vec->xdp_ring) + if (tx_ring->is_xdp) return; - nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx); + nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx); netdev_tx_reset_queue(nd_q); } @@ -1087,7 +1103,7 @@ static void nfp_net_tx_timeout(struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); int i; - for (i = 0; i < nn->netdev->real_num_tx_queues; i++) { + for (i = 0; i < nn->dp.netdev->real_num_tx_queues; i++) { if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i))) continue; nn_warn(nn, "TX timeout on ring: %d\n", i); @@ -1098,16 +1114,17 @@ /* Receive processing */ static unsigned int -nfp_net_calc_fl_bufsz(struct nfp_net *nn, unsigned int mtu) +nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp) { unsigned int fl_bufsz; fl_bufsz = NFP_NET_RX_BUF_HEADROOM; - if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) + fl_bufsz += dp->rx_dma_off; + if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) fl_bufsz += NFP_NET_MAX_PREPEND; else - fl_bufsz += nn->rx_offset; - fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + mtu; + fl_bufsz += dp->rx_offset; + fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu; fl_bufsz = SKB_DATA_ALIGN(fl_bufsz); fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); @@ -1126,62 +1143,53 @@ nfp_net_free_frag(void *frag, bool xdp) /** * nfp_net_rx_alloc_one() - Allocate and map page frag for RX - * @rx_ring: RX ring structure of the skb + * @dp: NFP Net data path struct * @dma_addr: Pointer to storage for DMA address (output param) - * @fl_bufsz: size of freelist buffers - * @xdp: Whether XDP is enabled * * This function will allocate a new page frag and map it for DMA. * * Return: allocated page frag or NULL on failure. */ -static void * -nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr, - unsigned int fl_bufsz, bool xdp) +static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr) { - struct nfp_net *nn = rx_ring->r_vec->nfp_net; - int direction; void *frag; - if (!xdp) - frag = netdev_alloc_frag(fl_bufsz); + if (!dp->xdp_prog) + frag = netdev_alloc_frag(dp->fl_bufsz); else frag = page_address(alloc_page(GFP_KERNEL | __GFP_COLD)); if (!frag) { - nn_warn_ratelimit(nn, "Failed to alloc receive page frag\n"); + nn_dp_warn(dp, "Failed to alloc receive page frag\n"); return NULL; } - direction = xdp ?
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; - - *dma_addr = nfp_net_dma_map_rx(nn, frag, fl_bufsz, direction); - if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) { - nfp_net_free_frag(frag, xdp); - nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n"); + *dma_addr = nfp_net_dma_map_rx(dp, frag); + if (dma_mapping_error(dp->dev, *dma_addr)) { + nfp_net_free_frag(frag, dp->xdp_prog); + nn_dp_warn(dp, "Failed to map DMA RX buffer\n"); return NULL; } return frag; } -static void * -nfp_net_napi_alloc_one(struct nfp_net *nn, int direction, dma_addr_t *dma_addr) +static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr) { void *frag; - if (!nn->xdp_prog) - frag = napi_alloc_frag(nn->fl_bufsz); + if (!dp->xdp_prog) + frag = napi_alloc_frag(dp->fl_bufsz); else frag = page_address(alloc_page(GFP_ATOMIC | __GFP_COLD)); if (!frag) { - nn_warn_ratelimit(nn, "Failed to alloc receive page frag\n"); + nn_dp_warn(dp, "Failed to alloc receive page frag\n"); return NULL; } - *dma_addr = nfp_net_dma_map_rx(nn, frag, nn->fl_bufsz, direction); - if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) { - nfp_net_free_frag(frag, nn->xdp_prog); - nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n"); + *dma_addr = nfp_net_dma_map_rx(dp, frag); + if (dma_mapping_error(dp->dev, *dma_addr)) { + nfp_net_free_frag(frag, dp->xdp_prog); + nn_dp_warn(dp, "Failed to map DMA RX buffer\n"); return NULL; } @@ -1190,17 +1198,21 @@ nfp_net_napi_alloc_one(struct nfp_net *nn, int direction, dma_addr_t *dma_addr) /** * nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings + * @dp: NFP Net data path struct * @rx_ring: RX ring structure * @frag: page fragment buffer * @dma_addr: DMA address of skb mapping */ -static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring, +static void nfp_net_rx_give_one(const struct nfp_net_dp *dp, + struct nfp_net_rx_ring *rx_ring, void *frag, dma_addr_t dma_addr) { unsigned int wr_idx; wr_idx = rx_ring->wr_p & (rx_ring->cnt - 1); + nfp_net_dma_sync_dev_rx(dp, dma_addr); + /* Stash SKB and DMA address away */ rx_ring->rxbufs[wr_idx].frag = frag; rx_ring->rxbufs[wr_idx].dma_addr = dma_addr; @@ -1208,7 +1220,8 @@ static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring, /* Fill freelist descriptor */ rx_ring->rxds[wr_idx].fld.reserved = 0; rx_ring->rxds[wr_idx].fld.meta_len_dd = 0; - nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld, dma_addr); + nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld, + dma_addr + dp->rx_dma_off); rx_ring->wr_p++; rx_ring->wr_ptr_add++; @@ -1249,19 +1262,17 @@ static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring) /** * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring - * @nn: NFP Net device + * @dp: NFP Net data path struct * @rx_ring: RX ring to remove buffers from - * @xdp: Whether XDP is enabled * * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1) * entries. After device is disabled nfp_net_rx_ring_reset() must be called * to restore required ring geometry. */ static void -nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, - bool xdp) +nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp, + struct nfp_net_rx_ring *rx_ring) { - int direction = xdp ? 
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; unsigned int i; for (i = 0; i < rx_ring->cnt - 1; i++) { @@ -1272,9 +1283,8 @@ nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, if (!rx_ring->rxbufs[i].frag) continue; - nfp_net_dma_unmap_rx(nn, rx_ring->rxbufs[i].dma_addr, - rx_ring->bufsz, direction); - nfp_net_free_frag(rx_ring->rxbufs[i].frag, xdp); + nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr); + nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog); rx_ring->rxbufs[i].dma_addr = 0; rx_ring->rxbufs[i].frag = NULL; } @@ -1282,13 +1292,12 @@ nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, /** * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW) - * @nn: NFP Net device + * @dp: NFP Net data path struct * @rx_ring: RX ring to remove buffers from - * @xdp: Whether XDP is enabled */ static int -nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, - bool xdp) +nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp, + struct nfp_net_rx_ring *rx_ring) { struct nfp_net_rx_buf *rxbufs; unsigned int i; @@ -1296,11 +1305,9 @@ nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, rxbufs = rx_ring->rxbufs; for (i = 0; i < rx_ring->cnt - 1; i++) { - rxbufs[i].frag = - nfp_net_rx_alloc_one(rx_ring, &rxbufs[i].dma_addr, - rx_ring->bufsz, xdp); + rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr); if (!rxbufs[i].frag) { - nfp_net_rx_ring_bufs_free(nn, rx_ring, xdp); + nfp_net_rx_ring_bufs_free(dp, rx_ring); return -ENOMEM; } } @@ -1310,14 +1317,17 @@ nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, /** * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW + * @dp: NFP Net data path struct * @rx_ring: RX ring to fill */ -static void nfp_net_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring) +static void +nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp, + struct nfp_net_rx_ring *rx_ring) { unsigned int i; for (i = 0; i < rx_ring->cnt - 1; i++) - nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[i].frag, + nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag, rx_ring->rxbufs[i].dma_addr); } @@ -1337,17 +1347,18 @@ static int nfp_net_rx_csum_has_errors(u16 flags) /** * nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags - * @nn: NFP Net device + * @dp: NFP Net data path struct * @r_vec: per-ring structure * @rxd: Pointer to RX descriptor * @skb: Pointer to SKB */ -static void nfp_net_rx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, +static void nfp_net_rx_csum(struct nfp_net_dp *dp, + struct nfp_net_r_vector *r_vec, struct nfp_net_rx_desc *rxd, struct sk_buff *skb) { skb_checksum_none_assert(skb); - if (!(nn->netdev->features & NETIF_F_RXCSUM)) + if (!(dp->netdev->features & NETIF_F_RXCSUM)) return; if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) { @@ -1378,8 +1389,9 @@ static void nfp_net_rx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, } } -static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb, - unsigned int type, __be32 *hash) +static void +nfp_net_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta, + unsigned int type, __be32 *hash) { if (!(netdev->features & NETIF_F_RXHASH)) return; @@ -1388,34 +1400,33 @@ static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb, case NFP_NET_RSS_IPV4: case NFP_NET_RSS_IPV6: case NFP_NET_RSS_IPV6_EX: - skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L3); + 
meta->hash_type = PKT_HASH_TYPE_L3; break; default: - skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L4); + meta->hash_type = PKT_HASH_TYPE_L4; break; } + + meta->hash = get_unaligned_be32(hash); } static void -nfp_net_set_hash_desc(struct net_device *netdev, struct sk_buff *skb, - struct nfp_net_rx_desc *rxd) +nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta, + void *data, struct nfp_net_rx_desc *rxd) { - struct nfp_net_rx_hash *rx_hash; + struct nfp_net_rx_hash *rx_hash = data; if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS)) return; - rx_hash = (struct nfp_net_rx_hash *)(skb->data - sizeof(*rx_hash)); - - nfp_net_set_hash(netdev, skb, get_unaligned_be32(&rx_hash->hash_type), + nfp_net_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type), &rx_hash->hash); } static void * -nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb, - int meta_len) +nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta, + void *data, int meta_len) { - u8 *data = skb->data - meta_len; u32 meta_info; meta_info = get_unaligned_be32(data); @@ -1425,13 +1436,13 @@ nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb, switch (meta_info & NFP_NET_META_FIELD_MASK) { case NFP_NET_META_HASH: meta_info >>= NFP_NET_META_FIELD_SIZE; - nfp_net_set_hash(netdev, skb, + nfp_net_set_hash(netdev, meta, meta_info & NFP_NET_META_FIELD_MASK, (__be32 *)data); data += 4; break; case NFP_NET_META_MARK: - skb->mark = get_unaligned_be32(data); + meta->mark = get_unaligned_be32(data); data += 4; break; default: @@ -1445,8 +1456,9 @@ nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb, } static void -nfp_net_rx_drop(struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring, - struct nfp_net_rx_buf *rxbuf, struct sk_buff *skb) +nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, + struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf, + struct sk_buff *skb) { u64_stats_update_begin(&r_vec->rx_sync); r_vec->rx_drops++; @@ -1458,53 +1470,47 @@ nfp_net_rx_drop(struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring, if (skb && rxbuf && skb->head == rxbuf->frag) page_ref_inc(virt_to_head_page(rxbuf->frag)); if (rxbuf) - nfp_net_rx_give_one(rx_ring, rxbuf->frag, rxbuf->dma_addr); + nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr); if (skb) dev_kfree_skb_any(skb); } static bool -nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, +nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring, struct nfp_net_tx_ring *tx_ring, - struct nfp_net_rx_buf *rxbuf, unsigned int pkt_off, + struct nfp_net_rx_buf *rxbuf, unsigned int dma_off, unsigned int pkt_len) { struct nfp_net_tx_buf *txbuf; struct nfp_net_tx_desc *txd; - dma_addr_t new_dma_addr; - void *new_frag; int wr_idx; if (unlikely(nfp_net_tx_full(tx_ring, 1))) { - nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL); - return false; - } - - new_frag = nfp_net_napi_alloc_one(nn, DMA_BIDIRECTIONAL, &new_dma_addr); - if (unlikely(!new_frag)) { - nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL); + nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf, NULL); return false; } - nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr); wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1); /* Stash the soft descriptor of the head then initialize it */ txbuf = &tx_ring->txbufs[wr_idx]; + + nfp_net_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr); + txbuf->frag = rxbuf->frag; txbuf->dma_addr = rxbuf->dma_addr; 
txbuf->fidx = -1; txbuf->pkt_cnt = 1; txbuf->real_len = pkt_len; - dma_sync_single_for_device(&nn->pdev->dev, rxbuf->dma_addr + pkt_off, + dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off, pkt_len, DMA_BIDIRECTIONAL); /* Build TX descriptor */ txd = &tx_ring->txds[wr_idx]; txd->offset_eop = PCIE_DESC_TX_EOP; txd->dma_len = cpu_to_le16(pkt_len); - nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + pkt_off); + nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off); txd->data_len = cpu_to_le16(pkt_len); txd->flags = 0; @@ -1516,14 +1522,24 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, return true; } -static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len) +static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, void *hard_start, + unsigned int *off, unsigned int *len) { struct xdp_buff xdp; + void *orig_data; + int ret; - xdp.data = data; - xdp.data_end = data + len; + xdp.data_hard_start = hard_start; + xdp.data = data + *off; + xdp.data_end = data + *off + *len; - return bpf_prog_run_xdp(prog, &xdp); + orig_data = xdp.data; + ret = bpf_prog_run_xdp(prog, &xdp); + + *len -= xdp.data - orig_data; + *off += xdp.data - orig_data; + + return ret; } /** @@ -1540,25 +1556,24 @@ static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len) static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) { struct nfp_net_r_vector *r_vec = rx_ring->r_vec; - struct nfp_net *nn = r_vec->nfp_net; + struct nfp_net_dp *dp = &r_vec->nfp_net->dp; struct nfp_net_tx_ring *tx_ring; struct bpf_prog *xdp_prog; unsigned int true_bufsz; struct sk_buff *skb; int pkts_polled = 0; - int rx_dma_map_dir; int idx; rcu_read_lock(); - xdp_prog = READ_ONCE(nn->xdp_prog); - rx_dma_map_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; - true_bufsz = xdp_prog ? PAGE_SIZE : nn->fl_bufsz; + xdp_prog = READ_ONCE(dp->xdp_prog); + true_bufsz = xdp_prog ? 
PAGE_SIZE : dp->fl_bufsz; tx_ring = r_vec->xdp_ring; while (pkts_polled < budget) { - unsigned int meta_len, data_len, data_off, pkt_len, pkt_off; + unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off; struct nfp_net_rx_buf *rxbuf; struct nfp_net_rx_desc *rxd; + struct nfp_meta_parsed meta; dma_addr_t new_dma_addr; void *new_frag; @@ -1573,6 +1588,8 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) */ dma_rmb(); + memset(&meta, 0, sizeof(meta)); + rx_ring->rd_p++; pkts_polled++; @@ -1593,11 +1610,12 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) data_len = le16_to_cpu(rxd->rxd.data_len); pkt_len = data_len - meta_len; - if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) - pkt_off = meta_len; + pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off; + if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) + pkt_off += meta_len; else - pkt_off = nn->rx_offset; - data_off = NFP_NET_RX_BUF_HEADROOM + pkt_off; + pkt_off += dp->rx_offset; + meta_off = pkt_off - meta_len; /* Stats update */ u64_stats_update_begin(&r_vec->rx_sync); @@ -1605,30 +1623,62 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) r_vec->rx_bytes += pkt_len; u64_stats_update_end(&r_vec->rx_sync); + if (unlikely(meta_len > NFP_NET_MAX_PREPEND || + (dp->rx_offset && meta_len > dp->rx_offset))) { + nn_dp_warn(dp, "oversized RX packet metadata %u\n", + meta_len); + nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); + continue; + } + + nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, + data_len); + + if (!dp->chained_metadata_format) { + nfp_net_set_hash_desc(dp->netdev, &meta, + rxbuf->frag + meta_off, rxd); + } else if (meta_len) { + void *end; + + end = nfp_net_parse_meta(dp->netdev, &meta, + rxbuf->frag + meta_off, + meta_len); + if (unlikely(end != rxbuf->frag + pkt_off)) { + nn_dp_warn(dp, "invalid RX packet metadata\n"); + nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, + NULL); + continue; + } + } + if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF && - nn->bpf_offload_xdp)) { + dp->bpf_offload_xdp)) { + unsigned int dma_off; + void *hard_start; int act; - dma_sync_single_for_cpu(&nn->pdev->dev, - rxbuf->dma_addr + pkt_off, - pkt_len, DMA_BIDIRECTIONAL); - act = nfp_net_run_xdp(xdp_prog, rxbuf->frag + data_off, - pkt_len); + hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM; + + act = nfp_net_run_xdp(xdp_prog, rxbuf->frag, hard_start, + &pkt_off, &pkt_len); switch (act) { case XDP_PASS: break; case XDP_TX: - if (unlikely(!nfp_net_tx_xdp_buf(nn, rx_ring, + dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM; + if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring, tx_ring, rxbuf, - pkt_off, pkt_len))) - trace_xdp_exception(nn->netdev, xdp_prog, act); + dma_off, + pkt_len))) + trace_xdp_exception(dp->netdev, + xdp_prog, act); continue; default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: - trace_xdp_exception(nn->netdev, xdp_prog, act); + trace_xdp_exception(dp->netdev, xdp_prog, act); case XDP_DROP: - nfp_net_rx_give_one(rx_ring, rxbuf->frag, + nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr); continue; } @@ -1636,41 +1686,29 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) skb = build_skb(rxbuf->frag, true_bufsz); if (unlikely(!skb)) { - nfp_net_rx_drop(r_vec, rx_ring, rxbuf, NULL); + nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); continue; } - new_frag = nfp_net_napi_alloc_one(nn, rx_dma_map_dir, - &new_dma_addr); + new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr); if (unlikely(!new_frag)) { - nfp_net_rx_drop(r_vec, 
rx_ring, rxbuf, skb); + nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb); continue; } - nfp_net_dma_unmap_rx(nn, rxbuf->dma_addr, nn->fl_bufsz, - rx_dma_map_dir); + nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr); - nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr); + nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr); - skb_reserve(skb, data_off); + skb_reserve(skb, pkt_off); skb_put(skb, pkt_len); - if (nn->fw_ver.major <= 3) { - nfp_net_set_hash_desc(nn->netdev, skb, rxd); - } else if (meta_len) { - void *end; - - end = nfp_net_parse_meta(nn->netdev, skb, meta_len); - if (unlikely(end != skb->data)) { - nn_warn_ratelimit(nn, "invalid RX packet metadata\n"); - nfp_net_rx_drop(r_vec, rx_ring, NULL, skb); - continue; - } - } + skb->mark = meta.mark; + skb_set_hash(skb, meta.hash, meta.hash_type); skb_record_rx_queue(skb, rx_ring->idx); - skb->protocol = eth_type_trans(skb, nn->netdev); + skb->protocol = eth_type_trans(skb, dp->netdev); - nfp_net_rx_csum(nn, r_vec, rxd, skb); + nfp_net_rx_csum(dp, r_vec, rxd, skb); if (rxd->rxd.flags & PCIE_DESC_RX_VLAN) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), @@ -1707,10 +1745,9 @@ static int nfp_net_poll(struct napi_struct *napi, int budget) nfp_net_xdp_complete(r_vec->xdp_ring); } - if (pkts_polled < budget) { - napi_complete_done(napi, pkts_polled); - nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry); - } + if (pkts_polled < budget) + if (napi_complete_done(napi, pkts_polled)) + nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry); return pkts_polled; } @@ -1725,13 +1762,12 @@ static int nfp_net_poll(struct napi_struct *napi, int budget) static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring) { struct nfp_net_r_vector *r_vec = tx_ring->r_vec; - struct nfp_net *nn = r_vec->nfp_net; - struct pci_dev *pdev = nn->pdev; + struct nfp_net_dp *dp = &r_vec->nfp_net->dp; kfree(tx_ring->txbufs); if (tx_ring->txds) - dma_free_coherent(&pdev->dev, tx_ring->size, + dma_free_coherent(dp->dev, tx_ring->size, tx_ring->txds, tx_ring->dma); tx_ring->cnt = 0; @@ -1743,24 +1779,21 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring) /** * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring + * @dp: NFP Net data path struct * @tx_ring: TX Ring structure to allocate - * @cnt: Ring buffer count - * @is_xdp: True if ring will be used for XDP * * Return: 0 on success, negative errno otherwise. */ static int -nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt, bool is_xdp) +nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) { struct nfp_net_r_vector *r_vec = tx_ring->r_vec; - struct nfp_net *nn = r_vec->nfp_net; - struct pci_dev *pdev = nn->pdev; int sz; - tx_ring->cnt = cnt; + tx_ring->cnt = dp->txd_cnt; tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt; - tx_ring->txds = dma_zalloc_coherent(&pdev->dev, tx_ring->size, + tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); if (!tx_ring->txds) goto err_alloc; @@ -1770,15 +1803,10 @@ nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt, bool is_xdp) if (!tx_ring->txbufs) goto err_alloc; - if (!is_xdp) - netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask, + if (!tx_ring->is_xdp) + netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask, tx_ring->idx); - nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p %s\n", - tx_ring->idx, tx_ring->qcidx, - tx_ring->cnt, (unsigned long long)tx_ring->dma, tx_ring->txds, - is_xdp ? 
"XDP" : ""); - return 0; err_alloc: @@ -1786,62 +1814,92 @@ err_alloc: return -ENOMEM; } -static struct nfp_net_tx_ring * -nfp_net_tx_ring_set_prepare(struct nfp_net *nn, struct nfp_net_ring_set *s, - unsigned int num_stack_tx_rings) +static void +nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp, + struct nfp_net_tx_ring *tx_ring) { - struct nfp_net_tx_ring *rings; - unsigned int r; + unsigned int i; - rings = kcalloc(s->n_rings, sizeof(*rings), GFP_KERNEL); - if (!rings) - return NULL; + if (!tx_ring->is_xdp) + return; - for (r = 0; r < s->n_rings; r++) { - int bias = 0; + for (i = 0; i < tx_ring->cnt; i++) { + if (!tx_ring->txbufs[i].frag) + return; - if (r >= num_stack_tx_rings) - bias = num_stack_tx_rings; + nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr); + __free_page(virt_to_page(tx_ring->txbufs[i].frag)); + } +} - nfp_net_tx_ring_init(&rings[r], &nn->r_vecs[r - bias], r); +static int +nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp, + struct nfp_net_tx_ring *tx_ring) +{ + struct nfp_net_tx_buf *txbufs = tx_ring->txbufs; + unsigned int i; - if (nfp_net_tx_ring_alloc(&rings[r], s->dcnt, bias)) - goto err_free_prev; - } + if (!tx_ring->is_xdp) + return 0; - return s->rings = rings; + for (i = 0; i < tx_ring->cnt; i++) { + txbufs[i].frag = nfp_net_rx_alloc_one(dp, &txbufs[i].dma_addr); + if (!txbufs[i].frag) { + nfp_net_tx_ring_bufs_free(dp, tx_ring); + return -ENOMEM; + } + } -err_free_prev: - while (r--) - nfp_net_tx_ring_free(&rings[r]); - kfree(rings); - return NULL; + return 0; } -static void -nfp_net_tx_ring_set_swap(struct nfp_net *nn, struct nfp_net_ring_set *s) +static int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp) { - struct nfp_net_ring_set new = *s; + unsigned int r; + + dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings), + GFP_KERNEL); + if (!dp->tx_rings) + return -ENOMEM; + + for (r = 0; r < dp->num_tx_rings; r++) { + int bias = 0; + + if (r >= dp->num_stack_tx_rings) + bias = dp->num_stack_tx_rings; - s->dcnt = nn->txd_cnt; - s->rings = nn->tx_rings; - s->n_rings = nn->num_tx_rings; + nfp_net_tx_ring_init(&dp->tx_rings[r], &nn->r_vecs[r - bias], + r, bias); + + if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r])) + goto err_free_prev; - nn->txd_cnt = new.dcnt; - nn->tx_rings = new.rings; - nn->num_tx_rings = new.n_rings; + if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r])) + goto err_free_ring; + } + + return 0; + +err_free_prev: + while (r--) { + nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]); +err_free_ring: + nfp_net_tx_ring_free(&dp->tx_rings[r]); + } + kfree(dp->tx_rings); + return -ENOMEM; } -static void -nfp_net_tx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s) +static void nfp_net_tx_rings_free(struct nfp_net_dp *dp) { - struct nfp_net_tx_ring *rings = s->rings; unsigned int r; - for (r = 0; r < s->n_rings; r++) - nfp_net_tx_ring_free(&rings[r]); + for (r = 0; r < dp->num_tx_rings; r++) { + nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]); + nfp_net_tx_ring_free(&dp->tx_rings[r]); + } - kfree(rings); + kfree(dp->tx_rings); } /** @@ -1851,13 +1909,12 @@ nfp_net_tx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s) static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) { struct nfp_net_r_vector *r_vec = rx_ring->r_vec; - struct nfp_net *nn = r_vec->nfp_net; - struct pci_dev *pdev = nn->pdev; + struct nfp_net_dp *dp = &r_vec->nfp_net->dp; kfree(rx_ring->rxbufs); if (rx_ring->rxds) - dma_free_coherent(&pdev->dev, rx_ring->size, + dma_free_coherent(dp->dev, rx_ring->size, rx_ring->rxds, 
rx_ring->dma); rx_ring->cnt = 0; @@ -1869,26 +1926,19 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) /** * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring + * @dp: NFP Net data path struct * @rx_ring: RX ring to allocate - * @fl_bufsz: Size of buffers to allocate - * @cnt: Ring buffer count * * Return: 0 on success, negative errno otherwise. */ static int -nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz, - u32 cnt) +nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring) { - struct nfp_net_r_vector *r_vec = rx_ring->r_vec; - struct nfp_net *nn = r_vec->nfp_net; - struct pci_dev *pdev = nn->pdev; int sz; - rx_ring->cnt = cnt; - rx_ring->bufsz = fl_bufsz; - + rx_ring->cnt = dp->rxd_cnt; rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt; - rx_ring->rxds = dma_zalloc_coherent(&pdev->dev, rx_ring->size, + rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); if (!rx_ring->rxds) goto err_alloc; @@ -1898,10 +1948,6 @@ nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz, if (!rx_ring->rxbufs) goto err_alloc; - nn_dbg(nn, "RxQ%02d: FlQCidx=%02d RxQCidx=%02d cnt=%d dma=%#llx host=%p\n", - rx_ring->idx, rx_ring->fl_qcidx, rx_ring->rx_qcidx, - rx_ring->cnt, (unsigned long long)rx_ring->dma, rx_ring->rxds); - return 0; err_alloc: @@ -1909,82 +1955,59 @@ err_alloc: return -ENOMEM; } -static struct nfp_net_rx_ring * -nfp_net_rx_ring_set_prepare(struct nfp_net *nn, struct nfp_net_ring_set *s, - bool xdp) +static int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp) { - unsigned int fl_bufsz = nfp_net_calc_fl_bufsz(nn, s->mtu); - struct nfp_net_rx_ring *rings; unsigned int r; - rings = kcalloc(s->n_rings, sizeof(*rings), GFP_KERNEL); - if (!rings) - return NULL; + dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings), + GFP_KERNEL); + if (!dp->rx_rings) + return -ENOMEM; - for (r = 0; r < s->n_rings; r++) { - nfp_net_rx_ring_init(&rings[r], &nn->r_vecs[r], r); + for (r = 0; r < dp->num_rx_rings; r++) { + nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r); - if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz, s->dcnt)) + if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r])) goto err_free_prev; - if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r], xdp)) + if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r])) goto err_free_ring; } - return s->rings = rings; + return 0; err_free_prev: while (r--) { - nfp_net_rx_ring_bufs_free(nn, &rings[r], xdp); + nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]); err_free_ring: - nfp_net_rx_ring_free(&rings[r]); + nfp_net_rx_ring_free(&dp->rx_rings[r]); } - kfree(rings); - return NULL; -} - -static void -nfp_net_rx_ring_set_swap(struct nfp_net *nn, struct nfp_net_ring_set *s) -{ - struct nfp_net_ring_set new = *s; - - s->mtu = nn->netdev->mtu; - s->dcnt = nn->rxd_cnt; - s->rings = nn->rx_rings; - s->n_rings = nn->num_rx_rings; - - nn->netdev->mtu = new.mtu; - nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, new.mtu); - nn->rxd_cnt = new.dcnt; - nn->rx_rings = new.rings; - nn->num_rx_rings = new.n_rings; + kfree(dp->rx_rings); + return -ENOMEM; } -static void -nfp_net_rx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s, - bool xdp) +static void nfp_net_rx_rings_free(struct nfp_net_dp *dp) { - struct nfp_net_rx_ring *rings = s->rings; unsigned int r; - for (r = 0; r < s->n_rings; r++) { - nfp_net_rx_ring_bufs_free(nn, &rings[r], xdp); - nfp_net_rx_ring_free(&rings[r]); + for (r = 0; r < dp->num_rx_rings; r++) { + 
nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]); + nfp_net_rx_ring_free(&dp->rx_rings[r]); } - kfree(rings); + kfree(dp->rx_rings); } static void -nfp_net_vector_assign_rings(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, - int idx) +nfp_net_vector_assign_rings(struct nfp_net_dp *dp, + struct nfp_net_r_vector *r_vec, int idx) { - r_vec->rx_ring = idx < nn->num_rx_rings ? &nn->rx_rings[idx] : NULL; + r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL; r_vec->tx_ring = - idx < nn->num_stack_tx_rings ? &nn->tx_rings[idx] : NULL; + idx < dp->num_stack_tx_rings ? &dp->tx_rings[idx] : NULL; - r_vec->xdp_ring = idx < nn->num_tx_rings - nn->num_stack_tx_rings ? - &nn->tx_rings[nn->num_stack_tx_rings + idx] : NULL; + r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ? + &dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL; } static int @@ -1994,11 +2017,11 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, int err; /* Setup NAPI */ - netif_napi_add(nn->netdev, &r_vec->napi, + netif_napi_add(nn->dp.netdev, &r_vec->napi, nfp_net_poll, NAPI_POLL_WEIGHT); snprintf(r_vec->name, sizeof(r_vec->name), - "%s-rxtx-%d", nn->netdev->name, idx); + "%s-rxtx-%d", nn->dp.netdev->name, idx); err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name, r_vec); if (err) { @@ -2045,7 +2068,7 @@ void nfp_net_rss_write_key(struct nfp_net *nn) { int i; - for (i = 0; i < NFP_NET_CFG_RSS_KEY_SZ; i += 4) + for (i = 0; i < nfp_net_rss_key_sz(nn); i += 4) nn_writel(nn, NFP_NET_CFG_RSS_KEY + i, get_unaligned_le32(nn->rss_key + i)); } @@ -2069,13 +2092,13 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn) /* copy RX interrupt coalesce parameters */ value = (nn->rx_coalesce_max_frames << 16) | (factor * nn->rx_coalesce_usecs); - for (i = 0; i < nn->num_rx_rings; i++) + for (i = 0; i < nn->dp.num_rx_rings; i++) nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value); /* copy TX interrupt coalesce parameters */ value = (nn->tx_coalesce_max_frames << 16) | (factor * nn->tx_coalesce_usecs); - for (i = 0; i < nn->num_tx_rings; i++) + for (i = 0; i < nn->dp.num_tx_rings; i++) nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value); } @@ -2090,9 +2113,9 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn) static void nfp_net_write_mac_addr(struct nfp_net *nn) { nn_writel(nn, NFP_NET_CFG_MACADDR + 0, - get_unaligned_be32(nn->netdev->dev_addr)); + get_unaligned_be32(nn->dp.netdev->dev_addr)); nn_writew(nn, NFP_NET_CFG_MACADDR + 6, - get_unaligned_be16(nn->netdev->dev_addr + 4)); + get_unaligned_be16(nn->dp.netdev->dev_addr + 4)); } static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx) @@ -2116,7 +2139,7 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn) unsigned int r; int err; - new_ctrl = nn->ctrl; + new_ctrl = nn->dp.ctrl; new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE; update = NFP_NET_CFG_UPDATE_GEN; update |= NFP_NET_CFG_UPDATE_MSIX; @@ -2133,14 +2156,14 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn) if (err) nn_err(nn, "Could not disable device: %d\n", err); - for (r = 0; r < nn->num_rx_rings; r++) - nfp_net_rx_ring_reset(&nn->rx_rings[r]); - for (r = 0; r < nn->num_tx_rings; r++) - nfp_net_tx_ring_reset(nn, &nn->tx_rings[r]); - for (r = 0; r < nn->num_r_vecs; r++) + for (r = 0; r < nn->dp.num_rx_rings; r++) + nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]); + for (r = 0; r < nn->dp.num_tx_rings; r++) + nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]); + for (r = 0; r < nn->dp.num_r_vecs; r++) 
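/*
 * [Editor's note] The ring <-> vector assignment above relies on the TX
 * ring array layout the dp conversion establishes: entries
 * [0 .. num_stack_tx_rings - 1] back the stack's TX queues, followed by
 * one XDP TX ring per RX ring.  Vector i therefore gets rx_rings[i],
 * the stack ring tx_rings[i] (if any), and as its xdp_ring
 * tx_rings[num_stack_tx_rings + i].
 */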
nfp_net_vec_clear_ring_data(nn, r); - nn->ctrl = new_ctrl; + nn->dp.ctrl = new_ctrl; } static void @@ -2162,13 +2185,17 @@ nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn, nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry); } -static int __nfp_net_set_config_and_enable(struct nfp_net *nn) +/** + * nfp_net_set_config_and_enable() - Write control BAR and enable NFP + * @nn: NFP Net device to reconfigure + */ +static int nfp_net_set_config_and_enable(struct nfp_net *nn) { - u32 new_ctrl, update = 0; + u32 bufsz, new_ctrl, update = 0; unsigned int r; int err; - new_ctrl = nn->ctrl; + new_ctrl = nn->dp.ctrl; if (nn->cap & NFP_NET_CFG_CTRL_RSS) { nfp_net_rss_write_key(nn); @@ -2184,22 +2211,23 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn) update |= NFP_NET_CFG_UPDATE_IRQMOD; } - for (r = 0; r < nn->num_tx_rings; r++) - nfp_net_tx_ring_hw_cfg_write(nn, &nn->tx_rings[r], r); - for (r = 0; r < nn->num_rx_rings; r++) - nfp_net_rx_ring_hw_cfg_write(nn, &nn->rx_rings[r], r); + for (r = 0; r < nn->dp.num_tx_rings; r++) + nfp_net_tx_ring_hw_cfg_write(nn, &nn->dp.tx_rings[r], r); + for (r = 0; r < nn->dp.num_rx_rings; r++) + nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r); - nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ? - 0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1); + nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->dp.num_tx_rings == 64 ? + 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_tx_rings) - 1); - nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ? - 0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1); + nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ? + 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1); nfp_net_write_mac_addr(nn); - nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu); - nn_writel(nn, NFP_NET_CFG_FLBUFSZ, - nn->fl_bufsz - NFP_NET_RX_BUF_NON_DATA); + nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.netdev->mtu); + + bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA; + nn_writel(nn, NFP_NET_CFG_FLBUFSZ, bufsz); /* Enable device */ new_ctrl |= NFP_NET_CFG_CTRL_ENABLE; @@ -2211,37 +2239,26 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn) nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); err = nfp_net_reconfig(nn, update); + if (err) { + nfp_net_clear_config_and_disable(nn); + return err; + } - nn->ctrl = new_ctrl; + nn->dp.ctrl = new_ctrl; - for (r = 0; r < nn->num_rx_rings; r++) - nfp_net_rx_ring_fill_freelist(&nn->rx_rings[r]); + for (r = 0; r < nn->dp.num_rx_rings; r++) + nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]); /* Since reconfiguration requests while NFP is down are ignored we * have to wipe the entire VXLAN configuration and reinitialize it. 
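 * (Editor's note: udp_tunnel_get_rx_info() asks the stack to replay all
 * known UDP tunnel ports through .ndo_udp_tunnel_add, so the cleared
 * table is repopulated without the driver caching the ports itself.)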
*/ - if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) { + if (nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN) { memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports)); memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt)); - udp_tunnel_get_rx_info(nn->netdev); + udp_tunnel_get_rx_info(nn->dp.netdev); } - return err; -} - -/** - * nfp_net_set_config_and_enable() - Write control BAR and enable NFP - * @nn: NFP Net device to reconfigure - */ -static int nfp_net_set_config_and_enable(struct nfp_net *nn) -{ - int err; - - err = __nfp_net_set_config_and_enable(nn); - if (err) - nfp_net_clear_config_and_disable(nn); - - return err; + return 0; } /** @@ -2252,12 +2269,12 @@ static void nfp_net_open_stack(struct nfp_net *nn) { unsigned int r; - for (r = 0; r < nn->num_r_vecs; r++) { + for (r = 0; r < nn->dp.num_r_vecs; r++) { napi_enable(&nn->r_vecs[r].napi); enable_irq(nn->r_vecs[r].irq_vector); } - netif_tx_wake_all_queues(nn->netdev); + netif_tx_wake_all_queues(nn->dp.netdev); enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector); nfp_net_read_link_status(nn); @@ -2266,22 +2283,8 @@ static void nfp_net_open_stack(struct nfp_net *nn) static int nfp_net_netdev_open(struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); - struct nfp_net_ring_set rx = { - .n_rings = nn->num_rx_rings, - .mtu = nn->netdev->mtu, - .dcnt = nn->rxd_cnt, - }; - struct nfp_net_ring_set tx = { - .n_rings = nn->num_tx_rings, - .dcnt = nn->txd_cnt, - }; int err, r; - if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) { - nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl); - return -EBUSY; - } - /* Step 1: Allocate resources for rings and the like * - Request interrupts * - Allocate RX and TX ring resources @@ -2299,33 +2302,28 @@ static int nfp_net_netdev_open(struct net_device *netdev) goto err_free_exn; disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector); - for (r = 0; r < nn->num_r_vecs; r++) { + for (r = 0; r < nn->dp.num_r_vecs; r++) { err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r); if (err) goto err_cleanup_vec_p; } - nn->rx_rings = nfp_net_rx_ring_set_prepare(nn, &rx, nn->xdp_prog); - if (!nn->rx_rings) { - err = -ENOMEM; + err = nfp_net_rx_rings_prepare(nn, &nn->dp); + if (err) goto err_cleanup_vec; - } - nn->tx_rings = nfp_net_tx_ring_set_prepare(nn, &tx, - nn->num_stack_tx_rings); - if (!nn->tx_rings) { - err = -ENOMEM; + err = nfp_net_tx_rings_prepare(nn, &nn->dp); + if (err) goto err_free_rx_rings; - } for (r = 0; r < nn->max_r_vecs; r++) - nfp_net_vector_assign_rings(nn, &nn->r_vecs[r], r); + nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r); - err = netif_set_real_num_tx_queues(netdev, nn->num_stack_tx_rings); + err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings); if (err) goto err_free_rings; - err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings); + err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings); if (err) goto err_free_rings; @@ -2351,11 +2349,11 @@ static int nfp_net_netdev_open(struct net_device *netdev) return 0; err_free_rings: - nfp_net_tx_ring_set_free(nn, &tx); + nfp_net_tx_rings_free(&nn->dp); err_free_rx_rings: - nfp_net_rx_ring_set_free(nn, &rx, nn->xdp_prog); + nfp_net_rx_rings_free(&nn->dp); err_cleanup_vec: - r = nn->num_r_vecs; + r = nn->dp.num_r_vecs; err_cleanup_vec_p: while (r--) nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); @@ -2374,15 +2372,15 @@ static void nfp_net_close_stack(struct nfp_net *nn) unsigned int r; disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector); - netif_carrier_off(nn->netdev); + netif_carrier_off(nn->dp.netdev); 
nn->link_up = false; - for (r = 0; r < nn->num_r_vecs; r++) { + for (r = 0; r < nn->dp.num_r_vecs; r++) { disable_irq(nn->r_vecs[r].irq_vector); napi_disable(&nn->r_vecs[r].napi); } - netif_tx_disable(nn->netdev); + netif_tx_disable(nn->dp.netdev); } /** @@ -2393,17 +2391,19 @@ static void nfp_net_close_free_all(struct nfp_net *nn) { unsigned int r; - for (r = 0; r < nn->num_rx_rings; r++) { - nfp_net_rx_ring_bufs_free(nn, &nn->rx_rings[r], nn->xdp_prog); - nfp_net_rx_ring_free(&nn->rx_rings[r]); + for (r = 0; r < nn->dp.num_rx_rings; r++) { + nfp_net_rx_ring_bufs_free(&nn->dp, &nn->dp.rx_rings[r]); + nfp_net_rx_ring_free(&nn->dp.rx_rings[r]); + } + for (r = 0; r < nn->dp.num_tx_rings; r++) { + nfp_net_tx_ring_bufs_free(&nn->dp, &nn->dp.tx_rings[r]); + nfp_net_tx_ring_free(&nn->dp.tx_rings[r]); } - for (r = 0; r < nn->num_tx_rings; r++) - nfp_net_tx_ring_free(&nn->tx_rings[r]); - for (r = 0; r < nn->num_r_vecs; r++) + for (r = 0; r < nn->dp.num_r_vecs; r++) nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); - kfree(nn->rx_rings); - kfree(nn->tx_rings); + kfree(nn->dp.rx_rings); + kfree(nn->dp.tx_rings); nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX); @@ -2417,11 +2417,6 @@ static int nfp_net_netdev_close(struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); - if (!(nn->ctrl & NFP_NET_CFG_CTRL_ENABLE)) { - nn_err(nn, "Dev is not up: 0x%08x\n", nn->ctrl); - return 0; - } - /* Step 1: Disable RX and TX rings from the Linux kernel perspective */ nfp_net_close_stack(nn); @@ -2443,7 +2438,7 @@ static void nfp_net_set_rx_mode(struct net_device *netdev) struct nfp_net *nn = netdev_priv(netdev); u32 new_ctrl; - new_ctrl = nn->ctrl; + new_ctrl = nn->dp.ctrl; if (netdev->flags & IFF_PROMISC) { if (nn->cap & NFP_NET_CFG_CTRL_PROMISC) @@ -2454,13 +2449,13 @@ static void nfp_net_set_rx_mode(struct net_device *netdev) new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC; } - if (new_ctrl == nn->ctrl) + if (new_ctrl == nn->dp.ctrl) return; nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN); - nn->ctrl = new_ctrl; + nn->dp.ctrl = new_ctrl; } static void nfp_net_rss_init_itbl(struct nfp_net *nn) @@ -2469,181 +2464,174 @@ static void nfp_net_rss_init_itbl(struct nfp_net *nn) for (i = 0; i < sizeof(nn->rss_itbl); i++) nn->rss_itbl[i] = - ethtool_rxfh_indir_default(i, nn->num_rx_rings); + ethtool_rxfh_indir_default(i, nn->dp.num_rx_rings); } -static int -nfp_net_ring_swap_enable(struct nfp_net *nn, unsigned int *num_vecs, - unsigned int *stack_tx_rings, - struct bpf_prog **xdp_prog, - struct nfp_net_ring_set *rx, - struct nfp_net_ring_set *tx) +static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp) +{ + struct nfp_net_dp new_dp = *dp; + + *dp = nn->dp; + nn->dp = new_dp; + + nn->dp.netdev->mtu = new_dp.mtu; + + if (!netif_is_rxfh_configured(nn->dp.netdev)) + nfp_net_rss_init_itbl(nn); +} + +static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp) { unsigned int r; int err; - if (rx) - nfp_net_rx_ring_set_swap(nn, rx); - if (tx) - nfp_net_tx_ring_set_swap(nn, tx); - - swap(*num_vecs, nn->num_r_vecs); - swap(*stack_tx_rings, nn->num_stack_tx_rings); - *xdp_prog = xchg(&nn->xdp_prog, *xdp_prog); + nfp_net_dp_swap(nn, dp); for (r = 0; r < nn->max_r_vecs; r++) - nfp_net_vector_assign_rings(nn, &nn->r_vecs[r], r); + nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r); - if (!netif_is_rxfh_configured(nn->netdev)) - nfp_net_rss_init_itbl(nn); - - err = 
netif_set_real_num_rx_queues(nn->netdev, - nn->num_rx_rings); + err = netif_set_real_num_rx_queues(nn->dp.netdev, nn->dp.num_rx_rings); if (err) return err; - if (nn->netdev->real_num_tx_queues != nn->num_stack_tx_rings) { - err = netif_set_real_num_tx_queues(nn->netdev, - nn->num_stack_tx_rings); + if (nn->dp.netdev->real_num_tx_queues != nn->dp.num_stack_tx_rings) { + err = netif_set_real_num_tx_queues(nn->dp.netdev, + nn->dp.num_stack_tx_rings); if (err) return err; } - return __nfp_net_set_config_and_enable(nn); + return nfp_net_set_config_and_enable(nn); +} + +struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn) +{ + struct nfp_net_dp *new; + + new = kmalloc(sizeof(*new), GFP_KERNEL); + if (!new) + return NULL; + + *new = nn->dp; + + /* Clear things which need to be recomputed */ + new->fl_bufsz = 0; + new->tx_rings = NULL; + new->rx_rings = NULL; + new->num_r_vecs = 0; + new->num_stack_tx_rings = 0; + + return new; } static int -nfp_net_check_config(struct nfp_net *nn, struct bpf_prog *xdp_prog, - struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx) +nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp, + struct netlink_ext_ack *extack) { /* XDP-enabled tests */ - if (!xdp_prog) + if (!dp->xdp_prog) return 0; - if (rx && nfp_net_calc_fl_bufsz(nn, rx->mtu) > PAGE_SIZE) { - nn_warn(nn, "MTU too large w/ XDP enabled\n"); + if (dp->fl_bufsz > PAGE_SIZE) { + NL_MOD_TRY_SET_ERR_MSG(extack, "MTU too large w/ XDP enabled"); return -EINVAL; } - if (tx && tx->n_rings > nn->max_tx_rings) { - nn_warn(nn, "Insufficient number of TX rings w/ XDP enabled\n"); + if (dp->num_tx_rings > nn->max_tx_rings) { + NL_MOD_TRY_SET_ERR_MSG(extack, "Insufficient number of TX rings w/ XDP enabled"); return -EINVAL; } return 0; } -static void -nfp_net_ring_reconfig_down(struct nfp_net *nn, struct bpf_prog **xdp_prog, - struct nfp_net_ring_set *rx, - struct nfp_net_ring_set *tx, - unsigned int stack_tx_rings, unsigned int num_vecs) -{ - nn->netdev->mtu = rx ? rx->mtu : nn->netdev->mtu; - nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, nn->netdev->mtu); - nn->rxd_cnt = rx ? rx->dcnt : nn->rxd_cnt; - nn->txd_cnt = tx ? tx->dcnt : nn->txd_cnt; - nn->num_rx_rings = rx ? rx->n_rings : nn->num_rx_rings; - nn->num_tx_rings = tx ? tx->n_rings : nn->num_tx_rings; - nn->num_stack_tx_rings = stack_tx_rings; - nn->num_r_vecs = num_vecs; - *xdp_prog = xchg(&nn->xdp_prog, *xdp_prog); - - if (!netif_is_rxfh_configured(nn->netdev)) - nfp_net_rss_init_itbl(nn); -} - -int -nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog, - struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx) +int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp, + struct netlink_ext_ack *extack) { - unsigned int stack_tx_rings, num_vecs, r; - int err; + int r, err; - stack_tx_rings = tx ? tx->n_rings : nn->num_tx_rings; - if (*xdp_prog) - stack_tx_rings -= rx ? rx->n_rings : nn->num_rx_rings; + dp->fl_bufsz = nfp_net_calc_fl_bufsz(dp); - num_vecs = max(rx ? 
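/*
 * [Editor's sketch] The clone-and-commit API above is meant to be used
 * the way the MTU and ring-size paths in this patch use it: snapshot
 * the data path, tweak only the fields being changed, and hand the
 * clone to nfp_net_ring_reconfig(), which validates, allocates the new
 * rings, swaps them in and always kfrees the clone.  For example, for
 * the RX descriptor count:
 */
static int example_set_rxd_cnt(struct nfp_net *nn, u32 rxd_cnt)
{
	struct nfp_net_dp *new;

	new = nfp_net_clone_dp(nn);	/* snapshot current data path */
	if (!new)
		return -ENOMEM;

	new->rxd_cnt = rxd_cnt;		/* change only what we need */

	/* validates, swaps on success, frees the clone either way */
	return nfp_net_ring_reconfig(nn, new, NULL);
}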
rx->n_rings : nn->num_rx_rings, stack_tx_rings); + dp->num_stack_tx_rings = dp->num_tx_rings; + if (dp->xdp_prog) + dp->num_stack_tx_rings -= dp->num_rx_rings; - err = nfp_net_check_config(nn, *xdp_prog, rx, tx); + dp->num_r_vecs = max(dp->num_rx_rings, dp->num_stack_tx_rings); + + err = nfp_net_check_config(nn, dp, extack); if (err) - return err; + goto exit_free_dp; - if (!netif_running(nn->netdev)) { - nfp_net_ring_reconfig_down(nn, xdp_prog, rx, tx, - stack_tx_rings, num_vecs); - return 0; + if (!netif_running(dp->netdev)) { + nfp_net_dp_swap(nn, dp); + err = 0; + goto exit_free_dp; } /* Prepare new rings */ - for (r = nn->num_r_vecs; r < num_vecs; r++) { + for (r = nn->dp.num_r_vecs; r < dp->num_r_vecs; r++) { err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r); if (err) { - num_vecs = r; - goto err_cleanup_vecs; - } - } - if (rx) { - if (!nfp_net_rx_ring_set_prepare(nn, rx, *xdp_prog)) { - err = -ENOMEM; + dp->num_r_vecs = r; goto err_cleanup_vecs; } } - if (tx) { - if (!nfp_net_tx_ring_set_prepare(nn, tx, stack_tx_rings)) { - err = -ENOMEM; - goto err_free_rx; - } - } + + err = nfp_net_rx_rings_prepare(nn, dp); + if (err) + goto err_cleanup_vecs; + + err = nfp_net_tx_rings_prepare(nn, dp); + if (err) + goto err_free_rx; /* Stop device, swap in new rings, try to start the firmware */ nfp_net_close_stack(nn); nfp_net_clear_config_and_disable(nn); - err = nfp_net_ring_swap_enable(nn, &num_vecs, &stack_tx_rings, - xdp_prog, rx, tx); + err = nfp_net_dp_swap_enable(nn, dp); if (err) { int err2; nfp_net_clear_config_and_disable(nn); /* Try with old configuration and old rings */ - err2 = nfp_net_ring_swap_enable(nn, &num_vecs, &stack_tx_rings, - xdp_prog, rx, tx); + err2 = nfp_net_dp_swap_enable(nn, dp); if (err2) nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n", err, err2); } - for (r = num_vecs - 1; r >= nn->num_r_vecs; r--) + for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--) nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); - if (rx) - nfp_net_rx_ring_set_free(nn, rx, *xdp_prog); - if (tx) - nfp_net_tx_ring_set_free(nn, tx); + nfp_net_rx_rings_free(dp); + nfp_net_tx_rings_free(dp); nfp_net_open_stack(nn); +exit_free_dp: + kfree(dp); return err; err_free_rx: - if (rx) - nfp_net_rx_ring_set_free(nn, rx, *xdp_prog); + nfp_net_rx_rings_free(dp); err_cleanup_vecs: - for (r = num_vecs - 1; r >= nn->num_r_vecs; r--) + for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--) nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); + kfree(dp); return err; } static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu) { struct nfp_net *nn = netdev_priv(netdev); - struct nfp_net_ring_set rx = { - .n_rings = nn->num_rx_rings, - .mtu = new_mtu, - .dcnt = nn->rxd_cnt, - }; + struct nfp_net_dp *dp; + + dp = nfp_net_clone_dp(nn); + if (!dp) + return -ENOMEM; - return nfp_net_ring_reconfig(nn, &nn->xdp_prog, &rx, NULL); + dp->mtu = new_mtu; + + return nfp_net_ring_reconfig(nn, dp, NULL); } static void nfp_net_stat64(struct net_device *netdev, @@ -2652,7 +2640,7 @@ static void nfp_net_stat64(struct net_device *netdev, struct nfp_net *nn = netdev_priv(netdev); int r; - for (r = 0; r < nn->num_r_vecs; r++) { + for (r = 0; r < nn->dp.num_r_vecs; r++) { struct nfp_net_r_vector *r_vec = &nn->r_vecs[r]; u64 data[3]; unsigned int start; @@ -2694,12 +2682,12 @@ nfp_net_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, struct nfp_net *nn = netdev_priv(netdev); if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS)) - return -ENOTSUPP; + return -EOPNOTSUPP; if (proto != 
htons(ETH_P_ALL)) - return -ENOTSUPP; + return -EOPNOTSUPP; if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn)) { - if (!nn->bpf_offload_xdp) + if (!nn->dp.bpf_offload_xdp) return nfp_net_bpf_offload(nn, tc->cls_bpf); else return -EBUSY; @@ -2718,7 +2706,7 @@ static int nfp_net_set_features(struct net_device *netdev, /* Assume this is not called with features we have not advertised */ - new_ctrl = nn->ctrl; + new_ctrl = nn->dp.ctrl; if (changed & NETIF_F_RXCSUM) { if (features & NETIF_F_RXCSUM) @@ -2762,7 +2750,7 @@ static int nfp_net_set_features(struct net_device *netdev, new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER; } - if (changed & NETIF_F_HW_TC && nn->ctrl & NFP_NET_CFG_CTRL_BPF) { + if (changed & NETIF_F_HW_TC && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) { nn_err(nn, "Cannot disable HW TC offload while in use\n"); return -EBUSY; } @@ -2770,16 +2758,16 @@ static int nfp_net_set_features(struct net_device *netdev, nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n", netdev->features, features, changed); - if (new_ctrl == nn->ctrl) + if (new_ctrl == nn->dp.ctrl) return 0; - nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->ctrl, new_ctrl); + nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->dp.ctrl, new_ctrl); nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN); if (err) return err; - nn->ctrl = new_ctrl; + nn->dp.ctrl = new_ctrl; return 0; } @@ -2830,6 +2818,26 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev, return features; } +static int +nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len) +{ + struct nfp_net *nn = netdev_priv(netdev); + int err; + + if (!nn->eth_port) + return -EOPNOTSUPP; + + if (!nn->eth_port->is_split) + err = snprintf(name, len, "p%d", nn->eth_port->label_port); + else + err = snprintf(name, len, "p%ds%d", nn->eth_port->label_port, + nn->eth_port->label_subport); + if (err >= len) + return -EINVAL; + + return 0; +} + /** * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW * @nn: NFP Net device to reconfigure @@ -2842,7 +2850,7 @@ static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port) nn->vxlan_ports[idx] = port; - if (!(nn->ctrl & NFP_NET_CFG_CTRL_VXLAN)) + if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN)) return; BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1); @@ -2921,8 +2929,8 @@ static int nfp_net_xdp_offload(struct nfp_net *nn, struct bpf_prog *prog) if (!nfp_net_ebpf_capable(nn)) return -EINVAL; - if (nn->ctrl & NFP_NET_CFG_CTRL_BPF) { - if (!nn->bpf_offload_xdp) + if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) { + if (!nn->dp.bpf_offload_xdp) return prog ? -EBUSY : 0; cmd.command = prog ? 
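/*
 * [Editor's note] Assuming label_port/label_subport carry the front
 * panel labels, .ndo_get_phys_port_name above yields names like "p0"
 * and "p1" for whole ports and "p0s0"/"p0s1" for a split port; an
 * snprintf() result >= len means the name was truncated, which is
 * reported as -EINVAL.
 */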
TC_CLSBPF_REPLACE : TC_CLSBPF_DESTROY; } else { @@ -2935,48 +2943,44 @@ static int nfp_net_xdp_offload(struct nfp_net *nn, struct bpf_prog *prog) /* Stop offload if replace not possible */ if (ret && cmd.command == TC_CLSBPF_REPLACE) nfp_net_xdp_offload(nn, NULL); - nn->bpf_offload_xdp = prog && !ret; + nn->dp.bpf_offload_xdp = prog && !ret; return ret; } -static int nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog) +static int nfp_net_xdp_setup(struct nfp_net *nn, struct netdev_xdp *xdp) { - struct nfp_net_ring_set rx = { - .n_rings = nn->num_rx_rings, - .mtu = nn->netdev->mtu, - .dcnt = nn->rxd_cnt, - }; - struct nfp_net_ring_set tx = { - .n_rings = nn->num_tx_rings, - .dcnt = nn->txd_cnt, - }; + struct bpf_prog *old_prog = nn->dp.xdp_prog; + struct bpf_prog *prog = xdp->prog; + struct nfp_net_dp *dp; int err; - if (prog && prog->xdp_adjust_head) { - nn_err(nn, "Does not support bpf_xdp_adjust_head()\n"); - return -EOPNOTSUPP; - } - if (!prog && !nn->xdp_prog) + if (!prog && !nn->dp.xdp_prog) return 0; - if (prog && nn->xdp_prog) { - prog = xchg(&nn->xdp_prog, prog); + if (prog && nn->dp.xdp_prog) { + prog = xchg(&nn->dp.xdp_prog, prog); bpf_prog_put(prog); - nfp_net_xdp_offload(nn, nn->xdp_prog); + nfp_net_xdp_offload(nn, nn->dp.xdp_prog); return 0; } - tx.n_rings += prog ? nn->num_rx_rings : -nn->num_rx_rings; + dp = nfp_net_clone_dp(nn); + if (!dp) + return -ENOMEM; + + dp->xdp_prog = prog; + dp->num_tx_rings += prog ? nn->dp.num_rx_rings : -nn->dp.num_rx_rings; + dp->rx_dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; + dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0; /* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */ - err = nfp_net_ring_reconfig(nn, &prog, &rx, &tx); + err = nfp_net_ring_reconfig(nn, dp, xdp->extack); if (err) return err; - /* @prog got swapped and is now the old one */ - if (prog) - bpf_prog_put(prog); + if (old_prog) + bpf_prog_put(old_prog); - nfp_net_xdp_offload(nn, nn->xdp_prog); + nfp_net_xdp_offload(nn, nn->dp.xdp_prog); return 0; } @@ -2987,9 +2991,9 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_xdp *xdp) switch (xdp->command) { case XDP_SETUP_PROG: - return nfp_net_xdp_setup(nn, xdp->prog); + return nfp_net_xdp_setup(nn, xdp); case XDP_QUERY_PROG: - xdp->prog_attached = !!nn->xdp_prog; + xdp->prog_attached = !!nn->dp.xdp_prog; return 0; default: return -EINVAL; @@ -3008,6 +3012,7 @@ static const struct net_device_ops nfp_net_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, .ndo_set_features = nfp_net_set_features, .ndo_features_check = nfp_net_features_check, + .ndo_get_phys_port_name = nfp_net_get_phys_port_name, .ndo_udp_tunnel_add = nfp_net_add_vxlan_port, .ndo_udp_tunnel_del = nfp_net_del_vxlan_port, .ndo_xdp = nfp_net_xdp, @@ -3020,9 +3025,9 @@ static const struct net_device_ops nfp_net_netdev_ops = { void nfp_net_info(struct nfp_net *nn) { nn_info(nn, "Netronome NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n", - nn->is_vf ? "VF " : "", - nn->num_tx_rings, nn->max_tx_rings, - nn->num_rx_rings, nn->max_rx_rings); + nn->dp.is_vf ? 
"VF " : "", + nn->dp.num_tx_rings, nn->max_tx_rings, + nn->dp.num_rx_rings, nn->max_rx_rings); nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n", nn->fw_ver.resv, nn->fw_ver.class, nn->fw_ver.major, nn->fw_ver.minor, @@ -3074,21 +3079,24 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev, SET_NETDEV_DEV(netdev, &pdev->dev); nn = netdev_priv(netdev); - nn->netdev = netdev; + nn->dp.netdev = netdev; + nn->dp.dev = &pdev->dev; nn->pdev = pdev; nn->max_tx_rings = max_tx_rings; nn->max_rx_rings = max_rx_rings; - nn->num_tx_rings = min_t(unsigned int, max_tx_rings, num_online_cpus()); - nn->num_rx_rings = min_t(unsigned int, max_rx_rings, + nn->dp.num_tx_rings = min_t(unsigned int, + max_tx_rings, num_online_cpus()); + nn->dp.num_rx_rings = min_t(unsigned int, max_rx_rings, netif_get_num_default_rss_queues()); - nn->num_r_vecs = max(nn->num_tx_rings, nn->num_rx_rings); - nn->num_r_vecs = min_t(unsigned int, nn->num_r_vecs, num_online_cpus()); + nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings); + nn->dp.num_r_vecs = min_t(unsigned int, + nn->dp.num_r_vecs, num_online_cpus()); - nn->txd_cnt = NFP_NET_TX_DESCS_DEFAULT; - nn->rxd_cnt = NFP_NET_RX_DESCS_DEFAULT; + nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT; + nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT; spin_lock_init(&nn->reconfig_lock); spin_lock_init(&nn->rx_filter_lock); @@ -3108,7 +3116,28 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev, */ void nfp_net_netdev_free(struct nfp_net *nn) { - free_netdev(nn->netdev); + free_netdev(nn->dp.netdev); +} + +/** + * nfp_net_rss_key_sz() - Get current size of the RSS key + * @nn: NFP Net device instance + * + * Return: size of the RSS key for currently selected hash function. + */ +unsigned int nfp_net_rss_key_sz(struct nfp_net *nn) +{ + switch (nn->rss_hfunc) { + case ETH_RSS_HASH_TOP: + return NFP_NET_CFG_RSS_KEY_SZ; + case ETH_RSS_HASH_XOR: + return 0; + case ETH_RSS_HASH_CRC32: + return 4; + } + + nn_warn(nn, "Unknown hash function: %u\n", nn->rss_hfunc); + return 0; } /** @@ -3117,14 +3146,32 @@ void nfp_net_netdev_free(struct nfp_net *nn) */ static void nfp_net_rss_init(struct nfp_net *nn) { - netdev_rss_key_fill(nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ); + unsigned long func_bit, rss_cap_hfunc; + u32 reg; + + /* Read the RSS function capability and select first supported func */ + reg = nn_readl(nn, NFP_NET_CFG_RSS_CAP); + rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC, reg); + if (!rss_cap_hfunc) + rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC, + NFP_NET_CFG_RSS_TOEPLITZ); + + func_bit = find_first_bit(&rss_cap_hfunc, NFP_NET_CFG_RSS_HFUNCS); + if (func_bit == NFP_NET_CFG_RSS_HFUNCS) { + dev_warn(nn->dp.dev, + "Bad RSS config, defaulting to Toeplitz hash\n"); + func_bit = ETH_RSS_HASH_TOP_BIT; + } + nn->rss_hfunc = 1 << func_bit; + + netdev_rss_key_fill(nn->rss_key, nfp_net_rss_key_sz(nn)); nfp_net_rss_init_itbl(nn); /* Enable IPv4/IPv6 TCP by default */ nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP | NFP_NET_CFG_RSS_IPV6_TCP | - NFP_NET_CFG_RSS_TOEPLITZ | + FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc) | NFP_NET_CFG_RSS_MASK; } @@ -3151,6 +3198,10 @@ int nfp_net_netdev_init(struct net_device *netdev) struct nfp_net *nn = netdev_priv(netdev); int err; + nn->dp.chained_metadata_format = nn->fw_ver.major > 3; + + nn->dp.rx_dma_dir = DMA_FROM_DEVICE; + /* Get some of the read-only fields from the BAR */ nn->cap = nn_readl(nn, NFP_NET_CFG_CAP); nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU); @@ -3158,17 +3209,26 @@ int nfp_net_netdev_init(struct 
net_device *netdev) nfp_net_write_mac_addr(nn); /* Determine RX packet/metadata boundary offset */ - if (nn->fw_ver.major >= 2) - nn->rx_offset = nn_readl(nn, NFP_NET_CFG_RX_OFFSET); - else - nn->rx_offset = NFP_NET_RX_OFFSET; + if (nn->fw_ver.major >= 2) { + u32 reg; + + reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET); + if (reg > NFP_NET_MAX_PREPEND) { + nn_err(nn, "Invalid rx offset: %d\n", reg); + return -EINVAL; + } + nn->dp.rx_offset = reg; + } else { + nn->dp.rx_offset = NFP_NET_RX_OFFSET; + } /* Set default MTU and Freelist buffer size */ if (nn->max_mtu < NFP_NET_DEFAULT_MTU) netdev->mtu = nn->max_mtu; else netdev->mtu = NFP_NET_DEFAULT_MTU; - nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, netdev->mtu); + nn->dp.mtu = netdev->mtu; + nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp); /* Advertise/enable offloads based on capabilities * @@ -3179,31 +3239,31 @@ int nfp_net_netdev_init(struct net_device *netdev) netdev->hw_features = NETIF_F_HIGHDMA; if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM) { netdev->hw_features |= NETIF_F_RXCSUM; - nn->ctrl |= NFP_NET_CFG_CTRL_RXCSUM; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXCSUM; } if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) { netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; - nn->ctrl |= NFP_NET_CFG_CTRL_TXCSUM; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXCSUM; } if (nn->cap & NFP_NET_CFG_CTRL_GATHER) { netdev->hw_features |= NETIF_F_SG; - nn->ctrl |= NFP_NET_CFG_CTRL_GATHER; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER; } if ((nn->cap & NFP_NET_CFG_CTRL_LSO) && nn->fw_ver.major > 2) { netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; - nn->ctrl |= NFP_NET_CFG_CTRL_LSO; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_LSO; } if (nn->cap & NFP_NET_CFG_CTRL_RSS) { netdev->hw_features |= NETIF_F_RXHASH; nfp_net_rss_init(nn); - nn->ctrl |= NFP_NET_CFG_CTRL_RSS; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_RSS; } if (nn->cap & NFP_NET_CFG_CTRL_VXLAN && nn->cap & NFP_NET_CFG_CTRL_NVGRE) { if (nn->cap & NFP_NET_CFG_CTRL_LSO) netdev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL; - nn->ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE; netdev->hw_enc_features = netdev->hw_features; } @@ -3212,11 +3272,11 @@ int nfp_net_netdev_init(struct net_device *netdev) if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) { netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; - nn->ctrl |= NFP_NET_CFG_CTRL_RXVLAN; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXVLAN; } if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) { netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; - nn->ctrl |= NFP_NET_CFG_CTRL_TXVLAN; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN; } netdev->features = netdev->hw_features; @@ -3229,14 +3289,14 @@ int nfp_net_netdev_init(struct net_device *netdev) /* Allow L2 Broadcast and Multicast through by default, if supported */ if (nn->cap & NFP_NET_CFG_CTRL_L2BC) - nn->ctrl |= NFP_NET_CFG_CTRL_L2BC; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC; if (nn->cap & NFP_NET_CFG_CTRL_L2MC) - nn->ctrl |= NFP_NET_CFG_CTRL_L2MC; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2MC; /* Allow IRQ moderation, if supported */ if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) { nfp_net_irqmod_init(nn); - nn->ctrl |= NFP_NET_CFG_CTRL_IRQMOD; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD; } /* Stash the re-configuration queue away. 
First odd queue in TX Bar */ @@ -3275,9 +3335,10 @@ void nfp_net_netdev_clean(struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); - if (nn->xdp_prog) - bpf_prog_put(nn->xdp_prog); - if (nn->bpf_offload_xdp) + unregister_netdev(nn->dp.netdev); + + if (nn->dp.xdp_prog) + bpf_prog_put(nn->dp.xdp_prog); + if (nn->dp.bpf_offload_xdp) nfp_net_xdp_offload(nn, NULL); - unregister_netdev(nn->netdev); } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h index 385ba355c965..d04ccc9f6116 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Netronome Systems, Inc. + * Copyright (C) 2015-2017 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -177,6 +177,19 @@ #define NFP_NET_CFG_VERSION_MINOR(x) (((x) & 0xff) << 0) #define NFP_NET_CFG_STS 0x0034 #define NFP_NET_CFG_STS_LINK (0x1 << 0) /* Link up or down */ +/* Link rate */ +#define NFP_NET_CFG_STS_LINK_RATE_SHIFT 1 +#define NFP_NET_CFG_STS_LINK_RATE_MASK 0xF +#define NFP_NET_CFG_STS_LINK_RATE \ + (NFP_NET_CFG_STS_LINK_RATE_MASK << NFP_NET_CFG_STS_LINK_RATE_SHIFT) +#define NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED 0 +#define NFP_NET_CFG_STS_LINK_RATE_UNKNOWN 1 +#define NFP_NET_CFG_STS_LINK_RATE_1G 2 +#define NFP_NET_CFG_STS_LINK_RATE_10G 3 +#define NFP_NET_CFG_STS_LINK_RATE_25G 4 +#define NFP_NET_CFG_STS_LINK_RATE_40G 5 +#define NFP_NET_CFG_STS_LINK_RATE_50G 6 +#define NFP_NET_CFG_STS_LINK_RATE_100G 7 #define NFP_NET_CFG_CAP 0x0038 #define NFP_NET_CFG_MAX_TXRINGS 0x003c #define NFP_NET_CFG_MAX_RXRINGS 0x0040 @@ -192,6 +205,14 @@ #define NFP_NET_CFG_RX_OFFSET_DYNAMIC 0 /* Prepend mode */ /** + * RSS capabilities + * @NFP_NET_CFG_RSS_CAP_HFUNC: supported hash functions (same bits as + * @NFP_NET_CFG_RSS_HFUNC) + */ +#define NFP_NET_CFG_RSS_CAP 0x0054 +#define NFP_NET_CFG_RSS_CAP_HFUNC 0xff000000 + +/** * VXLAN/UDP encap configuration * @NFP_NET_CFG_VXLAN_PORT: Base address of table of tunnels' UDP dst ports * @NFP_NET_CFG_VXLAN_SZ: Size of the UDP port table in bytes @@ -249,7 +270,11 @@ #define NFP_NET_CFG_RSS_IPV4_UDP (1 << 11) /* RSS for IPv4/UDP */ #define NFP_NET_CFG_RSS_IPV6_TCP (1 << 12) /* RSS for IPv6/TCP */ #define NFP_NET_CFG_RSS_IPV6_UDP (1 << 13) /* RSS for IPv6/UDP */ +#define NFP_NET_CFG_RSS_HFUNC 0xff000000 #define NFP_NET_CFG_RSS_TOEPLITZ (1 << 24) /* Use Toeplitz hash */ +#define NFP_NET_CFG_RSS_XOR (1 << 25) /* Use XOR as hash */ +#define NFP_NET_CFG_RSS_CRC32 (1 << 26) /* Use CRC32 as hash */ +#define NFP_NET_CFG_RSS_HFUNCS 3 #define NFP_NET_CFG_RSS_KEY (NFP_NET_CFG_RSS_BASE + 0x4) #define NFP_NET_CFG_RSS_KEY_SZ 0x28 #define NFP_NET_CFG_RSS_ITBL (NFP_NET_CFG_RSS_BASE + 0x4 + \ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c index 6e9372a18375..4077c59bf782 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c @@ -40,9 +40,9 @@ static struct dentry *nfp_dir; static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data) { - int fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p, rxd_cnt; struct nfp_net_r_vector *r_vec = file->private; struct nfp_net_rx_ring *rx_ring; + int fl_rd_p, fl_wr_p, rxd_cnt; struct nfp_net_rx_desc *rxd; struct nfp_net *nn; void *frag; @@ -54,19 +54,18 @@ static int 
nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data) goto out; nn = r_vec->nfp_net; rx_ring = r_vec->rx_ring; - if (!netif_running(nn->netdev)) + if (!netif_running(nn->dp.netdev)) goto out; rxd_cnt = rx_ring->cnt; fl_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_fl); fl_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_fl); - rx_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_rx); - rx_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_rx); - seq_printf(file, "RX[%02d]: H_RD=%d H_WR=%d FL_RD=%d FL_WR=%d RX_RD=%d RX_WR=%d\n", - rx_ring->idx, rx_ring->rd_p, rx_ring->wr_p, - fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p); + seq_printf(file, "RX[%02d,%02d]: cnt=%d dma=%pad host=%p H_RD=%d H_WR=%d FL_RD=%d FL_WR=%d\n", + rx_ring->idx, rx_ring->fl_qcidx, + rx_ring->cnt, &rx_ring->dma, rx_ring->rxds, + rx_ring->rd_p, rx_ring->wr_p, fl_rd_p, fl_wr_p); for (i = 0; i < rxd_cnt; i++) { rxd = &rx_ring->rxds[i]; @@ -89,10 +88,6 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data) seq_puts(file, " FL_RD"); if (i == fl_wr_p % rxd_cnt) seq_puts(file, " FL_WR"); - if (i == rx_rd_p % rxd_cnt) - seq_puts(file, " RX_RD"); - if (i == rx_wr_p % rxd_cnt) - seq_puts(file, " RX_WR"); seq_putc(file, '\n'); } @@ -143,7 +138,7 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data) if (!r_vec->nfp_net || !tx_ring) goto out; nn = r_vec->nfp_net; - if (!netif_running(nn->netdev)) + if (!netif_running(nn->dp.netdev)) goto out; txd_cnt = tx_ring->cnt; @@ -151,8 +146,11 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data) d_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q); d_wr_p = nfp_qcp_wr_ptr_read(tx_ring->qcp_q); - seq_printf(file, "TX[%02d]: H_RD=%d H_WR=%d D_RD=%d D_WR=%d\n", - tx_ring->idx, tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p); + seq_printf(file, "TX[%02d,%02d%s]: cnt=%d dma=%pad host=%p H_RD=%d H_WR=%d D_RD=%d D_WR=%d\n", + tx_ring->idx, tx_ring->qcidx, + tx_ring == r_vec->tx_ring ? 
"" : "xdp", + tx_ring->cnt, &tx_ring->dma, tx_ring->txds, + tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p); for (i = 0; i < txd_cnt; i++) { txd = &tx_ring->txds[i]; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 2649f7523c81..abbb47e60cc3 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -40,6 +40,7 @@ * Brad Petrus <brad.petrus@netronome.com> */ +#include <linux/bitfield.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> @@ -48,6 +49,7 @@ #include <linux/ethtool.h> #include "nfpcore/nfp.h" +#include "nfpcore/nfp_nsp.h" #include "nfp_net_ctrl.h" #include "nfp_net.h" @@ -126,9 +128,9 @@ static const struct _nfp_net_et_stats nfp_net_et_stats[] = { }; #define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats) -#define NN_ET_RVEC_STATS_LEN (nn->num_r_vecs * 3) +#define NN_ET_RVEC_STATS_LEN (nn->dp.num_r_vecs * 3) #define NN_ET_RVEC_GATHER_STATS 7 -#define NN_ET_QUEUE_STATS_LEN ((nn->num_tx_rings + nn->num_rx_rings) * 2) +#define NN_ET_QUEUE_STATS_LEN ((nn->dp.num_tx_rings + nn->dp.num_rx_rings) * 2) #define NN_ET_STATS_LEN (NN_ET_GLOBAL_STATS_LEN + NN_ET_RVEC_GATHER_STATS + \ NN_ET_RVEC_STATS_LEN + NN_ET_QUEUE_STATS_LEN) @@ -172,6 +174,119 @@ static void nfp_net_get_drvinfo(struct net_device *netdev, drvinfo->regdump_len = NFP_NET_CFG_BAR_SZ; } +/** + * nfp_net_get_link_ksettings - Get Link Speed settings + * @netdev: network interface device structure + * @cmd: ethtool command + * + * Reports speed settings based on info in the BAR provided by the fw. + */ +static int +nfp_net_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + static const u32 ls_to_ethtool[] = { + [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = 0, + [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN] = SPEED_UNKNOWN, + [NFP_NET_CFG_STS_LINK_RATE_1G] = SPEED_1000, + [NFP_NET_CFG_STS_LINK_RATE_10G] = SPEED_10000, + [NFP_NET_CFG_STS_LINK_RATE_25G] = SPEED_25000, + [NFP_NET_CFG_STS_LINK_RATE_40G] = SPEED_40000, + [NFP_NET_CFG_STS_LINK_RATE_50G] = SPEED_50000, + [NFP_NET_CFG_STS_LINK_RATE_100G] = SPEED_100000, + }; + struct nfp_net *nn = netdev_priv(netdev); + u32 sts, ls; + + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + cmd->base.port = PORT_OTHER; + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + + if (nn->eth_port) + cmd->base.autoneg = nn->eth_port->aneg != NFP_ANEG_DISABLED ? 
+ AUTONEG_ENABLE : AUTONEG_DISABLE; + + if (!netif_carrier_ok(netdev)) + return 0; + + /* Use link speed from ETH table if available, otherwise try the BAR */ + if (nn->eth_port) { + int err; + + if (nfp_net_link_changed_read_clear(nn)) { + err = nfp_net_refresh_eth_port(nn); + if (err) + return err; + } + + cmd->base.port = nn->eth_port->port_type; + cmd->base.speed = nn->eth_port->speed; + cmd->base.duplex = DUPLEX_FULL; + return 0; + } + + sts = nn_readl(nn, NFP_NET_CFG_STS); + + ls = FIELD_GET(NFP_NET_CFG_STS_LINK_RATE, sts); + if (ls == NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED) + return -EOPNOTSUPP; + + if (ls == NFP_NET_CFG_STS_LINK_RATE_UNKNOWN || + ls >= ARRAY_SIZE(ls_to_ethtool)) + return 0; + + cmd->base.speed = ls_to_ethtool[ls]; + cmd->base.duplex = DUPLEX_FULL; + + return 0; +} + +static int +nfp_net_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct nfp_net *nn = netdev_priv(netdev); + struct nfp_nsp *nsp; + int err; + + if (!nn->eth_port) + return -EOPNOTSUPP; + + if (netif_running(netdev)) { + nn_warn(nn, "Changing settings not allowed on an active interface. It may cause the port to be disabled until reboot.\n"); + return -EBUSY; + } + + nsp = nfp_eth_config_start(nn->cpp, nn->eth_port->index); + if (IS_ERR(nsp)) + return PTR_ERR(nsp); + + err = __nfp_eth_set_aneg(nsp, cmd->base.autoneg == AUTONEG_ENABLE ? + NFP_ANEG_AUTO : NFP_ANEG_DISABLED); + if (err) + goto err_bad_set; + if (cmd->base.speed != SPEED_UNKNOWN) { + u32 speed = cmd->base.speed / nn->eth_port->lanes; + + err = __nfp_eth_set_speed(nsp, speed); + if (err) + goto err_bad_set; + } + + err = nfp_eth_config_commit_end(nsp); + if (err > 0) + return 0; /* no change */ + + nfp_net_refresh_port_table(nn); + + return err; + +err_bad_set: + nfp_eth_config_cleanup_end(nsp); + return err; +} + static void nfp_net_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { @@ -179,30 +294,22 @@ static void nfp_net_get_ringparam(struct net_device *netdev, ring->rx_max_pending = NFP_NET_MAX_RX_DESCS; ring->tx_max_pending = NFP_NET_MAX_TX_DESCS; - ring->rx_pending = nn->rxd_cnt; - ring->tx_pending = nn->txd_cnt; + ring->rx_pending = nn->dp.rxd_cnt; + ring->tx_pending = nn->dp.txd_cnt; } static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt) { - struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL; - struct nfp_net_ring_set rx = { - .n_rings = nn->num_rx_rings, - .mtu = nn->netdev->mtu, - .dcnt = rxd_cnt, - }; - struct nfp_net_ring_set tx = { - .n_rings = nn->num_tx_rings, - .dcnt = txd_cnt, - }; + struct nfp_net_dp *dp; - if (nn->rxd_cnt != rxd_cnt) - reconfig_rx = &rx; - if (nn->txd_cnt != txd_cnt) - reconfig_tx = &tx; + dp = nfp_net_clone_dp(nn); + if (!dp) + return -ENOMEM; - return nfp_net_ring_reconfig(nn, &nn->xdp_prog, - reconfig_rx, reconfig_tx); + dp->rxd_cnt = rxd_cnt; + dp->txd_cnt = txd_cnt; + + return nfp_net_ring_reconfig(nn, dp, NULL); } static int nfp_net_set_ringparam(struct net_device *netdev, @@ -223,11 +330,11 @@ static int nfp_net_set_ringparam(struct net_device *netdev, txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS) return -EINVAL; - if (nn->rxd_cnt == rxd_cnt && nn->txd_cnt == txd_cnt) + if (nn->dp.rxd_cnt == rxd_cnt && nn->dp.txd_cnt == txd_cnt) return 0; nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n", - nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt); + nn->dp.rxd_cnt, rxd_cnt, nn->dp.txd_cnt, txd_cnt); return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt); } @@ -245,7 +352,7 @@ 
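/*
 * [Editor's note] The ringparam path above is what e.g.
 * "ethtool -G <ifname> rx 2048 tx 1024" exercises: the requested counts
 * are bounds-checked against NFP_NET_MIN/MAX_*_DESCS and then applied
 * through the same nfp_net_clone_dp() + nfp_net_ring_reconfig() flow.
 */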
static void nfp_net_get_strings(struct net_device *netdev, memcpy(p, nfp_net_et_stats[i].name, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } - for (i = 0; i < nn->num_r_vecs; i++) { + for (i = 0; i < nn->dp.num_r_vecs; i++) { sprintf(p, "rvec_%u_rx_pkts", i); p += ETH_GSTRING_LEN; sprintf(p, "rvec_%u_tx_pkts", i); @@ -267,13 +374,13 @@ static void nfp_net_get_strings(struct net_device *netdev, p += ETH_GSTRING_LEN; strncpy(p, "tx_lso", ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; - for (i = 0; i < nn->num_tx_rings; i++) { + for (i = 0; i < nn->dp.num_tx_rings; i++) { sprintf(p, "txq_%u_pkts", i); p += ETH_GSTRING_LEN; sprintf(p, "txq_%u_bytes", i); p += ETH_GSTRING_LEN; } - for (i = 0; i < nn->num_rx_rings; i++) { + for (i = 0; i < nn->dp.num_rx_rings; i++) { sprintf(p, "rxq_%u_pkts", i); p += ETH_GSTRING_LEN; sprintf(p, "rxq_%u_bytes", i); @@ -306,12 +413,12 @@ static void nfp_net_get_stats(struct net_device *netdev, break; case NFP_NET_DEV_ET_STATS: - io_p = nn->ctrl_bar + nfp_net_et_stats[i].off; + io_p = nn->dp.ctrl_bar + nfp_net_et_stats[i].off; data[i] = readq(io_p); break; } } - for (j = 0; j < nn->num_r_vecs; j++) { + for (j = 0; j < nn->dp.num_r_vecs; j++) { unsigned int start; do { @@ -337,16 +444,16 @@ static void nfp_net_get_stats(struct net_device *netdev, } for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++) data[i++] = gathered_stats[j]; - for (j = 0; j < nn->num_tx_rings; j++) { - io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j); + for (j = 0; j < nn->dp.num_tx_rings; j++) { + io_p = nn->dp.ctrl_bar + NFP_NET_CFG_TXR_STATS(j); data[i++] = readq(io_p); - io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8; + io_p = nn->dp.ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8; data[i++] = readq(io_p); } - for (j = 0; j < nn->num_rx_rings; j++) { - io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j); + for (j = 0; j < nn->dp.num_rx_rings; j++) { + io_p = nn->dp.ctrl_bar + NFP_NET_CFG_RXR_STATS(j); data[i++] = readq(io_p); - io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8; + io_p = nn->dp.ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8; data[i++] = readq(io_p); } } @@ -410,7 +517,7 @@ static int nfp_net_get_rxnfc(struct net_device *netdev, switch (cmd->cmd) { case ETHTOOL_GRXRINGS: - cmd->data = nn->num_rx_rings; + cmd->data = nn->dp.num_rx_rings; return 0; case ETHTOOL_GRXFH: return nfp_net_get_rss_hash_opts(nn, cmd); @@ -454,13 +561,13 @@ static int nfp_net_set_rss_hash_opt(struct nfp_net *nn, return -EINVAL; } - new_rss_cfg |= NFP_NET_CFG_RSS_TOEPLITZ; + new_rss_cfg |= FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc); new_rss_cfg |= NFP_NET_CFG_RSS_MASK; if (new_rss_cfg == nn->rss_cfg) return 0; - writel(new_rss_cfg, nn->ctrl_bar + NFP_NET_CFG_RSS_CTRL); + writel(new_rss_cfg, nn->dp.ctrl_bar + NFP_NET_CFG_RSS_CTRL); err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS); if (err) return err; @@ -496,7 +603,12 @@ static u32 nfp_net_get_rxfh_indir_size(struct net_device *netdev) static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev) { - return NFP_NET_CFG_RSS_KEY_SZ; + struct nfp_net *nn = netdev_priv(netdev); + + if (!(nn->cap & NFP_NET_CFG_CTRL_RSS)) + return -EOPNOTSUPP; + + return nfp_net_rss_key_sz(nn); } static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, @@ -512,9 +624,12 @@ static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++) indir[i] = nn->rss_itbl[i]; if (key) - memcpy(key, nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ); - if (hfunc) - *hfunc = ETH_RSS_HASH_TOP; + memcpy(key, nn->rss_key, nfp_net_rss_key_sz(nn)); + if 
(hfunc) { + *hfunc = nn->rss_hfunc; + if (*hfunc >= 1 << ETH_RSS_HASH_FUNCS_COUNT) + *hfunc = ETH_RSS_HASH_UNKNOWN; + } return 0; } @@ -527,14 +642,14 @@ static int nfp_net_set_rxfh(struct net_device *netdev, int i; if (!(nn->cap & NFP_NET_CFG_CTRL_RSS) || - !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == ETH_RSS_HASH_TOP)) + !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == nn->rss_hfunc)) return -EOPNOTSUPP; if (!key && !indir) return 0; if (key) { - memcpy(nn->rss_key, key, NFP_NET_CFG_RSS_KEY_SZ); + memcpy(nn->rss_key, key, nfp_net_rss_key_sz(nn)); nfp_net_rss_write_key(nn); } if (indir) { @@ -564,7 +679,7 @@ static void nfp_net_get_regs(struct net_device *netdev, regs->version = nn_readl(nn, NFP_NET_CFG_VERSION); for (i = 0; i < NFP_NET_CFG_BAR_SZ / sizeof(u32); i++) - regs_buf[i] = readl(nn->ctrl_bar + (i * sizeof(u32))); + regs_buf[i] = readl(nn->dp.ctrl_bar + (i * sizeof(u32))); } static int nfp_net_get_coalesce(struct net_device *netdev, @@ -676,7 +791,7 @@ static int nfp_net_set_coalesce(struct net_device *netdev, ec->tx_coalesce_usecs_high || ec->tx_max_coalesced_frames_high || ec->rate_sample_interval) - return -ENOTSUPP; + return -EOPNOTSUPP; /* Compute factor used to convert coalesce '_usecs' parameters to * ME timestamp ticks. There are 16 ME clock cycles for each timestamp @@ -736,16 +851,16 @@ static void nfp_net_get_channels(struct net_device *netdev, struct nfp_net *nn = netdev_priv(netdev); unsigned int num_tx_rings; - num_tx_rings = nn->num_tx_rings; - if (nn->xdp_prog) - num_tx_rings -= nn->num_rx_rings; + num_tx_rings = nn->dp.num_tx_rings; + if (nn->dp.xdp_prog) + num_tx_rings -= nn->dp.num_rx_rings; channel->max_rx = min(nn->max_rx_rings, nn->max_r_vecs); channel->max_tx = min(nn->max_tx_rings, nn->max_r_vecs); channel->max_combined = min(channel->max_rx, channel->max_tx); channel->max_other = NFP_NET_NON_Q_VECTORS; - channel->combined_count = min(nn->num_rx_rings, num_tx_rings); - channel->rx_count = nn->num_rx_rings - channel->combined_count; + channel->combined_count = min(nn->dp.num_rx_rings, num_tx_rings); + channel->rx_count = nn->dp.num_rx_rings - channel->combined_count; channel->tx_count = num_tx_rings - channel->combined_count; channel->other_count = NFP_NET_NON_Q_VECTORS; } @@ -753,29 +868,19 @@ static void nfp_net_get_channels(struct net_device *netdev, static int nfp_net_set_num_rings(struct nfp_net *nn, unsigned int total_rx, unsigned int total_tx) { - struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL; - struct nfp_net_ring_set rx = { - .n_rings = total_rx, - .mtu = nn->netdev->mtu, - .dcnt = nn->rxd_cnt, - }; - struct nfp_net_ring_set tx = { - .n_rings = total_tx, - .dcnt = nn->txd_cnt, - }; + struct nfp_net_dp *dp; - if (nn->num_rx_rings != total_rx) - reconfig_rx = &rx; - if (nn->num_stack_tx_rings != total_tx || - (nn->xdp_prog && reconfig_rx)) - reconfig_tx = &tx; + dp = nfp_net_clone_dp(nn); + if (!dp) + return -ENOMEM; - /* nfp_net_check_config() will catch tx.n_rings > nn->max_tx_rings */ - if (nn->xdp_prog) - tx.n_rings += total_rx; + dp->num_rx_rings = total_rx; + dp->num_tx_rings = total_tx; + /* nfp_net_check_config() will catch num_tx_rings > nn->max_tx_rings */ + if (dp->xdp_prog) + dp->num_tx_rings += total_rx; - return nfp_net_ring_reconfig(nn, &nn->xdp_prog, - reconfig_rx, reconfig_tx); + return nfp_net_ring_reconfig(nn, dp, NULL); } static int nfp_net_set_channels(struct net_device *netdev, @@ -823,6 +928,8 @@ static const struct ethtool_ops nfp_net_ethtool_ops = { .set_coalesce = nfp_net_set_coalesce, .get_channels = 
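/*
 * [Editor's note] A worked example of the accounting above, assuming
 * 8 RX rings with an XDP program attached: dp.num_tx_rings then counts
 * 8 hidden XDP rings on top of the stack queues, so get_channels
 * subtracts num_rx_rings again and reports only the stack queues; with
 * 8 stack TX rings that gives combined_count = 8, rx_count = tx_count = 0.
 */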
nfp_net_get_channels, .set_channels = nfp_net_set_channels, + .get_link_ksettings = nfp_net_get_link_ksettings, + .set_link_ksettings = nfp_net_set_link_ksettings, }; void nfp_net_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 3afcdc11480c..8cb87cbe1120 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -47,11 +47,12 @@ #include <linux/pci_regs.h> #include <linux/msi.h> #include <linux/random.h> +#include <linux/rtnetlink.h> #include "nfpcore/nfp.h" #include "nfpcore/nfp_cpp.h" #include "nfpcore/nfp_nffw.h" -#include "nfpcore/nfp_nsp_eth.h" +#include "nfpcore/nfp_nsp.h" #include "nfpcore/nfp6000_pcie.h" #include "nfp_net_ctrl.h" @@ -129,61 +130,61 @@ err_area: return (u8 __iomem *)ERR_PTR(err); } +/** + * nfp_net_get_mac_addr() - Get the MAC address. + * @nn: NFP Network structure + * @cpp: NFP CPP handle + * @id: NFP port id + * + * First try to get the MAC address from NSP ETH table. If that + * fails try HWInfo. As a last resort generate a random address. + */ static void -nfp_net_get_mac_addr_hwinfo(struct nfp_net *nn, struct nfp_cpp *cpp, - unsigned int id) +nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_cpp *cpp, unsigned int id) { + struct nfp_net_dp *dp = &nn->dp; u8 mac_addr[ETH_ALEN]; const char *mac_str; char name[32]; + if (nn->eth_port) { + ether_addr_copy(dp->netdev->dev_addr, nn->eth_port->mac_addr); + ether_addr_copy(dp->netdev->perm_addr, nn->eth_port->mac_addr); + return; + } + snprintf(name, sizeof(name), "eth%d.mac", id); mac_str = nfp_hwinfo_lookup(cpp, name); if (!mac_str) { - dev_warn(&nn->pdev->dev, - "Can't lookup MAC address. Generate\n"); - eth_hw_addr_random(nn->netdev); + dev_warn(dp->dev, "Can't lookup MAC address. Generate\n"); + eth_hw_addr_random(dp->netdev); return; } if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx", &mac_addr[0], &mac_addr[1], &mac_addr[2], &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) { - dev_warn(&nn->pdev->dev, + dev_warn(dp->dev, "Can't parse MAC address (%s). Generate.\n", mac_str); - eth_hw_addr_random(nn->netdev); + eth_hw_addr_random(dp->netdev); return; } - ether_addr_copy(nn->netdev->dev_addr, mac_addr); - ether_addr_copy(nn->netdev->perm_addr, mac_addr); + ether_addr_copy(dp->netdev->dev_addr, mac_addr); + ether_addr_copy(dp->netdev->perm_addr, mac_addr); } -/** - * nfp_net_get_mac_addr() - Get the MAC address. - * @nn: NFP Network structure - * @pf: NFP PF device structure - * @id: NFP port id - * - * First try to get the MAC address from NSP ETH table. If that - * fails try HWInfo. As a last resort generate a random address. 
- */ -static void -nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_pf *pf, unsigned int id) +static struct nfp_eth_table_port * +nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int id) { int i; - for (i = 0; pf->eth_tbl && i < pf->eth_tbl->count; i++) - if (pf->eth_tbl->ports[i].eth_index == id) { - const u8 *mac_addr = pf->eth_tbl->ports[i].mac_addr; + for (i = 0; eth_tbl && i < eth_tbl->count; i++) + if (eth_tbl->ports[i].eth_index == id) + return &eth_tbl->ports[i]; - ether_addr_copy(nn->netdev->dev_addr, mac_addr); - ether_addr_copy(nn->netdev->perm_addr, mac_addr); - return; - } - - nfp_net_get_mac_addr_hwinfo(nn, pf->cpp, id); + return NULL; } static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf *pf) @@ -282,6 +283,7 @@ static void nfp_net_pf_free_netdevs(struct nfp_pf *pf) while (!list_empty(&pf->ports)) { nn = list_first_entry(&pf->ports, struct nfp_net, port_list); list_del(&nn->port_list); + pf->num_netdevs--; nfp_net_netdev_free(nn); } @@ -290,7 +292,8 @@ static struct nfp_net * nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar, void __iomem *tx_bar, void __iomem *rx_bar, - int stride, struct nfp_net_fw_version *fw_ver) + int stride, struct nfp_net_fw_version *fw_ver, + struct nfp_eth_table_port *eth_port) { u32 n_tx_rings, n_rx_rings; struct nfp_net *nn; @@ -305,12 +308,13 @@ nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar, nn->cpp = pf->cpp; nn->fw_ver = *fw_ver; - nn->ctrl_bar = ctrl_bar; + nn->dp.ctrl_bar = ctrl_bar; nn->tx_bar = tx_bar; nn->rx_bar = rx_bar; - nn->is_vf = 0; + nn->dp.is_vf = 0; nn->stride_rx = stride; nn->stride_tx = stride; + nn->eth_port = eth_port; return nn; } @@ -322,7 +326,7 @@ nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn, int err; /* Get MAC address */ - nfp_net_get_mac_addr(nn, pf, id); + nfp_net_get_mac_addr(nn, pf->cpp, id); /* Get ME clock frequency from ctrl BAR * XXX for now frequency is hardcoded until we figure out how @@ -330,7 +334,7 @@ nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn, */ nn->me_freq_mhz = 1200; - err = nfp_net_netdev_init(nn->netdev); + err = nfp_net_netdev_init(nn->dp.netdev); if (err) return err; @@ -347,6 +351,7 @@ nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar, int stride, struct nfp_net_fw_version *fw_ver) { u32 prev_tx_base, prev_rx_base, tgt_tx_base, tgt_rx_base; + struct nfp_eth_table_port *eth_port; struct nfp_net *nn; unsigned int i; int err; @@ -362,17 +367,27 @@ nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar, prev_tx_base = tgt_tx_base; prev_rx_base = tgt_rx_base; - nn = nfp_net_pf_alloc_port_netdev(pf, ctrl_bar, tx_bar, rx_bar, - stride, fw_ver); - if (IS_ERR(nn)) { - err = PTR_ERR(nn); - goto err_free_prev; + eth_port = nfp_net_find_port(pf->eth_tbl, i); + if (eth_port && eth_port->override_changed) { + nfp_warn(pf->cpp, "Config changed for port #%d, reboot required before port will be operational\n", i); + } else { + nn = nfp_net_pf_alloc_port_netdev(pf, ctrl_bar, tx_bar, + rx_bar, stride, + fw_ver, eth_port); + if (IS_ERR(nn)) { + err = PTR_ERR(nn); + goto err_free_prev; + } + list_add_tail(&nn->port_list, &pf->ports); + pf->num_netdevs++; } - list_add_tail(&nn->port_list, &pf->ports); ctrl_bar += NFP_PF_CSR_SLICE_SIZE; } + if (list_empty(&pf->ports)) + return -ENODEV; + return 0; err_free_prev: @@ -399,7 +414,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf, /* Get MSI-X vectors */ wanted_irqs = 0;
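
The spawn path that follows requests a single MSI-X budget covering every netdev and then splits it across them. A minimal sketch of that proportional split (remaining vectors divided by remaining ports); the helper and the flat per_port array are illustrative, the driver walks its port list instead:

static void example_split_irqs(unsigned int num_irqs,
			       unsigned int num_ports,
			       unsigned int *per_port)
{
	unsigned int irqs_left = num_irqs;
	unsigned int ports_left = num_ports;
	unsigned int i;

	for (i = 0; i < num_ports; i++) {
		/* Each port takes a share of what is still unclaimed,
		 * so early ports cannot starve later ones.
		 */
		per_port[i] = irqs_left / ports_left;
		irqs_left -= per_port[i];
		ports_left--;
	}
}
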
list_for_each_entry(nn, &pf->ports, port_list) - wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->num_r_vecs; + wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs; pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries), GFP_KERNEL); if (!pf->irq_entries) { @@ -408,7 +423,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf, } num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries, - NFP_NET_MIN_PORT_IRQS * pf->num_ports, + NFP_NET_MIN_PORT_IRQS * pf->num_netdevs, wanted_irqs); if (!num_irqs) { nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n"); @@ -418,7 +433,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf, /* Distribute IRQs to ports */ irqs_left = num_irqs; - ports_left = pf->num_ports; + ports_left = pf->num_netdevs; list_for_each_entry(nn, &pf->ports, port_list) { unsigned int n; @@ -444,7 +459,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf, err_prev_deinit: list_for_each_entry_continue_reverse(nn, &pf->ports, port_list) { nfp_net_debugfs_dir_clean(&nn->debugfs_dir); - nfp_net_netdev_clean(nn->netdev); + nfp_net_netdev_clean(nn->dp.netdev); } nfp_net_irqs_disable(pf->pdev); err_vec_free: @@ -454,6 +469,108 @@ err_nn_free: return err; } +static void nfp_net_pci_remove_finish(struct nfp_pf *pf) +{ + nfp_net_debugfs_dir_clean(&pf->ddir); + + nfp_net_irqs_disable(pf->pdev); + kfree(pf->irq_entries); + + nfp_cpp_area_release_free(pf->rx_area); + nfp_cpp_area_release_free(pf->tx_area); + nfp_cpp_area_release_free(pf->ctrl_area); +} + +static void nfp_net_refresh_netdevs(struct work_struct *work) +{ + struct nfp_pf *pf = container_of(work, struct nfp_pf, + port_refresh_work); + struct nfp_eth_table *eth_table; + struct nfp_net *nn, *next; + + mutex_lock(&pf->port_lock); + + /* Check for nfp_net_pci_remove() racing against us */ + if (list_empty(&pf->ports)) + goto out; + + list_for_each_entry(nn, &pf->ports, port_list) + nfp_net_link_changed_read_clear(nn); + + eth_table = nfp_eth_read_ports(pf->cpp); + if (!eth_table) { + nfp_err(pf->cpp, "Error refreshing port config!\n"); + goto out; + } + + rtnl_lock(); + list_for_each_entry(nn, &pf->ports, port_list) { + if (!nn->eth_port) + continue; + nn->eth_port = nfp_net_find_port(eth_table, + nn->eth_port->eth_index); + } + rtnl_unlock(); + + kfree(pf->eth_tbl); + pf->eth_tbl = eth_table; + + list_for_each_entry_safe(nn, next, &pf->ports, port_list) { + if (!nn->eth_port) { + nfp_warn(pf->cpp, "Warning: port not present after reconfig\n"); + continue; + } + if (!nn->eth_port->override_changed) + continue; + + nn_warn(nn, "Port config changed, unregistering. 
Reboot required before port will be operational again.\n"); + + nfp_net_debugfs_dir_clean(&nn->debugfs_dir); + nfp_net_netdev_clean(nn->dp.netdev); + + list_del(&nn->port_list); + pf->num_netdevs--; + nfp_net_netdev_free(nn); + } + + if (list_empty(&pf->ports)) + nfp_net_pci_remove_finish(pf); +out: + mutex_unlock(&pf->port_lock); +} + +void nfp_net_refresh_port_table(struct nfp_net *nn) +{ + struct nfp_pf *pf = pci_get_drvdata(nn->pdev); + + schedule_work(&pf->port_refresh_work); +} + +int nfp_net_refresh_eth_port(struct nfp_net *nn) +{ + struct nfp_eth_table_port *eth_port; + struct nfp_eth_table *eth_table; + + eth_table = nfp_eth_read_ports(nn->cpp); + if (!eth_table) { + nn_err(nn, "Error refreshing port state table!\n"); + return -EIO; + } + + eth_port = nfp_net_find_port(eth_table, nn->eth_port->eth_index); + if (!eth_port) { + nn_err(nn, "Error finding state of the port!\n"); + kfree(eth_table); + return -EIO; + } + + memcpy(nn->eth_port, eth_port, sizeof(*eth_port)); + + kfree(eth_table); + + return 0; +} + /* * PCI device functions */ @@ -467,17 +584,23 @@ int nfp_net_pci_probe(struct nfp_pf *pf) int stride; int err; + INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_netdevs); + mutex_init(&pf->port_lock); + /* Verify that the board has completed initialization */ if (!nfp_is_ready(pf->cpp)) { nfp_err(pf->cpp, "NFP is not ready for NIC operation.\n"); return -EINVAL; } + mutex_lock(&pf->port_lock); pf->num_ports = nfp_net_pf_get_num_ports(pf); ctrl_bar = nfp_net_pf_map_ctrl_bar(pf); - if (!ctrl_bar) - return pf->fw_loaded ? -EINVAL : -EPROBE_DEFER; + if (!ctrl_bar) { + err = pf->fw_loaded ? -EINVAL : -EPROBE_DEFER; + goto err_unlock; + } nfp_net_get_fw_version(&fw_ver, ctrl_bar); if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) { @@ -551,6 +674,8 @@ int nfp_net_pci_probe(struct nfp_pf *pf) if (err) goto err_clean_ddir; + mutex_unlock(&pf->port_lock); + return 0; err_clean_ddir: @@ -560,6 +685,8 @@ err_unmap_tx: nfp_cpp_area_release_free(pf->tx_area); err_ctrl_unmap: nfp_cpp_area_release_free(pf->ctrl_area); +err_unlock: + mutex_unlock(&pf->port_lock); return err; } @@ -567,20 +694,21 @@ void nfp_net_pci_remove(struct nfp_pf *pf) { struct nfp_net *nn; + mutex_lock(&pf->port_lock); + if (list_empty(&pf->ports)) + goto out; + list_for_each_entry(nn, &pf->ports, port_list) { nfp_net_debugfs_dir_clean(&nn->debugfs_dir); - nfp_net_netdev_clean(nn->netdev); + nfp_net_netdev_clean(nn->dp.netdev); } nfp_net_pf_free_netdevs(pf); - nfp_net_debugfs_dir_clean(&pf->ddir); - - nfp_net_irqs_disable(pf->pdev); - kfree(pf->irq_entries); + nfp_net_pci_remove_finish(pf); +out: + mutex_unlock(&pf->port_lock); - nfp_cpp_area_release_free(pf->rx_area); - nfp_cpp_area_release_free(pf->tx_area); - nfp_cpp_area_release_free(pf->ctrl_area); + cancel_work_sync(&pf->port_refresh_work); } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c b/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c index 18a851eb3508..cc823df12c8a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c @@ -58,7 +58,7 @@ void nfp_net_filter_stats_timer(unsigned long data) spin_lock_bh(&nn->rx_filter_lock); - if (nn->ctrl & NFP_NET_CFG_CTRL_BPF) + if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) mod_timer(&nn->rx_filter_stats_timer, jiffies + NFP_NET_STAT_POLL_IVL); @@ -119,12 +119,12 @@ nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf) if (tc_no_actions(cls_bpf->exts)) return NN_ACT_DIRECT; - return -ENOTSUPP; + 
return -EOPNOTSUPP; } /* TC legacy mode */ if (!tc_single_action(cls_bpf->exts)) - return -ENOTSUPP; + return -EOPNOTSUPP; tcf_exts_to_list(cls_bpf->exts, &actions); list_for_each_entry(a, &actions, list) { @@ -132,11 +132,11 @@ nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf) return NN_ACT_TC_DROP; if (is_tcf_mirred_egress_redirect(a) && - tcf_mirred_ifindex(a) == nn->netdev->ifindex) + tcf_mirred_ifindex(a) == nn->dp.netdev->ifindex) return NN_ACT_TC_REDIR; } - return -ENOTSUPP; + return -EOPNOTSUPP; } static int @@ -152,7 +152,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn, int ret; if (!IS_ENABLED(CONFIG_BPF_SYSCALL)) - return -ENOTSUPP; + return -EOPNOTSUPP; ret = nfp_net_bpf_get_act(nn, cls_bpf); if (ret < 0) @@ -160,16 +160,15 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn, act = ret; max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32; - if (max_mtu < nn->netdev->mtu) { + if (max_mtu < nn->dp.netdev->mtu) { nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n"); - return -ENOTSUPP; + return -EOPNOTSUPP; } start_off = nn_readw(nn, NFP_NET_CFG_BPF_START); done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE); - *code = dma_zalloc_coherent(&nn->pdev->dev, code_sz, dma_addr, - GFP_KERNEL); + *code = dma_zalloc_coherent(nn->dp.dev, code_sz, dma_addr, GFP_KERNEL); if (!*code) return -ENOMEM; @@ -181,7 +180,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn, return 0; out: - dma_free_coherent(&nn->pdev->dev, code_sz, *code, *dma_addr); + dma_free_coherent(nn->dp.dev, code_sz, *code, *dma_addr); return ret; } @@ -194,7 +193,7 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags, u64 bpf_addr = dma_addr; int err; - nn->bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW); + nn->dp.bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW); if (dense_mode) bpf_addr |= NFP_NET_CFG_BPF_CFG_8CTX; @@ -208,13 +207,13 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags, nn_err(nn, "FW command error while loading BPF: %d\n", err); /* Enable passing packets through BPF function */ - nn->ctrl |= NFP_NET_CFG_CTRL_BPF; - nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl); + nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF; + nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl); err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN); if (err) nn_err(nn, "FW command error while enabling BPF: %d\n", err); - dma_free_coherent(&nn->pdev->dev, code_sz, code, dma_addr); + dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr); nfp_net_bpf_stats_reset(nn); mod_timer(&nn->rx_filter_stats_timer, jiffies + NFP_NET_STAT_POLL_IVL); @@ -222,16 +221,16 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags, static int nfp_net_bpf_stop(struct nfp_net *nn) { - if (!(nn->ctrl & NFP_NET_CFG_CTRL_BPF)) + if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)) return 0; spin_lock_bh(&nn->rx_filter_lock); - nn->ctrl &= ~NFP_NET_CFG_CTRL_BPF; + nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF; spin_unlock_bh(&nn->rx_filter_lock); - nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl); + nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl); del_timer_sync(&nn->rx_filter_stats_timer); - nn->bpf_offload_skip_sw = 0; + nn->dp.bpf_offload_skip_sw = 0; return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN); } @@ -255,7 +254,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf) * frames which didn't have BPF applied in the hardware should * be fine if software fallback is available, though. 
*/ - if (nn->bpf_offload_skip_sw) + if (nn->dp.bpf_offload_skip_sw) return -EBUSY; err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code, @@ -270,7 +269,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf) return 0; case TC_CLSBPF_ADD: - if (nn->ctrl & NFP_NET_CFG_CTRL_BPF) + if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) return -EBUSY; err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code, @@ -290,6 +289,6 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf) return nfp_net_bpf_stats_update(nn, cls_bpf); default: - return -ENOTSUPP; + return -EOPNOTSUPP; } } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c index 39407f7cc586..86e61be6f35c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c @@ -84,12 +84,12 @@ static void nfp_netvf_get_mac_addr(struct nfp_net *nn) put_unaligned_be16(nn_readw(nn, NFP_NET_CFG_MACADDR + 6), &mac_addr[4]); if (!is_valid_ether_addr(mac_addr)) { - eth_hw_addr_random(nn->netdev); + eth_hw_addr_random(nn->dp.netdev); return; } - ether_addr_copy(nn->netdev->dev_addr, mac_addr); - ether_addr_copy(nn->netdev->perm_addr, mac_addr); + ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr); + ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr); } static int nfp_netvf_pci_probe(struct pci_dev *pdev, @@ -210,8 +210,8 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, vf->nn = nn; nn->fw_ver = fw_ver; - nn->ctrl_bar = ctrl_bar; - nn->is_vf = 1; + nn->dp.ctrl_bar = ctrl_bar; + nn->dp.is_vf = 1; nn->stride_tx = stride; nn->stride_rx = stride; @@ -268,7 +268,8 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries, NFP_NET_MIN_PORT_IRQS, - NFP_NET_NON_Q_VECTORS + nn->num_r_vecs); + NFP_NET_NON_Q_VECTORS + + nn->dp.num_r_vecs); if (!num_irqs) { nn_warn(nn, "Unable to allocate MSI-X Vectors. 
Exiting\n"); err = -EIO; @@ -282,7 +283,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, */ nn->me_freq_mhz = 1200; - err = nfp_net_netdev_init(nn->netdev); + err = nfp_net_netdev_init(nn->dp.netdev); if (err) goto err_irqs_disable; @@ -327,7 +328,7 @@ static void nfp_netvf_pci_remove(struct pci_dev *pdev) nfp_net_debugfs_dir_clean(&nn->debugfs_dir); nfp_net_debugfs_dir_clean(&vf->ddir); - nfp_net_netdev_clean(nn->netdev); + nfp_net_netdev_clean(nn->dp.netdev); nfp_net_irqs_disable(pdev); @@ -337,7 +338,7 @@ static void nfp_netvf_pci_remove(struct pci_dev *pdev) } else { iounmap(vf->q_bar); } - iounmap(nn->ctrl_bar); + iounmap(nn->dp.ctrl_bar); nfp_net_netdev_free(nn); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h index 42cb720b696d..4df2ce261b3f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h @@ -48,32 +48,26 @@ const char *nfp_hwinfo_lookup(struct nfp_cpp *cpp, const char *lookup); -/* Implemented in nfp_nsp.c */ +/* Implemented in nfp_nsp.c, low level functions */ struct nfp_nsp; -struct firmware; - -struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp); -void nfp_nsp_close(struct nfp_nsp *state); -u16 nfp_nsp_get_abi_ver_major(struct nfp_nsp *state); -u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state); -int nfp_nsp_wait(struct nfp_nsp *state); -int nfp_nsp_device_soft_reset(struct nfp_nsp *state); -int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw); + +struct nfp_cpp *nfp_nsp_cpp(struct nfp_nsp *state); +bool nfp_nsp_config_modified(struct nfp_nsp *state); +void nfp_nsp_config_set_modified(struct nfp_nsp *state, bool modified); +void *nfp_nsp_config_entries(struct nfp_nsp *state); +unsigned int nfp_nsp_config_idx(struct nfp_nsp *state); +void nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries, + unsigned int idx); +void nfp_nsp_config_clear_state(struct nfp_nsp *state); int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size); int nfp_nsp_write_eth_table(struct nfp_nsp *state, const void *buf, unsigned int size); +int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size); /* Implemented in nfp_resource.c */ -#define NFP_RESOURCE_TBL_TARGET NFP_CPP_TARGET_MU -#define NFP_RESOURCE_TBL_BASE 0x8100000000ULL - -/* NFP Resource Table self-identifier */ -#define NFP_RESOURCE_TBL_NAME "nfp.res" -#define NFP_RESOURCE_TBL_KEY 0x00000000 /* Special key for entry 0 */ - -/* All other keys are CRC32-POSIX of the 8-byte identification string */ +/* All keys are CRC32-POSIX of the 8-byte identification string */ /* ARM/PCI vNIC Interfaces 0..3 */ #define NFP_RESOURCE_VNIC_PCI_0 "vnic.p0" diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index 15cc3e77cf6a..43dc68e01274 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -217,7 +217,7 @@ static resource_size_t nfp_bar_resource_start(struct nfp_bar *bar) #define TARGET_WIDTH_64 8 static int -compute_bar(struct nfp6000_pcie *nfp, struct nfp_bar *bar, +compute_bar(const struct nfp6000_pcie *nfp, const struct nfp_bar *bar, u32 *bar_config, u64 *bar_base, int tgt, int act, int tok, u64 offset, size_t size, int width) { @@ -410,35 +410,36 @@ find_matching_bar(struct nfp6000_pcie *nfp, /* Return EAGAIN if no resource is available */ static int -find_unused_bar_noblock(struct 
nfp6000_pcie *nfp, +find_unused_bar_noblock(const struct nfp6000_pcie *nfp, int tgt, int act, int tok, u64 offset, size_t size, int width) { - int n, invalid = 0; + int n, busy = 0; for (n = 0; n < nfp->bars; n++) { - struct nfp_bar *bar = &nfp->bar[n]; + const struct nfp_bar *bar = &nfp->bar[n]; int err; - if (bar->bitsize == 0) { - invalid++; - continue; - } - - if (atomic_read(&bar->refcnt) != 0) + if (!bar->bitsize) continue; /* Just check to see if we can make it fit... */ err = compute_bar(nfp, bar, NULL, NULL, tgt, act, tok, offset, size, width); + if (err) + continue; - if (err < 0) - invalid++; - else + if (!atomic_read(&bar->refcnt)) return n; + + busy++; } - return (n == invalid) ? -EINVAL : -EAGAIN; + if (WARN(!busy, "No suitable BAR found for request tgt:0x%x act:0x%x tok:0x%x off:0x%llx size:%zd width:%d\n", + tgt, act, tok, offset, size, width)) + return -EINVAL; + + return -EAGAIN; } static int diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c index 40108e66c654..e2abba4c3a3f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c @@ -65,39 +65,49 @@ struct nfp_cpp_resource { u64 end; }; -struct nfp_cpp_mutex { - struct list_head list; - struct nfp_cpp *cpp; - int target; - u16 usage; - u16 depth; - unsigned long long address; - u32 key; -}; - +/** + * struct nfp_cpp - main nfpcore device structure + * Following fields are read-only after probe() exits or netdevs are spawned. + * @dev: embedded device structure + * @op: low-level implementation ops + * @priv: private data of the low-level implementation + * @model: chip model + * @interface: chip interface id we are using to reach it + * @serial: chip serial number + * @imb_cat_table: CPP Mapping Table + * + * Following fields can be used only in probe() or with rtnl held: + * @hwinfo: HWInfo database fetched from the device + * @rtsym: firmware run time symbols + * + * Following fields use explicit locking: + * @resource_list: NFP CPP resource list + * @resource_lock: protects @resource_list + * + * @area_cache_list: cached areas for cpp/xpb read/write speed up + * @area_cache_mutex: protects @area_cache_list + * + * @waitq: area wait queue + */ struct nfp_cpp { struct device dev; - void *priv; /* Private data of the low-level implementation */ + void *priv; u32 model; u16 interface; u8 serial[NFP_SERIAL_LEN]; const struct nfp_cpp_operations *op; - struct list_head resource_list; /* NFP CPP resource list */ - struct list_head mutex_cache; /* Mutex cache */ + struct list_head resource_list; rwlock_t resource_lock; wait_queue_head_t waitq; - /* NFP6000 CPP Mapping Table */ u32 imb_cat_table[16]; - /* Cached areas for cpp/xpb readl/writel speedups */ - struct mutex area_cache_mutex; /* Lock for the area cache */ + struct mutex area_cache_mutex; struct list_head area_cache_list; - /* Cached information */ void *hwinfo; void *rtsym; }; @@ -187,24 +197,6 @@ void nfp_cpp_free(struct nfp_cpp *cpp) { struct nfp_cpp_area_cache *cache, *ctmp; struct nfp_cpp_resource *res, *rtmp; - struct nfp_cpp_mutex *mutex, *mtmp; - - /* There should be no mutexes in the cache at this point. */ - WARN_ON(!list_empty(&cpp->mutex_cache)); - /* .. but if there are, unlock them and complain. 
*/ - list_for_each_entry_safe(mutex, mtmp, &cpp->mutex_cache, list) { - dev_err(cpp->dev.parent, "Dangling mutex: @%d::0x%llx, %d locks held by %d owners\n", - mutex->target, (unsigned long long)mutex->address, - mutex->depth, mutex->usage); - - /* Forcing an unlock */ - mutex->depth = 1; - nfp_cpp_mutex_unlock(mutex); - - /* Forcing a free */ - mutex->usage = 1; - nfp_cpp_mutex_free(mutex); - } /* Remove all caches */ list_for_each_entry_safe(cache, ctmp, &cpp->area_cache_list, entry) { @@ -419,9 +411,43 @@ nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 dest, */ void nfp_cpp_area_free(struct nfp_cpp_area *area) { + if (atomic_read(&area->refcount)) + nfp_warn(area->cpp, "Warning: freeing busy area\n"); nfp_cpp_area_put(area); } +static bool nfp_cpp_area_acquire_try(struct nfp_cpp_area *area, int *status) +{ + *status = area->cpp->op->area_acquire(area); + + return *status != -EAGAIN; +} + +static int __nfp_cpp_area_acquire(struct nfp_cpp_area *area) +{ + int err, status; + + if (atomic_inc_return(&area->refcount) > 1) + return 0; + + if (!area->cpp->op->area_acquire) + return 0; + + err = wait_event_interruptible(area->cpp->waitq, + nfp_cpp_area_acquire_try(area, &status)); + if (!err) + err = status; + if (err) { + nfp_warn(area->cpp, "Warning: area wait failed: %d\n", err); + atomic_dec(&area->refcount); + return err; + } + + nfp_cpp_area_get(area); + + return 0; +} + /** * nfp_cpp_area_acquire() - lock down a CPP area for access * @area: CPP area handle @@ -433,27 +459,13 @@ void nfp_cpp_area_free(struct nfp_cpp_area *area) */ int nfp_cpp_area_acquire(struct nfp_cpp_area *area) { - mutex_lock(&area->mutex); - if (atomic_inc_return(&area->refcount) == 1) { - int (*a_a)(struct nfp_cpp_area *); - - a_a = area->cpp->op->area_acquire; - if (a_a) { - int err; + int ret; - wait_event_interruptible(area->cpp->waitq, - (err = a_a(area)) != -EAGAIN); - if (err < 0) { - atomic_dec(&area->refcount); - mutex_unlock(&area->mutex); - return err; - } - } - } + mutex_lock(&area->mutex); + ret = __nfp_cpp_area_acquire(area); mutex_unlock(&area->mutex); - nfp_cpp_area_get(area); - return 0; + return ret; } /** @@ -829,10 +841,7 @@ area_cache_get(struct nfp_cpp *cpp, u32 id, * the need for special case code below when * checking against available cache size. 
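
A quiet but important part of the area_cache_get() rework below is lock ordering: the list_empty() test moves under area_cache_mutex, so the emptiness check and the list walk observe a consistent list. A minimal sketch of the check-under-lock pattern, simplified in that the real function keeps the mutex held and lets area_cache_put() release it:

static struct nfp_cpp_area_cache *
example_cache_lookup(struct nfp_cpp *cpp, u32 id)
{
	struct nfp_cpp_area_cache *cache, *ret = NULL;

	mutex_lock(&cpp->area_cache_mutex);

	/* Testing emptiness before taking the mutex would race with
	 * concurrent insertions and removals.
	 */
	if (list_empty(&cpp->area_cache_list))
		goto out;

	list_for_each_entry(cache, &cpp->area_cache_list, entry)
		if (cache->id == id) {
			ret = cache;
			break;
		}
out:
	mutex_unlock(&cpp->area_cache_mutex);
	return ret;
}
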
*/ - if (length == 0) - return NULL; - - if (list_empty(&cpp->area_cache_list) || id == 0) + if (length == 0 || id == 0) return NULL; /* Remap from cpp_island to cpp_target */ @@ -840,10 +849,15 @@ area_cache_get(struct nfp_cpp *cpp, u32 id, if (err < 0) return NULL; - addr += *offset; - mutex_lock(&cpp->area_cache_mutex); + if (list_empty(&cpp->area_cache_list)) { + mutex_unlock(&cpp->area_cache_mutex); + return NULL; + } + + addr += *offset; + /* See if we have a match */ list_for_each_entry(cache, &cpp->area_cache_list, entry) { if (id == cache->id && @@ -937,12 +951,14 @@ int nfp_cpp_read(struct nfp_cpp *cpp, u32 destination, return -ENOMEM; err = nfp_cpp_area_acquire(area); - if (err) - goto out; + if (err) { + nfp_cpp_area_free(area); + return err; + } } err = nfp_cpp_area_read(area, offset, kernel_vaddr, length); -out: + if (cache) area_cache_put(cpp, cache); else @@ -979,13 +995,14 @@ int nfp_cpp_write(struct nfp_cpp *cpp, u32 destination, return -ENOMEM; err = nfp_cpp_area_acquire(area); - if (err) - goto out; + if (err) { + nfp_cpp_area_free(area); + return err; + } } err = nfp_cpp_area_write(area, offset, kernel_vaddr, length); -out: if (cache) area_cache_put(cpp, cache); else @@ -1127,7 +1144,6 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops, rwlock_init(&cpp->resource_lock); init_waitqueue_head(&cpp->waitq); lockdep_set_class(&cpp->resource_lock, &nfp_cpp_resource_lock_key); - INIT_LIST_HEAD(&cpp->mutex_cache); INIT_LIST_HEAD(&cpp->resource_list); INIT_LIST_HEAD(&cpp->area_cache_list); mutex_init(&cpp->area_cache_mutex); @@ -1425,322 +1441,3 @@ void *nfp_cpp_explicit_priv(struct nfp_cpp_explicit *cpp_explicit) { return &cpp_explicit[1]; } - -/* THIS FUNCTION IS NOT EXPORTED */ -static u32 nfp_mutex_locked(u16 interface) -{ - return (u32)interface << 16 | 0x000f; -} - -static u32 nfp_mutex_unlocked(u16 interface) -{ - return (u32)interface << 16 | 0x0000; -} - -static bool nfp_mutex_is_locked(u32 val) -{ - return (val & 0xffff) == 0x000f; -} - -static bool nfp_mutex_is_unlocked(u32 val) -{ - return (val & 0xffff) == 0000; -} - -/* If you need more than 65536 recursive locks, please rethink your code. */ -#define MUTEX_DEPTH_MAX 0xffff - -static int -nfp_cpp_mutex_validate(u16 interface, int *target, unsigned long long address) -{ - /* Not permitted on invalid interfaces */ - if (NFP_CPP_INTERFACE_TYPE_of(interface) == - NFP_CPP_INTERFACE_TYPE_INVALID) - return -EINVAL; - - /* Address must be 64-bit aligned */ - if (address & 7) - return -EINVAL; - - if (*target != NFP_CPP_TARGET_MU) - return -EINVAL; - - return 0; -} - -/** - * nfp_cpp_mutex_init() - Initialize a mutex location - * @cpp: NFP CPP handle - * @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU) - * @address: Offset into the address space of the NFP CPP target ID - * @key: Unique 32-bit value for this mutex - * - * The CPP target:address must point to a 64-bit aligned location, and - * will initialize 64 bits of data at the location. - * - * This creates the initial mutex state, as locked by this - * nfp_cpp_interface(). - * - * This function should only be called when setting up - * the initial lock state upon boot-up of the system. 
- * - * Return: 0 on success, or -errno on failure - */ -int nfp_cpp_mutex_init(struct nfp_cpp *cpp, - int target, unsigned long long address, u32 key) -{ - const u32 muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */ - u16 interface = nfp_cpp_interface(cpp); - int err; - - err = nfp_cpp_mutex_validate(interface, &target, address); - if (err) - return err; - - err = nfp_cpp_writel(cpp, muw, address + 4, key); - if (err) - return err; - - err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_locked(interface)); - if (err) - return err; - - return 0; -} - -/** - * nfp_cpp_mutex_alloc() - Create a mutex handle - * @cpp: NFP CPP handle - * @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU) - * @address: Offset into the address space of the NFP CPP target ID - * @key: 32-bit unique key (must match the key at this location) - * - * The CPP target:address must point to a 64-bit aligned location, and - * reserve 64 bits of data at the location for use by the handle. - * - * Only target/address pairs that point to entities that support the - * MU Atomic Engine's CmpAndSwap32 command are supported. - * - * Return: A non-NULL struct nfp_cpp_mutex * on success, NULL on failure. - */ -struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target, - unsigned long long address, u32 key) -{ - const u32 mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */ - u16 interface = nfp_cpp_interface(cpp); - struct nfp_cpp_mutex *mutex; - int err; - u32 tmp; - - err = nfp_cpp_mutex_validate(interface, &target, address); - if (err) - return NULL; - - /* Look for mutex on cache list */ - list_for_each_entry(mutex, &cpp->mutex_cache, list) { - if (mutex->target == target && mutex->address == address) { - mutex->usage++; - return mutex; - } - } - - err = nfp_cpp_readl(cpp, mur, address + 4, &tmp); - if (err < 0) - return NULL; - - if (tmp != key) - return NULL; - - mutex = kzalloc(sizeof(*mutex), GFP_KERNEL); - if (!mutex) - return NULL; - - mutex->cpp = cpp; - mutex->target = target; - mutex->address = address; - mutex->key = key; - mutex->depth = 0; - mutex->usage = 1; - - /* Add mutex to cache list */ - list_add(&mutex->list, &cpp->mutex_cache); - - return mutex; -} - -/** - * nfp_cpp_mutex_free() - Free a mutex handle - does not alter the lock state - * @mutex: NFP CPP Mutex handle - */ -void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex) -{ - if (--mutex->usage) - return; - - /* Remove mutex from cache */ - list_del(&mutex->list); - kfree(mutex); -} - -/** - * nfp_cpp_mutex_lock() - Lock a mutex handle, using the NFP MU Atomic Engine - * @mutex: NFP CPP Mutex handle - * - * Return: 0 on success, or -errno on failure - */ -int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex) -{ - unsigned long warn_at = jiffies + 15 * HZ; - unsigned int timeout_ms = 1; - int err; - - /* We can't use a waitqueue here, because the unlocker - * might be on a separate CPU. - * - * So just wait for now. 
- */ - for (;;) { - err = nfp_cpp_mutex_trylock(mutex); - if (err != -EBUSY) - break; - - err = msleep_interruptible(timeout_ms); - if (err != 0) - return -ERESTARTSYS; - - if (time_is_before_eq_jiffies(warn_at)) { - warn_at = jiffies + 60 * HZ; - dev_warn(mutex->cpp->dev.parent, - "Warning: waiting for NFP mutex [usage:%hd depth:%hd target:%d addr:%llx key:%08x]\n", - mutex->usage, mutex->depth, - mutex->target, mutex->address, mutex->key); - } - } - - return err; -} - -/** - * nfp_cpp_mutex_unlock() - Unlock a mutex handle, using the MU Atomic Engine - * @mutex: NFP CPP Mutex handle - * - * Return: 0 on success, or -errno on failure - */ -int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex) -{ - const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */ - const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */ - struct nfp_cpp *cpp = mutex->cpp; - u32 key, value; - u16 interface; - int err; - - interface = nfp_cpp_interface(cpp); - - if (mutex->depth > 1) { - mutex->depth--; - return 0; - } - - err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key); - if (err < 0) - return err; - - if (key != mutex->key) - return -EPERM; - - err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value); - if (err < 0) - return err; - - if (value != nfp_mutex_locked(interface)) - return -EACCES; - - err = nfp_cpp_writel(cpp, muw, mutex->address, - nfp_mutex_unlocked(interface)); - if (err < 0) - return err; - - mutex->depth = 0; - return 0; -} - -/** - * nfp_cpp_mutex_trylock() - Attempt to lock a mutex handle - * @mutex: NFP CPP Mutex handle - * - * Return: 0 if the lock succeeded, -errno on failure - */ -int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex) -{ - const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */ - const u32 mus = NFP_CPP_ID(mutex->target, 5, 3); /* test_set_imm */ - const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */ - struct nfp_cpp *cpp = mutex->cpp; - u32 key, value, tmp; - int err; - - if (mutex->depth > 0) { - if (mutex->depth == MUTEX_DEPTH_MAX) - return -E2BIG; - mutex->depth++; - return 0; - } - - /* Verify that the lock marker is not damaged */ - err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key); - if (err < 0) - return err; - - if (key != mutex->key) - return -EPERM; - - /* Compare against the unlocked state, and if true, - * write the interface id into the top 16 bits, and - * mark as locked. - */ - value = nfp_mutex_locked(nfp_cpp_interface(cpp)); - - /* We use test_set_imm here, as it implies a read - * of the current state, and sets the bits in the - * bytemask of the command to 1s. Since the mutex - * is guaranteed to be 64-bit aligned, the bytemask - * of this 32-bit command is ensured to be 8'b00001111, - * which implies that the lower 4 bits will be set to - * ones regardless of the initial state. - * - * Since this is a 'Readback' operation, with no Pull - * data, we can treat this as a normal Push (read) - * atomic, which returns the original value. - */ - err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp); - if (err < 0) - return err; - - /* Was it unlocked? */ - if (nfp_mutex_is_unlocked(tmp)) { - /* The read value can only be 0x....0000 in the unlocked state. - * If there was another contending for this lock, then - * the lock state would be 0x....000f - */ - - /* Write our owner ID into the lock - * While not strictly necessary, this helps with - * debug and bookkeeping. 
- */ - err = nfp_cpp_writel(cpp, muw, mutex->address, value); - if (err < 0) - return err; - - mutex->depth = 1; - return 0; - } - - /* Already locked by us? Success! */ - if (tmp == value) { - mutex->depth = 1; - return 0; - } - - return nfp_mutex_is_locked(tmp) ? -EBUSY : -EINVAL; -} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c new file mode 100644 index 000000000000..8a99c189efa8 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c @@ -0,0 +1,345 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/jiffies.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/wait.h> + +#include "nfp_cpp.h" +#include "nfp6000/nfp6000.h" + +struct nfp_cpp_mutex { + struct nfp_cpp *cpp; + int target; + u16 depth; + unsigned long long address; + u32 key; +}; + +static u32 nfp_mutex_locked(u16 interface) +{ + return (u32)interface << 16 | 0x000f; +} + +static u32 nfp_mutex_unlocked(u16 interface) +{ + return (u32)interface << 16 | 0x0000; +} + +static bool nfp_mutex_is_locked(u32 val) +{ + return (val & 0xffff) == 0x000f; +} + +static bool nfp_mutex_is_unlocked(u32 val) +{ + return (val & 0xffff) == 0000; +} + +/* If you need more than 65536 recursive locks, please rethink your code. 
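
The implementation moving into nfp_mutex.c keeps the same external API: alloc, lock or trylock, unlock, free. A minimal usage sketch; the address and key values are purely illustrative, in practice they come from the resource table:

static int example_with_nfp_mutex(struct nfp_cpp *cpp)
{
	struct nfp_cpp_mutex *mutex;
	int err;

	/* Target must be NFP_CPP_TARGET_MU; 0x40/0xcafe are made up. */
	mutex = nfp_cpp_mutex_alloc(cpp, NFP_CPP_TARGET_MU, 0x40, 0xcafe);
	if (!mutex)
		return -EINVAL;

	err = nfp_cpp_mutex_lock(mutex);	/* polls; may sleep */
	if (err)
		goto out_free;

	/* ... access the shared device resource ... */

	err = nfp_cpp_mutex_unlock(mutex);
out_free:
	nfp_cpp_mutex_free(mutex);	/* does not alter the lock state */
	return err;
}
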
*/ +#define NFP_MUTEX_DEPTH_MAX 0xffff + +static int +nfp_cpp_mutex_validate(u16 interface, int *target, unsigned long long address) +{ + /* Not permitted on invalid interfaces */ + if (NFP_CPP_INTERFACE_TYPE_of(interface) == + NFP_CPP_INTERFACE_TYPE_INVALID) + return -EINVAL; + + /* Address must be 64-bit aligned */ + if (address & 7) + return -EINVAL; + + if (*target != NFP_CPP_TARGET_MU) + return -EINVAL; + + return 0; +} + +/** + * nfp_cpp_mutex_init() - Initialize a mutex location + * @cpp: NFP CPP handle + * @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU) + * @address: Offset into the address space of the NFP CPP target ID + * @key: Unique 32-bit value for this mutex + * + * The CPP target:address must point to a 64-bit aligned location, and + * will initialize 64 bits of data at the location. + * + * This creates the initial mutex state, as locked by this + * nfp_cpp_interface(). + * + * This function should only be called when setting up + * the initial lock state upon boot-up of the system. + * + * Return: 0 on success, or -errno on failure + */ +int nfp_cpp_mutex_init(struct nfp_cpp *cpp, + int target, unsigned long long address, u32 key) +{ + const u32 muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */ + u16 interface = nfp_cpp_interface(cpp); + int err; + + err = nfp_cpp_mutex_validate(interface, &target, address); + if (err) + return err; + + err = nfp_cpp_writel(cpp, muw, address + 4, key); + if (err) + return err; + + err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_locked(interface)); + if (err) + return err; + + return 0; +} + +/** + * nfp_cpp_mutex_alloc() - Create a mutex handle + * @cpp: NFP CPP handle + * @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU) + * @address: Offset into the address space of the NFP CPP target ID + * @key: 32-bit unique key (must match the key at this location) + * + * The CPP target:address must point to a 64-bit aligned location, and + * reserve 64 bits of data at the location for use by the handle. + * + * Only target/address pairs that point to entities that support the + * MU Atomic Engine's CmpAndSwap32 command are supported. + * + * Return: A non-NULL struct nfp_cpp_mutex * on success, NULL on failure. + */ +struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target, + unsigned long long address, u32 key) +{ + const u32 mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */ + u16 interface = nfp_cpp_interface(cpp); + struct nfp_cpp_mutex *mutex; + int err; + u32 tmp; + + err = nfp_cpp_mutex_validate(interface, &target, address); + if (err) + return NULL; + + err = nfp_cpp_readl(cpp, mur, address + 4, &tmp); + if (err < 0) + return NULL; + + if (tmp != key) + return NULL; + + mutex = kzalloc(sizeof(*mutex), GFP_KERNEL); + if (!mutex) + return NULL; + + mutex->cpp = cpp; + mutex->target = target; + mutex->address = address; + mutex->key = key; + mutex->depth = 0; + + return mutex; +} + +/** + * nfp_cpp_mutex_free() - Free a mutex handle - does not alter the lock state + * @mutex: NFP CPP Mutex handle + */ +void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex) +{ + kfree(mutex); +} + +/** + * nfp_cpp_mutex_lock() - Lock a mutex handle, using the NFP MU Atomic Engine + * @mutex: NFP CPP Mutex handle + * + * Return: 0 on success, or -errno on failure + */ +int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex) +{ + unsigned long warn_at = jiffies + 15 * HZ; + unsigned int timeout_ms = 1; + int err; + + /* We can't use a waitqueue here, because the unlocker + * might be on a separate CPU. 
+ * + * So just wait for now. + */ + for (;;) { + err = nfp_cpp_mutex_trylock(mutex); + if (err != -EBUSY) + break; + + err = msleep_interruptible(timeout_ms); + if (err != 0) + return -ERESTARTSYS; + + if (time_is_before_eq_jiffies(warn_at)) { + warn_at = jiffies + 60 * HZ; + nfp_warn(mutex->cpp, + "Warning: waiting for NFP mutex [depth:%hd target:%d addr:%llx key:%08x]\n", + mutex->depth, + mutex->target, mutex->address, mutex->key); + } + } + + return err; +} + +/** + * nfp_cpp_mutex_unlock() - Unlock a mutex handle, using the MU Atomic Engine + * @mutex: NFP CPP Mutex handle + * + * Return: 0 on success, or -errno on failure + */ +int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex) +{ + const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */ + const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */ + struct nfp_cpp *cpp = mutex->cpp; + u32 key, value; + u16 interface; + int err; + + interface = nfp_cpp_interface(cpp); + + if (mutex->depth > 1) { + mutex->depth--; + return 0; + } + + err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key); + if (err < 0) + return err; + + if (key != mutex->key) + return -EPERM; + + err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value); + if (err < 0) + return err; + + if (value != nfp_mutex_locked(interface)) + return -EACCES; + + err = nfp_cpp_writel(cpp, muw, mutex->address, + nfp_mutex_unlocked(interface)); + if (err < 0) + return err; + + mutex->depth = 0; + return 0; +} + +/** + * nfp_cpp_mutex_trylock() - Attempt to lock a mutex handle + * @mutex: NFP CPP Mutex handle + * + * Return: 0 if the lock succeeded, -errno on failure + */ +int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex) +{ + const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */ + const u32 mus = NFP_CPP_ID(mutex->target, 5, 3); /* test_set_imm */ + const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */ + struct nfp_cpp *cpp = mutex->cpp; + u32 key, value, tmp; + int err; + + if (mutex->depth > 0) { + if (mutex->depth == NFP_MUTEX_DEPTH_MAX) + return -E2BIG; + mutex->depth++; + return 0; + } + + /* Verify that the lock marker is not damaged */ + err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key); + if (err < 0) + return err; + + if (key != mutex->key) + return -EPERM; + + /* Compare against the unlocked state, and if true, + * write the interface id into the top 16 bits, and + * mark as locked. + */ + value = nfp_mutex_locked(nfp_cpp_interface(cpp)); + + /* We use test_set_imm here, as it implies a read + * of the current state, and sets the bits in the + * bytemask of the command to 1s. Since the mutex + * is guaranteed to be 64-bit aligned, the bytemask + * of this 32-bit command is ensured to be 8'b00001111, + * which implies that the lower 4 bits will be set to + * ones regardless of the initial state. + * + * Since this is a 'Readback' operation, with no Pull + * data, we can treat this as a normal Push (read) + * atomic, which returns the original value. + */ + err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp); + if (err < 0) + return err; + + /* Was it unlocked? */ + if (nfp_mutex_is_unlocked(tmp)) { + /* The read value can only be 0x....0000 in the unlocked state. + * If there was another contending for this lock, then + * the lock state would be 0x....000f + */ + + /* Write our owner ID into the lock + * While not strictly necessary, this helps with + * debug and bookkeeping. 
+ */ + err = nfp_cpp_writel(cpp, muw, mutex->address, value); + if (err < 0) + return err; + + mutex->depth = 1; + return 0; + } + + return nfp_mutex_is_locked(tmp) ? -EBUSY : -EINVAL; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index 34c50987c377..2fa9247bb23d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -49,6 +49,7 @@ #include "nfp.h" #include "nfp_cpp.h" +#include "nfp_nsp.h" /* Offsets relative to the CSR base */ #define NSP_STATUS 0x00 @@ -77,7 +78,7 @@ #define NSP_MAGIC 0xab10 #define NSP_MAJOR 0 -#define NSP_MINOR (__MAX_SPCODE - 1) +#define NSP_MINOR 8 #define NSP_CODE_MAJOR GENMASK(15, 12) #define NSP_CODE_MINOR GENMASK(11, 0) @@ -92,8 +93,18 @@ enum nfp_nsp_cmd { SPCODE_FW_LOAD = 6, /* Load fw from buffer, len in option */ SPCODE_ETH_RESCAN = 7, /* Rescan ETHs, write ETH_TABLE to buf */ SPCODE_ETH_CONTROL = 8, /* Update media config from buffer */ + SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */ +}; - __MAX_SPCODE, +static const struct { + int code; + const char *msg; +} nsp_errors[] = { + { 6010, "could not map to phy for port" }, + { 6011, "not an allowed rate/lanes for port" }, + { 6012, "not an allowed rate/lanes for port" }, + { 6013, "high/low error, change other port first" }, + { 6014, "config not found in flash" }, }; struct nfp_nsp { @@ -103,8 +114,63 @@ struct nfp_nsp { u16 major; u16 minor; } ver; + + /* Eth table config state */ + bool modified; + unsigned int idx; + void *entries; }; +struct nfp_cpp *nfp_nsp_cpp(struct nfp_nsp *state) +{ + return state->cpp; +} + +bool nfp_nsp_config_modified(struct nfp_nsp *state) +{ + return state->modified; +} + +void nfp_nsp_config_set_modified(struct nfp_nsp *state, bool modified) +{ + state->modified = modified; +} + +void *nfp_nsp_config_entries(struct nfp_nsp *state) +{ + return state->entries; +} + +unsigned int nfp_nsp_config_idx(struct nfp_nsp *state) +{ + return state->idx; +} + +void +nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries, unsigned int idx) +{ + state->entries = entries; + state->idx = idx; +} + +void nfp_nsp_config_clear_state(struct nfp_nsp *state) +{ + state->entries = NULL; + state->idx = 0; +} + +static void nfp_nsp_print_extended_error(struct nfp_nsp *state, u32 ret_val) +{ + int i; + + if (!ret_val) + return; + + for (i = 0; i < ARRAY_SIZE(nsp_errors); i++) + if (ret_val == nsp_errors[i].code) + nfp_err(state->cpp, "err msg: %s\n", nsp_errors[i].msg); +} + static int nfp_nsp_check(struct nfp_nsp *state) { struct nfp_cpp *cpp = state->cpp; @@ -209,9 +275,8 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, if ((*reg & mask) == val) return 0; - err = msleep_interruptible(100); - if (err) - return err; + if (msleep_interruptible(25)) + return -ERESTARTSYS; if (time_after(start_time, wait_until)) return -ETIMEDOUT; @@ -228,7 +293,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, * * Return: 0 for success with no result * - * 1..255 for NSP completion with a result code + * positive value for NSP completion with a result code * * -EAGAIN if the NSP is not yet present * -ENODEV if the NSP is not a supported model @@ -239,7 +304,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp, u64 buff_addr) { - u64 reg, nsp_base, nsp_buffer, nsp_status, nsp_command; + u64 reg, ret_val, nsp_base, nsp_buffer, nsp_status, nsp_command; struct nfp_cpp *cpp = 
state->cpp; u32 nsp_cpp; int err; @@ -292,18 +357,20 @@ static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, return err; } + err = nfp_cpp_readq(cpp, nsp_cpp, nsp_command, &ret_val); + if (err < 0) + return err; + ret_val = FIELD_GET(NSP_COMMAND_OPTION, ret_val); + err = FIELD_GET(NSP_STATUS_RESULT, reg); if (err) { - nfp_warn(cpp, "Result (error) code set: %d command: %d\n", - -err, code); + nfp_warn(cpp, "Result (error) code set: %d (%d) command: %d\n", - -err, (int)ret_val, code); + nfp_nsp_print_extended_error(state, ret_val); return -err; } - err = nfp_cpp_readq(cpp, nsp_cpp, nsp_command, &reg); - if (err < 0) - return err; - - return FIELD_GET(NSP_COMMAND_OPTION, reg); + return ret_val; } static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, @@ -380,9 +447,10 @@ int nfp_nsp_wait(struct nfp_nsp *state) if (err != -EAGAIN) break; - err = msleep_interruptible(100); - if (err) + if (msleep_interruptible(25)) { + err = -ERESTARTSYS; break; + } if (time_after(start_time, wait_until)) { err = -ETIMEDOUT; @@ -424,3 +492,9 @@ int nfp_nsp_write_eth_table(struct nfp_nsp *state, return nfp_nsp_command_buf(state, SPCODE_ETH_CONTROL, size, buf, size, NULL, 0); } + +int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size) +{ + return nfp_nsp_command_buf(state, SPCODE_NSP_IDENTIFY, size, NULL, 0, + buf, size); +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h new file mode 100644 index 000000000000..36b21e4dc56d --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h @@ -0,0 +1,174 @@ +/* + * Copyright (C) 2015-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ */ + +#ifndef NSP_NSP_H +#define NSP_NSP_H 1 + +#include <linux/types.h> +#include <linux/if_ether.h> + +struct firmware; +struct nfp_cpp; +struct nfp_nsp; + +struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp); +void nfp_nsp_close(struct nfp_nsp *state); +u16 nfp_nsp_get_abi_ver_major(struct nfp_nsp *state); +u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state); +int nfp_nsp_wait(struct nfp_nsp *state); +int nfp_nsp_device_soft_reset(struct nfp_nsp *state); +int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw); + +enum nfp_eth_interface { + NFP_INTERFACE_NONE = 0, + NFP_INTERFACE_SFP = 1, + NFP_INTERFACE_SFPP = 10, + NFP_INTERFACE_SFP28 = 28, + NFP_INTERFACE_QSFP = 40, + NFP_INTERFACE_CXP = 100, + NFP_INTERFACE_QSFP28 = 112, +}; + +enum nfp_eth_media { + NFP_MEDIA_DAC_PASSIVE = 0, + NFP_MEDIA_DAC_ACTIVE, + NFP_MEDIA_FIBRE, +}; + +enum nfp_eth_aneg { + NFP_ANEG_AUTO = 0, + NFP_ANEG_SEARCH, + NFP_ANEG_25G_CONSORTIUM, + NFP_ANEG_25G_IEEE, + NFP_ANEG_DISABLED, +}; + +/** + * struct nfp_eth_table - ETH table information + * @count: number of table entries + * @ports: table of ports + * + * @eth_index: port index according to legacy ethX numbering + * @index: chip-wide first channel index + * @nbi: NBI index + * @base: first channel index (within NBI) + * @lanes: number of channels + * @speed: interface speed (in Mbps) + * @interface: interface (module) plugged in + * @media: media type of the @interface + * @aneg: auto negotiation mode + * @mac_addr: interface MAC address + * @label_port: port id + * @label_subport: id of interface within port (for split ports) + * @enabled: is enabled? + * @tx_enabled: is TX enabled? + * @rx_enabled: is RX enabled? + * @override_changed: is media reconfig pending? + * + * @port_type: one of %PORT_* defines for ethtool + * @is_split: is interface part of a split port + */ +struct nfp_eth_table { + unsigned int count; + struct nfp_eth_table_port { + unsigned int eth_index; + unsigned int index; + unsigned int nbi; + unsigned int base; + unsigned int lanes; + unsigned int speed; + + unsigned int interface; + enum nfp_eth_media media; + + enum nfp_eth_aneg aneg; + + u8 mac_addr[ETH_ALEN]; + + u8 label_port; + u8 label_subport; + + bool enabled; + bool tx_enabled; + bool rx_enabled; + + bool override_changed; + + /* Computed fields */ + u8 port_type; + + bool is_split; + } ports[0]; +}; + +struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp); +struct nfp_eth_table * +__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp); + +int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable); +int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, + bool configed); + +struct nfp_nsp *nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx); +int nfp_eth_config_commit_end(struct nfp_nsp *nsp); +void nfp_eth_config_cleanup_end(struct nfp_nsp *nsp); + +int __nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode); +int __nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed); +int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes); + +/** + * struct nfp_nsp_identify - NSP static information + * @version: opaque version string + * @flags: version flags + * @br_primary: branch id of primary bootloader + * @br_secondary: branch id of secondary bootloader + * @br_nsp: branch id of NSP + * @primary: version of primary bootloader + * @secondary: version id of secondary bootloader + * @nsp: version id of NSP + */ +struct nfp_nsp_identify { + char version[40]; + u8 flags; + u8 br_primary; + u8
br_secondary; + u8 br_nsp; + u16 primary; + u16 secondary; + u16 nsp; +}; + +struct nfp_nsp_identify *__nfp_nsp_identify(struct nfp_nsp *nsp); + +#endif diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c index edf703d319c8..e7a263de3731 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015-2017 Netronome Systems, Inc. + * Copyright (C) 2017 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -31,51 +31,59 @@ * SOFTWARE. */ -#ifndef NSP_NSP_ETH_H -#define NSP_NSP_ETH_H 1 +#include <linux/kernel.h> +#include <linux/slab.h> -#include <linux/types.h> -#include <linux/if_ether.h> +#include "nfp.h" +#include "nfp_nsp.h" -/** - * struct nfp_eth_table - ETH table information - * @count: number of table entries - * @ports: table of ports - * - * @eth_index: port index according to legacy ethX numbering - * @index: chip-wide first channel index - * @nbi: NBI index - * @base: first channel index (within NBI) - * @lanes: number of channels - * @speed: interface speed (in Mbps) - * @mac_addr: interface MAC address - * @label: interface id string - * @enabled: is enabled? - * @tx_enabled: is TX enabled? - * @rx_enabled: is RX enabled? - */ -struct nfp_eth_table { - unsigned int count; - struct nfp_eth_table_port { - unsigned int eth_index; - unsigned int index; - unsigned int nbi; - unsigned int base; - unsigned int lanes; - unsigned int speed; +struct nsp_identify { + u8 version[40]; + u8 flags; + u8 br_primary; + u8 br_secondary; + u8 br_nsp; + __le16 primary; + __le16 secondary; + __le16 nsp; + __le16 reserved; +}; - u8 mac_addr[ETH_ALEN]; - char label[8]; +struct nfp_nsp_identify *__nfp_nsp_identify(struct nfp_nsp *nsp) +{ + struct nfp_nsp_identify *nspi = NULL; + struct nsp_identify *ni; + int ret; - bool enabled; - bool tx_enabled; - bool rx_enabled; - } ports[0]; -}; + if (nfp_nsp_get_abi_ver_minor(nsp) < 15) + return NULL; + + ni = kzalloc(sizeof(*ni), GFP_KERNEL); + if (!ni) + return NULL; + + ret = nfp_nsp_read_identify(nsp, ni, sizeof(*ni)); + if (ret < 0) { + nfp_err(nfp_nsp_cpp(nsp), "reading bsp version failed %d\n", + ret); + goto exit_free; + } + + nspi = kzalloc(sizeof(*nspi), GFP_KERNEL); + if (!nspi) + goto exit_free; -struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp); -struct nfp_eth_table * -__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp); -int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable); + memcpy(nspi->version, ni->version, sizeof(nspi->version)); + nspi->version[sizeof(nspi->version) - 1] = '\0'; + nspi->flags = ni->flags; + nspi->br_primary = ni->br_primary; + nspi->br_secondary = ni->br_secondary; + nspi->br_nsp = ni->br_nsp; + nspi->primary = le16_to_cpu(ni->primary); + nspi->secondary = le16_to_cpu(ni->secondary); + nspi->nsp = le16_to_cpu(ni->nsp); -#endif +exit_free: + kfree(ni); + return nspi; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c index 1ece1f8ae4b3..639438d8313a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c @@ -43,13 +43,13 @@ #include <linux/module.h> #include "nfp.h" -#include "nfp_nsp_eth.h" +#include "nfp_nsp.h" 
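
Together, SPCODE_NSP_IDENTIFY, nfp_nsp_read_identify() and the __nfp_nsp_identify() wrapper above give callers a BSP version readout. A minimal usage sketch, assuming nfp_nsp_open() reports failure via ERR_PTR() as elsewhere in this driver; error handling is abbreviated:

static void example_log_nsp_version(struct nfp_cpp *cpp)
{
	struct nfp_nsp_identify *nspi;
	struct nfp_nsp *nsp;

	nsp = nfp_nsp_open(cpp);
	if (IS_ERR(nsp))
		return;

	nspi = __nfp_nsp_identify(nsp);	/* NULL on NSP ABI < 15 */
	if (nspi)
		nfp_info(cpp, "BSP version: %s\n", nspi->version);

	nfp_nsp_close(nsp);
	kfree(nspi);
}
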
#include "nfp6000/nfp6000.h" #define NSP_ETH_NBI_PORT_COUNT 24 #define NSP_ETH_MAX_COUNT (2 * NSP_ETH_NBI_PORT_COUNT) #define NSP_ETH_TABLE_SIZE (NSP_ETH_MAX_COUNT * \ - sizeof(struct eth_table_entry)) + sizeof(union eth_table_entry)) #define NSP_ETH_PORT_LANES GENMASK_ULL(3, 0) #define NSP_ETH_PORT_INDEX GENMASK_ULL(15, 8) @@ -58,14 +58,32 @@ #define NSP_ETH_PORT_LANES_MASK cpu_to_le64(NSP_ETH_PORT_LANES) +#define NSP_ETH_STATE_CONFIGURED BIT_ULL(0) #define NSP_ETH_STATE_ENABLED BIT_ULL(1) #define NSP_ETH_STATE_TX_ENABLED BIT_ULL(2) #define NSP_ETH_STATE_RX_ENABLED BIT_ULL(3) #define NSP_ETH_STATE_RATE GENMASK_ULL(11, 8) +#define NSP_ETH_STATE_INTERFACE GENMASK_ULL(19, 12) +#define NSP_ETH_STATE_MEDIA GENMASK_ULL(21, 20) +#define NSP_ETH_STATE_OVRD_CHNG BIT_ULL(22) +#define NSP_ETH_STATE_ANEG GENMASK_ULL(25, 23) +#define NSP_ETH_CTRL_CONFIGURED BIT_ULL(0) #define NSP_ETH_CTRL_ENABLED BIT_ULL(1) #define NSP_ETH_CTRL_TX_ENABLED BIT_ULL(2) #define NSP_ETH_CTRL_RX_ENABLED BIT_ULL(3) +#define NSP_ETH_CTRL_SET_RATE BIT_ULL(4) +#define NSP_ETH_CTRL_SET_LANES BIT_ULL(5) +#define NSP_ETH_CTRL_SET_ANEG BIT_ULL(6) + +enum nfp_eth_raw { + NSP_ETH_RAW_PORT = 0, + NSP_ETH_RAW_STATE, + NSP_ETH_RAW_MAC, + NSP_ETH_RAW_CONTROL, + + NSP_ETH_NUM_RAW +}; enum nfp_eth_rate { RATE_INVALID = 0, @@ -76,29 +94,49 @@ enum nfp_eth_rate { RATE_25G, }; -struct eth_table_entry { - __le64 port; - __le64 state; - u8 mac_addr[6]; - u8 resv[2]; - __le64 control; +union eth_table_entry { + struct { + __le64 port; + __le64 state; + u8 mac_addr[6]; + u8 resv[2]; + __le64 control; + }; + __le64 raw[NSP_ETH_NUM_RAW]; +}; + +static const struct { + enum nfp_eth_rate rate; + unsigned int speed; +} nsp_eth_rate_tbl[] = { + { RATE_INVALID, 0, }, + { RATE_10M, SPEED_10, }, + { RATE_100M, SPEED_100, }, + { RATE_1G, SPEED_1000, }, + { RATE_10G, SPEED_10000, }, + { RATE_25G, SPEED_25000, }, }; -static unsigned int nfp_eth_rate(enum nfp_eth_rate rate) +static unsigned int nfp_eth_rate2speed(enum nfp_eth_rate rate) { - unsigned int rate_xlate[] = { - [RATE_INVALID] = 0, - [RATE_10M] = SPEED_10, - [RATE_100M] = SPEED_100, - [RATE_1G] = SPEED_1000, - [RATE_10G] = SPEED_10000, - [RATE_25G] = SPEED_25000, - }; + int i; - if (rate >= ARRAY_SIZE(rate_xlate)) - return 0; + for (i = 0; i < ARRAY_SIZE(nsp_eth_rate_tbl); i++) + if (nsp_eth_rate_tbl[i].rate == rate) + return nsp_eth_rate_tbl[i].speed; + + return 0; +} + +static unsigned int nfp_eth_speed2rate(unsigned int speed) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(nsp_eth_rate_tbl); i++) + if (nsp_eth_rate_tbl[i].speed == speed) + return nsp_eth_rate_tbl[i].rate; - return rate_xlate[rate]; + return RATE_INVALID; } static void nfp_eth_copy_mac_reverse(u8 *dst, const u8 *src) @@ -110,8 +148,8 @@ static void nfp_eth_copy_mac_reverse(u8 *dst, const u8 *src) } static void -nfp_eth_port_translate(const struct eth_table_entry *src, unsigned int index, - struct nfp_eth_table_port *dst) +nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src, + unsigned int index, struct nfp_eth_table_port *dst) { unsigned int rate; u64 port, state; @@ -129,14 +167,60 @@ nfp_eth_port_translate(const struct eth_table_entry *src, unsigned int index, dst->tx_enabled = FIELD_GET(NSP_ETH_STATE_TX_ENABLED, state); dst->rx_enabled = FIELD_GET(NSP_ETH_STATE_RX_ENABLED, state); - rate = nfp_eth_rate(FIELD_GET(NSP_ETH_STATE_RATE, state)); + rate = nfp_eth_rate2speed(FIELD_GET(NSP_ETH_STATE_RATE, state)); dst->speed = dst->lanes * rate; + dst->interface = FIELD_GET(NSP_ETH_STATE_INTERFACE, state); + 
dst->media = FIELD_GET(NSP_ETH_STATE_MEDIA, state); + nfp_eth_copy_mac_reverse(dst->mac_addr, src->mac_addr); - snprintf(dst->label, sizeof(dst->label) - 1, "%llu.%llu", - FIELD_GET(NSP_ETH_PORT_PHYLABEL, port), - FIELD_GET(NSP_ETH_PORT_LABEL, port)); + dst->label_port = FIELD_GET(NSP_ETH_PORT_PHYLABEL, port); + dst->label_subport = FIELD_GET(NSP_ETH_PORT_LABEL, port); + + if (nfp_nsp_get_abi_ver_minor(nsp) < 17) + return; + + dst->override_changed = FIELD_GET(NSP_ETH_STATE_OVRD_CHNG, state); + dst->aneg = FIELD_GET(NSP_ETH_STATE_ANEG, state); +} + +static void +nfp_eth_mark_split_ports(struct nfp_cpp *cpp, struct nfp_eth_table *table) +{ + unsigned int i, j; + + for (i = 0; i < table->count; i++) + for (j = 0; j < table->count; j++) { + if (i == j) + continue; + if (table->ports[i].label_port != + table->ports[j].label_port) + continue; + if (table->ports[i].label_subport == + table->ports[j].label_subport) + nfp_warn(cpp, + "Port %d subport %d is a duplicate\n", + table->ports[i].label_port, + table->ports[i].label_subport); + + table->ports[i].is_split = true; + break; + } +} + +static void +nfp_eth_calc_port_type(struct nfp_cpp *cpp, struct nfp_eth_table_port *entry) +{ + if (entry->interface == NFP_INTERFACE_NONE) { + entry->port_type = PORT_NONE; + return; + } + + if (entry->media == NFP_MEDIA_FIBRE) + entry->port_type = PORT_FIBRE; + else + entry->port_type = PORT_DA; } /** @@ -166,10 +250,9 @@ struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp) struct nfp_eth_table * __nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp) { - struct eth_table_entry *entries; + union eth_table_entry *entries; struct nfp_eth_table *table; - unsigned int cnt; - int i, j, ret; + int i, j, ret, cnt = 0; entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL); if (!entries) @@ -178,93 +261,288 @@ __nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp) ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); if (ret < 0) { nfp_err(cpp, "reading port table failed %d\n", ret); - kfree(entries); - return NULL; + goto err; } - /* Some versions of flash will give us 0 instead of port count */ - cnt = ret; - if (!cnt) { - for (i = 0; i < NSP_ETH_MAX_COUNT; i++) - if (entries[i].port & NSP_ETH_PORT_LANES_MASK) - cnt++; + for (i = 0; i < NSP_ETH_MAX_COUNT; i++) + if (entries[i].port & NSP_ETH_PORT_LANES_MASK) + cnt++; + + /* Some versions of flash will give us 0 instead of port count. + * For those that give a port count, verify it against the value + * calculated above. + */ + if (ret && ret != cnt) { + nfp_err(cpp, "table entry count reported (%d) does not match entries present (%d)\n", + ret, cnt); + goto err; } table = kzalloc(sizeof(*table) + sizeof(struct nfp_eth_table_port) * cnt, GFP_KERNEL); - if (!table) { - kfree(entries); - return NULL; - } + if (!table) + goto err; table->count = cnt; for (i = 0, j = 0; i < NSP_ETH_MAX_COUNT; i++) if (entries[i].port & NSP_ETH_PORT_LANES_MASK) - nfp_eth_port_translate(&entries[i], i, + nfp_eth_port_translate(nsp, &entries[i], i, &table->ports[j++]); + nfp_eth_mark_split_ports(cpp, table); + for (i = 0; i < table->count; i++) + nfp_eth_calc_port_type(cpp, &table->ports[i]); + kfree(entries); return table; + +err: + kfree(entries); + return NULL; } -/** - * nfp_eth_set_mod_enable() - set PHY module enable control bit - * @cpp: NFP CPP handle - * @idx: NFP chip-wide port index - * @enable: Desired state - * - * Enable or disable PHY module (this usually means setting the TX lanes - * disable bits). - * - * Return: 0 or -ERRNO. 
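/* Editor's note: a sketch, not part of the patch, of walking the table
 * returned by nfp_eth_read_ports() now that ports carry numeric
 * label_port/label_subport identifiers and an is_split flag (set by
 * nfp_eth_mark_split_ports() above). Field types and the caller-kfree()s
 * ownership rule are assumptions based on the code shown here.
 */
static void example_dump_eth_table(struct nfp_cpp *cpp)
{
	struct nfp_eth_table *table;
	unsigned int i;

	table = nfp_eth_read_ports(cpp);
	if (!table)
		return;

	for (i = 0; i < table->count; i++)
		nfp_info(cpp, "port %u.%u%s: %u Mbps\n",
			 table->ports[i].label_port,
			 table->ports[i].label_subport,
			 table->ports[i].is_split ? " (split)" : "",
			 table->ports[i].speed);

	kfree(table);
}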
- */ -int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable) +struct nfp_nsp *nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx) { - struct eth_table_entry *entries; + union eth_table_entry *entries; struct nfp_nsp *nsp; - u64 reg; int ret; entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL); if (!entries) - return -ENOMEM; + return ERR_PTR(-ENOMEM); nsp = nfp_nsp_open(cpp); if (IS_ERR(nsp)) { kfree(entries); - return PTR_ERR(nsp); + return nsp; } ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); if (ret < 0) { nfp_err(cpp, "reading port table failed %d\n", ret); - goto exit_close_nsp; + goto err; } if (!(entries[idx].port & NSP_ETH_PORT_LANES_MASK)) { nfp_warn(cpp, "trying to set port state on disabled port %d\n", idx); - ret = -EINVAL; - goto exit_close_nsp; + goto err; + } + + nfp_nsp_config_set_state(nsp, entries, idx); + return nsp; + +err: + nfp_nsp_close(nsp); + kfree(entries); + return ERR_PTR(-EIO); +} + +void nfp_eth_config_cleanup_end(struct nfp_nsp *nsp) +{ + union eth_table_entry *entries = nfp_nsp_config_entries(nsp); + + nfp_nsp_config_set_modified(nsp, false); + nfp_nsp_config_clear_state(nsp); + nfp_nsp_close(nsp); + kfree(entries); +} + +/** + * nfp_eth_config_commit_end() - perform recorded configuration changes + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * + * Perform the configuration which was requested with __nfp_eth_set_*() + * helpers and recorded in @nsp state. If the device was already configured + * as requested, or no __nfp_eth_set_*() operations were made, no NSP command + * will be performed. + * + * Return: + * 0 - configuration successful; + * 1 - no changes were needed; + * -ERRNO - configuration failed. + */ +int nfp_eth_config_commit_end(struct nfp_nsp *nsp) +{ + union eth_table_entry *entries = nfp_nsp_config_entries(nsp); + int ret = 1; + + if (nfp_nsp_config_modified(nsp)) { + ret = nfp_nsp_write_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); + ret = ret < 0 ? ret : 0; + } + + nfp_eth_config_cleanup_end(nsp); + + return ret; +} + +/** + * nfp_eth_set_mod_enable() - set PHY module enable control bit + * @cpp: NFP CPP handle + * @idx: NFP chip-wide port index + * @enable: Desired state + * + * Enable or disable PHY module (this usually means setting the TX lanes + * disable bits). + * + * Return: 0 or -ERRNO. + */ +int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable) +{ + union eth_table_entry *entries; + struct nfp_nsp *nsp; + u64 reg; + + nsp = nfp_eth_config_start(cpp, idx); + if (IS_ERR(nsp)) + return PTR_ERR(nsp); + + entries = nfp_nsp_config_entries(nsp); + + /* Check if we are already in requested state */ + reg = le64_to_cpu(entries[idx].state); + if (enable != FIELD_GET(NSP_ETH_CTRL_ENABLED, reg)) { + reg = le64_to_cpu(entries[idx].control); + reg &= ~NSP_ETH_CTRL_ENABLED; + reg |= FIELD_PREP(NSP_ETH_CTRL_ENABLED, enable); + entries[idx].control = cpu_to_le64(reg); + + nfp_nsp_config_set_modified(nsp, true); } + return nfp_eth_config_commit_end(nsp); +} + +/** + * nfp_eth_set_configured() - set PHY module configured control bit + * @cpp: NFP CPP handle + * @idx: NFP chip-wide port index + * @configed: Desired state + * + * Set the ifup/ifdown state on the PHY. + * + * Return: 0 or -ERRNO.
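/* Editor's note: sketch, not part of the patch, of the read-modify-write
 * transaction implemented above: nfp_eth_config_start() snapshots the port
 * table and opens the NSP, the __nfp_eth_set_*() helpers (below) record
 * changes, and nfp_eth_config_commit_end() writes the table back only if
 * something was actually modified (returning 1 when no change was needed).
 */
static int example_set_port_lane_speed(struct nfp_cpp *cpp, unsigned int idx,
				       unsigned int speed_per_lane)
{
	struct nfp_nsp *nsp;
	int err;

	nsp = nfp_eth_config_start(cpp, idx);
	if (IS_ERR(nsp))
		return PTR_ERR(nsp);

	err = __nfp_eth_set_speed(nsp, speed_per_lane);
	if (err) {
		nfp_eth_config_cleanup_end(nsp);
		return err;
	}

	return nfp_eth_config_commit_end(nsp);
}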
+ */ +int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, bool configed) +{ + union eth_table_entry *entries; + struct nfp_nsp *nsp; + u64 reg; + + nsp = nfp_eth_config_start(cpp, idx); + if (IS_ERR(nsp)) + return PTR_ERR(nsp); + + entries = nfp_nsp_config_entries(nsp); + /* Check if we are already in requested state */ reg = le64_to_cpu(entries[idx].state); - if (enable == FIELD_GET(NSP_ETH_CTRL_ENABLED, reg)) { - ret = 0; - goto exit_close_nsp; + if (configed != FIELD_GET(NSP_ETH_STATE_CONFIGURED, reg)) { + reg = le64_to_cpu(entries[idx].control); + reg &= ~NSP_ETH_CTRL_CONFIGURED; + reg |= FIELD_PREP(NSP_ETH_CTRL_CONFIGURED, configed); + entries[idx].control = cpu_to_le64(reg); + + nfp_nsp_config_set_modified(nsp, true); } - reg = le64_to_cpu(entries[idx].control); - reg &= ~NSP_ETH_CTRL_ENABLED; - reg |= FIELD_PREP(NSP_ETH_CTRL_ENABLED, enable); - entries[idx].control = cpu_to_le64(reg); + return nfp_eth_config_commit_end(nsp); +} - ret = nfp_nsp_write_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); -exit_close_nsp: - nfp_nsp_close(nsp); - kfree(entries); +/* Force inline, FIELD_* macros require masks to be known at compile time */ +static __always_inline int +nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx, + const u64 mask, unsigned int val, const u64 ctrl_bit) +{ + union eth_table_entry *entries = nfp_nsp_config_entries(nsp); + unsigned int idx = nfp_nsp_config_idx(nsp); + u64 reg; + + /* Note: set features were added in ABI 0.14 but the error + * codes were initially not populated correctly. + */ + if (nfp_nsp_get_abi_ver_minor(nsp) < 17) { + nfp_err(nfp_nsp_cpp(nsp), + "set operations not supported, please update flash\n"); + return -EOPNOTSUPP; + } + + /* Check if we are already in requested state */ + reg = le64_to_cpu(entries[idx].raw[raw_idx]); + if (val == FIELD_GET(mask, reg)) + return 0; - return ret < 0 ? ret : 0; + reg &= ~mask; + reg |= FIELD_PREP(mask, val); + entries[idx].raw[raw_idx] = cpu_to_le64(reg); + + entries[idx].control |= cpu_to_le64(ctrl_bit); + + nfp_nsp_config_set_modified(nsp, true); + + return 0; +} + +/** + * __nfp_eth_set_aneg() - set PHY autonegotiation control bit + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * @mode: Desired autonegotiation mode + * + * Allow/disallow PHY module to advertise/perform autonegotiation. + * Will write to hwinfo overrides in the flash (persistent config). + * + * Return: 0 or -ERRNO. + */ +int __nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode) +{ + return nfp_eth_set_bit_config(nsp, NSP_ETH_RAW_STATE, + NSP_ETH_STATE_ANEG, mode, + NSP_ETH_CTRL_SET_ANEG); +} + +/** + * __nfp_eth_set_speed() - set interface speed/rate + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * @speed: Desired speed (per lane) + * + * Set lane speed. Provided @speed value should be subport speed divided + * by number of lanes this subport is spanning (e.g. 10000 for 40G, 25000 for + * 50G, etc.) + * Will write to hwinfo overrides in the flash (persistent config). + * + * Return: 0 or -ERRNO.
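/* Editor's note: hypothetical composition, not in the patch, showing how the
 * generic nfp_eth_set_bit_config() helper above is meant to be used: pick the
 * raw table word, the field mask within it, and the control bit flagging what
 * changed. It would have to live in nfp_nsp_eth.c (the helper is static);
 * the TX-enable masks exist in this file, but whether the NSP accepts TX
 * enable through this set-operation path is an assumption.
 */
static int __maybe_unused
example_set_tx_enable(struct nfp_nsp *nsp, bool enable)
{
	return nfp_eth_set_bit_config(nsp, NSP_ETH_RAW_STATE,
				      NSP_ETH_STATE_TX_ENABLED, enable,
				      NSP_ETH_CTRL_TX_ENABLED);
}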
+ */ +int __nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed) +{ + enum nfp_eth_rate rate; + + rate = nfp_eth_speed2rate(speed); + if (rate == RATE_INVALID) { + nfp_warn(nfp_nsp_cpp(nsp), + "could not find matching lane rate for speed %u\n", + speed); + return -EINVAL; + } + + return nfp_eth_set_bit_config(nsp, NSP_ETH_RAW_STATE, + NSP_ETH_STATE_RATE, rate, + NSP_ETH_CTRL_SET_RATE); +} + +/** + * __nfp_eth_set_split() - set interface lane split + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * @lanes: Desired lanes per port + * + * Set number of lanes in the port. + * Will write to hwinfo overrides in the flash (persistent config). + * + * Return: 0 or -ERRNO. + */ +int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes) +{ + return nfp_eth_set_bit_config(nsp, NSP_ETH_RAW_PORT, NSP_ETH_PORT_LANES, + lanes, NSP_ETH_CTRL_SET_LANES); } diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c index a2850344f8b4..2d15a7c9d0de 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c @@ -45,6 +45,13 @@ #include "nfp_cpp.h" #include "nfp6000/nfp6000.h" +#define NFP_RESOURCE_TBL_TARGET NFP_CPP_TARGET_MU +#define NFP_RESOURCE_TBL_BASE 0x8100000000ULL + +/* NFP Resource Table self-identifier */ +#define NFP_RESOURCE_TBL_NAME "nfp.res" +#define NFP_RESOURCE_TBL_KEY 0x00000000 /* Special key for entry 0 */ + #define NFP_RESOURCE_ENTRY_NAME_SZ 8 /** @@ -100,9 +107,11 @@ static int nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res) strncpy(name_pad, res->name, sizeof(name_pad)); /* Search for a matching entry */ - key = NFP_RESOURCE_TBL_KEY; - if (memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8)) - key = crc32_posix(name_pad, sizeof(name_pad)); + if (!memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8)) { + nfp_err(cpp, "Grabbing device lock not supported\n"); + return -EOPNOTSUPP; + } + key = crc32_posix(name_pad, sizeof(name_pad)); for (i = 0; i < NFP_RESOURCE_TBL_ENTRIES; i++) { u64 addr = NFP_RESOURCE_TBL_BASE +
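/* Editor's note: worked example, not part of the patch, of the key scheme in
 * nfp_cpp_resource_find() above: every resource except the reserved
 * NFP_RESOURCE_TBL_NAME entry 0 is located by the POSIX CRC32 of its name,
 * NUL-padded to NFP_RESOURCE_ENTRY_NAME_SZ (8) bytes; crc32_posix() comes
 * from the nfpcore crc32 helpers already used above.
 */
static u32 example_resource_key(const char *name)
{
	char name_pad[NFP_RESOURCE_ENTRY_NAME_SZ] = {};

	strncpy(name_pad, name, sizeof(name_pad));
	return crc32_posix(name_pad, sizeof(name_pad));
}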