Diffstat (limited to 'drivers/net/ethernet/freescale')
25 files changed, 5641 insertions, 138 deletions
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index d3a62bc1f1c6..71793e03c3c8 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -97,5 +97,6 @@ config GIANFAR source "drivers/net/ethernet/freescale/dpaa/Kconfig" source "drivers/net/ethernet/freescale/dpaa2/Kconfig" +source "drivers/net/ethernet/freescale/enetc/Kconfig" endif # NET_VENDOR_FREESCALE diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile index 3b4ff08e3841..6a93293d31e0 100644 --- a/drivers/net/ethernet/freescale/Makefile +++ b/drivers/net/ethernet/freescale/Makefile @@ -23,3 +23,6 @@ obj-$(CONFIG_FSL_FMAN) += fman/ obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/ obj-$(CONFIG_FSL_DPAA2_ETH) += dpaa2/ + +obj-$(CONFIG_FSL_ENETC) += enetc/ +obj-$(CONFIG_FSL_ENETC_VF) += enetc/ diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 62497119c85f..bdee441bc3b7 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -501,7 +501,7 @@ static int dpaa_get_ts_info(struct net_device *net_dev, struct device_node *mac_node = dev->of_node; struct device_node *fman_node = NULL, *ptp_node = NULL; struct platform_device *ptp_dev = NULL; - struct qoriq_ptp *ptp = NULL; + struct ptp_qoriq *ptp = NULL; info->phc_index = -1; diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile index 2f424e0a8225..d1e78cdd512f 100644 --- a/drivers/net/ethernet/freescale/dpaa2/Makefile +++ b/drivers/net/ethernet/freescale/dpaa2/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o +fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o # Needed by the tracing framework diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c new file mode 100644 index 000000000000..a027f4a9d0cc --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2015 Freescale Semiconductor Inc. 
+ * Copyright 2018-2019 NXP + */ +#include <linux/module.h> +#include <linux/debugfs.h> +#include "dpaa2-eth.h" +#include "dpaa2-eth-debugfs.h" + +#define DPAA2_ETH_DBG_ROOT "dpaa2-eth" + +static struct dentry *dpaa2_dbg_root; + +static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset) +{ + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; + struct rtnl_link_stats64 *stats; + struct dpaa2_eth_drv_stats *extras; + int i; + + seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name); + seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s%16s\n", + "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf", + "Tx SG", "Tx realloc", "Enq busy"); + + for_each_online_cpu(i) { + stats = per_cpu_ptr(priv->percpu_stats, i); + extras = per_cpu_ptr(priv->percpu_extras, i); + seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n", + i, + stats->rx_packets, + stats->rx_errors, + extras->rx_sg_frames, + stats->tx_packets, + stats->tx_errors, + extras->tx_conf_frames, + extras->tx_sg_frames, + extras->tx_reallocs, + extras->tx_portal_busy); + } + + return 0; +} + +static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file) +{ + int err; + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private; + + err = single_open(file, dpaa2_dbg_cpu_show, priv); + if (err < 0) + netdev_err(priv->net_dev, "single_open() failed\n"); + + return err; +} + +static const struct file_operations dpaa2_dbg_cpu_ops = { + .open = dpaa2_dbg_cpu_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static char *fq_type_to_str(struct dpaa2_eth_fq *fq) +{ + switch (fq->type) { + case DPAA2_RX_FQ: + return "Rx"; + case DPAA2_TX_CONF_FQ: + return "Tx conf"; + default: + return "N/A"; + } +} + +static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset) +{ + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; + struct dpaa2_eth_fq *fq; + u32 fcnt, bcnt; + int i, err; + + seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name); + seq_printf(file, "%s%16s%16s%16s%16s\n", + "VFQID", "CPU", "Type", "Frames", "Pending frames"); + + for (i = 0; i < priv->num_fqs; i++) { + fq = &priv->fq[i]; + err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt); + if (err) + fcnt = 0; + + seq_printf(file, "%5d%16d%16s%16llu%16u\n", + fq->fqid, + fq->target_cpu, + fq_type_to_str(fq), + fq->stats.frames, + fcnt); + } + + return 0; +} + +static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file) +{ + int err; + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private; + + err = single_open(file, dpaa2_dbg_fqs_show, priv); + if (err < 0) + netdev_err(priv->net_dev, "single_open() failed\n"); + + return err; +} + +static const struct file_operations dpaa2_dbg_fq_ops = { + .open = dpaa2_dbg_fqs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset) +{ + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; + struct dpaa2_eth_channel *ch; + int i; + + seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name); + seq_printf(file, "%s%16s%16s%16s%16s\n", + "CHID", "CPU", "Deq busy", "CDANs", "Buf count"); + + for (i = 0; i < priv->num_channels; i++) { + ch = priv->channel[i]; + seq_printf(file, "%4d%16d%16llu%16llu%16d\n", + ch->ch_id, + ch->nctx.desired_cpu, + ch->stats.dequeue_portal_busy, + ch->stats.cdan, + ch->buf_count); + } + + return 0; +} + +static int 
dpaa2_dbg_ch_open(struct inode *inode, struct file *file) +{ + int err; + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private; + + err = single_open(file, dpaa2_dbg_ch_show, priv); + if (err < 0) + netdev_err(priv->net_dev, "single_open() failed\n"); + + return err; +} + +static const struct file_operations dpaa2_dbg_ch_ops = { + .open = dpaa2_dbg_ch_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) +{ + if (!dpaa2_dbg_root) + return; + + /* Create a directory for the interface */ + priv->dbg.dir = debugfs_create_dir(priv->net_dev->name, + dpaa2_dbg_root); + if (!priv->dbg.dir) { + netdev_err(priv->net_dev, "debugfs_create_dir() failed\n"); + return; + } + + /* per-cpu stats file */ + priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444, + priv->dbg.dir, priv, + &dpaa2_dbg_cpu_ops); + if (!priv->dbg.cpu_stats) { + netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); + goto err_cpu_stats; + } + + /* per-fq stats file */ + priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444, + priv->dbg.dir, priv, + &dpaa2_dbg_fq_ops); + if (!priv->dbg.fq_stats) { + netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); + goto err_fq_stats; + } + + /* per-fq stats file */ + priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444, + priv->dbg.dir, priv, + &dpaa2_dbg_ch_ops); + if (!priv->dbg.fq_stats) { + netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); + goto err_ch_stats; + } + + return; + +err_ch_stats: + debugfs_remove(priv->dbg.fq_stats); +err_fq_stats: + debugfs_remove(priv->dbg.cpu_stats); +err_cpu_stats: + debugfs_remove(priv->dbg.dir); +} + +void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) +{ + debugfs_remove(priv->dbg.fq_stats); + debugfs_remove(priv->dbg.ch_stats); + debugfs_remove(priv->dbg.cpu_stats); + debugfs_remove(priv->dbg.dir); +} + +void dpaa2_eth_dbg_init(void) +{ + dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL); + if (!dpaa2_dbg_root) { + pr_err("DPAA2-ETH: debugfs create failed\n"); + return; + } + + pr_debug("DPAA2-ETH: debugfs created\n"); +} + +void dpaa2_eth_dbg_exit(void) +{ + debugfs_remove(dpaa2_dbg_root); +} diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h new file mode 100644 index 000000000000..4f63de997a26 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2015 Freescale Semiconductor Inc. 
+ * Copyright 2018-2019 NXP + */ +#ifndef DPAA2_ETH_DEBUGFS_H +#define DPAA2_ETH_DEBUGFS_H + +#include <linux/dcache.h> + +struct dpaa2_eth_priv; + +struct dpaa2_debugfs { + struct dentry *dir; + struct dentry *fq_stats; + struct dentry *ch_stats; + struct dentry *cpu_stats; +}; + +#ifdef CONFIG_DEBUG_FS +void dpaa2_eth_dbg_init(void); +void dpaa2_eth_dbg_exit(void); +void dpaa2_dbg_add(struct dpaa2_eth_priv *priv); +void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv); +#else +static inline void dpaa2_eth_dbg_init(void) {} +static inline void dpaa2_eth_dbg_exit(void) {} +static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {} +static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {} +#endif /* CONFIG_DEBUG_FS */ + +#endif /* DPAA2_ETH_DEBUGFS_H */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 1ca9a18139ec..2ba49e959c3f 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -86,16 +86,16 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv, for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { addr = dpaa2_sg_get_addr(&sgt[i]); sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); - dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, - DMA_BIDIRECTIONAL); + dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE, + DMA_BIDIRECTIONAL); - skb_free_frag(sg_vaddr); + free_pages((unsigned long)sg_vaddr, 0); if (dpaa2_sg_is_final(&sgt[i])) break; } free_buf: - skb_free_frag(vaddr); + free_pages((unsigned long)vaddr, 0); } /* Build a linear skb based on a single-buffer frame descriptor */ @@ -109,7 +109,7 @@ static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch, ch->buf_count--; - skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE); + skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE); if (unlikely(!skb)) return NULL; @@ -144,19 +144,19 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, /* Get the address and length from the S/G entry */ sg_addr = dpaa2_sg_get_addr(sge); sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr); - dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE, - DMA_BIDIRECTIONAL); + dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE, + DMA_BIDIRECTIONAL); sg_length = dpaa2_sg_get_len(sge); if (i == 0) { /* We build the skb around the first data buffer */ - skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE); + skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE); if (unlikely(!skb)) { /* Free the first SG entry now, since we already * unmapped it and obtained the virtual address */ - skb_free_frag(sg_vaddr); + free_pages((unsigned long)sg_vaddr, 0); /* We still need to subtract the buffers used * by this FD from our software counter @@ -211,9 +211,9 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count) for (i = 0; i < count; i++) { vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]); - dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE, - DMA_BIDIRECTIONAL); - skb_free_frag(vaddr); + dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE, + DMA_BIDIRECTIONAL); + free_pages((unsigned long)vaddr, 0); } } @@ -264,9 +264,7 @@ static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd, fq = &priv->fq[queue_id]; for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { - err = dpaa2_io_service_enqueue_qd(fq->channel->dpio, - priv->tx_qdid, 0, - fq->tx_qdbin, fd); + err = priv->enqueue(priv, fq, fd, 0); if (err != -EBUSY) break; } @@ -298,6 +296,7 @@ static u32 
run_xdp(struct dpaa2_eth_priv *priv, xdp.data_end = xdp.data + dpaa2_fd_get_len(fd); xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM; xdp_set_data_meta_invalid(&xdp); + xdp.rxq = &ch->xdp_rxq; xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp); @@ -330,8 +329,20 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv, xdp_release_buf(priv, ch, addr); ch->stats.xdp_drop++; break; + case XDP_REDIRECT: + dma_unmap_page(priv->net_dev->dev.parent, addr, + DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL); + ch->buf_count--; + xdp.data_hard_start = vaddr; + err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog); + if (unlikely(err)) + ch->stats.xdp_drop++; + else + ch->stats.xdp_redirect++; + break; } + ch->xdp.res |= xdp_act; out: rcu_read_unlock(); return xdp_act; @@ -378,16 +389,16 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, return; } - dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, - DMA_BIDIRECTIONAL); + dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE, + DMA_BIDIRECTIONAL); skb = build_linear_skb(ch, fd, vaddr); } else if (fd_format == dpaa2_fd_sg) { WARN_ON(priv->xdp_prog); - dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, - DMA_BIDIRECTIONAL); + dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE, + DMA_BIDIRECTIONAL); skb = build_frag_skb(priv, ch, buf_data); - skb_free_frag(vaddr); + free_pages((unsigned long)vaddr, 0); percpu_extras->rx_sg_frames++; percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd); } else { @@ -573,10 +584,11 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv, * all of them on Tx Conf. */ swa = (struct dpaa2_eth_swa *)sgt_buf; - swa->skb = skb; - swa->scl = scl; - swa->num_sg = num_sg; - swa->sgt_size = sgt_buf_size; + swa->type = DPAA2_ETH_SWA_SG; + swa->sg.skb = skb; + swa->sg.scl = scl; + swa->sg.num_sg = num_sg; + swa->sg.sgt_size = sgt_buf_size; /* Separately map the SGT buffer */ addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL); @@ -611,7 +623,7 @@ static int build_single_fd(struct dpaa2_eth_priv *priv, { struct device *dev = priv->net_dev->dev.parent; u8 *buffer_start, *aligned_start; - struct sk_buff **skbh; + struct dpaa2_eth_swa *swa; dma_addr_t addr; buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb); @@ -628,8 +640,9 @@ static int build_single_fd(struct dpaa2_eth_priv *priv, * (in the private data area) such that we can release it * on Tx confirm */ - skbh = (struct sk_buff **)buffer_start; - *skbh = skb; + swa = (struct dpaa2_eth_swa *)buffer_start; + swa->type = DPAA2_ETH_SWA_SINGLE; + swa->single.skb = skb; addr = dma_map_single(dev, buffer_start, skb_tail_pointer(skb) - buffer_start, @@ -657,47 +670,65 @@ static int build_single_fd(struct dpaa2_eth_priv *priv, * dpaa2_eth_tx(). */ static void free_tx_fd(const struct dpaa2_eth_priv *priv, - const struct dpaa2_fd *fd) + struct dpaa2_eth_fq *fq, + const struct dpaa2_fd *fd, bool in_napi) { struct device *dev = priv->net_dev->dev.parent; dma_addr_t fd_addr; - struct sk_buff **skbh, *skb; + struct sk_buff *skb = NULL; unsigned char *buffer_start; struct dpaa2_eth_swa *swa; u8 fd_format = dpaa2_fd_get_format(fd); + u32 fd_len = dpaa2_fd_get_len(fd); fd_addr = dpaa2_fd_get_addr(fd); - skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr); + buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr); + swa = (struct dpaa2_eth_swa *)buffer_start; if (fd_format == dpaa2_fd_single) { - skb = *skbh; - buffer_start = (unsigned char *)skbh; - /* Accessing the skb buffer is safe before dma unmap, because - * we didn't map the actual skb shell. 
- */ - dma_unmap_single(dev, fd_addr, - skb_tail_pointer(skb) - buffer_start, - DMA_BIDIRECTIONAL); + if (swa->type == DPAA2_ETH_SWA_SINGLE) { + skb = swa->single.skb; + /* Accessing the skb buffer is safe before dma unmap, + * because we didn't map the actual skb shell. + */ + dma_unmap_single(dev, fd_addr, + skb_tail_pointer(skb) - buffer_start, + DMA_BIDIRECTIONAL); + } else { + WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type"); + dma_unmap_single(dev, fd_addr, swa->xdp.dma_size, + DMA_BIDIRECTIONAL); + } } else if (fd_format == dpaa2_fd_sg) { - swa = (struct dpaa2_eth_swa *)skbh; - skb = swa->skb; + skb = swa->sg.skb; /* Unmap the scatterlist */ - dma_unmap_sg(dev, swa->scl, swa->num_sg, DMA_BIDIRECTIONAL); - kfree(swa->scl); + dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg, + DMA_BIDIRECTIONAL); + kfree(swa->sg.scl); /* Unmap the SGT buffer */ - dma_unmap_single(dev, fd_addr, swa->sgt_size, + dma_unmap_single(dev, fd_addr, swa->sg.sgt_size, DMA_BIDIRECTIONAL); } else { netdev_dbg(priv->net_dev, "Invalid FD format\n"); return; } + if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) { + fq->dq_frames++; + fq->dq_bytes += fd_len; + } + + if (swa->type == DPAA2_ETH_SWA_XDP) { + xdp_return_frame(swa->xdp.xdpf); + return; + } + /* Get the timestamp value */ if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { struct skb_shared_hwtstamps shhwtstamps; - __le64 *ts = dpaa2_get_ts(skbh, true); + __le64 *ts = dpaa2_get_ts(buffer_start, true); u64 ns; memset(&shhwtstamps, 0, sizeof(shhwtstamps)); @@ -709,10 +740,10 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv, /* Free SGT buffer allocated on tx */ if (fd_format != dpaa2_fd_single) - skb_free_frag(skbh); + skb_free_frag(buffer_start); /* Move on with skb release */ - dev_kfree_skb(skb); + napi_consume_skb(skb, in_napi); } static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) @@ -785,9 +816,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) queue_mapping = skb_get_queue_mapping(skb); fq = &priv->fq[queue_mapping]; for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { - err = dpaa2_io_service_enqueue_qd(fq->channel->dpio, - priv->tx_qdid, 0, - fq->tx_qdbin, &fd); + err = priv->enqueue(priv, fq, &fd, 0); if (err != -EBUSY) break; } @@ -795,7 +824,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) if (unlikely(err < 0)) { percpu_stats->tx_errors++; /* Clean up everything, including freeing the skb */ - free_tx_fd(priv, &fd); + free_tx_fd(priv, fq, &fd, false); } else { fd_len = dpaa2_fd_get_len(&fd); percpu_stats->tx_packets++; @@ -832,12 +861,9 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, percpu_extras->tx_conf_frames++; percpu_extras->tx_conf_bytes += fd_len; - fq->dq_frames++; - fq->dq_bytes += fd_len; - /* Check frame errors in the FD field */ fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK; - free_tx_fd(priv, fd); + free_tx_fd(priv, fq, fd, true); if (likely(!fd_errors)) return; @@ -903,7 +929,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv, { struct device *dev = priv->net_dev->dev.parent; u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; - void *buf; + struct page *page; dma_addr_t addr; int i, err; @@ -911,14 +937,16 @@ static int add_bufs(struct dpaa2_eth_priv *priv, /* Allocate buffer visible to WRIOP + skb shared info + * alignment padding */ - buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv)); - if (unlikely(!buf)) + /* allocate one page for each Rx buffer. 
WRIOP sees + * the entire page except for a tailroom reserved for + * skb shared info + */ + page = dev_alloc_pages(0); + if (!page) goto err_alloc; - buf = PTR_ALIGN(buf, priv->rx_buf_align); - - addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE, - DMA_BIDIRECTIONAL); + addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE, + DMA_BIDIRECTIONAL); if (unlikely(dma_mapping_error(dev, addr))) goto err_map; @@ -926,7 +954,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv, /* tracing point */ trace_dpaa2_eth_buf_seed(priv->net_dev, - buf, dpaa2_eth_buf_raw_size(priv), + page, DPAA2_ETH_RX_BUF_RAW_SIZE, addr, DPAA2_ETH_RX_BUF_SIZE, bpid); } @@ -948,7 +976,7 @@ release_bufs: return i; err_map: - skb_free_frag(buf); + __free_pages(page, 0); err_alloc: /* If we managed to allocate at least some buffers, * release them to hardware @@ -1083,6 +1111,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget) int err; ch = container_of(napi, struct dpaa2_eth_channel, napi); + ch->xdp.res = 0; priv = ch->priv; do { @@ -1128,7 +1157,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget) work_done = max(rx_cleaned, 1); out: - if (txc_fq) { + if (txc_fq && txc_fq->dq_frames) { nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid); netdev_tx_completed_queue(nq, txc_fq->dq_frames, txc_fq->dq_bytes); @@ -1136,6 +1165,9 @@ out: txc_fq->dq_bytes = 0; } + if (ch->xdp.res & XDP_REDIRECT) + xdp_do_flush_map(); + return work_done; } @@ -1243,34 +1275,36 @@ enable_err: return err; } -/* The DPIO store must be empty when we call this, - * at the end of every NAPI cycle. - */ -static u32 drain_channel(struct dpaa2_eth_channel *ch) +/* Total number of in-flight frames on ingress queues */ +static u32 ingress_fq_count(struct dpaa2_eth_priv *priv) { - u32 drained = 0, total = 0; + struct dpaa2_eth_fq *fq; + u32 fcnt = 0, bcnt = 0, total = 0; + int i, err; - do { - pull_channel(ch); - drained = consume_frames(ch, NULL); - total += drained; - } while (drained); + for (i = 0; i < priv->num_fqs; i++) { + fq = &priv->fq[i]; + err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt); + if (err) { + netdev_warn(priv->net_dev, "query_fq_count failed"); + break; + } + total += fcnt; + } return total; } -static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv) +static void wait_for_fq_empty(struct dpaa2_eth_priv *priv) { - struct dpaa2_eth_channel *ch; - int i; - u32 drained = 0; - - for (i = 0; i < priv->num_channels; i++) { - ch = priv->channel[i]; - drained += drain_channel(ch); - } + int retries = 10; + u32 pending; - return drained; + do { + pending = ingress_fq_count(priv); + if (pending) + msleep(100); + } while (pending && --retries); } static int dpaa2_eth_stop(struct net_device *net_dev) @@ -1278,14 +1312,22 @@ static int dpaa2_eth_stop(struct net_device *net_dev) struct dpaa2_eth_priv *priv = netdev_priv(net_dev); int dpni_enabled = 0; int retries = 10; - u32 drained; netif_tx_stop_all_queues(net_dev); netif_carrier_off(net_dev); - /* Loop while dpni_disable() attempts to drain the egress FQs - * and confirm them back to us. + /* On dpni_disable(), the MC firmware will: + * - stop MAC Rx and wait for all Rx frames to be enqueued to software + * - cut off WRIOP dequeues from egress FQs and wait until transmission + * of all in flight Tx frames is finished (and corresponding Tx conf + * frames are enqueued back to software) + * + * Before calling dpni_disable(), we wait for all Tx frames to arrive + * on WRIOP. 
After it finishes, wait until all remaining frames on Rx + * and Tx conf queues are consumed on NAPI poll. */ + msleep(500); + do { dpni_disable(priv->mc_io, 0, priv->mc_token); dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled); @@ -1300,19 +1342,9 @@ static int dpaa2_eth_stop(struct net_device *net_dev) */ } - /* Wait for NAPI to complete on every core and disable it. - * In particular, this will also prevent NAPI from being rescheduled if - * a new CDAN is serviced, effectively discarding the CDAN. We therefore - * don't even need to disarm the channels, except perhaps for the case - * of a huge coalescing value. - */ + wait_for_fq_empty(priv); disable_ch_napi(priv); - /* Manually drain the Rx and TxConf queues */ - drained = drain_ingress_frames(priv); - if (drained) - netdev_dbg(net_dev, "Drained %d frames.\n", drained); - /* Empty the buffer pool */ drain_pool(priv); @@ -1730,6 +1762,105 @@ static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp) return 0; } +static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev, + struct xdp_frame *xdpf) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct device *dev = net_dev->dev.parent; + struct rtnl_link_stats64 *percpu_stats; + struct dpaa2_eth_drv_stats *percpu_extras; + unsigned int needed_headroom; + struct dpaa2_eth_swa *swa; + struct dpaa2_eth_fq *fq; + struct dpaa2_fd fd; + void *buffer_start, *aligned_start; + dma_addr_t addr; + int err, i; + + /* We require a minimum headroom to be able to transmit the frame. + * Otherwise return an error and let the original net_device handle it + */ + needed_headroom = dpaa2_eth_needed_headroom(priv, NULL); + if (xdpf->headroom < needed_headroom) + return -EINVAL; + + percpu_stats = this_cpu_ptr(priv->percpu_stats); + percpu_extras = this_cpu_ptr(priv->percpu_extras); + + /* Setup the FD fields */ + memset(&fd, 0, sizeof(fd)); + + /* Align FD address, if possible */ + buffer_start = xdpf->data - needed_headroom; + aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, + DPAA2_ETH_TX_BUF_ALIGN); + if (aligned_start >= xdpf->data - xdpf->headroom) + buffer_start = aligned_start; + + swa = (struct dpaa2_eth_swa *)buffer_start; + /* fill in necessary fields here */ + swa->type = DPAA2_ETH_SWA_XDP; + swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start; + swa->xdp.xdpf = xdpf; + + addr = dma_map_single(dev, buffer_start, + swa->xdp.dma_size, + DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(dev, addr))) { + percpu_stats->tx_dropped++; + return -ENOMEM; + } + + dpaa2_fd_set_addr(&fd, addr); + dpaa2_fd_set_offset(&fd, xdpf->data - buffer_start); + dpaa2_fd_set_len(&fd, xdpf->len); + dpaa2_fd_set_format(&fd, dpaa2_fd_single); + dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA); + + fq = &priv->fq[smp_processor_id()]; + for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { + err = priv->enqueue(priv, fq, &fd, 0); + if (err != -EBUSY) + break; + } + percpu_extras->tx_portal_busy += i; + if (unlikely(err < 0)) { + percpu_stats->tx_errors++; + /* let the Rx device handle the cleanup */ + return err; + } + + percpu_stats->tx_packets++; + percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd); + + return 0; +} + +static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n, + struct xdp_frame **frames, u32 flags) +{ + int drops = 0; + int i, err; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + if (!netif_running(net_dev)) + return -ENETDOWN; + + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + + err = 
dpaa2_eth_xdp_xmit_frame(net_dev, xdpf); + if (err) { + xdp_return_frame_rx_napi(xdpf); + drops++; + } + } + + return n - drops; +} + static const struct net_device_ops dpaa2_eth_ops = { .ndo_open = dpaa2_eth_open, .ndo_start_xmit = dpaa2_eth_tx, @@ -1741,6 +1872,7 @@ static const struct net_device_ops dpaa2_eth_ops = { .ndo_do_ioctl = dpaa2_eth_ioctl, .ndo_change_mtu = dpaa2_eth_change_mtu, .ndo_bpf = dpaa2_eth_xdp, + .ndo_xdp_xmit = dpaa2_eth_xdp_xmit, }; static void cdan_cb(struct dpaa2_io_notification_ctx *ctx) @@ -1902,7 +2034,7 @@ static int setup_dpio(struct dpaa2_eth_priv *priv) /* Register the new context */ channel->dpio = dpaa2_io_service_select(i); - err = dpaa2_io_service_register(channel->dpio, nctx); + err = dpaa2_io_service_register(channel->dpio, nctx, dev); if (err) { dev_dbg(dev, "No affine DPIO for cpu %d\n", i); /* If no affine DPIO for this core, there's probably @@ -1942,7 +2074,7 @@ static int setup_dpio(struct dpaa2_eth_priv *priv) return 0; err_set_cdan: - dpaa2_io_service_deregister(channel->dpio, nctx); + dpaa2_io_service_deregister(channel->dpio, nctx, dev); err_service_reg: free_channel(priv, channel); err_alloc_ch: @@ -1962,13 +2094,14 @@ err_alloc_ch: static void free_dpio(struct dpaa2_eth_priv *priv) { - int i; + struct device *dev = priv->net_dev->dev.parent; struct dpaa2_eth_channel *ch; + int i; /* deregister CDAN notifications and free channels */ for (i = 0; i < priv->num_channels; i++) { ch = priv->channel[i]; - dpaa2_io_service_deregister(ch->dpio, &ch->nctx); + dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev); free_channel(priv, ch); } } @@ -2134,6 +2267,7 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv) { struct device *dev = priv->net_dev->dev.parent; struct dpni_buffer_layout buf_layout = {0}; + u16 rx_buf_align; int err; /* We need to check for WRIOP version 1.0.0, but depending on the MC @@ -2142,9 +2276,9 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv) */ if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) || priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0)) - priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1; + rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1; else - priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN; + rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN; /* tx buffer */ buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; @@ -2184,7 +2318,7 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv) /* rx buffer */ buf_layout.pass_frame_status = true; buf_layout.pass_parser_result = true; - buf_layout.data_align = priv->rx_buf_align; + buf_layout.data_align = rx_buf_align; buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv); buf_layout.private_data_size = 0; buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | @@ -2202,6 +2336,36 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv) return 0; } +#define DPNI_ENQUEUE_FQID_VER_MAJOR 7 +#define DPNI_ENQUEUE_FQID_VER_MINOR 9 + +static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_fq *fq, + struct dpaa2_fd *fd, u8 prio) +{ + return dpaa2_io_service_enqueue_qd(fq->channel->dpio, + priv->tx_qdid, prio, + fq->tx_qdbin, fd); +} + +static inline int dpaa2_eth_enqueue_fq(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_fq *fq, + struct dpaa2_fd *fd, + u8 prio __always_unused) +{ + return dpaa2_io_service_enqueue_fq(fq->channel->dpio, + fq->tx_fqid, fd); +} + +static void set_enqueue_mode(struct dpaa2_eth_priv *priv) +{ + if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR, + 
DPNI_ENQUEUE_FQID_VER_MINOR) < 0) + priv->enqueue = dpaa2_eth_enqueue_qd; + else + priv->enqueue = dpaa2_eth_enqueue_fq; +} + /* Configure the DPNI object this interface is associated with */ static int setup_dpni(struct fsl_mc_device *ls_dev) { @@ -2255,6 +2419,8 @@ static int setup_dpni(struct fsl_mc_device *ls_dev) if (err) goto close; + set_enqueue_mode(priv); + priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) * dpaa2_eth_fs_count(priv), GFP_KERNEL); if (!priv->cls_rules) @@ -2302,9 +2468,14 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv, queue.destination.type = DPNI_DEST_DPCON; queue.destination.priority = 1; queue.user_context = (u64)(uintptr_t)fq; + queue.flc.stash_control = 1; + queue.flc.value &= 0xFFFFFFFFFFFFFFC0; + /* 01 01 00 - data, annotation, flow context */ + queue.flc.value |= 0x14; err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, DPNI_QUEUE_RX, 0, fq->flowid, - DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, + DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST | + DPNI_QUEUE_OPT_FLC, &queue); if (err) { dev_err(dev, "dpni_set_queue(RX) failed\n"); @@ -2320,6 +2491,21 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv, return err; } + /* xdp_rxq setup */ + err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev, + fq->flowid); + if (err) { + dev_err(dev, "xdp_rxq_info_reg failed\n"); + return err; + } + + err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq, + MEM_TYPE_PAGE_ORDER0, NULL); + if (err) { + dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n"); + return err; + } + return 0; } @@ -2339,6 +2525,7 @@ static int setup_tx_flow(struct dpaa2_eth_priv *priv, } fq->tx_qdbin = qid.qdbin; + fq->tx_fqid = qid.fqid; err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, @@ -3083,6 +3270,10 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) goto err_netdev_reg; } +#ifdef CONFIG_DEBUG_FS + dpaa2_dbg_add(priv); +#endif + dev_info(dev, "Probed interface %s\n", net_dev->name); return 0; @@ -3126,6 +3317,9 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) net_dev = dev_get_drvdata(dev); priv = netdev_priv(net_dev); +#ifdef CONFIG_DEBUG_FS + dpaa2_dbg_remove(priv); +#endif unregister_netdev(net_dev); if (priv->do_link_poll) @@ -3170,4 +3364,25 @@ static struct fsl_mc_driver dpaa2_eth_driver = { .match_id_table = dpaa2_eth_match_id_table }; -module_fsl_mc_driver(dpaa2_eth_driver); +static int __init dpaa2_eth_driver_init(void) +{ + int err; + + dpaa2_eth_dbg_init(); + err = fsl_mc_driver_register(&dpaa2_eth_driver); + if (err) { + dpaa2_eth_dbg_exit(); + return err; + } + + return 0; +} + +static void __exit dpaa2_eth_driver_exit(void) +{ + dpaa2_eth_dbg_exit(); + fsl_mc_driver_unregister(&dpaa2_eth_driver); +} + +module_init(dpaa2_eth_driver_init); +module_exit(dpaa2_eth_driver_exit); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h index 69c965de192b..7879622aa3e6 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h @@ -16,6 +16,7 @@ #include "dpni-cmd.h" #include "dpaa2-eth-trace.h" +#include "dpaa2-eth-debugfs.h" #define DPAA2_WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0) @@ -52,7 +53,8 @@ */ #define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64) #define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256) -#define DPAA2_ETH_REFILL_THRESH DPAA2_ETH_MAX_FRAMES_PER_QUEUE +#define DPAA2_ETH_REFILL_THRESH \ + 
(DPAA2_ETH_NUM_BUFS - DPAA2_ETH_BUFS_PER_CMD) /* Maximum number of buffers that can be acquired/released through a single * QBMan command @@ -62,9 +64,11 @@ /* Hardware requires alignment for ingress/egress buffer addresses */ #define DPAA2_ETH_TX_BUF_ALIGN 64 -#define DPAA2_ETH_RX_BUF_SIZE 2048 -#define DPAA2_ETH_SKB_SIZE \ - (DPAA2_ETH_RX_BUF_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define DPAA2_ETH_RX_BUF_RAW_SIZE PAGE_SIZE +#define DPAA2_ETH_RX_BUF_TAILROOM \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +#define DPAA2_ETH_RX_BUF_SIZE \ + (DPAA2_ETH_RX_BUF_RAW_SIZE - DPAA2_ETH_RX_BUF_TAILROOM) /* Hardware annotation area in RX/TX buffers */ #define DPAA2_ETH_RX_HWA_SIZE 64 @@ -85,12 +89,33 @@ */ #define DPAA2_ETH_SWA_SIZE 64 +/* We store different information in the software annotation area of a Tx frame + * based on what type of frame it is + */ +enum dpaa2_eth_swa_type { + DPAA2_ETH_SWA_SINGLE, + DPAA2_ETH_SWA_SG, + DPAA2_ETH_SWA_XDP, +}; + /* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */ struct dpaa2_eth_swa { - struct sk_buff *skb; - struct scatterlist *scl; - int num_sg; - int sgt_size; + enum dpaa2_eth_swa_type type; + union { + struct { + struct sk_buff *skb; + } single; + struct { + struct sk_buff *skb; + struct scatterlist *scl; + int num_sg; + int sgt_size; + } sg; + struct { + int dma_size; + struct xdp_frame *xdpf; + } xdp; + }; }; /* Annotation valid bits in FD FRC */ @@ -253,6 +278,7 @@ struct dpaa2_eth_ch_stats { __u64 xdp_drop; __u64 xdp_tx; __u64 xdp_tx_err; + __u64 xdp_redirect; }; /* Maximum number of queues associated with a DPNI */ @@ -273,6 +299,7 @@ struct dpaa2_eth_priv; struct dpaa2_eth_fq { u32 fqid; u32 tx_qdbin; + u32 tx_fqid; u16 flowid; int target_cpu; u32 dq_frames; @@ -291,6 +318,7 @@ struct dpaa2_eth_ch_xdp { struct bpf_prog *prog; u64 drop_bufs[DPAA2_ETH_BUFS_PER_CMD]; int drop_cnt; + unsigned int res; }; struct dpaa2_eth_channel { @@ -305,6 +333,7 @@ struct dpaa2_eth_channel { int buf_count; struct dpaa2_eth_ch_stats stats; struct dpaa2_eth_ch_xdp xdp; + struct xdp_rxq_info xdp_rxq; }; struct dpaa2_eth_dist_fields { @@ -325,6 +354,9 @@ struct dpaa2_eth_priv { u8 num_fqs; struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES]; + int (*enqueue)(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_fq *fq, + struct dpaa2_fd *fd, u8 prio); u8 num_channels; struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS]; @@ -342,7 +374,6 @@ struct dpaa2_eth_priv { bool rx_tstamp; /* Rx timestamping enabled */ u16 tx_qdid; - u16 rx_buf_align; struct fsl_mc_io *mc_io; /* Cores which have an affine DPIO/DPCON. 
* This is the cpu set on which Rx and Tx conf frames are processed @@ -365,6 +396,9 @@ struct dpaa2_eth_priv { struct dpaa2_eth_cls_rule *cls_rules; u8 rx_cls_enabled; struct bpf_prog *xdp_prog; +#ifdef CONFIG_DEBUG_FS + struct dpaa2_debugfs dbg; +#endif }; #define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \ @@ -405,26 +439,27 @@ static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv, #define dpaa2_eth_fs_count(priv) \ ((priv)->dpni_attrs.fs_entries) +/* We have exactly one {Rx, Tx conf} queue per channel */ +#define dpaa2_eth_queue_count(priv) \ + ((priv)->num_channels) + enum dpaa2_eth_rx_dist { DPAA2_ETH_RX_DIST_HASH, DPAA2_ETH_RX_DIST_CLS }; -/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but the skb built around - * the buffer also needs space for its shared info struct, and we need - * to allocate enough to accommodate hardware alignment restrictions - */ -static inline unsigned int dpaa2_eth_buf_raw_size(struct dpaa2_eth_priv *priv) -{ - return DPAA2_ETH_SKB_SIZE + priv->rx_buf_align; -} - static inline unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv, struct sk_buff *skb) { unsigned int headroom = DPAA2_ETH_SWA_SIZE; + /* If we don't have an skb (e.g. XDP buffer), we only need space for + * the software annotation area + */ + if (!skb) + return headroom; + /* For non-linear skbs we have no headroom requirement, as we build a * SG frame with a newly allocated SGT buffer */ @@ -443,14 +478,7 @@ unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv, */ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv) { - return priv->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN - - DPAA2_ETH_RX_HWA_SIZE; -} - -/* We have exactly one {Rx, Tx conf} queue per channel */ -static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv) -{ - return priv->num_channels; + return priv->tx_data_offset - DPAA2_ETH_RX_HWA_SIZE; } int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c index a7389e722c49..591dfcf76adb 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -48,6 +48,7 @@ static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = { "[drv] xdp drop", "[drv] xdp tx", "[drv] xdp tx errors", + "[drv] xdp redirect", /* FQ stats */ "[qbman] rx pending frames", "[qbman] rx pending bytes", diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig new file mode 100644 index 000000000000..8429f5c1d810 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/Kconfig @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: GPL-2.0 +config FSL_ENETC + tristate "ENETC PF driver" + depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST) + help + This driver supports NXP ENETC gigabit ethernet controller PCIe + physical function (PF) devices, managing ENETC Ports at a privileged + level. + + If compiled as module (M), the module name is fsl-enetc. + +config FSL_ENETC_VF + tristate "ENETC VF driver" + depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST) + help + This driver supports NXP ENETC gigabit ethernet controller PCIe + virtual function (VF) devices enabled by the ENETC PF driver. + + If compiled as module (M), the module name is fsl-enetc-vf. 
+ +config FSL_ENETC_PTP_CLOCK + tristate "ENETC PTP clock driver" + depends on PTP_1588_CLOCK_QORIQ && (FSL_ENETC || FSL_ENETC_VF) + default y + help + This driver adds support for using the ENETC 1588 timer + as a PTP clock. This clock is only useful if your PTP + programs are getting hardware time stamps on the PTP Ethernet + packets using the SO_TIMESTAMPING API. + + If compiled as module (M), the module name is fsl-enetc-ptp. diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile new file mode 100644 index 000000000000..7139e414dccf --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/Makefile @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o +fsl-enetc-$(CONFIG_FSL_ENETC) += enetc.o enetc_cbdr.o enetc_ethtool.o \ + enetc_mdio.o +fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o +fsl-enetc-objs := enetc_pf.o $(fsl-enetc-y) + +obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o + +ifeq ($(CONFIG_FSL_ENETC)$(CONFIG_FSL_ENETC_VF), yy) +fsl-enetc-vf-objs := enetc_vf.o +else +fsl-enetc-vf-$(CONFIG_FSL_ENETC_VF) += enetc.o enetc_cbdr.o \ + enetc_ethtool.o +fsl-enetc-vf-objs := enetc_vf.o $(fsl-enetc-vf-y) +endif + +obj-$(CONFIG_FSL_ENETC_PTP_CLOCK) += fsl-enetc-ptp.o +fsl-enetc-ptp-$(CONFIG_FSL_ENETC_PTP_CLOCK) += enetc_ptp.o diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c new file mode 100644 index 000000000000..5bb9eb35d76d --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -0,0 +1,1604 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2017-2019 NXP */ + +#include "enetc.h" +#include <linux/tcp.h> +#include <linux/udp.h> +#include <linux/of_mdio.h> +#include <linux/vmalloc.h> + +/* ENETC overhead: optional extension BD + 1 BD gap */ +#define ENETC_TXBDS_NEEDED(val) ((val) + 2) +/* max # of chained Tx BDs is 15, including head and extension BD */ +#define ENETC_MAX_SKB_FRAGS 13 +#define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1) + +static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb); + +netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_bdr *tx_ring; + int count; + + tx_ring = priv->tx_ring[skb->queue_mapping]; + + if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS)) + if (unlikely(skb_linearize(skb))) + goto drop_packet_err; + + count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */ + if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) { + netif_stop_subqueue(ndev, tx_ring->index); + return NETDEV_TX_BUSY; + } + + count = enetc_map_tx_buffs(tx_ring, skb); + if (unlikely(!count)) + goto drop_packet_err; + + if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED) + netif_stop_subqueue(ndev, tx_ring->index); + + return NETDEV_TX_OK; + +drop_packet_err: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static bool enetc_tx_csum(struct sk_buff *skb, union enetc_tx_bd *txbd) +{ + int l3_start, l3_hsize; + u16 l3_flags, l4_flags; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return false; + + switch (skb->csum_offset) { + case offsetof(struct tcphdr, check): + l4_flags = ENETC_TXBD_L4_TCP; + break; + case offsetof(struct udphdr, check): + l4_flags = ENETC_TXBD_L4_UDP; + break; + default: + skb_checksum_help(skb); + return false; + } + + l3_start = skb_network_offset(skb); + l3_hsize = skb_network_header_len(skb); + + l3_flags = 0; + if (skb->protocol == 
htons(ETH_P_IPV6)) + l3_flags = ENETC_TXBD_L3_IPV6; + + /* write BD fields */ + txbd->l3_csoff = enetc_txbd_l3_csoff(l3_start, l3_hsize, l3_flags); + txbd->l4_csoff = l4_flags; + + return true; +} + +static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring, + struct enetc_tx_swbd *tx_swbd) +{ + if (tx_swbd->is_dma_page) + dma_unmap_page(tx_ring->dev, tx_swbd->dma, + tx_swbd->len, DMA_TO_DEVICE); + else + dma_unmap_single(tx_ring->dev, tx_swbd->dma, + tx_swbd->len, DMA_TO_DEVICE); + tx_swbd->dma = 0; +} + +static void enetc_free_tx_skb(struct enetc_bdr *tx_ring, + struct enetc_tx_swbd *tx_swbd) +{ + if (tx_swbd->dma) + enetc_unmap_tx_buff(tx_ring, tx_swbd); + + if (tx_swbd->skb) { + dev_kfree_skb_any(tx_swbd->skb); + tx_swbd->skb = NULL; + } +} + +static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) +{ + struct enetc_tx_swbd *tx_swbd; + struct skb_frag_struct *frag; + int len = skb_headlen(skb); + union enetc_tx_bd temp_bd; + union enetc_tx_bd *txbd; + bool do_vlan, do_tstamp; + int i, count = 0; + unsigned int f; + dma_addr_t dma; + u8 flags = 0; + + i = tx_ring->next_to_use; + txbd = ENETC_TXBD(*tx_ring, i); + prefetchw(txbd); + + dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tx_ring->dev, dma))) + goto dma_err; + + temp_bd.addr = cpu_to_le64(dma); + temp_bd.buf_len = cpu_to_le16(len); + temp_bd.lstatus = 0; + + tx_swbd = &tx_ring->tx_swbd[i]; + tx_swbd->dma = dma; + tx_swbd->len = len; + tx_swbd->is_dma_page = 0; + count++; + + do_vlan = skb_vlan_tag_present(skb); + do_tstamp = skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP; + + if (do_vlan || do_tstamp) + flags |= ENETC_TXBD_FLAGS_EX; + + if (enetc_tx_csum(skb, &temp_bd)) + flags |= ENETC_TXBD_FLAGS_CSUM | ENETC_TXBD_FLAGS_L4CS; + + /* first BD needs frm_len and offload flags set */ + temp_bd.frm_len = cpu_to_le16(skb->len); + temp_bd.flags = flags; + + if (flags & ENETC_TXBD_FLAGS_EX) { + u8 e_flags = 0; + *txbd = temp_bd; + enetc_clear_tx_bd(&temp_bd); + + /* add extension BD for VLAN and/or timestamping */ + flags = 0; + tx_swbd++; + txbd++; + i++; + if (unlikely(i == tx_ring->bd_count)) { + i = 0; + tx_swbd = tx_ring->tx_swbd; + txbd = ENETC_TXBD(*tx_ring, 0); + } + prefetchw(txbd); + + if (do_vlan) { + temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb)); + temp_bd.ext.tpid = 0; /* < C-TAG */ + e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS; + } + + if (do_tstamp) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP; + } + + temp_bd.ext.e_flags = e_flags; + count++; + } + + frag = &skb_shinfo(skb)->frags[0]; + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) { + len = skb_frag_size(frag); + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len, + DMA_TO_DEVICE); + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_err; + + *txbd = temp_bd; + enetc_clear_tx_bd(&temp_bd); + + flags = 0; + tx_swbd++; + txbd++; + i++; + if (unlikely(i == tx_ring->bd_count)) { + i = 0; + tx_swbd = tx_ring->tx_swbd; + txbd = ENETC_TXBD(*tx_ring, 0); + } + prefetchw(txbd); + + temp_bd.addr = cpu_to_le64(dma); + temp_bd.buf_len = cpu_to_le16(len); + + tx_swbd->dma = dma; + tx_swbd->len = len; + tx_swbd->is_dma_page = 1; + count++; + } + + /* last BD needs 'F' bit set */ + flags |= ENETC_TXBD_FLAGS_F; + temp_bd.flags = flags; + *txbd = temp_bd; + + tx_ring->tx_swbd[i].skb = skb; + + enetc_bdr_idx_inc(tx_ring, &i); + tx_ring->next_to_use = i; + + /* let H/W know BD ring has been updated */ + enetc_wr_reg(tx_ring->tpir, i); /* includes wmb() 
*/ + + return count; + +dma_err: + dev_err(tx_ring->dev, "DMA map error"); + + do { + tx_swbd = &tx_ring->tx_swbd[i]; + enetc_free_tx_skb(tx_ring, tx_swbd); + if (i == 0) + i = tx_ring->bd_count; + i--; + } while (count--); + + return 0; +} + +static irqreturn_t enetc_msix(int irq, void *data) +{ + struct enetc_int_vector *v = data; + int i; + + /* disable interrupts */ + enetc_wr_reg(v->rbier, 0); + + for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings) + enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0); + + napi_schedule_irqoff(&v->napi); + + return IRQ_HANDLED; +} + +static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget); +static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, + struct napi_struct *napi, int work_limit); + +static int enetc_poll(struct napi_struct *napi, int budget) +{ + struct enetc_int_vector + *v = container_of(napi, struct enetc_int_vector, napi); + bool complete = true; + int work_done; + int i; + + for (i = 0; i < v->count_tx_rings; i++) + if (!enetc_clean_tx_ring(&v->tx_ring[i], budget)) + complete = false; + + work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget); + if (work_done == budget) + complete = false; + + if (!complete) + return budget; + + napi_complete_done(napi, work_done); + + /* enable interrupts */ + enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE); + + for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings) + enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), + ENETC_TBIER_TXTIE); + + return work_done; +} + +static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci) +{ + int pi = enetc_rd_reg(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK; + + return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi; +} + +static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget) +{ + struct net_device *ndev = tx_ring->ndev; + int tx_frm_cnt = 0, tx_byte_cnt = 0; + struct enetc_tx_swbd *tx_swbd; + int i, bds_to_clean; + + i = tx_ring->next_to_clean; + tx_swbd = &tx_ring->tx_swbd[i]; + bds_to_clean = enetc_bd_ready_count(tx_ring, i); + + while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) { + bool is_eof = !!tx_swbd->skb; + + enetc_unmap_tx_buff(tx_ring, tx_swbd); + if (is_eof) { + napi_consume_skb(tx_swbd->skb, napi_budget); + tx_swbd->skb = NULL; + } + + tx_byte_cnt += tx_swbd->len; + + bds_to_clean--; + tx_swbd++; + i++; + if (unlikely(i == tx_ring->bd_count)) { + i = 0; + tx_swbd = tx_ring->tx_swbd; + } + + /* BD iteration loop end */ + if (is_eof) { + tx_frm_cnt++; + /* re-arm interrupt source */ + enetc_wr_reg(tx_ring->idr, BIT(tx_ring->index) | + BIT(16 + tx_ring->index)); + } + + if (unlikely(!bds_to_clean)) + bds_to_clean = enetc_bd_ready_count(tx_ring, i); + } + + tx_ring->next_to_clean = i; + tx_ring->stats.packets += tx_frm_cnt; + tx_ring->stats.bytes += tx_byte_cnt; + + if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) && + __netif_subqueue_stopped(ndev, tx_ring->index) && + (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) { + netif_wake_subqueue(ndev, tx_ring->index); + } + + return tx_frm_cnt != ENETC_DEFAULT_TX_WORK; +} + +static bool enetc_new_page(struct enetc_bdr *rx_ring, + struct enetc_rx_swbd *rx_swbd) +{ + struct page *page; + dma_addr_t addr; + + page = dev_alloc_page(); + if (unlikely(!page)) + return false; + + addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(rx_ring->dev, addr))) { + __free_page(page); + + return false; + } + + rx_swbd->dma = addr; + rx_swbd->page = page; + rx_swbd->page_offset = ENETC_RXB_PAD; + + return true; +} 
+ +static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt) +{ + struct enetc_rx_swbd *rx_swbd; + union enetc_rx_bd *rxbd; + int i, j; + + i = rx_ring->next_to_use; + rx_swbd = &rx_ring->rx_swbd[i]; + rxbd = ENETC_RXBD(*rx_ring, i); + + for (j = 0; j < buff_cnt; j++) { + /* try reuse page */ + if (unlikely(!rx_swbd->page)) { + if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) { + rx_ring->stats.rx_alloc_errs++; + break; + } + } + + /* update RxBD */ + rxbd->w.addr = cpu_to_le64(rx_swbd->dma + + rx_swbd->page_offset); + /* clear 'R" as well */ + rxbd->r.lstatus = 0; + + rx_swbd++; + rxbd++; + i++; + if (unlikely(i == rx_ring->bd_count)) { + i = 0; + rx_swbd = rx_ring->rx_swbd; + rxbd = ENETC_RXBD(*rx_ring, 0); + } + } + + if (likely(j)) { + rx_ring->next_to_alloc = i; /* keep track from page reuse */ + rx_ring->next_to_use = i; + /* update ENETC's consumer index */ + enetc_wr_reg(rx_ring->rcir, i); + } + + return j; +} + +static void enetc_get_offloads(struct enetc_bdr *rx_ring, + union enetc_rx_bd *rxbd, struct sk_buff *skb) +{ + /* TODO: add tstamp, hashing */ + if (rx_ring->ndev->features & NETIF_F_RXCSUM) { + u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum); + + skb->csum = csum_unfold((__force __sum16)~htons(inet_csum)); + skb->ip_summed = CHECKSUM_COMPLETE; + } + + /* copy VLAN to skb, if one is extracted, for now we assume it's a + * standard TPID, but HW also supports custom values + */ + if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + le16_to_cpu(rxbd->r.vlan_opt)); +} + +static void enetc_process_skb(struct enetc_bdr *rx_ring, + struct sk_buff *skb) +{ + skb_record_rx_queue(skb, rx_ring->index); + skb->protocol = eth_type_trans(skb, rx_ring->ndev); +} + +static bool enetc_page_reusable(struct page *page) +{ + return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1); +} + +static void enetc_reuse_page(struct enetc_bdr *rx_ring, + struct enetc_rx_swbd *old) +{ + struct enetc_rx_swbd *new; + + new = &rx_ring->rx_swbd[rx_ring->next_to_alloc]; + + /* next buf that may reuse a page */ + enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc); + + /* copy page reference */ + *new = *old; +} + +static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring, + int i, u16 size) +{ + struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; + + dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma, + rx_swbd->page_offset, + size, DMA_FROM_DEVICE); + return rx_swbd; +} + +static void enetc_put_rx_buff(struct enetc_bdr *rx_ring, + struct enetc_rx_swbd *rx_swbd) +{ + if (likely(enetc_page_reusable(rx_swbd->page))) { + rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE; + page_ref_inc(rx_swbd->page); + + enetc_reuse_page(rx_ring, rx_swbd); + + /* sync for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma, + rx_swbd->page_offset, + ENETC_RXB_DMA_SIZE, + DMA_FROM_DEVICE); + } else { + dma_unmap_page(rx_ring->dev, rx_swbd->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + } + + rx_swbd->page = NULL; +} + +static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring, + int i, u16 size) +{ + struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); + struct sk_buff *skb; + void *ba; + + ba = page_address(rx_swbd->page) + rx_swbd->page_offset; + skb = build_skb(ba - ENETC_RXB_PAD, ENETC_RXB_TRUESIZE); + if (unlikely(!skb)) { + rx_ring->stats.rx_alloc_errs++; + return NULL; + } + + skb_reserve(skb, ENETC_RXB_PAD); + __skb_put(skb, size); + + enetc_put_rx_buff(rx_ring, rx_swbd); 
+ + return skb; +} + +static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i, + u16 size, struct sk_buff *skb) +{ + struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page, + rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE); + + enetc_put_rx_buff(rx_ring, rx_swbd); +} + +#define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */ + +static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, + struct napi_struct *napi, int work_limit) +{ + int rx_frm_cnt = 0, rx_byte_cnt = 0; + int cleaned_cnt, i; + + cleaned_cnt = enetc_bd_unused(rx_ring); + /* next descriptor to process */ + i = rx_ring->next_to_clean; + + while (likely(rx_frm_cnt < work_limit)) { + union enetc_rx_bd *rxbd; + struct sk_buff *skb; + u32 bd_status; + u16 size; + + if (cleaned_cnt >= ENETC_RXBD_BUNDLE) { + int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt); + + cleaned_cnt -= count; + } + + rxbd = ENETC_RXBD(*rx_ring, i); + bd_status = le32_to_cpu(rxbd->r.lstatus); + if (!bd_status) + break; + + enetc_wr_reg(rx_ring->idr, BIT(rx_ring->index)); + dma_rmb(); /* for reading other rxbd fields */ + size = le16_to_cpu(rxbd->r.buf_len); + skb = enetc_map_rx_buff_to_skb(rx_ring, i, size); + if (!skb) + break; + + enetc_get_offloads(rx_ring, rxbd, skb); + + cleaned_cnt++; + rxbd++; + i++; + if (unlikely(i == rx_ring->bd_count)) { + i = 0; + rxbd = ENETC_RXBD(*rx_ring, 0); + } + + if (unlikely(bd_status & + ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) { + dev_kfree_skb(skb); + while (!(bd_status & ENETC_RXBD_LSTATUS_F)) { + dma_rmb(); + bd_status = le32_to_cpu(rxbd->r.lstatus); + rxbd++; + i++; + if (unlikely(i == rx_ring->bd_count)) { + i = 0; + rxbd = ENETC_RXBD(*rx_ring, 0); + } + } + + rx_ring->ndev->stats.rx_dropped++; + rx_ring->ndev->stats.rx_errors++; + + break; + } + + /* not last BD in frame? 
*/ + while (!(bd_status & ENETC_RXBD_LSTATUS_F)) { + bd_status = le32_to_cpu(rxbd->r.lstatus); + size = ENETC_RXB_DMA_SIZE; + + if (bd_status & ENETC_RXBD_LSTATUS_F) { + dma_rmb(); + size = le16_to_cpu(rxbd->r.buf_len); + } + + enetc_add_rx_buff_to_skb(rx_ring, i, size, skb); + + cleaned_cnt++; + rxbd++; + i++; + if (unlikely(i == rx_ring->bd_count)) { + i = 0; + rxbd = ENETC_RXBD(*rx_ring, 0); + } + } + + rx_byte_cnt += skb->len; + + enetc_process_skb(rx_ring, skb); + + napi_gro_receive(napi, skb); + + rx_frm_cnt++; + } + + rx_ring->next_to_clean = i; + + rx_ring->stats.packets += rx_frm_cnt; + rx_ring->stats.bytes += rx_byte_cnt; + + return rx_frm_cnt; +} + +/* Probing and Init */ +#define ENETC_MAX_RFS_SIZE 64 +void enetc_get_si_caps(struct enetc_si *si) +{ + struct enetc_hw *hw = &si->hw; + u32 val; + + /* find out how many of various resources we have to work with */ + val = enetc_rd(hw, ENETC_SICAPR0); + si->num_rx_rings = (val >> 16) & 0xff; + si->num_tx_rings = val & 0xff; + + val = enetc_rd(hw, ENETC_SIRFSCAPR); + si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val); + si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE); + + si->num_rss = 0; + val = enetc_rd(hw, ENETC_SIPCAPR0); + if (val & ENETC_SIPCAPR0_RSS) { + val = enetc_rd(hw, ENETC_SIRSSCAPR); + si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(val); + } +} + +static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size) +{ + r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size, + &r->bd_dma_base, GFP_KERNEL); + if (!r->bd_base) + return -ENOMEM; + + /* h/w requires 128B alignment */ + if (!IS_ALIGNED(r->bd_dma_base, 128)) { + dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base, + r->bd_dma_base); + return -EINVAL; + } + + return 0; +} + +static int enetc_alloc_txbdr(struct enetc_bdr *txr) +{ + int err; + + txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd)); + if (!txr->tx_swbd) + return -ENOMEM; + + err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd)); + if (err) { + vfree(txr->tx_swbd); + return err; + } + + txr->next_to_clean = 0; + txr->next_to_use = 0; + + return 0; +} + +static void enetc_free_txbdr(struct enetc_bdr *txr) +{ + int size, i; + + for (i = 0; i < txr->bd_count; i++) + enetc_free_tx_skb(txr, &txr->tx_swbd[i]); + + size = txr->bd_count * sizeof(union enetc_tx_bd); + + dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base); + txr->bd_base = NULL; + + vfree(txr->tx_swbd); + txr->tx_swbd = NULL; +} + +static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv) +{ + int i, err; + + for (i = 0; i < priv->num_tx_rings; i++) { + err = enetc_alloc_txbdr(priv->tx_ring[i]); + + if (err) + goto fail; + } + + return 0; + +fail: + while (i-- > 0) + enetc_free_txbdr(priv->tx_ring[i]); + + return err; +} + +static void enetc_free_tx_resources(struct enetc_ndev_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_free_txbdr(priv->tx_ring[i]); +} + +static int enetc_alloc_rxbdr(struct enetc_bdr *rxr) +{ + int err; + + rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd)); + if (!rxr->rx_swbd) + return -ENOMEM; + + err = enetc_dma_alloc_bdr(rxr, sizeof(union enetc_rx_bd)); + if (err) { + vfree(rxr->rx_swbd); + return err; + } + + rxr->next_to_clean = 0; + rxr->next_to_use = 0; + rxr->next_to_alloc = 0; + + return 0; +} + +static void enetc_free_rxbdr(struct enetc_bdr *rxr) +{ + int size; + + size = rxr->bd_count * sizeof(union enetc_rx_bd); + + dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base); + 
rxr->bd_base = NULL; + + vfree(rxr->rx_swbd); + rxr->rx_swbd = NULL; +} + +static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv) +{ + int i, err; + + for (i = 0; i < priv->num_rx_rings; i++) { + err = enetc_alloc_rxbdr(priv->rx_ring[i]); + + if (err) + goto fail; + } + + return 0; + +fail: + while (i-- > 0) + enetc_free_rxbdr(priv->rx_ring[i]); + + return err; +} + +static void enetc_free_rx_resources(struct enetc_ndev_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_free_rxbdr(priv->rx_ring[i]); +} + +static void enetc_free_tx_ring(struct enetc_bdr *tx_ring) +{ + int i; + + if (!tx_ring->tx_swbd) + return; + + for (i = 0; i < tx_ring->bd_count; i++) { + struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i]; + + enetc_free_tx_skb(tx_ring, tx_swbd); + } + + tx_ring->next_to_clean = 0; + tx_ring->next_to_use = 0; +} + +static void enetc_free_rx_ring(struct enetc_bdr *rx_ring) +{ + int i; + + if (!rx_ring->rx_swbd) + return; + + for (i = 0; i < rx_ring->bd_count; i++) { + struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; + + if (!rx_swbd->page) + continue; + + dma_unmap_page(rx_ring->dev, rx_swbd->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + __free_page(rx_swbd->page); + rx_swbd->page = NULL; + } + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->next_to_alloc = 0; +} + +static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_free_rx_ring(priv->rx_ring[i]); + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_free_tx_ring(priv->tx_ring[i]); +} + +static int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr) +{ + int size = cbdr->bd_count * sizeof(struct enetc_cbd); + + cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base, + GFP_KERNEL); + if (!cbdr->bd_base) + return -ENOMEM; + + /* h/w requires 128B alignment */ + if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) { + dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base); + return -EINVAL; + } + + cbdr->next_to_clean = 0; + cbdr->next_to_use = 0; + + return 0; +} + +static void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr) +{ + int size = cbdr->bd_count * sizeof(struct enetc_cbd); + + dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base); + cbdr->bd_base = NULL; +} + +static void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr) +{ + /* set CBDR cache attributes */ + enetc_wr(hw, ENETC_SICAR2, + ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); + + enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base)); + enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base)); + enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count)); + + enetc_wr(hw, ENETC_SICBDRPIR, 0); + enetc_wr(hw, ENETC_SICBDRCIR, 0); + + /* enable ring */ + enetc_wr(hw, ENETC_SICBDRMR, BIT(31)); + + cbdr->pir = hw->reg + ENETC_SICBDRPIR; + cbdr->cir = hw->reg + ENETC_SICBDRCIR; +} + +static void enetc_clear_cbdr(struct enetc_hw *hw) +{ + enetc_wr(hw, ENETC_SICBDRMR, 0); +} + +static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups) +{ + int *rss_table; + int i; + + rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL); + if (!rss_table) + return -ENOMEM; + + /* Set up RSS table defaults */ + for (i = 0; i < si->num_rss; i++) + rss_table[i] = i % num_groups; + + enetc_set_rss_table(si, rss_table, si->num_rss); + + kfree(rss_table); + + return 0; +} + +static int enetc_configure_si(struct enetc_ndev_priv *priv) +{ + struct enetc_si 
*si = priv->si; + struct enetc_hw *hw = &si->hw; + int err; + + enetc_setup_cbdr(hw, &si->cbd_ring); + /* set SI cache attributes */ + enetc_wr(hw, ENETC_SICAR0, + ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); + enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI); + /* enable SI */ + enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN); + + if (si->num_rss) { + err = enetc_setup_default_rss_table(si, priv->num_rx_rings); + if (err) + return err; + } + + return 0; +} + +void enetc_init_si_rings_params(struct enetc_ndev_priv *priv) +{ + struct enetc_si *si = priv->si; + int cpus = num_online_cpus(); + + priv->tx_bd_count = ENETC_BDR_DEFAULT_SIZE; + priv->rx_bd_count = ENETC_BDR_DEFAULT_SIZE; + + /* Enable all available TX rings in order to configure as many + * priorities as possible, when needed. + * TODO: Make # of TX rings run-time configurable + */ + priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings); + priv->num_tx_rings = si->num_tx_rings; + priv->bdr_int_num = cpus; + + /* SI specific */ + si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE; +} + +int enetc_alloc_si_resources(struct enetc_ndev_priv *priv) +{ + struct enetc_si *si = priv->si; + int err; + + err = enetc_alloc_cbdr(priv->dev, &si->cbd_ring); + if (err) + return err; + + priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules), + GFP_KERNEL); + if (!priv->cls_rules) { + err = -ENOMEM; + goto err_alloc_cls; + } + + err = enetc_configure_si(priv); + if (err) + goto err_config_si; + + return 0; + +err_config_si: + kfree(priv->cls_rules); +err_alloc_cls: + enetc_clear_cbdr(&si->hw); + enetc_free_cbdr(priv->dev, &si->cbd_ring); + + return err; +} + +void enetc_free_si_resources(struct enetc_ndev_priv *priv) +{ + struct enetc_si *si = priv->si; + + enetc_clear_cbdr(&si->hw); + enetc_free_cbdr(priv->dev, &si->cbd_ring); + + kfree(priv->cls_rules); +} + +static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) +{ + int idx = tx_ring->index; + u32 tbmr; + + enetc_txbdr_wr(hw, idx, ENETC_TBBAR0, + lower_32_bits(tx_ring->bd_dma_base)); + + enetc_txbdr_wr(hw, idx, ENETC_TBBAR1, + upper_32_bits(tx_ring->bd_dma_base)); + + WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */ + enetc_txbdr_wr(hw, idx, ENETC_TBLENR, + ENETC_RTBLENR_LEN(tx_ring->bd_count)); + + /* clearing PI/CI registers for Tx not supported, adjust sw indexes */ + tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR); + tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR); + + /* enable Tx ints by setting pkt thr to 1 */ + enetc_txbdr_wr(hw, idx, ENETC_TBICIR0, ENETC_TBICIR0_ICEN | 0x1); + + tbmr = ENETC_TBMR_EN; + if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) + tbmr |= ENETC_TBMR_VIH; + + /* enable ring */ + enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr); + + tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR); + tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR); + tx_ring->idr = hw->reg + ENETC_SITXIDR; +} + +static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) +{ + int idx = rx_ring->index; + u32 rbmr; + + enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0, + lower_32_bits(rx_ring->bd_dma_base)); + + enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1, + upper_32_bits(rx_ring->bd_dma_base)); + + WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */ + enetc_rxbdr_wr(hw, idx, ENETC_RBLENR, + ENETC_RTBLENR_LEN(rx_ring->bd_count)); + + enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE); + + enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0); + + /* enable Rx ints by setting pkt thr to 1 */ + 
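/* ENETC_RBICIR0_ICEN (BIT(31)) turns on interrupt coalescing for this
 * ring and the low-order bits hold the packet threshold, so ORing in
 * 0x1 below raises an interrupt for every received frame. A sketch of
 * a coarser, hypothetical setting (not used by this driver) would be:
 *
 *	enetc_rxbdr_wr(hw, idx, ENETC_RBICIR0, ENETC_RBICIR0_ICEN | 16);
 */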
enetc_rxbdr_wr(hw, idx, ENETC_RBICIR0, ENETC_RBICIR0_ICEN | 0x1); + + rbmr = ENETC_RBMR_EN; + if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) + rbmr |= ENETC_RBMR_VTE; + + rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR); + rx_ring->idr = hw->reg + ENETC_SIRXIDR; + + enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring)); + + /* enable ring */ + enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr); +} + +static void enetc_setup_bdrs(struct enetc_ndev_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]); + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]); +} + +static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) +{ + int idx = rx_ring->index; + + /* disable EN bit on ring */ + enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0); +} + +static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) +{ + int delay = 8, timeout = 100; + int idx = tx_ring->index; + + /* disable EN bit on ring */ + enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0); + + /* wait for busy to clear */ + while (delay < timeout && + enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) { + msleep(delay); + delay *= 2; + } + + if (delay >= timeout) + netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n", + idx); +} + +static void enetc_clear_bdrs(struct enetc_ndev_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]); + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]); + + udelay(1); +} + +static int enetc_setup_irqs(struct enetc_ndev_priv *priv) +{ + struct pci_dev *pdev = priv->si->pdev; + cpumask_t cpu_mask; + int i, j, err; + + for (i = 0; i < priv->bdr_int_num; i++) { + int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i); + struct enetc_int_vector *v = priv->int_vector[i]; + int entry = ENETC_BDR_INT_BASE_IDX + i; + struct enetc_hw *hw = &priv->si->hw; + + snprintf(v->name, sizeof(v->name), "%s-rxtx%d", + priv->ndev->name, i); + err = request_irq(irq, enetc_msix, 0, v->name, v); + if (err) { + dev_err(priv->dev, "request_irq() failed!\n"); + goto irq_err; + } + + v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER); + v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER); + + enetc_wr(hw, ENETC_SIMSIRRV(i), entry); + + for (j = 0; j < v->count_tx_rings; j++) { + int idx = v->tx_ring[j].index; + + enetc_wr(hw, ENETC_SIMSITRV(idx), entry); + } + cpumask_clear(&cpu_mask); + cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); + irq_set_affinity_hint(irq, &cpu_mask); + } + + return 0; + +irq_err: + while (i--) { + int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i); + + irq_set_affinity_hint(irq, NULL); + free_irq(irq, priv->int_vector[i]); + } + + return err; +} + +static void enetc_free_irqs(struct enetc_ndev_priv *priv) +{ + struct pci_dev *pdev = priv->si->pdev; + int i; + + for (i = 0; i < priv->bdr_int_num; i++) { + int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i); + + irq_set_affinity_hint(irq, NULL); + free_irq(irq, priv->int_vector[i]); + } +} + +static void enetc_enable_interrupts(struct enetc_ndev_priv *priv) +{ + int i; + + /* enable Tx & Rx event indication */ + for (i = 0; i < priv->num_rx_rings; i++) { + enetc_rxbdr_wr(&priv->si->hw, i, + ENETC_RBIER, ENETC_RBIER_RXTIE); + } + + for (i = 0; i < priv->num_tx_rings; i++) { + enetc_txbdr_wr(&priv->si->hw, i, + ENETC_TBIER, ENETC_TBIER_TXTIE); + } +} + +static void 
enetc_disable_interrupts(struct enetc_ndev_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0); + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0); +} + +static void adjust_link(struct net_device *ndev) +{ + struct phy_device *phydev = ndev->phydev; + + phy_print_status(phydev); +} + +static int enetc_phy_connect(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct phy_device *phydev; + + if (!priv->phy_node) + return 0; /* phy-less mode */ + + phydev = of_phy_connect(ndev, priv->phy_node, &adjust_link, + 0, priv->if_mode); + if (!phydev) { + dev_err(&ndev->dev, "could not attach to PHY\n"); + return -ENODEV; + } + + phy_attached_info(phydev); + + return 0; +} + +int enetc_open(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int i, err; + + err = enetc_setup_irqs(priv); + if (err) + return err; + + err = enetc_phy_connect(ndev); + if (err) + goto err_phy_connect; + + err = enetc_alloc_tx_resources(priv); + if (err) + goto err_alloc_tx; + + err = enetc_alloc_rx_resources(priv); + if (err) + goto err_alloc_rx; + + enetc_setup_bdrs(priv); + + err = netif_set_real_num_tx_queues(ndev, priv->num_tx_rings); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings); + if (err) + goto err_set_queues; + + for (i = 0; i < priv->bdr_int_num; i++) + napi_enable(&priv->int_vector[i]->napi); + + enetc_enable_interrupts(priv); + + if (ndev->phydev) + phy_start(ndev->phydev); + else + netif_carrier_on(ndev); + + netif_tx_start_all_queues(ndev); + + return 0; + +err_set_queues: + enetc_free_rx_resources(priv); +err_alloc_rx: + enetc_free_tx_resources(priv); +err_alloc_tx: + if (ndev->phydev) + phy_disconnect(ndev->phydev); +err_phy_connect: + enetc_free_irqs(priv); + + return err; +} + +int enetc_close(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int i; + + netif_tx_stop_all_queues(ndev); + + if (ndev->phydev) { + phy_stop(ndev->phydev); + phy_disconnect(ndev->phydev); + } else { + netif_carrier_off(ndev); + } + + for (i = 0; i < priv->bdr_int_num; i++) { + napi_synchronize(&priv->int_vector[i]->napi); + napi_disable(&priv->int_vector[i]->napi); + } + + enetc_disable_interrupts(priv); + enetc_clear_bdrs(priv); + + enetc_free_rxtx_rings(priv); + enetc_free_rx_resources(priv); + enetc_free_tx_resources(priv); + enetc_free_irqs(priv); + + return 0; +} + +struct net_device_stats *enetc_get_stats(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + unsigned long packets = 0, bytes = 0; + int i; + + for (i = 0; i < priv->num_rx_rings; i++) { + packets += priv->rx_ring[i]->stats.packets; + bytes += priv->rx_ring[i]->stats.bytes; + } + + stats->rx_packets = packets; + stats->rx_bytes = bytes; + bytes = 0; + packets = 0; + + for (i = 0; i < priv->num_tx_rings; i++) { + packets += priv->tx_ring[i]->stats.packets; + bytes += priv->tx_ring[i]->stats.bytes; + } + + stats->tx_packets = packets; + stats->tx_bytes = bytes; + + return stats; +} + +static int enetc_set_rss(struct net_device *ndev, int en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + u32 reg; + + enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings); + + reg = enetc_rd(hw, ENETC_SIMR); + reg &= ~ENETC_SIMR_RSSE; + reg |= (en) ? 
ENETC_SIMR_RSSE : 0; + enetc_wr(hw, ENETC_SIMR, reg); + + return 0; +} + +int enetc_set_features(struct net_device *ndev, + netdev_features_t features) +{ + netdev_features_t changed = ndev->features ^ features; + + if (changed & NETIF_F_RXHASH) + enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH)); + + return 0; +} + +int enetc_alloc_msix(struct enetc_ndev_priv *priv) +{ + struct pci_dev *pdev = priv->si->pdev; + int size, v_tx_rings; + int i, n, err, nvec; + + nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num; + /* allocate MSIX for both messaging and Rx/Tx interrupts */ + n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX); + + if (n < 0) + return n; + + if (n != nvec) + return -EPERM; + + /* # of tx rings per int vector */ + v_tx_rings = priv->num_tx_rings / priv->bdr_int_num; + size = sizeof(struct enetc_int_vector) + + sizeof(struct enetc_bdr) * v_tx_rings; + + for (i = 0; i < priv->bdr_int_num; i++) { + struct enetc_int_vector *v; + struct enetc_bdr *bdr; + int j; + + v = kzalloc(size, GFP_KERNEL); + if (!v) { + err = -ENOMEM; + goto fail; + } + + priv->int_vector[i] = v; + + netif_napi_add(priv->ndev, &v->napi, enetc_poll, + NAPI_POLL_WEIGHT); + v->count_tx_rings = v_tx_rings; + + for (j = 0; j < v_tx_rings; j++) { + int idx; + + /* default tx ring mapping policy */ + if (priv->bdr_int_num == ENETC_MAX_BDR_INT) + idx = 2 * j + i; /* 2 CPUs */ + else + idx = j + i * v_tx_rings; /* default */ + + __set_bit(idx, &v->tx_rings_map); + bdr = &v->tx_ring[j]; + bdr->index = idx; + bdr->ndev = priv->ndev; + bdr->dev = priv->dev; + bdr->bd_count = priv->tx_bd_count; + priv->tx_ring[idx] = bdr; + } + + bdr = &v->rx_ring; + bdr->index = i; + bdr->ndev = priv->ndev; + bdr->dev = priv->dev; + bdr->bd_count = priv->rx_bd_count; + priv->rx_ring[i] = bdr; + } + + return 0; + +fail: + while (i--) { + netif_napi_del(&priv->int_vector[i]->napi); + kfree(priv->int_vector[i]); + } + + pci_free_irq_vectors(pdev); + + return err; +} + +void enetc_free_msix(struct enetc_ndev_priv *priv) +{ + int i; + + for (i = 0; i < priv->bdr_int_num; i++) { + struct enetc_int_vector *v = priv->int_vector[i]; + + netif_napi_del(&v->napi); + } + + for (i = 0; i < priv->num_rx_rings; i++) + priv->rx_ring[i] = NULL; + + for (i = 0; i < priv->num_tx_rings; i++) + priv->tx_ring[i] = NULL; + + for (i = 0; i < priv->bdr_int_num; i++) { + kfree(priv->int_vector[i]); + priv->int_vector[i] = NULL; + } + + /* disable all MSIX for this device */ + pci_free_irq_vectors(priv->si->pdev); +} + +static void enetc_kfree_si(struct enetc_si *si) +{ + char *p = (char *)si - si->pad; + + kfree(p); +} + +static void enetc_detect_errata(struct enetc_si *si) +{ + if (si->pdev->revision == ENETC_REV1) + si->errata = ENETC_ERR_TXCSUM | ENETC_ERR_VLAN_ISOL | + ENETC_ERR_UCMCSWP; +} + +int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv) +{ + struct enetc_si *si, *p; + struct enetc_hw *hw; + size_t alloc_size; + int err, len; + + pcie_flr(pdev); + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, "device enable failed\n"); + return err; + } + + /* set up for high or low dma */ + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "DMA configuration failed: 0x%x\n", err); + goto err_dma; + } + } + + err = pci_request_mem_regions(pdev, name); + if (err) { + dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err); + goto err_pci_mem_reg; + } + + pci_set_master(pdev); + + 
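/* The allocation below over-sizes by ENETC_SI_ALIGN - 1 bytes because
 * kzalloc() alone does not guarantee 32B alignment; PTR_ALIGN() then
 * rounds the raw pointer p up to the next boundary, and the distance
 * is remembered in si->pad so enetc_kfree_si() can undo the shift
 * before calling kfree(). Sketch of the arithmetic, assuming
 * ENETC_SI_ALIGN == 32:
 *
 *	si = PTR_ALIGN(p, 32);             // first 32B boundary at/above p
 *	si->pad = (char *)si - (char *)p;  // 0..31, used by the free path
 */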
alloc_size = sizeof(struct enetc_si); + if (sizeof_priv) { + /* align priv to 32B */ + alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN); + alloc_size += sizeof_priv; + } + /* force 32B alignment for enetc_si */ + alloc_size += ENETC_SI_ALIGN - 1; + + p = kzalloc(alloc_size, GFP_KERNEL); + if (!p) { + err = -ENOMEM; + goto err_alloc_si; + } + + si = PTR_ALIGN(p, ENETC_SI_ALIGN); + si->pad = (char *)si - (char *)p; + + pci_set_drvdata(pdev, si); + si->pdev = pdev; + hw = &si->hw; + + len = pci_resource_len(pdev, ENETC_BAR_REGS); + hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len); + if (!hw->reg) { + err = -ENXIO; + dev_err(&pdev->dev, "ioremap() failed\n"); + goto err_ioremap; + } + if (len > ENETC_PORT_BASE) + hw->port = hw->reg + ENETC_PORT_BASE; + if (len > ENETC_GLOBAL_BASE) + hw->global = hw->reg + ENETC_GLOBAL_BASE; + + enetc_detect_errata(si); + + return 0; + +err_ioremap: + enetc_kfree_si(si); +err_alloc_si: + pci_release_mem_regions(pdev); +err_pci_mem_reg: +err_dma: + pci_disable_device(pdev); + + return err; +} + +void enetc_pci_remove(struct pci_dev *pdev) +{ + struct enetc_si *si = pci_get_drvdata(pdev); + struct enetc_hw *hw = &si->hw; + + iounmap(hw->reg); + enetc_kfree_si(si); + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h new file mode 100644 index 000000000000..b274135c5103 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -0,0 +1,230 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2017-2019 NXP */ + +#include <linux/timer.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/dma-mapping.h> +#include <linux/skbuff.h> +#include <linux/ethtool.h> +#include <linux/if_vlan.h> +#include <linux/phy.h> + +#include "enetc_hw.h" + +#define ENETC_MAC_MAXFRM_SIZE 9600 +#define ENETC_MAX_MTU (ENETC_MAC_MAXFRM_SIZE - \ + (ETH_FCS_LEN + ETH_HLEN + VLAN_HLEN)) + +struct enetc_tx_swbd { + struct sk_buff *skb; + dma_addr_t dma; + u16 len; + u16 is_dma_page; +}; + +#define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE +#define ENETC_RXB_TRUESIZE 2048 /* PAGE_SIZE >> 1 */ +#define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */ +#define ENETC_RXB_DMA_SIZE \ + (SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD) + +struct enetc_rx_swbd { + dma_addr_t dma; + struct page *page; + u16 page_offset; +}; + +struct enetc_ring_stats { + unsigned int packets; + unsigned int bytes; + unsigned int rx_alloc_errs; +}; + +#define ENETC_BDR_DEFAULT_SIZE 1024 +#define ENETC_DEFAULT_TX_WORK 256 + +struct enetc_bdr { + struct device *dev; /* for DMA mapping */ + struct net_device *ndev; + void *bd_base; /* points to Rx or Tx BD ring */ + union { + void __iomem *tpir; + void __iomem *rcir; + }; + u16 index; + int bd_count; /* # of BDs */ + int next_to_use; + int next_to_clean; + union { + struct enetc_tx_swbd *tx_swbd; + struct enetc_rx_swbd *rx_swbd; + }; + union { + void __iomem *tcir; /* Tx */ + int next_to_alloc; /* Rx */ + }; + void __iomem *idr; /* Interrupt Detect Register pointer */ + + struct enetc_ring_stats stats; + + dma_addr_t bd_dma_base; +} ____cacheline_aligned_in_smp; + +static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i) +{ + if (unlikely(++*i == bdr->bd_count)) + *i = 0; +} + +static inline int enetc_bd_unused(struct enetc_bdr *bdr) +{ + if (bdr->next_to_clean > bdr->next_to_use) + return bdr->next_to_clean - bdr->next_to_use - 1; + + 
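/* Fall-through case: next_to_use is at or ahead of next_to_clean, so
 * BDs [next_to_clean, next_to_use) are in flight and the free space
 * wraps around the end of the ring. One slot is always kept unused so
 * that a full ring can be told apart from an empty one. Worked example
 * with bd_count = 8, next_to_clean = 2, next_to_use = 6:
 * 8 + 2 - 6 - 1 = 3 BDs still free.
 */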
return bdr->bd_count + bdr->next_to_clean - bdr->next_to_use - 1; +} + +/* Control BD ring */ +#define ENETC_CBDR_DEFAULT_SIZE 64 +struct enetc_cbdr { + void *bd_base; /* points to Rx or Tx BD ring */ + void __iomem *pir; + void __iomem *cir; + + int bd_count; /* # of BDs */ + int next_to_use; + int next_to_clean; + + dma_addr_t bd_dma_base; +}; + +#define ENETC_TXBD(BDR, i) (&(((union enetc_tx_bd *)((BDR).bd_base))[i])) +#define ENETC_RXBD(BDR, i) (&(((union enetc_rx_bd *)((BDR).bd_base))[i])) + +struct enetc_msg_swbd { + void *vaddr; + dma_addr_t dma; + int size; +}; + +#define ENETC_REV1 0x1 +enum enetc_errata { + ENETC_ERR_TXCSUM = BIT(0), + ENETC_ERR_VLAN_ISOL = BIT(1), + ENETC_ERR_UCMCSWP = BIT(2), +}; + +/* PCI IEP device data */ +struct enetc_si { + struct pci_dev *pdev; + struct enetc_hw hw; + enum enetc_errata errata; + + struct net_device *ndev; /* back ref. */ + + struct enetc_cbdr cbd_ring; + + int num_rx_rings; /* how many rings are available in the SI */ + int num_tx_rings; + int num_fs_entries; + int num_rss; /* number of RSS buckets */ + unsigned short pad; +}; + +#define ENETC_SI_ALIGN 32 + +static inline void *enetc_si_priv(const struct enetc_si *si) +{ + return (char *)si + ALIGN(sizeof(struct enetc_si), ENETC_SI_ALIGN); +} + +static inline bool enetc_si_is_pf(struct enetc_si *si) +{ + return !!(si->hw.port); +} + +#define ENETC_MAX_NUM_TXQS 8 +#define ENETC_INT_NAME_MAX (IFNAMSIZ + 8) + +struct enetc_int_vector { + void __iomem *rbier; + void __iomem *tbier_base; + unsigned long tx_rings_map; + int count_tx_rings; + struct napi_struct napi; + char name[ENETC_INT_NAME_MAX]; + + struct enetc_bdr rx_ring ____cacheline_aligned_in_smp; + struct enetc_bdr tx_ring[0]; +}; + +struct enetc_cls_rule { + struct ethtool_rx_flow_spec fs; + int used; +}; + +#define ENETC_MAX_BDR_INT 2 /* fixed to max # of available cpus */ + +struct enetc_ndev_priv { + struct net_device *ndev; + struct device *dev; /* dma-mapping device */ + struct enetc_si *si; + + int bdr_int_num; /* number of Rx/Tx ring interrupts */ + struct enetc_int_vector *int_vector[ENETC_MAX_BDR_INT]; + u16 num_rx_rings, num_tx_rings; + u16 rx_bd_count, tx_bd_count; + + u16 msg_enable; + + struct enetc_bdr *tx_ring[16]; + struct enetc_bdr *rx_ring[16]; + + struct enetc_cls_rule *cls_rules; + + struct device_node *phy_node; + phy_interface_t if_mode; +}; + +/* Messaging */ + +/* VF-PF set primary MAC address message format */ +struct enetc_msg_cmd_set_primary_mac { + struct enetc_msg_cmd_header header; + struct sockaddr mac; +}; + +#define ENETC_CBD(R, i) (&(((struct enetc_cbd *)((R).bd_base))[i])) + +#define ENETC_CBDR_TIMEOUT 1000 /* usecs */ + +/* SI common */ +int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv); +void enetc_pci_remove(struct pci_dev *pdev); +int enetc_alloc_msix(struct enetc_ndev_priv *priv); +void enetc_free_msix(struct enetc_ndev_priv *priv); +void enetc_get_si_caps(struct enetc_si *si); +void enetc_init_si_rings_params(struct enetc_ndev_priv *priv); +int enetc_alloc_si_resources(struct enetc_ndev_priv *priv); +void enetc_free_si_resources(struct enetc_ndev_priv *priv); + +int enetc_open(struct net_device *ndev); +int enetc_close(struct net_device *ndev); +netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev); +struct net_device_stats *enetc_get_stats(struct net_device *ndev); +int enetc_set_features(struct net_device *ndev, + netdev_features_t features); +/* ethtool */ +void enetc_set_ethtool_ops(struct net_device *ndev); + +/* control buffer descriptor ring 
(CBDR) */ +int enetc_set_mac_flt_entry(struct enetc_si *si, int index, + char *mac_addr, int si_map); +int enetc_clear_mac_flt_entry(struct enetc_si *si, int index); +int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse, + int index); +void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes); +int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count); +int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c new file mode 100644 index 000000000000..de466b71bf8f --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2017-2019 NXP */ + +#include "enetc.h" + +static void enetc_clean_cbdr(struct enetc_si *si) +{ + struct enetc_cbdr *ring = &si->cbd_ring; + struct enetc_cbd *dest_cbd; + int i, status; + + i = ring->next_to_clean; + + while (enetc_rd_reg(ring->cir) != i) { + dest_cbd = ENETC_CBD(*ring, i); + status = dest_cbd->status_flags & ENETC_CBD_STATUS_MASK; + if (status) + dev_warn(&si->pdev->dev, "CMD err %04x for cmd %04x\n", + status, dest_cbd->cmd); + + memset(dest_cbd, 0, sizeof(*dest_cbd)); + + i = (i + 1) % ring->bd_count; + } + + ring->next_to_clean = i; +} + +static int enetc_cbd_unused(struct enetc_cbdr *r) +{ + return (r->next_to_clean - r->next_to_use - 1 + r->bd_count) % + r->bd_count; +} + +static int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd) +{ + struct enetc_cbdr *ring = &si->cbd_ring; + int timeout = ENETC_CBDR_TIMEOUT; + struct enetc_cbd *dest_cbd; + int i; + + if (unlikely(!ring->bd_base)) + return -EIO; + + if (unlikely(!enetc_cbd_unused(ring))) + enetc_clean_cbdr(si); + + i = ring->next_to_use; + dest_cbd = ENETC_CBD(*ring, i); + + /* copy command to the ring */ + *dest_cbd = *cbd; + i = (i + 1) % ring->bd_count; + + ring->next_to_use = i; + /* let H/W know BD ring has been updated */ + enetc_wr_reg(ring->pir, i); + + do { + if (enetc_rd_reg(ring->cir) == i) + break; + udelay(10); /* cannot sleep, rtnl_lock() */ + timeout -= 10; + } while (timeout); + + if (!timeout) + return -EBUSY; + + enetc_clean_cbdr(si); + + return 0; +} + +int enetc_clear_mac_flt_entry(struct enetc_si *si, int index) +{ + struct enetc_cbd cbd; + + memset(&cbd, 0, sizeof(cbd)); + + cbd.cls = 1; + cbd.status_flags = ENETC_CBD_FLAGS_SF; + cbd.index = cpu_to_le16(index); + + return enetc_send_cmd(si, &cbd); +} + +int enetc_set_mac_flt_entry(struct enetc_si *si, int index, + char *mac_addr, int si_map) +{ + struct enetc_cbd cbd; + u32 upper; + u16 lower; + + memset(&cbd, 0, sizeof(cbd)); + + /* fill up the "set" descriptor */ + cbd.cls = 1; + cbd.status_flags = ENETC_CBD_FLAGS_SF; + cbd.index = cpu_to_le16(index); + cbd.opt[3] = cpu_to_le32(si_map); + /* enable entry */ + cbd.opt[0] = cpu_to_le32(BIT(31)); + + upper = *(const u32 *)mac_addr; + lower = *(const u16 *)(mac_addr + 4); + cbd.addr[0] = cpu_to_le32(upper); + cbd.addr[1] = cpu_to_le32(lower); + + return enetc_send_cmd(si, &cbd); +} + +#define RFSE_ALIGN 64 +/* Set entry in RFS table */ +int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse, + int index) +{ + struct enetc_cbd cbd = {.cmd = 0}; + dma_addr_t dma, dma_align; + void *tmp, *tmp_align; + int err; + + /* fill up the "set" descriptor */ + cbd.cmd = 0; + cbd.cls = 4; + cbd.index = cpu_to_le16(index); + cbd.length = cpu_to_le16(sizeof(*rfse)); + cbd.opt[3] = cpu_to_le32(0); /* SI */ + + 
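/* The buffer set up below is over-allocated by RFSE_ALIGN bytes and
 * then aligned on both sides of the mapping, since the hardware
 * expects a 64B-aligned DMA address for the RFS entry:
 *
 *	dma_align = ALIGN(dma, RFSE_ALIGN);      // bus address
 *	tmp_align = PTR_ALIGN(tmp, RFSE_ALIGN);  // matching CPU address
 *
 * The two stay in sync as long as dma and tmp share their low six
 * bits, which holds for coherent allocations (they are at least page
 * aligned on common platforms).
 */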
tmp = dma_alloc_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN, + &dma, GFP_KERNEL); + if (!tmp) { + dev_err(&si->pdev->dev, "DMA mapping of RFS entry failed!\n"); + return -ENOMEM; + } + + dma_align = ALIGN(dma, RFSE_ALIGN); + tmp_align = PTR_ALIGN(tmp, RFSE_ALIGN); + memcpy(tmp_align, rfse, sizeof(*rfse)); + + cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align)); + cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align)); + + err = enetc_send_cmd(si, &cbd); + if (err) + dev_err(&si->pdev->dev, "FS entry add failed (%d)!", err); + + dma_free_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN, + tmp, dma); + + return err; +} + +#define RSSE_ALIGN 64 +static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count, + bool read) +{ + struct enetc_cbd cbd = {.cmd = 0}; + dma_addr_t dma, dma_align; + u8 *tmp, *tmp_align; + int err, i; + + if (count < RSSE_ALIGN) + /* HW only takes in a full 64 entry table */ + return -EINVAL; + + tmp = dma_alloc_coherent(&si->pdev->dev, count + RSSE_ALIGN, + &dma, GFP_KERNEL); + if (!tmp) { + dev_err(&si->pdev->dev, "DMA mapping of RSS table failed!\n"); + return -ENOMEM; + } + dma_align = ALIGN(dma, RSSE_ALIGN); + tmp_align = PTR_ALIGN(tmp, RSSE_ALIGN); + + if (!read) + for (i = 0; i < count; i++) + tmp_align[i] = (u8)(table[i]); + + /* fill up the descriptor */ + cbd.cmd = read ? 2 : 1; + cbd.cls = 3; + cbd.length = cpu_to_le16(count); + + cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align)); + cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align)); + + err = enetc_send_cmd(si, &cbd); + if (err) + dev_err(&si->pdev->dev, "RSS cmd failed (%d)!", err); + + if (read) + for (i = 0; i < count; i++) + table[i] = tmp_align[i]; + + dma_free_coherent(&si->pdev->dev, count + RSSE_ALIGN, tmp, dma); + + return err; +} + +/* Get RSS table */ +int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count) +{ + return enetc_cmd_rss_table(si, table, count, true); +} + +/* Set RSS table */ +int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count) +{ + return enetc_cmd_rss_table(si, (u32 *)table, count, false); +} diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c new file mode 100644 index 000000000000..1ecad9ffabae --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -0,0 +1,597 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2017-2019 NXP */ + +#include <linux/net_tstamp.h> +#include <linux/module.h> +#include "enetc.h" + +static const u32 enetc_si_regs[] = { + ENETC_SIMR, ENETC_SIPMAR0, ENETC_SIPMAR1, ENETC_SICBDRMR, + ENETC_SICBDRSR, ENETC_SICBDRBAR0, ENETC_SICBDRBAR1, ENETC_SICBDRPIR, + ENETC_SICBDRCIR, ENETC_SICBDRLENR, ENETC_SICAPR0, ENETC_SICAPR1, + ENETC_SIUEFDCR +}; + +static const u32 enetc_txbdr_regs[] = { + ENETC_TBMR, ENETC_TBSR, ENETC_TBBAR0, ENETC_TBBAR1, + ENETC_TBPIR, ENETC_TBCIR, ENETC_TBLENR, ENETC_TBIER +}; + +static const u32 enetc_rxbdr_regs[] = { + ENETC_RBMR, ENETC_RBSR, ENETC_RBBSR, ENETC_RBCIR, ENETC_RBBAR0, + ENETC_RBBAR1, ENETC_RBPIR, ENETC_RBLENR, ENETC_RBICIR0, ENETC_RBIER +}; + +static const u32 enetc_port_regs[] = { + ENETC_PMR, ENETC_PSR, ENETC_PSIPMR, ENETC_PSIPMAR0(0), + ENETC_PSIPMAR1(0), ENETC_PTXMBAR, ENETC_PCAPR0, ENETC_PCAPR1, + ENETC_PSICFGR0(0), ENETC_PRFSCAPR, ENETC_PTCMSDUR(0), + ENETC_PM0_CMD_CFG, ENETC_PM0_MAXFRM, ENETC_PM0_IF_MODE +}; + +static int enetc_get_reglen(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + 
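/* The length computed below is in bytes and matches the layout used
 * by enetc_get_regs(), which emits two u32 words per register: the
 * register offset followed by its value. For example, a PF with 8 Tx
 * rings, 2 Rx rings and the port registers exposed would report
 * (13 + 8 * 8 + 2 * 10 + 14) * 2 * sizeof(u32) bytes.
 */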
int len; + + len = ARRAY_SIZE(enetc_si_regs); + len += ARRAY_SIZE(enetc_txbdr_regs) * priv->num_tx_rings; + len += ARRAY_SIZE(enetc_rxbdr_regs) * priv->num_rx_rings; + + if (hw->port) + len += ARRAY_SIZE(enetc_port_regs); + + len *= sizeof(u32) * 2; /* store 2 entries per reg: addr and value */ + + return len; +} + +static void enetc_get_regs(struct net_device *ndev, struct ethtool_regs *regs, + void *regbuf) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + u32 *buf = (u32 *)regbuf; + int i, j; + u32 addr; + + for (i = 0; i < ARRAY_SIZE(enetc_si_regs); i++) { + *buf++ = enetc_si_regs[i]; + *buf++ = enetc_rd(hw, enetc_si_regs[i]); + } + + for (i = 0; i < priv->num_tx_rings; i++) { + for (j = 0; j < ARRAY_SIZE(enetc_txbdr_regs); j++) { + addr = ENETC_BDR(TX, i, enetc_txbdr_regs[j]); + + *buf++ = addr; + *buf++ = enetc_rd(hw, addr); + } + } + + for (i = 0; i < priv->num_rx_rings; i++) { + for (j = 0; j < ARRAY_SIZE(enetc_rxbdr_regs); j++) { + addr = ENETC_BDR(RX, i, enetc_rxbdr_regs[j]); + + *buf++ = addr; + *buf++ = enetc_rd(hw, addr); + } + } + + if (!hw->port) + return; + + for (i = 0; i < ARRAY_SIZE(enetc_port_regs); i++) { + addr = ENETC_PORT_BASE + enetc_port_regs[i]; + *buf++ = addr; + *buf++ = enetc_rd(hw, addr); + } +} + +static const struct { + int reg; + char name[ETH_GSTRING_LEN]; +} enetc_si_counters[] = { + { ENETC_SIROCT, "SI rx octets" }, + { ENETC_SIRFRM, "SI rx frames" }, + { ENETC_SIRUCA, "SI rx u-cast frames" }, + { ENETC_SIRMCA, "SI rx m-cast frames" }, + { ENETC_SITOCT, "SI tx octets" }, + { ENETC_SITFRM, "SI tx frames" }, + { ENETC_SITUCA, "SI tx u-cast frames" }, + { ENETC_SITMCA, "SI tx m-cast frames" }, + { ENETC_RBDCR(0), "Rx ring 0 discarded frames" }, + { ENETC_RBDCR(1), "Rx ring 1 discarded frames" }, + { ENETC_RBDCR(2), "Rx ring 2 discarded frames" }, + { ENETC_RBDCR(3), "Rx ring 3 discarded frames" }, + { ENETC_RBDCR(4), "Rx ring 4 discarded frames" }, + { ENETC_RBDCR(5), "Rx ring 5 discarded frames" }, + { ENETC_RBDCR(6), "Rx ring 6 discarded frames" }, + { ENETC_RBDCR(7), "Rx ring 7 discarded frames" }, + { ENETC_RBDCR(8), "Rx ring 8 discarded frames" }, + { ENETC_RBDCR(9), "Rx ring 9 discarded frames" }, + { ENETC_RBDCR(10), "Rx ring 10 discarded frames" }, + { ENETC_RBDCR(11), "Rx ring 11 discarded frames" }, + { ENETC_RBDCR(12), "Rx ring 12 discarded frames" }, + { ENETC_RBDCR(13), "Rx ring 13 discarded frames" }, + { ENETC_RBDCR(14), "Rx ring 14 discarded frames" }, + { ENETC_RBDCR(15), "Rx ring 15 discarded frames" }, +}; + +static const struct { + int reg; + char name[ETH_GSTRING_LEN]; +} enetc_port_counters[] = { + { ENETC_PM0_REOCT, "MAC rx ethernet octets" }, + { ENETC_PM0_RALN, "MAC rx alignment errors" }, + { ENETC_PM0_RXPF, "MAC rx valid pause frames" }, + { ENETC_PM0_RFRM, "MAC rx valid frames" }, + { ENETC_PM0_RFCS, "MAC rx fcs errors" }, + { ENETC_PM0_RVLAN, "MAC rx VLAN frames" }, + { ENETC_PM0_RERR, "MAC rx frame errors" }, + { ENETC_PM0_RUCA, "MAC rx unicast frames" }, + { ENETC_PM0_RMCA, "MAC rx multicast frames" }, + { ENETC_PM0_RBCA, "MAC rx broadcast frames" }, + { ENETC_PM0_RDRP, "MAC rx dropped packets" }, + { ENETC_PM0_RPKT, "MAC rx packets" }, + { ENETC_PM0_RUND, "MAC rx undersized packets" }, + { ENETC_PM0_R64, "MAC rx 64 byte packets" }, + { ENETC_PM0_R127, "MAC rx 65-127 byte packets" }, + { ENETC_PM0_R255, "MAC rx 128-255 byte packets" }, + { ENETC_PM0_R511, "MAC rx 256-511 byte packets" }, + { ENETC_PM0_R1023, "MAC rx 512-1023 byte packets" }, + { ENETC_PM0_R1518, "MAC rx 1024-1518 
byte packets" }, + { ENETC_PM0_R1519X, "MAC rx 1519 to max-octet packets" }, + { ENETC_PM0_ROVR, "MAC rx oversized packets" }, + { ENETC_PM0_RJBR, "MAC rx jabber packets" }, + { ENETC_PM0_RFRG, "MAC rx fragment packets" }, + { ENETC_PM0_RCNP, "MAC rx control packets" }, + { ENETC_PM0_RDRNTP, "MAC rx fifo drop" }, + { ENETC_PM0_TEOCT, "MAC tx ethernet octets" }, + { ENETC_PM0_TOCT, "MAC tx octets" }, + { ENETC_PM0_TCRSE, "MAC tx carrier sense errors" }, + { ENETC_PM0_TXPF, "MAC tx valid pause frames" }, + { ENETC_PM0_TFRM, "MAC tx frames" }, + { ENETC_PM0_TFCS, "MAC tx fcs errors" }, + { ENETC_PM0_TVLAN, "MAC tx VLAN frames" }, + { ENETC_PM0_TERR, "MAC tx frames" }, + { ENETC_PM0_TUCA, "MAC tx unicast frames" }, + { ENETC_PM0_TMCA, "MAC tx multicast frames" }, + { ENETC_PM0_TBCA, "MAC tx broadcast frames" }, + { ENETC_PM0_TPKT, "MAC tx packets" }, + { ENETC_PM0_TUND, "MAC tx undersized packets" }, + { ENETC_PM0_T127, "MAC tx 65-127 byte packets" }, + { ENETC_PM0_T1023, "MAC tx 512-1023 byte packets" }, + { ENETC_PM0_T1518, "MAC tx 1024-1518 byte packets" }, + { ENETC_PM0_TCNP, "MAC tx control packets" }, + { ENETC_PM0_TDFR, "MAC tx deferred packets" }, + { ENETC_PM0_TMCOL, "MAC tx multiple collisions" }, + { ENETC_PM0_TSCOL, "MAC tx single collisions" }, + { ENETC_PM0_TLCOL, "MAC tx late collisions" }, + { ENETC_PM0_TECOL, "MAC tx excessive collisions" }, + { ENETC_UFDMF, "SI MAC nomatch u-cast discards" }, + { ENETC_MFDMF, "SI MAC nomatch m-cast discards" }, + { ENETC_PBFDSIR, "SI MAC nomatch b-cast discards" }, + { ENETC_PUFDVFR, "SI VLAN nomatch u-cast discards" }, + { ENETC_PMFDVFR, "SI VLAN nomatch m-cast discards" }, + { ENETC_PBFDVFR, "SI VLAN nomatch b-cast discards" }, + { ENETC_PFDMSAPR, "SI pruning discarded frames" }, + { ENETC_PICDR(0), "ICM DR0 discarded frames" }, + { ENETC_PICDR(1), "ICM DR1 discarded frames" }, + { ENETC_PICDR(2), "ICM DR2 discarded frames" }, + { ENETC_PICDR(3), "ICM DR3 discarded frames" }, +}; + +static const char rx_ring_stats[][ETH_GSTRING_LEN] = { + "Rx ring %2d frames", + "Rx ring %2d alloc errors", +}; + +static const char tx_ring_stats[][ETH_GSTRING_LEN] = { + "Tx ring %2d frames", +}; + +static int enetc_get_sset_count(struct net_device *ndev, int sset) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + if (sset == ETH_SS_STATS) + return ARRAY_SIZE(enetc_si_counters) + + ARRAY_SIZE(tx_ring_stats) * priv->num_tx_rings + + ARRAY_SIZE(rx_ring_stats) * priv->num_rx_rings + + (enetc_si_is_pf(priv->si) ? 
+ ARRAY_SIZE(enetc_port_counters) : 0); + + return -EOPNOTSUPP; +} + +static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + u8 *p = data; + int i, j; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++) { + strlcpy(p, enetc_si_counters[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < priv->num_tx_rings; i++) { + for (j = 0; j < ARRAY_SIZE(tx_ring_stats); j++) { + snprintf(p, ETH_GSTRING_LEN, tx_ring_stats[j], + i); + p += ETH_GSTRING_LEN; + } + } + for (i = 0; i < priv->num_rx_rings; i++) { + for (j = 0; j < ARRAY_SIZE(rx_ring_stats); j++) { + snprintf(p, ETH_GSTRING_LEN, rx_ring_stats[j], + i); + p += ETH_GSTRING_LEN; + } + } + + if (!enetc_si_is_pf(priv->si)) + break; + + for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++) { + strlcpy(p, enetc_port_counters[i].name, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static void enetc_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + int i, o = 0; + + for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++) + data[o++] = enetc_rd64(hw, enetc_si_counters[i].reg); + + for (i = 0; i < priv->num_tx_rings; i++) + data[o++] = priv->tx_ring[i]->stats.packets; + + for (i = 0; i < priv->num_rx_rings; i++) { + data[o++] = priv->rx_ring[i]->stats.packets; + data[o++] = priv->rx_ring[i]->stats.rx_alloc_errs; + } + + if (!enetc_si_is_pf(priv->si)) + return; + + for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++) + data[o++] = enetc_port_rd(hw, enetc_port_counters[i].reg); +} + +#define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \ + RXH_IP_DST) +#define ENETC_RSSHASH_L4 (ENETC_RSSHASH_L3 | RXH_L4_B_0_1 | RXH_L4_B_2_3) +static int enetc_get_rsshash(struct ethtool_rxnfc *rxnfc) +{ + static const u32 rsshash[] = { + [TCP_V4_FLOW] = ENETC_RSSHASH_L4, + [UDP_V4_FLOW] = ENETC_RSSHASH_L4, + [SCTP_V4_FLOW] = ENETC_RSSHASH_L4, + [AH_ESP_V4_FLOW] = ENETC_RSSHASH_L3, + [IPV4_FLOW] = ENETC_RSSHASH_L3, + [TCP_V6_FLOW] = ENETC_RSSHASH_L4, + [UDP_V6_FLOW] = ENETC_RSSHASH_L4, + [SCTP_V6_FLOW] = ENETC_RSSHASH_L4, + [AH_ESP_V6_FLOW] = ENETC_RSSHASH_L3, + [IPV6_FLOW] = ENETC_RSSHASH_L3, + [ETHER_FLOW] = 0, + }; + + if (rxnfc->flow_type >= ARRAY_SIZE(rsshash)) + return -EINVAL; + + rxnfc->data = rsshash[rxnfc->flow_type]; + + return 0; +} + +/* current HW spec does byte reversal on everything including MAC addresses */ +static void ether_addr_copy_swap(u8 *dst, const u8 *src) +{ + int i; + + for (i = 0; i < ETH_ALEN; i++) + dst[i] = src[ETH_ALEN - i - 1]; +} + +static int enetc_set_cls_entry(struct enetc_si *si, + struct ethtool_rx_flow_spec *fs, bool en) +{ + struct ethtool_tcpip4_spec *l4ip4_h, *l4ip4_m; + struct ethtool_usrip4_spec *l3ip4_h, *l3ip4_m; + struct ethhdr *eth_h, *eth_m; + struct enetc_cmd_rfse rfse = { {0} }; + + if (!en) + goto done; + + switch (fs->flow_type & 0xff) { + case TCP_V4_FLOW: + l4ip4_h = &fs->h_u.tcp_ip4_spec; + l4ip4_m = &fs->m_u.tcp_ip4_spec; + goto l4ip4; + case UDP_V4_FLOW: + l4ip4_h = &fs->h_u.udp_ip4_spec; + l4ip4_m = &fs->m_u.udp_ip4_spec; + goto l4ip4; + case SCTP_V4_FLOW: + l4ip4_h = &fs->h_u.sctp_ip4_spec; + l4ip4_m = &fs->m_u.sctp_ip4_spec; +l4ip4: + rfse.sip_h[0] = l4ip4_h->ip4src; + rfse.sip_m[0] = l4ip4_m->ip4src; + rfse.dip_h[0] = l4ip4_h->ip4dst; + rfse.dip_m[0] = l4ip4_m->ip4dst; + rfse.sport_h = 
ntohs(l4ip4_h->psrc); + rfse.sport_m = ntohs(l4ip4_m->psrc); + rfse.dport_h = ntohs(l4ip4_h->pdst); + rfse.dport_m = ntohs(l4ip4_m->pdst); + if (l4ip4_m->tos) + netdev_warn(si->ndev, "ToS field is not supported and was ignored\n"); + rfse.ethtype_h = ETH_P_IP; /* IPv4 */ + rfse.ethtype_m = 0xffff; + break; + case IP_USER_FLOW: + l3ip4_h = &fs->h_u.usr_ip4_spec; + l3ip4_m = &fs->m_u.usr_ip4_spec; + + rfse.sip_h[0] = l3ip4_h->ip4src; + rfse.sip_m[0] = l3ip4_m->ip4src; + rfse.dip_h[0] = l3ip4_h->ip4dst; + rfse.dip_m[0] = l3ip4_m->ip4dst; + if (l3ip4_m->tos) + netdev_warn(si->ndev, "ToS field is not supported and was ignored\n"); + rfse.ethtype_h = ETH_P_IP; /* IPv4 */ + rfse.ethtype_m = 0xffff; + break; + case ETHER_FLOW: + eth_h = &fs->h_u.ether_spec; + eth_m = &fs->m_u.ether_spec; + + ether_addr_copy_swap(rfse.smac_h, eth_h->h_source); + ether_addr_copy_swap(rfse.smac_m, eth_m->h_source); + ether_addr_copy_swap(rfse.dmac_h, eth_h->h_dest); + ether_addr_copy_swap(rfse.dmac_m, eth_m->h_dest); + rfse.ethtype_h = ntohs(eth_h->h_proto); + rfse.ethtype_m = ntohs(eth_m->h_proto); + break; + default: + return -EOPNOTSUPP; + } + + rfse.mode |= ENETC_RFSE_EN; + if (fs->ring_cookie != RX_CLS_FLOW_DISC) { + rfse.mode |= ENETC_RFSE_MODE_BD; + rfse.result = fs->ring_cookie; + } +done: + return enetc_set_fs_entry(si, &rfse, fs->location); +} + +static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc, + u32 *rule_locs) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int i, j; + + switch (rxnfc->cmd) { + case ETHTOOL_GRXRINGS: + rxnfc->data = priv->num_rx_rings; + break; + case ETHTOOL_GRXFH: + /* get RSS hash config */ + return enetc_get_rsshash(rxnfc); + case ETHTOOL_GRXCLSRLCNT: + /* total number of entries */ + rxnfc->data = priv->si->num_fs_entries; + /* number of entries in use */ + rxnfc->rule_cnt = 0; + for (i = 0; i < priv->si->num_fs_entries; i++) + if (priv->cls_rules[i].used) + rxnfc->rule_cnt++; + break; + case ETHTOOL_GRXCLSRULE: + if (rxnfc->fs.location >= priv->si->num_fs_entries) + return -EINVAL; + + /* get entry x */ + rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs; + break; + case ETHTOOL_GRXCLSRLALL: + /* total number of entries */ + rxnfc->data = priv->si->num_fs_entries; + /* array of indexes of used entries */ + j = 0; + for (i = 0; i < priv->si->num_fs_entries; i++) { + if (!priv->cls_rules[i].used) + continue; + if (j == rxnfc->rule_cnt) + return -EMSGSIZE; + rule_locs[j++] = i; + } + /* number of entries in use */ + rxnfc->rule_cnt = j; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int enetc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int err; + + switch (rxnfc->cmd) { + case ETHTOOL_SRXCLSRLINS: + if (rxnfc->fs.location >= priv->si->num_fs_entries) + return -EINVAL; + + if (rxnfc->fs.ring_cookie >= priv->num_rx_rings && + rxnfc->fs.ring_cookie != RX_CLS_FLOW_DISC) + return -EINVAL; + + err = enetc_set_cls_entry(priv->si, &rxnfc->fs, true); + if (err) + return err; + priv->cls_rules[rxnfc->fs.location].fs = rxnfc->fs; + priv->cls_rules[rxnfc->fs.location].used = 1; + break; + case ETHTOOL_SRXCLSRLDEL: + if (rxnfc->fs.location >= priv->si->num_fs_entries) + return -EINVAL; + + err = enetc_set_cls_entry(priv->si, &rxnfc->fs, false); + if (err) + return err; + priv->cls_rules[rxnfc->fs.location].used = 0; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static u32 enetc_get_rxfh_key_size(struct net_device *ndev) +{ + struct 
enetc_ndev_priv *priv = netdev_priv(ndev); + + /* return the size of the RX flow hash key. PF only */ + return (priv->si->hw.port) ? ENETC_RSSHASH_KEY_SIZE : 0; +} + +static u32 enetc_get_rxfh_indir_size(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + /* return the size of the RX flow hash indirection table */ + return priv->si->num_rss; +} + +static int enetc_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + int err = 0, i; + + /* return hash function */ + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + /* return hash key */ + if (key && hw->port) + for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++) + ((u32 *)key)[i] = enetc_port_rd(hw, ENETC_PRSSK(i)); + + /* return RSS table */ + if (indir) + err = enetc_get_rss_table(priv->si, indir, priv->si->num_rss); + + return err; +} + +void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes) +{ + int i; + + for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++) + enetc_port_wr(hw, ENETC_PRSSK(i), ((u32 *)bytes)[i]); +} + +static int enetc_set_rxfh(struct net_device *ndev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + int err = 0; + + /* set hash key, if PF */ + if (key && hw->port) + enetc_set_rss_key(hw, key); + + /* set RSS table */ + if (indir) + err = enetc_set_rss_table(priv->si, indir, priv->si->num_rss); + + return err; +} + +static void enetc_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + ring->rx_pending = priv->rx_bd_count; + ring->tx_pending = priv->tx_bd_count; + + /* do some h/w sanity checks for BDR length */ + if (netif_running(ndev)) { + struct enetc_hw *hw = &priv->si->hw; + u32 val = enetc_rxbdr_rd(hw, 0, ENETC_RBLENR); + + if (val != priv->rx_bd_count) + netif_err(priv, hw, ndev, "RxBDR[RBLENR] = %d!\n", val); + + val = enetc_txbdr_rd(hw, 0, ENETC_TBLENR); + + if (val != priv->tx_bd_count) + netif_err(priv, hw, ndev, "TxBDR[TBLENR] = %d!\n", val); + } +} + +static const struct ethtool_ops enetc_pf_ethtool_ops = { + .get_regs_len = enetc_get_reglen, + .get_regs = enetc_get_regs, + .get_sset_count = enetc_get_sset_count, + .get_strings = enetc_get_strings, + .get_ethtool_stats = enetc_get_ethtool_stats, + .get_rxnfc = enetc_get_rxnfc, + .set_rxnfc = enetc_set_rxnfc, + .get_rxfh_key_size = enetc_get_rxfh_key_size, + .get_rxfh_indir_size = enetc_get_rxfh_indir_size, + .get_rxfh = enetc_get_rxfh, + .set_rxfh = enetc_set_rxfh, + .get_ringparam = enetc_get_ringparam, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, +}; + +static const struct ethtool_ops enetc_vf_ethtool_ops = { + .get_regs_len = enetc_get_reglen, + .get_regs = enetc_get_regs, + .get_sset_count = enetc_get_sset_count, + .get_strings = enetc_get_strings, + .get_ethtool_stats = enetc_get_ethtool_stats, + .get_rxnfc = enetc_get_rxnfc, + .set_rxnfc = enetc_set_rxnfc, + .get_rxfh_indir_size = enetc_get_rxfh_indir_size, + .get_rxfh = enetc_get_rxfh, + .set_rxfh = enetc_set_rxfh, + .get_ringparam = enetc_get_ringparam, +}; + +void enetc_set_ethtool_ops(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + if (enetc_si_is_pf(priv->si)) + ndev->ethtool_ops = &enetc_pf_ethtool_ops; + else + ndev->ethtool_ops = &enetc_vf_ethtool_ops; +} diff --git 
a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h new file mode 100644 index 000000000000..df8eb8882d92 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -0,0 +1,533 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2017-2019 NXP */ + +#include <linux/bitops.h> + +/* ENETC device IDs */ +#define ENETC_DEV_ID_PF 0xe100 +#define ENETC_DEV_ID_VF 0xef00 +#define ENETC_DEV_ID_PTP 0xee02 + +/* ENETC register block BAR */ +#define ENETC_BAR_REGS 0 + +/** SI regs, offset: 0h */ +#define ENETC_SIMR 0 +#define ENETC_SIMR_EN BIT(31) +#define ENETC_SIMR_RSSE BIT(0) +#define ENETC_SICTR0 0x18 +#define ENETC_SICTR1 0x1c +#define ENETC_SIPCAPR0 0x20 +#define ENETC_SIPCAPR0_RSS BIT(8) +#define ENETC_SIPCAPR1 0x24 +#define ENETC_SITGTGR 0x30 +#define ENETC_SIRBGCR 0x38 +/* cache attribute registers for transactions initiated by ENETC */ +#define ENETC_SICAR0 0x40 +#define ENETC_SICAR1 0x44 +#define ENETC_SICAR2 0x48 +/* rd snoop, no alloc + * wr snoop, no alloc, partial cache line update for BDs and full cache line + * update for data + */ +#define ENETC_SICAR_RD_COHERENT 0x2b2b0000 +#define ENETC_SICAR_WR_COHERENT 0x00006727 +#define ENETC_SICAR_MSI 0x00300030 /* rd/wr device, no snoop, no alloc */ + +#define ENETC_SIPMAR0 0x80 +#define ENETC_SIPMAR1 0x84 + +/* VF-PF Message passing */ +#define ENETC_DEFAULT_MSG_SIZE 1024 /* and max size */ +/* msg size encoding: default and max msg value of 1024B encoded as 0 */ +static inline u32 enetc_vsi_set_msize(u32 size) +{ + return size < ENETC_DEFAULT_MSG_SIZE ? size >> 5 : 0; +} + +#define ENETC_PSIMSGRR 0x204 +#define ENETC_PSIMSGRR_MR_MASK GENMASK(2, 1) +#define ENETC_PSIMSGRR_MR(n) BIT((n) + 1) /* n = VSI index */ +#define ENETC_PSIVMSGRCVAR0(n) (0x210 + (n) * 0x8) /* n = VSI index */ +#define ENETC_PSIVMSGRCVAR1(n) (0x214 + (n) * 0x8) + +#define ENETC_VSIMSGSR 0x204 /* RO */ +#define ENETC_VSIMSGSR_MB BIT(0) +#define ENETC_VSIMSGSR_MS BIT(1) +#define ENETC_VSIMSGSNDAR0 0x210 +#define ENETC_VSIMSGSNDAR1 0x214 + +#define ENETC_SIMSGSR_SET_MC(val) ((val) << 16) +#define ENETC_SIMSGSR_GET_MC(val) ((val) >> 16) + +/* SI statistics */ +#define ENETC_SIROCT 0x300 +#define ENETC_SIRFRM 0x308 +#define ENETC_SIRUCA 0x310 +#define ENETC_SIRMCA 0x318 +#define ENETC_SITOCT 0x320 +#define ENETC_SITFRM 0x328 +#define ENETC_SITUCA 0x330 +#define ENETC_SITMCA 0x338 +#define ENETC_RBDCR(n) (0x8180 + (n) * 0x200) + +/* Control BDR regs */ +#define ENETC_SICBDRMR 0x800 +#define ENETC_SICBDRSR 0x804 /* RO */ +#define ENETC_SICBDRBAR0 0x810 +#define ENETC_SICBDRBAR1 0x814 +#define ENETC_SICBDRPIR 0x818 +#define ENETC_SICBDRCIR 0x81c +#define ENETC_SICBDRLENR 0x820 + +#define ENETC_SICAPR0 0x900 +#define ENETC_SICAPR1 0x904 + +#define ENETC_PSIIER 0xa00 +#define ENETC_PSIIER_MR_MASK GENMASK(2, 1) +#define ENETC_PSIIDR 0xa08 +#define ENETC_SITXIDR 0xa18 +#define ENETC_SIRXIDR 0xa28 +#define ENETC_SIMSIVR 0xa30 + +#define ENETC_SIMSITRV(n) (0xB00 + (n) * 0x4) +#define ENETC_SIMSIRRV(n) (0xB80 + (n) * 0x4) + +#define ENETC_SIUEFDCR 0xe28 + +#define ENETC_SIRFSCAPR 0x1200 +#define ENETC_SIRFSCAPR_GET_NUM_RFS(val) ((val) & 0x7f) +#define ENETC_SIRSSCAPR 0x1600 +#define ENETC_SIRSSCAPR_GET_NUM_RSS(val) (BIT((val) & 0xf) * 32) + +/** SI BDR sub-blocks, n = 0..7 */ +enum enetc_bdr_type {TX, RX}; +#define ENETC_BDR_OFF(i) ((i) * 0x200) +#define ENETC_BDR(t, i, r) (0x8000 + (t) * 0x100 + ENETC_BDR_OFF(i) + (r)) +/* RX BDR reg offsets */ +#define ENETC_RBMR 0 +#define ENETC_RBMR_BDS BIT(2) 
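/* Layout recap for the ENETC_BDR() macro above: with
 * enum enetc_bdr_type {TX, RX}, ring i's Tx register block starts at
 * 0x8000 + i * 0x200 and its Rx block at 0x8100 + i * 0x200, i.e.
 * each ring index owns one 0x200-byte window split between Tx and Rx.
 * Example: ENETC_BDR(RX, 2, ENETC_RBMR) = 0x8000 + 0x100 + 2 * 0x200
 * = 0x8500.
 */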
+#define ENETC_RBMR_VTE BIT(5) +#define ENETC_RBMR_EN BIT(31) +#define ENETC_RBSR 0x4 +#define ENETC_RBBSR 0x8 +#define ENETC_RBCIR 0xc +#define ENETC_RBBAR0 0x10 +#define ENETC_RBBAR1 0x14 +#define ENETC_RBPIR 0x18 +#define ENETC_RBLENR 0x20 +#define ENETC_RBIER 0xa0 +#define ENETC_RBIER_RXTIE BIT(0) +#define ENETC_RBIDR 0xa4 +#define ENETC_RBICIR0 0xa8 +#define ENETC_RBICIR0_ICEN BIT(31) + +/* TX BDR reg offsets */ +#define ENETC_TBMR 0 +#define ENETC_TBSR_BUSY BIT(0) +#define ENETC_TBMR_VIH BIT(9) +#define ENETC_TBMR_PRIO_MASK GENMASK(2, 0) +#define ENETC_TBMR_PRIO_SET(val) val +#define ENETC_TBMR_EN BIT(31) +#define ENETC_TBSR 0x4 +#define ENETC_TBBAR0 0x10 +#define ENETC_TBBAR1 0x14 +#define ENETC_TBPIR 0x18 +#define ENETC_TBCIR 0x1c +#define ENETC_TBCIR_IDX_MASK 0xffff +#define ENETC_TBLENR 0x20 +#define ENETC_TBIER 0xa0 +#define ENETC_TBIER_TXTIE BIT(0) +#define ENETC_TBIDR 0xa4 +#define ENETC_TBICIR0 0xa8 +#define ENETC_TBICIR0_ICEN BIT(31) + +#define ENETC_RTBLENR_LEN(n) ((n) & ~0x7) + +/* Port regs, offset: 1_0000h */ +#define ENETC_PORT_BASE 0x10000 +#define ENETC_PMR 0x0000 +#define ENETC_PMR_EN GENMASK(18, 16) +#define ENETC_PSR 0x0004 /* RO */ +#define ENETC_PSIPMR 0x0018 +#define ENETC_PSIPMR_SET_UP(n) BIT(n) /* n = SI index */ +#define ENETC_PSIPMR_SET_MP(n) BIT((n) + 16) +#define ENETC_PSIPVMR 0x001c +#define ENETC_VLAN_PROMISC_MAP_ALL 0x7 +#define ENETC_PSIPVMR_SET_VP(simap) ((simap) & 0x7) +#define ENETC_PSIPVMR_SET_VUTA(simap) (((simap) & 0x7) << 16) +#define ENETC_PSIPMAR0(n) (0x0100 + (n) * 0x8) /* n = SI index */ +#define ENETC_PSIPMAR1(n) (0x0104 + (n) * 0x8) +#define ENETC_PVCLCTR 0x0208 +#define ENETC_VLAN_TYPE_C BIT(0) +#define ENETC_VLAN_TYPE_S BIT(1) +#define ENETC_PVCLCTR_OVTPIDL(bmp) ((bmp) & 0xff) /* VLAN_TYPE */ +#define ENETC_PSIVLANR(n) (0x0240 + (n) * 4) /* n = SI index */ +#define ENETC_PSIVLAN_EN BIT(31) +#define ENETC_PSIVLAN_SET_QOS(val) ((u32)(val) << 12) +#define ENETC_PTXMBAR 0x0608 +#define ENETC_PCAPR0 0x0900 +#define ENETC_PCAPR0_RXBDR(val) ((val) >> 24) +#define ENETC_PCAPR0_TXBDR(val) (((val) >> 16) & 0xff) +#define ENETC_PCAPR1 0x0904 +#define ENETC_PSICFGR0(n) (0x0940 + (n) * 0xc) /* n = SI index */ +#define ENETC_PSICFGR0_SET_TXBDR(val) ((val) & 0xff) +#define ENETC_PSICFGR0_SET_RXBDR(val) (((val) & 0xff) << 16) +#define ENETC_PSICFGR0_VTE BIT(12) +#define ENETC_PSICFGR0_SIVIE BIT(14) +#define ENETC_PSICFGR0_ASE BIT(15) +#define ENETC_PSICFGR0_SIVC(bmp) (((bmp) & 0xff) << 24) /* VLAN_TYPE */ + +#define ENETC_PTCCBSR0(n) (0x1110 + (n) * 8) /* n = 0 to 7*/ +#define ENETC_PTCCBSR1(n) (0x1114 + (n) * 8) /* n = 0 to 7*/ +#define ENETC_RSSHASH_KEY_SIZE 40 +#define ENETC_PRSSK(n) (0x1410 + (n) * 4) /* n = [0..9] */ +#define ENETC_PSIVLANFMR 0x1700 +#define ENETC_PSIVLANFMR_VS BIT(0) +#define ENETC_PRFSMR 0x1800 +#define ENETC_PRFSMR_RFSE BIT(31) +#define ENETC_PRFSCAPR 0x1804 +#define ENETC_PRFSCAPR_GET_NUM_RFS(val) ((((val) & 0xf) + 1) * 16) +#define ENETC_PSIRFSCFGR(n) (0x1814 + (n) * 4) /* n = SI index */ +#define ENETC_PFPMR 0x1900 +#define ENETC_PFPMR_PMACE BIT(1) +#define ENETC_PFPMR_MWLM BIT(0) +#define ENETC_PSIUMHFR0(n, err) (((err) ? 0x1d08 : 0x1d00) + (n) * 0x10) +#define ENETC_PSIUMHFR1(n) (0x1d04 + (n) * 0x10) +#define ENETC_PSIMMHFR0(n, err) (((err) ? 
0x1d00 : 0x1d08) + (n) * 0x10) +#define ENETC_PSIMMHFR1(n) (0x1d0c + (n) * 0x10) +#define ENETC_PSIVHFR0(n) (0x1e00 + (n) * 8) /* n = SI index */ +#define ENETC_PSIVHFR1(n) (0x1e04 + (n) * 8) /* n = SI index */ +#define ENETC_MMCSR 0x1f00 +#define ENETC_MMCSR_ME BIT(16) +#define ENETC_PTCMSDUR(n) (0x2020 + (n) * 4) /* n = TC index [0..7] */ + +#define ENETC_PM0_CMD_CFG 0x8008 +#define ENETC_PM1_CMD_CFG 0x9008 +#define ENETC_PM0_TX_EN BIT(0) +#define ENETC_PM0_RX_EN BIT(1) +#define ENETC_PM0_PROMISC BIT(4) +#define ENETC_PM0_CMD_XGLP BIT(10) +#define ENETC_PM0_CMD_TXP BIT(11) +#define ENETC_PM0_CMD_PHY_TX_EN BIT(15) +#define ENETC_PM0_CMD_SFD BIT(21) +#define ENETC_PM0_MAXFRM 0x8014 +#define ENETC_SET_TX_MTU(val) ((val) << 16) +#define ENETC_SET_MAXFRM(val) ((val) & 0xffff) +#define ENETC_PM0_IF_MODE 0x8300 +#define ENETC_PMO_IFM_RG BIT(2) +#define ENETC_PM0_IFM_RLP (BIT(5) | BIT(11)) +#define ENETC_PM0_IFM_RGAUTO (BIT(15) | ENETC_PMO_IFM_RG | BIT(1)) +#define ENETC_PM0_IFM_XGMII BIT(12) + +/* MAC counters */ +#define ENETC_PM0_REOCT 0x8100 +#define ENETC_PM0_RALN 0x8110 +#define ENETC_PM0_RXPF 0x8118 +#define ENETC_PM0_RFRM 0x8120 +#define ENETC_PM0_RFCS 0x8128 +#define ENETC_PM0_RVLAN 0x8130 +#define ENETC_PM0_RERR 0x8138 +#define ENETC_PM0_RUCA 0x8140 +#define ENETC_PM0_RMCA 0x8148 +#define ENETC_PM0_RBCA 0x8150 +#define ENETC_PM0_RDRP 0x8158 +#define ENETC_PM0_RPKT 0x8160 +#define ENETC_PM0_RUND 0x8168 +#define ENETC_PM0_R64 0x8170 +#define ENETC_PM0_R127 0x8178 +#define ENETC_PM0_R255 0x8180 +#define ENETC_PM0_R511 0x8188 +#define ENETC_PM0_R1023 0x8190 +#define ENETC_PM0_R1518 0x8198 +#define ENETC_PM0_R1519X 0x81A0 +#define ENETC_PM0_ROVR 0x81A8 +#define ENETC_PM0_RJBR 0x81B0 +#define ENETC_PM0_RFRG 0x81B8 +#define ENETC_PM0_RCNP 0x81C0 +#define ENETC_PM0_RDRNTP 0x81C8 +#define ENETC_PM0_TEOCT 0x8200 +#define ENETC_PM0_TOCT 0x8208 +#define ENETC_PM0_TCRSE 0x8210 +#define ENETC_PM0_TXPF 0x8218 +#define ENETC_PM0_TFRM 0x8220 +#define ENETC_PM0_TFCS 0x8228 +#define ENETC_PM0_TVLAN 0x8230 +#define ENETC_PM0_TERR 0x8238 +#define ENETC_PM0_TUCA 0x8240 +#define ENETC_PM0_TMCA 0x8248 +#define ENETC_PM0_TBCA 0x8250 +#define ENETC_PM0_TPKT 0x8260 +#define ENETC_PM0_TUND 0x8268 +#define ENETC_PM0_T127 0x8278 +#define ENETC_PM0_T1023 0x8290 +#define ENETC_PM0_T1518 0x8298 +#define ENETC_PM0_TCNP 0x82C0 +#define ENETC_PM0_TDFR 0x82D0 +#define ENETC_PM0_TMCOL 0x82D8 +#define ENETC_PM0_TSCOL 0x82E0 +#define ENETC_PM0_TLCOL 0x82E8 +#define ENETC_PM0_TECOL 0x82F0 + +/* Port counters */ +#define ENETC_PICDR(n) (0x0700 + (n) * 8) /* n = [0..3] */ +#define ENETC_PBFDSIR 0x0810 +#define ENETC_PFDMSAPR 0x0814 +#define ENETC_UFDMF 0x1680 +#define ENETC_MFDMF 0x1684 +#define ENETC_PUFDVFR 0x1780 +#define ENETC_PMFDVFR 0x1784 +#define ENETC_PBFDVFR 0x1788 + +/** Global regs, offset: 2_0000h */ +#define ENETC_GLOBAL_BASE 0x20000 +#define ENETC_G_EIPBRR0 0x0bf8 +#define ENETC_G_EIPBRR1 0x0bfc +#define ENETC_G_EPFBLPR(n) (0xd00 + 4 * (n)) +#define ENETC_G_EPFBLPR1_XGMII 0x80000000 + +/* PCI device info */ +struct enetc_hw { + /* SI registers, used by all PCI functions */ + void __iomem *reg; + /* Port registers, PF only */ + void __iomem *port; + /* IP global registers, PF only */ + void __iomem *global; +}; + +/* general register accessors */ +#define enetc_rd_reg(reg) ioread32((reg)) +#define enetc_wr_reg(reg, val) iowrite32((val), (reg)) +#ifdef ioread64 +#define enetc_rd_reg64(reg) ioread64((reg)) +#else +/* using this to read out stats on 32b systems */ +static inline u64 enetc_rd_reg64(void __iomem *reg) 
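/* This 32-bit fallback emulates an atomic 64-bit counter read with
 * the usual high/low/high sampling loop: if the two high-word reads
 * disagree, the low word carried over in between and the sample is
 * retried. It assumes the high word changes rarely, which holds for
 * the slow-moving MAC/SI statistics counters this accessor serves,
 * so at most one retry is expected in practice.
 */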
+{ + u32 low, high, tmp; + + do { + high = ioread32(reg + 4); + low = ioread32(reg); + tmp = ioread32(reg + 4); + } while (high != tmp); + + return le64_to_cpu((__le64)high << 32 | low); +} +#endif + +#define enetc_rd(hw, off) enetc_rd_reg((hw)->reg + (off)) +#define enetc_wr(hw, off, val) enetc_wr_reg((hw)->reg + (off), val) +#define enetc_rd64(hw, off) enetc_rd_reg64((hw)->reg + (off)) +/* port register accessors - PF only */ +#define enetc_port_rd(hw, off) enetc_rd_reg((hw)->port + (off)) +#define enetc_port_wr(hw, off, val) enetc_wr_reg((hw)->port + (off), val) +/* global register accessors - PF only */ +#define enetc_global_rd(hw, off) enetc_rd_reg((hw)->global + (off)) +#define enetc_global_wr(hw, off, val) enetc_wr_reg((hw)->global + (off), val) +/* BDR register accessors, see ENETC_BDR() */ +#define enetc_bdr_rd(hw, t, n, off) \ + enetc_rd(hw, ENETC_BDR(t, n, off)) +#define enetc_bdr_wr(hw, t, n, off, val) \ + enetc_wr(hw, ENETC_BDR(t, n, off), val) +#define enetc_txbdr_rd(hw, n, off) enetc_bdr_rd(hw, TX, n, off) +#define enetc_rxbdr_rd(hw, n, off) enetc_bdr_rd(hw, RX, n, off) +#define enetc_txbdr_wr(hw, n, off, val) \ + enetc_bdr_wr(hw, TX, n, off, val) +#define enetc_rxbdr_wr(hw, n, off, val) \ + enetc_bdr_wr(hw, RX, n, off, val) + +/* Buffer Descriptors (BD) */ +union enetc_tx_bd { + struct { + __le64 addr; + __le16 buf_len; + __le16 frm_len; + union { + struct { + __le16 l3_csoff; + u8 l4_csoff; + u8 flags; + }; /* default layout */ + __le32 lstatus; + }; + }; + struct { + __le32 tstamp; + __le16 tpid; + __le16 vid; + u8 reserved[6]; + u8 e_flags; + u8 flags; + } ext; /* Tx BD extension */ +}; + +#define ENETC_TXBD_FLAGS_L4CS BIT(0) +#define ENETC_TXBD_FLAGS_W BIT(2) +#define ENETC_TXBD_FLAGS_CSUM BIT(3) +#define ENETC_TXBD_FLAGS_EX BIT(6) +#define ENETC_TXBD_FLAGS_F BIT(7) + +static inline void enetc_clear_tx_bd(union enetc_tx_bd *txbd) +{ + memset(txbd, 0, sizeof(*txbd)); +} + +/* L3 csum flags */ +#define ENETC_TXBD_L3_IPCS BIT(7) +#define ENETC_TXBD_L3_IPV6 BIT(15) + +#define ENETC_TXBD_L3_START_MASK GENMASK(6, 0) +#define ENETC_TXBD_L3_SET_HSIZE(val) ((((val) >> 2) & 0x7f) << 8) + +/* Extension flags */ +#define ENETC_TXBD_E_FLAGS_VLAN_INS BIT(0) +#define ENETC_TXBD_E_FLAGS_TWO_STEP_PTP BIT(2) + +static inline __le16 enetc_txbd_l3_csoff(int start, int hdr_sz, u16 l3_flags) +{ + return cpu_to_le16(l3_flags | ENETC_TXBD_L3_SET_HSIZE(hdr_sz) | + (start & ENETC_TXBD_L3_START_MASK)); +} + +/* L4 csum flags */ +#define ENETC_TXBD_L4_UDP BIT(5) +#define ENETC_TXBD_L4_TCP BIT(6) + +union enetc_rx_bd { + struct { + __le64 addr; + u8 reserved[8]; + } w; + struct { + __le16 inet_csum; + __le16 parse_summary; + __le32 rss_hash; + __le16 buf_len; + __le16 vlan_opt; + union { + struct { + __le16 flags; + __le16 error; + }; + __le32 lstatus; + }; + } r; +}; + +#define ENETC_RXBD_LSTATUS_R BIT(30) +#define ENETC_RXBD_LSTATUS_F BIT(31) +#define ENETC_RXBD_ERR_MASK 0xff +#define ENETC_RXBD_LSTATUS(flags) ((flags) << 16) +#define ENETC_RXBD_FLAG_VLAN BIT(9) +#define ENETC_RXBD_FLAG_TSTMP BIT(10) + +#define ENETC_MAC_ADDR_FILT_CNT 8 /* # of supported entries per port */ +#define EMETC_MAC_ADDR_FILT_RES 3 /* # of reserved entries at the beginning */ +#define ENETC_MAX_NUM_VFS 2 + +struct enetc_cbd { + union { + struct { + __le32 addr[2]; + __le32 opt[4]; + }; + __le32 data[6]; + }; + __le16 index; + __le16 length; + u8 cmd; + u8 cls; + u8 _res; + u8 status_flags; +}; + +#define ENETC_CBD_FLAGS_SF BIT(7) /* short format */ +#define ENETC_CBD_STATUS_MASK 0xf + +struct enetc_cmd_rfse { + u8 
smac_h[6]; + u8 smac_m[6]; + u8 dmac_h[6]; + u8 dmac_m[6]; + u32 sip_h[4]; + u32 sip_m[4]; + u32 dip_h[4]; + u32 dip_m[4]; + u16 ethtype_h; + u16 ethtype_m; + u16 ethtype4_h; + u16 ethtype4_m; + u16 sport_h; + u16 sport_m; + u16 dport_h; + u16 dport_m; + u16 vlan_h; + u16 vlan_m; + u8 proto_h; + u8 proto_m; + u16 flags; + u16 result; + u16 mode; +}; + +#define ENETC_RFSE_EN BIT(15) +#define ENETC_RFSE_MODE_BD 2 + +static inline void enetc_get_primary_mac_addr(struct enetc_hw *hw, u8 *addr) +{ + *(u32 *)addr = __raw_readl(hw->reg + ENETC_SIPMAR0); + *(u16 *)(addr + 4) = __raw_readw(hw->reg + ENETC_SIPMAR1); +} + +#define ENETC_SI_INT_IDX 0 +/* base index for Rx/Tx interrupts */ +#define ENETC_BDR_INT_BASE_IDX 1 + +/* Messaging */ + +/* Command completion status */ +enum enetc_msg_cmd_status { + ENETC_MSG_CMD_STATUS_OK, + ENETC_MSG_CMD_STATUS_FAIL +}; + +/* VSI-PSI command message types */ +enum enetc_msg_cmd_type { + ENETC_MSG_CMD_MNG_MAC = 1, /* manage MAC address */ + ENETC_MSG_CMD_MNG_RX_MAC_FILTER,/* manage RX MAC table */ + ENETC_MSG_CMD_MNG_RX_VLAN_FILTER /* manage RX VLAN table */ +}; + +/* VSI-PSI command action types */ +enum enetc_msg_cmd_action_type { + ENETC_MSG_CMD_MNG_ADD = 1, + ENETC_MSG_CMD_MNG_REMOVE +}; + +/* PSI-VSI command header format */ +struct enetc_msg_cmd_header { + u16 type; /* command class type */ + u16 id; /* denotes the specific required action */ +}; + +/* Common H/W utility functions */ + +static inline void enetc_enable_rxvlan(struct enetc_hw *hw, int si_idx, + bool en) +{ + u32 val = enetc_rxbdr_rd(hw, si_idx, ENETC_RBMR); + + val = (val & ~ENETC_RBMR_VTE) | (en ? ENETC_RBMR_VTE : 0); + enetc_rxbdr_wr(hw, si_idx, ENETC_RBMR, val); +} + +static inline void enetc_enable_txvlan(struct enetc_hw *hw, int si_idx, + bool en) +{ + u32 val = enetc_txbdr_rd(hw, si_idx, ENETC_TBMR); + + val = (val & ~ENETC_TBMR_VIH) | (en ? 
ENETC_TBMR_VIH : 0); + enetc_txbdr_wr(hw, si_idx, ENETC_TBMR, val); +} diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c new file mode 100644 index 000000000000..77b9cd10ba2b --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c @@ -0,0 +1,199 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2019 NXP */ + +#include <linux/mdio.h> +#include <linux/of_mdio.h> +#include <linux/iopoll.h> +#include <linux/of.h> + +#include "enetc_pf.h" + +struct enetc_mdio_regs { + u32 mdio_cfg; /* MDIO configuration and status */ + u32 mdio_ctl; /* MDIO control */ + u32 mdio_data; /* MDIO data */ + u32 mdio_addr; /* MDIO address */ +}; + +#define bus_to_enetc_regs(bus) (struct enetc_mdio_regs __iomem *)((bus)->priv) + +#define ENETC_MDIO_REG_OFFSET 0x1c00 +#define ENETC_MDC_DIV 258 + +#define MDIO_CFG_CLKDIV(x) ((((x) >> 1) & 0xff) << 8) +#define MDIO_CFG_BSY BIT(0) +#define MDIO_CFG_RD_ER BIT(1) +#define MDIO_CFG_ENC45 BIT(6) + /* external MDIO only - driven on neg MDC edge */ +#define MDIO_CFG_NEG BIT(23) + +#define MDIO_CTL_DEV_ADDR(x) ((x) & 0x1f) +#define MDIO_CTL_PORT_ADDR(x) (((x) & 0x1f) << 5) +#define MDIO_CTL_READ BIT(15) +#define MDIO_DATA(x) ((x) & 0xffff) + +#define TIMEOUT 1000 +static int enetc_mdio_wait_complete(struct enetc_mdio_regs __iomem *regs) +{ + u32 val; + + return readx_poll_timeout(enetc_rd_reg, &regs->mdio_cfg, val, + !(val & MDIO_CFG_BSY), 10, 10 * TIMEOUT); +} + +static int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, + u16 value) +{ + struct enetc_mdio_regs __iomem *regs = bus_to_enetc_regs(bus); + u32 mdio_ctl, mdio_cfg; + u16 dev_addr; + int ret; + + mdio_cfg = MDIO_CFG_CLKDIV(ENETC_MDC_DIV) | MDIO_CFG_NEG; + if (regnum & MII_ADDR_C45) { + dev_addr = (regnum >> 16) & 0x1f; + mdio_cfg |= MDIO_CFG_ENC45; + } else { + /* clause 22 (ie 1G) */ + dev_addr = regnum & 0x1f; + mdio_cfg &= ~MDIO_CFG_ENC45; + } + + enetc_wr_reg(&regs->mdio_cfg, mdio_cfg); + + ret = enetc_mdio_wait_complete(regs); + if (ret) + return ret; + + /* set port and dev addr */ + mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); + enetc_wr_reg(&regs->mdio_ctl, mdio_ctl); + + /* set the register address */ + if (regnum & MII_ADDR_C45) { + enetc_wr_reg(&regs->mdio_addr, regnum & 0xffff); + + ret = enetc_mdio_wait_complete(regs); + if (ret) + return ret; + } + + /* write the value */ + enetc_wr_reg(&regs->mdio_data, MDIO_DATA(value)); + + ret = enetc_mdio_wait_complete(regs); + if (ret) + return ret; + + return 0; +} + +static int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum) +{ + struct enetc_mdio_regs __iomem *regs = bus_to_enetc_regs(bus); + u32 mdio_ctl, mdio_cfg; + u16 dev_addr, value; + int ret; + + mdio_cfg = MDIO_CFG_CLKDIV(ENETC_MDC_DIV) | MDIO_CFG_NEG; + if (regnum & MII_ADDR_C45) { + dev_addr = (regnum >> 16) & 0x1f; + mdio_cfg |= MDIO_CFG_ENC45; + } else { + dev_addr = regnum & 0x1f; + mdio_cfg &= ~MDIO_CFG_ENC45; + } + + enetc_wr_reg(&regs->mdio_cfg, mdio_cfg); + + ret = enetc_mdio_wait_complete(regs); + if (ret) + return ret; + + /* set port and device addr */ + mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); + enetc_wr_reg(&regs->mdio_ctl, mdio_ctl); + + /* set the register address */ + if (regnum & MII_ADDR_C45) { + enetc_wr_reg(&regs->mdio_addr, regnum & 0xffff); + + ret = enetc_mdio_wait_complete(regs); + if (ret) + return ret; + } + + /* initiate the read */ + enetc_wr_reg(&regs->mdio_ctl, mdio_ctl | MDIO_CTL_READ); + + ret = 
enetc_mdio_wait_complete(regs); + if (ret) + return ret; + + /* return all Fs if nothing was there */ + if (enetc_rd_reg(&regs->mdio_cfg) & MDIO_CFG_RD_ER) { + dev_dbg(&bus->dev, + "Error while reading PHY%d reg at %d.%hhu\n", + phy_id, dev_addr, regnum); + return 0xffff; + } + + value = enetc_rd_reg(&regs->mdio_data) & 0xffff; + + return value; +} + +int enetc_mdio_probe(struct enetc_pf *pf) +{ + struct device *dev = &pf->si->pdev->dev; + struct enetc_mdio_regs __iomem *regs; + struct device_node *np; + struct mii_bus *bus; + int ret; + + bus = mdiobus_alloc_size(sizeof(regs)); + if (!bus) + return -ENOMEM; + + bus->name = "Freescale ENETC MDIO Bus"; + bus->read = enetc_mdio_read; + bus->write = enetc_mdio_write; + bus->parent = dev; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev)); + + /* store the enetc mdio base address for this bus */ + regs = pf->si->hw.port + ENETC_MDIO_REG_OFFSET; + bus->priv = regs; + + np = of_get_child_by_name(dev->of_node, "mdio"); + if (!np) { + dev_err(dev, "MDIO node missing\n"); + ret = -EINVAL; + goto err_registration; + } + + ret = of_mdiobus_register(bus, np); + if (ret) { + of_node_put(np); + dev_err(dev, "cannot register MDIO bus\n"); + goto err_registration; + } + + of_node_put(np); + pf->mdio = bus; + + return 0; + +err_registration: + mdiobus_free(bus); + + return ret; +} + +void enetc_mdio_remove(struct enetc_pf *pf) +{ + if (pf->mdio) { + mdiobus_unregister(pf->mdio); + mdiobus_free(pf->mdio); + } +} diff --git a/drivers/net/ethernet/freescale/enetc/enetc_msg.c b/drivers/net/ethernet/freescale/enetc/enetc_msg.c new file mode 100644 index 000000000000..40d22ebe9224 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_msg.c @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2017-2019 NXP */ + +#include "enetc_pf.h" + +static void enetc_msg_disable_mr_int(struct enetc_hw *hw) +{ + u32 psiier = enetc_rd(hw, ENETC_PSIIER); + /* disable MR int source(s) */ + enetc_wr(hw, ENETC_PSIIER, psiier & ~ENETC_PSIIER_MR_MASK); +} + +static void enetc_msg_enable_mr_int(struct enetc_hw *hw) +{ + u32 psiier = enetc_rd(hw, ENETC_PSIIER); + + enetc_wr(hw, ENETC_PSIIER, psiier | ENETC_PSIIER_MR_MASK); +} + +static irqreturn_t enetc_msg_psi_msix(int irq, void *data) +{ + struct enetc_si *si = (struct enetc_si *)data; + struct enetc_pf *pf = enetc_si_priv(si); + + enetc_msg_disable_mr_int(&si->hw); + schedule_work(&pf->msg_task); + + return IRQ_HANDLED; +} + +static void enetc_msg_task(struct work_struct *work) +{ + struct enetc_pf *pf = container_of(work, struct enetc_pf, msg_task); + struct enetc_hw *hw = &pf->si->hw; + unsigned long mr_mask; + int i; + + for (;;) { + mr_mask = enetc_rd(hw, ENETC_PSIMSGRR) & ENETC_PSIMSGRR_MR_MASK; + if (!mr_mask) { + /* re-arm MR interrupts, w1c the IDR reg */ + enetc_wr(hw, ENETC_PSIIDR, ENETC_PSIIER_MR_MASK); + enetc_msg_enable_mr_int(hw); + return; + } + + for (i = 0; i < pf->num_vfs; i++) { + u32 psimsgrr; + u16 msg_code; + + if (!(ENETC_PSIMSGRR_MR(i) & mr_mask)) + continue; + + enetc_msg_handle_rxmsg(pf, i, &msg_code); + + psimsgrr = ENETC_SIMSGSR_SET_MC(msg_code); + psimsgrr |= ENETC_PSIMSGRR_MR(i); /* w1c */ + enetc_wr(hw, ENETC_PSIMSGRR, psimsgrr); + } + } +} + +/* Init */ +static int enetc_msg_alloc_mbx(struct enetc_si *si, int idx) +{ + struct enetc_pf *pf = enetc_si_priv(si); + struct device *dev = &si->pdev->dev; + struct enetc_hw *hw = &si->hw; + struct enetc_msg_swbd *msg; + u32 val; + + msg = &pf->rxmsg[idx]; + /* allocate and set receive buffer */ + msg->size = 
ENETC_DEFAULT_MSG_SIZE; + + msg->vaddr = dma_alloc_coherent(dev, msg->size, &msg->dma, + GFP_KERNEL); + if (!msg->vaddr) { + dev_err(dev, "msg: fail to alloc dma buffer of size: %d\n", + msg->size); + return -ENOMEM; + } + + /* set multiple of 32 bytes */ + val = lower_32_bits(msg->dma); + enetc_wr(hw, ENETC_PSIVMSGRCVAR0(idx), val); + val = upper_32_bits(msg->dma); + enetc_wr(hw, ENETC_PSIVMSGRCVAR1(idx), val); + + return 0; +} + +static void enetc_msg_free_mbx(struct enetc_si *si, int idx) +{ + struct enetc_pf *pf = enetc_si_priv(si); + struct enetc_hw *hw = &si->hw; + struct enetc_msg_swbd *msg; + + msg = &pf->rxmsg[idx]; + dma_free_coherent(&si->pdev->dev, msg->size, msg->vaddr, msg->dma); + memset(msg, 0, sizeof(*msg)); + + enetc_wr(hw, ENETC_PSIVMSGRCVAR0(idx), 0); + enetc_wr(hw, ENETC_PSIVMSGRCVAR1(idx), 0); +} + +int enetc_msg_psi_init(struct enetc_pf *pf) +{ + struct enetc_si *si = pf->si; + int vector, i, err; + + /* register message passing interrupt handler */ + snprintf(pf->msg_int_name, sizeof(pf->msg_int_name), "%s-vfmsg", + si->ndev->name); + vector = pci_irq_vector(si->pdev, ENETC_SI_INT_IDX); + err = request_irq(vector, enetc_msg_psi_msix, 0, pf->msg_int_name, si); + if (err) { + dev_err(&si->pdev->dev, + "PSI messaging: request_irq() failed!\n"); + return err; + } + + /* set one IRQ entry for PSI message receive notification (SI int) */ + enetc_wr(&si->hw, ENETC_SIMSIVR, ENETC_SI_INT_IDX); + + /* initialize PSI mailbox */ + INIT_WORK(&pf->msg_task, enetc_msg_task); + + for (i = 0; i < pf->num_vfs; i++) { + err = enetc_msg_alloc_mbx(si, i); + if (err) + goto err_init_mbx; + } + + /* enable MR interrupts */ + enetc_msg_enable_mr_int(&si->hw); + + return 0; + +err_init_mbx: + for (i--; i >= 0; i--) + enetc_msg_free_mbx(si, i); + + free_irq(vector, si); + + return err; +} + +void enetc_msg_psi_free(struct enetc_pf *pf) +{ + struct enetc_si *si = pf->si; + int i; + + cancel_work_sync(&pf->msg_task); + + /* disable MR interrupts */ + enetc_msg_disable_mr_int(&si->hw); + + for (i = 0; i < pf->num_vfs; i++) + enetc_msg_free_mbx(si, i); + + /* de-register message passing interrupt handler */ + free_irq(pci_irq_vector(si->pdev, ENETC_SI_INT_IDX), si); +} diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c new file mode 100644 index 000000000000..15876a6e7598 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -0,0 +1,943 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2017-2019 NXP */ + +#include <linux/module.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include "enetc_pf.h" + +#define ENETC_DRV_VER_MAJ 1 +#define ENETC_DRV_VER_MIN 0 + +#define ENETC_DRV_VER_STR __stringify(ENETC_DRV_VER_MAJ) "." 
\ + __stringify(ENETC_DRV_VER_MIN) +static const char enetc_drv_ver[] = ENETC_DRV_VER_STR; +#define ENETC_DRV_NAME_STR "ENETC PF driver" +static const char enetc_drv_name[] = ENETC_DRV_NAME_STR; + +static void enetc_pf_get_primary_mac_addr(struct enetc_hw *hw, int si, u8 *addr) +{ + u32 upper = __raw_readl(hw->port + ENETC_PSIPMAR0(si)); + u16 lower = __raw_readw(hw->port + ENETC_PSIPMAR1(si)); + + *(u32 *)addr = upper; + *(u16 *)(addr + 4) = lower; +} + +static void enetc_pf_set_primary_mac_addr(struct enetc_hw *hw, int si, + const u8 *addr) +{ + u32 upper = *(const u32 *)addr; + u16 lower = *(const u16 *)(addr + 4); + + __raw_writel(upper, hw->port + ENETC_PSIPMAR0(si)); + __raw_writew(lower, hw->port + ENETC_PSIPMAR1(si)); +} + +static int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct sockaddr *saddr = addr; + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(ndev->dev_addr, saddr->sa_data, ndev->addr_len); + enetc_pf_set_primary_mac_addr(&priv->si->hw, 0, saddr->sa_data); + + return 0; +} + +static void enetc_set_vlan_promisc(struct enetc_hw *hw, char si_map) +{ + u32 val = enetc_port_rd(hw, ENETC_PSIPVMR); + + val &= ~ENETC_PSIPVMR_SET_VP(ENETC_VLAN_PROMISC_MAP_ALL); + enetc_port_wr(hw, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VP(si_map) | val); +} + +static bool enetc_si_vlan_promisc_is_on(struct enetc_pf *pf, int si_idx) +{ + return pf->vlan_promisc_simap & BIT(si_idx); +} + +static bool enetc_vlan_filter_is_on(struct enetc_pf *pf) +{ + int i; + + for_each_set_bit(i, pf->active_vlans, VLAN_N_VID) + return true; + + return false; +} + +static void enetc_enable_si_vlan_promisc(struct enetc_pf *pf, int si_idx) +{ + pf->vlan_promisc_simap |= BIT(si_idx); + enetc_set_vlan_promisc(&pf->si->hw, pf->vlan_promisc_simap); +} + +static void enetc_disable_si_vlan_promisc(struct enetc_pf *pf, int si_idx) +{ + pf->vlan_promisc_simap &= ~BIT(si_idx); + enetc_set_vlan_promisc(&pf->si->hw, pf->vlan_promisc_simap); +} + +static void enetc_set_isol_vlan(struct enetc_hw *hw, int si, u16 vlan, u8 qos) +{ + u32 val = 0; + + if (vlan) + val = ENETC_PSIVLAN_EN | ENETC_PSIVLAN_SET_QOS(qos) | vlan; + + enetc_port_wr(hw, ENETC_PSIVLANR(si), val); +} + +static int enetc_mac_addr_hash_idx(const u8 *addr) +{ + u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16; + u64 mask = 0; + int res = 0; + int i; + + for (i = 0; i < 8; i++) + mask |= BIT_ULL(i * 6); + + for (i = 0; i < 6; i++) + res |= (hweight64(fold & (mask << i)) & 0x1) << i; + + return res; +} + +static void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter) +{ + filter->mac_addr_cnt = 0; + + bitmap_zero(filter->mac_hash_table, + ENETC_MADDR_HASH_TBL_SZ); +} + +static void enetc_add_mac_addr_em_filter(struct enetc_mac_filter *filter, + const unsigned char *addr) +{ + /* add exact match addr */ + ether_addr_copy(filter->mac_addr, addr); + filter->mac_addr_cnt++; +} + +static void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter, + const unsigned char *addr) +{ + int idx = enetc_mac_addr_hash_idx(addr); + + /* add hash table entry */ + __set_bit(idx, filter->mac_hash_table); + filter->mac_addr_cnt++; +} + +static void enetc_clear_mac_ht_flt(struct enetc_si *si, int si_idx, int type) +{ + bool err = si->errata & ENETC_ERR_UCMCSWP; + + if (type == UC) { + enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err), 0); + enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx), 0); + } else { /* MC */ + enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err), 
0); + enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx), 0); + } +} + +static void enetc_set_mac_ht_flt(struct enetc_si *si, int si_idx, int type, + u32 *hash) +{ + bool err = si->errata & ENETC_ERR_UCMCSWP; + + if (type == UC) { + enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err), *hash); + enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx), *(hash + 1)); + } else { /* MC */ + enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err), *hash); + enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx), *(hash + 1)); + } +} + +static void enetc_sync_mac_filters(struct enetc_pf *pf) +{ + struct enetc_mac_filter *f = pf->mac_filter; + struct enetc_si *si = pf->si; + int i, pos; + + pos = EMETC_MAC_ADDR_FILT_RES; + + for (i = 0; i < MADDR_TYPE; i++, f++) { + bool em = (f->mac_addr_cnt == 1) && (i == UC); + bool clear = !f->mac_addr_cnt; + + if (clear) { + if (i == UC) + enetc_clear_mac_flt_entry(si, pos); + + enetc_clear_mac_ht_flt(si, 0, i); + continue; + } + + /* exact match filter */ + if (em) { + int err; + + enetc_clear_mac_ht_flt(si, 0, UC); + + err = enetc_set_mac_flt_entry(si, pos, f->mac_addr, + BIT(0)); + if (!err) + continue; + + /* fallback to HT filtering */ + dev_warn(&si->pdev->dev, "fallback to HT filt (%d)\n", + err); + } + + /* hash table filter, clear EM filter for UC entries */ + if (i == UC) + enetc_clear_mac_flt_entry(si, pos); + + enetc_set_mac_ht_flt(si, 0, i, (u32 *)f->mac_hash_table); + } +} + +static void enetc_pf_set_rx_mode(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_pf *pf = enetc_si_priv(priv->si); + struct enetc_hw *hw = &priv->si->hw; + bool uprom = false, mprom = false; + struct enetc_mac_filter *filter; + struct netdev_hw_addr *ha; + u32 psipmr = 0; + bool em; + + if (ndev->flags & IFF_PROMISC) { + /* enable promisc mode for SI0 (PF) */ + psipmr = ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0); + uprom = true; + mprom = true; + /* enable VLAN promisc mode for SI0 */ + if (!enetc_si_vlan_promisc_is_on(pf, 0)) + enetc_enable_si_vlan_promisc(pf, 0); + + } else if (ndev->flags & IFF_ALLMULTI) { + /* enable multi cast promisc mode for SI0 (PF) */ + psipmr = ENETC_PSIPMR_SET_MP(0); + mprom = true; + } + + /* first 2 filter entries belong to PF */ + if (!uprom) { + /* Update unicast filters */ + filter = &pf->mac_filter[UC]; + enetc_reset_mac_addr_filter(filter); + + em = (netdev_uc_count(ndev) == 1); + netdev_for_each_uc_addr(ha, ndev) { + if (em) { + enetc_add_mac_addr_em_filter(filter, ha->addr); + break; + } + + enetc_add_mac_addr_ht_filter(filter, ha->addr); + } + } + + if (!mprom) { + /* Update multicast filters */ + filter = &pf->mac_filter[MC]; + enetc_reset_mac_addr_filter(filter); + + netdev_for_each_mc_addr(ha, ndev) { + if (!is_multicast_ether_addr(ha->addr)) + continue; + + enetc_add_mac_addr_ht_filter(filter, ha->addr); + } + } + + if (!uprom || !mprom) + /* update PF entries */ + enetc_sync_mac_filters(pf); + + psipmr |= enetc_port_rd(hw, ENETC_PSIPMR) & + ~(ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0)); + enetc_port_wr(hw, ENETC_PSIPMR, psipmr); +} + +static void enetc_set_vlan_ht_filter(struct enetc_hw *hw, int si_idx, + u32 *hash) +{ + enetc_port_wr(hw, ENETC_PSIVHFR0(si_idx), *hash); + enetc_port_wr(hw, ENETC_PSIVHFR1(si_idx), *(hash + 1)); +} + +static int enetc_vid_hash_idx(unsigned int vid) +{ + int res = 0; + int i; + + for (i = 0; i < 6; i++) + res |= (hweight8(vid & (BIT(i) | BIT(i + 6))) & 0x1) << i; + + return res; +} + +static void enetc_sync_vlan_ht_filter(struct enetc_pf *pf, bool rehash) +{ + int i; + 
+ if (rehash) { + bitmap_zero(pf->vlan_ht_filter, ENETC_VLAN_HT_SIZE); + + for_each_set_bit(i, pf->active_vlans, VLAN_N_VID) { + int hidx = enetc_vid_hash_idx(i); + + __set_bit(hidx, pf->vlan_ht_filter); + } + } + + enetc_set_vlan_ht_filter(&pf->si->hw, 0, (u32 *)pf->vlan_ht_filter); +} + +static int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_pf *pf = enetc_si_priv(priv->si); + int idx; + + if (enetc_si_vlan_promisc_is_on(pf, 0)) + enetc_disable_si_vlan_promisc(pf, 0); + + __set_bit(vid, pf->active_vlans); + + idx = enetc_vid_hash_idx(vid); + if (!__test_and_set_bit(idx, pf->vlan_ht_filter)) + enetc_sync_vlan_ht_filter(pf, false); + + return 0; +} + +static int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_pf *pf = enetc_si_priv(priv->si); + + __clear_bit(vid, pf->active_vlans); + enetc_sync_vlan_ht_filter(pf, true); + + if (!enetc_vlan_filter_is_on(pf)) + enetc_enable_si_vlan_promisc(pf, 0); + + return 0; +} + +static void enetc_set_loopback(struct net_device *ndev, bool en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + u32 reg; + + reg = enetc_port_rd(hw, ENETC_PM0_IF_MODE); + if (reg & ENETC_PMO_IFM_RG) { + /* RGMII mode */ + reg = (reg & ~ENETC_PM0_IFM_RLP) | + (en ? ENETC_PM0_IFM_RLP : 0); + enetc_port_wr(hw, ENETC_PM0_IF_MODE, reg); + } else { + /* assume SGMII mode */ + reg = enetc_port_rd(hw, ENETC_PM0_CMD_CFG); + reg = (reg & ~ENETC_PM0_CMD_XGLP) | + (en ? ENETC_PM0_CMD_XGLP : 0); + reg = (reg & ~ENETC_PM0_CMD_PHY_TX_EN) | + (en ? ENETC_PM0_CMD_PHY_TX_EN : 0); + enetc_port_wr(hw, ENETC_PM0_CMD_CFG, reg); + enetc_port_wr(hw, ENETC_PM1_CMD_CFG, reg); + } +} + +static int enetc_pf_set_vf_mac(struct net_device *ndev, int vf, u8 *mac) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_pf *pf = enetc_si_priv(priv->si); + struct enetc_vf_state *vf_state; + + if (vf >= pf->total_vfs) + return -EINVAL; + + if (!is_valid_ether_addr(mac)) + return -EADDRNOTAVAIL; + + vf_state = &pf->vf_state[vf]; + vf_state->flags |= ENETC_VF_FLAG_PF_SET_MAC; + enetc_pf_set_primary_mac_addr(&priv->si->hw, vf + 1, mac); + return 0; +} + +static int enetc_pf_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, + u8 qos, __be16 proto) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_pf *pf = enetc_si_priv(priv->si); + + if (priv->si->errata & ENETC_ERR_VLAN_ISOL) + return -EOPNOTSUPP; + + if (vf >= pf->total_vfs) + return -EINVAL; + + if (proto != htons(ETH_P_8021Q)) + /* only C-tags supported for now */ + return -EPROTONOSUPPORT; + + enetc_set_isol_vlan(&priv->si->hw, vf + 1, vlan, qos); + return 0; +} + +static int enetc_pf_set_vf_spoofchk(struct net_device *ndev, int vf, bool en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_pf *pf = enetc_si_priv(priv->si); + u32 cfgr; + + if (vf >= pf->total_vfs) + return -EINVAL; + + cfgr = enetc_port_rd(&priv->si->hw, ENETC_PSICFGR0(vf + 1)); + cfgr = (cfgr & ~ENETC_PSICFGR0_ASE) | (en ? 
ENETC_PSICFGR0_ASE : 0); + enetc_port_wr(&priv->si->hw, ENETC_PSICFGR0(vf + 1), cfgr); + + return 0; +} + +static void enetc_port_setup_primary_mac_address(struct enetc_si *si) +{ + unsigned char mac_addr[MAX_ADDR_LEN]; + struct enetc_pf *pf = enetc_si_priv(si); + struct enetc_hw *hw = &si->hw; + int i; + + /* check MAC addresses for PF and all VFs, if any is 0 set it to a random one */ + for (i = 0; i < pf->total_vfs + 1; i++) { + enetc_pf_get_primary_mac_addr(hw, i, mac_addr); + if (!is_zero_ether_addr(mac_addr)) + continue; + eth_random_addr(mac_addr); + dev_info(&si->pdev->dev, "no MAC address specified for SI%d, using %pM\n", + i, mac_addr); + enetc_pf_set_primary_mac_addr(hw, i, mac_addr); + } +} + +static void enetc_port_assign_rfs_entries(struct enetc_si *si) +{ + struct enetc_pf *pf = enetc_si_priv(si); + struct enetc_hw *hw = &si->hw; + int num_entries, vf_entries, i; + u32 val; + + /* split RFS entries between functions */ + val = enetc_port_rd(hw, ENETC_PRFSCAPR); + num_entries = ENETC_PRFSCAPR_GET_NUM_RFS(val); + vf_entries = num_entries / (pf->total_vfs + 1); + + for (i = 0; i < pf->total_vfs; i++) + enetc_port_wr(hw, ENETC_PSIRFSCFGR(i + 1), vf_entries); + enetc_port_wr(hw, ENETC_PSIRFSCFGR(0), + num_entries - vf_entries * pf->total_vfs); + + /* enable RFS on port */ + enetc_port_wr(hw, ENETC_PRFSMR, ENETC_PRFSMR_RFSE); +} + +static void enetc_port_si_configure(struct enetc_si *si) +{ + struct enetc_pf *pf = enetc_si_priv(si); + struct enetc_hw *hw = &si->hw; + int num_rings, i; + u32 val; + + val = enetc_port_rd(hw, ENETC_PCAPR0); + num_rings = min(ENETC_PCAPR0_RXBDR(val), ENETC_PCAPR0_TXBDR(val)); + + val = ENETC_PSICFGR0_SET_TXBDR(ENETC_PF_NUM_RINGS); + val |= ENETC_PSICFGR0_SET_RXBDR(ENETC_PF_NUM_RINGS); + + if (unlikely(num_rings < ENETC_PF_NUM_RINGS)) { + val = ENETC_PSICFGR0_SET_TXBDR(num_rings); + val |= ENETC_PSICFGR0_SET_RXBDR(num_rings); + + dev_warn(&si->pdev->dev, "Found %d rings, expected %d!\n", + num_rings, ENETC_PF_NUM_RINGS); + + num_rings = 0; + } + + /* Add default one-time settings for SI0 (PF) */ + val |= ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S); + + enetc_port_wr(hw, ENETC_PSICFGR0(0), val); + + if (num_rings) + num_rings -= ENETC_PF_NUM_RINGS; + + /* Configure the SIs for each available VF */ + val = ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S); + val |= ENETC_PSICFGR0_VTE | ENETC_PSICFGR0_SIVIE; + + if (num_rings) { + num_rings /= pf->total_vfs; + val |= ENETC_PSICFGR0_SET_TXBDR(num_rings); + val |= ENETC_PSICFGR0_SET_RXBDR(num_rings); + } + + for (i = 0; i < pf->total_vfs; i++) + enetc_port_wr(hw, ENETC_PSICFGR0(i + 1), val); + + /* Port level VLAN settings */ + val = ENETC_PVCLCTR_OVTPIDL(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S); + enetc_port_wr(hw, ENETC_PVCLCTR, val); + /* use outer tag for VLAN filtering */ + enetc_port_wr(hw, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS); +} + +static void enetc_configure_port_mac(struct enetc_hw *hw) +{ + enetc_port_wr(hw, ENETC_PM0_MAXFRM, + ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE)); + + enetc_port_wr(hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE); + enetc_port_wr(hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE); + + enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN | + ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC | + ENETC_PM0_TX_EN | ENETC_PM0_RX_EN); + + enetc_port_wr(hw, ENETC_PM1_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN | + ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC | + ENETC_PM0_TX_EN | ENETC_PM0_RX_EN); + /* set auto-speed for RGMII */ + if (enetc_port_rd(hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG) + 
enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_RGAUTO); + if (enetc_global_rd(hw, ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII) + enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_XGMII); +} + +static void enetc_configure_port_pmac(struct enetc_hw *hw) +{ + u32 temp; + + /* Set pMAC step lock */ + temp = enetc_port_rd(hw, ENETC_PFPMR); + enetc_port_wr(hw, ENETC_PFPMR, + temp | ENETC_PFPMR_PMACE | ENETC_PFPMR_MWLM); + + temp = enetc_port_rd(hw, ENETC_MMCSR); + enetc_port_wr(hw, ENETC_MMCSR, temp | ENETC_MMCSR_ME); +} + +static void enetc_configure_port(struct enetc_pf *pf) +{ + u8 hash_key[ENETC_RSSHASH_KEY_SIZE]; + struct enetc_hw *hw = &pf->si->hw; + + enetc_configure_port_pmac(hw); + + enetc_configure_port_mac(hw); + + enetc_port_si_configure(pf->si); + + /* set up hash key */ + get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE); + enetc_set_rss_key(hw, hash_key); + + /* split up RFS entries */ + enetc_port_assign_rfs_entries(pf->si); + + /* fix-up primary MAC addresses, if not set already */ + enetc_port_setup_primary_mac_address(pf->si); + + /* enforce VLAN promisc mode for all SIs */ + pf->vlan_promisc_simap = ENETC_VLAN_PROMISC_MAP_ALL; + enetc_set_vlan_promisc(hw, pf->vlan_promisc_simap); + + enetc_port_wr(hw, ENETC_PSIPMR, 0); + + /* enable port */ + enetc_port_wr(hw, ENETC_PMR, ENETC_PMR_EN); +} + +/* Messaging */ +static u16 enetc_msg_pf_set_vf_primary_mac_addr(struct enetc_pf *pf, + int vf_id) +{ + struct enetc_vf_state *vf_state = &pf->vf_state[vf_id]; + struct enetc_msg_swbd *msg = &pf->rxmsg[vf_id]; + struct enetc_msg_cmd_set_primary_mac *cmd; + struct device *dev = &pf->si->pdev->dev; + u16 cmd_id; + char *addr; + + cmd = (struct enetc_msg_cmd_set_primary_mac *)msg->vaddr; + cmd_id = cmd->header.id; + if (cmd_id != ENETC_MSG_CMD_MNG_ADD) + return ENETC_MSG_CMD_STATUS_FAIL; + + addr = cmd->mac.sa_data; + if (vf_state->flags & ENETC_VF_FLAG_PF_SET_MAC) + dev_warn(dev, "Attempt to override PF set mac addr for VF%d\n", + vf_id); + else + enetc_pf_set_primary_mac_addr(&pf->si->hw, vf_id + 1, addr); + + return ENETC_MSG_CMD_STATUS_OK; +} + +void enetc_msg_handle_rxmsg(struct enetc_pf *pf, int vf_id, u16 *status) +{ + struct enetc_msg_swbd *msg = &pf->rxmsg[vf_id]; + struct device *dev = &pf->si->pdev->dev; + struct enetc_msg_cmd_header *cmd_hdr; + u16 cmd_type; + + *status = ENETC_MSG_CMD_STATUS_OK; + cmd_hdr = (struct enetc_msg_cmd_header *)msg->vaddr; + cmd_type = cmd_hdr->type; + + switch (cmd_type) { + case ENETC_MSG_CMD_MNG_MAC: + *status = enetc_msg_pf_set_vf_primary_mac_addr(pf, vf_id); + break; + default: + dev_err(dev, "command not supported (cmd_type: 0x%x)\n", + cmd_type); + } +} + +#ifdef CONFIG_PCI_IOV +static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct enetc_si *si = pci_get_drvdata(pdev); + struct enetc_pf *pf = enetc_si_priv(si); + int err; + + if (!num_vfs) { + enetc_msg_psi_free(pf); + kfree(pf->vf_state); + pf->num_vfs = 0; + pci_disable_sriov(pdev); + } else { + pf->num_vfs = num_vfs; + + pf->vf_state = kcalloc(num_vfs, sizeof(struct enetc_vf_state), + GFP_KERNEL); + if (!pf->vf_state) { + pf->num_vfs = 0; + return -ENOMEM; + } + + err = enetc_msg_psi_init(pf); + if (err) { + dev_err(&pdev->dev, "enetc_msg_psi_init (%d)\n", err); + goto err_msg_psi; + } + + err = pci_enable_sriov(pdev, num_vfs); + if (err) { + dev_err(&pdev->dev, "pci_enable_sriov err %d\n", err); + goto err_en_sriov; + } + } + + return num_vfs; + +err_en_sriov: + enetc_msg_psi_free(pf); +err_msg_psi: + kfree(pf->vf_state); + pf->num_vfs = 0; + + return err; 
+} +#else +#define enetc_sriov_configure(pdev, num_vfs) (void)0 +#endif + +static int enetc_pf_set_features(struct net_device *ndev, + netdev_features_t features) +{ + netdev_features_t changed = ndev->features ^ features; + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + enetc_enable_rxvlan(&priv->si->hw, 0, + !!(features & NETIF_F_HW_VLAN_CTAG_RX)); + + if (changed & NETIF_F_HW_VLAN_CTAG_TX) + enetc_enable_txvlan(&priv->si->hw, 0, + !!(features & NETIF_F_HW_VLAN_CTAG_TX)); + + if (changed & NETIF_F_LOOPBACK) + enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK)); + + return enetc_set_features(ndev, features); +} + +static const struct net_device_ops enetc_ndev_ops = { + .ndo_open = enetc_open, + .ndo_stop = enetc_close, + .ndo_start_xmit = enetc_xmit, + .ndo_get_stats = enetc_get_stats, + .ndo_set_mac_address = enetc_pf_set_mac_addr, + .ndo_set_rx_mode = enetc_pf_set_rx_mode, + .ndo_vlan_rx_add_vid = enetc_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = enetc_vlan_rx_del_vid, + .ndo_set_vf_mac = enetc_pf_set_vf_mac, + .ndo_set_vf_vlan = enetc_pf_set_vf_vlan, + .ndo_set_vf_spoofchk = enetc_pf_set_vf_spoofchk, + .ndo_set_features = enetc_pf_set_features, +}; + +static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev, + const struct net_device_ops *ndev_ops) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + SET_NETDEV_DEV(ndev, &si->pdev->dev); + priv->ndev = ndev; + priv->si = si; + priv->dev = &si->pdev->dev; + si->ndev = ndev; + + priv->msg_enable = (NETIF_MSG_WOL << 1) - 1; + ndev->netdev_ops = ndev_ops; + enetc_set_ethtool_ops(ndev); + ndev->watchdog_timeo = 5 * HZ; + ndev->max_mtu = ENETC_MAX_MTU; + + ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_LOOPBACK; + ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | + NETIF_F_RXCSUM | NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; + + if (si->num_rss) + ndev->hw_features |= NETIF_F_RXHASH; + + if (si->errata & ENETC_ERR_TXCSUM) { + ndev->hw_features &= ~NETIF_F_HW_CSUM; + ndev->features &= ~NETIF_F_HW_CSUM; + } + + ndev->priv_flags |= IFF_UNICAST_FLT; + + /* pick up primary MAC address from SI */ + enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr); +} + +static int enetc_of_get_phy(struct enetc_ndev_priv *priv) +{ + struct enetc_pf *pf = enetc_si_priv(priv->si); + struct device_node *np = priv->dev->of_node; + int err; + + if (!np) { + dev_err(priv->dev, "missing ENETC port node\n"); + return -ENODEV; + } + + priv->phy_node = of_parse_phandle(np, "phy-handle", 0); + if (!priv->phy_node) { + if (!of_phy_is_fixed_link(np)) { + dev_err(priv->dev, "PHY not specified\n"); + return -ENODEV; + } + + err = of_phy_register_fixed_link(np); + if (err < 0) { + dev_err(priv->dev, "fixed link registration failed\n"); + return err; + } + + priv->phy_node = of_node_get(np); + } + + if (!of_phy_is_fixed_link(np)) { + err = enetc_mdio_probe(pf); + if (err) { + of_node_put(priv->phy_node); + return err; + } + } + + priv->if_mode = of_get_phy_mode(np); + if (priv->if_mode < 0) { + dev_err(priv->dev, "missing phy type\n"); + of_node_put(priv->phy_node); + if (of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); + else + enetc_mdio_remove(pf); + + return -EINVAL; + } + + return 0; +} + +static void enetc_of_put_phy(struct enetc_ndev_priv *priv) +{ + struct device_node *np = priv->dev->of_node; + + if (np && of_phy_is_fixed_link(np)) + 
of_phy_deregister_fixed_link(np); + if (priv->phy_node) + of_node_put(priv->phy_node); +} + +static int enetc_pf_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct enetc_ndev_priv *priv; + struct net_device *ndev; + struct enetc_si *si; + struct enetc_pf *pf; + int err; + + if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) { + dev_info(&pdev->dev, "device is disabled, skipping\n"); + return -ENODEV; + } + + err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf)); + if (err) { + dev_err(&pdev->dev, "PCI probing failed\n"); + return err; + } + + si = pci_get_drvdata(pdev); + if (!si->hw.port || !si->hw.global) { + err = -ENODEV; + dev_err(&pdev->dev, "could not map PF space, probing a VF?\n"); + goto err_map_pf_space; + } + + pf = enetc_si_priv(si); + pf->si = si; + pf->total_vfs = pci_sriov_get_totalvfs(pdev); + + enetc_configure_port(pf); + + enetc_get_si_caps(si); + + ndev = alloc_etherdev_mq(sizeof(*priv), ENETC_MAX_NUM_TXQS); + if (!ndev) { + err = -ENOMEM; + dev_err(&pdev->dev, "netdev creation failed\n"); + goto err_alloc_netdev; + } + + enetc_pf_netdev_setup(si, ndev, &enetc_ndev_ops); + + priv = netdev_priv(ndev); + + enetc_init_si_rings_params(priv); + + err = enetc_alloc_si_resources(priv); + if (err) { + dev_err(&pdev->dev, "SI resource alloc failed\n"); + goto err_alloc_si_res; + } + + err = enetc_alloc_msix(priv); + if (err) { + dev_err(&pdev->dev, "MSIX alloc failed\n"); + goto err_alloc_msix; + } + + err = enetc_of_get_phy(priv); + if (err) + dev_warn(&pdev->dev, "Fallback to PHY-less operation\n"); + + err = register_netdev(ndev); + if (err) + goto err_reg_netdev; + + netif_carrier_off(ndev); + + netif_info(priv, probe, ndev, "%s v%s\n", + enetc_drv_name, enetc_drv_ver); + + return 0; + +err_reg_netdev: + enetc_of_put_phy(priv); + enetc_free_msix(priv); +err_alloc_msix: + enetc_free_si_resources(priv); +err_alloc_si_res: + si->ndev = NULL; + free_netdev(ndev); +err_alloc_netdev: +err_map_pf_space: + enetc_pci_remove(pdev); + + return err; +} + +static void enetc_pf_remove(struct pci_dev *pdev) +{ + struct enetc_si *si = pci_get_drvdata(pdev); + struct enetc_pf *pf = enetc_si_priv(si); + struct enetc_ndev_priv *priv; + + if (pf->num_vfs) + enetc_sriov_configure(pdev, 0); + + priv = netdev_priv(si->ndev); + netif_info(priv, drv, si->ndev, "%s v%s remove\n", + enetc_drv_name, enetc_drv_ver); + + unregister_netdev(si->ndev); + + enetc_mdio_remove(pf); + enetc_of_put_phy(priv); + + enetc_free_msix(priv); + + enetc_free_si_resources(priv); + + free_netdev(si->ndev); + + enetc_pci_remove(pdev); +} + +static const struct pci_device_id enetc_pf_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PF) }, + { 0, } /* End of table. 
*/ +}; +MODULE_DEVICE_TABLE(pci, enetc_pf_id_table); + +static struct pci_driver enetc_pf_driver = { + .name = KBUILD_MODNAME, + .id_table = enetc_pf_id_table, + .probe = enetc_pf_probe, + .remove = enetc_pf_remove, +#ifdef CONFIG_PCI_IOV + .sriov_configure = enetc_sriov_configure, +#endif +}; +module_pci_driver(enetc_pf_driver); + +MODULE_DESCRIPTION(ENETC_DRV_NAME_STR); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(ENETC_DRV_VER_STR); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h new file mode 100644 index 000000000000..10dd1b53bb08 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2017-2019 NXP */ + +#include "enetc.h" + +#define ENETC_PF_NUM_RINGS 8 + +enum enetc_mac_addr_type {UC, MC, MADDR_TYPE}; +#define ENETC_MAX_NUM_MAC_FLT ((ENETC_MAX_NUM_VFS + 1) * MADDR_TYPE) + +#define ENETC_MADDR_HASH_TBL_SZ 64 +struct enetc_mac_filter { + union { + char mac_addr[ETH_ALEN]; + DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ); + }; + int mac_addr_cnt; +}; + +#define ENETC_VLAN_HT_SIZE 64 + +enum enetc_vf_flags { + ENETC_VF_FLAG_PF_SET_MAC = BIT(0), +}; + +struct enetc_vf_state { + enum enetc_vf_flags flags; +}; + +struct enetc_pf { + struct enetc_si *si; + int num_vfs; /* number of active VFs, after sriov_init */ + int total_vfs; /* max number of VFs, set for PF at probe */ + struct enetc_vf_state *vf_state; + + struct enetc_mac_filter mac_filter[ENETC_MAX_NUM_MAC_FLT]; + + struct enetc_msg_swbd rxmsg[ENETC_MAX_NUM_VFS]; + struct work_struct msg_task; + char msg_int_name[ENETC_INT_NAME_MAX]; + + char vlan_promisc_simap; /* bitmap of SIs in VLAN promisc mode */ + DECLARE_BITMAP(vlan_ht_filter, ENETC_VLAN_HT_SIZE); + DECLARE_BITMAP(active_vlans, VLAN_N_VID); + + struct mii_bus *mdio; /* saved for cleanup */ +}; + +int enetc_msg_psi_init(struct enetc_pf *pf); +void enetc_msg_psi_free(struct enetc_pf *pf); +void enetc_msg_handle_rxmsg(struct enetc_pf *pf, int mbox_id, u16 *status); + +/* MDIO */ +int enetc_mdio_probe(struct enetc_pf *pf); +void enetc_mdio_remove(struct enetc_pf *pf); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c new file mode 100644 index 000000000000..8c1497e7d9c5 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2019 NXP */ + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/fsl/ptp_qoriq.h> + +#include "enetc.h" + +static struct ptp_clock_info enetc_ptp_caps = { + .owner = THIS_MODULE, + .name = "ENETC PTP clock", + .max_adj = 512000, + .n_alarm = 0, + .n_ext_ts = 2, + .n_per_out = 0, + .n_pins = 0, + .pps = 1, + .adjfine = ptp_qoriq_adjfine, + .adjtime = ptp_qoriq_adjtime, + .gettime64 = ptp_qoriq_gettime, + .settime64 = ptp_qoriq_settime, + .enable = ptp_qoriq_enable, +}; + +static int enetc_ptp_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct ptp_qoriq *ptp_qoriq; + void __iomem *base; + int err, len, n; + + if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) { + dev_info(&pdev->dev, "device is disabled, skipping\n"); + return -ENODEV; + } + + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, "device enable failed\n"); + return err; + } + + /* set up for high or low dma */ + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) 
{ + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "DMA configuration failed: 0x%x\n", err); + goto err_dma; + } + } + + err = pci_request_mem_regions(pdev, KBUILD_MODNAME); + if (err) { + dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err); + goto err_pci_mem_reg; + } + + pci_set_master(pdev); + + ptp_qoriq = kzalloc(sizeof(*ptp_qoriq), GFP_KERNEL); + if (!ptp_qoriq) { + err = -ENOMEM; + goto err_alloc_ptp; + } + + len = pci_resource_len(pdev, ENETC_BAR_REGS); + + base = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len); + if (!base) { + err = -ENXIO; + dev_err(&pdev->dev, "ioremap() failed\n"); + goto err_ioremap; + } + + /* Allocate 1 interrupt */ + n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX); + if (n != 1) { + err = -EPERM; + goto err_irq; + } + + ptp_qoriq->irq = pci_irq_vector(pdev, 0); + + err = request_irq(ptp_qoriq->irq, ptp_qoriq_isr, 0, DRIVER, ptp_qoriq); + if (err) { + dev_err(&pdev->dev, "request_irq() failed!\n"); + goto err_irq; + } + + ptp_qoriq->dev = &pdev->dev; + + err = ptp_qoriq_init(ptp_qoriq, base, &enetc_ptp_caps); + if (err) + goto err_no_clock; + + pci_set_drvdata(pdev, ptp_qoriq); + + return 0; + +err_no_clock: + free_irq(ptp_qoriq->irq, ptp_qoriq); +err_irq: + iounmap(base); +err_ioremap: + kfree(ptp_qoriq); +err_alloc_ptp: + pci_release_mem_regions(pdev); +err_pci_mem_reg: +err_dma: + pci_disable_device(pdev); + + return err; +} + +static void enetc_ptp_remove(struct pci_dev *pdev) +{ + struct ptp_qoriq *ptp_qoriq = pci_get_drvdata(pdev); + + ptp_qoriq_free(ptp_qoriq); + kfree(ptp_qoriq); + + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} + +static const struct pci_device_id enetc_ptp_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PTP) }, + { 0, } /* End of table. */ +}; +MODULE_DEVICE_TABLE(pci, enetc_ptp_id_table); + +static struct pci_driver enetc_ptp_driver = { + .name = KBUILD_MODNAME, + .id_table = enetc_ptp_id_table, + .probe = enetc_ptp_probe, + .remove = enetc_ptp_remove, +}; +module_pci_driver(enetc_ptp_driver); + +MODULE_DESCRIPTION("ENETC PTP clock driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c new file mode 100644 index 000000000000..64bebee9f52a --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c @@ -0,0 +1,255 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2017-2019 NXP */ + +#include <linux/module.h> +#include "enetc.h" + +#define ENETC_DRV_VER_MAJ 1 +#define ENETC_DRV_VER_MIN 0 + +#define ENETC_DRV_VER_STR __stringify(ENETC_DRV_VER_MAJ) "." 
\ + __stringify(ENETC_DRV_VER_MIN) +static const char enetc_drv_ver[] = ENETC_DRV_VER_STR; +#define ENETC_DRV_NAME_STR "ENETC VF driver" +static const char enetc_drv_name[] = ENETC_DRV_NAME_STR; + +/* Messaging */ +static void enetc_msg_vsi_write_msg(struct enetc_hw *hw, + struct enetc_msg_swbd *msg) +{ + u32 val; + + val = enetc_vsi_set_msize(msg->size) | lower_32_bits(msg->dma); + enetc_wr(hw, ENETC_VSIMSGSNDAR1, upper_32_bits(msg->dma)); + enetc_wr(hw, ENETC_VSIMSGSNDAR0, val); +} + +static int enetc_msg_vsi_send(struct enetc_si *si, struct enetc_msg_swbd *msg) +{ + int timeout = 100; + u32 vsimsgsr; + + enetc_msg_vsi_write_msg(&si->hw, msg); + + do { + vsimsgsr = enetc_rd(&si->hw, ENETC_VSIMSGSR); + if (!(vsimsgsr & ENETC_VSIMSGSR_MB)) + break; + + usleep_range(1000, 2000); + } while (--timeout); + + if (!timeout) + return -ETIMEDOUT; + + /* check for message delivery error */ + if (vsimsgsr & ENETC_VSIMSGSR_MS) { + dev_err(&si->pdev->dev, "VSI command execute error: %d\n", + ENETC_SIMSGSR_GET_MC(vsimsgsr)); + return -EIO; + } + + return 0; +} + +static int enetc_msg_vsi_set_primary_mac_addr(struct enetc_ndev_priv *priv, + struct sockaddr *saddr) +{ + struct enetc_msg_cmd_set_primary_mac *cmd; + struct enetc_msg_swbd msg; + int err; + + msg.size = ALIGN(sizeof(struct enetc_msg_cmd_set_primary_mac), 64); + msg.vaddr = dma_alloc_coherent(priv->dev, msg.size, &msg.dma, + GFP_KERNEL); + if (!msg.vaddr) { + dev_err(priv->dev, "Failed to alloc Tx msg (size: %d)\n", + msg.size); + return -ENOMEM; + } + + cmd = (struct enetc_msg_cmd_set_primary_mac *)msg.vaddr; + cmd->header.type = ENETC_MSG_CMD_MNG_MAC; + cmd->header.id = ENETC_MSG_CMD_MNG_ADD; + memcpy(&cmd->mac, saddr, sizeof(struct sockaddr)); + + /* send the command and wait */ + err = enetc_msg_vsi_send(priv->si, &msg); + + dma_free_coherent(priv->dev, msg.size, msg.vaddr, msg.dma); + + return err; +} + +static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct sockaddr *saddr = addr; + int err; + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + err = enetc_msg_vsi_set_primary_mac_addr(priv, saddr); + if (err) + return err; + + return 0; +} + +static int enetc_vf_set_features(struct net_device *ndev, + netdev_features_t features) +{ + return enetc_set_features(ndev, features); +} + +/* Probing/ Init */ +static const struct net_device_ops enetc_ndev_ops = { + .ndo_open = enetc_open, + .ndo_stop = enetc_close, + .ndo_start_xmit = enetc_xmit, + .ndo_get_stats = enetc_get_stats, + .ndo_set_mac_address = enetc_vf_set_mac_addr, + .ndo_set_features = enetc_vf_set_features, +}; + +static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev, + const struct net_device_ops *ndev_ops) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + SET_NETDEV_DEV(ndev, &si->pdev->dev); + priv->ndev = ndev; + priv->si = si; + priv->dev = &si->pdev->dev; + si->ndev = ndev; + + priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1; + ndev->netdev_ops = ndev_ops; + enetc_set_ethtool_ops(ndev); + ndev->watchdog_timeo = 5 * HZ; + ndev->max_mtu = ENETC_MAX_MTU; + + ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | + NETIF_F_RXCSUM | NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + + if (si->num_rss) + ndev->hw_features |= NETIF_F_RXHASH; + + if (si->errata & ENETC_ERR_TXCSUM) { + ndev->hw_features &= ~NETIF_F_HW_CSUM; + 
ndev->features &= ~NETIF_F_HW_CSUM; + } + + /* pick up primary MAC address from SI */ + enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr); +} + +static int enetc_vf_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct enetc_ndev_priv *priv; + struct net_device *ndev; + struct enetc_si *si; + int err; + + err = enetc_pci_probe(pdev, KBUILD_MODNAME, 0); + if (err) { + dev_err(&pdev->dev, "PCI probing failed\n"); + return err; + } + + si = pci_get_drvdata(pdev); + + enetc_get_si_caps(si); + + ndev = alloc_etherdev_mq(sizeof(*priv), ENETC_MAX_NUM_TXQS); + if (!ndev) { + err = -ENOMEM; + dev_err(&pdev->dev, "netdev creation failed\n"); + goto err_alloc_netdev; + } + + enetc_vf_netdev_setup(si, ndev, &enetc_ndev_ops); + + priv = netdev_priv(ndev); + + enetc_init_si_rings_params(priv); + + err = enetc_alloc_si_resources(priv); + if (err) { + dev_err(&pdev->dev, "SI resource alloc failed\n"); + goto err_alloc_si_res; + } + + err = enetc_alloc_msix(priv); + if (err) { + dev_err(&pdev->dev, "MSIX alloc failed\n"); + goto err_alloc_msix; + } + + err = register_netdev(ndev); + if (err) + goto err_reg_netdev; + + netif_carrier_off(ndev); + + netif_info(priv, probe, ndev, "%s v%s\n", + enetc_drv_name, enetc_drv_ver); + + return 0; + +err_reg_netdev: + enetc_free_msix(priv); +err_alloc_msix: + enetc_free_si_resources(priv); +err_alloc_si_res: + si->ndev = NULL; + free_netdev(ndev); +err_alloc_netdev: + enetc_pci_remove(pdev); + + return err; +} + +static void enetc_vf_remove(struct pci_dev *pdev) +{ + struct enetc_si *si = pci_get_drvdata(pdev); + struct enetc_ndev_priv *priv; + + priv = netdev_priv(si->ndev); + netif_info(priv, drv, si->ndev, "%s v%s remove\n", + enetc_drv_name, enetc_drv_ver); + unregister_netdev(si->ndev); + + enetc_free_msix(priv); + + enetc_free_si_resources(priv); + + free_netdev(si->ndev); + + enetc_pci_remove(pdev); +} + +static const struct pci_device_id enetc_vf_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) }, + { 0, } /* End of table. 
*/ +}; +MODULE_DEVICE_TABLE(pci, enetc_vf_id_table); + +static struct pci_driver enetc_vf_driver = { + .name = KBUILD_MODNAME, + .id_table = enetc_vf_id_table, + .probe = enetc_vf_probe, + .remove = enetc_vf_remove, +}; +module_pci_driver(enetc_vf_driver); + +MODULE_DESCRIPTION(ENETC_DRV_NAME_STR); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(ENETC_DRV_VER_STR); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 2370dc204202..697c2427f2b7 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2098,6 +2098,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev) #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) +static __u32 fec_enet_register_version = 2; static u32 fec_enet_register_offset[] = { FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, @@ -2128,6 +2129,7 @@ static u32 fec_enet_register_offset[] = { IEEE_R_FDXFC, IEEE_R_OCTETS_OK }; #else +static __u32 fec_enet_register_version = 1; static u32 fec_enet_register_offset[] = { FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0, @@ -2149,6 +2151,8 @@ static void fec_enet_get_regs(struct net_device *ndev, u32 *buf = (u32 *)regbuf; u32 i, off; + regs->version = fec_enet_register_version; + memset(buf, 0, regs->len); for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) { diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 71f4205f14e7..3c21486c6c84 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -855,9 +855,7 @@ static int mac_probe(struct platform_device *_of_dev) if (err < 0) dev_err(dev, "fman_set_mac_active_pause() = %d\n", err); - dev_info(dev, "FMan MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n", - mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2], - mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]); + dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr); priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev); if (IS_ERR(priv->eth_dev)) { diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 241325c35cb4..27ed995f439a 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -1492,7 +1492,7 @@ static int gfar_get_ts_info(struct net_device *dev, struct gfar_private *priv = netdev_priv(dev); struct platform_device *ptp_dev; struct device_node *ptp_node; - struct qoriq_ptp *ptp = NULL; + struct ptp_qoriq *ptp = NULL; info->phc_index = -1; |
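The enetc_mdio_read()/enetc_mdio_write() callbacks added above follow the kernel's standard Clause-45 regnum encoding: the MMD (device) address sits in bits 20:16 of regnum and the access is flagged with MII_ADDR_C45, which is why the driver extracts it with (regnum >> 16) & 0x1f and programs it via MDIO_CTL_DEV_ADDR(). A minimal sketch of how a caller might form such a regnum is shown below; the function name, bus and PHY address are illustrative assumptions, while MII_ADDR_C45, MDIO_MMD_PMAPMD, MDIO_CTRL1 and mdiobus_read() are existing kernel interfaces.

	#include <linux/mdio.h>
	#include <linux/phy.h>

	/* Illustrative only: issue a single Clause-45 read through a registered
	 * mii_bus (e.g. the one set up by enetc_mdio_probe()); reads MMD 1
	 * (PMA/PMD), register MDIO_CTRL1.
	 */
	static int example_c45_read(struct mii_bus *bus, int phy_addr)
	{
		u32 regnum = MII_ADDR_C45 | (MDIO_MMD_PMAPMD << 16) | MDIO_CTRL1;

		return mdiobus_read(bus, phy_addr, regnum);
	}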