Diffstat (limited to 'drivers/net/ethernet/freescale/fec_main.c')
-rw-r--r-- | drivers/net/ethernet/freescale/fec_main.c | 234
1 file changed, 172 insertions, 62 deletions
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index e8e2aa1e7f01..98d5cd313fdd 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -66,6 +66,8 @@
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
 #include <soc/imx/cpuidle.h>
+#include <linux/filter.h>
+#include <linux/bpf.h>
 
 #include <asm/cacheflush.h>
 
@@ -111,7 +113,8 @@ static const struct fec_devinfo fec_imx6q_info = {
 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
-		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII,
+		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII |
+		  FEC_QUIRK_HAS_PMQOS,
 };
 
 static const struct fec_devinfo fec_mvf600_info = {
@@ -155,6 +158,13 @@ static const struct fec_devinfo fec_imx8qm_info = {
 			  FEC_QUIRK_DELAYED_CLKS_SUPPORT,
 };
 
+static const struct fec_devinfo fec_s32v234_info = {
+	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE,
+};
+
 static struct platform_device_id fec_devtype[] = {
 	{
 		/* keep it for coldfire */
@@ -188,6 +198,9 @@ static struct platform_device_id fec_devtype[] = {
 		.name = "imx8qm-fec",
 		.driver_data = (kernel_ulong_t)&fec_imx8qm_info,
 	}, {
+		.name = "s32v234-fec",
+		.driver_data = (kernel_ulong_t)&fec_s32v234_info,
+	}, {
 		/* sentinel */
 	}
 };
@@ -203,6 +216,7 @@ enum imx_fec_type {
 	IMX6UL_FEC,
 	IMX8MQ_FEC,
 	IMX8QM_FEC,
+	S32V234_FEC,
 };
 
 static const struct of_device_id fec_dt_ids[] = {
@@ -215,6 +229,7 @@ static const struct of_device_id fec_dt_ids[] = {
 	{ .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
 	{ .compatible = "fsl,imx8mq-fec", .data = &fec_devtype[IMX8MQ_FEC], },
 	{ .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], },
+	{ .compatible = "fsl,s32v234-fec", .data = &fec_devtype[S32V234_FEC], },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, fec_dt_ids);
@@ -409,6 +424,48 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
 	return 0;
 }
 
+static int
+fec_enet_create_page_pool(struct fec_enet_private *fep,
+			  struct fec_enet_priv_rx_q *rxq, int size)
+{
+	struct page_pool_params pp_params = {
+		.order = 0,
+		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+		.pool_size = size,
+		.nid = dev_to_node(&fep->pdev->dev),
+		.dev = &fep->pdev->dev,
+		.dma_dir = DMA_FROM_DEVICE,
+		.offset = FEC_ENET_XDP_HEADROOM,
+		.max_len = FEC_ENET_RX_FRSIZE,
+	};
+	int err;
+
+	rxq->page_pool = page_pool_create(&pp_params);
+	if (IS_ERR(rxq->page_pool)) {
+		err = PTR_ERR(rxq->page_pool);
+		rxq->page_pool = NULL;
+		return err;
+	}
+
+	err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0);
+	if (err < 0)
+		goto err_free_pp;
+
+	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
+					 rxq->page_pool);
+	if (err)
+		goto err_unregister_rxq;
+
+	return 0;
+
+err_unregister_rxq:
+	xdp_rxq_info_unreg(&rxq->xdp_rxq);
+err_free_pp:
+	page_pool_destroy(rxq->page_pool);
+	rxq->page_pool = NULL;
+	return err;
+}
+
 static struct bufdesc *
 fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
 			     struct sk_buff *skb,
@@ -1167,6 +1224,34 @@ fec_restart(struct net_device *ndev)
 
 }
 
+static int fec_enet_ipc_handle_init(struct fec_enet_private *fep)
+{
+	if (!(of_machine_is_compatible("fsl,imx8qm") ||
+	      of_machine_is_compatible("fsl,imx8qxp") ||
+	      of_machine_is_compatible("fsl,imx8dxl")))
+		return 0;
+
+	return imx_scu_get_handle(&fep->ipc_handle);
+}
+
+static void fec_enet_ipg_stop_set(struct fec_enet_private *fep, bool enabled)
+{
+	struct device_node *np = fep->pdev->dev.of_node;
+	u32 rsrc_id, val;
+	int idx;
+
+	if (!np || !fep->ipc_handle)
+		return;
+
+	idx = of_alias_get_id(np, "ethernet");
+	if (idx < 0)
+		idx = 0;
+	rsrc_id = idx ? IMX_SC_R_ENET_1 : IMX_SC_R_ENET_0;
+
+	val = enabled ? 1 : 0;
+	imx_sc_misc_set_control(fep->ipc_handle, rsrc_id, IMX_SC_C_IPG_STOP, val);
+}
+
 static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
 {
 	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
@@ -1182,6 +1267,8 @@ static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
 				   BIT(stop_gpr->bit), 0);
 	} else if (pdata && pdata->sleep_mode_enable) {
 		pdata->sleep_mode_enable(enabled);
+	} else {
+		fec_enet_ipg_stop_set(fep, enabled);
 	}
 }
 
@@ -1407,7 +1494,7 @@ static void fec_enet_tx(struct net_device *ndev)
 		fec_enet_tx_queue(ndev, i);
 }
 
-static int
+static int __maybe_unused
 fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
@@ -1427,8 +1514,9 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff
 	return 0;
 }
 
-static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
-			       struct bufdesc *bdp, u32 length, bool swap)
+static bool __maybe_unused
+fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
+		   struct bufdesc *bdp, u32 length, bool swap)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct sk_buff *new_skb;
@@ -1453,6 +1541,21 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
 	return true;
 }
 
+static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
+				struct bufdesc *bdp, int index)
+{
+	struct page *new_page;
+	dma_addr_t phys_addr;
+
+	new_page = page_pool_dev_alloc_pages(rxq->page_pool);
+	WARN_ON(!new_page);
+	rxq->rx_skb_info[index].page = new_page;
+
+	rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
+	phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
+	bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
+}
+
 /* During a receive, the bd_rx.cur points to the current incoming buffer.
  * When we update through the ring, if the next incoming buffer has
  * not been given to the system, we just set the empty indicator,
@@ -1465,7 +1568,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 	struct fec_enet_priv_rx_q *rxq;
 	struct bufdesc *bdp;
 	unsigned short status;
-	struct sk_buff *skb_new = NULL;
 	struct sk_buff *skb;
 	ushort pkt_len;
 	__u8 *data;
@@ -1474,8 +1576,8 @@
 	bool vlan_packet_rcvd = false;
 	u16 vlan_tag;
 	int index = 0;
-	bool is_copybreak;
 	bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
+	struct page *page;
 
 #ifdef CONFIG_M532x
 	flush_cache_all();
@@ -1527,31 +1629,25 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 		ndev->stats.rx_bytes += pkt_len;
 
 		index = fec_enet_get_bd_index(bdp, &rxq->bd);
-		skb = rxq->rx_skbuff[index];
+		page = rxq->rx_skb_info[index].page;
+		dma_sync_single_for_cpu(&fep->pdev->dev,
+					fec32_to_cpu(bdp->cbd_bufaddr),
+					pkt_len,
+					DMA_FROM_DEVICE);
+		prefetch(page_address(page));
+		fec_enet_update_cbd(rxq, bdp, index);
 
 		/* The packet length includes FCS, but we don't want to
 		 * include that when passing upstream as it messes up
 		 * bridging applications.
 		 */
-		is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
-						  need_swap);
-		if (!is_copybreak) {
-			skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
-			if (unlikely(!skb_new)) {
-				ndev->stats.rx_dropped++;
-				goto rx_processing_done;
-			}
-			dma_unmap_single(&fep->pdev->dev,
-					 fec32_to_cpu(bdp->cbd_bufaddr),
-					 FEC_ENET_RX_FRSIZE - fep->rx_align,
-					 DMA_FROM_DEVICE);
-		}
-
-		prefetch(skb->data - NET_IP_ALIGN);
+		skb = build_skb(page_address(page), PAGE_SIZE);
+		skb_reserve(skb, FEC_ENET_XDP_HEADROOM);
 		skb_put(skb, pkt_len - 4);
+		skb_mark_for_recycle(skb);
 		data = skb->data;
 
-		if (!is_copybreak && need_swap)
+		if (need_swap)
 			swap_buffer(data, pkt_len);
 
 #if !defined(CONFIG_M5272)
@@ -1606,16 +1702,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 		skb_record_rx_queue(skb, queue_id);
 		napi_gro_receive(&fep->napi, skb);
 
-		if (is_copybreak) {
-			dma_sync_single_for_device(&fep->pdev->dev,
-						   fec32_to_cpu(bdp->cbd_bufaddr),
-						   FEC_ENET_RX_FRSIZE - fep->rx_align,
-						   DMA_FROM_DEVICE);
-		} else {
-			rxq->rx_skbuff[index] = skb_new;
-			fec_enet_new_rxbdp(ndev, bdp, skb_new);
-		}
-
 rx_processing_done:
 		/* Clear the status flags for this buffer */
 		status &= ~BD_ENET_RX_STATS;
@@ -2104,13 +2190,13 @@ static int fec_enet_mii_probe(struct net_device *ndev)
 			continue;
 		if (dev_id--)
 			continue;
-		strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
+		strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
 		break;
 	}
 
 	if (phy_id >= PHY_MAX_ADDR) {
 		netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
-		strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
+		strscpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
 		phy_id = 0;
 	}
 
@@ -2294,9 +2380,9 @@ static void fec_enet_get_drvinfo(struct net_device *ndev,
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 
-	strlcpy(info->driver, fep->pdev->dev.driver->name,
+	strscpy(info->driver, fep->pdev->dev.driver->name,
 		sizeof(info->driver));
-	strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
+	strscpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
 }
 
 static int fec_enet_get_regs_len(struct net_device *ndev)
@@ -2959,26 +3045,19 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	unsigned int i;
 	struct sk_buff *skb;
-	struct bufdesc *bdp;
 	struct fec_enet_priv_tx_q *txq;
 	struct fec_enet_priv_rx_q *rxq;
 	unsigned int q;
 
 	for (q = 0; q < fep->num_rx_queues; q++) {
 		rxq = fep->rx_queue[q];
-		bdp = rxq->bd.base;
-		for (i = 0; i < rxq->bd.ring_size; i++) {
-			skb = rxq->rx_skbuff[i];
-			rxq->rx_skbuff[i] = NULL;
-			if (skb) {
-				dma_unmap_single(&fep->pdev->dev,
-						 fec32_to_cpu(bdp->cbd_bufaddr),
-						 FEC_ENET_RX_FRSIZE - fep->rx_align,
-						 DMA_FROM_DEVICE);
-				dev_kfree_skb(skb);
-			}
-			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
-		}
+		for (i = 0; i < rxq->bd.ring_size; i++)
+			page_pool_release_page(rxq->page_pool, rxq->rx_skb_info[i].page);
+
+		if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
+			xdp_rxq_info_unreg(&rxq->xdp_rxq);
+		page_pool_destroy(rxq->page_pool);
+		rxq->page_pool = NULL;
 	}
 
 	for (q = 0; q < fep->num_tx_queues; q++) {
@@ -3068,24 +3147,31 @@ static int
 fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
-	unsigned int i;
-	struct sk_buff *skb;
-	struct bufdesc *bdp;
 	struct fec_enet_priv_rx_q *rxq;
+	dma_addr_t phys_addr;
+	struct bufdesc *bdp;
+	struct page *page;
+	int i, err;
 
 	rxq = fep->rx_queue[queue];
 	bdp = rxq->bd.base;
+
+	err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size);
+	if (err < 0) {
+		netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err);
+		return err;
+	}
+
 	for (i = 0; i < rxq->bd.ring_size; i++) {
-		skb = __netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE, GFP_KERNEL);
-		if (!skb)
+		page = page_pool_dev_alloc_pages(rxq->page_pool);
+		if (!page)
 			goto err_alloc;
 
-		if (fec_enet_new_rxbdp(ndev, bdp, skb)) {
-			dev_kfree_skb(skb);
-			goto err_alloc;
-		}
+		phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
+		bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
 
-		rxq->rx_skbuff[i] = skb;
+		rxq->rx_skb_info[i].page = page;
+		rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM;
 		bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
 
 		if (fep->bufdesc_ex) {
@@ -3210,6 +3296,9 @@ fec_enet_open(struct net_device *ndev)
 	if (fep->quirks & FEC_QUIRK_ERR006687)
 		imx6q_cpuidle_fec_irqs_used();
 
+	if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
+		cpu_latency_qos_add_request(&fep->pm_qos_req, 0);
+
 	napi_enable(&fep->napi);
 	phy_start(ndev->phydev);
 	netif_tx_start_all_queues(ndev);
@@ -3251,6 +3340,9 @@ fec_enet_close(struct net_device *ndev)
 	fec_enet_update_ethtool_stats(ndev);
 
 	fec_enet_clk_enable(ndev, false);
+	if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
+		cpu_latency_qos_remove_request(&fep->pm_qos_req);
+
 	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
 	pm_runtime_mark_last_busy(&fep->pdev->dev);
 	pm_runtime_put_autosuspend(&fep->pdev->dev);
@@ -3559,7 +3651,7 @@ static int fec_enet_init(struct net_device *ndev)
 	ndev->ethtool_ops = &fec_enet_ethtool_ops;
 
 	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
-	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
+	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi);
 
 	if (fep->quirks & FEC_QUIRK_HAS_VLAN)
 		/* enable hw VLAN support */
@@ -3817,6 +3909,10 @@ fec_probe(struct platform_device *pdev)
 	    !of_property_read_bool(np, "fsl,err006687-workaround-present"))
 		fep->quirks |= FEC_QUIRK_ERR006687;
 
+	ret = fec_enet_ipc_handle_init(fep);
+	if (ret)
+		goto failed_ipc_init;
+
 	if (of_get_property(np, "fsl,magic-packet", NULL))
 		fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
 
@@ -4014,6 +4110,7 @@ failed_rgmii_delay:
 		of_phy_deregister_fixed_link(np);
 	of_node_put(phy_node);
 failed_stop_mode:
+failed_ipc_init:
 failed_phy:
 	dev_id--;
 failed_ioremap:
@@ -4058,6 +4155,7 @@ static int __maybe_unused fec_suspend(struct device *dev)
 {
 	struct net_device *ndev = dev_get_drvdata(dev);
 	struct fec_enet_private *fep = netdev_priv(ndev);
+	int ret;
 
 	rtnl_lock();
 	if (netif_running(ndev)) {
@@ -4082,6 +4180,15 @@ static int __maybe_unused fec_suspend(struct device *dev)
 		}
 		/* It's safe to disable clocks since interrupts are masked */
 		fec_enet_clk_enable(ndev, false);
+
+		fep->rpm_active = !pm_runtime_status_suspended(dev);
+		if (fep->rpm_active) {
+			ret = pm_runtime_force_suspend(dev);
+			if (ret < 0) {
+				rtnl_unlock();
+				return ret;
+			}
+		}
 	}
 
 	rtnl_unlock();
@@ -4112,6 +4219,9 @@ static int __maybe_unused fec_resume(struct device *dev)
 
 	rtnl_lock();
 	if (netif_running(ndev)) {
+		if (fep->rpm_active)
+			pm_runtime_force_resume(dev);
+
 		ret = fec_enet_clk_enable(ndev, true);
 		if (ret) {
 			rtnl_unlock();
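
For readers skimming the hunks above, here is a minimal, self-contained sketch (not part of the patch) of the page_pool RX buffer lifecycle the diff switches the driver to: create a DMA-mapping pool per RX queue, register it as the queue's XDP memory model, hand pool pages to the hardware with headroom reserved, and wrap completed buffers in an skb that recycles the page back to the pool. The demo_* names and DEMO_HEADROOM are hypothetical stand-ins for the driver's fec_enet_priv_rx_q, FEC_ENET_XDP_HEADROOM and related symbols, and the page_pool header path is the one used around this kernel version (newer trees moved it).

/*
 * Illustrative sketch only, assuming the kernel era of this diff.
 * It mirrors fec_enet_create_page_pool(), fec_enet_update_cbd() and the
 * build_skb() path shown above, with hypothetical demo_* names.
 */
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/page_pool.h>
#include <net/xdp.h>

#define DEMO_HEADROOM	256	/* stand-in for FEC_ENET_XDP_HEADROOM */

struct demo_rxq {
	struct page_pool *pool;
	struct xdp_rxq_info xdp_rxq;
};

/* Create a DMA-mapping pool and attach it to the queue's xdp_rxq_info. */
static int demo_rxq_init(struct demo_rxq *rxq, struct net_device *ndev,
			 struct device *dma_dev, int ring_size, u32 queue_id)
{
	struct page_pool_params pp = {
		.order		= 0,
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size	= ring_size,
		.nid		= dev_to_node(dma_dev),
		.dev		= dma_dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.offset		= DEMO_HEADROOM,
		.max_len	= PAGE_SIZE - DEMO_HEADROOM,
	};
	int err;

	rxq->pool = page_pool_create(&pp);
	if (IS_ERR(rxq->pool))
		return PTR_ERR(rxq->pool);

	err = xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, queue_id, 0);
	if (err)
		goto destroy_pool;

	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
					 rxq->pool);
	if (err)
		goto unreg_rxq;
	return 0;

unreg_rxq:
	xdp_rxq_info_unreg(&rxq->xdp_rxq);
destroy_pool:
	page_pool_destroy(rxq->pool);
	return err;
}

/* Refill one descriptor slot: the DMA address handed to the hardware points
 * past the reserved headroom, as fec_enet_update_cbd() does in the diff. */
static dma_addr_t demo_refill_slot(struct demo_rxq *rxq, struct page **slot)
{
	struct page *page = page_pool_dev_alloc_pages(rxq->pool);

	if (!page)
		return 0;
	*slot = page;
	return page_pool_get_dma_addr(page) + DEMO_HEADROOM;
}

/* Wrap a received page in an skb without copying; marking it for recycle
 * returns the page to the pool once the stack has consumed the data. */
static struct sk_buff *demo_build_rx_skb(struct page *page, unsigned int len)
{
	struct sk_buff *skb = build_skb(page_address(page), PAGE_SIZE);

	if (!skb)
		return NULL;
	skb_reserve(skb, DEMO_HEADROOM);
	skb_put(skb, len);
	skb_mark_for_recycle(skb);
	return skb;
}

The design point visible in the diff is that cbd_bufaddr is always programmed with the pool DMA address plus the reserved headroom, and each queue registers an xdp_rxq_info; together with the new <linux/filter.h> and <linux/bpf.h> includes, this appears to be groundwork for attaching an XDP program to the RX path without reorganizing the buffers.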