Diffstat (limited to 'drivers/net/ethernet/stmicro/stmmac/stmmac_main.c')
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/stmmac_main.c	126
1 file changed, 105 insertions(+), 21 deletions(-)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 17310ade88dd..0fca81507a77 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1134,20 +1134,26 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
static int stmmac_init_phy(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ struct fwnode_handle *phy_fwnode;
struct fwnode_handle *fwnode;
int ret;
+ if (!phylink_expects_phy(priv->phylink))
+ return 0;
+
fwnode = of_fwnode_handle(priv->plat->phylink_node);
if (!fwnode)
fwnode = dev_fwnode(priv->device);
if (fwnode)
- ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
+ phy_fwnode = fwnode_get_phy_node(fwnode);
+ else
+ phy_fwnode = NULL;
/* Some DT bindings do not set up the PHY handle. Let's try to
 * manually parse it
 */
- if (!fwnode || ret) {
+ if (!phy_fwnode || IS_ERR(phy_fwnode)) {
int addr = priv->plat->phy_addr;
struct phy_device *phydev;
@@ -1163,6 +1169,9 @@ static int stmmac_init_phy(struct net_device *dev)
}
ret = phylink_connect_phy(priv->phylink, phydev);
+ } else {
+ fwnode_handle_put(phy_fwnode);
+ ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
}
if (!priv->plat->pmt) {
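
Pieced together, the PHY lookup in stmmac_init_phy() after this hunk behaves roughly as sketched below. This is a condensed approximation for orientation only, not the literal resulting function; the mdiobus fallback body is elided:

	if (!phylink_expects_phy(priv->phylink))
		return 0;	/* e.g. fixed-link: no PHY will attach */

	fwnode = of_fwnode_handle(priv->plat->phylink_node);
	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	/* probe for a phy-handle/phy/phy-device property */
	phy_fwnode = fwnode ? fwnode_get_phy_node(fwnode) : NULL;

	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
		/* fall back to scanning the mdiobus at priv->plat->phy_addr
		 * and attaching via phylink_connect_phy()
		 */
	} else {
		/* the node was only needed as a probe; drop the reference
		 * and let phylink resolve the PHY from fwnode itself
		 */
		fwnode_handle_put(phy_fwnode);
		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
	}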
@@ -1605,6 +1614,12 @@ static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
+ /* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
+  * in struct xdp_buff_xsk to stash driver-specific information. Thus,
+  * use this macro to make sure there are no size violations.
+  */
+ XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
+
for (i = 0; i < dma_conf->dma_rx_size; i++) {
struct stmmac_rx_buffer *buf;
dma_addr_t dma_addr;
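
For context, XSK_CHECK_PRIV_TYPE() is the compile-time guard that ships with the XSK buffer pool; at the time of writing it is defined in include/net/xsk_buff_pool.h roughly as:

	/* Break the build if the driver context no longer fits inside the
	 * cb[] scratch area that follows the xdp field in xdp_buff_xsk.
	 */
	#define XSK_CHECK_PRIV_TYPE(t) \
		BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))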
@@ -4554,13 +4569,10 @@ dma_map_err:
static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
- struct vlan_ethhdr *veth;
- __be16 vlan_proto;
+ struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
+ __be16 vlan_proto = veth->h_vlan_proto;
u16 vlanid;
- veth = (struct vlan_ethhdr *)skb->data;
- vlan_proto = veth->h_vlan_proto;
-
if ((vlan_proto == htons(ETH_P_8021Q) &&
dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
(vlan_proto == htons(ETH_P_8021AD) &&
@@ -4989,6 +5001,16 @@ static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
return ret;
}
+static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
+{
+ /* In the XDP zero-copy data path, the xdp field in struct xdp_buff_xsk
+  * represents the incoming packet, whereas the cb field in the same
+  * structure stores driver-specific info. Thus, struct stmmac_xdp_buff
+  * is laid out on top of the xdp and cb fields of struct xdp_buff_xsk.
+  */
+ return (struct stmmac_xdp_buff *)xdp;
+}
+
static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
{
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
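
The cast above is only safe because of a layout contract: struct stmmac_xdp_buff must begin with the embedded struct xdp_buff, and everything after it must fit in xdp_buff_xsk's cb[] area (which the XSK_CHECK_PRIV_TYPE check earlier enforces). The struct itself is added to stmmac.h elsewhere in this series; as a sketch:

	struct stmmac_xdp_buff {
		struct xdp_buff xdp;		/* overlays xdp_buff_xsk.xdp */
		struct stmmac_priv *priv;	/* these three are stashed in */
		struct dma_desc *desc;		/* the cb[] scratch space     */
		struct dma_desc *ndesc;
	};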
@@ -5018,6 +5040,7 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
}
while (count < limit) {
struct stmmac_rx_buffer *buf;
+ struct stmmac_xdp_buff *ctx;
unsigned int buf1_len = 0;
struct dma_desc *np, *p;
int entry;
@@ -5103,6 +5126,11 @@ read_again:
goto read_again;
}
+ ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
+ ctx->priv = priv;
+ ctx->desc = p;
+ ctx->ndesc = np;
+
/* XDP ZC frames only support primary buffers for now */
buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
len += buf1_len;
@@ -5181,7 +5209,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
enum dma_data_direction dma_dir;
unsigned int desc_size;
struct sk_buff *skb = NULL;
- struct xdp_buff xdp;
+ struct stmmac_xdp_buff ctx;
int xdp_status = 0;
int buf_sz;
@@ -5302,17 +5330,22 @@ read_again:
dma_sync_single_for_cpu(priv->device, buf->addr,
buf1_len, dma_dir);
- xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq);
- xdp_prepare_buff(&xdp, page_address(buf->page),
- buf->page_offset, buf1_len, false);
+ xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
+ xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
+ buf->page_offset, buf1_len, true);
- pre_len = xdp.data_end - xdp.data_hard_start -
+ pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
buf->page_offset;
- skb = stmmac_xdp_run_prog(priv, &xdp);
+
+ ctx.priv = priv;
+ ctx.desc = p;
+ ctx.ndesc = np;
+
+ skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
/* Due to xdp_adjust_tail: the DMA sync for_device must
 * cover the max length the CPU touched
 */
- sync_len = xdp.data_end - xdp.data_hard_start -
+ sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
buf->page_offset;
sync_len = max(sync_len, pre_len);
@@ -5322,7 +5355,7 @@ read_again:
if (xdp_res & STMMAC_XDP_CONSUMED) {
page_pool_put_page(rx_q->page_pool,
- virt_to_head_page(xdp.data),
+ virt_to_head_page(ctx.xdp.data),
sync_len, true);
buf->page = NULL;
priv->dev->stats.rx_dropped++;
@@ -5350,7 +5383,7 @@ read_again:
if (!skb) {
/* XDP program may expand or reduce tail */
- buf1_len = xdp.data_end - xdp.data;
+ buf1_len = ctx.xdp.data_end - ctx.xdp.data;
skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
if (!skb) {
@@ -5360,7 +5393,7 @@ read_again:
}
/* XDP program may adjust header */
- skb_copy_to_linear_data(skb, xdp.data, buf1_len);
+ skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
skb_put(skb, buf1_len);
/* Data payload copied into SKB, page ready for recycle */
@@ -6341,6 +6374,10 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
bool is_double = false;
int ret;
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
+ return ret;
+
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
@@ -6348,16 +6385,18 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
ret = stmmac_vlan_update(priv, is_double);
if (ret) {
clear_bit(vid, priv->active_vlans);
- return ret;
+ goto err_pm_put;
}
if (priv->hw->num_vlan) {
ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
if (ret)
- return ret;
+ goto err_pm_put;
}
+err_pm_put:
+ pm_runtime_put(priv->device);
- return 0;
+ return ret;
}
static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
@@ -6622,6 +6661,8 @@ int stmmac_xdp_open(struct net_device *dev)
goto init_error;
}
+ stmmac_reset_queues_param(priv);
+
/* DMA CSR Channel configuration */
for (chan = 0; chan < dma_csr_ch; chan++) {
stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
@@ -6948,7 +6989,7 @@ static void stmmac_napi_del(struct net_device *dev)
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
{
struct stmmac_priv *priv = netdev_priv(dev);
- int ret = 0;
+ int ret = 0, i;
if (netif_running(dev))
stmmac_release(dev);
@@ -6957,6 +6998,10 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
priv->plat->rx_queues_to_use = rx_cnt;
priv->plat->tx_queues_to_use = tx_cnt;
+ if (!netif_is_rxfh_configured(dev))
+ for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
+ priv->rss.table[i] = ethtool_rxfh_indir_default(i,
+ rx_cnt);
stmmac_napi_add(dev);
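
ethtool_rxfh_indir_default() is the stock helper from include/linux/ethtool.h, so unless the user has pinned an RSS table via ethtool, the rebuilt table simply spreads entries round-robin over the new queue count:

	/* include/linux/ethtool.h, for reference */
	static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
	{
		return index % n_rx_rings;
	}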
@@ -7045,6 +7090,37 @@ void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
}
}
+static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
+{
+ const struct stmmac_xdp_buff *ctx = (void *)_ctx;
+ struct dma_desc *desc_contains_ts = ctx->desc;
+ struct stmmac_priv *priv = ctx->priv;
+ struct dma_desc *ndesc = ctx->ndesc;
+ struct dma_desc *desc = ctx->desc;
+ u64 ns = 0;
+
+ if (!priv->hwts_rx_en)
+ return -ENODATA;
+
+ /* For GMAC4, the valid timestamp comes from the CTX next descriptor. */
+ if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
+ desc_contains_ts = ndesc;
+
+ /* Check if timestamp is available */
+ if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
+ stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
+ ns -= priv->plat->cdc_error_adj;
+ *timestamp = ns_to_ktime(ns);
+ return 0;
+ }
+
+ return -ENODATA;
+}
+
+static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
+ .xmo_rx_timestamp = stmmac_xdp_rx_timestamp,
+};
+
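
With the xmo_rx_timestamp hook registered (via ndev->xdp_metadata_ops in the probe hunk below), an XDP program can pull the RX hardware timestamp through the kernel's metadata kfunc interface. A minimal consumer sketch, assuming a libbpf-style build (program and section names are illustrative):

	// SPDX-License-Identifier: GPL-2.0
	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	/* kfunc backed here by stmmac_xdp_rx_timestamp() */
	extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
						 __u64 *timestamp) __ksym;

	SEC("xdp")
	int rx_hwts(struct xdp_md *ctx)
	{
		__u64 ts;

		/* 0 on success; -ENODATA when hwts_rx_en is off or the
		 * descriptor carries no timestamp for this frame
		 */
		if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
			bpf_printk("rx hw ts: %llu", ts);

		return XDP_PASS;
	}

	char _license[] SEC("license") = "GPL";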
/**
* stmmac_dvr_probe
* @device: device pointer
@@ -7152,6 +7228,8 @@ int stmmac_dvr_probe(struct device *device,
ndev->netdev_ops = &stmmac_netdev_ops;
+ ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
+
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
@@ -7238,6 +7316,10 @@ int stmmac_dvr_probe(struct device *device,
if (priv->dma_cap.rssen && priv->plat->rss_en)
ndev->features |= NETIF_F_RXHASH;
+ ndev->vlan_features |= ndev->features;
+ /* TSO doesn't work on VLANs yet */
+ ndev->vlan_features &= ~NETIF_F_TSO;
+
/* MTU range: 46 - hw-specific max */
ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
if (priv->plat->has_xgmac)
@@ -7260,6 +7342,8 @@ int stmmac_dvr_probe(struct device *device,
if (flow_ctrl)
priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
+ ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
/* Setup channels NAPI */
stmmac_napi_add(ndev);