Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h       7
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c  80
2 files changed, 77 insertions, 10 deletions
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 3d15e1e92e18..07ea5ab0a60b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -92,6 +92,13 @@ struct stmmac_rx_buffer {
dma_addr_t sec_addr;
};
+struct stmmac_xdp_buff {
+ struct xdp_buff xdp;
+ struct stmmac_priv *priv;
+ struct dma_desc *desc;
+ struct dma_desc *ndesc;
+};
+
struct stmmac_rx_queue {
u32 rx_count_frames;
u32 queue_index;
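
A note on the layout the new struct relies on: in the XSK zero-copy path, each receive buffer is really a struct xdp_buff_xsk, which begins with the xdp_buff handed to XDP programs and is immediately followed by a small cb scratch area for driver-private state. Because stmmac_xdp_buff starts with a struct xdp_buff member, one pointer can serve as both. The sketch below illustrates the idea; the authoritative definitions live in include/net/xsk_buff_pool.h, and the layout shown here is a simplification for illustration only:

/* Illustrative sketch, not the kernel's authoritative definition
 * (see include/net/xsk_buff_pool.h). It shows why casting a
 * struct xdp_buff pointer to struct stmmac_xdp_buff is safe as
 * long as the latter fits within xdp + cb.
 */
struct xdp_buff_xsk {
	struct xdp_buff xdp;	/* what XDP programs operate on */
	u8 cb[24];		/* scratch space for driver context */
	/* ... DMA addresses and pool bookkeeping follow ... */
};

/* Compile-time guard, approximately what XSK_CHECK_PRIV_TYPE()
 * expands to: a context type larger than xdp + cb would corrupt
 * the pool bookkeeping fields that follow cb.
 */
#define XSK_CHECK_PRIV_TYPE(t) \
	BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))
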
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d7fcab057032..c0e90fda572a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1614,6 +1614,12 @@ static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
+ /* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
+ * in struct xdp_buff_xsk to stash driver-specific information. Use
+ * this macro to make sure there are no size violations.
+ */
+ XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
+
for (i = 0; i < dma_conf->dma_rx_size; i++) {
struct stmmac_rx_buffer *buf;
dma_addr_t dma_addr;
@@ -4998,6 +5004,16 @@ static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
return ret;
}
+static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
+{
+ /* In the XDP zero-copy data path, the xdp field in struct xdp_buff_xsk
+ * represents the incoming packet, whereas the cb field in the same
+ * structure stores driver-specific info. Thus, struct stmmac_xdp_buff is
+ * laid out on top of the xdp and cb fields of struct xdp_buff_xsk.
+ */
+ return (struct stmmac_xdp_buff *)xdp;
+}
+
static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
{
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
@@ -5027,6 +5043,7 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
}
while (count < limit) {
struct stmmac_rx_buffer *buf;
+ struct stmmac_xdp_buff *ctx;
unsigned int buf1_len = 0;
struct dma_desc *np, *p;
int entry;
@@ -5112,6 +5129,11 @@ read_again:
goto read_again;
}
+ ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
+ ctx->priv = priv;
+ ctx->desc = p;
+ ctx->ndesc = np;
+
/* XDP ZC Frame only support primary buffers for now */
buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
len += buf1_len;
@@ -5190,7 +5212,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
enum dma_data_direction dma_dir;
unsigned int desc_size;
struct sk_buff *skb = NULL;
- struct xdp_buff xdp;
+ struct stmmac_xdp_buff ctx;
int xdp_status = 0;
int buf_sz;
@@ -5311,17 +5333,22 @@ read_again:
dma_sync_single_for_cpu(priv->device, buf->addr,
buf1_len, dma_dir);
- xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq);
- xdp_prepare_buff(&xdp, page_address(buf->page),
- buf->page_offset, buf1_len, false);
+ xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
+ xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
+ buf->page_offset, buf1_len, true);
- pre_len = xdp.data_end - xdp.data_hard_start -
+ pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
buf->page_offset;
- skb = stmmac_xdp_run_prog(priv, &xdp);
+
+ ctx.priv = priv;
+ ctx.desc = p;
+ ctx.ndesc = np;
+
+ skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
/* Due xdp_adjust_tail: DMA sync for_device
* cover max len CPU touch
*/
- sync_len = xdp.data_end - xdp.data_hard_start -
+ sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
buf->page_offset;
sync_len = max(sync_len, pre_len);
@@ -5331,7 +5358,7 @@ read_again:
if (xdp_res & STMMAC_XDP_CONSUMED) {
page_pool_put_page(rx_q->page_pool,
- virt_to_head_page(xdp.data),
+ virt_to_head_page(ctx.xdp.data),
sync_len, true);
buf->page = NULL;
priv->dev->stats.rx_dropped++;
@@ -5359,7 +5386,7 @@ read_again:
if (!skb) {
/* XDP program may expand or reduce tail */
- buf1_len = xdp.data_end - xdp.data;
+ buf1_len = ctx.xdp.data_end - ctx.xdp.data;
skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
if (!skb) {
@@ -5369,7 +5396,7 @@ read_again:
}
/* XDP program may adjust header */
- skb_copy_to_linear_data(skb, xdp.data, buf1_len);
+ skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
skb_put(skb, buf1_len);
/* Data payload copied into SKB, page ready for recycle */
@@ -7060,6 +7087,37 @@ void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
}
}
+static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
+{
+ const struct stmmac_xdp_buff *ctx = (void *)_ctx;
+ struct dma_desc *desc_contains_ts = ctx->desc;
+ struct stmmac_priv *priv = ctx->priv;
+ struct dma_desc *ndesc = ctx->ndesc;
+ struct dma_desc *desc = ctx->desc;
+ u64 ns = 0;
+
+ if (!priv->hwts_rx_en)
+ return -ENODATA;
+
+ /* For GMAC4 and XGMAC, the valid timestamp comes from the context
+ * (CTX) descriptor that follows.
+ */
+ if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
+ desc_contains_ts = ndesc;
+
+ /* Check if timestamp is available */
+ if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
+ stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
+ ns -= priv->plat->cdc_error_adj;
+ *timestamp = ns_to_ktime(ns);
+ return 0;
+ }
+
+ return -ENODATA;
+}
+
+static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
+ .xmo_rx_timestamp = stmmac_xdp_rx_timestamp,
+};
+
/**
* stmmac_dvr_probe
* @device: device pointer
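
For context on how this callback is exercised: once ndev->xdp_metadata_ops is registered (next hunk), an XDP program can pull the RX hardware timestamp through the bpf_xdp_metadata_rx_timestamp() kfunc, which resolves to the driver's xmo_rx_timestamp above. Below is a minimal consumer sketch, assuming a kernel with XDP metadata kfunc support, a libbpf toolchain, and a program loaded dev-bound so the kfunc resolves to the driver implementation; the program name and printout are illustrative only:

// SPDX-License-Identifier: GPL-2.0
/* Minimal consumer sketch; built with clang -O2 -target bpf. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Kfunc resolved against the attached device's xdp_metadata_ops;
 * for stmmac this lands in stmmac_xdp_rx_timestamp() above.
 */
extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
					 __u64 *timestamp) __ksym;

SEC("xdp")
int rx_hwts_example(struct xdp_md *ctx)
{
	__u64 ts;

	/* Returns -ENODATA when hwts_rx_en is off or the descriptor
	 * carries no timestamp, mirroring the driver code above.
	 */
	if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
		bpf_printk("rx hw timestamp: %llu ns", ts);

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";

Note that -ENODATA is the expected result until RX timestamping is enabled, e.g. via the SIOCSHWTSTAMP ioctl with rx_filter set to HWTSTAMP_FILTER_ALL, which is what sets priv->hwts_rx_en.
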
@@ -7167,6 +7225,8 @@ int stmmac_dvr_probe(struct device *device,
ndev->netdev_ops = &stmmac_netdev_ops;
+ ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
+
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |