diff options
Diffstat (limited to 'drivers/net/ethernet/socionext/netsec.c')
| -rw-r--r-- | drivers/net/ethernet/socionext/netsec.c | 31 | 
1 file changed, 14 insertions, 17 deletions
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index e8224b543dfc..a5a0fb60193a 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -589,6 +589,8 @@ static void netsec_et_set_msglevel(struct net_device *dev, u32 datum)  }  static const struct ethtool_ops netsec_ethtool_ops = { +	.supported_coalesce_params = ETHTOOL_COALESCE_USECS | +				     ETHTOOL_COALESCE_MAX_FRAMES,  	.get_drvinfo		= netsec_et_get_drvinfo,  	.get_link_ksettings	= phy_ethtool_get_link_ksettings,  	.set_link_ksettings	= phy_ethtool_set_link_ksettings, @@ -896,9 +898,9 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,  	case XDP_TX:  		ret = netsec_xdp_xmit_back(priv, xdp);  		if (ret != NETSEC_XDP_TX) -			__page_pool_put_page(dring->page_pool, -					     virt_to_head_page(xdp->data), -					     len, true); +			page_pool_put_page(dring->page_pool, +					   virt_to_head_page(xdp->data), len, +					   true);  		break;  	case XDP_REDIRECT:  		err = xdp_do_redirect(priv->ndev, xdp, prog); @@ -906,9 +908,9 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,  			ret = NETSEC_XDP_REDIR;  		} else {  			ret = NETSEC_XDP_CONSUMED; -			__page_pool_put_page(dring->page_pool, -					     virt_to_head_page(xdp->data), -					     len, true); +			page_pool_put_page(dring->page_pool, +					   virt_to_head_page(xdp->data), len, +					   true);  		}  		break;  	default: @@ -919,9 +921,8 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,  		/* fall through -- handle aborts by dropping packet */  	case XDP_DROP:  		ret = NETSEC_XDP_CONSUMED; -		__page_pool_put_page(dring->page_pool, -				     virt_to_head_page(xdp->data), -				     len, true); +		page_pool_put_page(dring->page_pool, +				   virt_to_head_page(xdp->data), len, true);  		break;  	} @@ -1020,8 +1021,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)  			 * 
cache state. Since we paid the allocation cost if  			 * building an skb fails try to put the page into cache  			 */ -			__page_pool_put_page(dring->page_pool, page, -					     pkt_len, true); +			page_pool_put_page(dring->page_pool, page, pkt_len, +					   true);  			netif_err(priv, drv, priv->ndev,  				  "rx failed to build skb\n");  			break; @@ -1148,11 +1149,7 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,  				~tcp_v4_check(0, ip_hdr(skb)->saddr,  					      ip_hdr(skb)->daddr, 0);  		} else { -			ipv6_hdr(skb)->payload_len = 0; -			tcp_hdr(skb)->check = -				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, -						 &ipv6_hdr(skb)->daddr, -						 0, IPPROTO_TCP, 0); +			tcp_v6_gso_csum_prep(skb);  		}  		tx_ctrl.tcp_seg_offload_flag = true; @@ -1199,7 +1196,7 @@ static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)  		if (id == NETSEC_RING_RX) {  			struct page *page = virt_to_page(desc->addr); -			page_pool_put_page(dring->page_pool, page, false); +			page_pool_put_full_page(dring->page_pool, page, false);  		} else if (id == NETSEC_RING_TX) {  			dma_unmap_single(priv->dev, desc->dma_addr, desc->len,  					 DMA_TO_DEVICE);  |