Diffstat (limited to 'net/core/pktgen.c')
-rw-r--r--	net/core/pktgen.c	58
1 file changed, 30 insertions(+), 28 deletions(-)
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 96947f5d41e4..6e1e10ff433a 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2675,7 +2675,7 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
 				goto err;
 			}
 			/* restore ll */
-			eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
+			eth = skb_push(skb, ETH_HLEN);
 			memcpy(eth, pkt_dev->hh, 2 * ETH_ALEN);
 			eth->h_proto = protocol;
 
@@ -2714,11 +2714,11 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
 	struct timeval timestamp;
 	struct pktgen_hdr *pgh;
 
-	pgh = (struct pktgen_hdr *)skb_put(skb, sizeof(*pgh));
+	pgh = skb_put(skb, sizeof(*pgh));
 	datalen -= sizeof(*pgh);
 
 	if (pkt_dev->nfrags <= 0) {
-		memset(skb_put(skb, datalen), 0, datalen);
+		skb_put_zero(skb, datalen);
 	} else {
 		int frags = pkt_dev->nfrags;
 		int i, len;
@@ -2729,7 +2729,7 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
 			frags = MAX_SKB_FRAGS;
 		len = datalen - frags * PAGE_SIZE;
 		if (len > 0) {
-			memset(skb_put(skb, len), 0, len);
+			skb_put_zero(skb, len);
 			datalen = frags * PAGE_SIZE;
 		}
 
@@ -2844,34 +2844,35 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	skb_reserve(skb, 16);
 
 	/*  Reserve for ethernet and IP header  */
-	eth = (__u8 *) skb_push(skb, 14);
-	mpls = (__be32 *)skb_put(skb, pkt_dev->nr_labels*sizeof(__u32));
+	eth = skb_push(skb, 14);
+	mpls = skb_put(skb, pkt_dev->nr_labels * sizeof(__u32));
 	if (pkt_dev->nr_labels)
 		mpls_push(mpls, pkt_dev);
 
 	if (pkt_dev->vlan_id != 0xffff) {
 		if (pkt_dev->svlan_id != 0xffff) {
-			svlan_tci = (__be16 *)skb_put(skb, sizeof(__be16));
+			svlan_tci = skb_put(skb, sizeof(__be16));
 			*svlan_tci = build_tci(pkt_dev->svlan_id,
 					       pkt_dev->svlan_cfi,
 					       pkt_dev->svlan_p);
-			svlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16));
+			svlan_encapsulated_proto = skb_put(skb,
+							   sizeof(__be16));
 			*svlan_encapsulated_proto = htons(ETH_P_8021Q);
 		}
-		vlan_tci = (__be16 *)skb_put(skb, sizeof(__be16));
+		vlan_tci = skb_put(skb, sizeof(__be16));
 		*vlan_tci = build_tci(pkt_dev->vlan_id,
 				      pkt_dev->vlan_cfi,
 				      pkt_dev->vlan_p);
-		vlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16));
+		vlan_encapsulated_proto = skb_put(skb, sizeof(__be16));
 		*vlan_encapsulated_proto = htons(ETH_P_IP);
 	}
 
 	skb_reset_mac_header(skb);
 	skb_set_network_header(skb, skb->len);
-	iph = (struct iphdr *) skb_put(skb, sizeof(struct iphdr));
+	iph = skb_put(skb, sizeof(struct iphdr));
 
 	skb_set_transport_header(skb, skb->len);
-	udph = (struct udphdr *) skb_put(skb, sizeof(struct udphdr));
+	udph = skb_put(skb, sizeof(struct udphdr));
 	skb_set_queue_mapping(skb, queue_map);
 	skb->priority = pkt_dev->skb_priority;
 
@@ -2971,34 +2972,35 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 	skb_reserve(skb, 16);
 
 	/*  Reserve for ethernet and IP header  */
-	eth = (__u8 *) skb_push(skb, 14);
-	mpls = (__be32 *)skb_put(skb, pkt_dev->nr_labels*sizeof(__u32));
+	eth = skb_push(skb, 14);
+	mpls = skb_put(skb, pkt_dev->nr_labels * sizeof(__u32));
 	if (pkt_dev->nr_labels)
 		mpls_push(mpls, pkt_dev);
 
 	if (pkt_dev->vlan_id != 0xffff) {
 		if (pkt_dev->svlan_id != 0xffff) {
-			svlan_tci = (__be16 *)skb_put(skb, sizeof(__be16));
+			svlan_tci = skb_put(skb, sizeof(__be16));
 			*svlan_tci = build_tci(pkt_dev->svlan_id,
 					       pkt_dev->svlan_cfi,
 					       pkt_dev->svlan_p);
-			svlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16));
+			svlan_encapsulated_proto = skb_put(skb,
+							   sizeof(__be16));
 			*svlan_encapsulated_proto = htons(ETH_P_8021Q);
 		}
-		vlan_tci = (__be16 *)skb_put(skb, sizeof(__be16));
+		vlan_tci = skb_put(skb, sizeof(__be16));
 		*vlan_tci = build_tci(pkt_dev->vlan_id,
 				      pkt_dev->vlan_cfi,
 				      pkt_dev->vlan_p);
-		vlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16));
+		vlan_encapsulated_proto = skb_put(skb, sizeof(__be16));
 		*vlan_encapsulated_proto = htons(ETH_P_IPV6);
 	}
 
 	skb_reset_mac_header(skb);
 	skb_set_network_header(skb, skb->len);
-	iph = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr));
+	iph = skb_put(skb, sizeof(struct ipv6hdr));
 
 	skb_set_transport_header(skb, skb->len);
-	udph = (struct udphdr *) skb_put(skb, sizeof(struct udphdr));
+	udph = skb_put(skb, sizeof(struct udphdr));
 	skb_set_queue_mapping(skb, queue_map);
 	skb->priority = pkt_dev->skb_priority;
 
@@ -3361,7 +3363,7 @@ static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
 {
 	ktime_t idle_start = ktime_get();
 
-	while (atomic_read(&(pkt_dev->skb->users)) != 1) {
+	while (refcount_read(&(pkt_dev->skb->users)) != 1) {
 		if (signal_pending(current))
 			break;
 
@@ -3418,7 +3420,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	if (pkt_dev->xmit_mode == M_NETIF_RECEIVE) {
 		skb = pkt_dev->skb;
 		skb->protocol = eth_type_trans(skb, skb->dev);
-		atomic_add(burst, &skb->users);
+		refcount_add(burst, &skb->users);
 		local_bh_disable();
 		do {
 			ret = netif_receive_skb(skb);
@@ -3426,11 +3428,11 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 				pkt_dev->errors++;
 			pkt_dev->sofar++;
 			pkt_dev->seq_num++;
-			if (atomic_read(&skb->users) != burst) {
+			if (refcount_read(&skb->users) != burst) {
 				/* skb was queued by rps/rfs or taps,
 				 * so cannot reuse this skb
 				 */
-				atomic_sub(burst - 1, &skb->users);
+				WARN_ON(refcount_sub_and_test(burst - 1, &skb->users));
 				/* get out of the loop and wait
 				 * until skb is consumed
 				 */
@@ -3444,7 +3446,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		goto out; /* Skips xmit_mode M_START_XMIT */
 	} else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) {
 		local_bh_disable();
-		atomic_inc(&pkt_dev->skb->users);
+		refcount_inc(&pkt_dev->skb->users);
 
 		ret = dev_queue_xmit(pkt_dev->skb);
 		switch (ret) {
@@ -3485,7 +3487,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		pkt_dev->last_ok = 0;
 		goto unlock;
 	}
-	atomic_add(burst, &pkt_dev->skb->users);
+	refcount_add(burst, &pkt_dev->skb->users);
 
 xmit_more:
 	ret = netdev_start_xmit(pkt_dev->skb, odev, txq, --burst > 0);
@@ -3511,11 +3513,11 @@ xmit_more:
 		/* fallthru */
 	case NETDEV_TX_BUSY:
 		/* Retry it next time */
-		atomic_dec(&(pkt_dev->skb->users));
+		refcount_dec(&(pkt_dev->skb->users));
 		pkt_dev->last_ok = 0;
 	}
 	if (unlikely(burst))
-		atomic_sub(burst, &pkt_dev->skb->users);
+		WARN_ON(refcount_sub_and_test(burst, &pkt_dev->skb->users));
 unlock:
 	HARD_TX_UNLOCK(odev, txq);
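
The first half of the patch is a cast cleanup: skb_push(), skb_put() and the skb_put_zero() helper return void *, which C converts implicitly to any object-pointer type, so call sites like "(struct iphdr *)skb_put(...)" reduce to plain "skb_put(...)", and the memset(skb_put(...), 0, len) idiom collapses into a single skb_put_zero() call. Below is a minimal userspace sketch of that pattern; toy_buf, toy_put() and toy_put_zero() are made-up stand-ins for struct sk_buff and its helpers, not the kernel API.

/* Toy model of the skb_put()/skb_put_zero() pattern (userspace
 * sketch, not kernel code): because the helpers return void *,
 * no cast is needed at the call site.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_buf {
	unsigned char data[256];
	size_t len;			/* bytes used so far */
};

/* Like skb_put(): extend the used area by n, return its start. */
static void *toy_put(struct toy_buf *b, size_t n)
{
	void *p = b->data + b->len;

	assert(b->len + n <= sizeof(b->data));	/* the kernel would panic here */
	b->len += n;
	return p;
}

/* Like skb_put_zero(): same, but the new area comes back zeroed. */
static void *toy_put_zero(struct toy_buf *b, size_t n)
{
	return memset(toy_put(b, n), 0, n);
}

struct toy_hdr {
	uint16_t proto;
};

int main(void)
{
	struct toy_buf b = { .len = 0 };
	struct toy_hdr *h = toy_put(&b, sizeof(*h));	/* no cast needed */

	h->proto = 0x0800;
	toy_put_zero(&b, 32);		/* zero-filled payload in one call */
	printf("used %zu bytes\n", b.len);
	return 0;
}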
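The second half converts skb->users from atomic_t to refcount_t. Unlike atomic_t, refcount_t operations are checked against overflow and underflow, and refcount_sub_and_test() reports whether the subtraction brought the count to zero; wrapping it in WARN_ON() documents the invariant that pktgen's own reference must keep the skb alive at that point. A userspace sketch of that contract follows; the toy_refcount_* names are stand-ins built on C11 atomics, not the kernel's refcount implementation.

/* Toy model of the refcount_t contract the patch relies on
 * (userspace C11 sketch, not the kernel implementation).
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct { atomic_uint refs; } toy_refcount_t;

static unsigned int toy_refcount_read(toy_refcount_t *r)
{
	return atomic_load(&r->refs);
}

static void toy_refcount_add(unsigned int i, toy_refcount_t *r)
{
	atomic_fetch_add(&r->refs, i);
}

/* True iff this subtraction dropped the count to zero. */
static bool toy_refcount_sub_and_test(unsigned int i, toy_refcount_t *r)
{
	return atomic_fetch_sub(&r->refs, i) == i;
}

int main(void)
{
	toy_refcount_t users = { 1 };	/* pktgen's own reference */
	unsigned int burst = 4;

	toy_refcount_add(burst, &users);	/* one ref per queued packet */
	/* ... consumers have released their references ... */
	if (toy_refcount_sub_and_test(burst, &users))
		fprintf(stderr, "bug: count hit zero\n");	/* kernel: WARN_ON() fires */
	printf("users = %u\n", toy_refcount_read(&users));
	return 0;
}

The WARN_ON() is purely diagnostic: if the subtraction ever did reach zero, the skb would have been freed while pktgen still intended to reuse it, and the warning turns that silent use-after-free into a loud one.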