diff options
Diffstat (limited to 'drivers/net/xen-netback/netback.c')
| -rw-r--r-- | drivers/net/xen-netback/netback.c | 40 | 
1 file changed, 25 insertions, 15 deletions
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index bf627af723bf..c1501f41e2d8 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -334,6 +334,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,  struct xenvif_tx_cb {  	u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];  	u8 copy_count; +	u32 split_mask;  };  #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb) @@ -361,6 +362,8 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)  	struct sk_buff *skb =  		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,  			  GFP_ATOMIC | __GFP_NOWARN); + +	BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));  	if (unlikely(skb == NULL))  		return NULL; @@ -396,11 +399,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue,  	nr_slots = shinfo->nr_frags + 1;  	copy_count(skb) = 0; +	XENVIF_TX_CB(skb)->split_mask = 0;  	/* Create copy ops for exactly data_len bytes into the skb head. */  	__skb_put(skb, data_len);  	while (data_len > 0) {  		int amount = data_len > txp->size ? txp->size : data_len; +		bool split = false;  		cop->source.u.ref = txp->gref;  		cop->source.domid = queue->vif->domid; @@ -413,6 +418,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue,  		cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)  				               - data_len); +		/* Don't cross local page boundary! 
*/ +		if (cop->dest.offset + amount > XEN_PAGE_SIZE) { +			amount = XEN_PAGE_SIZE - cop->dest.offset; +			XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb); +			split = true; +		} +  		cop->len = amount;  		cop->flags = GNTCOPY_source_gref; @@ -420,7 +432,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,  		pending_idx = queue->pending_ring[index];  		callback_param(queue, pending_idx).ctx = NULL;  		copy_pending_idx(skb, copy_count(skb)) = pending_idx; -		copy_count(skb)++; +		if (!split) +			copy_count(skb)++;  		cop++;  		data_len -= amount; @@ -441,7 +454,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,  			nr_slots--;  		} else {  			/* The copy op partially covered the tx_request. -			 * The remainder will be mapped. +			 * The remainder will be mapped or copied in the next +			 * iteration.  			 */  			txp->offset += amount;  			txp->size -= amount; @@ -539,6 +553,13 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,  		pending_idx = copy_pending_idx(skb, i);  		newerr = (*gopp_copy)->status; + +		/* Split copies need to be handled together. */ +		if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) { +			(*gopp_copy)++; +			if (!newerr) +				newerr = (*gopp_copy)->status; +		}  		if (likely(!newerr)) {  			/* The first frag might still have this slot mapped */  			if (i < copy_count(skb) - 1 || !sharedslot) @@ -883,11 +904,9 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,  		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];  		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];  		unsigned int extra_count; -		u16 pending_idx;  		RING_IDX idx;  		int work_to_do;  		unsigned int data_len; -		pending_ring_idx_t index;  		if (queue->tx.sring->req_prod - queue->tx.req_cons >  		    XEN_NETIF_TX_RING_SIZE) { @@ -975,17 +994,12 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,  		/* No crossing a page as the payload mustn't fragment. 
*/  		if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) { -			netdev_err(queue->vif->dev, -				   "txreq.offset: %u, size: %u, end: %lu\n", -				   txreq.offset, txreq.size, -				   (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size); +			netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n", +				   txreq.offset, txreq.size);  			xenvif_fatal_tx_err(queue->vif);  			break;  		} -		index = pending_index(queue->pending_cons); -		pending_idx = queue->pending_ring[index]; -  		if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)  			data_len = txreq.size; @@ -1066,10 +1080,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,  		__skb_queue_tail(&queue->tx_queue, skb);  		queue->tx.req_cons = idx; - -		if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) || -		    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops))) -			break;  	}  	return;  |