Diffstat (limited to 'drivers/net/ethernet/intel/e1000')
 drivers/net/ethernet/intel/e1000/e1000_ethtool.c |  3 ++-
 drivers/net/ethernet/intel/e1000/e1000_main.c    | 20 ++++++++++++--------
 2 files changed, 14 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index b691eb4f6376..4270ad2d4ddf 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -24,6 +24,7 @@
 /* ethtool support for e1000 */
 
 #include "e1000.h"
+#include <linux/jiffies.h>
 #include <linux/uaccess.h>
 
 enum {NETDEV_STATS, E1000_STATS};
@@ -1460,7 +1461,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
 			ret_val = 13; /* ret_val is the same as mis-compare */
 			break;
 		}
-		if (jiffies >= (time + 2)) {
+		if (time_after_eq(jiffies, time + 2)) {
 			ret_val = 14; /* error code for time out error */
 			break;
 		}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 83140cbb5f01..7f997d36948f 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2977,7 +2977,6 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
 			   struct e1000_tx_ring *tx_ring, int tx_flags,
 			   int count)
 {
-	struct e1000_hw *hw = &adapter->hw;
 	struct e1000_tx_desc *tx_desc = NULL;
 	struct e1000_tx_buffer *buffer_info;
 	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
@@ -3031,11 +3030,6 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
 	wmb();
 
 	tx_ring->next_to_use = i;
-	writel(i, hw->hw_addr + tx_ring->tdt);
-	/* we need this if more than one processor can write to our tail
-	 * at a time, it synchronizes IO on IA64/Altix systems
-	 */
-	mmiowb();
 }
 
 /* 82547 workaround to avoid controller hang in half-duplex environment.
@@ -3226,9 +3220,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 		return NETDEV_TX_BUSY;
 	}
 
-	if (vlan_tx_tag_present(skb)) {
+	if (skb_vlan_tag_present(skb)) {
 		tx_flags |= E1000_TX_FLAGS_VLAN;
-		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
+		tx_flags |= (skb_vlan_tag_get(skb) <<
+			     E1000_TX_FLAGS_VLAN_SHIFT);
 	}
 
 	first = tx_ring->next_to_use;
@@ -3263,6 +3258,15 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 		/* Make sure there is space in the ring for the next send. */
 		e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
 
+		if (!skb->xmit_more ||
+		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
+			writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
+			/* we need this if more than one processor can write to
+			 * our tail at a time, it synchronizes IO on IA64/Altix
+			 * systems
+			 */
+			mmiowb();
+		}
 	} else {
 		dev_kfree_skb_any(skb);
 		tx_ring->buffer_info[first].time_stamp = 0;
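
The time_after_eq() conversion in the first hunk matters because a direct "jiffies >= (time + 2)" comparison gives the wrong answer once the jiffies counter wraps. A minimal userspace sketch of the idea (not kernel code; wrap_safe_after_eq and fake_jiffies are names invented for this illustration, mirroring the signed-difference trick the kernel's time_after_eq() macro relies on):

#include <stdio.h>
#include <limits.h>

/* same signed-difference comparison idea as the kernel's time_after_eq(a, b) */
#define wrap_safe_after_eq(a, b)  ((long)((a) - (b)) >= 0)

int main(void)
{
	/* pretend the counter is two ticks away from wrapping past ULONG_MAX */
	unsigned long fake_jiffies = ULONG_MAX - 1;
	unsigned long deadline = fake_jiffies + 2;	/* wraps around to 0 */

	/* naive >= sees a huge value >= 0 and reports a bogus, immediate timeout */
	printf("naive     : %d\n", fake_jiffies >= deadline);

	/* wrap-safe comparison correctly says the deadline has not passed yet */
	printf("wrap-safe : %d\n", wrap_safe_after_eq(fake_jiffies, deadline));

	/* advance three ticks; the counter wraps to 1, past the deadline */
	fake_jiffies += 3;
	printf("expired   : %d\n", wrap_safe_after_eq(fake_jiffies, deadline));
	return 0;
}

The loopback test's two-jiffy timeout is exactly this pattern, which is why the hunk swaps the raw comparison for time_after_eq().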