Diffstat (limited to 'drivers/net')
166 files changed, 1760 insertions, 818 deletions
| diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index a1b33aa6054a..9697977b80f0 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -423,7 +423,7 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],  			return -EINVAL;  		bond_opt_initval(&newval, -				 nla_get_be64(data[IFLA_BOND_AD_ACTOR_SYSTEM])); +				 nla_get_u64(data[IFLA_BOND_AD_ACTOR_SYSTEM]));  		err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYSTEM, &newval);  		if (err)  			return err; diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index a13a4896a8bd..760d2c07e3a2 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -184,12 +184,12 @@   * Below is some version info we got:   *    SOC   Version   IP-Version  Glitch- [TR]WRN_INT IRQ Err Memory err RTR re-   *                                Filter? connected?  Passive detection  ception in MB - *   MX25  FlexCAN2  03.00.00.00     no        no         ?       no        no + *   MX25  FlexCAN2  03.00.00.00     no        no        no       no        no   *   MX28  FlexCAN2  03.00.04.00    yes       yes        no       no        no - *   MX35  FlexCAN2  03.00.00.00     no        no         ?       no        no + *   MX35  FlexCAN2  03.00.00.00     no        no        no       no        no   *   MX53  FlexCAN2  03.00.00.00    yes        no        no       no        no   *   MX6s  FlexCAN3  10.00.12.00    yes       yes        no       no       yes - *   VF610 FlexCAN3  ?               no       yes         ?      yes       yes? + *   VF610 FlexCAN3  ?               no       yes        no      yes       yes?   *   * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.   */ @@ -297,7 +297,8 @@ static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {  static const struct flexcan_devtype_data fsl_vf610_devtype_data = {  	.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | -		FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP, +		FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | +		FLEXCAN_QUIRK_BROKEN_PERR_STATE,  };  static const struct can_bittiming_const flexcan_bittiming_const = { @@ -525,7 +526,7 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)  		data = be32_to_cpup((__be32 *)&cf->data[0]);  		flexcan_write(data, &priv->tx_mb->data[0]);  	} -	if (cf->can_dlc > 3) { +	if (cf->can_dlc > 4) {  		data = be32_to_cpup((__be32 *)&cf->data[4]);  		flexcan_write(data, &priv->tx_mb->data[1]);  	} diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c index 85268be0c913..55513411a82e 100644 --- a/drivers/net/can/peak_canfd/peak_canfd.c +++ b/drivers/net/can/peak_canfd/peak_canfd.c @@ -258,21 +258,18 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,  	/* if this frame is an echo, */  	if ((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) &&  	    !(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) { -		int n;  		unsigned long flags;  		spin_lock_irqsave(&priv->echo_lock, flags); -		n = can_get_echo_skb(priv->ndev, msg->client); +		can_get_echo_skb(priv->ndev, msg->client);  		spin_unlock_irqrestore(&priv->echo_lock, flags);  		/* count bytes of the echo instead of skb */  		stats->tx_bytes += cf_len;  		stats->tx_packets++; -		if (n) { -			/* restart tx queue only if a slot is free */ -			netif_wake_queue(priv->ndev); -		} +		/* restart tx queue (a slot is free) */ +		netif_wake_queue(priv->ndev);  		return 0;  	} diff --git 
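A quick aside on the flexcan_start_xmit() hunk above: a classic CAN frame carries at most 8 data bytes, packed into the mailbox as two big-endian 32-bit words, and the second word only holds payload once the frame has more than 4 data bytes. With the old "can_dlc > 3" test a frame of exactly 4 bytes would also copy data[4..7], bytes beyond its valid payload. The sketch below is not the flexcan driver; it just restates that packing rule with illustrative names.

#include <linux/can.h>
#include <asm/byteorder.h>

/* Pack a classic CAN payload into two big-endian 32-bit mailbox words. */
static void example_pack_can_payload(const struct can_frame *cf, u32 words[2])
{
	words[0] = 0;
	words[1] = 0;

	if (cf->can_dlc > 0)
		words[0] = be32_to_cpup((const __be32 *)&cf->data[0]);
	/* only frames longer than 4 bytes carry payload in the second word */
	if (cf->can_dlc > 4)
		words[1] = be32_to_cpup((const __be32 *)&cf->data[4]);
}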
a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c index b4efd711f824..788c3464a3b0 100644 --- a/drivers/net/can/peak_canfd/peak_pciefd_main.c +++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c @@ -825,7 +825,10 @@ err_release_regions:  err_disable_pci:  	pci_disable_device(pdev); -	return err; +	/* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while +	 * the probe() function must return a negative errno in case of failure +	 * (err is unchanged if negative) */ +	return pcibios_err_to_errno(err);  }  /* free the board structure object, as well as its resources: */ diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index 131026fbc2d7..5adc95c922ee 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -717,7 +717,10 @@ failure_release_regions:  failure_disable_pci:  	pci_disable_device(pdev); -	return err; +	/* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while +	 * the probe() function must return a negative errno in case of failure +	 * (err is unchanged if negative) */ +	return pcibios_err_to_errno(err);  }  static void peak_pci_remove(struct pci_dev *pdev) diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index 4d4941469cfc..db6ea936dc3f 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -637,6 +637,9 @@ static int ti_hecc_rx_poll(struct napi_struct *napi, int quota)  		mbx_mask = hecc_read(priv, HECC_CANMIM);  		mbx_mask |= HECC_TX_MBOX_MASK;  		hecc_write(priv, HECC_CANMIM, mbx_mask); +	} else { +		/* repoll is done only if whole budget is used */ +		num_pkts = quota;  	}  	return num_pkts; diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index b3d02759c226..12ff0020ecd6 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -288,6 +288,8 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)  	case -ECONNRESET: /* unlink */  	case -ENOENT: +	case -EPIPE: +	case -EPROTO:  	case -ESHUTDOWN:  		return; @@ -393,6 +395,7 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)  		if (dev->can.state == CAN_STATE_ERROR_WARNING ||  		    dev->can.state == CAN_STATE_ERROR_PASSIVE) { +			cf->can_id |= CAN_ERR_CRTL;  			cf->data[1] = (txerr > rxerr) ?  			    CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE;  		} diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index 9fdb0f0bfa06..c6dcf93675c0 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -393,6 +393,8 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)  		break;  	case -ENOENT: +	case -EPIPE: +	case -EPROTO:  	case -ESHUTDOWN:  		return; diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 68ac3e88a8ce..8bf80ad9dc44 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -449,7 +449,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev)  		dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)",  			rc); -	return rc; +	return (rc > 0) ? 
0 : rc;  }  static void gs_usb_xmit_callback(struct urb *urb) diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 9b18d96ef526..63587b8e6825 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -609,8 +609,8 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,  			}  			if (pos + tmp->len > actual_len) { -				dev_err(dev->udev->dev.parent, -					"Format error\n"); +				dev_err_ratelimited(dev->udev->dev.parent, +						    "Format error\n");  				break;  			} @@ -813,6 +813,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,  	if (err) {  		netdev_err(netdev, "Error transmitting URB\n");  		usb_unanchor_urb(urb); +		kfree(buf);  		usb_free_urb(urb);  		return err;  	} @@ -1325,6 +1326,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)  	case 0:  		break;  	case -ENOENT: +	case -EPIPE: +	case -EPROTO:  	case -ESHUTDOWN:  		return;  	default: @@ -1333,7 +1336,7 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)  		goto resubmit_urb;  	} -	while (pos <= urb->actual_length - MSG_HEADER_LEN) { +	while (pos <= (int)(urb->actual_length - MSG_HEADER_LEN)) {  		msg = urb->transfer_buffer + pos;  		/* The Kvaser firmware can only read and write messages that @@ -1352,7 +1355,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)  		}  		if (pos + msg->len > urb->actual_length) { -			dev_err(dev->udev->dev.parent, "Format error\n"); +			dev_err_ratelimited(dev->udev->dev.parent, +					    "Format error\n");  			break;  		} @@ -1768,6 +1772,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,  		spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);  		usb_unanchor_urb(urb); +		kfree(buf);  		stats->tx_dropped++; diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c index 7f0272558bef..8d8c2086424d 100644 --- a/drivers/net/can/usb/mcba_usb.c +++ b/drivers/net/can/usb/mcba_usb.c @@ -592,6 +592,8 @@ static void mcba_usb_read_bulk_callback(struct urb *urb)  		break;  	case -ENOENT: +	case -EPIPE: +	case -EPROTO:  	case -ESHUTDOWN:  		return; @@ -862,7 +864,7 @@ static int mcba_usb_probe(struct usb_interface *intf,  		goto cleanup_unregister_candev;  	} -	dev_info(&intf->dev, "Microchip CAN BUS analizer connected\n"); +	dev_info(&intf->dev, "Microchip CAN BUS Analyzer connected\n");  	return 0; diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index d000cb62d6ae..27861c417c94 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -524,6 +524,8 @@ static void usb_8dev_read_bulk_callback(struct urb *urb)  		break;  	case -ENOENT: +	case -EPIPE: +	case -EPROTO:  	case -ESHUTDOWN:  		return; diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index 8404e8852a0f..b4c4a2c76437 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -194,7 +194,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,  		tbp = peer_tb;  	} -	if (tbp[IFLA_IFNAME]) { +	if (ifmp && tbp[IFLA_IFNAME]) {  		nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);  		name_assign_type = NET_NAME_USER;  	} else { diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index f5a8dd96fd75..4498ab897d94 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1500,10 +1500,13 @@ static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds,  {  	struct b53_device *dev = ds->priv; -	/* Older models support a 
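The USB CAN drivers touched above (ems_usb, esd_usb2, kvaser_usb, mcba_usb, usb_8dev) all gain the same two case labels in their completion handlers: -EPIPE and -EPROTO join the set of statuses that mean the endpoint or device is gone and the URB must not be resubmitted, which stops the resubmit/fail loop that otherwise floods the log on unplug. Below is a hedged, generic sketch of that callback shape; the example_* names are illustrative and not taken from any of these drivers.

#include <linux/usb.h>
#include <linux/netdevice.h>

static void example_read_bulk_callback(struct urb *urb)
{
	struct net_device *netdev = urb->context;	/* illustrative context */
	int err;

	switch (urb->status) {
	case 0:			/* success, fall through to processing */
		break;

	case -ECONNRESET:	/* unlink */
	case -ENOENT:
	case -EPIPE:
	case -EPROTO:
	case -ESHUTDOWN:
		return;		/* fatal: do not resubmit the URB */

	default:
		netdev_info(netdev, "Rx URB aborted (%d)\n", urb->status);
		goto resubmit;
	}

	/* ... process urb->transfer_buffer / urb->actual_length here ... */

resubmit:
	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err && err != -ENODEV)
		netdev_err(netdev, "failed resubmitting read URB: %d\n", err);
}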
different tag format that we do not -	 * support in net/dsa/tag_brcm.c yet. +	/* Older models (5325, 5365) support a different tag format that we do +	 * not support in net/dsa/tag_brcm.c yet. 539x and 531x5 require managed +	 * mode to be turned on which means we need to specifically manage ARL +	 * misses on multicast addresses (TBD).  	 */ -	if (is5325(dev) || is5365(dev) || !b53_can_enable_brcm_tags(ds, port)) +	if (is5325(dev) || is5365(dev) || is539x(dev) || is531x5(dev) || +	    !b53_can_enable_brcm_tags(ds, port))  		return DSA_TAG_PROTO_NONE;  	/* Broadcom BCM58xx chips have a flow accelerator on Port 8 diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index ea01f24f15e7..b62d47210db8 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -14,7 +14,6 @@  #include <linux/netdevice.h>  #include <linux/interrupt.h>  #include <linux/platform_device.h> -#include <linux/of.h>  #include <linux/phy.h>  #include <linux/phy_fixed.h>  #include <linux/mii.h> diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index b721a2009b50..23b45da784cb 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -625,7 +625,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,  	bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6src, v6_spec->psrc,  				slice_num, false);  	bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6src, v6_m_spec->psrc, -				slice_num, true); +				SLICE_NUM_MASK, true);  	/* Insert into TCAM now because we need to insert a second rule */  	bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]); @@ -699,7 +699,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,  	/* Insert into Action and policer RAMs now, set chain ID to  	 * the one we are chained to  	 */ -	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port_num, +	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port_num,  				      queue_num, true);  	if (ret)  		goto out_err; diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 8171055fde7a..66d33e97cbc5 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -339,7 +339,7 @@ static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)  	u16 mask;  	mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &mask); -	mask |= GENMASK(chip->g1_irq.nirqs, 0); +	mask &= ~GENMASK(chip->g1_irq.nirqs, 0);  	mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask);  	free_irq(chip->irq, chip); @@ -395,7 +395,7 @@ static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip)  	return 0;  out_disable: -	mask |= GENMASK(chip->g1_irq.nirqs, 0); +	mask &= ~GENMASK(chip->g1_irq.nirqs, 0);  	mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask);  out_mapping: @@ -2177,6 +2177,19 @@ static const struct of_device_id mv88e6xxx_mdio_external_match[] = {  	{ },  }; +static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip) + +{ +	struct mv88e6xxx_mdio_bus *mdio_bus; +	struct mii_bus *bus; + +	list_for_each_entry(mdio_bus, &chip->mdios, list) { +		bus = mdio_bus->bus; + +		mdiobus_unregister(bus); +	} +} +  static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,  				    struct device_node *np)  { @@ -2201,27 +2214,16 @@ static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,  		match = of_match_node(mv88e6xxx_mdio_external_match, child);  		if (match) {  			err = mv88e6xxx_mdio_register(chip, child, true); -			if (err) +			if (err) { +				mv88e6xxx_mdios_unregister(chip);  				return err; +			}  		}  	}  	return 
0;  } -static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip) - -{ -	struct mv88e6xxx_mdio_bus *mdio_bus; -	struct mii_bus *bus; - -	list_for_each_entry(mdio_bus, &chip->mdios, list) { -		bus = mdio_bus->bus; - -		mdiobus_unregister(bus); -	} -} -  static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds)  {  	struct mv88e6xxx_chip *chip = ds->priv; diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index a7801f6668a5..6315774d72b3 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -338,6 +338,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,  		cmode = MV88E6XXX_PORT_STS_CMODE_2500BASEX;  		break;  	case PHY_INTERFACE_MODE_XGMII: +	case PHY_INTERFACE_MODE_XAUI:  		cmode = MV88E6XXX_PORT_STS_CMODE_XAUI;  		break;  	case PHY_INTERFACE_MODE_RXAUI: diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index f4e13a7014bd..36c8950dbd2d 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -602,7 +602,7 @@ struct vortex_private {  	struct sk_buff* rx_skbuff[RX_RING_SIZE];  	struct sk_buff* tx_skbuff[TX_RING_SIZE];  	unsigned int cur_rx, cur_tx;		/* The next free ring entry */ -	unsigned int dirty_rx, dirty_tx;	/* The ring entries to be free()ed. */ +	unsigned int dirty_tx;	/* The ring entries to be free()ed. */  	struct vortex_extra_stats xstats;	/* NIC-specific extra stats */  	struct sk_buff *tx_skb;				/* Packet being eaten by bus master ctrl.  */  	dma_addr_t tx_skb_dma;				/* Allocated DMA address for bus master ctrl DMA.   */ @@ -618,7 +618,6 @@ struct vortex_private {  	/* The remainder are related to chip state, mostly media selection. */  	struct timer_list timer;			/* Media selection timer. */ -	struct timer_list rx_oom_timer;		/* Rx skb allocation retry timer */  	int options;						/* User-settable misc. driver options. */  	unsigned int media_override:4, 		/* Passed-in media type. */  		default_media:4,				/* Read from the EEPROM/Wn3_Config. */ @@ -760,7 +759,6 @@ static void mdio_sync(struct vortex_private *vp, int bits);  static int mdio_read(struct net_device *dev, int phy_id, int location);  static void mdio_write(struct net_device *vp, int phy_id, int location, int value);  static void vortex_timer(struct timer_list *t); -static void rx_oom_timer(struct timer_list *t);  static netdev_tx_t vortex_start_xmit(struct sk_buff *skb,  				     struct net_device *dev);  static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb, @@ -1601,7 +1599,6 @@ vortex_up(struct net_device *dev)  	timer_setup(&vp->timer, vortex_timer, 0);  	mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait)); -	timer_setup(&vp->rx_oom_timer, rx_oom_timer, 0);  	if (vortex_debug > 1)  		pr_debug("%s: Initial media type %s.\n", @@ -1676,7 +1673,7 @@ vortex_up(struct net_device *dev)  	window_write16(vp, 0x0040, 4, Wn4_NetDiag);  	if (vp->full_bus_master_rx) { /* Boomerang bus master. */ -		vp->cur_rx = vp->dirty_rx = 0; +		vp->cur_rx = 0;  		/* Initialize the RxEarly register as recommended. */  		iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);  		iowrite32(0x0020, ioaddr + PktStatus); @@ -1729,6 +1726,7 @@ vortex_open(struct net_device *dev)  	struct vortex_private *vp = netdev_priv(dev);  	int i;  	int retval; +	dma_addr_t dma;  	/* Use the now-standard shared IRQ implementation. */  	if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ? @@ -1753,7 +1751,11 @@ vortex_open(struct net_device *dev)  				break;			/* Bad news!  
*/  			skb_reserve(skb, NET_IP_ALIGN);	/* Align IP on 16 byte boundaries */ -			vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); +			dma = pci_map_single(VORTEX_PCI(vp), skb->data, +					     PKT_BUF_SZ, PCI_DMA_FROMDEVICE); +			if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma)) +				break; +			vp->rx_ring[i].addr = cpu_to_le32(dma);  		}  		if (i != RX_RING_SIZE) {  			pr_emerg("%s: no memory for rx ring\n", dev->name); @@ -2067,6 +2069,12 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)  		int len = (skb->len + 3) & ~3;  		vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,  						PCI_DMA_TODEVICE); +		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) { +			dev_kfree_skb_any(skb); +			dev->stats.tx_dropped++; +			return NETDEV_TX_OK; +		} +  		spin_lock_irq(&vp->window_lock);  		window_set(vp, 7);  		iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr); @@ -2593,7 +2601,7 @@ boomerang_rx(struct net_device *dev)  	int entry = vp->cur_rx % RX_RING_SIZE;  	void __iomem *ioaddr = vp->ioaddr;  	int rx_status; -	int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx; +	int rx_work_limit = RX_RING_SIZE;  	if (vortex_debug > 5)  		pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS)); @@ -2614,7 +2622,8 @@ boomerang_rx(struct net_device *dev)  		} else {  			/* The packet length: up to 4.5K!. */  			int pkt_len = rx_status & 0x1fff; -			struct sk_buff *skb; +			struct sk_buff *skb, *newskb; +			dma_addr_t newdma;  			dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);  			if (vortex_debug > 4) @@ -2633,9 +2642,27 @@ boomerang_rx(struct net_device *dev)  				pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);  				vp->rx_copy++;  			} else { +				/* Pre-allocate the replacement skb.  If it or its +				 * mapping fails then recycle the buffer thats already +				 * in place +				 */ +				newskb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ); +				if (!newskb) { +					dev->stats.rx_dropped++; +					goto clear_complete; +				} +				newdma = pci_map_single(VORTEX_PCI(vp), newskb->data, +							PKT_BUF_SZ, PCI_DMA_FROMDEVICE); +				if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) { +					dev->stats.rx_dropped++; +					consume_skb(newskb); +					goto clear_complete; +				} +  				/* Pass up the skbuff already on the Rx ring. */  				skb = vp->rx_skbuff[entry]; -				vp->rx_skbuff[entry] = NULL; +				vp->rx_skbuff[entry] = newskb; +				vp->rx_ring[entry].addr = cpu_to_le32(newdma);  				skb_put(skb, pkt_len);  				pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);  				vp->rx_nocopy++; @@ -2653,55 +2680,15 @@ boomerang_rx(struct net_device *dev)  			netif_rx(skb);  			dev->stats.rx_packets++;  		} -		entry = (++vp->cur_rx) % RX_RING_SIZE; -	} -	/* Refill the Rx ring buffers. */ -	for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) { -		struct sk_buff *skb; -		entry = vp->dirty_rx % RX_RING_SIZE; -		if (vp->rx_skbuff[entry] == NULL) { -			skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ); -			if (skb == NULL) { -				static unsigned long last_jif; -				if (time_after(jiffies, last_jif + 10 * HZ)) { -					pr_warn("%s: memory shortage\n", -						dev->name); -					last_jif = jiffies; -				} -				if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) -					mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1)); -				break;			/* Bad news!  
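The 3c59x hunks above add dma_mapping_error() checks after every pci_map_single() call in vortex_open() and vortex_start_xmit(): a streaming DMA mapping can fail, so its cookie must be validated before it is handed to the NIC, and on failure the packet is dropped rather than queued with a bogus address. A minimal sketch of the TX-side check, using the legacy pci_map_single()/PCI_DMA_TODEVICE API this driver uses; the function and its extra pdev parameter are illustrative only.

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev,
				struct pci_dev *pdev)
{
	dma_addr_t dma;

	dma = pci_map_single(pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
	if (dma_mapping_error(&pdev->dev, dma)) {
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;	/* consumed, even though dropped */
	}

	/* ... program 'dma' into the TX descriptor and kick the NIC ... */
	return NETDEV_TX_OK;
}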
*/ -			} -			vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); -			vp->rx_skbuff[entry] = skb; -		} +clear_complete:  		vp->rx_ring[entry].status = 0;	/* Clear complete bit. */  		iowrite16(UpUnstall, ioaddr + EL3_CMD); +		entry = (++vp->cur_rx) % RX_RING_SIZE;  	}  	return 0;  } -/* - * If we've hit a total OOM refilling the Rx ring we poll once a second - * for some memory.  Otherwise there is no way to restart the rx process. - */ -static void -rx_oom_timer(struct timer_list *t) -{ -	struct vortex_private *vp = from_timer(vp, t, rx_oom_timer); -	struct net_device *dev = vp->mii.dev; - -	spin_lock_irq(&vp->lock); -	if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)	/* This test is redundant, but makes me feel good */ -		boomerang_rx(dev); -	if (vortex_debug > 1) { -		pr_debug("%s: rx_oom_timer %s\n", dev->name, -			((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying"); -	} -	spin_unlock_irq(&vp->lock); -} -  static void  vortex_down(struct net_device *dev, int final_down)  { @@ -2711,7 +2698,6 @@ vortex_down(struct net_device *dev, int final_down)  	netdev_reset_queue(dev);  	netif_stop_queue(dev); -	del_timer_sync(&vp->rx_oom_timer);  	del_timer_sync(&vp->timer);  	/* Turn off statistics ASAP.  We update dev->stats below. */ diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 97c5a89a9cf7..fbe21a817bd8 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -75,6 +75,9 @@ static struct workqueue_struct *ena_wq;  MODULE_DEVICE_TABLE(pci, ena_pci_tbl);  static int ena_rss_init_default(struct ena_adapter *adapter); +static void check_for_admin_com_state(struct ena_adapter *adapter); +static void ena_destroy_device(struct ena_adapter *adapter); +static int ena_restore_device(struct ena_adapter *adapter);  static void ena_tx_timeout(struct net_device *dev)  { @@ -1565,7 +1568,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)  static int ena_up_complete(struct ena_adapter *adapter)  { -	int rc, i; +	int rc;  	rc = ena_rss_configure(adapter);  	if (rc) @@ -1584,17 +1587,6 @@ static int ena_up_complete(struct ena_adapter *adapter)  	ena_napi_enable_all(adapter); -	/* Enable completion queues interrupt */ -	for (i = 0; i < adapter->num_queues; i++) -		ena_unmask_interrupt(&adapter->tx_ring[i], -				     &adapter->rx_ring[i]); - -	/* schedule napi in case we had pending packets -	 * from the last time we disable napi -	 */ -	for (i = 0; i < adapter->num_queues; i++) -		napi_schedule(&adapter->ena_napi[i].napi); -  	return 0;  } @@ -1731,7 +1723,7 @@ create_err:  static int ena_up(struct ena_adapter *adapter)  { -	int rc; +	int rc, i;  	netdev_dbg(adapter->netdev, "%s\n", __func__); @@ -1774,6 +1766,17 @@ static int ena_up(struct ena_adapter *adapter)  	set_bit(ENA_FLAG_DEV_UP, &adapter->flags); +	/* Enable completion queues interrupt */ +	for (i = 0; i < adapter->num_queues; i++) +		ena_unmask_interrupt(&adapter->tx_ring[i], +				     &adapter->rx_ring[i]); + +	/* schedule napi in case we had pending packets +	 * from the last time we disable napi +	 */ +	for (i = 0; i < adapter->num_queues; i++) +		napi_schedule(&adapter->ena_napi[i].napi); +  	return rc;  err_up: @@ -1884,6 +1887,17 @@ static int ena_close(struct net_device *netdev)  	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))  		ena_down(adapter); +	/* Check for device status and issue reset if needed*/ +	
check_for_admin_com_state(adapter); +	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { +		netif_err(adapter, ifdown, adapter->netdev, +			  "Destroy failure, restarting device\n"); +		ena_dump_stats_to_dmesg(adapter); +		/* rtnl lock already obtained in dev_ioctl() layer */ +		ena_destroy_device(adapter); +		ena_restore_device(adapter); +	} +  	return 0;  } @@ -2544,11 +2558,12 @@ static void ena_destroy_device(struct ena_adapter *adapter)  	ena_com_set_admin_running_state(ena_dev, false); -	ena_close(netdev); +	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) +		ena_down(adapter);  	/* Before releasing the ENA resources, a device reset is required.  	 * (to prevent the device from accessing them). -	 * In case the reset flag is set and the device is up, ena_close +	 * In case the reset flag is set and the device is up, ena_down()  	 * already perform the reset, so it can be skipped.  	 */  	if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up)) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h index 57e796870595..105fdb958cef 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h @@ -50,7 +50,7 @@  #define AQ_CFG_PCI_FUNC_MSIX_IRQS   9U  #define AQ_CFG_PCI_FUNC_PORTS       2U -#define AQ_CFG_SERVICE_TIMER_INTERVAL    (2 * HZ) +#define AQ_CFG_SERVICE_TIMER_INTERVAL    (1 * HZ)  #define AQ_CFG_POLLING_TIMER_INTERVAL   ((unsigned int)(2 * HZ))  #define AQ_CFG_SKB_FRAGS_MAX   32U @@ -80,6 +80,7 @@  #define AQ_CFG_DRV_VERSION	__stringify(NIC_MAJOR_DRIVER_VERSION)"."\  				__stringify(NIC_MINOR_DRIVER_VERSION)"."\  				__stringify(NIC_BUILD_DRIVER_VERSION)"."\ -				__stringify(NIC_REVISION_DRIVER_VERSION) +				__stringify(NIC_REVISION_DRIVER_VERSION) \ +				AQ_CFG_DRV_VERSION_SUFFIX  #endif /* AQ_CFG_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index 70efb7467bf3..f2d8063a2cef 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -66,14 +66,14 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {  	"OutUCast",  	"OutMCast",  	"OutBCast", -	"InUCastOctects", -	"OutUCastOctects", -	"InMCastOctects", -	"OutMCastOctects", -	"InBCastOctects", -	"OutBCastOctects", -	"InOctects", -	"OutOctects", +	"InUCastOctets", +	"OutUCastOctets", +	"InMCastOctets", +	"OutMCastOctets", +	"InBCastOctets", +	"OutBCastOctets", +	"InOctets", +	"OutOctets",  	"InPacketsDma",  	"OutPacketsDma",  	"InOctetsDma", diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index 0207927dc8a6..b3825de6cdfb 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -46,6 +46,28 @@ struct aq_hw_link_status_s {  	unsigned int mbps;  }; +struct aq_stats_s { +	u64 uprc; +	u64 mprc; +	u64 bprc; +	u64 erpt; +	u64 uptc; +	u64 mptc; +	u64 bptc; +	u64 erpr; +	u64 mbtc; +	u64 bbtc; +	u64 mbrc; +	u64 bbrc; +	u64 ubrc; +	u64 ubtc; +	u64 dpc; +	u64 dma_pkt_rc; +	u64 dma_pkt_tc; +	u64 dma_oct_rc; +	u64 dma_oct_tc; +}; +  #define AQ_HW_IRQ_INVALID 0U  #define AQ_HW_IRQ_LEGACY  1U  #define AQ_HW_IRQ_MSI     2U @@ -85,7 +107,9 @@ struct aq_hw_ops {  	void (*destroy)(struct aq_hw_s *self);  	int (*get_hw_caps)(struct aq_hw_s *self, -			   struct aq_hw_caps_s *aq_hw_caps); +			   struct aq_hw_caps_s *aq_hw_caps, +			   unsigned 
short device, +			   unsigned short subsystem_device);  	int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,  			       unsigned int frags); @@ -164,8 +188,7 @@ struct aq_hw_ops {  	int (*hw_update_stats)(struct aq_hw_s *self); -	int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data, -			       unsigned int *p_count); +	struct aq_stats_s *(*hw_get_hw_stats)(struct aq_hw_s *self);  	int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 78dfb2ab78ce..75a894a9251c 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -37,6 +37,8 @@ static unsigned int aq_itr_rx;  module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);  MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate"); +static void aq_nic_update_ndev_stats(struct aq_nic_s *self); +  static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)  {  	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; @@ -166,11 +168,8 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)  static void aq_nic_service_timer_cb(struct timer_list *t)  {  	struct aq_nic_s *self = from_timer(self, t, service_timer); -	struct net_device *ndev = aq_nic_get_ndev(self); +	int ctimer = AQ_CFG_SERVICE_TIMER_INTERVAL;  	int err = 0; -	unsigned int i = 0U; -	struct aq_ring_stats_rx_s stats_rx; -	struct aq_ring_stats_tx_s stats_tx;  	if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))  		goto err_exit; @@ -182,23 +181,14 @@ static void aq_nic_service_timer_cb(struct timer_list *t)  	if (self->aq_hw_ops.hw_update_stats)  		self->aq_hw_ops.hw_update_stats(self->aq_hw); -	memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); -	memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); -	for (i = AQ_DIMOF(self->aq_vec); i--;) { -		if (self->aq_vec[i]) -			aq_vec_add_stats(self->aq_vec[i], &stats_rx, &stats_tx); -	} +	aq_nic_update_ndev_stats(self); -	ndev->stats.rx_packets = stats_rx.packets; -	ndev->stats.rx_bytes = stats_rx.bytes; -	ndev->stats.rx_errors = stats_rx.errors; -	ndev->stats.tx_packets = stats_tx.packets; -	ndev->stats.tx_bytes = stats_tx.bytes; -	ndev->stats.tx_errors = stats_tx.errors; +	/* If no link - use faster timer rate to detect link up asap */ +	if (!netif_carrier_ok(self->ndev)) +		ctimer = max(ctimer / 2, 1);  err_exit: -	mod_timer(&self->service_timer, -		  jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL); +	mod_timer(&self->service_timer, jiffies + ctimer);  }  static void aq_nic_polling_timer_cb(struct timer_list *t) @@ -222,7 +212,7 @@ static struct net_device *aq_nic_ndev_alloc(void)  struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,  				   const struct ethtool_ops *et_ops, -				   struct device *dev, +				   struct pci_dev *pdev,  				   struct aq_pci_func_s *aq_pci_func,  				   unsigned int port,  				   const struct aq_hw_ops *aq_hw_ops) @@ -242,7 +232,7 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,  	ndev->netdev_ops = ndev_ops;  	ndev->ethtool_ops = et_ops; -	SET_NETDEV_DEV(ndev, dev); +	SET_NETDEV_DEV(ndev, &pdev->dev);  	ndev->if_port = port;  	self->ndev = ndev; @@ -254,7 +244,8 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,  	self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port,  						&self->aq_hw_ops); -	err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps); +	err = 
self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps, +					  pdev->device, pdev->subsystem_device);  	if (err < 0)  		goto err_exit; @@ -749,16 +740,40 @@ int aq_nic_get_regs_count(struct aq_nic_s *self)  void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)  { -	struct aq_vec_s *aq_vec = NULL;  	unsigned int i = 0U;  	unsigned int count = 0U; -	int err = 0; +	struct aq_vec_s *aq_vec = NULL; +	struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw); -	err = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw, data, &count); -	if (err < 0) +	if (!stats)  		goto err_exit; -	data += count; +	data[i] = stats->uprc + stats->mprc + stats->bprc; +	data[++i] = stats->uprc; +	data[++i] = stats->mprc; +	data[++i] = stats->bprc; +	data[++i] = stats->erpt; +	data[++i] = stats->uptc + stats->mptc + stats->bptc; +	data[++i] = stats->uptc; +	data[++i] = stats->mptc; +	data[++i] = stats->bptc; +	data[++i] = stats->ubrc; +	data[++i] = stats->ubtc; +	data[++i] = stats->mbrc; +	data[++i] = stats->mbtc; +	data[++i] = stats->bbrc; +	data[++i] = stats->bbtc; +	data[++i] = stats->ubrc + stats->mbrc + stats->bbrc; +	data[++i] = stats->ubtc + stats->mbtc + stats->bbtc; +	data[++i] = stats->dma_pkt_rc; +	data[++i] = stats->dma_pkt_tc; +	data[++i] = stats->dma_oct_rc; +	data[++i] = stats->dma_oct_tc; +	data[++i] = stats->dpc; + +	i++; + +	data += i;  	count = 0U;  	for (i = 0U, aq_vec = self->aq_vec[0]; @@ -768,7 +783,20 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)  	}  err_exit:; -	(void)err; +} + +static void aq_nic_update_ndev_stats(struct aq_nic_s *self) +{ +	struct net_device *ndev = self->ndev; +	struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw); + +	ndev->stats.rx_packets = stats->uprc + stats->mprc + stats->bprc; +	ndev->stats.rx_bytes = stats->ubrc + stats->mbrc + stats->bbrc; +	ndev->stats.rx_errors = stats->erpr; +	ndev->stats.tx_packets = stats->uptc + stats->mptc + stats->bptc; +	ndev->stats.tx_bytes = stats->ubtc + stats->mbtc + stats->bbtc; +	ndev->stats.tx_errors = stats->erpt; +	ndev->stats.multicast = stats->mprc;  }  void aq_nic_get_link_ksettings(struct aq_nic_s *self, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 4309983acdd6..3c9f8db03d5f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -71,7 +71,7 @@ struct aq_nic_cfg_s {  struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,  				   const struct ethtool_ops *et_ops, -				   struct device *dev, +				   struct pci_dev *pdev,  				   struct aq_pci_func_s *aq_pci_func,  				   unsigned int port,  				   const struct aq_hw_ops *aq_hw_ops); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index cadaa646c89f..58c29d04b186 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -51,7 +51,8 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,  	pci_set_drvdata(pdev, self);  	self->pdev = pdev; -	err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps); +	err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps, pdev->device, +				     pdev->subsystem_device);  	if (err < 0)  		goto err_exit; @@ -59,7 +60,7 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,  	for (port = 0; port < self->ports; ++port) {  		struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops, -	
						    &pdev->dev, self, +							    pdev, self,  							    port, aq_hw_ops);  		if (!aq_nic) { diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index 07b3c49a16a4..f18dce14c93c 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -18,9 +18,20 @@  #include "hw_atl_a0_internal.h"  static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self, -				 struct aq_hw_caps_s *aq_hw_caps) +				 struct aq_hw_caps_s *aq_hw_caps, +				 unsigned short device, +				 unsigned short subsystem_device)  {  	memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps)); + +	if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001) +		aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G; + +	if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) { +		aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G; +		aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_5G; +	} +  	return 0;  } @@ -333,6 +344,10 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self,  	hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss);  	hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); +	/* Reset link status and read out initial hardware counters */ +	self->aq_link_status.mbps = 0; +	hw_atl_utils_update_stats(self); +  	err = aq_hw_err_from_flags(self);  	if (err < 0)  		goto err_exit; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index ec68c20efcbd..e4a22ce7bf09 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -16,11 +16,23 @@  #include "hw_atl_utils.h"  #include "hw_atl_llh.h"  #include "hw_atl_b0_internal.h" +#include "hw_atl_llh_internal.h"  static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self, -				 struct aq_hw_caps_s *aq_hw_caps) +				 struct aq_hw_caps_s *aq_hw_caps, +				 unsigned short device, +				 unsigned short subsystem_device)  {  	memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps)); + +	if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001) +		aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G; + +	if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) { +		aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G; +		aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_5G; +	} +  	return 0;  } @@ -357,6 +369,7 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,  	};  	int err = 0; +	u32 val;  	self->aq_nic_cfg = aq_nic_cfg; @@ -374,6 +387,20 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,  	hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);  	hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); +	/* Force limit MRRS on RDM/TDM to 2K */ +	val = aq_hw_read_reg(self, pci_reg_control6_adr); +	aq_hw_write_reg(self, pci_reg_control6_adr, (val & ~0x707) | 0x404); + +	/* TX DMA total request limit. B0 hardware is not capable to +	 * handle more than (8K-MRRS) incoming DMA data. 
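The hw_atl_b0_hw_init() hunk just above caps the max read request size with a read-modify-write of a PCI control register. Below is a hedged sketch of that pattern only; the offset and mask values are copied from the patch (pci_reg_control6_adr is defined as 0x1014 later in this diff), their exact field layout is not documented here, and the example_* names are illustrative.

#include <linux/io.h>

#define EXAMPLE_CTRL6		0x1014	/* offset per the patch's pci_reg_control6_adr */
#define EXAMPLE_MRRS_MASK	0x707	/* field bits cleared by the patch */
#define EXAMPLE_MRRS_2K		0x404	/* value the patch forces (2K MRRS) */

static void example_limit_mrrs(void __iomem *mmio)
{
	u32 val;

	val = readl(mmio + EXAMPLE_CTRL6);
	val &= ~EXAMPLE_MRRS_MASK;	/* clear the current request-size field */
	val |= EXAMPLE_MRRS_2K;		/* force the 2K setting */
	writel(val, mmio + EXAMPLE_CTRL6);
}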
+	 * Value 24 in 256byte units +	 */ +	aq_hw_write_reg(self, tx_dma_total_req_limit_adr, 24); + +	/* Reset link status and read out initial hardware counters */ +	self->aq_link_status.mbps = 0; +	hw_atl_utils_update_stats(self); +  	err = aq_hw_err_from_flags(self);  	if (err < 0)  		goto err_exit; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h index 5527fc0e5942..93450ec930e8 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -2343,6 +2343,9 @@  #define tx_dma_desc_base_addrmsw_adr(descriptor) \  			(0x00007c04u + (descriptor) * 0x40) +/* tx dma total request limit */ +#define tx_dma_total_req_limit_adr 0x00007b20u +  /* tx interrupt moderation control register definitions   * Preprocessor definitions for TX Interrupt Moderation Control Register   * Base Address: 0x00008980 @@ -2369,6 +2372,9 @@  /* default value of bitfield reg_res_dsbl */  #define pci_reg_res_dsbl_default 0x1 +/* PCI core control register */ +#define pci_reg_control6_adr 0x1014u +  /* global microprocessor scratch pad definitions */  #define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4) diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index 1fe016fc4bc7..f2ce12ed4218 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -503,73 +503,43 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self)  	struct hw_atl_s *hw_self = PHAL_ATLANTIC;  	struct hw_aq_atl_utils_mbox mbox; -	if (!self->aq_link_status.mbps) -		return 0; -  	hw_atl_utils_mpi_read_stats(self, &mbox);  #define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \  			mbox.stats._N_ - hw_self->last_stats._N_) - -	AQ_SDELTA(uprc); -	AQ_SDELTA(mprc); -	AQ_SDELTA(bprc); -	AQ_SDELTA(erpt); - -	AQ_SDELTA(uptc); -	AQ_SDELTA(mptc); -	AQ_SDELTA(bptc); -	AQ_SDELTA(erpr); - -	AQ_SDELTA(ubrc); -	AQ_SDELTA(ubtc); -	AQ_SDELTA(mbrc); -	AQ_SDELTA(mbtc); -	AQ_SDELTA(bbrc); -	AQ_SDELTA(bbtc); -	AQ_SDELTA(dpc); - +	if (self->aq_link_status.mbps) { +		AQ_SDELTA(uprc); +		AQ_SDELTA(mprc); +		AQ_SDELTA(bprc); +		AQ_SDELTA(erpt); + +		AQ_SDELTA(uptc); +		AQ_SDELTA(mptc); +		AQ_SDELTA(bptc); +		AQ_SDELTA(erpr); + +		AQ_SDELTA(ubrc); +		AQ_SDELTA(ubtc); +		AQ_SDELTA(mbrc); +		AQ_SDELTA(mbtc); +		AQ_SDELTA(bbrc); +		AQ_SDELTA(bbtc); +		AQ_SDELTA(dpc); +	}  #undef AQ_SDELTA +	hw_self->curr_stats.dma_pkt_rc = stats_rx_dma_good_pkt_counterlsw_get(self); +	hw_self->curr_stats.dma_pkt_tc = stats_tx_dma_good_pkt_counterlsw_get(self); +	hw_self->curr_stats.dma_oct_rc = stats_rx_dma_good_octet_counterlsw_get(self); +	hw_self->curr_stats.dma_oct_tc = stats_tx_dma_good_octet_counterlsw_get(self);  	memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats));  	return 0;  } -int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, -			      u64 *data, unsigned int *p_count) +struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self)  { -	struct hw_atl_s *hw_self = PHAL_ATLANTIC; -	struct hw_atl_stats_s *stats = &hw_self->curr_stats; -	int i = 0; - -	data[i] = stats->uprc + stats->mprc + stats->bprc; -	data[++i] = stats->uprc; -	data[++i] = stats->mprc; -	data[++i] = stats->bprc; -	data[++i] = stats->erpt; -	data[++i] = stats->uptc + stats->mptc + stats->bptc; -	data[++i] = stats->uptc; -	
data[++i] = stats->mptc; -	data[++i] = stats->bptc; -	data[++i] = stats->ubrc; -	data[++i] = stats->ubtc; -	data[++i] = stats->mbrc; -	data[++i] = stats->mbtc; -	data[++i] = stats->bbrc; -	data[++i] = stats->bbtc; -	data[++i] = stats->ubrc + stats->mbrc + stats->bbrc; -	data[++i] = stats->ubtc + stats->mbtc + stats->bbtc; -	data[++i] = stats_rx_dma_good_pkt_counterlsw_get(self); -	data[++i] = stats_tx_dma_good_pkt_counterlsw_get(self); -	data[++i] = stats_rx_dma_good_octet_counterlsw_get(self); -	data[++i] = stats_tx_dma_good_octet_counterlsw_get(self); -	data[++i] = stats->dpc; - -	if (p_count) -		*p_count = ++i; - -	return 0; +	return &PHAL_ATLANTIC->curr_stats;  }  static const u32 hw_atl_utils_hw_mac_regs[] = { diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h index c99cc690e425..21aeca6908d3 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h @@ -129,7 +129,7 @@ struct __packed hw_aq_atl_utils_mbox {  struct __packed hw_atl_s {  	struct aq_hw_s base;  	struct hw_atl_stats_s last_stats; -	struct hw_atl_stats_s curr_stats; +	struct aq_stats_s curr_stats;  	u64 speed;  	unsigned int chip_features;  	u32 fw_ver_actual; @@ -207,8 +207,6 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);  int hw_atl_utils_update_stats(struct aq_hw_s *self); -int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, -			      u64 *data, -			      unsigned int *p_count); +struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self);  #endif /* HW_ATL_UTILS_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h index 0de858d215c2..9009f2651e70 100644 --- a/drivers/net/ethernet/aquantia/atlantic/ver.h +++ b/drivers/net/ethernet/aquantia/atlantic/ver.h @@ -11,8 +11,10 @@  #define VER_H  #define NIC_MAJOR_DRIVER_VERSION           1 -#define NIC_MINOR_DRIVER_VERSION           5 -#define NIC_BUILD_DRIVER_VERSION           345 +#define NIC_MINOR_DRIVER_VERSION           6 +#define NIC_BUILD_DRIVER_VERSION           13  #define NIC_REVISION_DRIVER_VERSION        0 +#define AQ_CFG_DRV_VERSION_SUFFIX "-kern" +  #endif /* VER_H */ diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h index 3c63b16d485f..d9efbc8d783b 100644 --- a/drivers/net/ethernet/arc/emac.h +++ b/drivers/net/ethernet/arc/emac.h @@ -159,6 +159,8 @@ struct arc_emac_priv {  	unsigned int link;  	unsigned int duplex;  	unsigned int speed; + +	unsigned int rx_missed_errors;  };  /** diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 3241af1ce718..bd277b0dc615 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -26,6 +26,8 @@  #include "emac.h" +static void arc_emac_restart(struct net_device *ndev); +  /**   * arc_emac_tx_avail - Return the number of available slots in the tx ring.   * @priv: Pointer to ARC EMAC private data structure. 
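The hw_atl_utils_update_stats() rework above keeps the same delta-accumulation idea: the firmware counters are free-running, so the driver folds (current - last) into its own running totals, applies the deltas only while the link is reported up, and always refreshes the baseline snapshot. A small generic sketch of that pattern, with illustrative struct and field names rather than the atlantic driver's own:

#include <linux/types.h>

/* Free-running firmware counters (subset, names illustrative). */
struct example_fw_counters {
	u64 uprc;	/* unicast packets received */
	u64 mprc;	/* multicast packets received */
	u64 bprc;	/* broadcast packets received */
};

struct example_hw {
	struct example_fw_counters last;	/* previous snapshot */
	struct example_fw_counters curr;	/* accumulated totals */
	bool link_up;
};

static void example_update_stats(struct example_hw *hw,
				 const struct example_fw_counters *mbox)
{
#define EX_SDELTA(f) (hw->curr.f += mbox->f - hw->last.f)
	/* deltas are only folded in while the link is up, as in the patch */
	if (hw->link_up) {
		EX_SDELTA(uprc);
		EX_SDELTA(mprc);
		EX_SDELTA(bprc);
	}
#undef EX_SDELTA

	/* the baseline snapshot is refreshed unconditionally */
	hw->last = *mbox;
}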
@@ -210,39 +212,48 @@ static int arc_emac_rx(struct net_device *ndev, int budget)  			continue;  		} -		pktlen = info & LEN_MASK; -		stats->rx_packets++; -		stats->rx_bytes += pktlen; -		skb = rx_buff->skb; -		skb_put(skb, pktlen); -		skb->dev = ndev; -		skb->protocol = eth_type_trans(skb, ndev); - -		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr), -				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE); - -		/* Prepare the BD for next cycle */ -		rx_buff->skb = netdev_alloc_skb_ip_align(ndev, -							 EMAC_BUFFER_SIZE); -		if (unlikely(!rx_buff->skb)) { +		/* Prepare the BD for next cycle. netif_receive_skb() +		 * only if new skb was allocated and mapped to avoid holes +		 * in the RX fifo. +		 */ +		skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE); +		if (unlikely(!skb)) { +			if (net_ratelimit()) +				netdev_err(ndev, "cannot allocate skb\n"); +			/* Return ownership to EMAC */ +			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);  			stats->rx_errors++; -			/* Because receive_skb is below, increment rx_dropped */  			stats->rx_dropped++;  			continue;  		} -		/* receive_skb only if new skb was allocated to avoid holes */ -		netif_receive_skb(skb); - -		addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data, +		addr = dma_map_single(&ndev->dev, (void *)skb->data,  				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);  		if (dma_mapping_error(&ndev->dev, addr)) {  			if (net_ratelimit()) -				netdev_err(ndev, "cannot dma map\n"); -			dev_kfree_skb(rx_buff->skb); +				netdev_err(ndev, "cannot map dma buffer\n"); +			dev_kfree_skb(skb); +			/* Return ownership to EMAC */ +			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);  			stats->rx_errors++; +			stats->rx_dropped++;  			continue;  		} + +		/* unmap previosly mapped skb */ +		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr), +				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE); + +		pktlen = info & LEN_MASK; +		stats->rx_packets++; +		stats->rx_bytes += pktlen; +		skb_put(rx_buff->skb, pktlen); +		rx_buff->skb->dev = ndev; +		rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev); + +		netif_receive_skb(rx_buff->skb); + +		rx_buff->skb = skb;  		dma_unmap_addr_set(rx_buff, addr, addr);  		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE); @@ -259,6 +270,53 @@ static int arc_emac_rx(struct net_device *ndev, int budget)  }  /** + * arc_emac_rx_miss_handle - handle R_MISS register + * @ndev:	Pointer to the net_device structure. + */ +static void arc_emac_rx_miss_handle(struct net_device *ndev) +{ +	struct arc_emac_priv *priv = netdev_priv(ndev); +	struct net_device_stats *stats = &ndev->stats; +	unsigned int miss; + +	miss = arc_reg_get(priv, R_MISS); +	if (miss) { +		stats->rx_errors += miss; +		stats->rx_missed_errors += miss; +		priv->rx_missed_errors += miss; +	} +} + +/** + * arc_emac_rx_stall_check - check RX stall + * @ndev:	Pointer to the net_device structure. + * @budget:	How many BDs requested to process on 1 call. + * @work_done:	How many BDs processed + * + * Under certain conditions EMAC stop reception of incoming packets and + * continuously increment R_MISS register instead of saving data into + * provided buffer. This function detect that condition and restart + * EMAC. 
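The arc_emac_rx() rework above (and the boomerang_rx() change in 3c59x earlier in this diff) both move to a "replacement first" RX refill order: allocate and DMA-map the new buffer before anything else, hand the filled buffer to the stack only if that succeeds, and otherwise give the old buffer back to the hardware so the ring never develops a hole. A hedged sketch of that order with illustrative names, not any real driver's code:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

static void example_rx_one(struct net_device *ndev, struct device *dma_dev,
			   struct sk_buff **ring_skb, dma_addr_t *ring_dma,
			   unsigned int pktlen, unsigned int buf_sz)
{
	struct sk_buff *newskb;
	dma_addr_t newdma;

	newskb = netdev_alloc_skb_ip_align(ndev, buf_sz);
	if (!newskb)
		goto recycle;	/* keep the old buffer in the ring */

	newdma = dma_map_single(dma_dev, newskb->data, buf_sz, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, newdma)) {
		dev_kfree_skb(newskb);
		goto recycle;
	}

	/* replacement is in place, safe to hand the old buffer up now */
	dma_unmap_single(dma_dev, *ring_dma, buf_sz, DMA_FROM_DEVICE);
	skb_put(*ring_skb, pktlen);
	(*ring_skb)->protocol = eth_type_trans(*ring_skb, ndev);
	netif_receive_skb(*ring_skb);

	*ring_skb = newskb;
	*ring_dma = newdma;
	return;

recycle:
	ndev->stats.rx_dropped++;
	/* the descriptor is re-armed with the original buffer by the caller */
}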
+ */ +static void arc_emac_rx_stall_check(struct net_device *ndev, +				    int budget, unsigned int work_done) +{ +	struct arc_emac_priv *priv = netdev_priv(ndev); +	struct arc_emac_bd *rxbd; + +	if (work_done) +		priv->rx_missed_errors = 0; + +	if (priv->rx_missed_errors && budget) { +		rxbd = &priv->rxbd[priv->last_rx_bd]; +		if (le32_to_cpu(rxbd->info) & FOR_EMAC) { +			arc_emac_restart(ndev); +			priv->rx_missed_errors = 0; +		} +	} +} + +/**   * arc_emac_poll - NAPI poll handler.   * @napi:	Pointer to napi_struct structure.   * @budget:	How many BDs to process on 1 call. @@ -272,6 +330,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)  	unsigned int work_done;  	arc_emac_tx_clean(ndev); +	arc_emac_rx_miss_handle(ndev);  	work_done = arc_emac_rx(ndev, budget);  	if (work_done < budget) { @@ -279,6 +338,8 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)  		arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);  	} +	arc_emac_rx_stall_check(ndev, budget, work_done); +  	return work_done;  } @@ -320,6 +381,8 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)  		if (status & MSER_MASK) {  			stats->rx_missed_errors += 0x100;  			stats->rx_errors += 0x100; +			priv->rx_missed_errors += 0x100; +			napi_schedule(&priv->napi);  		}  		if (status & RXCR_MASK) { @@ -732,6 +795,63 @@ static int arc_emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)  } +/** + * arc_emac_restart - Restart EMAC + * @ndev:	Pointer to net_device structure. + * + * This function do hardware reset of EMAC in order to restore + * network packets reception. + */ +static void arc_emac_restart(struct net_device *ndev) +{ +	struct arc_emac_priv *priv = netdev_priv(ndev); +	struct net_device_stats *stats = &ndev->stats; +	int i; + +	if (net_ratelimit()) +		netdev_warn(ndev, "restarting stalled EMAC\n"); + +	netif_stop_queue(ndev); + +	/* Disable interrupts */ +	arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK); + +	/* Disable EMAC */ +	arc_reg_clr(priv, R_CTRL, EN_MASK); + +	/* Return the sk_buff to system */ +	arc_free_tx_queue(ndev); + +	/* Clean Tx BD's */ +	priv->txbd_curr = 0; +	priv->txbd_dirty = 0; +	memset(priv->txbd, 0, TX_RING_SZ); + +	for (i = 0; i < RX_BD_NUM; i++) { +		struct arc_emac_bd *rxbd = &priv->rxbd[i]; +		unsigned int info = le32_to_cpu(rxbd->info); + +		if (!(info & FOR_EMAC)) { +			stats->rx_errors++; +			stats->rx_dropped++; +		} +		/* Return ownership to EMAC */ +		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE); +	} +	priv->last_rx_bd = 0; + +	/* Make sure info is visible to EMAC before enable */ +	wmb(); + +	/* Enable interrupts */ +	arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK); + +	/* Enable EMAC */ +	arc_reg_or(priv, R_CTRL, EN_MASK); + +	netif_start_queue(ndev); +} +  static const struct net_device_ops arc_emac_netdev_ops = {  	.ndo_open		= arc_emac_open,  	.ndo_stop		= arc_emac_stop, diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c index e278e3d96ee0..16f9bee992fe 100644 --- a/drivers/net/ethernet/arc/emac_rockchip.c +++ b/drivers/net/ethernet/arc/emac_rockchip.c @@ -199,9 +199,11 @@ static int emac_rockchip_probe(struct platform_device *pdev)  	/* RMII interface needs always a rate of 50MHz */  	err = clk_set_rate(priv->refclk, 50000000); -	if (err) +	if (err) {  		dev_err(dev,  			"failed to change reference clock rate (%d)\n", err); +		goto out_regulator_disable; +	}  	if (priv->soc_data->need_div_macclk) {  		priv->macclk = devm_clk_get(dev, 
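The emac_rockchip_probe() changes above and just below complete the usual goto-ladder unwind: every acquired resource gets its own label, released in reverse order, so a failure at any step undoes exactly what was taken so far (here, the new out_clk_disable_macclk label before out_regulator_disable). A hedged sketch of that shape with an illustrative resource set, not the driver's full probe:

#include <linux/clk.h>
#include <linux/regulator/consumer.h>

static int example_probe_resources(struct regulator *reg,
				   struct clk *refclk, struct clk *macclk)
{
	int err;

	err = regulator_enable(reg);
	if (err)
		return err;

	err = clk_prepare_enable(refclk);
	if (err)
		goto out_regulator_disable;

	err = clk_set_rate(refclk, 50000000);	/* RMII needs 50 MHz */
	if (err)
		goto out_clk_disable_refclk;

	err = clk_prepare_enable(macclk);
	if (err)
		goto out_clk_disable_refclk;

	return 0;

out_clk_disable_refclk:
	clk_disable_unprepare(refclk);
out_regulator_disable:
	regulator_disable(reg);
	return err;
}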
"macclk"); @@ -220,19 +222,24 @@ static int emac_rockchip_probe(struct platform_device *pdev)  		/* RMII TX/RX needs always a rate of 25MHz */  		err = clk_set_rate(priv->macclk, 25000000); -		if (err) +		if (err) {  			dev_err(dev,  				"failed to change mac clock rate (%d)\n", err); +			goto out_clk_disable_macclk; +		}  	}  	err = arc_emac_probe(ndev, interface);  	if (err) {  		dev_err(dev, "failed to probe arc emac (%d)\n", err); -		goto out_regulator_disable; +		goto out_clk_disable_macclk;  	}  	return 0; +out_clk_disable_macclk: +	if (priv->soc_data->need_div_macclk) +		clk_disable_unprepare(priv->macclk);  out_regulator_disable:  	if (priv->regulator)  		regulator_disable(priv->regulator); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 4c739d5355d2..8ae269ec17a1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -3030,7 +3030,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)  	del_timer_sync(&bp->timer); -	if (IS_PF(bp)) { +	if (IS_PF(bp) && !BP_NOMCP(bp)) {  		/* Set ALWAYS_ALIVE bit in shmem */  		bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;  		bnx2x_drv_pulse(bp); @@ -3116,7 +3116,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)  	bp->cnic_loaded = false;  	/* Clear driver version indication in shmem */ -	if (IS_PF(bp)) +	if (IS_PF(bp) && !BP_NOMCP(bp))  		bnx2x_update_mng_version(bp);  	/* Check if there are pending parity attentions. If there are - set diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 91e2a7560b48..ddd5d3ebd201 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -9578,6 +9578,15 @@ static int bnx2x_init_shmem(struct bnx2x *bp)  	do {  		bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); + +		/* If we read all 0xFFs, means we are in PCI error state and +		 * should bail out to avoid crashes on adapter's FW reads. +		 */ +		if (bp->common.shmem_base == 0xFFFFFFFF) { +			bp->flags |= NO_MCP_FLAG; +			return -ENODEV; +		} +  		if (bp->common.shmem_base) {  			val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);  			if (val & SHR_MEM_VALIDITY_MB) @@ -14320,7 +14329,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)  		BNX2X_ERR("IO slot reset --> driver unload\n");  		/* MCP should have been reset; Need to wait for validity */ -		bnx2x_init_shmem(bp); +		if (bnx2x_init_shmem(bp)) { +			rtnl_unlock(); +			return PCI_ERS_RESULT_DISCONNECT; +		}  		if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {  			u32 v; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index c5c38d4b7d1c..61ca4eb7c6fa 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1883,7 +1883,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)  			 * here forever if we consistently cannot allocate  			 * buffers.  			 
*/ -			else if (rc == -ENOMEM) +			else if (rc == -ENOMEM && budget)  				rx_pkts++;  			else if (rc == -EBUSY)	/* partial completion */  				break; @@ -1969,7 +1969,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)  				cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);  			rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); -			if (likely(rc == -EIO)) +			if (likely(rc == -EIO) && budget)  				rx_pkts++;  			else if (rc == -EBUSY)	/* partial completion */  				break; @@ -3368,6 +3368,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,  	u16 cp_ring_id, len = 0;  	struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;  	u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; +	struct hwrm_short_input short_input = {0};  	req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);  	memset(resp, 0, PAGE_SIZE); @@ -3376,7 +3377,6 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,  	if (bp->flags & BNXT_FLAG_SHORT_CMD) {  		void *short_cmd_req = bp->hwrm_short_cmd_req_addr; -		struct hwrm_short_input short_input = {0};  		memcpy(short_cmd_req, req, msg_len);  		memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN - @@ -8263,8 +8263,9 @@ static void bnxt_shutdown(struct pci_dev *pdev)  	if (netif_running(dev))  		dev_close(dev); +	bnxt_ulp_shutdown(bp); +  	if (system_state == SYSTEM_POWER_OFF) { -		bnxt_ulp_shutdown(bp);  		bnxt_clear_int_mode(bp);  		pci_wake_from_d3(pdev, bp->wol);  		pci_set_power_state(pdev, PCI_D3hot); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 7ce1d4b7e67d..b13ce5ebde8d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -2136,8 +2136,8 @@ static int bnxt_get_module_eeprom(struct net_device *dev,  	/* Read A2 portion of the EEPROM */  	if (length) {  		start -= ETH_MODULE_SFF_8436_LEN; -		bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, start, -						 length, data); +		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, +						      start, length, data);  	}  	return rc;  } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 5ee18660bc33..c9617675f934 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -70,7 +70,7 @@ static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)  		netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");  		return -EINVAL;  	} -	if (vf_id >= bp->pf.max_vfs) { +	if (vf_id >= bp->pf.active_vfs) {  		netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);  		return -EINVAL;  	} diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index d5031f436f83..d8fee26cd45e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -56,7 +56,6 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,  {  	int ifindex = tcf_mirred_ifindex(tc_act);  	struct net_device *dev; -	u16 dst_fid;  	dev = __dev_get_by_index(dev_net(bp->dev), ifindex);  	if (!dev) { @@ -64,15 +63,7 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,  		return -EINVAL;  	} -	/* find the FID from dev */ -	dst_fid = bnxt_flow_get_dst_fid(bp, dev); -	if (dst_fid == BNXT_FID_INVALID) { -		netdev_info(bp->dev, "can't get fid for ifindex=%d", ifindex); -		return -EINVAL; -	} -  	actions->flags |= BNXT_TC_ACTION_FLAG_FWD; -	actions->dst_fid = dst_fid;  	actions->dst_dev 
= dev;  	return 0;  } @@ -160,13 +151,17 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,  	if (rc)  		return rc; -	/* Tunnel encap/decap action must be accompanied by a redirect action */ -	if ((actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP || -	     actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP) && -	    !(actions->flags & BNXT_TC_ACTION_FLAG_FWD)) { -		netdev_info(bp->dev, -			    "error: no redir action along with encap/decap"); -		return -EINVAL; +	if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) { +		if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) { +			/* dst_fid is PF's fid */ +			actions->dst_fid = bp->pf.fw_fid; +		} else { +			/* find the FID from dst_dev */ +			actions->dst_fid = +				bnxt_flow_get_dst_fid(bp, actions->dst_dev); +			if (actions->dst_fid == BNXT_FID_INVALID) +				return -EINVAL; +		}  	}  	return rc; @@ -426,7 +421,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,  	}  	/* If all IP and L4 fields are wildcarded then this is an L2 flow */ -	if (is_wildcard(&l3_mask, sizeof(l3_mask)) && +	if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&  	    is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {  		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;  	} else { @@ -532,10 +527,8 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,  	}  	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) { -		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR | -			   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR; +		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR;  		ether_addr_copy(req.dst_macaddr, l2_info->dmac); -		ether_addr_copy(req.src_macaddr, l2_info->smac);  	}  	if (l2_info->num_vlans) {  		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID; @@ -901,10 +894,10 @@ static void bnxt_tc_put_decap_handle(struct bnxt *bp,  static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,  				       struct ip_tunnel_key *tun_key, -				       struct bnxt_tc_l2_key *l2_info, -				       struct net_device *real_dst_dev) +				       struct bnxt_tc_l2_key *l2_info)  {  #ifdef CONFIG_INET +	struct net_device *real_dst_dev = bp->dev;  	struct flowi4 flow = { {0} };  	struct net_device *dst_dev;  	struct neighbour *nbr; @@ -1008,14 +1001,13 @@ static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,  	 */  	tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src;  	tun_key.tp_dst = flow->tun_key.tp_dst; -	rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info, bp->dev); +	rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info);  	if (rc)  		goto put_decap; -	decap_key->ttl = tun_key.ttl;  	decap_l2_info = &decap_node->l2_info; +	/* decap smac is wildcarded */  	ether_addr_copy(decap_l2_info->dmac, l2_info.smac); -	ether_addr_copy(decap_l2_info->smac, l2_info.dmac);  	if (l2_info.num_vlans) {  		decap_l2_info->num_vlans = l2_info.num_vlans;  		decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid; @@ -1095,8 +1087,7 @@ static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,  	if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)  		goto done; -	rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info, -					 flow->actions.dst_dev); +	rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info);  	if (rc)  		goto put_encap; @@ -1169,6 +1160,15 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,  	return 0;  } +static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow, +				u16 src_fid) +{ +	if (flow->actions.flags & 
BNXT_TC_ACTION_FLAG_TUNNEL_DECAP) +		flow->src_fid = bp->pf.fw_fid; +	else +		flow->src_fid = src_fid; +} +  /* Add a new flow or replace an existing flow.   * Notes on locking:   * There are essentially two critical sections here. @@ -1204,7 +1204,8 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,  	rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);  	if (rc)  		goto free_node; -	flow->src_fid = src_fid; + +	bnxt_tc_set_src_fid(bp, flow, src_fid);  	if (!bnxt_tc_can_offload(bp, flow)) {  		rc = -ENOSPC; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index de51c2177d03..8995cfefbfcf 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -4,11 +4,13 @@   * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller ([email protected])   * Copyright (C) 2001, 2002, 2003 Jeff Garzik ([email protected])   * Copyright (C) 2004 Sun Microsystems Inc. - * Copyright (C) 2005-2014 Broadcom Corporation. + * Copyright (C) 2005-2016 Broadcom Corporation. + * Copyright (C) 2016-2017 Broadcom Limited.   *   * Firmware is:   *	Derived from proprietary unpublished source code, - *	Copyright (C) 2000-2003 Broadcom Corporation. + *	Copyright (C) 2000-2016 Broadcom Corporation. + *	Copyright (C) 2016-2017 Broadcom Ltd.   *   *	Permission is hereby granted for the distribution of this firmware   *	data in hexadecimal or equivalent format, provided this copyright @@ -10052,6 +10054,16 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)  	tw32(GRC_MODE, tp->grc_mode | val); +	/* On one of the AMD platform, MRRS is restricted to 4000 because of +	 * south bridge limitation. As a workaround, Driver is setting MRRS +	 * to 2048 instead of default 4096. +	 */ +	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && +	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) { +		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK; +		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048); +	} +  	/* Setup the timer prescalar register.  Clock is always 66Mhz. */  	val = tr32(GRC_MISC_CFG);  	val &= ~0xff; @@ -14225,7 +14237,10 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)  	/* Reset PHY, otherwise the read DMA engine will be in a mode that  	 * breaks all requests to 256 bytes.  	 */ -	if (tg3_asic_rev(tp) == ASIC_REV_57766) +	if (tg3_asic_rev(tp) == ASIC_REV_57766 || +	    tg3_asic_rev(tp) == ASIC_REV_5717 || +	    tg3_asic_rev(tp) == ASIC_REV_5719 || +	    tg3_asic_rev(tp) == ASIC_REV_5720)  		reset_phy = true;  	err = tg3_restart_hw(tp, reset_phy); diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index c2d02d02d1e6..1f0271fa7c74 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -5,7 +5,8 @@   * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller ([email protected])   * Copyright (C) 2001 Jeff Garzik ([email protected])   * Copyright (C) 2004 Sun Microsystems Inc. - * Copyright (C) 2007-2014 Broadcom Corporation. + * Copyright (C) 2007-2016 Broadcom Corporation. + * Copyright (C) 2016-2017 Broadcom Limited.   
*/  #ifndef _T3_H @@ -96,6 +97,7 @@  #define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR		0x0106  #define TG3PCI_SUBDEVICE_ID_DELL_MERLOT		0x0109  #define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT	0x010a +#define TG3PCI_SUBDEVICE_ID_DELL_5762		0x07f0  #define TG3PCI_SUBVENDOR_ID_COMPAQ		PCI_VENDOR_ID_COMPAQ  #define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE	0x007c  #define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2	0x009a @@ -281,6 +283,9 @@  #define TG3PCI_STD_RING_PROD_IDX	0x00000098 /* 64-bit */  #define TG3PCI_RCV_RET_RING_CON_IDX	0x000000a0 /* 64-bit */  /* 0xa8 --> 0xb8 unused */ +#define TG3PCI_DEV_STATUS_CTRL		0x000000b4 +#define  MAX_READ_REQ_SIZE_2048		 0x00004000 +#define  MAX_READ_REQ_MASK		 0x00007000  #define TG3PCI_DUAL_MAC_CTRL		0x000000b8  #define  DUAL_MAC_CTRL_CH_MASK		 0x00000003  #define  DUAL_MAC_CTRL_ID		 0x00000004 diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 6aa0eee88ea5..a5eecd895a82 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1113,7 +1113,7 @@ static int liquidio_watchdog(void *param)  				dev_err(&oct->pci_dev->dev,  					"ERROR: Octeon core %d crashed or got stuck!  See oct-fwdump for details.\n",  					core); -					err_msg_was_printed[core] = true; +				err_msg_was_printed[core] = true;  			}  		} diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index d4496e9afcdf..a3d12dbde95b 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -1355,7 +1355,8 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,  	/* Offload checksum calculation to HW */  	if (skb->ip_summed == CHECKSUM_PARTIAL) { -		hdr->csum_l3 = 1; /* Enable IP csum calculation */ +		if (ip.v4->version == 4) +			hdr->csum_l3 = 1; /* Enable IP csum calculation */  		hdr->l3_offset = skb_network_offset(skb);  		hdr->l4_offset = skb_transport_offset(skb); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 6f9fa6e3c42a..d8424ed16c33 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -344,7 +344,6 @@ struct adapter_params {  	unsigned int sf_size;             /* serial flash size in bytes */  	unsigned int sf_nsec;             /* # of flash sectors */ -	unsigned int sf_fw_start;         /* start of FW image in flash */  	unsigned int fw_vers;		  /* firmware version */  	unsigned int bs_vers;		  /* bootstrap version */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index f63210f15579..375ef86a84da 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -2844,8 +2844,6 @@ enum {  	SF_RD_DATA_FAST = 0xb,        /* read flash */  	SF_RD_ID        = 0x9f,       /* read ID */  	SF_ERASE_SECTOR = 0xd8,       /* erase sector */ - -	FW_MAX_SIZE = 16 * SF_SEC_SIZE,  };  /** @@ -3558,8 +3556,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)  	const __be32 *p = (const __be32 *)fw_data;  	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;  	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; -	unsigned int fw_img_start = adap->params.sf_fw_start; -	unsigned int fw_start_sec = fw_img_start / sf_sec_size; +	unsigned int fw_start_sec = FLASH_FW_START_SEC; +	unsigned int 
fw_size = FLASH_FW_MAX_SIZE; +	unsigned int fw_start = FLASH_FW_START;  	if (!size) {  		dev_err(adap->pdev_dev, "FW image has no data\n"); @@ -3575,9 +3574,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)  			"FW image size differs from size in FW header\n");  		return -EINVAL;  	} -	if (size > FW_MAX_SIZE) { +	if (size > fw_size) {  		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n", -			FW_MAX_SIZE); +			fw_size);  		return -EFBIG;  	}  	if (!t4_fw_matches_chip(adap, hdr)) @@ -3604,11 +3603,11 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)  	 */  	memcpy(first_page, fw_data, SF_PAGE_SIZE);  	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff); -	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page); +	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);  	if (ret)  		goto out; -	addr = fw_img_start; +	addr = fw_start;  	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {  		addr += SF_PAGE_SIZE;  		fw_data += SF_PAGE_SIZE; @@ -3618,7 +3617,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)  	}  	ret = t4_write_flash(adap, -			     fw_img_start + offsetof(struct fw_hdr, fw_ver), +			     fw_start + offsetof(struct fw_hdr, fw_ver),  			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);  out:  	if (ret) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 610573855213..a74300a4459c 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -818,6 +818,12 @@ static void fec_enet_bd_init(struct net_device *dev)  		for (i = 0; i < txq->bd.ring_size; i++) {  			/* Initialize the BD for every fragment in the page. */  			bdp->cbd_sc = cpu_to_fec16(0); +			if (bdp->cbd_bufaddr && +			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) +				dma_unmap_single(&fep->pdev->dev, +						 fec32_to_cpu(bdp->cbd_bufaddr), +						 fec16_to_cpu(bdp->cbd_datlen), +						 DMA_TO_DEVICE);  			if (txq->tx_skbuff[i]) {  				dev_kfree_skb_any(txq->tx_skbuff[i]);  				txq->tx_skbuff[i] = NULL; @@ -3463,6 +3469,10 @@ fec_probe(struct platform_device *pdev)  			goto failed_regulator;  		}  	} else { +		if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) { +			ret = -EPROBE_DEFER; +			goto failed_regulator; +		}  		fep->reg_phy = NULL;  	} @@ -3546,8 +3556,9 @@ failed_clk_ipg:  failed_clk:  	if (of_phy_is_fixed_link(np))  		of_phy_deregister_fixed_link(np); -failed_phy:  	of_node_put(phy_node); +failed_phy: +	dev_id--;  failed_ioremap:  	free_netdev(ndev); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 5be52d89b182..7f837006bb6a 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1378,9 +1378,11 @@ static int gfar_probe(struct platform_device *ofdev)  	gfar_init_addr_hash_table(priv); -	/* Insert receive time stamps into padding alignment bytes */ +	/* Insert receive time stamps into padding alignment bytes, and +	 * plus 2 bytes padding to ensure the cpu alignment. 
+	 */  	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) -		priv->padding = 8; +		priv->padding = 8 + DEFAULT_PADDING;  	if (dev->features & NETIF_F_IP_CSUM ||  	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) @@ -1790,6 +1792,7 @@ static int init_phy(struct net_device *dev)  		GFAR_SUPPORTED_GBIT : 0;  	phy_interface_t interface;  	struct phy_device *phydev; +	struct ethtool_eee edata;  	priv->oldlink = 0;  	priv->oldspeed = 0; @@ -1814,6 +1817,10 @@ static int init_phy(struct net_device *dev)  	/* Add support for flow control, but don't advertise it by default */  	phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); +	/* disable EEE autoneg, EEE not supported by eTSEC */ +	memset(&edata, 0, sizeof(struct ethtool_eee)); +	phy_ethtool_set_eee(phydev, &edata); +  	return 0;  } diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index 544114281ea7..9f8d4f8e57e3 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -319,11 +319,10 @@ static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)  	now = tmr_cnt_read(etsects);  	now += delta;  	tmr_cnt_write(etsects, now); +	set_fipers(etsects);  	spin_unlock_irqrestore(&etsects->lock, flags); -	set_fipers(etsects); -  	return 0;  } diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h index d7bdea79e9fa..8fd2458060a0 100644 --- a/drivers/net/ethernet/intel/e1000/e1000.h +++ b/drivers/net/ethernet/intel/e1000/e1000.h @@ -331,7 +331,8 @@ struct e1000_adapter {  enum e1000_state_t {  	__E1000_TESTING,  	__E1000_RESETTING, -	__E1000_DOWN +	__E1000_DOWN, +	__E1000_DISABLED  };  #undef pr_fmt diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c index 8172cf08cc33..3bac9df1c099 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.c +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c @@ -4307,8 +4307,10 @@ static void e1000_init_rx_addrs(struct e1000_hw *hw)  	rar_num = E1000_RAR_ENTRIES; -	/* Zero out the other 15 receive addresses. */ -	e_dbg("Clearing RAR[1-15]\n"); +	/* Zero out the following 14 receive addresses. 
RAR[15] is for +	 * manageability +	 */ +	e_dbg("Clearing RAR[1-14]\n");  	for (i = 1; i < rar_num; i++) {  		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);  		E1000_WRITE_FLUSH(); diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 1982f7917a8d..3dd4aeb2706d 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -945,7 +945,7 @@ static int e1000_init_hw_struct(struct e1000_adapter *adapter,  static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  {  	struct net_device *netdev; -	struct e1000_adapter *adapter; +	struct e1000_adapter *adapter = NULL;  	struct e1000_hw *hw;  	static int cards_found; @@ -955,6 +955,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	u16 tmp = 0;  	u16 eeprom_apme_mask = E1000_EEPROM_APME;  	int bars, need_ioport; +	bool disable_dev = false;  	/* do not allocate ioport bars when not needed */  	need_ioport = e1000_is_need_ioport(pdev); @@ -1259,11 +1260,13 @@ err_mdio_ioremap:  	iounmap(hw->ce4100_gbe_mdio_base_virt);  	iounmap(hw->hw_addr);  err_ioremap: +	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);  	free_netdev(netdev);  err_alloc_etherdev:  	pci_release_selected_regions(pdev, bars);  err_pci_reg: -	pci_disable_device(pdev); +	if (!adapter || disable_dev) +		pci_disable_device(pdev);  	return err;  } @@ -1281,6 +1284,7 @@ static void e1000_remove(struct pci_dev *pdev)  	struct net_device *netdev = pci_get_drvdata(pdev);  	struct e1000_adapter *adapter = netdev_priv(netdev);  	struct e1000_hw *hw = &adapter->hw; +	bool disable_dev;  	e1000_down_and_stop(adapter);  	e1000_release_manageability(adapter); @@ -1299,9 +1303,11 @@ static void e1000_remove(struct pci_dev *pdev)  		iounmap(hw->flash_address);  	pci_release_selected_regions(pdev, adapter->bars); +	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);  	free_netdev(netdev); -	pci_disable_device(pdev); +	if (disable_dev) +		pci_disable_device(pdev);  }  /** @@ -5156,7 +5162,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)  	if (netif_running(netdev))  		e1000_free_irq(adapter); -	pci_disable_device(pdev); +	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) +		pci_disable_device(pdev);  	return 0;  } @@ -5200,6 +5207,10 @@ static int e1000_resume(struct pci_dev *pdev)  		pr_err("Cannot enable PCI device from suspend\n");  		return err;  	} + +	/* flush memory to make sure state is correct */ +	smp_mb__before_atomic(); +	clear_bit(__E1000_DISABLED, &adapter->flags);  	pci_set_master(pdev);  	pci_enable_wake(pdev, PCI_D3hot, 0); @@ -5274,7 +5285,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,  	if (netif_running(netdev))  		e1000_down(adapter); -	pci_disable_device(pdev); + +	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) +		pci_disable_device(pdev);  	/* Request a slot slot reset. 
*/  	return PCI_ERS_RESULT_NEED_RESET; @@ -5302,6 +5315,10 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)  		pr_err("Cannot re-enable PCI device after reset.\n");  		return PCI_ERS_RESULT_DISCONNECT;  	} + +	/* flush memory to make sure state is correct */ +	smp_mb__before_atomic(); +	clear_bit(__E1000_DISABLED, &adapter->flags);  	pci_set_master(pdev);  	pci_enable_wake(pdev, PCI_D3hot, 0); diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index d6d4ed7acf03..31277d3bb7dc 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1367,6 +1367,9 @@ out:   *  Checks to see of the link status of the hardware has changed.  If a   *  change in link status has been detected, then we read the PHY registers   *  to get the current speed/duplex if link exists. + * + *  Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link + *  up).   **/  static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)  { @@ -1382,7 +1385,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)  	 * Change or Rx Sequence Error interrupt.  	 */  	if (!mac->get_link_status) -		return 0; +		return 1;  	/* First we want to see if the MII Status Register reports  	 * link.  If so, then we want to get the current speed/duplex @@ -1613,10 +1616,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)  	 * different link partner.  	 */  	ret_val = e1000e_config_fc_after_link_up(hw); -	if (ret_val) +	if (ret_val) {  		e_dbg("Error configuring flow control\n"); +		return ret_val; +	} -	return ret_val; +	return 1;  }  static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 67163ca898ba..00a36df02a3f 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -113,7 +113,8 @@  #define NVM_SIZE_MULTIPLIER 4096	/*multiplier for NVMS field */  #define E1000_FLASH_BASE_ADDR 0xE000	/*offset of NVM access regs */  #define E1000_CTRL_EXT_NVMVS 0x3	/*NVM valid sector */ -#define E1000_TARC0_CB_MULTIQ_3_REQ	(1 << 28 | 1 << 29) +#define E1000_TARC0_CB_MULTIQ_3_REQ	0x30000000 +#define E1000_TARC0_CB_MULTIQ_2_REQ	0x20000000  #define PCIE_ICH8_SNOOP_ALL	PCIE_NO_SNOOP_ALL  #define E1000_ICH_RAR_ENTRIES	7 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index f2f49239b015..9f18d39bdc8f 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3034,9 +3034,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)  		ew32(IOSFPC, reg_val);  		reg_val = er32(TARC(0)); -		/* SPT and KBL Si errata workaround to avoid Tx hang */ -		reg_val &= ~BIT(28); -		reg_val |= BIT(29); +		/* SPT and KBL Si errata workaround to avoid Tx hang. +		 * Dropping the number of outstanding requests from +		 * 3 to 2 in order to avoid a buffer overrun. 
+		 */ +		reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ; +		reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ;  		ew32(TARC(0), reg_val);  	}  } diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 4c08cc86463e..42dcaefc4c19 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1573,11 +1573,18 @@ static int i40e_set_mac(struct net_device *netdev, void *p)  	else  		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); +	/* Copy the address first, so that we avoid a possible race with +	 * .set_rx_mode(). If we copy after changing the address in the filter +	 * list, we might open ourselves to a narrow race window where +	 * .set_rx_mode could delete our dev_addr filter and prevent traffic +	 * from passing. +	 */ +	ether_addr_copy(netdev->dev_addr, addr->sa_data); +  	spin_lock_bh(&vsi->mac_filter_hash_lock);  	i40e_del_mac_filter(vsi, netdev->dev_addr);  	i40e_add_mac_filter(vsi, addr->sa_data);  	spin_unlock_bh(&vsi->mac_filter_hash_lock); -	ether_addr_copy(netdev->dev_addr, addr->sa_data);  	if (vsi->type == I40E_VSI_MAIN) {  		i40e_status ret; @@ -1923,6 +1930,14 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)  	struct i40e_netdev_priv *np = netdev_priv(netdev);  	struct i40e_vsi *vsi = np->vsi; +	/* Under some circumstances, we might receive a request to delete +	 * our own device address from our uc list. Because we store the +	 * device address in the VSI's MAC/VLAN filter list, we need to ignore +	 * such requests and not delete our device address from this list. +	 */ +	if (ether_addr_equal(addr, netdev->dev_addr)) +		return 0; +  	i40e_del_mac_filter(vsi, addr);  	return 0; @@ -6038,8 +6053,8 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)  	/* Set Bit 7 to be valid */  	mode = I40E_AQ_SET_SWITCH_BIT7_VALID; -	/* Set L4type to both TCP and UDP support */ -	mode |= I40E_AQ_SET_SWITCH_L4_TYPE_BOTH; +	/* Set L4type for TCP support */ +	mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;  	/* Set cloud filter mode */  	mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL; @@ -6969,18 +6984,18 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,  	     is_valid_ether_addr(filter->src_mac)) ||  	    (is_multicast_ether_addr(filter->dst_mac) &&  	     is_multicast_ether_addr(filter->src_mac))) -		return -EINVAL; +		return -EOPNOTSUPP; -	/* Make sure port is specified, otherwise bail out, for channel -	 * specific cloud filter needs 'L4 port' to be non-zero +	/* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP +	 * ports are not supported via big buffer now.  	 
*/ -	if (!filter->dst_port) -		return -EINVAL; +	if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP) +		return -EOPNOTSUPP;  	/* adding filter using src_port/src_ip is not supported at this stage */  	if (filter->src_port || filter->src_ipv4 ||  	    !ipv6_addr_any(&filter->ip.v6.src_ip6)) -		return -EINVAL; +		return -EOPNOTSUPP;  	/* copy element needed to add cloud filter from filter */  	i40e_set_cld_element(filter, &cld_filter.element); @@ -6991,7 +7006,7 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,  	    is_multicast_ether_addr(filter->src_mac)) {  		/* MAC + IP : unsupported mode */  		if (filter->dst_ipv4) -			return -EINVAL; +			return -EOPNOTSUPP;  		/* since we validated that L4 port must be valid before  		 * we get here, start with respective "flags" value @@ -7356,7 +7371,7 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,  	if (tc < 0) {  		dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n"); -		return -EINVAL; +		return -EOPNOTSUPP;  	}  	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || @@ -7401,7 +7416,6 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,  		dev_err(&pf->pdev->dev,  			"Failed to add cloud filter, err %s\n",  			i40e_stat_str(&pf->hw, err)); -		err = i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);  		goto err;  	} diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 4566d66ffc7c..5bc2748ac468 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -3047,10 +3047,30 @@ bool __i40e_chk_linearize(struct sk_buff *skb)  	/* Walk through fragments adding latest fragment, testing it, and  	 * then removing stale fragments from the sum.  	 */ -	stale = &skb_shinfo(skb)->frags[0]; -	for (;;) { +	for (stale = &skb_shinfo(skb)->frags[0];; stale++) { +		int stale_size = skb_frag_size(stale); +  		sum += skb_frag_size(frag++); +		/* The stale fragment may present us with a smaller +		 * descriptor than the actual fragment size. To account +		 * for that we need to remove all the data on the front and +		 * figure out what the remainder would be in the last +		 * descriptor associated with the fragment. 
+		 */ +		if (stale_size > I40E_MAX_DATA_PER_TXD) { +			int align_pad = -(stale->page_offset) & +					(I40E_MAX_READ_REQ_SIZE - 1); + +			sum -= align_pad; +			stale_size -= align_pad; + +			do { +				sum -= I40E_MAX_DATA_PER_TXD_ALIGNED; +				stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED; +			} while (stale_size > I40E_MAX_DATA_PER_TXD); +		} +  		/* if sum is negative we failed to make sufficient progress */  		if (sum < 0)  			return true; @@ -3058,7 +3078,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)  		if (!nr_frags--)  			break; -		sum -= skb_frag_size(stale++); +		sum -= stale_size;  	}  	return false; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index a3dc9b932946..36cb8e068e85 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2086,7 +2086,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)  	}  	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0, -				      (u8 *)vfres, sizeof(vfres)); +				      (u8 *)vfres, sizeof(*vfres));  }  /** diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 50864f99446d..1ba29bb85b67 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -2012,10 +2012,30 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)  	/* Walk through fragments adding latest fragment, testing it, and  	 * then removing stale fragments from the sum.  	 */ -	stale = &skb_shinfo(skb)->frags[0]; -	for (;;) { +	for (stale = &skb_shinfo(skb)->frags[0];; stale++) { +		int stale_size = skb_frag_size(stale); +  		sum += skb_frag_size(frag++); +		/* The stale fragment may present us with a smaller +		 * descriptor than the actual fragment size. To account +		 * for that we need to remove all the data on the front and +		 * figure out what the remainder would be in the last +		 * descriptor associated with the fragment. 
+		 */ +		if (stale_size > I40E_MAX_DATA_PER_TXD) { +			int align_pad = -(stale->page_offset) & +					(I40E_MAX_READ_REQ_SIZE - 1); + +			sum -= align_pad; +			stale_size -= align_pad; + +			do { +				sum -= I40E_MAX_DATA_PER_TXD_ALIGNED; +				stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED; +			} while (stale_size > I40E_MAX_DATA_PER_TXD); +		} +  		/* if sum is negative we failed to make sufficient progress */  		if (sum < 0)  			return true; @@ -2023,7 +2043,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)  		if (!nr_frags--)  			break; -		sum -= skb_frag_size(stale++); +		sum -= stale_size;  	}  	return false; diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index c9798210fa0f..0495487f7b42 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -344,7 +344,8 @@ static int orion_mdio_probe(struct platform_device *pdev)  			dev->regs + MVMDIO_ERR_INT_MASK);  	} else if (dev->err_interrupt == -EPROBE_DEFER) { -		return -EPROBE_DEFER; +		ret = -EPROBE_DEFER; +		goto out_mdio;  	}  	if (pdev->dev.of_node) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index bc93b69cfd1e..a539263cd79c 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1214,6 +1214,10 @@ static void mvneta_port_disable(struct mvneta_port *pp)  	val &= ~MVNETA_GMAC0_PORT_ENABLE;  	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); +	pp->link = 0; +	pp->duplex = -1; +	pp->speed = 0; +  	udelay(200);  } @@ -1958,9 +1962,9 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,  		if (!mvneta_rxq_desc_is_first_last(rx_status) ||  		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) { +			mvneta_rx_error(pp, rx_desc);  err_drop_frame:  			dev->stats.rx_errors++; -			mvneta_rx_error(pp, rx_desc);  			/* leave the descriptor untouched */  			continue;  		} @@ -3011,7 +3015,7 @@ static void mvneta_cleanup_rxqs(struct mvneta_port *pp)  {  	int queue; -	for (queue = 0; queue < txq_number; queue++) +	for (queue = 0; queue < rxq_number; queue++)  		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);  } diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 6c20e811f973..634b2f41cc9e 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -85,7 +85,7 @@  /* RSS Registers */  #define MVPP22_RSS_INDEX			0x1500 -#define     MVPP22_RSS_INDEX_TABLE_ENTRY(idx)	((idx) << 8) +#define     MVPP22_RSS_INDEX_TABLE_ENTRY(idx)	(idx)  #define     MVPP22_RSS_INDEX_TABLE(idx)		((idx) << 8)  #define     MVPP22_RSS_INDEX_QUEUE(idx)		((idx) << 16)  #define MVPP22_RSS_TABLE_ENTRY			0x1508 @@ -4629,11 +4629,6 @@ static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)  		       MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;  		val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;  		writel(val, port->base + MVPP22_GMAC_CTRL_4_REG); - -		val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); -		val |= MVPP2_GMAC_DISABLE_PADDING; -		val &= ~MVPP2_GMAC_FLOW_CTRL_MASK; -		writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);  	} else if (phy_interface_mode_is_rgmii(port->phy_interface)) {  		val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);  		val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL | @@ -4641,10 +4636,6 @@ static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)  		       MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;  		val &= ~MVPP22_CTRL4_DP_CLK_SEL;  		writel(val, port->base + MVPP22_GMAC_CTRL_4_REG); - -		val = readl(port->base + 
MVPP2_GMAC_CTRL_2_REG); -		val &= ~MVPP2_GMAC_DISABLE_PADDING; -		writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);  	}  	/* The port is connected to a copper PHY */ @@ -5607,7 +5598,7 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,  	u32 txq_dma;  	/* Allocate memory for TX descriptors */ -	aggr_txq->descs = dma_alloc_coherent(&pdev->dev, +	aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,  				MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,  				&aggr_txq->descs_dma, GFP_KERNEL);  	if (!aggr_txq->descs) @@ -5805,7 +5796,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,  						sizeof(*txq_pcpu->buffs),  						GFP_KERNEL);  		if (!txq_pcpu->buffs) -			goto cleanup; +			return -ENOMEM;  		txq_pcpu->count = 0;  		txq_pcpu->reserved_num = 0; @@ -5821,26 +5812,10 @@ static int mvpp2_txq_init(struct mvpp2_port *port,  					   &txq_pcpu->tso_headers_dma,  					   GFP_KERNEL);  		if (!txq_pcpu->tso_headers) -			goto cleanup; +			return -ENOMEM;  	}  	return 0; -cleanup: -	for_each_present_cpu(cpu) { -		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); -		kfree(txq_pcpu->buffs); - -		dma_free_coherent(port->dev->dev.parent, -				  txq_pcpu->size * TSO_HEADER_SIZE, -				  txq_pcpu->tso_headers, -				  txq_pcpu->tso_headers_dma); -	} - -	dma_free_coherent(port->dev->dev.parent, -			  txq->size * MVPP2_DESC_ALIGNED_SIZE, -			  txq->descs, txq->descs_dma); - -	return -ENOMEM;  }  /* Free allocated TXQ resources */ @@ -6867,6 +6842,12 @@ static int mvpp2_check_ringparam_valid(struct net_device *dev,  	else if (!IS_ALIGNED(ring->tx_pending, 32))  		new_tx_pending = ALIGN(ring->tx_pending, 32); +	/* The Tx ring size cannot be smaller than the minimum number of +	 * descriptors needed for TSO. +	 */ +	if (new_tx_pending < MVPP2_MAX_SKB_DESCS) +		new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32); +  	if (ring->rx_pending != new_rx_pending) {  		netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",  			    ring->rx_pending, new_rx_pending); @@ -8345,7 +8326,7 @@ static int mvpp2_probe(struct platform_device *pdev)  	for_each_available_child_of_node(dn, port_node) {  		err = mvpp2_port_probe(pdev, port_node, priv, i);  		if (err < 0) -			goto err_mg_clk; +			goto err_port_probe;  		i++;  	} @@ -8361,12 +8342,19 @@ static int mvpp2_probe(struct platform_device *pdev)  	priv->stats_queue = create_singlethread_workqueue(priv->queue_name);  	if (!priv->stats_queue) {  		err = -ENOMEM; -		goto err_mg_clk; +		goto err_port_probe;  	}  	platform_set_drvdata(pdev, priv);  	return 0; +err_port_probe: +	i = 0; +	for_each_available_child_of_node(dn, port_node) { +		if (priv->port_list[i]) +			mvpp2_port_remove(priv->port_list[i]); +		i++; +	}  err_mg_clk:  	clk_disable_unprepare(priv->axi_clk);  	if (priv->hw_version == MVPP22) diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 6e423f098a60..31efc47c847e 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -4081,7 +4081,6 @@ static void skge_remove(struct pci_dev *pdev)  	if (hw->ports > 1) {  		skge_write32(hw, B0_IMSK, 0);  		skge_read32(hw, B0_IMSK); -		free_irq(pdev->irq, hw);  	}  	spin_unlock_irq(&hw->hw_lock); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 54adfd967858..fc67e35b253e 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1961,11 +1961,12 @@ static int mtk_hw_init(struct mtk_eth *eth)  	/* set GE2 TUNE */  	regmap_write(eth->pctl, 
GPIO_BIAS_CTRL, 0x0); -	/* GE1, Force 1000M/FD, FC ON */ -	mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0)); - -	/* GE2, Force 1000M/FD, FC ON */ -	mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1)); +	/* Set linkdown as the default for each GMAC. Its own MCR would be set +	 * up with the more appropriate value when mtk_phy_link_adjust call is +	 * being invoked. +	 */ +	for (i = 0; i < MTK_MAC_COUNT; i++) +		mtk_w32(eth, 0, MTK_MAC_MCR(i));  	/* Indicates CDM to parse the MTK special tag from CPU  	 * which also is working out for untag packets. diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index e0eb695318e6..1fa4849a6f56 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c @@ -188,7 +188,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)  	struct net_device *dev = mdev->pndev[port];  	struct mlx4_en_priv *priv = netdev_priv(dev);  	struct net_device_stats *stats = &dev->stats; -	struct mlx4_cmd_mailbox *mailbox; +	struct mlx4_cmd_mailbox *mailbox, *mailbox_priority;  	u64 in_mod = reset << 8 | port;  	int err;  	int i, counter_index; @@ -198,6 +198,13 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)  	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);  	if (IS_ERR(mailbox))  		return PTR_ERR(mailbox); + +	mailbox_priority = mlx4_alloc_cmd_mailbox(mdev->dev); +	if (IS_ERR(mailbox_priority)) { +		mlx4_free_cmd_mailbox(mdev->dev, mailbox); +		return PTR_ERR(mailbox_priority); +	} +  	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,  			   MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,  			   MLX4_CMD_NATIVE); @@ -206,6 +213,28 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)  	mlx4_en_stats = mailbox->buf; +	memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats)); +	counter_index = mlx4_get_default_counter_index(mdev->dev, port); +	err = mlx4_get_counter_stats(mdev->dev, counter_index, +				     &tmp_counter_stats, reset); + +	/* 0xffs indicates invalid value */ +	memset(mailbox_priority->buf, 0xff, +	       sizeof(*flowstats) * MLX4_NUM_PRIORITIES); + +	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) { +		memset(mailbox_priority->buf, 0, +		       sizeof(*flowstats) * MLX4_NUM_PRIORITIES); +		err = mlx4_cmd_box(mdev->dev, 0, mailbox_priority->dma, +				   in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL, +				   0, MLX4_CMD_DUMP_ETH_STATS, +				   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); +		if (err) +			goto out; +	} + +	flowstats = mailbox_priority->buf; +  	spin_lock_bh(&priv->stats_lock);  	mlx4_en_fold_software_stats(dev); @@ -345,31 +374,6 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)  	priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan);  	priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan); -	spin_unlock_bh(&priv->stats_lock); - -	memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats)); -	counter_index = mlx4_get_default_counter_index(mdev->dev, port); -	err = mlx4_get_counter_stats(mdev->dev, counter_index, -				     &tmp_counter_stats, reset); - -	/* 0xffs indicates invalid value */ -	memset(mailbox->buf, 0xff, sizeof(*flowstats) * MLX4_NUM_PRIORITIES); - -	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) { -		memset(mailbox->buf, 0, -		       sizeof(*flowstats) * MLX4_NUM_PRIORITIES); -		err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, -				   in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL, -				   0, 
MLX4_CMD_DUMP_ETH_STATS, -				   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); -		if (err) -			goto out; -	} - -	flowstats = mailbox->buf; - -	spin_lock_bh(&priv->stats_lock); -  	if (tmp_counter_stats.counter_mode == 0) {  		priv->pf_stats.rx_bytes   = be64_to_cpu(tmp_counter_stats.rx_bytes);  		priv->pf_stats.tx_bytes   = be64_to_cpu(tmp_counter_stats.tx_bytes); @@ -410,6 +414,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)  out:  	mlx4_free_cmd_mailbox(mdev->dev, mailbox); +	mlx4_free_cmd_mailbox(mdev->dev, mailbox_priority);  	return err;  } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c index 88699b181946..946d9db7c8c2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c @@ -185,7 +185,7 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)  		if (priv->mdev->dev->caps.flags &  					MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {  			buf[3] = mlx4_en_test_registers(priv); -			if (priv->port_up) +			if (priv->port_up && dev->mtu >= MLX4_SELFTEST_LB_MIN_MTU)  				buf[4] = mlx4_en_test_loopback(priv);  		} diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 1856e279a7e0..2b72677eccd4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -153,6 +153,9 @@  #define SMALL_PACKET_SIZE      (256 - NET_IP_ALIGN)  #define HEADER_COPY_SIZE       (128 - NET_IP_ALIGN)  #define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN) +#define PREAMBLE_LEN           8 +#define MLX4_SELFTEST_LB_MIN_MTU (MLX4_LOOPBACK_TEST_PAYLOAD + NET_IP_ALIGN + \ +				  ETH_HLEN + PREAMBLE_LEN)  #define MLX4_EN_MIN_MTU		46  /* VLAN_HLEN is added twice,to support skb vlan tagged with multiple diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 04304dd894c6..606a0e0beeae 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -611,7 +611,6 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)  						MLX4_MAX_PORTS;  				else  					res_alloc->guaranteed[t] = 0; -				res_alloc->res_free -= res_alloc->guaranteed[t];  				break;  			default:  				break; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 1fffdebbc9e8..e9a1fbcc4adf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -362,7 +362,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,  	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:  	case MLX5_CMD_OP_ALLOC_Q_COUNTER:  	case MLX5_CMD_OP_QUERY_Q_COUNTER: -	case MLX5_CMD_OP_SET_RATE_LIMIT: +	case MLX5_CMD_OP_SET_PP_RATE_LIMIT:  	case MLX5_CMD_OP_QUERY_RATE_LIMIT:  	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:  	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT: @@ -505,7 +505,7 @@ const char *mlx5_command_str(int command)  	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);  	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);  	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER); -	MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT); +	MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);  	MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);  	MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);  	MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h 
b/drivers/net/ethernet/mellanox/mlx5/core/en.h index c0872b3284cb..543060c305a0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -82,6 +82,9 @@  	max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)  #define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev)       MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6)  #define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8) +#define MLX5E_MPWQE_STRIDE_SZ(mdev, cqe_cmprs) \ +	(cqe_cmprs ? MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) : \ +	MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev))  #define MLX5_MPWRQ_LOG_WQE_SZ			18  #define MLX5_MPWRQ_WQE_PAGE_ORDER  (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \ @@ -590,6 +593,7 @@ struct mlx5e_channel {  	struct mlx5_core_dev      *mdev;  	struct hwtstamp_config    *tstamp;  	int                        ix; +	int                        cpu;  };  struct mlx5e_channels { @@ -935,8 +939,9 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,  				 u8 cq_period_mode);  void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,  				 u8 cq_period_mode); -void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev, -			      struct mlx5e_params *params, u8 rq_type); +void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, +			       struct mlx5e_params *params, +			       u8 rq_type);  static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)  { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index c6d90b6dd80e..9bcf38f4123b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -274,6 +274,7 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)  static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,  				    struct ieee_ets *ets)  { +	bool have_ets_tc = false;  	int bw_sum = 0;  	int i; @@ -288,11 +289,14 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,  	}  	/* Validate Bandwidth Sum */ -	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) -		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) +	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { +		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) { +			have_ets_tc = true;  			bw_sum += ets->tc_tx_bw[i]; +		} +	} -	if (bw_sum != 0 && bw_sum != 100) { +	if (have_ets_tc && bw_sum != 100) {  		netdev_err(netdev,  			   "Failed to validate ETS: BW sum is illegal\n");  		return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 23425f028405..8f05efa5c829 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1523,8 +1523,10 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val  	new_channels.params = priv->channels.params;  	MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val); -	mlx5e_set_rq_type_params(priv->mdev, &new_channels.params, -				 new_channels.params.rq_wq_type); +	new_channels.params.mpwqe_log_stride_sz = +		MLX5E_MPWQE_STRIDE_SZ(priv->mdev, new_val); +	new_channels.params.mpwqe_log_num_strides = +		MLX5_MPWRQ_LOG_WQE_SZ - new_channels.params.mpwqe_log_stride_sz;  	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {  		priv->channels.params = new_channels.params; @@ -1536,6 +1538,10 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val  		return err;  	
mlx5e_switch_priv_channels(priv, &new_channels, NULL); +	mlx5e_dbg(DRV, priv, "MLX5E: RxCqeCmprss was turned %s\n", +		  MLX5E_GET_PFLAG(&priv->channels.params, +				  MLX5E_PFLAG_RX_CQE_COMPRESS) ? "ON" : "OFF"); +  	return 0;  } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index d2b057a3e512..d9d8227f195f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -71,11 +71,6 @@ struct mlx5e_channel_param {  	struct mlx5e_cq_param      icosq_cq;  }; -static int mlx5e_get_node(struct mlx5e_priv *priv, int ix) -{ -	return pci_irq_get_node(priv->mdev->pdev, MLX5_EQ_VEC_COMP_BASE + ix); -} -  static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)  {  	return MLX5_CAP_GEN(mdev, striding_rq) && @@ -83,8 +78,8 @@ static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)  		MLX5_CAP_ETH(mdev, reg_umr_sq);  } -void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev, -			      struct mlx5e_params *params, u8 rq_type) +void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, +			       struct mlx5e_params *params, u8 rq_type)  {  	params->rq_wq_type = rq_type;  	params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; @@ -93,10 +88,8 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,  		params->log_rq_size = is_kdump_kernel() ?  			MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :  			MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW; -		params->mpwqe_log_stride_sz = -			MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ? -			MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) : -			MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev); +		params->mpwqe_log_stride_sz = MLX5E_MPWQE_STRIDE_SZ(mdev, +			MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));  		params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -  			params->mpwqe_log_stride_sz;  		break; @@ -120,13 +113,14 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,  		       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));  } -static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params) +static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, +				struct mlx5e_params *params)  {  	u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&  		    !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ?  		    
MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :  		    MLX5_WQ_TYPE_LINKED_LIST; -	mlx5e_set_rq_type_params(mdev, params, rq_type); +	mlx5e_init_rq_type_params(mdev, params, rq_type);  }  static void mlx5e_update_carrier(struct mlx5e_priv *priv) @@ -444,17 +438,16 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,  	int wq_sz = mlx5_wq_ll_get_size(&rq->wq);  	int mtt_sz = mlx5e_get_wqe_mtt_sz();  	int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1; -	int node = mlx5e_get_node(c->priv, c->ix);  	int i;  	rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info), -					GFP_KERNEL, node); +				      GFP_KERNEL, cpu_to_node(c->cpu));  	if (!rq->mpwqe.info)  		goto err_out;  	/* We allocate more than mtt_sz as we will align the pointer */ -	rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, -					GFP_KERNEL, node); +	rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL, +					cpu_to_node(c->cpu));  	if (unlikely(!rq->mpwqe.mtt_no_align))  		goto err_free_wqe_info; @@ -562,7 +555,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,  	int err;  	int i; -	rqp->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); +	rqp->wq.db_numa_node = cpu_to_node(c->cpu);  	err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,  				&rq->wq_ctrl); @@ -629,8 +622,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,  	default: /* MLX5_WQ_TYPE_LINKED_LIST */  		rq->wqe.frag_info =  			kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info), -				     GFP_KERNEL, -				     mlx5e_get_node(c->priv, c->ix)); +				     GFP_KERNEL, cpu_to_node(c->cpu));  		if (!rq->wqe.frag_info) {  			err = -ENOMEM;  			goto err_rq_wq_destroy; @@ -1000,13 +992,13 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,  	sq->uar_map   = mdev->mlx5e_res.bfreg.map;  	sq->min_inline_mode = params->tx_min_inline_mode; -	param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); +	param->wq.db_numa_node = cpu_to_node(c->cpu);  	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);  	if (err)  		return err;  	sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; -	err = mlx5e_alloc_xdpsq_db(sq, mlx5e_get_node(c->priv, c->ix)); +	err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));  	if (err)  		goto err_sq_wq_destroy; @@ -1053,13 +1045,13 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,  	sq->channel   = c;  	sq->uar_map   = mdev->mlx5e_res.bfreg.map; -	param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); +	param->wq.db_numa_node = cpu_to_node(c->cpu);  	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);  	if (err)  		return err;  	sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; -	err = mlx5e_alloc_icosq_db(sq, mlx5e_get_node(c->priv, c->ix)); +	err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));  	if (err)  		goto err_sq_wq_destroy; @@ -1126,13 +1118,13 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,  	if (MLX5_IPSEC_DEV(c->priv->mdev))  		set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state); -	param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix); +	param->wq.db_numa_node = cpu_to_node(c->cpu);  	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);  	if (err)  		return err;  	sq->wq.db    = &sq->wq.db[MLX5_SND_DBR]; -	err = mlx5e_alloc_txqsq_db(sq, mlx5e_get_node(c->priv, c->ix)); +	err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));  	if (err)  		goto err_sq_wq_destroy; @@ -1504,8 +1496,8 @@ static int mlx5e_alloc_cq(struct mlx5e_channel *c,  	struct mlx5_core_dev *mdev = c->priv->mdev;  	int err; -	param->wq.buf_numa_node = mlx5e_get_node(c->priv, c->ix); -	param->wq.db_numa_node  = 
mlx5e_get_node(c->priv, c->ix); +	param->wq.buf_numa_node = cpu_to_node(c->cpu); +	param->wq.db_numa_node  = cpu_to_node(c->cpu);  	param->eq_ix   = c->ix;  	err = mlx5e_alloc_cq_common(mdev, param, cq); @@ -1604,6 +1596,11 @@ static void mlx5e_close_cq(struct mlx5e_cq *cq)  	mlx5e_free_cq(cq);  } +static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix) +{ +	return cpumask_first(priv->mdev->priv.irq_info[ix].mask); +} +  static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,  			     struct mlx5e_params *params,  			     struct mlx5e_channel_param *cparam) @@ -1752,12 +1749,13 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,  {  	struct mlx5e_cq_moder icocq_moder = {0, 0};  	struct net_device *netdev = priv->netdev; +	int cpu = mlx5e_get_cpu(priv, ix);  	struct mlx5e_channel *c;  	unsigned int irq;  	int err;  	int eqn; -	c = kzalloc_node(sizeof(*c), GFP_KERNEL, mlx5e_get_node(priv, ix)); +	c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));  	if (!c)  		return -ENOMEM; @@ -1765,6 +1763,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,  	c->mdev     = priv->mdev;  	c->tstamp   = &priv->tstamp;  	c->ix       = ix; +	c->cpu      = cpu;  	c->pdev     = &priv->mdev->pdev->dev;  	c->netdev   = priv->netdev;  	c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); @@ -1853,8 +1852,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)  	for (tc = 0; tc < c->num_tc; tc++)  		mlx5e_activate_txqsq(&c->sq[tc]);  	mlx5e_activate_rq(&c->rq); -	netif_set_xps_queue(c->netdev, -		mlx5_get_vector_affinity(c->priv->mdev, c->ix), c->ix); +	netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);  }  static void mlx5e_deactivate_channel(struct mlx5e_channel *c) @@ -3679,6 +3677,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,  						     struct sk_buff *skb,  						     netdev_features_t features)  { +	unsigned int offset = 0;  	struct udphdr *udph;  	u8 proto;  	u16 port; @@ -3688,7 +3687,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,  		proto = ip_hdr(skb)->protocol;  		break;  	case htons(ETH_P_IPV6): -		proto = ipv6_hdr(skb)->nexthdr; +		proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);  		break;  	default:  		goto out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 60771865c99c..e7e7cef2bde4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -466,7 +466,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)  			break;  		case MLX5_EVENT_TYPE_CQ_ERROR:  			cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff; -			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrom 0x%x\n", +			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",  				       cqn, eqe->data.cq_err.syndrome);  			mlx5_cq_event(dev, cqn, eqe->type);  			break; @@ -775,7 +775,7 @@ err1:  	return err;  } -int mlx5_stop_eqs(struct mlx5_core_dev *dev) +void mlx5_stop_eqs(struct mlx5_core_dev *dev)  {  	struct mlx5_eq_table *table = &dev->priv.eq_table;  	int err; @@ -784,22 +784,26 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev)  	if (MLX5_CAP_GEN(dev, pg)) {  		err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq);  		if (err) -			return err; +			mlx5_core_err(dev, "failed to destroy page fault eq, err(%d)\n", +				      err);  	}  #endif  	err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);  	if (err) -		return err; +		mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n", +			
      err); -	mlx5_destroy_unmap_eq(dev, &table->async_eq); +	err = mlx5_destroy_unmap_eq(dev, &table->async_eq); +	if (err) +		mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n", +			      err);  	mlx5_cmd_use_polling(dev);  	err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);  	if (err) -		mlx5_cmd_use_events(dev); - -	return err; +		mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n", +			      err);  }  int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c index 3c11d6e2160a..14962969c5ba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c @@ -66,6 +66,9 @@ static int mlx5_fpga_mem_read_i2c(struct mlx5_fpga_device *fdev, size_t size,  	u8 actual_size;  	int err; +	if (!size) +		return -EINVAL; +  	if (!fdev->mdev)  		return -ENOTCONN; @@ -95,6 +98,9 @@ static int mlx5_fpga_mem_write_i2c(struct mlx5_fpga_device *fdev, size_t size,  	u8 actual_size;  	int err; +	if (!size) +		return -EINVAL; +  	if (!fdev->mdev)  		return -ENOTCONN; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index c70fd663a633..dfaad9ecb2b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -174,6 +174,8 @@ static void del_hw_fte(struct fs_node *node);  static void del_sw_flow_table(struct fs_node *node);  static void del_sw_flow_group(struct fs_node *node);  static void del_sw_fte(struct fs_node *node); +static void del_sw_prio(struct fs_node *node); +static void del_sw_ns(struct fs_node *node);  /* Delete rule (destination) is special case that    * requires to lock the FTE for all the deletion process.   
*/ @@ -408,6 +410,16 @@ static inline struct mlx5_core_dev *get_dev(struct fs_node *node)  	return NULL;  } +static void del_sw_ns(struct fs_node *node) +{ +	kfree(node); +} + +static void del_sw_prio(struct fs_node *node) +{ +	kfree(node); +} +  static void del_hw_flow_table(struct fs_node *node)  {  	struct mlx5_flow_table *ft; @@ -2064,7 +2076,7 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,  		return ERR_PTR(-ENOMEM);  	fs_prio->node.type = FS_TYPE_PRIO; -	tree_init_node(&fs_prio->node, NULL, NULL); +	tree_init_node(&fs_prio->node, NULL, del_sw_prio);  	tree_add_node(&fs_prio->node, &ns->node);  	fs_prio->num_levels = num_levels;  	fs_prio->prio = prio; @@ -2090,7 +2102,7 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)  		return ERR_PTR(-ENOMEM);  	fs_init_namespace(ns); -	tree_init_node(&ns->node, NULL, NULL); +	tree_init_node(&ns->node, NULL, del_sw_ns);  	tree_add_node(&ns->node, &prio->node);  	list_add_tail(&ns->node.list, &prio->node.children); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 1a0e797ad001..21d29f7936f6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -241,7 +241,7 @@ static void print_health_info(struct mlx5_core_dev *dev)  	u32 fw;  	int i; -	/* If the syndrom is 0, the device is OK and no need to print buffer */ +	/* If the syndrome is 0, the device is OK and no need to print buffer */  	if (!ioread8(&h->synd))  		return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index d2a66dc4adc6..8812d7208e8f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -57,7 +57,7 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,  				   struct mlx5e_params *params)  {  	/* Override RQ params as IPoIB supports only LINKED LIST RQ for now */ -	mlx5e_set_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST); +	mlx5e_init_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST);  	/* RQ size in ipoib by default is 512 */  	params->log_rq_size = is_kdump_kernel() ? diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index f26f97fe4666..582b2f18010a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -137,6 +137,17 @@ int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)  }  EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag); +static int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev, +				       bool reset, void *out, int out_size) +{ +	u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = { }; + +	MLX5_SET(query_cong_statistics_in, in, opcode, +		 MLX5_CMD_OP_QUERY_CONG_STATISTICS); +	MLX5_SET(query_cong_statistics_in, in, clear, reset); +	return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size); +} +  static struct mlx5_lag *mlx5_lag_dev_get(struct mlx5_core_dev *dev)  {  	return dev->priv.lag; @@ -633,3 +644,48 @@ bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)  	/* If bonded, we do not add an IB device for PF1. 
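
The fs_core hunks above install del_sw_prio()/del_sw_ns() as teardown callbacks where tree_init_node() previously got NULL, so the prio and namespace allocations are actually freed when their node is released. A small userspace sketch of that destructor-callback idea follows; the structure and function names are made up, not the mlx5 implementation.

```c
/* Illustrative only: a node with a software-teardown callback, in the
 * spirit of the del_sw_prio()/del_sw_ns() fix. Hypothetical names. */
#include <stdio.h>
#include <stdlib.h>

struct fs_node {
	void (*del_sw)(struct fs_node *node);
	/* the real structure also carries refcounts, children, ... */
};

static void del_sw_generic(struct fs_node *node)
{
	free(node);	/* without this callback the allocation would leak */
}

static struct fs_node *node_create(void)
{
	struct fs_node *node = calloc(1, sizeof(*node));

	if (node)
		node->del_sw = del_sw_generic;	/* was NULL before the fix */
	return node;
}

static void node_put(struct fs_node *node)
{
	if (node && node->del_sw)
		node->del_sw(node);
}

int main(void)
{
	struct fs_node *prio = node_create();

	node_put(prio);
	puts("node freed via del_sw callback");
	return 0;
}
```
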
*/  	return false;  } + +int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, +				 u64 *values, +				 int num_counters, +				 size_t *offsets) +{ +	int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out); +	struct mlx5_core_dev *mdev[MLX5_MAX_PORTS]; +	struct mlx5_lag *ldev; +	int num_ports; +	int ret, i, j; +	void *out; + +	out = kvzalloc(outlen, GFP_KERNEL); +	if (!out) +		return -ENOMEM; + +	memset(values, 0, sizeof(*values) * num_counters); + +	mutex_lock(&lag_mutex); +	ldev = mlx5_lag_dev_get(dev); +	if (ldev && mlx5_lag_is_bonded(ldev)) { +		num_ports = MLX5_MAX_PORTS; +		mdev[0] = ldev->pf[0].dev; +		mdev[1] = ldev->pf[1].dev; +	} else { +		num_ports = 1; +		mdev[0] = dev; +	} + +	for (i = 0; i < num_ports; ++i) { +		ret = mlx5_cmd_query_cong_counter(mdev[i], false, out, outlen); +		if (ret) +			goto unlock; + +		for (j = 0; j < num_counters; ++j) +			values[j] += be64_to_cpup((__be64 *)(out + offsets[j])); +	} + +unlock: +	mutex_unlock(&lag_mutex); +	kvfree(out); +	return ret; +} +EXPORT_SYMBOL(mlx5_lag_query_cong_counters); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 5f323442cc5a..8a89c7e8cd63 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -317,9 +317,6 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)  {  	struct mlx5_priv *priv = &dev->priv;  	struct mlx5_eq_table *table = &priv->eq_table; -	struct irq_affinity irqdesc = { -		.pre_vectors = MLX5_EQ_VEC_COMP_BASE, -	};  	int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);  	int nvec; @@ -333,10 +330,9 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)  	if (!priv->irq_info)  		goto err_free_msix; -	nvec = pci_alloc_irq_vectors_affinity(dev->pdev, +	nvec = pci_alloc_irq_vectors(dev->pdev,  			MLX5_EQ_VEC_COMP_BASE + 1, nvec, -			PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, -			&irqdesc); +			PCI_IRQ_MSIX);  	if (nvec < 0)  		return nvec; @@ -622,6 +618,63 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev)  	return (u64)timer_l | (u64)timer_h1 << 32;  } +static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) +{ +	struct mlx5_priv *priv  = &mdev->priv; +	int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i); + +	if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) { +		mlx5_core_warn(mdev, "zalloc_cpumask_var failed"); +		return -ENOMEM; +	} + +	cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node), +			priv->irq_info[i].mask); + +	if (IS_ENABLED(CONFIG_SMP) && +	    irq_set_affinity_hint(irq, priv->irq_info[i].mask)) +		mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq); + +	return 0; +} + +static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i) +{ +	struct mlx5_priv *priv  = &mdev->priv; +	int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i); + +	irq_set_affinity_hint(irq, NULL); +	free_cpumask_var(priv->irq_info[i].mask); +} + +static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev) +{ +	int err; +	int i; + +	for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) { +		err = mlx5_irq_set_affinity_hint(mdev, i); +		if (err) +			goto err_out; +	} + +	return 0; + +err_out: +	for (i--; i >= 0; i--) +		mlx5_irq_clear_affinity_hint(mdev, i); + +	return err; +} + +static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev) +{ +	int i; + +	for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) +		mlx5_irq_clear_affinity_hint(mdev, i); +} +  int 
mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,  		    unsigned int *irqn)  { @@ -1097,6 +1150,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,  		goto err_stop_eqs;  	} +	err = mlx5_irq_set_affinity_hints(dev); +	if (err) { +		dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n"); +		goto err_affinity_hints; +	} +  	err = mlx5_init_fs(dev);  	if (err) {  		dev_err(&pdev->dev, "Failed to init flow steering\n"); @@ -1154,6 +1213,9 @@ err_sriov:  	mlx5_cleanup_fs(dev);  err_fs: +	mlx5_irq_clear_affinity_hints(dev); + +err_affinity_hints:  	free_comp_eqs(dev);  err_stop_eqs: @@ -1222,6 +1284,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,  	mlx5_sriov_detach(dev);  	mlx5_cleanup_fs(dev); +	mlx5_irq_clear_affinity_hints(dev);  	free_comp_eqs(dev);  	mlx5_stop_eqs(dev);  	mlx5_put_uars_page(dev, priv->uar); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index db9e665ab104..889130edb715 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -213,8 +213,8 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,  err_cmd:  	memset(din, 0, sizeof(din));  	memset(dout, 0, sizeof(dout)); -	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP); -	MLX5_SET(destroy_qp_in, in, qpn, qp->qpn); +	MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP); +	MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);  	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));  	return err;  } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c index e651e4c02867..d3c33e9eea72 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c @@ -125,16 +125,16 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,  	return ret_entry;  } -static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev, +static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,  				   u32 rate, u16 index)  { -	u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)]   = {0}; -	u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0}; +	u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)]   = {0}; +	u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0}; -	MLX5_SET(set_rate_limit_in, in, opcode, -		 MLX5_CMD_OP_SET_RATE_LIMIT); -	MLX5_SET(set_rate_limit_in, in, rate_limit_index, index); -	MLX5_SET(set_rate_limit_in, in, rate_limit, rate); +	MLX5_SET(set_pp_rate_limit_in, in, opcode, +		 MLX5_CMD_OP_SET_PP_RATE_LIMIT); +	MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index); +	MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rate);  	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));  } @@ -173,7 +173,7 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index)  		entry->refcount++;  	} else {  		/* new rate limit */ -		err = mlx5_set_rate_limit_cmd(dev, rate, entry->index); +		err = mlx5_set_pp_rate_limit_cmd(dev, rate, entry->index);  		if (err) {  			mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n",  				      rate, err); @@ -209,7 +209,7 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate)  	entry->refcount--;  	if (!entry->refcount) {  		/* need to remove rate */ -		mlx5_set_rate_limit_cmd(dev, 0, entry->index); +		mlx5_set_pp_rate_limit_cmd(dev, 0, entry->index);  		entry->rate = 0;  	} @@ -262,8 +262,8 @@ void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)  	/* Clear all configured rates */  	for 
(i = 0; i < table->max_size; i++)  		if (table->rl_entry[i].rate) -			mlx5_set_rate_limit_cmd(dev, 0, -						table->rl_entry[i].index); +			mlx5_set_pp_rate_limit_cmd(dev, 0, +						   table->rl_entry[i].index);  	kfree(dev->priv.rl_table.rl_entry);  } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c index 07a9ba6cfc70..2f74953e4561 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c @@ -71,9 +71,9 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)  	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;  	struct mlx5e_vxlan *vxlan; -	spin_lock(&vxlan_db->lock); +	spin_lock_bh(&vxlan_db->lock);  	vxlan = radix_tree_lookup(&vxlan_db->tree, port); -	spin_unlock(&vxlan_db->lock); +	spin_unlock_bh(&vxlan_db->lock);  	return vxlan;  } @@ -88,8 +88,12 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)  	struct mlx5e_vxlan *vxlan;  	int err; -	if (mlx5e_vxlan_lookup_port(priv, port)) +	mutex_lock(&priv->state_lock); +	vxlan = mlx5e_vxlan_lookup_port(priv, port); +	if (vxlan) { +		atomic_inc(&vxlan->refcount);  		goto free_work; +	}  	if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))  		goto free_work; @@ -99,10 +103,11 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)  		goto err_delete_port;  	vxlan->udp_port = port; +	atomic_set(&vxlan->refcount, 1); -	spin_lock_irq(&vxlan_db->lock); +	spin_lock_bh(&vxlan_db->lock);  	err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan); -	spin_unlock_irq(&vxlan_db->lock); +	spin_unlock_bh(&vxlan_db->lock);  	if (err)  		goto err_free; @@ -113,35 +118,39 @@ err_free:  err_delete_port:  	mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);  free_work: +	mutex_unlock(&priv->state_lock);  	kfree(vxlan_work);  } -static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port) +static void mlx5e_vxlan_del_port(struct work_struct *work)  { +	struct mlx5e_vxlan_work *vxlan_work = +		container_of(work, struct mlx5e_vxlan_work, work); +	struct mlx5e_priv *priv         = vxlan_work->priv;  	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; +	u16 port = vxlan_work->port;  	struct mlx5e_vxlan *vxlan; +	bool remove = false; -	spin_lock_irq(&vxlan_db->lock); -	vxlan = radix_tree_delete(&vxlan_db->tree, port); -	spin_unlock_irq(&vxlan_db->lock); - +	mutex_lock(&priv->state_lock); +	spin_lock_bh(&vxlan_db->lock); +	vxlan = radix_tree_lookup(&vxlan_db->tree, port);  	if (!vxlan) -		return; - -	mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlan->udp_port); - -	kfree(vxlan); -} +		goto out_unlock; -static void mlx5e_vxlan_del_port(struct work_struct *work) -{ -	struct mlx5e_vxlan_work *vxlan_work = -		container_of(work, struct mlx5e_vxlan_work, work); -	struct mlx5e_priv *priv = vxlan_work->priv; -	u16 port = vxlan_work->port; +	if (atomic_dec_and_test(&vxlan->refcount)) { +		radix_tree_delete(&vxlan_db->tree, port); +		remove = true; +	} -	__mlx5e_vxlan_core_del_port(priv, port); +out_unlock: +	spin_unlock_bh(&vxlan_db->lock); +	if (remove) { +		mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); +		kfree(vxlan); +	} +	mutex_unlock(&priv->state_lock);  	kfree(vxlan_work);  } @@ -171,12 +180,11 @@ void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)  	struct mlx5e_vxlan *vxlan;  	unsigned int port = 0; -	spin_lock_irq(&vxlan_db->lock); +	/* Lockless since we are the only radix-tree consumers, wq is disabled */  	while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) {  		port = 
vxlan->udp_port; -		spin_unlock_irq(&vxlan_db->lock); -		__mlx5e_vxlan_core_del_port(priv, (u16)port); -		spin_lock_irq(&vxlan_db->lock); +		radix_tree_delete(&vxlan_db->tree, port); +		mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); +		kfree(vxlan);  	} -	spin_unlock_irq(&vxlan_db->lock);  } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h index 5def12c048e3..5ef6ae7d568a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h @@ -36,6 +36,7 @@  #include "en.h"  struct mlx5e_vxlan { +	atomic_t refcount;  	u16 udp_port;  }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 23f7d828cf67..6ef20e5cc77d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1643,7 +1643,12 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,  		return 0;  	} -	wmb(); /* reset needs to be written before we read control register */ +	/* Reset needs to be written before we read control register, and +	 * we must wait for the HW to become responsive once again +	 */ +	wmb(); +	msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS); +  	end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);  	do {  		u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index a6441208e9d9..fb082ad21b00 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h @@ -59,6 +59,7 @@  #define MLXSW_PCI_SW_RESET			0xF0010  #define MLXSW_PCI_SW_RESET_RST_BIT		BIT(0)  #define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS	5000 +#define MLXSW_PCI_SW_RESET_WAIT_MSECS		100  #define MLXSW_PCI_FW_READY			0xA1844  #define MLXSW_PCI_FW_READY_MASK			0xFFFF  #define MLXSW_PCI_FW_READY_MAGIC		0x5E diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 2d0897b7d860..c3837ca7a705 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -4300,6 +4300,7 @@ static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,  static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)  { +	u16 vid = 1;  	int err;  	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); @@ -4312,8 +4313,19 @@ static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)  				     true, false);  	if (err)  		goto err_port_vlan_set; + +	for (; vid <= VLAN_N_VID - 1; vid++) { +		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, +						     vid, false); +		if (err) +			goto err_vid_learning_set; +	} +  	return 0; +err_vid_learning_set: +	for (vid--; vid >= 1; vid--) +		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);  err_port_vlan_set:  	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);  err_port_stp_set: @@ -4323,6 +4335,12 @@ err_port_stp_set:  static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)  { +	u16 vid; + +	for (vid = VLAN_N_VID - 1; vid >= 1; vid--) +		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, +					       vid, true); +  	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,  			       false, false);  	mlxsw_sp_port_stp_set(mlxsw_sp_port, false); @@ -4358,7 +4376,10 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,  		}  		if (!info->linking)  			break; -		if (netdev_has_any_upper_dev(upper_dev)) { +		if 
(netdev_has_any_upper_dev(upper_dev) && +		    (!netif_is_bridge_master(upper_dev) || +		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, +							  upper_dev))) {  			NL_SET_ERR_MSG(extack,  				       "spectrum: Enslaving a port to a device that already has an upper device is not supported");  			return -EINVAL; @@ -4486,6 +4507,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,  					      u16 vid)  {  	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); +	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;  	struct netdev_notifier_changeupper_info *info = ptr;  	struct netlink_ext_ack *extack;  	struct net_device *upper_dev; @@ -4502,7 +4524,10 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,  		}  		if (!info->linking)  			break; -		if (netdev_has_any_upper_dev(upper_dev)) { +		if (netdev_has_any_upper_dev(upper_dev) && +		    (!netif_is_bridge_master(upper_dev) || +		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, +							  upper_dev))) {  			NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported");  			return -EINVAL;  		} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 432ab9b12b7f..05ce1befd9b3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -365,6 +365,8 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,  void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,  				struct net_device *brport_dev,  				struct net_device *br_dev); +bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp, +					 const struct net_device *br_dev);  /* spectrum.c */  int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c index c33beac5def0..b5397da94d7f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c @@ -46,7 +46,8 @@ mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,  				  int tclass_num, u32 min, u32 max,  				  u32 probability, bool is_ecn)  { -	char cwtp_cmd[max_t(u8, MLXSW_REG_CWTP_LEN, MLXSW_REG_CWTPM_LEN)]; +	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN]; +	char cwtp_cmd[MLXSW_REG_CWTP_LEN];  	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;  	int err; @@ -60,10 +61,10 @@ mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,  	if (err)  		return err; -	mlxsw_reg_cwtpm_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num, +	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,  			     MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn); -	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtp_cmd); +	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);  }  static int diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 632c7b229054..434b3922b34f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1370,8 +1370,9 @@ static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,  		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);  } -static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp, -					struct mlxsw_sp_rif *rif); +static void 
mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp, +					 struct mlxsw_sp_rif *old_rif, +					 struct mlxsw_sp_rif *new_rif);  static int  mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,  				 struct mlxsw_sp_ipip_entry *ipip_entry, @@ -1389,17 +1390,18 @@ mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,  		return PTR_ERR(new_lb_rif);  	ipip_entry->ol_lb = new_lb_rif; -	if (keep_encap) { -		list_splice_init(&old_lb_rif->common.nexthop_list, -				 &new_lb_rif->common.nexthop_list); -		mlxsw_sp_nexthop_rif_update(mlxsw_sp, &new_lb_rif->common); -	} +	if (keep_encap) +		mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common, +					     &new_lb_rif->common);  	mlxsw_sp_rif_destroy(&old_lb_rif->common);  	return 0;  } +static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp, +					struct mlxsw_sp_rif *rif); +  /**   * Update the offload related to an IPIP entry. This always updates decap, and   * in addition to that it also: @@ -1449,9 +1451,27 @@ static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,  {  	struct mlxsw_sp_ipip_entry *ipip_entry =  		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev); +	enum mlxsw_sp_l3proto ul_proto; +	union mlxsw_sp_l3addr saddr; +	u32 ul_tb_id;  	if (!ipip_entry)  		return 0; + +	/* For flat configuration cases, moving overlay to a different VRF might +	 * cause local address conflict, and the conflicting tunnels need to be +	 * demoted. +	 */ +	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev); +	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto; +	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev); +	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto, +						 saddr, ul_tb_id, +						 ipip_entry)) { +		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry); +		return 0; +	} +  	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,  						   true, false, false, extack);  } @@ -2416,25 +2436,16 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)  	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);  } -static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp, -				    const struct mlxsw_sp_rif *rif) -{ -	char rauht_pl[MLXSW_REG_RAUHT_LEN]; - -	mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL, -			     rif->rif_index, rif->addr); -	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl); -} -  static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,  					 struct mlxsw_sp_rif *rif)  {  	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp; -	mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);  	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list, -				 rif_list_node) +				 rif_list_node) { +		mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);  		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry); +	}  }  enum mlxsw_sp_nexthop_type { @@ -3217,7 +3228,7 @@ static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,  {  	if (!removing)  		nh->should_offload = 1; -	else if (nh->offloaded) +	else  		nh->should_offload = 0;  	nh->update = 1;  } @@ -3343,22 +3354,19 @@ static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)  	return ul_dev ? 
(ul_dev->flags & IFF_UP) : true;  } -static int mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp, -				      struct mlxsw_sp_nexthop *nh, -				      struct net_device *ol_dev) +static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp, +				       struct mlxsw_sp_nexthop *nh, +				       struct mlxsw_sp_ipip_entry *ipip_entry)  {  	bool removing;  	if (!nh->nh_grp->gateway || nh->ipip_entry) -		return 0; - -	nh->ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev); -	if (!nh->ipip_entry) -		return -ENOENT; +		return; -	removing = !mlxsw_sp_ipip_netdev_ul_up(ol_dev); +	nh->ipip_entry = ipip_entry; +	removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);  	__mlxsw_sp_nexthop_neigh_update(nh, removing); -	return 0; +	mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);  }  static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp, @@ -3403,21 +3411,21 @@ static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,  				       struct mlxsw_sp_nexthop *nh,  				       struct fib_nh *fib_nh)  { -	struct mlxsw_sp_router *router = mlxsw_sp->router; +	const struct mlxsw_sp_ipip_ops *ipip_ops;  	struct net_device *dev = fib_nh->nh_dev; -	enum mlxsw_sp_ipip_type ipipt; +	struct mlxsw_sp_ipip_entry *ipip_entry;  	struct mlxsw_sp_rif *rif;  	int err; -	if (mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fib_nh, &ipipt) && -	    router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev, -						     MLXSW_SP_L3_PROTO_IPV4)) { -		nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; -		err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev); -		if (err) -			return err; -		mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common); -		return 0; +	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev); +	if (ipip_entry) { +		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]; +		if (ipip_ops->can_offload(mlxsw_sp, dev, +					  MLXSW_SP_L3_PROTO_IPV4)) { +			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; +			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry); +			return 0; +		}  	}  	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH; @@ -3545,6 +3553,18 @@ static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,  	}  } +static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp, +					 struct mlxsw_sp_rif *old_rif, +					 struct mlxsw_sp_rif *new_rif) +{ +	struct mlxsw_sp_nexthop *nh; + +	list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list); +	list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node) +		nh->rif = new_rif; +	mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif); +} +  static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,  					   struct mlxsw_sp_rif *rif)  { @@ -3996,7 +4016,7 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,  	case RTN_LOCAL:  		ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,  						 MLXSW_SP_L3_PROTO_IPV4, dip); -		if (ipip_entry) { +		if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {  			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;  			return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,  							     fib_entry, @@ -4694,21 +4714,21 @@ static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,  				       struct mlxsw_sp_nexthop *nh,  				       const struct rt6_info *rt)  { -	struct mlxsw_sp_router *router = mlxsw_sp->router; +	const struct mlxsw_sp_ipip_ops *ipip_ops; +	struct mlxsw_sp_ipip_entry *ipip_entry;  	struct net_device *dev = rt->dst.dev; -	enum mlxsw_sp_ipip_type ipipt;  	struct mlxsw_sp_rif *rif;  	int err; -	if (mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, &ipipt) 
&& -	    router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev, -						     MLXSW_SP_L3_PROTO_IPV6)) { -		nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; -		err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev); -		if (err) -			return err; -		mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common); -		return 0; +	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev); +	if (ipip_entry) { +		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]; +		if (ipip_ops->can_offload(mlxsw_sp, dev, +					  MLXSW_SP_L3_PROTO_IPV6)) { +			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; +			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry); +			return 0; +		}  	}  	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 7b8548e25ae7..593ad31be749 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -152,6 +152,12 @@ mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,  	return NULL;  } +bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp, +					 const struct net_device *br_dev) +{ +	return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); +} +  static struct mlxsw_sp_bridge_device *  mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,  			      struct net_device *br_dev) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index e379b78e86ef..13190aa09faf 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -82,10 +82,33 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)  	return nfp_net_ebpf_capable(nn) ? 
"BPF" : "";  } +static int +nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) +{ +	int err; + +	nn->app_priv = kzalloc(sizeof(struct nfp_bpf_vnic), GFP_KERNEL); +	if (!nn->app_priv) +		return -ENOMEM; + +	err = nfp_app_nic_vnic_alloc(app, nn, id); +	if (err) +		goto err_free_priv; + +	return 0; +err_free_priv: +	kfree(nn->app_priv); +	return err; +} +  static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn)  { +	struct nfp_bpf_vnic *bv = nn->app_priv; +  	if (nn->dp.bpf_offload_xdp)  		nfp_bpf_xdp_offload(app, nn, NULL); +	WARN_ON(bv->tc_prog); +	kfree(bv);  }  static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type, @@ -93,6 +116,9 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,  {  	struct tc_cls_bpf_offload *cls_bpf = type_data;  	struct nfp_net *nn = cb_priv; +	struct bpf_prog *oldprog; +	struct nfp_bpf_vnic *bv; +	int err;  	if (type != TC_SETUP_CLSBPF ||  	    !tc_can_offload(nn->dp.netdev) || @@ -100,8 +126,6 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,  	    cls_bpf->common.protocol != htons(ETH_P_ALL) ||  	    cls_bpf->common.chain_index)  		return -EOPNOTSUPP; -	if (nn->dp.bpf_offload_xdp) -		return -EBUSY;  	/* Only support TC direct action */  	if (!cls_bpf->exts_integrated || @@ -110,16 +134,25 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,  		return -EOPNOTSUPP;  	} -	switch (cls_bpf->command) { -	case TC_CLSBPF_REPLACE: -		return nfp_net_bpf_offload(nn, cls_bpf->prog, true); -	case TC_CLSBPF_ADD: -		return nfp_net_bpf_offload(nn, cls_bpf->prog, false); -	case TC_CLSBPF_DESTROY: -		return nfp_net_bpf_offload(nn, NULL, true); -	default: +	if (cls_bpf->command != TC_CLSBPF_OFFLOAD)  		return -EOPNOTSUPP; + +	bv = nn->app_priv; +	oldprog = cls_bpf->oldprog; + +	/* Don't remove if oldprog doesn't match driver's state */ +	if (bv->tc_prog != oldprog) { +		oldprog = NULL; +		if (!cls_bpf->prog) +			return 0;  	} + +	err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog); +	if (err) +		return err; + +	bv->tc_prog = cls_bpf->prog; +	return 0;  }  static int nfp_bpf_setup_tc_block(struct net_device *netdev, @@ -167,7 +200,7 @@ const struct nfp_app_type app_bpf = {  	.extra_cap	= nfp_bpf_extra_cap, -	.vnic_alloc	= nfp_app_nic_vnic_alloc, +	.vnic_alloc	= nfp_bpf_vnic_alloc,  	.vnic_free	= nfp_bpf_vnic_free,  	.setup_tc	= nfp_bpf_setup_tc, diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 082a15f6dfb5..57b6043177a3 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -172,6 +172,14 @@ struct nfp_prog {  	struct list_head insns;  }; +/** + * struct nfp_bpf_vnic - per-vNIC BPF priv structure + * @tc_prog:	currently loaded cls_bpf program + */ +struct nfp_bpf_vnic { +	struct bpf_prog *tc_prog; +}; +  int nfp_bpf_jit(struct nfp_prog *prog);  extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 1a603fdd9e80..99b0487b6d82 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -568,6 +568,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,  		return err;  	}  	nn_writeb(nn, ctrl_offset, entry->entry); +	nfp_net_irq_unmask(nn, entry->entry);  	return 0;  } @@ -582,6 +583,7 @@ static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,  				 unsigned 
int vector_idx)  {  	nn_writeb(nn, ctrl_offset, 0xff); +	nn_pci_flush(nn);  	free_irq(nn->irq_entries[vector_idx].vector, nn);  } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 924a05e05da0..78b36c67c232 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -84,16 +84,13 @@ nfp_repr_phy_port_get_stats64(struct nfp_port *port,  {  	u8 __iomem *mem = port->eth_stats; -	/* TX and RX stats are flipped as we are returning the stats as seen -	 * at the switch port corresponding to the phys port. -	 */ -	stats->tx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK); -	stats->tx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS); -	stats->tx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS); +	stats->tx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK); +	stats->tx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS); +	stats->tx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS); -	stats->rx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK); -	stats->rx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS); -	stats->rx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS); +	stats->rx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK); +	stats->rx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS); +	stats->rx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS);  }  static void diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index ac8439ceea10..481876b5424c 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -1986,9 +1986,9 @@ static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)  					 tx_skb->dma_len,  					 DMA_TO_DEVICE);  		else -			pci_unmap_page(np->pci_dev, tx_skb->dma, +			dma_unmap_page(&np->pci_dev->dev, tx_skb->dma,  				       tx_skb->dma_len, -				       PCI_DMA_TODEVICE); +				       DMA_TO_DEVICE);  		tx_skb->dma = 0;  	}  } diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c index 18461fcb9815..53dbf1e163a8 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c @@ -47,6 +47,7 @@  #define MDIO_CLK_25_28                                               7  #define MDIO_WAIT_TIMES                                           1000 +#define MDIO_STATUS_DELAY_TIME                                       1  static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)  { @@ -65,7 +66,7 @@ static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)  	if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,  			       !(reg & (MDIO_START | MDIO_BUSY)), -			       100, MDIO_WAIT_TIMES * 100)) +			       MDIO_STATUS_DELAY_TIME, MDIO_WAIT_TIMES * 100))  		return -EIO;  	return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK; @@ -88,8 +89,8 @@ static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)  	writel(reg, adpt->base + EMAC_MDIO_CTRL);  	if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, -			       !(reg & (MDIO_START | MDIO_BUSY)), 100, -			       MDIO_WAIT_TIMES * 100)) +			       !(reg & (MDIO_START | MDIO_BUSY)), +			       MDIO_STATUS_DELAY_TIME, MDIO_WAIT_TIMES * 100))  		return -EIO;  	return 0; diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 70c92b649b29..38c924bdd32e 100644 --- 
a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -253,18 +253,18 @@ static int emac_open(struct net_device *netdev)  		return ret;  	} -	ret = emac_mac_up(adpt); +	ret = adpt->phy.open(adpt);  	if (ret) {  		emac_mac_rx_tx_rings_free_all(adpt);  		free_irq(irq->irq, irq);  		return ret;  	} -	ret = adpt->phy.open(adpt); +	ret = emac_mac_up(adpt);  	if (ret) { -		emac_mac_down(adpt);  		emac_mac_rx_tx_rings_free_all(adpt);  		free_irq(irq->irq, irq); +		adpt->phy.close(adpt);  		return ret;  	} diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index 71bee1af71ef..df21e900f874 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -195,6 +195,7 @@ err2:  err1:  	rmnet_unregister_real_device(real_dev, port);  err0: +	kfree(ep);  	return err;  } diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index 29842ccc91a9..08e4afc0ab39 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -126,12 +126,12 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,  	if (skb_headroom(skb) < required_headroom) {  		if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL)) -			return RMNET_MAP_CONSUMED; +			goto fail;  	}  	map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);  	if (!map_header) -		return RMNET_MAP_CONSUMED; +		goto fail;  	if (port->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) {  		if (mux_id == 0xff) @@ -143,6 +143,10 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,  	skb->protocol = htons(ETH_P_MAP);  	return RMNET_MAP_SUCCESS; + +fail: +	kfree_skb(skb); +	return RMNET_MAP_CONSUMED;  }  static void diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 2b962d349f5f..009780df664b 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -2308,32 +2308,9 @@ static int __maybe_unused ravb_resume(struct device *dev)  	struct ravb_private *priv = netdev_priv(ndev);  	int ret = 0; -	if (priv->wol_enabled) { -		/* Reduce the usecount of the clock to zero and then -		 * restore it to its original value. This is done to force -		 * the clock to be re-enabled which is a workaround -		 * for renesas-cpg-mssr driver which do not enable clocks -		 * when resuming from PSCI suspend/resume. -		 * -		 * Without this workaround the driver fails to communicate -		 * with the hardware if WoL was enabled when the system -		 * entered PSCI suspend. This is due to that if WoL is enabled -		 * we explicitly keep the clock from being turned off when -		 * suspending, but in PSCI sleep power is cut so the clock -		 * is disabled anyhow, the clock driver is not aware of this -		 * so the clock is not turned back on when resuming. -		 * -		 * TODO: once the renesas-cpg-mssr suspend/resume is working -		 *       this clock dance should be removed. -		 */ -		clk_disable(priv->clk); -		clk_disable(priv->clk); -		clk_enable(priv->clk); -		clk_enable(priv->clk); - -		/* Set reset mode to rearm the WoL logic */ +	/* If WoL is enabled set reset mode to rearm the WoL logic */ +	if (priv->wol_enabled)  		ravb_write(ndev, CCC_OPC_RESET, CCC); -	}  	/* All register have been reset to default values.  	 
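
The emac_open() hunk above reorders bring-up so the PHY is opened before the MAC, and the error path now unwinds in reverse order (including phy.close) instead of calling emac_mac_down() on a MAC that never came up. A minimal, compilable sketch of that goto-style unwind is below; all functions are stand-ins, not the driver's real API.

```c
/* Illustrative only: open() in dependency order, unwound in reverse on
 * failure, as in the emac_open() reordering. Hypothetical helpers. */
#include <stdio.h>

static int request_irq_stub(void) { return 0; }
static void free_irq_stub(void)   { }
static int phy_open_stub(void)    { return 0; }
static void phy_close_stub(void)  { }
static int mac_up_stub(void)      { return -1; }  /* force the error path */

static int dev_open(void)
{
	int ret;

	ret = request_irq_stub();
	if (ret)
		return ret;

	ret = phy_open_stub();
	if (ret)
		goto err_free_irq;

	ret = mac_up_stub();		/* MAC comes up only after the PHY */
	if (ret)
		goto err_phy_close;

	return 0;

err_phy_close:
	phy_close_stub();		/* undo in reverse order */
err_free_irq:
	free_irq_stub();
	return ret;
}

int main(void)
{
	printf("dev_open() = %d\n", dev_open());
	return 0;
}
```
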
* Restore all registers which where setup at probe time and diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 7e060aa9fbed..b9e2846589f8 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -147,7 +147,7 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {  	[FWNLCR0]	= 0x0090,  	[FWALCR0]	= 0x0094,  	[TXNLCR1]	= 0x00a0, -	[TXALCR1]	= 0x00a0, +	[TXALCR1]	= 0x00a4,  	[RXNLCR1]	= 0x00a8,  	[RXALCR1]	= 0x00ac,  	[FWNLCR1]	= 0x00b0, @@ -399,7 +399,7 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {  	[FWNLCR0]	= 0x0090,  	[FWALCR0]	= 0x0094,  	[TXNLCR1]	= 0x00a0, -	[TXALCR1]	= 0x00a0, +	[TXALCR1]	= 0x00a4,  	[RXNLCR1]	= 0x00a8,  	[RXALCR1]	= 0x00ac,  	[FWNLCR1]	= 0x00b0, @@ -1149,7 +1149,8 @@ static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)  			   entry, le32_to_cpu(txdesc->status));  		/* Free the original skb. */  		if (mdp->tx_skbuff[entry]) { -			dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr), +			dma_unmap_single(&mdp->pdev->dev, +					 le32_to_cpu(txdesc->addr),  					 le32_to_cpu(txdesc->len) >> 16,  					 DMA_TO_DEVICE);  			dev_kfree_skb_irq(mdp->tx_skbuff[entry]); @@ -1179,14 +1180,14 @@ static void sh_eth_ring_free(struct net_device *ndev)  			if (mdp->rx_skbuff[i]) {  				struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i]; -				dma_unmap_single(&ndev->dev, +				dma_unmap_single(&mdp->pdev->dev,  						 le32_to_cpu(rxdesc->addr),  						 ALIGN(mdp->rx_buf_sz, 32),  						 DMA_FROM_DEVICE);  			}  		}  		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; -		dma_free_coherent(NULL, ringsize, mdp->rx_ring, +		dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring,  				  mdp->rx_desc_dma);  		mdp->rx_ring = NULL;  	} @@ -1203,7 +1204,7 @@ static void sh_eth_ring_free(struct net_device *ndev)  		sh_eth_tx_free(ndev, false);  		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; -		dma_free_coherent(NULL, ringsize, mdp->tx_ring, +		dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring,  				  mdp->tx_desc_dma);  		mdp->tx_ring = NULL;  	} @@ -1245,9 +1246,9 @@ static void sh_eth_ring_format(struct net_device *ndev)  		/* The size of the buffer is a multiple of 32 bytes. */  		buf_len = ALIGN(mdp->rx_buf_sz, 32); -		dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len, +		dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len,  					  DMA_FROM_DEVICE); -		if (dma_mapping_error(&ndev->dev, dma_addr)) { +		if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {  			kfree_skb(skb);  			break;  		} @@ -1323,8 +1324,8 @@ static int sh_eth_ring_init(struct net_device *ndev)  	/* Allocate all Rx descriptors. */  	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; -	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma, -					  GFP_KERNEL); +	mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize, +					  &mdp->rx_desc_dma, GFP_KERNEL);  	if (!mdp->rx_ring)  		goto ring_free; @@ -1332,8 +1333,8 @@ static int sh_eth_ring_init(struct net_device *ndev)  	/* Allocate all Tx descriptors. 
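
The sh_eth hunks above correct a copy-pasted register offset: TXALCR1 was listed at 0x00a0, the same slot as TXNLCR1, and now reads 0x00a4. The sketch below shows the class of check that catches such duplicates in an offset table; the table here is a small example, not the full sh_eth register map.

```c
/* Illustrative only: detecting a copy-pasted register offset, the kind of
 * bug fixed by the TXALCR1 0x00a0 -> 0x00a4 change. Example values. */
#include <stdio.h>
#include <stdint.h>

enum { TXNLCR1, TXALCR1, RXNLCR1, RXALCR1, NUM_REGS };

static const uint16_t reg_offset[NUM_REGS] = {
	[TXNLCR1] = 0x00a0,
	[TXALCR1] = 0x00a4,	/* was mistakenly 0x00a0 before the fix */
	[RXNLCR1] = 0x00a8,
	[RXALCR1] = 0x00ac,
};

int main(void)
{
	int i, j, dups = 0;

	for (i = 0; i < NUM_REGS; i++)
		for (j = i + 1; j < NUM_REGS; j++)
			if (reg_offset[i] == reg_offset[j]) {
				printf("duplicate offset 0x%04x (regs %d and %d)\n",
				       (unsigned int)reg_offset[i], i, j);
				dups++;
			}

	printf(dups ? "table has duplicates\n" : "all offsets unique\n");
	return 0;
}
```
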
*/  	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; -	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma, -					  GFP_KERNEL); +	mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize, +					  &mdp->tx_desc_dma, GFP_KERNEL);  	if (!mdp->tx_ring)  		goto ring_free;  	return 0; @@ -1527,7 +1528,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)  			mdp->rx_skbuff[entry] = NULL;  			if (mdp->cd->rpadir)  				skb_reserve(skb, NET_IP_ALIGN); -			dma_unmap_single(&ndev->dev, dma_addr, +			dma_unmap_single(&mdp->pdev->dev, dma_addr,  					 ALIGN(mdp->rx_buf_sz, 32),  					 DMA_FROM_DEVICE);  			skb_put(skb, pkt_len); @@ -1555,9 +1556,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)  			if (skb == NULL)  				break;	/* Better luck next round. */  			sh_eth_set_receive_align(skb); -			dma_addr = dma_map_single(&ndev->dev, skb->data, +			dma_addr = dma_map_single(&mdp->pdev->dev, skb->data,  						  buf_len, DMA_FROM_DEVICE); -			if (dma_mapping_error(&ndev->dev, dma_addr)) { +			if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {  				kfree_skb(skb);  				break;  			} @@ -1891,6 +1892,16 @@ static int sh_eth_phy_init(struct net_device *ndev)  		return PTR_ERR(phydev);  	} +	/* mask with MAC supported features */ +	if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) { +		int err = phy_set_max_speed(phydev, SPEED_100); +		if (err) { +			netdev_err(ndev, "failed to limit PHY to 100 Mbit/s\n"); +			phy_disconnect(phydev); +			return err; +		} +	} +  	phy_attached_info(phydev);  	return 0; @@ -2441,9 +2452,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)  	/* soft swap. */  	if (!mdp->cd->hw_swap)  		sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2); -	dma_addr = dma_map_single(&ndev->dev, skb->data, skb->len, +	dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len,  				  DMA_TO_DEVICE); -	if (dma_mapping_error(&ndev->dev, dma_addr)) { +	if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {  		kfree_skb(skb);  		return NETDEV_TX_OK;  	} @@ -3214,18 +3225,37 @@ static int sh_eth_drv_probe(struct platform_device *pdev)  	/* ioremap the TSU registers */  	if (mdp->cd->tsu) {  		struct resource *rtsu; +  		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); -		mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu); -		if (IS_ERR(mdp->tsu_addr)) { -			ret = PTR_ERR(mdp->tsu_addr); +		if (!rtsu) { +			dev_err(&pdev->dev, "no TSU resource\n"); +			ret = -ENODEV; +			goto out_release; +		} +		/* We can only request the  TSU region  for the first port +		 * of the two  sharing this TSU for the probe to succeed... 
+		 */ +		if (devno % 2 == 0 && +		    !devm_request_mem_region(&pdev->dev, rtsu->start, +					     resource_size(rtsu), +					     dev_name(&pdev->dev))) { +			dev_err(&pdev->dev, "can't request TSU resource.\n"); +			ret = -EBUSY; +			goto out_release; +		} +		mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start, +					     resource_size(rtsu)); +		if (!mdp->tsu_addr) { +			dev_err(&pdev->dev, "TSU region ioremap() failed.\n"); +			ret = -ENOMEM;  			goto out_release;  		}  		mdp->port = devno % 2;  		ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;  	} -	/* initialize first or needed device */ -	if (!devno || pd->needs_init) { +	/* Need to init only the first port of the two sharing a TSU */ +	if (devno % 2 == 0) {  		if (mdp->cd->chip_reset)  			mdp->cd->chip_reset(ndev); diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 0ea7e16f2e6e..9937a2450e57 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -77,6 +77,7 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,  	}  	if (buffer->flags & EFX_TX_BUF_SKB) { +		EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);  		(*pkts_compl)++;  		(*bytes_compl) += buffer->skb->len;  		dev_consume_skb_any((struct sk_buff *)buffer->skb); @@ -426,12 +427,14 @@ static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,  static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)  {  	struct efx_tx_buffer *buffer; +	unsigned int bytes_compl = 0; +	unsigned int pkts_compl = 0;  	/* Work backwards until we hit the original insert pointer value */  	while (tx_queue->insert_count != tx_queue->write_count) {  		--tx_queue->insert_count;  		buffer = __efx_tx_queue_get_insert_buffer(tx_queue); -		efx_dequeue_buffer(tx_queue, buffer, NULL, NULL); +		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);  	}  } diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index e1e5ac053760..ce2ea2d491ac 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -409,7 +409,7 @@ struct stmmac_desc_ops {  	/* get timestamp value */  	 u64(*get_timestamp) (void *desc, u32 ats);  	/* get rx timestamp status */ -	int (*get_rx_timestamp_status) (void *desc, u32 ats); +	int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats);  	/* Display ring */  	void (*display_ring)(void *head, unsigned int size, bool rx);  	/* set MSS via context descriptor */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c index 61cb24810d10..9e6db16af663 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c @@ -1,8 +1,8 @@  /*   * dwmac-stm32.c - DWMAC Specific Glue layer for STM32 MCU   * - * Copyright (C) Alexandre Torgue 2015 - * Author:  Alexandre Torgue <[email protected]> + * Copyright (C) STMicroelectronics SA 2017 + * Author:  Alexandre Torgue <[email protected]> for STMicroelectronics.   
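
The sfc tx.c hunk above makes efx_enqueue_unwind() pass real packet/byte counters into efx_dequeue_buffer() instead of NULL, since the dequeue path now warns when an SKB buffer is completed without them. Below is a small compilable sketch of that pattern, walking a ring backwards while accumulating completion counters; the structures and sizes are invented for the example.

```c
/* Illustrative only: unwinding queued buffers while feeding real
 * completion counters, as efx_enqueue_unwind() now does instead of
 * passing NULL. Hypothetical structures. */
#include <stdio.h>

struct buf { unsigned int is_skb; unsigned int len; };

static void dequeue_buffer(struct buf *b, unsigned int *pkts,
			   unsigned int *bytes)
{
	if (b->is_skb) {
		/* the driver warns here if the counters are missing */
		(*pkts)++;
		*bytes += b->len;
	}
	b->len = 0;
}

int main(void)
{
	struct buf ring[3] = { {1, 1500}, {0, 0}, {1, 60} };
	unsigned int pkts = 0, bytes = 0;
	int i;

	for (i = 2; i >= 0; i--)	/* work backwards, like the unwind */
		dequeue_buffer(&ring[i], &pkts, &bytes);

	printf("unwound %u packets, %u bytes\n", pkts, bytes);
	return 0;
}
```
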
* License terms:  GNU General Public License (GPL), version 2   *   */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index e5ff734d4f9b..9eb7f65d8000 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -808,8 +808,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)  			 val, reg);  	if (gmac->variant->soc_has_internal_phy) { -		if (of_property_read_bool(priv->plat->phy_node, -					  "allwinner,leds-active-low")) +		if (of_property_read_bool(node, "allwinner,leds-active-low"))  			reg |= H3_EPHY_LED_POL;  		else  			reg &= ~H3_EPHY_LED_POL; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 4b286e27c4ca..7e089bf906b4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -258,7 +258,8 @@ static int dwmac4_rx_check_timestamp(void *desc)  	return ret;  } -static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats) +static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc, +						 u32 ats)  {  	struct dma_desc *p = (struct dma_desc *)desc;  	int ret = -EINVAL; @@ -270,7 +271,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)  			/* Check if timestamp is OK from context descriptor */  			do { -				ret = dwmac4_rx_check_timestamp(desc); +				ret = dwmac4_rx_check_timestamp(next_desc);  				if (ret < 0)  					goto exit;  				i++; diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 7546b3664113..2a828a312814 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -400,7 +400,8 @@ static u64 enh_desc_get_timestamp(void *desc, u32 ats)  	return ns;  } -static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats) +static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc, +					    u32 ats)  {  	if (ats) {  		struct dma_extended_desc *p = (struct dma_extended_desc *)desc; diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index f817f8f36569..db4cee57bb24 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -265,7 +265,7 @@ static u64 ndesc_get_timestamp(void *desc, u32 ats)  	return ns;  } -static int ndesc_get_rx_timestamp_status(void *desc, u32 ats) +static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats)  {  	struct dma_desc *p = (struct dma_desc *)desc; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index 721b61655261..08c19ebd5306 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -34,6 +34,7 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,  {  	u32 value = readl(ioaddr + PTP_TCR);  	unsigned long data; +	u32 reg_value;  	/* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second  	 *	formula = (1/ptp_clock) * 1000000000 @@ -50,10 +51,11 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,  	data &= PTP_SSIR_SSINC_MASK; +	reg_value = data;  	if (gmac4) -		data = data << GMAC4_PTP_SSIR_SSINC_SHIFT; +		reg_value <<= GMAC4_PTP_SSIR_SSINC_SHIFT; -	writel(data, ioaddr + 
PTP_SSIR); +	writel(reg_value, ioaddr + PTP_SSIR);  	return data;  } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index f63c2ddced3c..c0af0bc4e714 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -364,9 +364,15 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)  bool stmmac_eee_init(struct stmmac_priv *priv)  {  	struct net_device *ndev = priv->dev; +	int interface = priv->plat->interface;  	unsigned long flags;  	bool ret = false; +	if ((interface != PHY_INTERFACE_MODE_MII) && +	    (interface != PHY_INTERFACE_MODE_GMII) && +	    !phy_interface_mode_is_rgmii(interface)) +		goto out; +  	/* Using PCS we cannot dial with the phy registers at this stage  	 * so we do not support extra feature like EEE.  	 */ @@ -482,7 +488,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,  		desc = np;  	/* Check if timestamp is available */ -	if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) { +	if (priv->hw->desc->get_rx_timestamp_status(p, np, priv->adv_ts)) {  		ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);  		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);  		shhwtstamp = skb_hwtstamps(skb); @@ -2588,6 +2594,7 @@ static int stmmac_open(struct net_device *dev)  	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);  	priv->rx_copybreak = STMMAC_RX_COPYBREAK; +	priv->mss = 0;  	ret = alloc_dma_desc_resources(priv);  	if (ret < 0) { diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 83e6f76eb965..33949248c829 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -995,8 +995,8 @@ static int rhine_init_one_common(struct device *hwdev, u32 quirks,  	else  		name = "Rhine III"; -	netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n", -		    name, (long)ioaddr, dev->dev_addr, rp->irq); +	netdev_info(dev, "VIA %s at %p, %pM, IRQ %d\n", +		    name, ioaddr, dev->dev_addr, rp->irq);  	dev_set_drvdata(hwdev, dev); diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig index 6d68c8a8f4f2..da4ec575ccf9 100644 --- a/drivers/net/ethernet/xilinx/Kconfig +++ b/drivers/net/ethernet/xilinx/Kconfig @@ -34,6 +34,7 @@ config XILINX_AXI_EMAC  config XILINX_LL_TEMAC  	tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"  	depends on (PPC || MICROBLAZE) +	depends on !64BIT || BROKEN  	select PHYLIB  	---help---  	  This driver supports the Xilinx 10/100/1000 LocalLink TEMAC diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index b718a02a6bb6..0a48b3073d3d 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -825,6 +825,13 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,  	if (IS_ERR(rt))  		return PTR_ERR(rt); +	if (skb_dst(skb)) { +		int mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr) - +			  GENEVE_BASE_HLEN - info->options_len - 14; + +		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); +	} +  	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);  	if (geneve->collect_md) {  		tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); @@ -864,6 +871,13 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,  	if (IS_ERR(dst))  		return PTR_ERR(dst); +	if (skb_dst(skb)) { +		int mtu = dst_mtu(dst) - sizeof(struct ipv6hdr) - +			  GENEVE_BASE_HLEN - info->options_len - 14; + +		
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); +	} +  	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);  	if (geneve->collect_md) {  		prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c index 8483f03d5a41..1ab97d99b9ba 100644 --- a/drivers/net/hippi/rrunner.c +++ b/drivers/net/hippi/rrunner.c @@ -1379,8 +1379,8 @@ static int rr_close(struct net_device *dev)  			    rrpriv->info_dma);  	rrpriv->info = NULL; -	free_irq(pdev->irq, dev);  	spin_unlock_irqrestore(&rrpriv->lock, flags); +	free_irq(pdev->irq, dev);  	return 0;  } diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 11c1e7950fe5..77cc4fbaeace 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -393,6 +393,7 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)  		.flowi4_oif = dev->ifindex,  		.flowi4_tos = RT_TOS(ip4h->tos),  		.flowi4_flags = FLOWI_FLAG_ANYSRC, +		.flowi4_mark = skb->mark,  		.daddr = ip4h->daddr,  		.saddr = ip4h->saddr,  	}; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index a178c5efd33e..a0f2be81d52e 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1444,9 +1444,14 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,  	return 0;  unregister_netdev: +	/* macvlan_uninit would free the macvlan port */  	unregister_netdevice(dev); +	return err;  destroy_macvlan_port: -	if (create) +	/* the macvlan port may be freed by macvlan_uninit when fail to register. +	 * so we destroy the macvlan port only when it's valid. +	 */ +	if (create && macvlan_port_get_rtnl(dev))  		macvlan_port_destroy(port->dev);  	return err;  } diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index 5f93e6add563..e911e4990b20 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -239,14 +239,10 @@ static int at803x_resume(struct phy_device *phydev)  {  	int value; -	mutex_lock(&phydev->lock); -  	value = phy_read(phydev, MII_BMCR);  	value &= ~(BMCR_PDOWN | BMCR_ISOLATE);  	phy_write(phydev, MII_BMCR, value); -	mutex_unlock(&phydev->lock); -  	return 0;  } diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 4d02b27df044..82104edca393 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -637,6 +637,10 @@ static int m88e1510_config_aneg(struct phy_device *phydev)  	if (err < 0)  		goto error; +	/* Do not touch the fiber page if we're in copper->sgmii mode */ +	if (phydev->interface == PHY_INTERFACE_MODE_SGMII) +		return 0; +  	/* Then the fiber link */  	err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);  	if (err < 0) @@ -875,6 +879,8 @@ static int m88e1510_config_init(struct phy_device *phydev)  	/* SGMII-to-Copper mode initialization */  	if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { +		u32 pause; +  		/* Select page 18 */  		err = marvell_set_page(phydev, 18);  		if (err < 0) @@ -898,6 +904,16 @@ static int m88e1510_config_init(struct phy_device *phydev)  		err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);  		if (err < 0)  			return err; + +		/* There appears to be a bug in the 88e1512 when used in +		 * SGMII to copper mode, where the AN advertisment register +		 * clears the pause bits each time a negotiation occurs. +		 * This means we can never be truely sure what was advertised, +		 * so disable Pause support. 
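
The geneve hunks earlier in this diff update the inner route's PMTU from the outer path: the usable inner MTU is the outer MTU minus the outer IP header, the UDP plus Geneve base header, any tunnel options, and the 14-byte inner Ethernet header. A compilable sketch of that arithmetic follows; the header sizes, in particular treating GENEVE_BASE_HLEN as UDP (8) + Geneve base header (8), are stated assumptions for the example.

```c
/* Illustrative only: the inner-MTU arithmetic behind the geneve PMTU
 * update. Header sizes below are assumptions for the example. */
#include <stdio.h>

#define IPV4_HLEN	 20
#define IPV6_HLEN	 40
#define GENEVE_BASE_HLEN 16	/* assumed: UDP (8) + Geneve base hdr (8) */
#define ETH_HLEN	 14

static int geneve_inner_mtu(int outer_mtu, int ip_hlen, int opts_len)
{
	return outer_mtu - ip_hlen - GENEVE_BASE_HLEN - opts_len - ETH_HLEN;
}

int main(void)
{
	printf("IPv4, no options: %d\n", geneve_inner_mtu(1500, IPV4_HLEN, 0));
	printf("IPv6, 8B options: %d\n", geneve_inner_mtu(1500, IPV6_HLEN, 8));
	return 0;
}
```
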
+		 */ +		pause = SUPPORTED_Pause | SUPPORTED_Asym_Pause; +		phydev->supported &= ~pause; +		phydev->advertising &= ~pause;  	}  	return m88e1121_config_init(phydev); @@ -2069,7 +2085,7 @@ static struct phy_driver marvell_drivers[] = {  		.flags = PHY_HAS_INTERRUPT,  		.probe = marvell_probe,  		.config_init = &m88e1145_config_init, -		.config_aneg = &marvell_config_aneg, +		.config_aneg = &m88e1101_config_aneg,  		.read_status = &genphy_read_status,  		.ack_interrupt = &marvell_ack_interrupt,  		.config_intr = &marvell_config_intr, diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index aebc08beceba..21b3f36e023a 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -16,6 +16,7 @@   * link takes priority and the other port is completely locked out.   */  #include <linux/phy.h> +#include <linux/marvell_phy.h>  enum {  	MV_PCS_BASE_T		= 0x0000, @@ -338,7 +339,7 @@ static int mv3310_read_status(struct phy_device *phydev)  static struct phy_driver mv3310_drivers[] = {  	{  		.phy_id		= 0x002b09aa, -		.phy_id_mask	= 0xffffffff, +		.phy_id_mask	= MARVELL_PHY_ID_MASK,  		.name		= "mv88x3310",  		.features	= SUPPORTED_10baseT_Full |  				  SUPPORTED_100baseT_Full | @@ -360,7 +361,7 @@ static struct phy_driver mv3310_drivers[] = {  module_phy_driver(mv3310_drivers);  static struct mdio_device_id __maybe_unused mv3310_tbl[] = { -	{ 0x002b09aa, 0xffffffff }, +	{ 0x002b09aa, MARVELL_PHY_ID_MASK },  	{ },  };  MODULE_DEVICE_TABLE(mdio, mv3310_tbl); diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c index 135296508a7e..6425ce04d3f9 100644 --- a/drivers/net/phy/mdio-sun4i.c +++ b/drivers/net/phy/mdio-sun4i.c @@ -118,8 +118,10 @@ static int sun4i_mdio_probe(struct platform_device *pdev)  	data->regulator = devm_regulator_get(&pdev->dev, "phy");  	if (IS_ERR(data->regulator)) { -		if (PTR_ERR(data->regulator) == -EPROBE_DEFER) -			return -EPROBE_DEFER; +		if (PTR_ERR(data->regulator) == -EPROBE_DEFER) { +			ret = -EPROBE_DEFER; +			goto err_out_free_mdiobus; +		}  		dev_info(&pdev->dev, "no regulator found\n");  		data->regulator = NULL; diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c index bfd3090fb055..07c6048200c6 100644 --- a/drivers/net/phy/mdio-xgene.c +++ b/drivers/net/phy/mdio-xgene.c @@ -194,8 +194,11 @@ static int xgene_mdio_reset(struct xgene_mdio_pdata *pdata)  	}  	ret = xgene_enet_ecc_init(pdata); -	if (ret) +	if (ret) { +		if (pdata->dev->of_node) +			clk_disable_unprepare(pdata->clk);  		return ret; +	}  	xgene_gmac_reset(pdata);  	return 0; @@ -388,8 +391,10 @@ static int xgene_mdio_probe(struct platform_device *pdev)  		return ret;  	mdio_bus = mdiobus_alloc(); -	if (!mdio_bus) -		return -ENOMEM; +	if (!mdio_bus) { +		ret = -ENOMEM; +		goto out_clk; +	}  	mdio_bus->name = "APM X-Gene MDIO bus"; @@ -418,7 +423,7 @@ static int xgene_mdio_probe(struct platform_device *pdev)  		mdio_bus->phy_mask = ~0;  		ret = mdiobus_register(mdio_bus);  		if (ret) -			goto out; +			goto out_mdiobus;  		acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_HANDLE(dev), 1,  				    acpi_register_phy, NULL, mdio_bus, NULL); @@ -426,16 +431,20 @@ static int xgene_mdio_probe(struct platform_device *pdev)  	}  	if (ret) -		goto out; +		goto out_mdiobus;  	pdata->mdio_bus = mdio_bus;  	xgene_mdio_status = true;  	return 0; -out: +out_mdiobus:  	mdiobus_free(mdio_bus); +out_clk: +	if (dev->of_node) +		clk_disable_unprepare(pdata->clk); +  	return ret;  } diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 
2df7b62c1a36..54d00a1d2bef 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -270,6 +270,7 @@ static void of_mdiobus_link_mdiodev(struct mii_bus *bus,  		if (addr == mdiodev->addr) {  			dev->of_node = child; +			dev->fwnode = of_fwnode_handle(child);  			return;  		}  	} diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c index 1ea69b7585d9..842eb871a6e3 100644 --- a/drivers/net/phy/meson-gxl.c +++ b/drivers/net/phy/meson-gxl.c @@ -22,6 +22,7 @@  #include <linux/ethtool.h>  #include <linux/phy.h>  #include <linux/netdevice.h> +#include <linux/bitfield.h>  static int meson_gxl_config_init(struct phy_device *phydev)  { @@ -50,6 +51,77 @@ static int meson_gxl_config_init(struct phy_device *phydev)  	return 0;  } +/* This function is provided to cope with the possible failures of this PHY + * during the aneg process. When aneg fails, the PHY reports that aneg is done + * but the value found in MII_LPA is wrong: + *  - Early failures: MII_LPA is just 0x0001. If MII_EXPANSION reports that + *    the link partner (LP) supports aneg but the LP never acked our base + *    code word, it is likely that we never sent it to begin with. + *  - Late failures: MII_LPA is filled with a value which seems to make sense + *    but it actually is not what the LP is advertising. It seems that we + *    can detect this using a magic bit in the WOL bank (reg 12 - bit 12). + *    If this particular bit is not set when aneg is reported being done, + *    it means MII_LPA is likely to be wrong. + * + * In both cases, forcing a restart of the aneg process solves the problem. + * When this failure happens, the first retry is usually successful but, + * in some cases, it may take up to 6 retries to get a decent result. + */ +static int meson_gxl_read_status(struct phy_device *phydev) +{ +	int ret, wol, lpa, exp; + +	if (phydev->autoneg == AUTONEG_ENABLE) { +		ret = genphy_aneg_done(phydev); +		if (ret < 0) +			return ret; +		else if (!ret) +			goto read_status_continue; + +		/* Need to access WOL bank, make sure the access is open */ +		ret = phy_write(phydev, 0x14, 0x0000); +		if (ret) +			return ret; +		ret = phy_write(phydev, 0x14, 0x0400); +		if (ret) +			return ret; +		ret = phy_write(phydev, 0x14, 0x0000); +		if (ret) +			return ret; +		ret = phy_write(phydev, 0x14, 0x0400); +		if (ret) +			return ret; + +		/* Request LPI_STATUS WOL register */ +		ret = phy_write(phydev, 0x14, 0x8D80); +		if (ret) +			return ret; + +		/* Read LPI_STATUS value */ +		wol = phy_read(phydev, 0x15); +		if (wol < 0) +			return wol; + +		lpa = phy_read(phydev, MII_LPA); +		if (lpa < 0) +			return lpa; + +		exp = phy_read(phydev, MII_EXPANSION); +		if (exp < 0) +			return exp; + +		if (!(wol & BIT(12)) || +		    ((exp & EXPANSION_NWAY) && !(lpa & LPA_LPACK))) { +			/* Looks like aneg failed after all */ +			phydev_dbg(phydev, "LPA corruption - aneg restart\n"); +			return genphy_restart_aneg(phydev); +		} +	} + +read_status_continue: +	return genphy_read_status(phydev); +} +  static struct phy_driver meson_gxl_phy[] = {  	{  		.phy_id		= 0x01814400, @@ -60,7 +132,7 @@ static struct phy_driver meson_gxl_phy[] = {  		.config_init	= meson_gxl_config_init,  		.config_aneg	= genphy_config_aneg,  		.aneg_done      = genphy_aneg_done, -		.read_status	= genphy_read_status, +		.read_status	= meson_gxl_read_status,  		.suspend        = genphy_suspend,  		.resume         = genphy_resume,  	}, diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index fdb43dd9b5cd..422ff6333c52 --- 
a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -496,16 +496,18 @@ static int ksz9031_of_load_skew_values(struct phy_device *phydev,  	return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval);  } +/* Center KSZ9031RNX FLP timing at 16ms. */  static int ksz9031_center_flp_timing(struct phy_device *phydev)  {  	int result; -	/* Center KSZ9031RNX FLP timing at 16ms. */  	result = ksz9031_extended_write(phydev, OP_DATA, 0,  					MII_KSZ9031RN_FLP_BURST_TX_HI, 0x0006); +	if (result) +		return result; +  	result = ksz9031_extended_write(phydev, OP_DATA, 0,  					MII_KSZ9031RN_FLP_BURST_TX_LO, 0x1A80); -  	if (result)  		return result; @@ -622,6 +624,7 @@ static int ksz9031_read_status(struct phy_device *phydev)  		phydev->link = 0;  		if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))  			phydev->drv->config_intr(phydev); +		return genphy_config_aneg(phydev);  	}  	return 0; diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 2b1e67bc1e73..ed10d1fc8f59 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -828,7 +828,6 @@ EXPORT_SYMBOL(phy_stop);   */  void phy_start(struct phy_device *phydev)  { -	bool do_resume = false;  	int err = 0;  	mutex_lock(&phydev->lock); @@ -841,6 +840,9 @@ void phy_start(struct phy_device *phydev)  		phydev->state = PHY_UP;  		break;  	case PHY_HALTED: +		/* if phy was suspended, bring the physical link up again */ +		phy_resume(phydev); +  		/* make sure interrupts are re-enabled for the PHY */  		if (phydev->irq != PHY_POLL) {  			err = phy_enable_interrupts(phydev); @@ -849,17 +851,12 @@ void phy_start(struct phy_device *phydev)  		}  		phydev->state = PHY_RESUMING; -		do_resume = true;  		break;  	default:  		break;  	}  	mutex_unlock(&phydev->lock); -	/* if phy was suspended, bring the physical link up again */ -	if (do_resume) -		phy_resume(phydev); -  	phy_trigger_machine(phydev, true);  }  EXPORT_SYMBOL(phy_start); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 67f25ac29025..b15b31ca2618 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -135,7 +135,9 @@ static int mdio_bus_phy_resume(struct device *dev)  	if (!mdio_bus_phy_may_suspend(phydev))  		goto no_resume; +	mutex_lock(&phydev->lock);  	ret = phy_resume(phydev); +	mutex_unlock(&phydev->lock);  	if (ret < 0)  		return ret; @@ -1026,7 +1028,9 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,  	if (err)  		goto error; +	mutex_lock(&phydev->lock);  	phy_resume(phydev); +	mutex_unlock(&phydev->lock);  	phy_led_triggers_register(phydev);  	return err; @@ -1157,6 +1161,8 @@ int phy_resume(struct phy_device *phydev)  	struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);  	int ret = 0; +	WARN_ON(!mutex_is_locked(&phydev->lock)); +  	if (phydev->drv && phydrv->resume)  		ret = phydrv->resume(phydev); @@ -1639,13 +1645,9 @@ int genphy_resume(struct phy_device *phydev)  {  	int value; -	mutex_lock(&phydev->lock); -  	value = phy_read(phydev, MII_BMCR);  	phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); -	mutex_unlock(&phydev->lock); -  	return 0;  }  EXPORT_SYMBOL(genphy_resume); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index e3bbc70372d3..249ce5cbea22 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -526,6 +526,7 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np,  	pl->link_config.pause = MLO_PAUSE_AN;  	pl->link_config.speed = SPEED_UNKNOWN;  	pl->link_config.duplex = 
DUPLEX_UNKNOWN; +	pl->link_config.an_enabled = true;  	pl->ops = ops;  	__set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); @@ -773,6 +774,7 @@ void phylink_stop(struct phylink *pl)  		sfp_upstream_stop(pl->sfp_bus);  	set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); +	queue_work(system_power_efficient_wq, &pl->resolve);  	flush_work(&pl->resolve);  }  EXPORT_SYMBOL_GPL(phylink_stop); @@ -950,6 +952,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,  	mutex_lock(&pl->state_mutex);  	/* Configure the MAC to match the new settings */  	linkmode_copy(pl->link_config.advertising, our_kset.link_modes.advertising); +	pl->link_config.interface = config.interface;  	pl->link_config.speed = our_kset.base.speed;  	pl->link_config.duplex = our_kset.base.duplex;  	pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE; @@ -1293,6 +1296,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)  		switch (cmd) {  		case SIOCGMIIPHY:  			mii->phy_id = pl->phydev->mdio.addr; +			/* fall through */  		case SIOCGMIIREG:  			ret = phylink_phy_read(pl, mii->phy_id, mii->reg_num); @@ -1315,6 +1319,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)  		switch (cmd) {  		case SIOCGMIIPHY:  			mii->phy_id = 0; +			/* fall through */  		case SIOCGMIIREG:  			ret = phylink_mii_read(pl, mii->phy_id, mii->reg_num); @@ -1426,9 +1431,8 @@ static void phylink_sfp_link_down(void *upstream)  	WARN_ON(!lockdep_rtnl_is_held());  	set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); +	queue_work(system_power_efficient_wq, &pl->resolve);  	flush_work(&pl->resolve); - -	netif_carrier_off(pl->netdev);  }  static void phylink_sfp_link_up(void *upstream) diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 8a1b1f4c1b7c..ab64a142b832 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -356,7 +356,8 @@ EXPORT_SYMBOL_GPL(sfp_register_upstream);  void sfp_unregister_upstream(struct sfp_bus *bus)  {  	rtnl_lock(); -	sfp_unregister_bus(bus); +	if (bus->sfp) +		sfp_unregister_bus(bus);  	bus->upstream = NULL;  	bus->netdev = NULL;  	rtnl_unlock(); @@ -459,7 +460,8 @@ EXPORT_SYMBOL_GPL(sfp_register_socket);  void sfp_unregister_socket(struct sfp_bus *bus)  {  	rtnl_lock(); -	sfp_unregister_bus(bus); +	if (bus->netdev) +		sfp_unregister_bus(bus);  	bus->sfp_dev = NULL;  	bus->sfp = NULL;  	bus->socket_ops = NULL; diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index e381811e5f11..9dfc1c4c954f 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -351,12 +351,13 @@ static void sfp_sm_link_check_los(struct sfp *sfp)  {  	unsigned int los = sfp->state & SFP_F_LOS; -	/* FIXME: what if neither SFP_OPTIONS_LOS_INVERTED nor -	 * SFP_OPTIONS_LOS_NORMAL are set?  For now, we assume -	 * the same as SFP_OPTIONS_LOS_NORMAL set. +	/* If neither SFP_OPTIONS_LOS_INVERTED nor SFP_OPTIONS_LOS_NORMAL +	 * are set, we assume that no LOS signal is available.  	 
*/ -	if (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED) +	if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED))  		los ^= SFP_F_LOS; +	else if (!(sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL))) +		los = 0;  	if (los)  		sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0); @@ -364,6 +365,22 @@ static void sfp_sm_link_check_los(struct sfp *sfp)  		sfp_sm_link_up(sfp);  } +static bool sfp_los_event_active(struct sfp *sfp, unsigned int event) +{ +	return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) && +		event == SFP_E_LOS_LOW) || +	       (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) && +		event == SFP_E_LOS_HIGH); +} + +static bool sfp_los_event_inactive(struct sfp *sfp, unsigned int event) +{ +	return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) && +		event == SFP_E_LOS_HIGH) || +	       (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) && +		event == SFP_E_LOS_LOW); +} +  static void sfp_sm_fault(struct sfp *sfp, bool warn)  {  	if (sfp->sm_retries && !--sfp->sm_retries) { @@ -470,6 +487,11 @@ static int sfp_sm_mod_probe(struct sfp *sfp)  		return -EINVAL;  	} +	/* If the module requires address swap mode, warn about it */ +	if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE) +		dev_warn(sfp->dev, +			 "module address swap to access page 0xA2 is not supported.\n"); +  	return sfp_module_insert(sfp->sfp_bus, &sfp->id);  } @@ -581,9 +603,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)  	case SFP_S_WAIT_LOS:  		if (event == SFP_E_TX_FAULT)  			sfp_sm_fault(sfp, true); -		else if (event == -			 (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ? -			  SFP_E_LOS_HIGH : SFP_E_LOS_LOW)) +		else if (sfp_los_event_inactive(sfp, event))  			sfp_sm_link_up(sfp);  		break; @@ -591,9 +611,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)  		if (event == SFP_E_TX_FAULT) {  			sfp_sm_link_down(sfp);  			sfp_sm_fault(sfp, true); -		} else if (event == -			   (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ? -			    SFP_E_LOS_LOW : SFP_E_LOS_HIGH)) { +		} else if (sfp_los_event_active(sfp, event)) {  			sfp_sm_link_down(sfp);  			sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0);  		} @@ -639,7 +657,8 @@ static int sfp_module_info(struct sfp *sfp, struct ethtool_modinfo *modinfo)  {  	/* locking... 
and check module is present */ -	if (sfp->id.ext.sff8472_compliance) { +	if (sfp->id.ext.sff8472_compliance && +	    !(sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE)) {  		modinfo->type = ETH_MODULE_SFF_8472;  		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;  	} else { diff --git a/drivers/net/tap.c b/drivers/net/tap.c index e9489b88407c..0a886fda0129 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -829,8 +829,11 @@ static ssize_t tap_do_read(struct tap_queue *q,  	DEFINE_WAIT(wait);  	ssize_t ret = 0; -	if (!iov_iter_count(to)) +	if (!iov_iter_count(to)) { +		if (skb) +			kfree_skb(skb);  		return 0; +	}  	if (skb)  		goto put; @@ -1154,11 +1157,14 @@ static int tap_recvmsg(struct socket *sock, struct msghdr *m,  		       size_t total_len, int flags)  {  	struct tap_queue *q = container_of(sock, struct tap_queue, sock); +	struct sk_buff *skb = m->msg_control;  	int ret; -	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) +	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) { +		if (skb) +			kfree_skb(skb);  		return -EINVAL; -	ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, -			  m->msg_control); +	} +	ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb);  	if (ret > total_len) {  		m->msg_flags |= MSG_TRUNC;  		ret = flags & MSG_TRUNC ? ret : total_len; diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c index 228d4aa6d9ae..ca5e375de27c 100644 --- a/drivers/net/thunderbolt.c +++ b/drivers/net/thunderbolt.c @@ -335,7 +335,7 @@ static void tbnet_free_buffers(struct tbnet_ring *ring)  		if (ring->ring->is_tx) {  			dir = DMA_TO_DEVICE;  			order = 0; -			size = tbnet_frame_size(tf); +			size = TBNET_FRAME_SIZE;  		} else {  			dir = DMA_FROM_DEVICE;  			order = TBNET_RX_PAGE_ORDER; @@ -512,6 +512,7 @@ err_free:  static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)  {  	struct tbnet_ring *ring = &net->tx_ring; +	struct device *dma_dev = tb_ring_dma_device(ring->ring);  	struct tbnet_frame *tf;  	unsigned int index; @@ -522,7 +523,9 @@ static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)  	tf = &ring->frames[index];  	tf->frame.size = 0; -	tf->frame.buffer_phy = 0; + +	dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy, +				tbnet_frame_size(tf), DMA_TO_DEVICE);  	return tf;  } @@ -531,13 +534,8 @@ static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,  			      bool canceled)  {  	struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame); -	struct device *dma_dev = tb_ring_dma_device(ring);  	struct tbnet *net = netdev_priv(tf->dev); -	dma_unmap_page(dma_dev, tf->frame.buffer_phy, tbnet_frame_size(tf), -		       DMA_TO_DEVICE); -	tf->frame.buffer_phy = 0; -  	/* Return buffer to the ring */  	net->tx_ring.prod++; @@ -548,10 +546,12 @@ static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,  static int tbnet_alloc_tx_buffers(struct tbnet *net)  {  	struct tbnet_ring *ring = &net->tx_ring; +	struct device *dma_dev = tb_ring_dma_device(ring->ring);  	unsigned int i;  	for (i = 0; i < TBNET_RING_SIZE; i++) {  		struct tbnet_frame *tf = &ring->frames[i]; +		dma_addr_t dma_addr;  		tf->page = alloc_page(GFP_KERNEL);  		if (!tf->page) { @@ -559,7 +559,17 @@ static int tbnet_alloc_tx_buffers(struct tbnet *net)  			return -ENOMEM;  		} +		dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE, +					DMA_TO_DEVICE); +		if (dma_mapping_error(dma_dev, dma_addr)) { +			__free_page(tf->page); +			tf->page = NULL; +			tbnet_free_buffers(ring); +			return -ENOMEM; +		} +  		tf->dev = net->dev; +		
tf->frame.buffer_phy = dma_addr;  		tf->frame.callback = tbnet_tx_callback;  		tf->frame.sof = TBIP_PDF_FRAME_START;  		tf->frame.eof = TBIP_PDF_FRAME_END; @@ -881,19 +891,6 @@ static int tbnet_stop(struct net_device *dev)  	return 0;  } -static bool tbnet_xmit_map(struct device *dma_dev, struct tbnet_frame *tf) -{ -	dma_addr_t dma_addr; - -	dma_addr = dma_map_page(dma_dev, tf->page, 0, tbnet_frame_size(tf), -				DMA_TO_DEVICE); -	if (dma_mapping_error(dma_dev, dma_addr)) -		return false; - -	tf->frame.buffer_phy = dma_addr; -	return true; -} -  static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,  	struct tbnet_frame **frames, u32 frame_count)  { @@ -908,13 +905,14 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,  	if (skb->ip_summed != CHECKSUM_PARTIAL) {  		/* No need to calculate checksum so we just update the -		 * total frame count and map the frames for DMA. +		 * total frame count and sync the frames for DMA.  		 */  		for (i = 0; i < frame_count; i++) {  			hdr = page_address(frames[i]->page);  			hdr->frame_count = cpu_to_le32(frame_count); -			if (!tbnet_xmit_map(dma_dev, frames[i])) -				goto err_unmap; +			dma_sync_single_for_device(dma_dev, +				frames[i]->frame.buffer_phy, +				tbnet_frame_size(frames[i]), DMA_TO_DEVICE);  		}  		return true; @@ -983,21 +981,14 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,  	*tucso = csum_fold(wsum);  	/* Checksum is finally calculated and we don't touch the memory -	 * anymore, so DMA map the frames now. +	 * anymore, so DMA sync the frames now.  	 */  	for (i = 0; i < frame_count; i++) { -		if (!tbnet_xmit_map(dma_dev, frames[i])) -			goto err_unmap; +		dma_sync_single_for_device(dma_dev, frames[i]->frame.buffer_phy, +			tbnet_frame_size(frames[i]), DMA_TO_DEVICE);  	}  	return true; - -err_unmap: -	while (i--) -		dma_unmap_page(dma_dev, frames[i]->frame.buffer_phy, -			       tbnet_frame_size(frames[i]), DMA_TO_DEVICE); - -	return false;  }  static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num, diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 95749006d687..4f4a842a1c9c 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1952,8 +1952,11 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,  	tun_debug(KERN_INFO, tun, "tun_do_read\n"); -	if (!iov_iter_count(to)) +	if (!iov_iter_count(to)) { +		if (skb) +			kfree_skb(skb);  		return 0; +	}  	if (!skb) {  		/* Read frames from ring */ @@ -2069,22 +2072,24 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,  {  	struct tun_file *tfile = container_of(sock, struct tun_file, socket);  	struct tun_struct *tun = tun_get(tfile); +	struct sk_buff *skb = m->msg_control;  	int ret; -	if (!tun) -		return -EBADFD; +	if (!tun) { +		ret = -EBADFD; +		goto out_free_skb; +	}  	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {  		ret = -EINVAL; -		goto out; +		goto out_put_tun;  	}  	if (flags & MSG_ERRQUEUE) {  		ret = sock_recv_errqueue(sock->sk, m, total_len,  					 SOL_PACKET, TUN_TX_TIMESTAMP);  		goto out;  	} -	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, -			  m->msg_control); +	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, skb);  	if (ret > (ssize_t)total_len) {  		m->msg_flags |= MSG_TRUNC;  		ret = flags & MSG_TRUNC ? 
ret : total_len; @@ -2092,6 +2097,13 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,  out:  	tun_put(tun);  	return ret; + +out_put_tun: +	tun_put(tun); +out_free_skb: +	if (skb) +		kfree_skb(skb); +	return ret;  }  static int tun_peek_len(struct socket *sock) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index c750cf7c042b..728819feab44 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -261,9 +261,11 @@ static void qmi_wwan_netdev_setup(struct net_device *net)  		net->hard_header_len = 0;  		net->addr_len        = 0;  		net->flags           = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; +		set_bit(EVENT_NO_IP_ALIGN, &dev->flags);  		netdev_dbg(net, "mode: raw IP\n");  	} else if (!net->header_ops) { /* don't bother if already set */  		ether_setup(net); +		clear_bit(EVENT_NO_IP_ALIGN, &dev->flags);  		netdev_dbg(net, "mode: Ethernet\n");  	} @@ -1098,6 +1100,7 @@ static const struct usb_device_id products[] = {  	{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},  	{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},  	{QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, +	{QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)},	/* YUGA CLM920-NC5 */  	{QMI_FIXED_INTF(0x0846, 0x68a2, 8)},  	{QMI_FIXED_INTF(0x12d1, 0x140c, 1)},	/* Huawei E173 */  	{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},	/* Huawei E1820 */ @@ -1202,12 +1205,14 @@ static const struct usb_device_id products[] = {  	{QMI_FIXED_INTF(0x1199, 0x9079, 10)},	/* Sierra Wireless EM74xx */  	{QMI_FIXED_INTF(0x1199, 0x907b, 8)},	/* Sierra Wireless EM74xx */  	{QMI_FIXED_INTF(0x1199, 0x907b, 10)},	/* Sierra Wireless EM74xx */ +	{QMI_FIXED_INTF(0x1199, 0x9091, 8)},	/* Sierra Wireless EM7565 */  	{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},	/* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */  	{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},	/* Alcatel L800MA */  	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */  	{QMI_FIXED_INTF(0x2357, 0x9000, 4)},	/* TP-LINK MA260 */  	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)},	/* Telit LE922A */  	{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)},	/* Telit ME910 */ +	{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},	/* Telit ME910 dual modem */  	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */  	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)},	/* Telit LE920, LE920A4 */  	{QMI_FIXED_INTF(0x1c9e, 0x9801, 3)},	/* Telewell TW-3G HSPA+ */ diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 80348b6a8646..d56fe32bf48d 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -484,7 +484,10 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)  		return -ENOLINK;  	} -	skb = __netdev_alloc_skb_ip_align(dev->net, size, flags); +	if (test_bit(EVENT_NO_IP_ALIGN, &dev->flags)) +		skb = __netdev_alloc_skb(dev->net, size, flags); +	else +		skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);  	if (!skb) {  		netif_dbg(dev, rx_err, dev->net, "no rx skb\n");  		usbnet_defer_kevent (dev, EVENT_RX_MEMORY); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 19a985ef9104..559b215c0169 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -756,7 +756,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,  		int num_skb_frags;  		buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx); -		if (unlikely(!ctx)) { +		if (unlikely(!buf)) {  			pr_debug("%s: rx error: %d buffers out of %d missing\n",  				 dev->name, num_buf,  				 virtio16_to_cpu(vi->vdev, diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 
7ac487031b4b..31f4b7911ef8 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -874,8 +874,8 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],  static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,  			      const unsigned char *addr, union vxlan_addr ip, -			      __be16 port, __be32 src_vni, u32 vni, u32 ifindex, -			      u16 vid) +			      __be16 port, __be32 src_vni, __be32 vni, +			      u32 ifindex, u16 vid)  {  	struct vxlan_fdb *f;  	struct vxlan_rdst *rd = NULL; @@ -2155,6 +2155,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,  		}  		ndst = &rt->dst; +		if (skb_dst(skb)) { +			int mtu = dst_mtu(ndst) - VXLAN_HEADROOM; + +			skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, +						       skb, mtu); +		} +  		tos = ip_tunnel_ecn_encap(tos, old_iph, skb);  		ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);  		err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr), @@ -2190,6 +2197,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,  				goto out_unlock;  		} +		if (skb_dst(skb)) { +			int mtu = dst_mtu(ndst) - VXLAN6_HEADROOM; + +			skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, +						       skb, mtu); +		} +  		tos = ip_tunnel_ecn_encap(tos, old_iph, skb);  		ttl = ttl ? : ip6_dst_hoplimit(ndst);  		skb_scrub_packet(skb, xnet); @@ -3103,6 +3117,11 @@ static void vxlan_config_apply(struct net_device *dev,  		max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :  					   VXLAN_HEADROOM); +		if (max_mtu < ETH_MIN_MTU) +			max_mtu = ETH_MIN_MTU; + +		if (!changelink && !conf->mtu) +			dev->mtu = max_mtu;  	}  	if (dev->mtu > max_mtu) diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index 37b1e0d03e31..90a4ad9a2d08 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -494,18 +494,11 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/                              break;                      } -                    data = kmalloc(xc.len, GFP_KERNEL); -                    if (!data) { -                            ret = -ENOMEM; +                    data = memdup_user(xc.data, xc.len); +                    if (IS_ERR(data)) { +                            ret = PTR_ERR(data);                              break;                      } -                     -                    if(copy_from_user(data, xc.data, xc.len)) -                    { -                    	kfree(data); -                    	ret = -ENOMEM; -                    	break; -                    }                      printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data); diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c index dfb26f03c1a2..1b05b5d7a038 100644 --- a/drivers/net/wireless/ath/ath9k/channel.c +++ b/drivers/net/wireless/ath/ath9k/channel.c @@ -1113,7 +1113,7 @@ ath_chanctx_send_vif_ps_frame(struct ath_softc *sc, struct ath_vif *avp,  		if (!avp->assoc)  			return false; -		skb = ieee80211_nullfunc_get(sc->hw, vif); +		skb = ieee80211_nullfunc_get(sc->hw, vif, false);  		if (!skb)  			return false; diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index f7d228b5ba93..987f1252a3cf 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -384,6 +384,18 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)  		}  	} +	if (changed & IEEE80211_CONF_CHANGE_PS) { +		
list_for_each_entry(tmp, &wcn->vif_list, list) { +			vif = wcn36xx_priv_to_vif(tmp); +			if (hw->conf.flags & IEEE80211_CONF_PS) { +				if (vif->bss_conf.ps) /* ps allowed ? */ +					wcn36xx_pmc_enter_bmps_state(wcn, vif); +			} else { +				wcn36xx_pmc_exit_bmps_state(wcn, vif); +			} +		} +	} +  	mutex_unlock(&wcn->conf_mutex);  	return 0; @@ -747,17 +759,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,  		vif_priv->dtim_period = bss_conf->dtim_period;  	} -	if (changed & BSS_CHANGED_PS) { -		wcn36xx_dbg(WCN36XX_DBG_MAC, -			    "mac bss PS set %d\n", -			    bss_conf->ps); -		if (bss_conf->ps) { -			wcn36xx_pmc_enter_bmps_state(wcn, vif); -		} else { -			wcn36xx_pmc_exit_bmps_state(wcn, vif); -		} -	} -  	if (changed & BSS_CHANGED_BSSID) {  		wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n",  			    bss_conf->bssid); diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c index 589fe5f70971..1976b80c235f 100644 --- a/drivers/net/wireless/ath/wcn36xx/pmc.c +++ b/drivers/net/wireless/ath/wcn36xx/pmc.c @@ -45,8 +45,10 @@ int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,  	struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);  	if (WCN36XX_BMPS != vif_priv->pw_state) { -		wcn36xx_err("Not in BMPS mode, no need to exit from BMPS mode!\n"); -		return -EINVAL; +		/* Unbalanced call or last BMPS enter failed */ +		wcn36xx_dbg(WCN36XX_DBG_PMC, +			    "Not in BMPS mode, no need to exit\n"); +		return -EALREADY;  	}  	wcn36xx_smd_exit_bmps(wcn, vif);  	vif_priv->pw_state = WCN36XX_FULL_POWER; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 310c4e2746aa..cdf9e4161592 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -2070,7 +2070,7 @@ static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt)  	return head_pad;  } -/** +/*   * struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for   * bus layer usage.   */ @@ -4121,8 +4121,8 @@ release:  	sdio_release_host(sdiodev->func[1]);  fail:  	brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err); -	device_release_driver(dev);  	device_release_driver(&sdiodev->func[2]->dev); +	device_release_driver(dev);  }  struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h index 87b4434224a1..dfa111bb411e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h @@ -68,6 +68,9 @@   * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW   * @IWL_MVM_DQA_AUX_QUEUE: a queue reserved for aux frames   * @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames + * @IWL_MVM_DQA_INJECT_MONITOR_QUEUE: a queue reserved for injection using + *	monitor mode. Note this queue is the same as the queue for P2P device + *	but we can't have active monitor mode along with P2P device anyway.   * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames   * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure   *	that we are never left without the possibility to connect to an AP. 
@@ -87,6 +90,7 @@ enum iwl_mvm_dqa_txq {  	IWL_MVM_DQA_CMD_QUEUE = 0,  	IWL_MVM_DQA_AUX_QUEUE = 1,  	IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2, +	IWL_MVM_DQA_INJECT_MONITOR_QUEUE = 2,  	IWL_MVM_DQA_GCAST_QUEUE = 3,  	IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4,  	IWL_MVM_DQA_MIN_MGMT_QUEUE = 5, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index 9c889a32fe24..223fb77a3aa9 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -209,8 +209,6 @@ static inline void iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt)  static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)  { -	iwl_fw_dbg_stop_recording(fwrt); -  	fwrt->dump.conf = FW_DBG_INVALID;  } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index ca0b5536a8a6..921cab9e2d73 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -117,6 +117,7 @@  #define FH_RSCSR_FRAME_INVALID		0x55550000  #define FH_RSCSR_FRAME_ALIGN		0x40  #define FH_RSCSR_RPA_EN			BIT(25) +#define FH_RSCSR_RADA_EN		BIT(26)  #define FH_RSCSR_RXQ_POS		16  #define FH_RSCSR_RXQ_MASK		0x3F0000 @@ -128,7 +129,8 @@ struct iwl_rx_packet {  	 * 31:    flag flush RB request  	 * 30:    flag ignore TC (terminal counter) request  	 * 29:    flag fast IRQ request -	 * 28-26: Reserved +	 * 28-27: Reserved +	 * 26:    RADA enabled  	 * 25:    Offload enabled  	 * 24:    RPF enabled  	 * 23:    RSS enabled diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index a2bf530eeae4..2f22e14e00fe 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -787,7 +787,7 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,  					 u32 action)  {  	struct iwl_mac_ctx_cmd cmd = {}; -	u32 tfd_queue_msk = 0; +	u32 tfd_queue_msk = BIT(mvm->snif_queue);  	int ret;  	WARN_ON(vif->type != NL80211_IFTYPE_MONITOR); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 4575595ab022..55ab5349dd40 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -972,6 +972,7 @@ struct iwl_mvm {  	/* Tx queues */  	u16 aux_queue; +	u16 snif_queue;  	u16 probe_queue;  	u16 p2p_dev_queue; @@ -1060,6 +1061,7 @@ struct iwl_mvm {   * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running   * @IWL_MVM_STATUS_D3_RECONFIG: D3 reconfiguration is being done   * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running + * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA   */  enum iwl_mvm_status {  	IWL_MVM_STATUS_HW_RFKILL, @@ -1071,6 +1073,7 @@ enum iwl_mvm_status {  	IWL_MVM_STATUS_ROC_AUX_RUNNING,  	IWL_MVM_STATUS_D3_RECONFIG,  	IWL_MVM_STATUS_FIRMWARE_RUNNING, +	IWL_MVM_STATUS_NEED_FLUSH_P2P,  };  /* Keep track of completed init configuration */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 7078b7e458be..45470b6b351a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -624,6 +624,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,  	mvm->fw_restart = iwlwifi_mod_params.fw_restart ? 
-1 : 0;  	mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE; +	mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;  	mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;  	mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 76dc58381e1c..3b8d44361380 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -213,6 +213,7 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,  					struct ieee80211_rx_status *rx_status)  {  	int energy_a, energy_b, max_energy; +	u32 rate_flags = le32_to_cpu(desc->rate_n_flags);  	energy_a = desc->energy_a;  	energy_a = energy_a ? -energy_a : S8_MIN; @@ -224,7 +225,8 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,  			energy_a, energy_b, max_energy);  	rx_status->signal = max_energy; -	rx_status->chains = 0; /* TODO: phy info */ +	rx_status->chains = +		(rate_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS;  	rx_status->chain_signal[0] = energy_a;  	rx_status->chain_signal[1] = energy_b;  	rx_status->chain_signal[2] = S8_MIN; @@ -232,8 +234,8 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,  static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,  			     struct ieee80211_rx_status *stats, -			     struct iwl_rx_mpdu_desc *desc, int queue, -			     u8 *crypt_len) +			     struct iwl_rx_mpdu_desc *desc, u32 pkt_flags, +			     int queue, u8 *crypt_len)  {  	u16 status = le16_to_cpu(desc->status); @@ -253,6 +255,8 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,  			return -1;  		stats->flag |= RX_FLAG_DECRYPTED; +		if (pkt_flags & FH_RSCSR_RADA_EN) +			stats->flag |= RX_FLAG_MIC_STRIPPED;  		*crypt_len = IEEE80211_CCMP_HDR_LEN;  		return 0;  	case IWL_RX_MPDU_STATUS_SEC_TKIP: @@ -270,6 +274,10 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,  		if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) ==  				IWL_RX_MPDU_STATUS_SEC_WEP)  			*crypt_len = IEEE80211_WEP_IV_LEN; + +		if (pkt_flags & FH_RSCSR_RADA_EN) +			stats->flag |= RX_FLAG_ICV_STRIPPED; +  		return 0;  	case IWL_RX_MPDU_STATUS_SEC_EXT_ENC:  		if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) @@ -848,7 +856,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,  	rx_status = IEEE80211_SKB_RXCB(skb); -	if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc, queue, &crypt_len)) { +	if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc, +			      le32_to_cpu(pkt->len_n_flags), queue, +			      &crypt_len)) {  		kfree_skb(skb);  		return;  	} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index c19f98489d4e..1add5615fc3a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1709,29 +1709,29 @@ void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)  	sta->sta_id = IWL_MVM_INVALID_STA;  } -static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm) +static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue, +					  u8 sta_id, u8 fifo)  {  	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?  					
mvm->cfg->base_params->wd_timeout :  					IWL_WATCHDOG_DISABLED;  	if (iwl_mvm_has_new_tx_api(mvm)) { -		int queue = iwl_mvm_tvqm_enable_txq(mvm, mvm->aux_queue, -						    mvm->aux_sta.sta_id, -						    IWL_MAX_TID_COUNT, -						    wdg_timeout); -		mvm->aux_queue = queue; +		int tvqm_queue = +			iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id, +						IWL_MAX_TID_COUNT, +						wdg_timeout); +		*queue = tvqm_queue;  	} else {  		struct iwl_trans_txq_scd_cfg cfg = { -			.fifo = IWL_MVM_TX_FIFO_MCAST, -			.sta_id = mvm->aux_sta.sta_id, +			.fifo = fifo, +			.sta_id = sta_id,  			.tid = IWL_MAX_TID_COUNT,  			.aggregate = false,  			.frame_limit = IWL_FRAME_LIMIT,  		}; -		iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg, -				   wdg_timeout); +		iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);  	}  } @@ -1750,7 +1750,9 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)  	/* Map Aux queue to fifo - needs to happen before adding Aux station */  	if (!iwl_mvm_has_new_tx_api(mvm)) -		iwl_mvm_enable_aux_queue(mvm); +		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue, +					      mvm->aux_sta.sta_id, +					      IWL_MVM_TX_FIFO_MCAST);  	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,  					 MAC_INDEX_AUX, 0); @@ -1764,7 +1766,9 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)  	 * to firmware so enable queue here - after the station was added  	 */  	if (iwl_mvm_has_new_tx_api(mvm)) -		iwl_mvm_enable_aux_queue(mvm); +		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue, +					      mvm->aux_sta.sta_id, +					      IWL_MVM_TX_FIFO_MCAST);  	return 0;  } @@ -1772,10 +1776,31 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)  int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)  {  	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); +	int ret;  	lockdep_assert_held(&mvm->mutex); -	return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr, + +	/* Map snif queue to fifo - must happen before adding snif station */ +	if (!iwl_mvm_has_new_tx_api(mvm)) +		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue, +					      mvm->snif_sta.sta_id, +					      IWL_MVM_TX_FIFO_BE); + +	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,  					 mvmvif->id, 0); +	if (ret) +		return ret; + +	/* +	 * For 22000 firmware and on we cannot add queue to a station unknown +	 * to firmware so enable queue here - after the station was added +	 */ +	if (iwl_mvm_has_new_tx_api(mvm)) +		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue, +					      mvm->snif_sta.sta_id, +					      IWL_MVM_TX_FIFO_BE); + +	return 0;  }  int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) @@ -1784,6 +1809,8 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)  	lockdep_assert_held(&mvm->mutex); +	iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue, +			    IWL_MAX_TID_COUNT, 0);  	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);  	if (ret)  		IWL_WARN(mvm, "Failed sending remove station\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 4d0314912e94..e25cda9fbf6c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -132,6 +132,24 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)  	 * executed, and a new time event means a new command.  	 
*/  	iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC); + +	/* Do the same for the P2P device queue (STA) */ +	if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) { +		struct iwl_mvm_vif *mvmvif; + +		/* +		 * NB: access to this pointer would be racy, but the flush bit +		 * can only be set when we had a P2P-Device VIF, and we have a +		 * flush of this work in iwl_mvm_prepare_mac_removal() so it's +		 * not really racy. +		 */ + +		if (!WARN_ON(!mvm->p2p_device_vif)) { +			mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif); +			iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, +					  CMD_ASYNC); +		} +	}  }  static void iwl_mvm_roc_finished(struct iwl_mvm *mvm) @@ -855,10 +873,12 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)  	mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); -	if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) +	if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {  		iwl_mvm_remove_time_event(mvm, mvmvif, te_data); -	else +		set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status); +	} else {  		iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data); +	}  	iwl_mvm_roc_finished(mvm);  } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 593b7f97b29c..333bcb75b8af 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -657,7 +657,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)  			if (ap_sta_id != IWL_MVM_INVALID_STA)  				sta_id = ap_sta_id;  		} else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) { -			queue = mvm->aux_queue; +			queue = mvm->snif_queue; +			sta_id = mvm->snif_sta.sta_id;  		}  	} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index d46115e2d69e..03ffd84786ca 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -1134,9 +1134,18 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,  	unsigned int default_timeout =  		cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout; -	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) +	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) { +		/* +		 * We can't know when the station is asleep or awake, so we +		 * must disable the queue hang detection. +		 */ +		if (fw_has_capa(&mvm->fw->ucode_capa, +				IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) && +		    vif && vif->type == NL80211_IFTYPE_AP) +			return IWL_WATCHDOG_DISABLED;  		return iwlmvm_mod_params.tfd_q_hang_detect ?  			
default_timeout : IWL_WATCHDOG_DISABLED; +	}  	trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);  	txq_timer = (void *)trigger->data; @@ -1163,6 +1172,8 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,  		return le32_to_cpu(txq_timer->p2p_go);  	case NL80211_IFTYPE_P2P_DEVICE:  		return le32_to_cpu(txq_timer->p2p_device); +	case NL80211_IFTYPE_MONITOR: +		return default_timeout;  	default:  		WARN_ON(1);  		return mvm->cfg->base_params->wd_timeout; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index f21fe59faccf..ccd7c33c4c28 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -553,6 +553,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {  	{IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},  	{IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},  	{IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)}, +	{IWL_PCI_DEVICE(0x271C, 0x0214, iwl9260_2ac_cfg)},  	{IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)},  	{IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)},  	{IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)}, @@ -664,6 +665,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {  	{IWL_PCI_DEVICE(0x2720, 0x0310, iwla000_2ac_cfg_hr_cdb)},  	{IWL_PCI_DEVICE(0x40C0, 0x0000, iwla000_2ax_cfg_hr)},  	{IWL_PCI_DEVICE(0x40C0, 0x0A10, iwla000_2ax_cfg_hr)}, +	{IWL_PCI_DEVICE(0xA0F0, 0x0000, iwla000_2ax_cfg_hr)},  #endif /* CONFIG_IWLMVM */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index d749abeca3ae..403e65c309d0 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -670,11 +670,15 @@ static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index)  	return index & (q->n_window - 1);  } -static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie, +static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,  				     struct iwl_txq *txq, int idx)  { -	return txq->tfds + trans_pcie->tfd_size * iwl_pcie_get_cmd_index(txq, -									 idx); +	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + +	if (trans->cfg->use_tfh) +		idx = iwl_pcie_get_cmd_index(txq, idx); + +	return txq->tfds + trans_pcie->tfd_size * idx;  }  static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index c59f4581e972..ac05fd1e74c4 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -49,6 +49,7 @@   *   *****************************************************************************/  #include "iwl-trans.h" +#include "iwl-prph.h"  #include "iwl-context-info.h"  #include "internal.h" @@ -156,6 +157,11 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)  	trans_pcie->is_down = true; +	/* Stop dbgc before stopping device */ +	iwl_write_prph(trans, DBGC_IN_SAMPLE, 0); +	udelay(100); +	iwl_write_prph(trans, DBGC_OUT_CTRL, 0); +  	/* tell the device to stop sending interrupts */  	iwl_disable_interrupts(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index b7a51603465b..4541c86881d6 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ 
-166,6 +166,7 @@ static void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)  		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,  			       4, buf, i, 0);  	} +	goto out;  err_read:  	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); @@ -1226,6 +1227,15 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)  	trans_pcie->is_down = true; +	/* Stop dbgc before stopping device */ +	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { +		iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100); +	} else { +		iwl_write_prph(trans, DBGC_IN_SAMPLE, 0); +		udelay(100); +		iwl_write_prph(trans, DBGC_OUT_CTRL, 0); +	} +  	/* tell the device to stop sending interrupts */  	iwl_disable_interrupts(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 16b345f54ff0..6d0a907d5ba5 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -171,8 +171,6 @@ static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,  static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)  { -	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); -  	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and  	 * idx is bounded by n_window  	 */ @@ -181,7 +179,7 @@ static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)  	lockdep_assert_held(&txq->lock);  	iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta, -				iwl_pcie_get_tfd(trans_pcie, txq, idx)); +				iwl_pcie_get_tfd(trans, txq, idx));  	/* free SKB */  	if (txq->entries) { @@ -364,11 +362,9 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,  					    struct sk_buff *skb,  					    struct iwl_cmd_meta *out_meta)  { -	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;  	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); -	struct iwl_tfh_tfd *tfd = -		iwl_pcie_get_tfd(trans_pcie, txq, idx); +	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);  	dma_addr_t tb_phys;  	bool amsdu;  	int i, len, tb1_len, tb2_len, hdr_len; @@ -565,8 +561,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,  	u8 group_id = iwl_cmd_groupid(cmd->id);  	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];  	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; -	struct iwl_tfh_tfd *tfd = -		iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr); +	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);  	memset(tfd, 0, sizeof(*tfd)); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index fed6d842a5e1..3f85713c41dc 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -373,7 +373,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	int i, num_tbs; -	void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index); +	void *tfd = iwl_pcie_get_tfd(trans, txq, index);  	/* Sanity check on number of chunks */  	num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); @@ -2018,7 +2018,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,  	}  	trace_iwlwifi_dev_tx(trans->dev, skb, -			     iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr), +			     iwl_pcie_get_tfd(trans, txq, txq->write_ptr),  			     trans_pcie->tfd_size,  			     
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,  			     hdr_len); @@ -2092,7 +2092,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,  		IEEE80211_CCMP_HDR_LEN : 0;  	trace_iwlwifi_dev_tx(trans->dev, skb, -			     iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr), +			     iwl_pcie_get_tfd(trans, txq, txq->write_ptr),  			     trans_pcie->tfd_size,  			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0); @@ -2425,7 +2425,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,  	memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,  	       IWL_FIRST_TB_SIZE); -	tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr); +	tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);  	/* Set up entry for this TFD in Tx byte-count array */  	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),  					 iwl_pcie_tfd_get_num_tbs(trans, tfd)); diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 10b075a46b26..e8189c07b41f 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -684,6 +684,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,  	hdr = skb_put(skb, sizeof(*hdr) - ETH_ALEN);  	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |  					 IEEE80211_STYPE_NULLFUNC | +					 IEEE80211_FCTL_TODS |  					 (ps ? IEEE80211_FCTL_PM : 0));  	hdr->duration_id = cpu_to_le16(0);  	memcpy(hdr->addr1, vp->bssid, ETH_ALEN); @@ -3215,7 +3216,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)  		if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info)))  			continue; -		skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); +		skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);  		if (!skb) {  			res = -ENOMEM;  			goto out_err; diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c index 03687a80d6e9..38678e9a0562 100644 --- a/drivers/net/wireless/st/cw1200/sta.c +++ b/drivers/net/wireless/st/cw1200/sta.c @@ -198,7 +198,7 @@ void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv,  		priv->bss_loss_state++; -		skb = ieee80211_nullfunc_get(priv->hw, priv->vif); +		skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);  		WARN_ON(!skb);  		if (skb)  			cw1200_tx(priv->hw, NULL, skb); @@ -2265,7 +2265,7 @@ static int cw1200_upload_null(struct cw1200_common *priv)  		.rate = 0xFF,  	}; -	frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif); +	frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);  	if (!frame.skb)  		return -ENOMEM; diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c index 9915d83a4a30..6d02c660b4ab 100644 --- a/drivers/net/wireless/ti/wl1251/main.c +++ b/drivers/net/wireless/ti/wl1251/main.c @@ -566,7 +566,7 @@ static int wl1251_build_null_data(struct wl1251 *wl)  		size = sizeof(struct wl12xx_null_data_template);  		ptr = NULL;  	} else { -		skb = ieee80211_nullfunc_get(wl->hw, wl->vif); +		skb = ieee80211_nullfunc_get(wl->hw, wl->vif, false);  		if (!skb)  			goto out;  		size = skb->len; diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index 2bfc12fdc929..761cf8573a80 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -1069,7 +1069,8 @@ int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif)  		ptr = NULL;  	} else {  		skb = ieee80211_nullfunc_get(wl->hw, -					     wl12xx_wlvif_to_vif(wlvif)); +					 
    wl12xx_wlvif_to_vif(wlvif), +					     false);  		if (!skb)  			goto out;  		size = skb->len; @@ -1096,7 +1097,7 @@ int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,  	struct sk_buff *skb = NULL;  	int ret = -ENOMEM; -	skb = ieee80211_nullfunc_get(wl->hw, vif); +	skb = ieee80211_nullfunc_get(wl->hw, vif, false);  	if (!skb)  		goto out; diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index d6dff347f896..78ebe494fef0 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -186,7 +186,7 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)  	/* Obtain the queue to be used to transmit this packet */  	index = skb_get_queue_mapping(skb);  	if (index >= num_queues) { -		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n.", +		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",  				    index, vif->dev->name);  		index %= num_queues;  	} diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 18c85e55e76a..9bd7ddeeb6a5 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -87,6 +87,8 @@ struct netfront_cb {  /* IRQ name is queue name with "-tx" or "-rx" appended */  #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) +static DECLARE_WAIT_QUEUE_HEAD(module_unload_q); +  struct netfront_stats {  	u64			packets;  	u64			bytes; @@ -1324,6 +1326,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)  	netif_carrier_off(netdev); +	xenbus_switch_state(dev, XenbusStateInitialising);  	return netdev;   exit: @@ -2020,10 +2023,12 @@ static void netback_changed(struct xenbus_device *dev,  		break;  	case XenbusStateClosed: +		wake_up_all(&module_unload_q);  		if (dev->state == XenbusStateClosed)  			break;  		/* Missed the backend's CLOSING state -- fallthrough */  	case XenbusStateClosing: +		wake_up_all(&module_unload_q);  		xenbus_frontend_closed(dev);  		break;  	} @@ -2129,6 +2134,20 @@ static int xennet_remove(struct xenbus_device *dev)  	dev_dbg(&dev->dev, "%s\n", dev->nodename); +	if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { +		xenbus_switch_state(dev, XenbusStateClosing); +		wait_event(module_unload_q, +			   xenbus_read_driver_state(dev->otherend) == +			   XenbusStateClosing); + +		xenbus_switch_state(dev, XenbusStateClosed); +		wait_event(module_unload_q, +			   xenbus_read_driver_state(dev->otherend) == +			   XenbusStateClosed || +			   xenbus_read_driver_state(dev->otherend) == +			   XenbusStateUnknown); +	} +  	xennet_disconnect_backend(info);  	unregister_netdev(info->netdev); |
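
Editor's note on the final xen-netfront hunk above: the remove path now performs an explicit xenbus state handshake with the backend before disconnecting, instead of tearing the rings down while the backend may still be using them. The following is a minimal, illustrative sketch of that handshake pulled out into a standalone helper; it is not part of the patch. It reuses the identifiers the patch introduces (module_unload_q, the states read via xenbus_read_driver_state()) and assumes, as the patch shows in netback_changed(), that the wait queue is woken whenever the backend changes state.

#include <linux/wait.h>
#include <xen/xenbus.h>

/* Woken from the otherend-changed handler (netback_changed() in the patch)
 * whenever the backend moves to a new xenbus state.
 */
static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);

/* Sketch of the teardown handshake added to xennet_remove() above. */
static void xennet_wait_backend_closed(struct xenbus_device *dev)
{
	/* Nothing to do if the backend is already gone. */
	if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
		return;

	/* Announce that the frontend is going away... */
	xenbus_switch_state(dev, XenbusStateClosing);
	/* ...and wait for the backend to acknowledge by entering Closing. */
	wait_event(module_unload_q,
		   xenbus_read_driver_state(dev->otherend) ==
		   XenbusStateClosing);

	/* Finish the handshake: Closed on our side, then wait for the
	 * backend to reach Closed or vanish entirely (Unknown).
	 */
	xenbus_switch_state(dev, XenbusStateClosed);
	wait_event(module_unload_q,
		   xenbus_read_driver_state(dev->otherend) ==
		   XenbusStateClosed ||
		   xenbus_read_driver_state(dev->otherend) ==
		   XenbusStateUnknown);
}

Only once this handshake completes does the patched xennet_remove() go on to xennet_disconnect_backend() and unregister_netdev(), which is what closes the unload race.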