 drivers/atm/idt77252.c | 1
 drivers/isdn/hardware/mISDN/hfcsusb.c | 3
 drivers/net/caif/caif_hsi.c | 2
 drivers/net/ethernet/atheros/ag71xx.c | 1
 drivers/net/ethernet/atheros/atlx/atl1.c | 2
 drivers/net/ethernet/atheros/atlx/atl2.c | 1
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2
 drivers/net/ethernet/broadcom/genet/bcmgenet.c | 57
 drivers/net/ethernet/cavium/liquidio/request_manager.c | 6
 drivers/net/ethernet/chelsio/cxgb4/sched.c | 1
 drivers/net/ethernet/emulex/benet/be_main.c | 6
 drivers/net/ethernet/freescale/fec_main.c | 2
 drivers/net/ethernet/jme.c | 5
 drivers/net/ethernet/marvell/skge.c | 2
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 4
 drivers/net/ethernet/mellanox/mlx4/eq.c | 2
 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 13
 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 1
 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 3
 drivers/net/ethernet/mellanox/mlxsw/pci.c | 1
 drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 1
 drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c | 16
 drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c | 10
 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 6
 drivers/net/ethernet/mscc/ocelot_board.c | 5
 drivers/net/ethernet/neterion/s2io.c | 1
 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c | 3
 drivers/net/ethernet/ti/cpsw.c | 26
 drivers/net/ethernet/ti/tlan.c | 1
 drivers/net/hippi/rrunner.c | 2
 drivers/net/vmxnet3/vmxnet3_drv.c | 1
 include/uapi/linux/pkt_sched.h | 2
 net/core/neighbour.c | 2
 net/core/skbuff.c | 2
 net/ipv4/fib_frontend.c | 5
 net/ipv4/udp.c | 2
 net/ipv6/route.c | 2
 net/rds/ib.h | 1
 net/rds/ib_cm.c | 9
 net/rds/ib_frmr.c | 84
 net/rds/ib_mr.h | 4
 net/rds/ib_rdma.c | 60
 net/sched/Kconfig | 2
 net/sched/cls_api.c | 1
 net/sched/sch_fq_codel.c | 2
 net/sched/sch_sfq.c | 2
 net/sched/sch_taprio.c | 6
 net/sctp/sm_make_chunk.c | 12
 net/tipc/node.c | 1
 tools/testing/selftests/net/fib_tests.sh | 35
 50 files changed, 259 insertions(+), 162 deletions(-)
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 43a14579e80e..df51680e8931 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -1379,7 +1379,6 @@ init_tsq(struct idt77252_dev *card)
printk("%s: can't allocate TSQ.\n", card->name);
return -1;
}
- memset(card->tsq.base, 0, TSQSIZE);
card->tsq.last = card->tsq.base + TSQ_NUM_ENTRIES - 1;
card->tsq.next = card->tsq.last;
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 4c99739b937e..0e224232f746 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -1955,6 +1955,9 @@ hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
/* get endpoint base */
idx = ((ep_addr & 0x7f) - 1) * 2;
+ if (idx > 15)
+ return -EIO;
+
if (ep_addr & 0x80)
idx++;
attr = ep->desc.bmAttributes;
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index b2f10b6ad6e5..bbb2575d4728 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -1455,7 +1455,7 @@ static void __exit cfhsi_exit_module(void)
rtnl_lock();
list_for_each_safe(list_node, n, &cfhsi_list) {
cfhsi = list_entry(list_node, struct cfhsi, list);
- unregister_netdev(cfhsi->ndev);
+ unregister_netdevice(cfhsi->ndev);
}
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 72a57c6cd254..8f450a03a885 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -35,6 +35,7 @@
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/clk.h>
+#include <linux/io.h>
/* For our NAPI weight bigger does *NOT* mean better - it means more
* D-cache misses and lots more wasted cycles than we'll ever
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 7c767ce9aafa..b5c6dc914720 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -1060,8 +1060,6 @@ static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
goto err_nomem;
}
- memset(ring_header->desc, 0, ring_header->size);
-
/* init TPD ring */
tpd_ring->dma = ring_header->dma;
offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0;
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 3a3fb5ce0fee..3aba38322717 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -291,7 +291,6 @@ static s32 atl2_setup_ring_resources(struct atl2_adapter *adapter)
&adapter->ring_dma);
if (!adapter->ring_vir_addr)
return -ENOMEM;
- memset(adapter->ring_vir_addr, 0, adapter->ring_size);
/* Init TXD Ring */
adapter->txd_dma = adapter->ring_dma ;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 3f632028eff0..1069eb01cc55 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2677,8 +2677,6 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
mapping = txr->tx_push_mapping +
sizeof(struct tx_push_bd);
txr->data_mapping = cpu_to_le64(mapping);
-
- memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
}
qidx = bp->tc_to_qidx[j];
ring->queue_id = bp->q_info[qidx].queue_id;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 34466b827dde..a2b57807453b 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -3083,39 +3083,42 @@ static void bcmgenet_timeout(struct net_device *dev)
netif_tx_wake_all_queues(dev);
}
-#define MAX_MC_COUNT 16
+#define MAX_MDF_FILTER 17
static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
unsigned char *addr,
- int *i,
- int *mc)
+ int *i)
{
- u32 reg;
-
bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
UMAC_MDF_ADDR + (*i * 4));
bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
addr[4] << 8 | addr[5],
UMAC_MDF_ADDR + ((*i + 1) * 4));
- reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
- reg |= (1 << (MAX_MC_COUNT - *mc));
- bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
*i += 2;
- (*mc)++;
}
static void bcmgenet_set_rx_mode(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct netdev_hw_addr *ha;
- int i, mc;
+ int i, nfilter;
u32 reg;
netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
- /* Promiscuous mode */
+ /* Number of filters needed */
+ nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2;
+
+ /*
+ * Turn on promiscuous mode for three scenarios
+ * 1. IFF_PROMISC flag is set
+ * 2. IFF_ALLMULTI flag is set
+ * 3. The number of filters needed exceeds the number of filters
+ * supported by the hardware.
+ */
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
- if (dev->flags & IFF_PROMISC) {
+ if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
+ (nfilter > MAX_MDF_FILTER)) {
reg |= CMD_PROMISC;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
@@ -3125,32 +3128,24 @@ static void bcmgenet_set_rx_mode(struct net_device *dev)
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
}
- /* UniMac doesn't support ALLMULTI */
- if (dev->flags & IFF_ALLMULTI) {
- netdev_warn(dev, "ALLMULTI is not supported\n");
- return;
- }
-
/* update MDF filter */
i = 0;
- mc = 0;
/* Broadcast */
- bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
+ bcmgenet_set_mdf_addr(priv, dev->broadcast, &i);
/* my own address.*/
- bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
- /* Unicast list*/
- if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
- return;
+ bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i);
- if (!netdev_uc_empty(dev))
- netdev_for_each_uc_addr(ha, dev)
- bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
- /* Multicast */
- if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
- return;
+ /* Unicast */
+ netdev_for_each_uc_addr(ha, dev)
+ bcmgenet_set_mdf_addr(priv, ha->addr, &i);
+ /* Multicast */
netdev_for_each_mc_addr(ha, dev)
- bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
+ bcmgenet_set_mdf_addr(priv, ha->addr, &i);
+
+ /* Enable filters */
+ reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter);
+ bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
}
/* Set the hardware MAC address. */
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index fcf20a8f92d9..032224178b64 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -218,15 +218,13 @@ int octeon_setup_iq(struct octeon_device *oct,
return 0;
}
oct->instr_queue[iq_no] =
- vmalloc_node(sizeof(struct octeon_instr_queue), numa_node);
+ vzalloc_node(sizeof(struct octeon_instr_queue), numa_node);
if (!oct->instr_queue[iq_no])
oct->instr_queue[iq_no] =
- vmalloc(sizeof(struct octeon_instr_queue));
+ vzalloc(sizeof(struct octeon_instr_queue));
if (!oct->instr_queue[iq_no])
return 1;
- memset(oct->instr_queue[iq_no], 0,
- sizeof(struct octeon_instr_queue));
oct->instr_queue[iq_no]->q_index = q_index;
oct->instr_queue[iq_no]->app_ctx = app_ctx;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index ba6c153ee45c..60218dc676a8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -207,7 +207,6 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
goto out_err;
/* Bind queue to specified class */
- memset(qe, 0, sizeof(*qe));
qe->cntxt_id = qid;
memcpy(&qe->param, p, sizeof(qe->param));
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 82015c8a5ed7..b7a246b33599 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4697,8 +4697,12 @@ int be_update_queues(struct be_adapter *adapter)
struct net_device *netdev = adapter->netdev;
int status;
- if (netif_running(netdev))
+ if (netif_running(netdev)) {
+ /* device cannot transmit now, avoid dev_watchdog timeouts */
+ netif_carrier_off(netdev);
+
be_close(netdev);
+ }
be_cancel_worker(adapter);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 9d459ccf251d..e5610a4da539 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3144,8 +3144,6 @@ static int fec_enet_init(struct net_device *ndev)
return -ENOMEM;
}
- memset(cbd_base, 0, bd_size);
-
/* Get the Ethernet address */
fec_get_mac(ndev);
/* make sure MAC we just acquired is programmed into the hw */
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 76b7b7b85e35..0b668357db4d 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -582,11 +582,6 @@ jme_setup_tx_resources(struct jme_adapter *jme)
if (unlikely(!(txring->bufinf)))
goto err_free_txring;
- /*
- * Initialize Transmit Descriptors
- */
- memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size));
-
return 0;
err_free_txring:
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 35a92fd2cf39..9ac854c2b371 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -2558,8 +2558,6 @@ static int skge_up(struct net_device *dev)
goto free_pci_mem;
}
- memset(skge->mem, 0, skge->mem_size);
-
err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma);
if (err)
goto free_pci_mem;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index b20b3a5a1ebb..c39d7f4ab1d4 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2548,8 +2548,10 @@ static int mtk_probe(struct platform_device *pdev)
continue;
err = mtk_add_mac(eth, mac_np);
- if (err)
+ if (err) {
+ of_node_put(mac_np);
goto err_deinit_hw;
+ }
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index a5be27772b8e..c790a5fcea73 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -1013,8 +1013,6 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
dma_list[i] = t;
eq->page_list[i].map = t;
-
- memset(eq->page_list[i].buf, 0, PAGE_SIZE);
}
eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 2d6436257f9d..cc096f6011d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1499,7 +1499,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
*match_level = MLX5_MATCH_NONE;
if (dissector->used_keys &
- ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ ~(BIT(FLOW_DISSECTOR_KEY_META) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_VLAN) |
@@ -1522,11 +1523,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
- flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) ||
- flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
- flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) ||
- flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+ if (mlx5e_get_tc_tun(filter_dev)) {
if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level))
return -EOPNOTSUPP;
@@ -2647,6 +2644,10 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
family = ip_tunnel_info_af(tun_info);
key.ip_tun_key = &tun_info->key;
key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
+ if (!key.tc_tunnel) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel");
+ return -EOPNOTSUPP;
+ }
hash_key = hash_encap_info(&key);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 3b04d8927fb1..1f3891fde2eb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -2450,7 +2450,6 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport);
MLX5_SET(query_vport_counter_in, in, other_vport, 1);
- memset(out, 0, outlen);
err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
if (err)
goto free_out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 957d9b09dc3f..089ae4d48a82 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1134,7 +1134,6 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
}
/* create send-to-vport group */
- memset(flow_group_in, 0, inlen);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
MLX5_MATCH_MISC_PARAMETERS);
@@ -1293,8 +1292,6 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
return -ENOMEM;
/* create vport rx group */
- memset(flow_group_in, 0, inlen);
-
esw_set_flow_group_source_port(esw, flow_group_in);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 051b19388a81..615455a21567 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -847,7 +847,6 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
&mem_item->mapaddr);
if (!mem_item->buf)
return -ENOMEM;
- memset(mem_item->buf, 0, mem_item->size);
q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
if (!q->elem_info) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index a252b080dda9..131f62ce9297 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -830,6 +830,7 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_prio_qopt_offload *p);
/* spectrum_fid.c */
+bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index);
bool mlxsw_sp_fid_lag_vid_valid(const struct mlxsw_sp_fid *fid);
struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
u16 fid_index);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
index b25048c6c761..21296fa7f7fb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -408,14 +408,6 @@ static int mlxsw_sp_port_dcb_app_update(struct mlxsw_sp_port *mlxsw_sp_port)
have_dscp = mlxsw_sp_port_dcb_app_prio_dscp_map(mlxsw_sp_port,
&prio_map);
- if (!have_dscp) {
- err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
- MLXSW_REG_QPTS_TRUST_STATE_PCP);
- if (err)
- netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L2\n");
- return err;
- }
-
mlxsw_sp_port_dcb_app_dscp_prio_map(mlxsw_sp_port, default_prio,
&dscp_map);
err = mlxsw_sp_port_dcb_app_update_qpdpm(mlxsw_sp_port,
@@ -432,6 +424,14 @@ static int mlxsw_sp_port_dcb_app_update(struct mlxsw_sp_port *mlxsw_sp_port)
return err;
}
+ if (!have_dscp) {
+ err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
+ MLXSW_REG_QPTS_TRUST_STATE_PCP);
+ if (err)
+ netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L2\n");
+ return err;
+ }
+
err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
MLXSW_REG_QPTS_TRUST_STATE_DSCP);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index 46baf3b44309..8df3cb21baa6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -126,6 +126,16 @@ static const int *mlxsw_sp_packet_type_sfgc_types[] = {
[MLXSW_SP_FLOOD_TYPE_MC] = mlxsw_sp_sfgc_mc_packet_types,
};
+bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index)
+{
+ enum mlxsw_sp_fid_type fid_type = MLXSW_SP_FID_TYPE_DUMMY;
+ struct mlxsw_sp_fid_family *fid_family;
+
+ fid_family = mlxsw_sp->fid_core->fid_family_arr[fid_type];
+
+ return fid_family->start_index == fid_index;
+}
+
bool mlxsw_sp_fid_lag_vid_valid(const struct mlxsw_sp_fid *fid)
{
return fid->fid_family->lag_vid_valid;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 50111f228d77..5ecb45118400 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -2468,6 +2468,9 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
goto just_remove;
}
+ if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
+ goto just_remove;
+
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
if (!mlxsw_sp_port_vlan) {
netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
@@ -2527,6 +2530,9 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
goto just_remove;
}
+ if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
+ goto just_remove;
+
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
if (!mlxsw_sp_port_vlan) {
netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 58bde1a9eacb..2451d4a96490 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -291,8 +291,10 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
continue;
err = ocelot_probe_port(ocelot, port, regs, phy);
- if (err)
+ if (err) {
+ of_node_put(portnp);
return err;
+ }
phy_mode = of_get_phy_mode(portnp);
if (phy_mode < 0)
@@ -318,6 +320,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
dev_err(ocelot->dev,
"invalid phy mode for port%d, (Q)SGMII only\n",
port);
+ of_node_put(portnp);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 3b2ae1a21678..e0b2bf327905 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -747,7 +747,6 @@ static int init_shared_mem(struct s2io_nic *nic)
return -ENOMEM;
}
mem_allocated += size;
- memset(tmp_v_addr, 0, size);
size = sizeof(struct rxd_info) *
rxd_count[nic->rxd_mode];
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index 433052f734ed..5e9f8ee99800 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -442,10 +442,8 @@ nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
goto out_free_rq;
}
- memset(rq_addr, 0, rq_size);
prq = rq_addr;
- memset(rsp_addr, 0, rsp_size);
prsp = rsp_addr;
prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
@@ -755,7 +753,6 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
return -ENOMEM;
}
- memset(addr, 0, sizeof(struct netxen_ring_ctx));
recv_ctx->hwctx = addr;
recv_ctx->hwctx->ctx_id = cpu_to_le32(port);
recv_ctx->hwctx->cmd_consumer_offset =
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index f320f9a0de8b..32a89744972d 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2570,7 +2570,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
ret = PTR_ERR(slave_data->ifphy);
dev_err(&pdev->dev,
"%d: Error retrieving port phy: %d\n", i, ret);
- return ret;
+ goto err_node_put;
}
slave_data->slave_node = slave_node;
@@ -2589,7 +2589,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
if (ret) {
if (ret != -EPROBE_DEFER)
dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
- return ret;
+ goto err_node_put;
}
slave_data->phy_node = of_node_get(slave_node);
} else if (parp) {
@@ -2607,7 +2607,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
of_node_put(mdio_node);
if (!mdio) {
dev_err(&pdev->dev, "Missing mdio platform device\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_node_put;
}
snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
PHY_ID_FMT, mdio->name, phyid);
@@ -2622,7 +2623,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
if (slave_data->phy_if < 0) {
dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
i);
- return slave_data->phy_if;
+ ret = slave_data->phy_if;
+ goto err_node_put;
}
no_phy_slave:
@@ -2633,7 +2635,7 @@ no_phy_slave:
ret = ti_cm_get_macid(&pdev->dev, i,
slave_data->mac_addr);
if (ret)
- return ret;
+ goto err_node_put;
}
if (data->dual_emac) {
if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
@@ -2648,11 +2650,17 @@ no_phy_slave:
}
i++;
- if (i == data->slaves)
- break;
+ if (i == data->slaves) {
+ ret = 0;
+ goto err_node_put;
+ }
}
return 0;
+
+err_node_put:
+ of_node_put(slave_node);
+ return ret;
}
static void cpsw_remove_dt(struct platform_device *pdev)
@@ -2675,8 +2683,10 @@ static void cpsw_remove_dt(struct platform_device *pdev)
of_node_put(slave_data->phy_node);
i++;
- if (i == data->slaves)
+ if (i == data->slaves) {
+ of_node_put(slave_node);
break;
+ }
}
of_platform_depopulate(&pdev->dev);
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index b4ab1a5f6cd0..78f0f2d59e22 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -855,7 +855,6 @@ static int tlan_init(struct net_device *dev)
dev->name);
return -ENOMEM;
}
- memset(priv->dma_storage, 0, dma_size);
priv->rx_list = (struct tlan_list *)
ALIGN((unsigned long)priv->dma_storage, 8);
priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8);
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 7b9350dbebdd..2a6ec5394966 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -1196,7 +1196,6 @@ static int rr_open(struct net_device *dev)
goto error;
}
rrpriv->rx_ctrl_dma = dma_addr;
- memset(rrpriv->rx_ctrl, 0, 256*sizeof(struct ring_ctrl));
rrpriv->info = pci_alloc_consistent(pdev, sizeof(struct rr_info),
&dma_addr);
@@ -1205,7 +1204,6 @@ static int rr_open(struct net_device *dev)
goto error;
}
rrpriv->info_dma = dma_addr;
- memset(rrpriv->info, 0, sizeof(struct rr_info));
wmb();
spin_lock_irqsave(&rrpriv->lock, flags);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 3f48f05dd2a6..2a1918f25e47 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -3430,7 +3430,6 @@ vmxnet3_probe_device(struct pci_dev *pdev,
err = -ENOMEM;
goto err_ver;
}
- memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
adapter->default_coal_mode = true;
}
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 1f623252abe8..18f185299f47 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -1174,7 +1174,7 @@ enum {
TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME, /* s64 */
TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION, /* s64 */
TCA_TAPRIO_ATTR_FLAGS, /* u32 */
- TCA_TAPRIO_ATTR_TXTIME_DELAY, /* s32 */
+ TCA_TAPRIO_ATTR_TXTIME_DELAY, /* u32 */
__TCA_TAPRIO_ATTR_MAX,
};
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 742cea4ce72e..0dfc97bc8760 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1124,6 +1124,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
atomic_set(&neigh->probes,
NEIGH_VAR(neigh->parms, UCAST_PROBES));
+ neigh_del_timer(neigh);
neigh->nud_state = NUD_INCOMPLETE;
neigh->updated = now;
next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
@@ -1140,6 +1141,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
}
} else if (neigh->nud_state & NUD_STALE) {
neigh_dbg(2, "neigh %p is delayed\n", neigh);
+ neigh_del_timer(neigh);
neigh->nud_state = NUD_DELAY;
neigh->updated = jiffies;
neigh_add_timer(neigh, jiffies +
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 6f1e31f674a3..0338820ee0ec 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -762,7 +762,7 @@ void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
printk("%sdev name=%s feat=0x%pNF\n",
level, dev->name, &dev->features);
if (sk)
- printk("%ssk family=%hu type=%hu proto=%hu\n",
+ printk("%ssk family=%hu type=%u proto=%u\n",
level, sk->sk_family, sk->sk_type, sk->sk_protocol);
if (full_pkt && headroom)
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 317339cd7f03..e8bc939b56dd 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -388,6 +388,11 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
fib_combine_itag(itag, &res);
dev_match = fib_info_nh_uses_dev(res.fi, dev);
+ /* This is not common; loopback packets retain skb_dst so normally they
+ * would not even hit this slow path.
+ */
+ dev_match = dev_match || (res.type == RTN_LOCAL &&
+ dev == net->loopback_dev);
if (dev_match) {
ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST;
return ret;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index c21862ba9c02..d88821c794fb 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2170,7 +2170,7 @@ start_lookup:
/* Initialize UDP checksum. If exited with zero value (success),
* CHECKSUM_UNNECESSARY means, that no more checks are required.
- * Otherwise, csum completion requires chacksumming packet body,
+ * Otherwise, csum completion requires checksumming packet body,
* including udp header and folding it to skb->csum.
*/
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 4d2e6b31a8d6..6fe3097b9ab7 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2563,7 +2563,7 @@ static struct dst_entry *rt6_check(struct rt6_info *rt,
{
u32 rt_cookie = 0;
- if ((from && !fib6_get_cookie_safe(from, &rt_cookie)) ||
+ if (!from || !fib6_get_cookie_safe(from, &rt_cookie) ||
rt_cookie != cookie)
return NULL;
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 66c03c7665b2..303c6ee8bdb7 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -156,6 +156,7 @@ struct rds_ib_connection {
/* To control the number of wrs from fastreg */
atomic_t i_fastreg_wrs;
+ atomic_t i_fastreg_inuse_count;
/* interrupt handling */
struct tasklet_struct i_send_tasklet;
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 8891822eba4f..4de0214da63c 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -40,6 +40,7 @@
#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"
+#include "ib_mr.h"
/*
* Set the selected protocol version
@@ -526,7 +527,6 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
attr.qp_type = IB_QPT_RC;
attr.send_cq = ic->i_send_cq;
attr.recv_cq = ic->i_recv_cq;
- atomic_set(&ic->i_fastreg_wrs, RDS_IB_DEFAULT_FR_WR);
/*
* XXX this can fail if max_*_wr is too large? Are we supposed
@@ -993,6 +993,11 @@ void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
ic->i_cm_id, err);
}
+ /* kick off "flush_worker" for all pools in order to reap
+ * all FRMR registrations that are still marked "FRMR_IS_INUSE"
+ */
+ rds_ib_flush_mrs();
+
/*
* We want to wait for tx and rx completion to finish
* before we tear down the connection, but we have to be
@@ -1005,6 +1010,7 @@ void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
wait_event(rds_ib_ring_empty_wait,
rds_ib_ring_empty(&ic->i_recv_ring) &&
(atomic_read(&ic->i_signaled_sends) == 0) &&
+ (atomic_read(&ic->i_fastreg_inuse_count) == 0) &&
(atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR));
tasklet_kill(&ic->i_send_tasklet);
tasklet_kill(&ic->i_recv_tasklet);
@@ -1132,6 +1138,7 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
spin_lock_init(&ic->i_ack_lock);
#endif
atomic_set(&ic->i_signaled_sends, 0);
+ atomic_set(&ic->i_fastreg_wrs, RDS_IB_DEFAULT_FR_WR);
/*
* rds_ib_conn_shutdown() waits for these to be emptied so they
diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
index 32ae26ed58a0..06ecf9d2d4bf 100644
--- a/net/rds/ib_frmr.c
+++ b/net/rds/ib_frmr.c
@@ -32,6 +32,24 @@
#include "ib_mr.h"
+static inline void
+rds_transition_frwr_state(struct rds_ib_mr *ibmr,
+ enum rds_ib_fr_state old_state,
+ enum rds_ib_fr_state new_state)
+{
+ if (cmpxchg(&ibmr->u.frmr.fr_state,
+ old_state, new_state) == old_state &&
+ old_state == FRMR_IS_INUSE) {
+ /* enforce order of ibmr->u.frmr.fr_state update
+ * before decrementing i_fastreg_inuse_count
+ */
+ smp_mb__before_atomic();
+ atomic_dec(&ibmr->ic->i_fastreg_inuse_count);
+ if (waitqueue_active(&rds_ib_ring_empty_wait))
+ wake_up(&rds_ib_ring_empty_wait);
+ }
+}
+
static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev,
int npages)
{
@@ -75,6 +93,8 @@ static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev,
pool->max_items_soft = pool->max_items;
frmr->fr_state = FRMR_IS_FREE;
+ init_waitqueue_head(&frmr->fr_inv_done);
+ init_waitqueue_head(&frmr->fr_reg_done);
return ibmr;
out_no_cigar:
@@ -116,13 +136,19 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
if (unlikely(ret != ibmr->sg_len))
return ret < 0 ? ret : -EINVAL;
+ if (cmpxchg(&frmr->fr_state,
+ FRMR_IS_FREE, FRMR_IS_INUSE) != FRMR_IS_FREE)
+ return -EBUSY;
+
+ atomic_inc(&ibmr->ic->i_fastreg_inuse_count);
+
/* Perform a WR for the fast_reg_mr. Each individual page
* in the sg list is added to the fast reg page list and placed
* inside the fast_reg_mr WR. The key used is a rolling 8bit
* counter, which should guarantee uniqueness.
*/
ib_update_fast_reg_key(frmr->mr, ibmr->remap_count++);
- frmr->fr_state = FRMR_IS_INUSE;
+ frmr->fr_reg = true;
memset(&reg_wr, 0, sizeof(reg_wr));
reg_wr.wr.wr_id = (unsigned long)(void *)ibmr;
@@ -138,12 +164,23 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
ret = ib_post_send(ibmr->ic->i_cm_id->qp, &reg_wr.wr, NULL);
if (unlikely(ret)) {
/* Failure here can be because of -ENOMEM as well */
- frmr->fr_state = FRMR_IS_STALE;
+ rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
+
atomic_inc(&ibmr->ic->i_fastreg_wrs);
if (printk_ratelimit())
pr_warn("RDS/IB: %s returned error(%d)\n",
__func__, ret);
+ goto out;
}
+
+ /* Wait for the registration to complete in order to prevent an invalid
+ * access error resulting from a race in which the memory region is
+ * already being accessed while registration is still pending.
+ */
+ wait_event(frmr->fr_reg_done, !frmr->fr_reg);
+
+out:
+
return ret;
}
@@ -255,12 +292,29 @@ static int rds_ib_post_inv(struct rds_ib_mr *ibmr)
ret = ib_post_send(i_cm_id->qp, s_wr, NULL);
if (unlikely(ret)) {
- frmr->fr_state = FRMR_IS_STALE;
+ rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
frmr->fr_inv = false;
+ /* enforce order of frmr->fr_inv update
+ * before incrementing i_fastreg_wrs
+ */
+ smp_mb__before_atomic();
atomic_inc(&ibmr->ic->i_fastreg_wrs);
pr_err("RDS/IB: %s returned error(%d)\n", __func__, ret);
goto out;
}
+
+ /* Wait for the FRMR_IS_FREE (or FRMR_IS_STALE) transition in order to
+ * 1) avoid a silly bouncing between "clean_list" and "drop_list"
+ * triggered by function "rds_ib_reg_frmr" as it releases frmr
+ * regions whose state is not "FRMR_IS_FREE" right away.
+ * 2) prevent an invalid access error in a race
+ * from a pending "IB_WR_LOCAL_INV" operation
+ * with a teardown ("dma_unmap_sg", "put_page")
+ * and de-registration ("ib_dereg_mr") of the corresponding
+ * memory region.
+ */
+ wait_event(frmr->fr_inv_done, frmr->fr_state != FRMR_IS_INUSE);
+
out:
return ret;
}
@@ -271,7 +325,7 @@ void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
struct rds_ib_frmr *frmr = &ibmr->u.frmr;
if (wc->status != IB_WC_SUCCESS) {
- frmr->fr_state = FRMR_IS_STALE;
+ rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
if (rds_conn_up(ic->conn))
rds_ib_conn_error(ic->conn,
"frmr completion <%pI4,%pI4> status %u(%s), vendor_err 0x%x, disconnecting and reconnecting\n",
@@ -283,10 +337,20 @@ void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
}
if (frmr->fr_inv) {
- frmr->fr_state = FRMR_IS_FREE;
+ rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_FREE);
frmr->fr_inv = false;
+ wake_up(&frmr->fr_inv_done);
}
+ if (frmr->fr_reg) {
+ frmr->fr_reg = false;
+ wake_up(&frmr->fr_reg_done);
+ }
+
+ /* enforce order of frmr->{fr_reg,fr_inv} update
+ * before incrementing i_fastreg_wrs
+ */
+ smp_mb__before_atomic();
atomic_inc(&ic->i_fastreg_wrs);
}
@@ -295,14 +359,18 @@ void rds_ib_unreg_frmr(struct list_head *list, unsigned int *nfreed,
{
struct rds_ib_mr *ibmr, *next;
struct rds_ib_frmr *frmr;
- int ret = 0;
+ int ret = 0, ret2;
unsigned int freed = *nfreed;
/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
list_for_each_entry(ibmr, list, unmap_list) {
- if (ibmr->sg_dma_len)
- ret |= rds_ib_post_inv(ibmr);
+ if (ibmr->sg_dma_len) {
+ ret2 = rds_ib_post_inv(ibmr);
+ if (ret2 && !ret)
+ ret = ret2;
+ }
}
+
if (ret)
pr_warn("RDS/IB: %s failed (err=%d)\n", __func__, ret);
diff --git a/net/rds/ib_mr.h b/net/rds/ib_mr.h
index 5da12c248431..9045a8c0edff 100644
--- a/net/rds/ib_mr.h
+++ b/net/rds/ib_mr.h
@@ -57,6 +57,9 @@ struct rds_ib_frmr {
struct ib_mr *mr;
enum rds_ib_fr_state fr_state;
bool fr_inv;
+ wait_queue_head_t fr_inv_done;
+ bool fr_reg;
+ wait_queue_head_t fr_reg_done;
struct ib_send_wr fr_wr;
unsigned int dma_npages;
unsigned int sg_byte_len;
@@ -97,6 +100,7 @@ struct rds_ib_mr_pool {
struct llist_head free_list; /* unused MRs */
struct llist_head clean_list; /* unused & unmapped MRs */
wait_queue_head_t flush_wait;
+ spinlock_t clean_lock; /* "clean_list" concurrency */
atomic_t free_pinned; /* memory pinned by free MRs */
unsigned long max_items;
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 0b347f46b2f4..c8c1e3ae8d84 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -40,9 +40,6 @@
struct workqueue_struct *rds_ib_mr_wq;
-static DEFINE_PER_CPU(unsigned long, clean_list_grace);
-#define CLEAN_LIST_BUSY_BIT 0
-
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
struct rds_ib_device *rds_ibdev;
@@ -195,12 +192,11 @@ struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
{
struct rds_ib_mr *ibmr = NULL;
struct llist_node *ret;
- unsigned long *flag;
+ unsigned long flags;
- preempt_disable();
- flag = this_cpu_ptr(&clean_list_grace);
- set_bit(CLEAN_LIST_BUSY_BIT, flag);
+ spin_lock_irqsave(&pool->clean_lock, flags);
ret = llist_del_first(&pool->clean_list);
+ spin_unlock_irqrestore(&pool->clean_lock, flags);
if (ret) {
ibmr = llist_entry(ret, struct rds_ib_mr, llnode);
if (pool->pool_type == RDS_IB_MR_8K_POOL)
@@ -209,23 +205,9 @@ struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
rds_ib_stats_inc(s_ib_rdma_mr_1m_reused);
}
- clear_bit(CLEAN_LIST_BUSY_BIT, flag);
- preempt_enable();
return ibmr;
}
-static inline void wait_clean_list_grace(void)
-{
- int cpu;
- unsigned long *flag;
-
- for_each_online_cpu(cpu) {
- flag = &per_cpu(clean_list_grace, cpu);
- while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
- cpu_relax();
- }
-}
-
void rds_ib_sync_mr(void *trans_private, int direction)
{
struct rds_ib_mr *ibmr = trans_private;
@@ -324,8 +306,7 @@ static unsigned int llist_append_to_list(struct llist_head *llist,
* of clusters. Each cluster has linked llist nodes of
* MR_CLUSTER_SIZE mrs that are ready for reuse.
*/
-static void list_to_llist_nodes(struct rds_ib_mr_pool *pool,
- struct list_head *list,
+static void list_to_llist_nodes(struct list_head *list,
struct llist_node **nodes_head,
struct llist_node **nodes_tail)
{
@@ -402,8 +383,13 @@ int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
*/
dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
- if (free_all)
+ if (free_all) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->clean_lock, flags);
llist_append_to_list(&pool->clean_list, &unmap_list);
+ spin_unlock_irqrestore(&pool->clean_lock, flags);
+ }
free_goal = rds_ib_flush_goal(pool, free_all);
@@ -416,27 +402,20 @@ int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
rds_ib_unreg_fmr(&unmap_list, &nfreed, &unpinned, free_goal);
if (!list_empty(&unmap_list)) {
- /* we have to make sure that none of the things we're about
- * to put on the clean list would race with other cpus trying
- * to pull items off. The llist would explode if we managed to
- * remove something from the clean list and then add it back again
- * while another CPU was spinning on that same item in llist_del_first.
- *
- * This is pretty unlikely, but just in case wait for an llist grace period
- * here before adding anything back into the clean list.
- */
- wait_clean_list_grace();
-
- list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
+ unsigned long flags;
+
+ list_to_llist_nodes(&unmap_list, &clean_nodes, &clean_tail);
if (ibmr_ret) {
*ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
clean_nodes = clean_nodes->next;
}
/* more than one entry in llist nodes */
- if (clean_nodes)
+ if (clean_nodes) {
+ spin_lock_irqsave(&pool->clean_lock, flags);
llist_add_batch(clean_nodes, clean_tail,
&pool->clean_list);
-
+ spin_unlock_irqrestore(&pool->clean_lock, flags);
+ }
}
atomic_sub(unpinned, &pool->free_pinned);
@@ -471,7 +450,7 @@ struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
else
rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
- return ERR_PTR(-EAGAIN);
+ break;
}
/* We do have some empty MRs. Flush them out. */
@@ -485,7 +464,7 @@ struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
return ibmr;
}
- return ibmr;
+ return NULL;
}
static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
@@ -610,6 +589,7 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
init_llist_head(&pool->free_list);
init_llist_head(&pool->drop_list);
init_llist_head(&pool->clean_list);
+ spin_lock_init(&pool->clean_lock);
mutex_init(&pool->flush_lock);
init_waitqueue_head(&pool->flush_wait);
INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index dd55b9ac3a66..afd2ba157a13 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -942,7 +942,7 @@ config NET_ACT_TUNNEL_KEY
config NET_ACT_CT
tristate "connection tracking tc action"
- depends on NET_CLS_ACT && NF_CONNTRACK
+ depends on NET_CLS_ACT && NF_CONNTRACK && NF_NAT
help
Say Y here to allow sending the packets to conntrack module.
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 278014e26aec..d144233423c5 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -2152,6 +2152,7 @@ replay:
tfilter_notify(net, skb, n, tp, block, q, parent, fh,
RTM_NEWTFILTER, false, rtnl_held);
tfilter_put(tp, fh);
+ q->flags &= ~TCQ_F_CAN_BYPASS;
}
errout:
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index e2faf33d282b..d59fbcc745d1 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -596,8 +596,6 @@ static unsigned long fq_codel_find(struct Qdisc *sch, u32 classid)
static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
u32 classid)
{
- /* we cannot bypass queue discipline anymore */
- sch->flags &= ~TCQ_F_CAN_BYPASS;
return 0;
}
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 420bd8411677..68404a9d2ce4 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -824,8 +824,6 @@ static unsigned long sfq_find(struct Qdisc *sch, u32 classid)
static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
u32 classid)
{
- /* we cannot bypass queue discipline anymore */
- sch->flags &= ~TCQ_F_CAN_BYPASS;
return 0;
}
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 388750ddc57a..c39db507ba3f 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -75,7 +75,7 @@ struct taprio_sched {
struct sched_gate_list __rcu *admin_sched;
struct hrtimer advance_timer;
struct list_head taprio_list;
- int txtime_delay;
+ u32 txtime_delay;
};
static ktime_t sched_base_time(const struct sched_gate_list *sched)
@@ -1113,7 +1113,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
goto unlock;
}
- q->txtime_delay = nla_get_s32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
+ q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
}
if (!TXTIME_ASSIST_IS_ENABLED(taprio_flags) &&
@@ -1430,7 +1430,7 @@ static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
goto options_error;
if (q->txtime_delay &&
- nla_put_s32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
+ nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
goto options_error;
if (oper && dump_schedule(skb, oper))
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index ed39396b9bba..36bd8a6e82df 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2582,8 +2582,7 @@ do_addr_param:
case SCTP_PARAM_STATE_COOKIE:
asoc->peer.cookie_len =
ntohs(param.p->length) - sizeof(struct sctp_paramhdr);
- if (asoc->peer.cookie)
- kfree(asoc->peer.cookie);
+ kfree(asoc->peer.cookie);
asoc->peer.cookie = kmemdup(param.cookie->body, asoc->peer.cookie_len, gfp);
if (!asoc->peer.cookie)
retval = 0;
@@ -2648,8 +2647,7 @@ do_addr_param:
goto fall_through;
/* Save peer's random parameter */
- if (asoc->peer.peer_random)
- kfree(asoc->peer.peer_random);
+ kfree(asoc->peer.peer_random);
asoc->peer.peer_random = kmemdup(param.p,
ntohs(param.p->length), gfp);
if (!asoc->peer.peer_random) {
@@ -2663,8 +2661,7 @@ do_addr_param:
goto fall_through;
/* Save peer's HMAC list */
- if (asoc->peer.peer_hmacs)
- kfree(asoc->peer.peer_hmacs);
+ kfree(asoc->peer.peer_hmacs);
asoc->peer.peer_hmacs = kmemdup(param.p,
ntohs(param.p->length), gfp);
if (!asoc->peer.peer_hmacs) {
@@ -2680,8 +2677,7 @@ do_addr_param:
if (!ep->auth_enable)
goto fall_through;
- if (asoc->peer.peer_chunks)
- kfree(asoc->peer.peer_chunks);
+ kfree(asoc->peer.peer_chunks);
asoc->peer.peer_chunks = kmemdup(param.p,
ntohs(param.p->length), gfp);
if (!asoc->peer.peer_chunks)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 324a1f91b394..3a5be1d7e572 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1807,6 +1807,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
__skb_queue_head_init(&xmitq);
/* Ensure message is well-formed before touching the header */
+ TIPC_SKB_CB(skb)->validated = false;
if (unlikely(!tipc_msg_validate(&skb)))
goto discard;
hdr = buf_msg(skb);
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 9457aaeae092..4465fc2dae14 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -9,12 +9,13 @@ ret=0
ksft_skip=4
# all tests in this script. Can be overridden with -t option
-TESTS="unregister down carrier nexthop ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics ipv4_route_metrics ipv4_route_v6_gw"
+TESTS="unregister down carrier nexthop ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics ipv4_route_metrics ipv4_route_v6_gw rp_filter"
VERBOSE=0
PAUSE_ON_FAIL=no
PAUSE=no
IP="ip -netns ns1"
+NS_EXEC="ip netns exec ns1"
log_test()
{
@@ -433,6 +434,37 @@ fib_carrier_test()
fib_carrier_unicast_test
}
+fib_rp_filter_test()
+{
+ echo
+ echo "IPv4 rp_filter tests"
+
+ setup
+
+ set -e
+ $IP link set dev lo address 52:54:00:6a:c7:5e
+ $IP link set dummy0 address 52:54:00:6a:c7:5e
+ $IP link add dummy1 type dummy
+ $IP link set dummy1 address 52:54:00:6a:c7:5e
+ $IP link set dev dummy1 up
+ $NS_EXEC sysctl -qw net.ipv4.conf.all.rp_filter=1
+ $NS_EXEC sysctl -qw net.ipv4.conf.all.accept_local=1
+ $NS_EXEC sysctl -qw net.ipv4.conf.all.route_localnet=1
+
+ $NS_EXEC tc qd add dev dummy1 parent root handle 1: fq_codel
+ $NS_EXEC tc filter add dev dummy1 parent 1: protocol arp basic action mirred egress redirect dev lo
+ $NS_EXEC tc filter add dev dummy1 parent 1: protocol ip basic action mirred egress redirect dev lo
+ set +e
+
+ run_cmd "ip netns exec ns1 ping -I dummy1 -w1 -c1 198.51.100.1"
+ log_test $? 0 "rp_filter passes local packets"
+
+ run_cmd "ip netns exec ns1 ping -I dummy1 -w1 -c1 127.0.0.1"
+ log_test $? 0 "rp_filter passes loopback packets"
+
+ cleanup
+}
+
################################################################################
# Tests on nexthop spec
@@ -1557,6 +1589,7 @@ do
fib_unreg_test|unregister) fib_unreg_test;;
fib_down_test|down) fib_down_test;;
fib_carrier_test|carrier) fib_carrier_test;;
+ fib_rp_filter_test|rp_filter) fib_rp_filter_test;;
fib_nexthop_test|nexthop) fib_nexthop_test;;
ipv6_route_test|ipv6_rt) ipv6_route_test;;
ipv4_route_test|ipv4_rt) ipv4_route_test;;