38 files changed, 329 insertions, 142 deletions
diff --git a/Documentation/process/maintainer-netdev.rst b/Documentation/process/maintainer-netdev.rst
index c1c732e9748b..09dcf6377c27 100644
--- a/Documentation/process/maintainer-netdev.rst
+++ b/Documentation/process/maintainer-netdev.rst
@@ -98,7 +98,7 @@ If you aren't subscribed to netdev and/or are simply unsure if
 repository link above for any new networking-related commits. You may
 also check the following website for the current status:
 
-  https://patchwork.hopto.org/net-next.html
+  https://netdev.bots.linux.dev/net-next.html
 
 The ``net`` tree continues to collect fixes for the vX.Y content, and
 is fed back to Linus at regular (~weekly) intervals. Meaning that the
@@ -120,7 +120,37 @@ queue for netdev:
 
   https://patchwork.kernel.org/project/netdevbpf/list/
 
 The "State" field will tell you exactly where things are at with your
-patch. Patches are indexed by the ``Message-ID`` header of the emails
+patch:
+
+================== =============================================================
+Patch state        Description
+================== =============================================================
+New, Under review  pending review, patch is in the maintainer’s queue for
+                   review; the two states are used interchangeably (depending on
+                   the exact co-maintainer handling patchwork at the time)
+Accepted           patch was applied to the appropriate networking tree, this is
+                   usually set automatically by the pw-bot
+Needs ACK          waiting for an ack from an area expert or testing
+Changes requested  patch has not passed the review, new revision is expected
+                   with appropriate code and commit message changes
+Rejected           patch has been rejected and new revision is not expected
+Not applicable     patch is expected to be applied outside of the networking
+                   subsystem
+Awaiting upstream  patch should be reviewed and handled by appropriate
+                   sub-maintainer, who will send it on to the networking trees;
+                   patches set to ``Awaiting upstream`` in netdev's patchwork
+                   will usually remain in this state, whether the sub-maintainer
+                   requested changes, accepted or rejected the patch
+Deferred           patch needs to be reposted later, usually due to dependency
+                   or because it was posted for a closed tree
+Superseded         new version of the patch was posted, usually set by the
+                   pw-bot
+RFC                not to be applied, usually not in maintainer’s review queue,
+                   pw-bot can automatically set patches to this state based
+                   on subject tags
+================== =============================================================
+
+Patches are indexed by the ``Message-ID`` header of the emails
 which carried them so if you have trouble finding your patch append
 the value of ``Message-ID`` to the URL above.
 
@@ -155,7 +185,7 @@ must match the MAINTAINERS entry) and a handful of senior reviewers.
 
 Bot records its activity here:
 
-  https://patchwork.hopto.org/pw-bot.html
+  https://netdev.bots.linux.dev/pw-bot.html
 
 Review timelines
 ~~~~~~~~~~~~~~~~
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index dee35ba924ad..0617d5ccd3ff 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -132,6 +132,8 @@ struct sja1105_info {
         int max_frame_mem;
         int num_ports;
         bool multiple_cascade_ports;
+        /* Every {port, TXQ} has its own CBS shaper */
+        bool fixed_cbs_mapping;
         enum dsa_tag_protocol tag_proto;
         const struct sja1105_dynamic_table_ops *dyn_ops;
         const struct sja1105_table_ops *static_ops;
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 331bb1c6676a..a23d980d28f5 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -2115,11 +2115,36 @@ static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
 }
 
 #define BYTES_PER_KBIT (1000LL / 8)
+/* Port 0 (the uC port) does not have CBS shapers */
+#define SJA1110_FIXED_CBS(port, prio) ((((port) - 1) * SJA1105_NUM_TC) + (prio))
+
+static int sja1105_find_cbs_shaper(struct sja1105_private *priv,
+                                   int port, int prio)
+{
+        int i;
+
+        if (priv->info->fixed_cbs_mapping) {
+                i = SJA1110_FIXED_CBS(port, prio);
+                if (i >= 0 && i < priv->info->num_cbs_shapers)
+                        return i;
+
+                return -1;
+        }
+
+        for (i = 0; i < priv->info->num_cbs_shapers; i++)
+                if (priv->cbs[i].port == port && priv->cbs[i].prio == prio)
+                        return i;
+
+        return -1;
+}
 
 static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv)
 {
         int i;
 
+        if (priv->info->fixed_cbs_mapping)
+                return -1;
+
         for (i = 0; i < priv->info->num_cbs_shapers; i++)
                 if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope)
                         return i;
@@ -2150,14 +2175,20 @@ static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
 {
         struct sja1105_private *priv = ds->priv;
         struct sja1105_cbs_entry *cbs;
+        s64 port_transmit_rate_kbps;
         int index;
 
         if (!offload->enable)
                 return sja1105_delete_cbs_shaper(priv, port, offload->queue);
 
-        index = sja1105_find_unused_cbs_shaper(priv);
-        if (index < 0)
-                return -ENOSPC;
+        /* The user may be replacing an existing shaper */
+        index = sja1105_find_cbs_shaper(priv, port, offload->queue);
+        if (index < 0) {
+                /* That isn't the case - see if we can allocate a new one */
+                index = sja1105_find_unused_cbs_shaper(priv);
+                if (index < 0)
+                        return -ENOSPC;
+        }
 
         cbs = &priv->cbs[index];
         cbs->port = port;
@@ -2167,9 +2198,17 @@ static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
          */
         cbs->credit_hi = offload->hicredit;
         cbs->credit_lo = abs(offload->locredit);
-        /* User space is in kbits/sec, hardware in bytes/sec */
-        cbs->idle_slope = offload->idleslope * BYTES_PER_KBIT;
-        cbs->send_slope = abs(offload->sendslope * BYTES_PER_KBIT);
+        /* User space is in kbits/sec, while the hardware in bytes/sec times
+         * link speed. Since the given offload->sendslope is good only for the
+         * current link speed anyway, and user space is likely to reprogram it
+         * when that changes, don't even bother to track the port's link speed,
+         * but deduce the port transmit rate from idleslope - sendslope.
+         */
+        port_transmit_rate_kbps = offload->idleslope - offload->sendslope;
+        cbs->idle_slope = div_s64(offload->idleslope * BYTES_PER_KBIT,
+                                  port_transmit_rate_kbps);
+        cbs->send_slope = div_s64(abs(offload->sendslope * BYTES_PER_KBIT),
+                                  port_transmit_rate_kbps);
         /* Convert the negative values from 64-bit 2's complement
          * to 32-bit 2's complement (for the case of 0x80000000 whose
          * negative is still negative).
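The scaling above works because, for an 802.1Qav credit-based shaper, idleslope - sendslope equals the port transmit rate, so the division rebases both slopes from absolute bytes/sec onto a fraction of link speed. A standalone sketch of that arithmetic, with made-up tc values (the kernel's div_s64() becomes plain 64-bit division in user space):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BYTES_PER_KBIT (1000LL / 8)

int main(void)
{
        int64_t idleslope = 40000;  /* kbit/s, as given by tc (hypothetical) */
        int64_t sendslope = -60000; /* kbit/s; equals idleslope - link rate */
        int64_t rate_kbps = idleslope - sendslope; /* 100000 => 100 Mbit/s */

        /* hardware wants slopes relative to link speed, not absolute */
        int64_t idle_slope = idleslope * BYTES_PER_KBIT / rate_kbps;
        int64_t send_slope = llabs(sendslope * BYTES_PER_KBIT) / rate_kbps;

        printf("idle_slope=%" PRId64 " send_slope=%" PRId64 "\n",
               idle_slope, send_slope);
        return 0;
}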
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
index 5ce29c8057a4..834b5c1b4db0 100644
--- a/drivers/net/dsa/sja1105/sja1105_spi.c
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -781,6 +781,7 @@ const struct sja1105_info sja1110a_info = {
         .tag_proto = DSA_TAG_PROTO_SJA1110,
         .can_limit_mcast_flood = true,
         .multiple_cascade_ports = true,
+        .fixed_cbs_mapping = true,
         .ptp_ts_bits = 32,
         .ptpegr_ts_bytes = 8,
         .max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
@@ -831,6 +832,7 @@ const struct sja1105_info sja1110b_info = {
         .tag_proto = DSA_TAG_PROTO_SJA1110,
         .can_limit_mcast_flood = true,
         .multiple_cascade_ports = true,
+        .fixed_cbs_mapping = true,
         .ptp_ts_bits = 32,
         .ptpegr_ts_bytes = 8,
         .max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
@@ -881,6 +883,7 @@ const struct sja1105_info sja1110c_info = {
         .tag_proto = DSA_TAG_PROTO_SJA1110,
         .can_limit_mcast_flood = true,
         .multiple_cascade_ports = true,
+        .fixed_cbs_mapping = true,
         .ptp_ts_bits = 32,
         .ptpegr_ts_bytes = 8,
         .max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
@@ -931,6 +934,7 @@ const struct sja1105_info sja1110d_info = {
         .tag_proto = DSA_TAG_PROTO_SJA1110,
         .can_limit_mcast_flood = true,
         .multiple_cascade_ports = true,
+        .fixed_cbs_mapping = true,
         .ptp_ts_bits = 32,
         .ptpegr_ts_bytes = 8,
         .max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index ea0e38b4d9e9..f281e42a7ef9 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -570,7 +570,10 @@ static int gve_rx_append_frags(struct napi_struct *napi,
                 if (!skb)
                         return -1;
 
-                skb_shinfo(rx->ctx.skb_tail)->frag_list = skb;
+                if (rx->ctx.skb_tail == rx->ctx.skb_head)
+                        skb_shinfo(rx->ctx.skb_head)->frag_list = skb;
+                else
+                        rx->ctx.skb_tail->next = skb;
                 rx->ctx.skb_tail = skb;
                 num_frags = 0;
         }
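The gve change above stops hanging every new skb off the tail's frag_list: only the first extension is anchored on the head's frag_list, and later ones chain through the tail's next pointer. A toy user-space analogue of that head/tail logic (struct pkt is a stand-in for struct sk_buff, not the real API):

#include <stdio.h>

struct pkt {
        int id;
        struct pkt *next;       /* sibling link within the fragment chain */
        struct pkt *frag_list;  /* anchor for the chain, head packet only */
};

static struct pkt head = { .id = 0 };
static struct pkt *tail = &head;

static void append(struct pkt *p)
{
        if (tail == &head)
                head.frag_list = p;     /* first extension hangs off the head */
        else
                tail->next = p;         /* later ones extend via the tail */
        tail = p;
}

int main(void)
{
        struct pkt a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

        append(&a);
        append(&b);
        append(&c);
        for (struct pkt *p = head.frag_list; p; p = p->next)
                printf("fragment %d\n", p->id);
        return 0;
}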
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 015b78144114..a2b759531cb7 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -34,11 +34,11 @@ struct igb_adapter;
 /* TX/RX descriptor defines */
 #define IGB_DEFAULT_TXD     256
 #define IGB_DEFAULT_TX_WORK 128
-#define IGB_MIN_TXD         80
+#define IGB_MIN_TXD         64
 #define IGB_MAX_TXD         4096
 
 #define IGB_DEFAULT_RXD     256
-#define IGB_MIN_RXD         80
+#define IGB_MIN_RXD         64
 #define IGB_MAX_RXD         4096
 
 #define IGB_DEFAULT_ITR     3 /* dynamic */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 1ab787ed254d..13ba9c74bd84 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3933,8 +3933,9 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
         struct pci_dev *pdev = adapter->pdev;
         struct e1000_hw *hw = &adapter->hw;
 
-        /* Virtualization features not supported on i210 family. */
-        if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
+        /* Virtualization features not supported on i210 and 82580 family. */
+        if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211) ||
+            (hw->mac.type == e1000_82580))
                 return;
 
         /* Of the below we really only want the effect of getting
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index 57d39ee00b58..7b83678ba83a 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -39,11 +39,11 @@ enum latency_range {
 /* Tx/Rx descriptor defines */
 #define IGBVF_DEFAULT_TXD   256
 #define IGBVF_MAX_TXD       4096
-#define IGBVF_MIN_TXD       80
+#define IGBVF_MIN_TXD       64
 
 #define IGBVF_DEFAULT_RXD   256
 #define IGBVF_MAX_RXD       4096
-#define IGBVF_MIN_RXD       80
+#define IGBVF_MIN_RXD       64
 
 #define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */
 #define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 8ebe6999a528..f48f82d5e274 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -379,11 +379,11 @@ static inline u32 igc_rss_type(const union igc_adv_rx_desc *rx_desc)
 /* TX/RX descriptor defines */
 #define IGC_DEFAULT_TXD     256
 #define IGC_DEFAULT_TX_WORK 128
-#define IGC_MIN_TXD         80
+#define IGC_MIN_TXD         64
 #define IGC_MAX_TXD         4096
 
 #define IGC_DEFAULT_RXD     256
-#define IGC_MIN_RXD         80
+#define IGC_MIN_RXD         64
 #define IGC_MAX_RXD         4096
 
 /* Supported Rx Buffer Sizes */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index c2f68678e947..23c2f2ed2fb8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -846,6 +846,21 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
         return 0;
 }
 
+static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req,
+                               u16 *smq, u16 *smq_mask)
+{
+        struct nix_cn10k_aq_enq_req *aq_req;
+
+        if (!is_rvu_otx2(rvu)) {
+                aq_req = (struct nix_cn10k_aq_enq_req *)req;
+                *smq = aq_req->sq.smq;
+                *smq_mask = aq_req->sq_mask.smq;
+        } else {
+                *smq = req->sq.smq;
+                *smq_mask = req->sq_mask.smq;
+        }
+}
+
 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
                                    struct nix_aq_enq_req *req,
                                    struct nix_aq_enq_rsp *rsp)
@@ -857,6 +872,7 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
         struct rvu_block *block;
         struct admin_queue *aq;
         struct rvu_pfvf *pfvf;
+        u16 smq, smq_mask;
         void *ctx, *mask;
         bool ena;
         u64 cfg;
@@ -928,13 +944,14 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
         if (rc)
                 return rc;
 
+        nix_get_aq_req_smq(rvu, req, &smq, &smq_mask);
         /* Check if SQ pointed SMQ belongs to this PF/VF or not */
         if (req->ctype == NIX_AQ_CTYPE_SQ &&
             ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
              (req->op == NIX_AQ_INSTOP_WRITE &&
-              req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
+              req->sq_mask.ena && req->sq.ena && smq_mask))) {
                 if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
-                                     pcifunc, req->sq.smq))
+                                     pcifunc, smq))
                         return NIX_AF_ERR_AQ_ENQUEUE;
         }
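nix_get_aq_req_smq() above reinterprets the same request pointer according to the silicon generation, because the smq field sits at a different offset in the CN10K mbox layout. A minimal sketch of that dispatch pattern, with hypothetical struct names standing in for the real mbox definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct req_otx2  { uint16_t smq; };                    /* legacy layout */
struct req_cn10k { uint16_t reserved; uint16_t smq; }; /* field moved */

static uint16_t get_smq(void *req, bool is_cn10k)
{
        /* caller guarantees the pointer really has the claimed layout */
        if (is_cn10k)
                return ((struct req_cn10k *)req)->smq;
        return ((struct req_otx2 *)req)->smq;
}

int main(void)
{
        struct req_cn10k r = { .reserved = 0, .smq = 7 };

        printf("smq=%u\n", get_smq(&r, true));
        return 0;
}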
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
index 92d3952dfa8b..feeb41693c17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
@@ -17,8 +17,10 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
         if (err)
                 return err;
 
-        if (mlx5e_is_eswitch_flow(parse_state->flow))
+        if (mlx5e_is_eswitch_flow(parse_state->flow)) {
                 attr->esw_attr->split_count = attr->esw_attr->out_count;
+                parse_state->if_count = 0;
+        }
 
         attr->flags |= MLX5_ATTR_FLAG_CT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
index 291193f7120d..f63402c48028 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
@@ -294,6 +294,7 @@ parse_mirred_ovs_master(struct mlx5e_tc_act_parse_state *parse_state,
         if (err)
                 return err;
 
+        parse_state->if_count = 0;
         esw_attr->out_count++;
 
         return 0;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
index 3b272bbf4c53..368a95fa77d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
@@ -98,8 +98,10 @@ tc_act_parse_pedit(struct mlx5e_tc_act_parse_state *parse_state,
 
         attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
 
-        if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
+        if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
                 esw_attr->split_count = esw_attr->out_count;
+                parse_state->if_count = 0;
+        }
 
         return 0;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
index ad09a8a5f36e..2d1d4a04501b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
@@ -66,6 +66,7 @@ tc_act_parse_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state,
         if (err)
                 return err;
 
+        parse_state->if_count = 0;
         esw_attr->out_count++;
 
         return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
index c8a3eaf189f6..a13c5e707b83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
@@ -166,6 +166,7 @@ tc_act_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
                 return err;
 
         esw_attr->split_count = esw_attr->out_count;
+        parse_state->if_count = 0;
 
         return 0;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
index 310b99230760..f17575b09788 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
@@ -65,8 +65,10 @@ tc_act_parse_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
         if (err)
                 return err;
 
-        if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
+        if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
                 attr->esw_attr->split_count = attr->esw_attr->out_count;
+                parse_state->if_count = 0;
+        }
 
         return 0;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 318083690fcd..c24828b688ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -3936,6 +3936,7 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
                 }
 
                 i_split = i + 1;
+                parse_state->if_count = 0;
                 list_add(&attr->list, &flow->attrs);
         }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 6cd7d6497e10..d4cde6555063 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1276,12 +1276,19 @@ int mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
                                      enum mlx5_eswitch_vport_event enabled_events)
 {
+        bool pf_needed;
         int ret;
 
+        pf_needed = mlx5_core_is_ecpf_esw_manager(esw->dev) ||
+                    esw->mode == MLX5_ESWITCH_LEGACY;
+
         /* Enable PF vport */
-        ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_PF, enabled_events);
-        if (ret)
-                return ret;
+        if (pf_needed) {
+                ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_PF,
+                                                    enabled_events);
+                if (ret)
+                        return ret;
+        }
 
         /* Enable external host PF HCA */
         ret = host_pf_enable_hca(esw->dev);
@@ -1317,7 +1324,8 @@ ec_vf_err:
 ecpf_err:
         host_pf_disable_hca(esw->dev);
 pf_hca_err:
-        mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
+        if (pf_needed)
+                mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
         return ret;
 }
 
@@ -1335,7 +1343,10 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
         }
 
         host_pf_disable_hca(esw->dev);
-        mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
+
+        if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
+            esw->mode == MLX5_ESWITCH_LEGACY)
+                mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
 }
 
 static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 752fb0dfb111..b296ac52a439 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -3216,26 +3216,47 @@ esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
         esw_acl_ingress_ofld_cleanup(esw, vport);
 }
 
-static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
+static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw)
 {
-        struct mlx5_vport *vport;
+        struct mlx5_vport *uplink, *manager;
+        int ret;
 
-        vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
-        if (IS_ERR(vport))
-                return PTR_ERR(vport);
+        uplink = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+        if (IS_ERR(uplink))
+                return PTR_ERR(uplink);
+
+        ret = esw_vport_create_offloads_acl_tables(esw, uplink);
+        if (ret)
+                return ret;
+
+        manager = mlx5_eswitch_get_vport(esw, esw->manager_vport);
+        if (IS_ERR(manager)) {
+                ret = PTR_ERR(manager);
+                goto err_manager;
+        }
 
-        return esw_vport_create_offloads_acl_tables(esw, vport);
+        ret = esw_vport_create_offloads_acl_tables(esw, manager);
+        if (ret)
+                goto err_manager;
+
+        return 0;
+
+err_manager:
+        esw_vport_destroy_offloads_acl_tables(esw, uplink);
+        return ret;
 }
 
-static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
+static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
 {
         struct mlx5_vport *vport;
 
-        vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
-        if (IS_ERR(vport))
-                return;
+        vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
+        if (!IS_ERR(vport))
+                esw_vport_destroy_offloads_acl_tables(esw, vport);
 
-        esw_vport_destroy_offloads_acl_tables(esw, vport);
+        vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+        if (!IS_ERR(vport))
+                esw_vport_destroy_offloads_acl_tables(esw, vport);
 }
 
 int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
@@ -3280,7 +3301,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
         }
         esw->fdb_table.offloads.indir = indir;
 
-        err = esw_create_uplink_offloads_acl_tables(esw);
+        err = esw_create_offloads_acl_tables(esw);
         if (err)
                 goto create_acl_err;
 
@@ -3321,7 +3342,7 @@ create_fdb_err:
 create_restore_err:
         esw_destroy_offloads_table(esw);
 create_offloads_err:
-        esw_destroy_uplink_offloads_acl_tables(esw);
+        esw_destroy_offloads_acl_tables(esw);
 create_acl_err:
         mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
 create_indir_err:
@@ -3337,7 +3358,7 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
         esw_destroy_offloads_fdb_tables(esw);
         esw_destroy_restore_table(esw);
         esw_destroy_offloads_table(esw);
-        esw_destroy_uplink_offloads_acl_tables(esw);
+        esw_destroy_offloads_acl_tables(esw);
         mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
         mutex_destroy(&esw->fdb_table.offloads.vports.lock);
 }
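esw_create_offloads_acl_tables() above follows the usual kernel goto-unwind idiom: when the second acquisition fails, the error label releases the first before returning. A standalone sketch of the idiom with stand-in resources (acquire/release are placeholders, not mlx5 calls):

#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *name)
{
        printf("create %s\n", name);
        return malloc(1);
}

static void release(const char *name, void *res)
{
        printf("destroy %s\n", name);
        free(res);
}

static int create_acl_tables(void **uplink, void **manager)
{
        *uplink = acquire("uplink ACL");
        if (!*uplink)
                return -1;

        *manager = acquire("manager ACL");
        if (!*manager)
                goto err_manager;

        return 0;

err_manager:
        release("uplink ACL", *uplink);  /* undo the earlier step */
        return -1;
}

int main(void)
{
        void *uplink, *manager;

        if (create_acl_tables(&uplink, &manager))
                return 1;
        release("manager ACL", manager); /* teardown runs in reverse order */
        release("uplink ACL", uplink);
        return 0;
}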
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index c3f30663070f..b7e151439c48 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -1330,8 +1330,7 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
         struct crypto_aead *tfm;
         int ret;
 
-        /* Pick a sync gcm(aes) cipher to ensure order is preserved. */
-        tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
+        tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 
         if (IS_ERR(tfm))
                 return tfm;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index e8b94580194e..ad29122a5468 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1135,8 +1135,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
                          struct netlink_ext_ack *extack)
 {
         struct net_device *dev = team->dev;
-        struct team_port *port;
         char *portname = port_dev->name;
+        struct team_port *port;
         int err;
 
         if (port_dev->flags & IFF_LOOPBACK) {
@@ -1203,18 +1203,31 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
 
         memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);
 
-        err = team_port_enter(team, port);
+        err = dev_open(port_dev, extack);
         if (err) {
-                netdev_err(dev, "Device %s failed to enter team mode\n",
+                netdev_dbg(dev, "Device %s opening failed\n",
                            portname);
-                goto err_port_enter;
+                goto err_dev_open;
         }
 
-        err = dev_open(port_dev, extack);
+        err = team_upper_dev_link(team, port, extack);
         if (err) {
-                netdev_dbg(dev, "Device %s opening failed\n",
+                netdev_err(dev, "Device %s failed to set upper link\n",
                            portname);
-                goto err_dev_open;
+                goto err_set_upper_link;
+        }
+
+        /* lockdep subclass variable(dev->nested_level) was updated by
+         * team_upper_dev_link().
+         */
+        team_unlock(team);
+        team_lock(team);
+
+        err = team_port_enter(team, port);
+        if (err) {
+                netdev_err(dev, "Device %s failed to enter team mode\n",
+                           portname);
+                goto err_port_enter;
         }
 
         err = vlan_vids_add_by_dev(port_dev, dev);
@@ -1242,13 +1255,6 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
                 goto err_handler_register;
         }
 
-        err = team_upper_dev_link(team, port, extack);
-        if (err) {
-                netdev_err(dev, "Device %s failed to set upper link\n",
-                           portname);
-                goto err_set_upper_link;
-        }
-
         err = __team_option_inst_add_port(team, port);
         if (err) {
                 netdev_err(dev, "Device %s failed to add per-port options\n",
@@ -1295,9 +1301,6 @@ err_set_slave_promisc:
         __team_option_inst_del_port(team, port);
 
 err_option_port_add:
-        team_upper_dev_unlink(team, port);
-
-err_set_upper_link:
         netdev_rx_handler_unregister(port_dev);
 
 err_handler_register:
@@ -1307,13 +1310,16 @@ err_enable_netpoll:
         vlan_vids_del_by_dev(port_dev, dev);
 
 err_vids_add:
+        team_port_leave(team, port);
+
+err_port_enter:
+        team_upper_dev_unlink(team, port);
+
+err_set_upper_link:
         dev_close(port_dev);
 
 err_dev_open:
-        team_port_leave(team, port);
         team_port_set_orig_dev_addr(port);
-
-err_port_enter:
         dev_set_mtu(port_dev, port->orig.mtu);
 
 err_set_mtu:
@@ -1616,6 +1622,7 @@ static int team_init(struct net_device *dev)
         int err;
 
         team->dev = dev;
+        mutex_init(&team->lock);
         team_set_no_mode(team);
         team->notifier_ctx = false;
 
@@ -1643,8 +1650,6 @@ static int team_init(struct net_device *dev)
                 goto err_options_register;
 
         netif_carrier_off(dev);
-        lockdep_register_key(&team->team_lock_key);
-        __mutex_init(&team->lock, "team->team_lock_key", &team->team_lock_key);
         netdev_lockdep_set_classes(dev);
 
         return 0;
@@ -1665,7 +1670,7 @@ static void team_uninit(struct net_device *dev)
         struct team_port *port;
         struct team_port *tmp;
 
-        mutex_lock(&team->lock);
+        team_lock(team);
         list_for_each_entry_safe(port, tmp, &team->port_list, list)
                 team_port_del(team, port->dev);
 
@@ -1674,9 +1679,8 @@ static void team_uninit(struct net_device *dev)
         team_mcast_rejoin_fini(team);
         team_notify_peers_fini(team);
         team_queue_override_fini(team);
-        mutex_unlock(&team->lock);
+        team_unlock(team);
         netdev_change_features(dev);
-        lockdep_unregister_key(&team->team_lock_key);
 }
 
 static void team_destructor(struct net_device *dev)
@@ -1790,18 +1794,18 @@ static void team_set_rx_mode(struct net_device *dev)
 
 static int team_set_mac_address(struct net_device *dev, void *p)
 {
-        struct sockaddr *addr = p;
         struct team *team = netdev_priv(dev);
+        struct sockaddr *addr = p;
         struct team_port *port;
 
         if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
                 return -EADDRNOTAVAIL;
         dev_addr_set(dev, addr->sa_data);
-        mutex_lock(&team->lock);
+        team_lock(team);
         list_for_each_entry(port, &team->port_list, list)
                 if (team->ops.port_change_dev_addr)
                         team->ops.port_change_dev_addr(team, port);
-        mutex_unlock(&team->lock);
+        team_unlock(team);
         return 0;
 }
 
@@ -1815,7 +1819,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
          * Alhough this is reader, it's guarded by team lock. It's not possible
          * to traverse list in reverse under rcu_read_lock
          */
-        mutex_lock(&team->lock);
+        team_lock(team);
         team->port_mtu_change_allowed = true;
         list_for_each_entry(port, &team->port_list, list) {
                 err = dev_set_mtu(port->dev, new_mtu);
@@ -1826,7 +1830,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
                 }
         }
         team->port_mtu_change_allowed = false;
-        mutex_unlock(&team->lock);
+        team_unlock(team);
 
         dev->mtu = new_mtu;
 
@@ -1836,7 +1840,7 @@ unwind:
         list_for_each_entry_continue_reverse(port, &team->port_list, list)
                 dev_set_mtu(port->dev, dev->mtu);
         team->port_mtu_change_allowed = false;
-        mutex_unlock(&team->lock);
+        team_unlock(team);
 
         return err;
 }
@@ -1890,20 +1894,20 @@ static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
          * Alhough this is reader, it's guarded by team lock. It's not possible
          * to traverse list in reverse under rcu_read_lock
          */
-        mutex_lock(&team->lock);
+        team_lock(team);
         list_for_each_entry(port, &team->port_list, list) {
                 err = vlan_vid_add(port->dev, proto, vid);
                 if (err)
                         goto unwind;
         }
-        mutex_unlock(&team->lock);
+        team_unlock(team);
 
         return 0;
 
 unwind:
         list_for_each_entry_continue_reverse(port, &team->port_list, list)
                 vlan_vid_del(port->dev, proto, vid);
-        mutex_unlock(&team->lock);
+        team_unlock(team);
 
         return err;
 }
@@ -1913,10 +1917,10 @@ static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
         struct team *team = netdev_priv(dev);
         struct team_port *port;
 
-        mutex_lock(&team->lock);
+        team_lock(team);
         list_for_each_entry(port, &team->port_list, list)
                 vlan_vid_del(port->dev, proto, vid);
-        mutex_unlock(&team->lock);
+        team_unlock(team);
 
         return 0;
 }
@@ -1938,9 +1942,9 @@ static void team_netpoll_cleanup(struct net_device *dev)
 {
         struct team *team = netdev_priv(dev);
 
-        mutex_lock(&team->lock);
+        team_lock(team);
         __team_netpoll_cleanup(team);
-        mutex_unlock(&team->lock);
+        team_unlock(team);
 }
 
 static int team_netpoll_setup(struct net_device *dev,
@@ -1950,7 +1954,7 @@ static int team_netpoll_setup(struct net_device *dev,
         struct team_port *port;
         int err = 0;
 
-        mutex_lock(&team->lock);
+        team_lock(team);
         list_for_each_entry(port, &team->port_list, list) {
                 err = __team_port_enable_netpoll(port);
                 if (err) {
@@ -1958,7 +1962,7 @@ static int team_netpoll_setup(struct net_device *dev,
                         break;
                 }
         }
-        mutex_unlock(&team->lock);
+        team_unlock(team);
         return err;
 }
 #endif
@@ -1969,9 +1973,9 @@ static int team_add_slave(struct net_device *dev, struct net_device *port_dev,
         struct team *team = netdev_priv(dev);
         int err;
 
-        mutex_lock(&team->lock);
+        team_lock(team);
         err = team_port_add(team, port_dev, extack);
-        mutex_unlock(&team->lock);
+        team_unlock(team);
 
         if (!err)
                 netdev_change_features(dev);
@@ -1984,19 +1988,12 @@ static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
         struct team *team = netdev_priv(dev);
         int err;
 
-        mutex_lock(&team->lock);
+        team_lock(team);
         err = team_port_del(team, port_dev);
-        mutex_unlock(&team->lock);
-
-        if (err)
-                return err;
+        team_unlock(team);
 
-        if (netif_is_team_master(port_dev)) {
-                lockdep_unregister_key(&team->team_lock_key);
-                lockdep_register_key(&team->team_lock_key);
-                lockdep_set_class(&team->lock, &team->team_lock_key);
-        }
-        netdev_change_features(dev);
+        if (!err)
+                netdev_change_features(dev);
 
         return err;
 }
@@ -2316,13 +2313,13 @@ static struct team *team_nl_team_get(struct genl_info *info)
         }
 
         team = netdev_priv(dev);
-        mutex_lock(&team->lock);
+        __team_lock(team);
         return team;
 }
 
 static void team_nl_team_put(struct team *team)
 {
-        mutex_unlock(&team->lock);
+        team_unlock(team);
         dev_put(team->dev);
 }
 
@@ -2984,9 +2981,9 @@ static void team_port_change_check(struct team_port *port, bool linkup)
 {
         struct team *team = port->team;
 
-        mutex_lock(&team->lock);
+        team_lock(team);
         __team_port_change_check(port, linkup);
-        mutex_unlock(&team->lock);
+        team_unlock(team);
 }
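The team rework above replaces the per-instance lockdep key with mutex_lock_nested() keyed by dev->nested_level, so stacked team devices (team over team) take their locks in parent-before-child order under distinct lockdep subclasses. A user-space sketch of locking by nesting depth (pthread stands in for the kernel mutex; there is no user-space lockdep equivalent):

#include <pthread.h>
#include <stdio.h>

struct toy_team {
        pthread_mutex_t lock;
        int nested_level; /* 0 for the top device, +1 per stacking level */
};

static void toy_team_lock(struct toy_team *t)
{
        /* kernel version: mutex_lock_nested(&team->lock, dev->nested_level) */
        pthread_mutex_lock(&t->lock);
        printf("locked team at depth %d\n", t->nested_level);
}

static void toy_team_unlock(struct toy_team *t)
{
        pthread_mutex_unlock(&t->lock);
}

int main(void)
{
        struct toy_team upper = { PTHREAD_MUTEX_INITIALIZER, 0 };
        struct toy_team lower = { PTHREAD_MUTEX_INITIALIZER, 1 };

        toy_team_lock(&upper);  /* always the outer device first... */
        toy_team_lock(&lower);  /* ...then the nested one */
        toy_team_unlock(&lower);
        toy_team_unlock(&upper);
        return 0;
}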
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index 00f8989c29c0..7bcc9d37447a 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -478,7 +478,7 @@ static void lb_stats_refresh(struct work_struct *work)
         team = lb_priv_ex->team;
         lb_priv = get_lb_priv(team);
 
-        if (!mutex_trylock(&team->lock)) {
+        if (!team_trylock(team)) {
                 schedule_delayed_work(&lb_priv_ex->stats.refresh_dw, 0);
                 return;
         }
@@ -515,7 +515,7 @@ static void lb_stats_refresh(struct work_struct *work)
                 schedule_delayed_work(&lb_priv_ex->stats.refresh_dw,
                                       (lb_priv_ex->stats.refresh_interval * HZ) / 10);
 
-        mutex_unlock(&team->lock);
+        team_unlock(team);
 }
 
 static void lb_stats_refresh_interval_get(struct team *team,
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index d43e62ebc2fc..9c6f4f83f22b 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -344,6 +344,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
         struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
         struct veth_rq *rq = NULL;
+        int ret = NETDEV_TX_OK;
         struct net_device *rcv;
         int length = skb->len;
         bool use_napi = false;
@@ -378,11 +379,12 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
         } else {
 drop:
                 atomic64_inc(&priv->dropped);
+                ret = NET_XMIT_DROP;
         }
 
         rcu_read_unlock();
 
-        return NETDEV_TX_OK;
+        return ret;
 }
 
 static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 1b9b15a492fa..12d4447fc8ab 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -221,10 +221,38 @@ struct team {
                 atomic_t count_pending;
                 struct delayed_work dw;
         } mcast_rejoin;
-        struct lock_class_key team_lock_key;
         long mode_priv[TEAM_MODE_PRIV_LONGS];
 };
 
+static inline void __team_lock(struct team *team)
+{
+        mutex_lock(&team->lock);
+}
+
+static inline int team_trylock(struct team *team)
+{
+        return mutex_trylock(&team->lock);
+}
+
+#ifdef CONFIG_LOCKDEP
+static inline void team_lock(struct team *team)
+{
+        ASSERT_RTNL();
+        mutex_lock_nested(&team->lock, team->dev->nested_level);
+}
+
+#else
+static inline void team_lock(struct team *team)
+{
+        __team_lock(team);
+}
+#endif
+
+static inline void team_unlock(struct team *team)
+{
+        mutex_unlock(&team->lock);
+}
+
 static inline int team_dev_queue_xmit(struct team *team, struct team_port *port,
                                       struct sk_buff *skb)
 {
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 8bef1ab62bba..322d87255984 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -41,9 +41,9 @@
 #define PHY_ID_KSZ9477 0x00221631
 
 /* struct phy_device dev_flags definitions */
-#define MICREL_PHY_50MHZ_CLK 0x00000001
-#define MICREL_PHY_FXEN 0x00000002
-#define MICREL_KSZ8_P1_ERRATA 0x00000003
+#define MICREL_PHY_50MHZ_CLK BIT(0)
+#define MICREL_PHY_FXEN BIT(1)
+#define MICREL_KSZ8_P1_ERRATA BIT(2)
 
 #define MICREL_KSZ9021_EXTREG_CTRL 0xB
 #define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC
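The micrel_phy.h change matters because 0x00000003 is not a single bit: it equals MICREL_PHY_50MHZ_CLK | MICREL_PHY_FXEN, so a flags & MICREL_KSZ8_P1_ERRATA test also fired for devices with either of the other flags set. A minimal demonstration (BIT() defined locally to match the kernel's):

#include <stdio.h>

#define BIT(n) (1U << (n))

#define OLD_ERRATA 0x00000003 /* aliases 0x1 | 0x2, the other two flags */
#define NEW_ERRATA BIT(2)     /* a bit of its own */

int main(void)
{
        unsigned int flags = 0x00000001; /* only the 50 MHz clock flag set */

        printf("old test: %d\n", !!(flags & OLD_ERRATA)); /* 1: false hit */
        printf("new test: %d\n", !!(flags & NEW_ERRATA)); /* 0: correct */
        return 0;
}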
@@ -600,7 +600,7 @@ void pcs_get_state(struct phylink_pcs *pcs,
  *
  * The %neg_mode argument should be tested via the phylink_mode_*() family of
  * functions, or for PCS that set pcs->neg_mode true, should be tested
- * against the %PHYLINK_PCS_NEG_* definitions.
+ * against the PHYLINK_PCS_NEG_* definitions.
  */
 int pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
                phy_interface_t interface, const unsigned long *advertising,
@@ -630,7 +630,7 @@ void pcs_an_restart(struct phylink_pcs *pcs);
  *
  * The %mode argument should be tested via the phylink_mode_*() family of
  * functions, or for PCS that set pcs->neg_mode true, should be tested
- * against the %PHYLINK_PCS_NEG_* definitions.
+ * against the PHYLINK_PCS_NEG_* definitions.
  */
 void pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
                  phy_interface_t interface, int speed, int duplex);
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index e8750b4ef7e1..f346b4efbc30 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -483,15 +483,14 @@ static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
                 u64_stats_inc(&tstats->tx_packets);
                 u64_stats_update_end(&tstats->syncp);
                 put_cpu_ptr(tstats);
+                return;
+        }
+
+        if (pkt_len < 0) {
+                DEV_STATS_INC(dev, tx_errors);
+                DEV_STATS_INC(dev, tx_aborted_errors);
         } else {
-                struct net_device_stats *err_stats = &dev->stats;
-
-                if (pkt_len < 0) {
-                        err_stats->tx_errors++;
-                        err_stats->tx_aborted_errors++;
-                } else {
-                        err_stats->tx_dropped++;
-                }
+                DEV_STATS_INC(dev, tx_dropped);
         }
 }
diff --git a/include/net/scm.h b/include/net/scm.h
index c5bcdf65f55c..e8c76b4be2fe 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -9,6 +9,7 @@
 #include <linux/pid.h>
 #include <linux/nsproxy.h>
 #include <linux/sched/signal.h>
+#include <net/compat.h>
 
 /* Well, we should have at least one descriptor open
  * to accept passed FDs 8)
@@ -123,14 +124,17 @@ static inline bool scm_has_secdata(struct socket *sock)
 static __inline__ void scm_pidfd_recv(struct msghdr *msg, struct scm_cookie *scm)
 {
         struct file *pidfd_file = NULL;
-        int pidfd;
+        int len, pidfd;
 
-        /*
-         * put_cmsg() doesn't return an error if CMSG is truncated,
+        /* put_cmsg() doesn't return an error if CMSG is truncated,
          * that's why we need to opencode these checks here.
          */
-        if ((msg->msg_controllen <= sizeof(struct cmsghdr)) ||
-            (msg->msg_controllen - sizeof(struct cmsghdr)) < sizeof(int)) {
+        if (msg->msg_flags & MSG_CMSG_COMPAT)
+                len = sizeof(struct compat_cmsghdr) + sizeof(int);
+        else
+                len = sizeof(struct cmsghdr) + sizeof(int);
+
+        if (msg->msg_controllen < len) {
                 msg->msg_flags |= MSG_CTRUNC;
                 return;
         }
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 89d15ceaf9af..b3b3af0e7844 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -1831,8 +1831,7 @@ u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
         memset(&keys, 0, sizeof(keys));
         __skb_flow_dissect(NULL, skb, &flow_keys_dissector_symmetric,
-                           &keys, NULL, 0, 0, 0,
-                           FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
+                           &keys, NULL, 0, 0, 0, 0);
 
         return __flow_hash_from_keys(&keys, &hashrnd);
 }
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 17caf4ea67da..4eaf7ed0d1f4 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -550,7 +550,7 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
                              bool *pfmemalloc)
 {
         bool ret_pfmemalloc = false;
-        unsigned int obj_size;
+        size_t obj_size;
         void *obj;
 
         obj_size = SKB_HEAD_ALIGN(*size);
@@ -567,7 +567,13 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
                 obj = kmem_cache_alloc_node(skb_small_head_cache, flags, node);
                 goto out;
         }
-        *size = obj_size = kmalloc_size_roundup(obj_size);
+
+        obj_size = kmalloc_size_roundup(obj_size);
+        /* The following cast might truncate high-order bits of obj_size, this
+         * is harmless because kmalloc(obj_size >= 2^32) will fail anyway.
+         */
+        *size = (unsigned int)obj_size;
+
         /*
          * Try a regular allocation, when that fails and we're not entitled
          * to the reserves, fail.
diff --git a/net/core/sock.c b/net/core/sock.c
index d3c7b53368d2..16584e2dd648 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2747,9 +2747,9 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
                 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                 if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf))
                         break;
-                if (sk->sk_shutdown & SEND_SHUTDOWN)
+                if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
                         break;
-                if (sk->sk_err)
+                if (READ_ONCE(sk->sk_err))
                         break;
                 timeo = schedule_timeout(timeo);
         }
@@ -2777,7 +2777,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
                         goto failure;
 
                 err = -EPIPE;
-                if (sk->sk_shutdown & SEND_SHUTDOWN)
+                if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
                         goto failure;
 
                 if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf))
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 0c9e768e5628..418e5fb58fd3 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -353,8 +353,9 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
         struct flowi4 fl4;
         int hlen = LL_RESERVED_SPACE(dev);
         int tlen = dev->needed_tailroom;
-        unsigned int size = mtu;
+        unsigned int size;
 
+        size = min(mtu, IP_MAX_MTU);
         while (1) {
                 skb = alloc_skb(size + hlen + tlen, GFP_ATOMIC | __GFP_NOWARN);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 47d1dd8501b7..85cdbc252654 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1378,7 +1378,7 @@ retry:
          * idev->desync_factor if it's larger
          */
         cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
-        max_desync_factor = min_t(__u32,
+        max_desync_factor = min_t(long,
                                   idev->cnf.max_desync_factor,
                                   cnf_temp_preferred_lft - regen_advance);
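The addrconf change above matters when cnf_temp_preferred_lft - regen_advance goes negative: compared as __u32 the difference wraps to a huge value and always loses the min(), while min_t(long, ...) keeps the sign. A standalone illustration with made-up numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        long max_desync_factor = 600;
        long lft_minus_advance = -5; /* lifetime dipped below regen_advance */

        uint32_t u_min = (uint32_t)max_desync_factor < (uint32_t)lft_minus_advance ?
                         (uint32_t)max_desync_factor : (uint32_t)lft_minus_advance;
        long s_min = max_desync_factor < lft_minus_advance ?
                     max_desync_factor : lft_minus_advance;

        printf("min_t(__u32, ...): %u\n", u_min);  /* 600: -5 wrapped to ~4.3e9 */
        printf("min_t(long, ...):  %ld\n", s_min); /* -5: sign preserved */
        return 0;
}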
a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1859,6 +1859,8 @@ static __net_exit void kcm_exit_net(struct net *net)
          * that all multiplexors and psocks have been destroyed.
          */
         WARN_ON(!list_empty(&knet->mux_list));
+
+        mutex_destroy(&knet->mutex);
 }
 
 static struct pernet_operations kcm_net_ops = {
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
index ea8c4a7174bb..35f49edf63db 100644
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -207,7 +207,7 @@ static struct Qdisc_ops plug_qdisc_ops __read_mostly = {
         .priv_size   = sizeof(struct plug_sched_data),
         .enqueue     = plug_enqueue,
         .dequeue     = plug_dequeue,
-        .peek        = qdisc_peek_head,
+        .peek        = qdisc_peek_dequeued,
         .init        = plug_init,
         .change      = plug_change,
         .reset       = qdisc_reset_queue,
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 1a25752f1a9a..546c10adcacd 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -974,10 +974,13 @@ static void qfq_update_eligible(struct qfq_sched *q)
 }
 
 /* Dequeue head packet of the head class in the DRR queue of the aggregate. */
-static void agg_dequeue(struct qfq_aggregate *agg,
-                        struct qfq_class *cl, unsigned int len)
+static struct sk_buff *agg_dequeue(struct qfq_aggregate *agg,
+                                   struct qfq_class *cl, unsigned int len)
 {
-        qdisc_dequeue_peeked(cl->qdisc);
+        struct sk_buff *skb = qdisc_dequeue_peeked(cl->qdisc);
+
+        if (!skb)
+                return NULL;
 
         cl->deficit -= (int) len;
 
@@ -987,6 +990,8 @@ static void agg_dequeue(struct qfq_aggregate *agg,
                 cl->deficit += agg->lmax;
                 list_move_tail(&cl->alist, &agg->active);
         }
+
+        return skb;
 }
 
 static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,
@@ -1132,11 +1137,18 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
         if (!skb)
                 return NULL;
 
-        qdisc_qstats_backlog_dec(sch, skb);
         sch->q.qlen--;
+
+        skb = agg_dequeue(in_serv_agg, cl, len);
+
+        if (!skb) {
+                sch->q.qlen++;
+                return NULL;
+        }
+
+        qdisc_qstats_backlog_dec(sch, skb);
         qdisc_bstats_update(sch, skb);
 
-        agg_dequeue(in_serv_agg, cl, len);
         /* If lmax is lowered, through qfq_change_class, for a class
          * owning pending packets with larger size than the new value
          * of lmax, then the following condition may hold.
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 86930a8ed012..3e8a04a13668 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -680,7 +680,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
          * What the above comment does talk about? --ANK(980817)
          */
 
-        if (unix_tot_inflight)
+        if (READ_ONCE(unix_tot_inflight))
                 unix_gc();              /* Garbage collect fds */
 }
diff --git a/net/unix/scm.c b/net/unix/scm.c
index e9dde7176c8a..6ff628f2349f 100644
--- a/net/unix/scm.c
+++ b/net/unix/scm.c
@@ -64,7 +64,7 @@ void unix_inflight(struct user_struct *user, struct file *fp)
                 /* Paired with READ_ONCE() in wait_for_unix_gc() */
                 WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
         }
-        user->unix_inflight++;
+        WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1);
         spin_unlock(&unix_gc_lock);
 }
 
@@ -85,7 +85,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp)
                 /* Paired with READ_ONCE() in wait_for_unix_gc() */
                 WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
         }
-        user->unix_inflight--;
+        WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1);
         spin_unlock(&unix_gc_lock);
 }
 
@@ -99,7 +99,7 @@ static inline bool too_many_unix_fds(struct task_struct *p)
 {
         struct user_struct *user = current_user();
 
-        if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
+        if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
                 return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
         return false;
 }
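The af_unix/scm changes annotate lockless readers and writers of unix_tot_inflight and user->unix_inflight so the compiler must emit exactly one untorn access for each. A simplified user-space rendition of the idea (the real READ_ONCE()/WRITE_ONCE() in linux/compiler.h handle more cases, such as differently sized types; __typeof__ is a GCC/Clang extension):

#include <stdio.h>

#define MY_READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
#define MY_WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

static unsigned long inflight; /* written under a lock, read locklessly */

static void inc_inflight(void)
{
        /* one store the compiler cannot tear, cache, or re-issue */
        MY_WRITE_ONCE(inflight, inflight + 1);
}

int main(void)
{
        inc_inflight();
        printf("inflight=%lu\n", MY_READ_ONCE(inflight));
        return 0;
}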