32 files changed, 900 insertions, 274 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-mdio b/Documentation/ABI/testing/sysfs-bus-mdio new file mode 100644 index 000000000000..da86efc7781b --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-mdio @@ -0,0 +1,63 @@ +What: /sys/bus/mdio_bus/devices/.../statistics/ +Date: January 2020 +KernelVersion: 5.6 +Contact: [email protected] +Description: + This folder contains statistics for this MDIO bus: global + counters, plus per-address counters for each device on the + bus. + +What: /sys/bus/mdio_bus/devices/.../statistics/transfers +Date: January 2020 +KernelVersion: 5.6 +Contact: [email protected] +Description: + Total number of transfers for this MDIO bus. + +What: /sys/bus/mdio_bus/devices/.../statistics/errors +Date: January 2020 +KernelVersion: 5.6 +Contact: [email protected] +Description: + Total number of transfer errors for this MDIO bus. + +What: /sys/bus/mdio_bus/devices/.../statistics/writes +Date: January 2020 +KernelVersion: 5.6 +Contact: [email protected] +Description: + Total number of write transactions for this MDIO bus. + +What: /sys/bus/mdio_bus/devices/.../statistics/reads +Date: January 2020 +KernelVersion: 5.6 +Contact: [email protected] +Description: + Total number of read transactions for this MDIO bus. + +What: /sys/bus/mdio_bus/devices/.../statistics/transfers_<addr> +Date: January 2020 +KernelVersion: 5.6 +Contact: [email protected] +Description: + Total number of transfers for this MDIO bus address. + +What: /sys/bus/mdio_bus/devices/.../statistics/errors_<addr> +Date: January 2020 +KernelVersion: 5.6 +Contact: [email protected] +Description: + Total number of transfer errors for this MDIO bus address. + +What: /sys/bus/mdio_bus/devices/.../statistics/writes_<addr> +Date: January 2020 +KernelVersion: 5.6 +Contact: [email protected] +Description: + Total number of write transactions for this MDIO bus address. + +What: /sys/bus/mdio_bus/devices/.../statistics/reads_<addr> +Date: January 2020 +KernelVersion: 5.6 +Contact: [email protected] +Description: + Total number of read transactions for this MDIO bus address. diff --git a/Documentation/networking/devlink/mlxsw.rst b/Documentation/networking/devlink/mlxsw.rst index ccba9769c651..5f9bb0a0616a 100644 --- a/Documentation/networking/devlink/mlxsw.rst +++ b/Documentation/networking/devlink/mlxsw.rst @@ -40,7 +40,7 @@ The ``mlxsw`` driver supports reloading via ``DEVLINK_CMD_RELOAD`` Info versions ============= -The ``mlx5`` driver reports the following versions +The ``mlxsw`` driver reports the following versions ..
list-table:: devlink info versions implemented :widths: 5 5 90 diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index e65ed4d0a7ad..dee79588d2b1 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -549,6 +549,7 @@ source "drivers/net/hyperv/Kconfig" config NETDEVSIM tristate "Simulated networking device" depends on DEBUG_FS + depends on INET depends on IPV6 || IPV6=n select NET_DEVLINK help diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index feccb6201660..3257962c147e 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -172,11 +172,10 @@ static void felix_phylink_validate(struct dsa_switch *ds, int port, phylink_set(mask, Autoneg); phylink_set(mask, Pause); phylink_set(mask, Asym_Pause); - if (state->interface != PHY_INTERFACE_MODE_2500BASEX) { - phylink_set(mask, 10baseT_Full); - phylink_set(mask, 100baseT_Full); - phylink_set(mask, 1000baseT_Full); - } + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Full); + phylink_set(mask, 1000baseT_Full); + /* The internal ports that run at 2.5G are overclocked GMII */ if (state->interface == PHY_INTERFACE_MODE_GMII || state->interface == PHY_INTERFACE_MODE_2500BASEX || @@ -222,8 +221,12 @@ static void felix_phylink_mac_config(struct dsa_switch *ds, int port, * specification in incoming pause frames. */ mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(state->speed); - if (state->pause & MLO_PAUSE_RX) - mac_fc_cfg |= SYS_MAC_FC_CFG_RX_FC_ENA; + + /* handle Rx pause in all cases, with 2500base-X this is used for rate + * adaptation. + */ + mac_fc_cfg |= SYS_MAC_FC_CFG_RX_FC_ENA; + if (state->pause & MLO_PAUSE_TX) mac_fc_cfg |= SYS_MAC_FC_CFG_TX_FC_ENA | SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) | @@ -323,7 +326,7 @@ static int felix_parse_ports_node(struct felix *felix, struct device *dev = felix->ocelot.dev; struct device_node *child; - for_each_child_of_node(ports_node, child) { + for_each_available_child_of_node(ports_node, child) { phy_interface_t phy_mode; u32 port; int err; diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 03482616faa7..2c812b481778 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -645,6 +645,27 @@ static void vsc9959_pcs_init_sgmii(struct phy_device *pcs, const struct phylink_link_state *state) { if (link_an_mode == MLO_AN_INBAND) { + int bmsr, bmcr; + + /* Some PHYs like VSC8234 don't like it when AN restarts on + * their system side and they restart line side AN too, going + * into an endless link up/down loop. Don't restart PCS AN if + * link is up already. + * We do check that AN is enabled just in case this is the 1st + * call, PCS detects a carrier but AN is disabled from power on + * or by boot loader. + */ + bmcr = phy_read(pcs, MII_BMCR); + if (bmcr < 0) + return; + + bmsr = phy_read(pcs, MII_BMSR); + if (bmsr < 0) + return; + + if ((bmcr & BMCR_ANENABLE) && (bmsr & BMSR_LSTATUS)) + return; + /* SGMII spec requires tx_config_Reg[15:0] to be exactly 0x4001 * for the MAC PCS in order to acknowledge the AN. 
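A condensed view of the guard this hunk adds to vsc9959_pcs_init_sgmii(), pulled out as a standalone sketch; the helper name is hypothetical, while phy_read() and the MII_BMCR/MII_BMSR bit definitions are the standard kernel ones:

/* Hypothetical helper: returns true only when it is safe to (re)start
 * PCS autonegotiation. Restarting AN while the link is already up
 * bounces the line side on PHYs such as the VSC8234.
 */
static bool vsc9959_pcs_an_restart_safe(struct phy_device *pcs)
{
        int bmcr, bmsr;

        bmcr = phy_read(pcs, MII_BMCR);
        if (bmcr < 0)
                return false;   /* MDIO read failed: do nothing */

        bmsr = phy_read(pcs, MII_BMSR);
        if (bmsr < 0)
                return false;

        /* AN already enabled and link up: leave the PCS alone */
        return !((bmcr & BMCR_ANENABLE) && (bmsr & BMSR_LSTATUS));
}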
*/ @@ -892,7 +913,6 @@ static void vsc9959_pcs_link_state_usxgmii(struct phy_device *pcs, break; } - pcs->link = USXGMII_LPA_LNKS(lpa); if (USXGMII_LPA_DUPLEX(lpa)) pcs->duplex = DUPLEX_FULL; else diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index 00382b7c5bd8..0c6bf3a55a9a 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -36,7 +36,6 @@ void enetc_sched_speed_set(struct net_device *ndev) case SPEED_10: default: pspeed = ENETC_PMR_PSPEED_10M; - netdev_err(ndev, "Qbv PSPEED set speed link down.\n"); } priv->speed = speed; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index ba2566e2123d..0637ccadee79 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -696,21 +696,24 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev) /** * fm10k_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure + * @txqueue: the index of the Tx queue that timed out **/ static void fm10k_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_ring *tx_ring; bool real_tx_hang = false; - int i; - -#define TX_TIMEO_LIMIT 16000 - for (i = 0; i < interface->num_tx_queues; i++) { - struct fm10k_ring *tx_ring = interface->tx_ring[i]; - if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) - real_tx_hang = true; + if (txqueue >= interface->num_tx_queues) { + WARN(1, "invalid Tx queue index %d", txqueue); + return; } + tx_ring = interface->tx_ring[txqueue]; + if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) + real_tx_hang = true; + +#define TX_TIMEO_LIMIT 16000 if (real_tx_hang) { fm10k_tx_timeout_reset(interface); } else { diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 33912cf964eb..8c3e753bfb9d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -307,37 +307,18 @@ static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue) struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_ring *tx_ring = NULL; - unsigned int i, hung_queue = 0; + unsigned int i; u32 head, val; pf->tx_timeout_count++; - /* find the stopped queue the same way the stack does */ - for (i = 0; i < netdev->num_tx_queues; i++) { - struct netdev_queue *q; - unsigned long trans_start; - - q = netdev_get_tx_queue(netdev, i); - trans_start = q->trans_start; - if (netif_xmit_stopped(q) && - time_after(jiffies, - (trans_start + netdev->watchdog_timeo))) { - hung_queue = i; - break; - } - } - - if (i == netdev->num_tx_queues) { - netdev_info(netdev, "tx_timeout: no netdev hung queue found\n"); - } else { - /* now that we have an index, find the tx_ring struct */ - for (i = 0; i < vsi->num_queue_pairs; i++) { - if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { - if (hung_queue == - vsi->tx_rings[i]->queue_index) { - tx_ring = vsi->tx_rings[i]; - break; - } + /* with txqueue index, find the tx_ring struct */ + for (i = 0; i < vsi->num_queue_pairs; i++) { + if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { + if (txqueue == + vsi->tx_rings[i]->queue_index) { + tx_ring = vsi->tx_rings[i]; + break; } } } @@ -363,14 +344,14 @@ static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue) 
val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0); netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n", - vsi->seid, hung_queue, tx_ring->next_to_clean, + vsi->seid, txqueue, tx_ring->next_to_clean, head, tx_ring->next_to_use, readl(tx_ring->tail), val); } pf->tx_timeout_last_recovery = jiffies; - netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n", - pf->tx_timeout_recovery_level, hung_queue); + netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n", + pf->tx_timeout_recovery_level, txqueue); switch (pf->tx_timeout_recovery_level) { case 1: diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index bf539483e25e..eb9d00608e9a 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -5086,36 +5086,17 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) struct ice_ring *tx_ring = NULL; struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; - int hung_queue = -1; u32 i; pf->tx_timeout_count++; - /* find the stopped queue the same way dev_watchdog() does */ - for (i = 0; i < netdev->num_tx_queues; i++) { - unsigned long trans_start; - struct netdev_queue *q; - - q = netdev_get_tx_queue(netdev, i); - trans_start = q->trans_start; - if (netif_xmit_stopped(q) && - time_after(jiffies, - trans_start + netdev->watchdog_timeo)) { - hung_queue = i; - break; - } - } - - if (i == netdev->num_tx_queues) - netdev_info(netdev, "tx_timeout: no netdev hung queue found\n"); - else - /* now that we have an index, find the tx_ring struct */ - for (i = 0; i < vsi->num_txq; i++) - if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) - if (hung_queue == vsi->tx_rings[i]->q_index) { - tx_ring = vsi->tx_rings[i]; - break; - } + /* now that we have an index, find the tx_ring struct */ + for (i = 0; i < vsi->num_txq; i++) + if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) + if (txqueue == vsi->tx_rings[i]->q_index) { + tx_ring = vsi->tx_rings[i]; + break; + } /* Reset recovery level if enough time has elapsed after last timeout. * Also ensure no new reset action happens before next timeout period. 
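The fm10k, i40e and ice changes here are the same refactor: .ndo_tx_timeout now receives the index of the hung queue from the core dev_watchdog(), so drivers no longer need to re-scan netdev_get_tx_queue()/trans_start to find it. A minimal sketch of the resulting handler shape, with hypothetical foo_* types standing in for a real driver's structures:

static void foo_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
        struct foo_priv *priv = netdev_priv(netdev);
        struct foo_ring *tx_ring;

        /* The core passes a valid stack queue index, but bounding it
         * against the driver's own ring count is a cheap safety net.
         */
        if (txqueue >= priv->num_tx_queues)
                return;

        tx_ring = priv->tx_ring[txqueue];
        if (foo_ring_really_hung(tx_ring))
                foo_schedule_reset(priv);
}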
@@ -5130,19 +5111,19 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) struct ice_hw *hw = &pf->hw; u32 head, val = 0; - head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) & + head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) & QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S; /* Read interrupt register */ val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n", - vsi->vsi_num, hung_queue, tx_ring->next_to_clean, + vsi->vsi_num, txqueue, tx_ring->next_to_clean, head, tx_ring->next_to_use, val); } pf->tx_timeout_last_recovery = jiffies; - netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n", - pf->tx_timeout_recovery_level, hung_queue); + netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n", + pf->tx_timeout_recovery_level, txqueue); switch (pf->tx_timeout_recovery_level) { case 1: diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 0e401f116d54..149dca0012ba 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -1020,8 +1020,8 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget) s16 ntc = xdp_ring->next_to_clean; struct ice_tx_desc *tx_desc; struct ice_tx_buf *tx_buf; - bool xmit_done = true; u32 xsk_frames = 0; + bool xmit_done; tx_desc = ICE_TX_DESC(xdp_ring, ntc); tx_buf = &xdp_ring->tx_buf[ntc]; diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c index db289bcce21d..5a506440560a 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.c +++ b/drivers/net/ethernet/intel/igc/igc_base.c @@ -212,6 +212,7 @@ static s32 igc_get_invariants_base(struct igc_hw *hw) case IGC_DEV_ID_I225_I: case IGC_DEV_ID_I220_V: case IGC_DEV_ID_I225_K: + case IGC_DEV_ID_I225_BLANK_NVM: mac->type = igc_i225; break; default: diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index 2121fc34e300..58efa7a02c68 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -282,6 +282,10 @@ #define IGC_TXD_STAT_TC 0x00000004 /* Tx Underrun */ #define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ +/* IPSec Encrypt Enable */ +#define IGC_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IGC_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + /* Transmit Control */ #define IGC_TCTL_EN 0x00000002 /* enable Tx */ #define IGC_TCTL_PSP 0x00000008 /* pad short packets */ @@ -460,6 +464,7 @@ /* PHY Status Register */ #define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ #define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define IGC_PHY_RST_COMP 0x0100 /* Internal PHY reset completion */ /* PHY 1000 MII Register/Bit Definitions */ /* PHY Registers defined by IEEE */ diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h index 20f710645746..90ac0e0144d8 100644 --- a/drivers/net/ethernet/intel/igc/igc_hw.h +++ b/drivers/net/ethernet/intel/igc/igc_hw.h @@ -21,8 +21,7 @@ #define IGC_DEV_ID_I225_I 0x15F8 #define IGC_DEV_ID_I220_V 0x15F7 #define IGC_DEV_ID_I225_K 0x3100 - -#define IGC_FUNC_0 0 +#define IGC_DEV_ID_I225_BLANK_NVM 0x15FD /* Function pointers for the MAC. 
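The IGC_ADVTXD_L4LEN_SHIFT and IGC_ADVTXD_MSS_SHIFT values added to igc_defines.h above feed the TSO path introduced below: the advanced context descriptor packs the L4 header length and the MSS into a single 32-bit word. A worked example of the packing, assuming a 20-byte TCP header and an MSS of 1448 (the values are illustrative only):

        u32 mss_l4len_idx;

        mss_l4len_idx  = 20 << IGC_ADVTXD_L4LEN_SHIFT;   /* L4 header length */
        mss_l4len_idx |= 1448 << IGC_ADVTXD_MSS_SHIFT;   /* segment payload size */
        /* 20 << 8 | 1448 << 16 == 0x05a81400 */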
*/ struct igc_mac_operations { diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index c359f3d9fb25..d9d5425fe8d9 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -45,6 +45,7 @@ static const struct pci_device_id igc_pci_tbl[] = { { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base }, { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base }, { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base }, /* required last entry */ {0, } }; @@ -880,7 +881,7 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, /* set bits to identify this as an advanced context descriptor */ type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT; - /* For 82575, context index must be unique per ring. */ + /* For i225, context index must be unique per ring. */ if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) mss_l4len_idx |= tx_ring->reg_idx << 4; @@ -999,6 +1000,10 @@ static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) IGC_ADVTXD_DCMD_DEXT | IGC_ADVTXD_DCMD_IFCS; + /* set segmentation bits for TSO */ + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO, + (IGC_ADVTXD_DCMD_TSE)); + /* set timestamp bit if present */ cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP, (IGC_ADVTXD_MAC_TSTAMP)); @@ -1170,6 +1175,100 @@ dma_error: return -1; } +static int igc_tso(struct igc_ring *tx_ring, + struct igc_tx_buffer *first, + u8 *hdr_len) +{ + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; + struct sk_buff *skb = first->skb; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; + + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + unsigned char *csum_start = skb_checksum_start(skb); + unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); + + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->check = csum_fold(csum_partial(trans_start, + csum_start - trans_start, + 0)); + type_tucmd |= IGC_ADVTXD_TUCMD_IPV4; + + ip.v4->tot_len = 0; + first->tx_flags |= IGC_TX_FLAGS_TSO | + IGC_TX_FLAGS_CSUM | + IGC_TX_FLAGS_IPV4; + } else { + ip.v6->payload_len = 0; + first->tx_flags |= IGC_TX_FLAGS_TSO | + IGC_TX_FLAGS_CSUM; + } + + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from inner checksum */ + paylen = skb->len - l4_offset; + if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) { + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(paylen)); + } else { + /* compute length of segmentation header */ + *hdr_len = sizeof(*l4.udp) + l4_offset; + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); + } + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* MSS L4LEN IDX */ + mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT; 
+ mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT; + + /* VLAN MACLEN IPLEN */ + vlan_macip_lens = l4.hdr - ip.hdr; + vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; + + igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, + type_tucmd, mss_l4len_idx); + + return 1; +} + static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, struct igc_ring *tx_ring) { @@ -1179,6 +1278,7 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, u32 tx_flags = 0; unsigned short f; u8 hdr_len = 0; + int tso = 0; /* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD, * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD, @@ -1225,11 +1325,21 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, first->tx_flags = tx_flags; first->protocol = protocol; - igc_tx_csum(tx_ring, first); + tso = igc_tso(tx_ring, first, &hdr_len); + if (tso < 0) + goto out_drop; + else if (!tso) + igc_tx_csum(tx_ring, first); igc_tx_map(tx_ring, first, hdr_len); return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; + + return NETDEV_TX_OK; } static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter, @@ -4588,6 +4698,8 @@ static int igc_probe(struct pci_dev *pdev, /* Add supported features to the features list*/ netdev->features |= NETIF_F_SG; + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; netdev->features |= NETIF_F_RXCSUM; netdev->features |= NETIF_F_HW_CSUM; netdev->features |= NETIF_F_SCTP_CRC; diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c index f4b05af0dd2f..8e1799508edc 100644 --- a/drivers/net/ethernet/intel/igc/igc_phy.c +++ b/drivers/net/ethernet/intel/igc/igc_phy.c @@ -173,6 +173,7 @@ s32 igc_check_downshift(struct igc_hw *hw) s32 igc_phy_hw_reset(struct igc_hw *hw) { struct igc_phy_info *phy = &hw->phy; + u32 phpm = 0, timeout = 10000; s32 ret_val; u32 ctrl; @@ -186,6 +187,8 @@ s32 igc_phy_hw_reset(struct igc_hw *hw) if (ret_val) goto out; + phpm = rd32(IGC_I225_PHPM); + ctrl = rd32(IGC_CTRL); wr32(IGC_CTRL, ctrl | IGC_CTRL_PHY_RST); wrfl(); @@ -195,7 +198,18 @@ s32 igc_phy_hw_reset(struct igc_hw *hw) wr32(IGC_CTRL, ctrl); wrfl(); - usleep_range(1500, 2000); + /* SW should guarantee 100us for the completion of the PHY reset */ + usleep_range(100, 150); + do { + phpm = rd32(IGC_I225_PHPM); + timeout--; + udelay(1); + } while (!(phpm & IGC_PHY_RST_COMP) && timeout); + + if (!timeout) + hw_dbg("Timeout is expired after a phy reset\n"); + + usleep_range(100, 150); phy->ops.release(hw); diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index c82111051898..c9029b549b90 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -12,6 +12,7 @@ #define IGC_MDIC 0x00020 /* MDI Control - RW */ #define IGC_MDICNFG 0x00E04 /* MDC/MDIO Configuration - RW */ #define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +#define IGC_I225_PHPM 0x00E14 /* I225 PHY Power Management */ /* Internal Packet Buffer Size Registers */ #define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c index 8f56289fc2ec..f32d56ac3e80 100644 --- a/drivers/net/netdevsim/fib.c +++ b/drivers/net/netdevsim/fib.c @@ -467,7 +467,7 @@ nsim_fib6_rt_create(struct nsim_fib_data *data, fib6_rt = kzalloc(sizeof(*fib6_rt), GFP_ATOMIC); if (!fib6_rt) - return 
NULL; + return ERR_PTR(-ENOMEM); nsim_fib_rt_init(data, &fib6_rt->common, &rt->fib6_dst.addr, sizeof(rt->fib6_dst.addr), rt->fib6_dst.plen, AF_INET6, @@ -650,8 +650,8 @@ static int nsim_fib6_rt_insert(struct nsim_fib_data *data, int err; fib6_rt = nsim_fib6_rt_create(data, fen6_info); - if (!fib6_rt) - return -ENOMEM; + if (IS_ERR(fib6_rt)) + return PTR_ERR(fib6_rt); fib6_rt_old = nsim_fib6_rt_lookup(&data->fib_rt_ht, fen6_info->rt); if (!fib6_rt_old) diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c index cf5a391c93e6..c7eabe4382fb 100644 --- a/drivers/net/phy/adin.c +++ b/drivers/net/phy/adin.c @@ -145,7 +145,7 @@ struct adin_clause45_mmd_map { u16 adin_regnum; }; -static struct adin_clause45_mmd_map adin_clause45_mmd_map[] = { +static const struct adin_clause45_mmd_map adin_clause45_mmd_map[] = { { MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE, ADIN1300_EEE_CAP_REG }, { MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, ADIN1300_EEE_LPABLE_REG }, { MDIO_MMD_AN, MDIO_AN_EEE_ADV, ADIN1300_EEE_ADV_REG }, @@ -159,7 +159,7 @@ struct adin_hw_stat { u16 reg2; }; -static struct adin_hw_stat adin_hw_stats[] = { +static const struct adin_hw_stat adin_hw_stats[] = { { "total_frames_checked_count", 0x940A, 0x940B }, /* hi + lo */ { "length_error_frames_count", 0x940C }, { "alignment_error_frames_count", 0x940D }, @@ -456,7 +456,7 @@ static int adin_phy_config_intr(struct phy_device *phydev) static int adin_cl45_to_adin_reg(struct phy_device *phydev, int devad, u16 cl45_regnum) { - struct adin_clause45_mmd_map *m; + const struct adin_clause45_mmd_map *m; int i; if (devad == MDIO_MMD_VEND1) @@ -625,7 +625,7 @@ static int adin_soft_reset(struct phy_device *phydev) if (rc < 0) return rc; - msleep(10); + msleep(20); /* If we get a read error something may be wrong */ rc = phy_read_mmd(phydev, MDIO_MMD_VEND1, @@ -650,7 +650,7 @@ static void adin_get_strings(struct phy_device *phydev, u8 *data) } static int adin_read_mmd_stat_regs(struct phy_device *phydev, - struct adin_hw_stat *stat, + const struct adin_hw_stat *stat, u32 *val) { int ret; @@ -676,7 +676,7 @@ static int adin_read_mmd_stat_regs(struct phy_device *phydev, static u64 adin_get_stat(struct phy_device *phydev, int i) { - struct adin_hw_stat *stat = &adin_hw_stats[i]; + const struct adin_hw_stat *stat = &adin_hw_stats[i]; struct adin_priv *priv = phydev->priv; u32 val; int ret; diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 8d753bb07227..9bb9f37f21dc 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -158,9 +158,11 @@ struct mii_bus *mdiobus_alloc_size(size_t size) if (size) bus->priv = (void *)bus + aligned_size; - /* Initialise the interrupts to polling */ - for (i = 0; i < PHY_MAX_ADDR; i++) + /* Initialise the interrupts to polling and 64-bit seqcounts */ + for (i = 0; i < PHY_MAX_ADDR; i++) { bus->irq[i] = PHY_POLL; + u64_stats_init(&bus->stats[i].syncp); + } return bus; } @@ -249,9 +251,215 @@ static void mdiobus_release(struct device *d) kfree(bus); } +struct mdio_bus_stat_attr { + int addr; + unsigned int field_offset; +}; + +static u64 mdio_bus_get_stat(struct mdio_bus_stats *s, unsigned int offset) +{ + const char *p = (const char *)s + offset; + unsigned int start; + u64 val = 0; + + do { + start = u64_stats_fetch_begin(&s->syncp); + val = u64_stats_read((const u64_stats_t *)p); + } while (u64_stats_fetch_retry(&s->syncp, start)); + + return val; +} + +static u64 mdio_bus_get_global_stat(struct mii_bus *bus, unsigned int offset) +{ + unsigned int i; + u64 val = 0; + + for (i = 0; i < PHY_MAX_ADDR; i++) 
+ val += mdio_bus_get_stat(&bus->stats[i], offset); + + return val; +} + +static ssize_t mdio_bus_stat_field_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mii_bus *bus = to_mii_bus(dev); + struct mdio_bus_stat_attr *sattr; + struct dev_ext_attribute *eattr; + u64 val; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + sattr = eattr->var; + + if (sattr->addr < 0) + val = mdio_bus_get_global_stat(bus, sattr->field_offset); + else + val = mdio_bus_get_stat(&bus->stats[sattr->addr], + sattr->field_offset); + + return sprintf(buf, "%llu\n", val); +} + +static ssize_t mdio_bus_device_stat_field_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mdio_device *mdiodev = to_mdio_device(dev); + struct mii_bus *bus = mdiodev->bus; + struct mdio_bus_stat_attr *sattr; + struct dev_ext_attribute *eattr; + int addr = mdiodev->addr; + u64 val; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + sattr = eattr->var; + + val = mdio_bus_get_stat(&bus->stats[addr], sattr->field_offset); + + return sprintf(buf, "%llu\n", val); +} + +#define MDIO_BUS_STATS_ATTR_DECL(field, file) \ +static struct dev_ext_attribute dev_attr_mdio_bus_##field = { \ + .attr = { .attr = { .name = file, .mode = 0444 }, \ + .show = mdio_bus_stat_field_show, \ + }, \ + .var = &((struct mdio_bus_stat_attr) { \ + -1, offsetof(struct mdio_bus_stats, field) \ + }), \ +}; \ +static struct dev_ext_attribute dev_attr_mdio_bus_device_##field = { \ + .attr = { .attr = { .name = file, .mode = 0444 }, \ + .show = mdio_bus_device_stat_field_show, \ + }, \ + .var = &((struct mdio_bus_stat_attr) { \ + -1, offsetof(struct mdio_bus_stats, field) \ + }), \ +}; + +#define MDIO_BUS_STATS_ATTR(field) \ + MDIO_BUS_STATS_ATTR_DECL(field, __stringify(field)) + +MDIO_BUS_STATS_ATTR(transfers); +MDIO_BUS_STATS_ATTR(errors); +MDIO_BUS_STATS_ATTR(writes); +MDIO_BUS_STATS_ATTR(reads); + +#define MDIO_BUS_STATS_ADDR_ATTR_DECL(field, addr, file) \ +static struct dev_ext_attribute dev_attr_mdio_bus_addr_##field##_##addr = { \ + .attr = { .attr = { .name = file, .mode = 0444 }, \ + .show = mdio_bus_stat_field_show, \ + }, \ + .var = &((struct mdio_bus_stat_attr) { \ + addr, offsetof(struct mdio_bus_stats, field) \ + }), \ +} + +#define MDIO_BUS_STATS_ADDR_ATTR(field, addr) \ + MDIO_BUS_STATS_ADDR_ATTR_DECL(field, addr, \ + __stringify(field) "_" __stringify(addr)) + +#define MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(addr) \ + MDIO_BUS_STATS_ADDR_ATTR(transfers, addr); \ + MDIO_BUS_STATS_ADDR_ATTR(errors, addr); \ + MDIO_BUS_STATS_ADDR_ATTR(writes, addr); \ + MDIO_BUS_STATS_ADDR_ATTR(reads, addr) \ + +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(0); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(1); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(2); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(3); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(4); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(5); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(6); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(7); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(8); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(9); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(10); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(11); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(12); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(13); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(14); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(15); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(16); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(17); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(18); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(19); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(20); 
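For reference, a single expansion of the per-address macro above — MDIO_BUS_STATS_ADDR_ATTR(transfers, 5) — boils down to:

static struct dev_ext_attribute dev_attr_mdio_bus_addr_transfers_5 = {
        .attr = { .attr = { .name = "transfers_5", .mode = 0444 },
                  .show = mdio_bus_stat_field_show,
        },
        .var = &((struct mdio_bus_stat_attr) {
                5, offsetof(struct mdio_bus_stats, transfers)
        }),
};

i.e. a read-only file named "transfers_5" in the statistics/ directory, whose show routine fetches the transfers counter for bus address 5.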
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(21); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(22); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(23); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(24); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(25); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(26); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(27); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(28); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(29); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(30); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(31); + +#define MDIO_BUS_STATS_ADDR_ATTR_GROUP(addr) \ + &dev_attr_mdio_bus_addr_transfers_##addr.attr.attr, \ + &dev_attr_mdio_bus_addr_errors_##addr.attr.attr, \ + &dev_attr_mdio_bus_addr_writes_##addr.attr.attr, \ + &dev_attr_mdio_bus_addr_reads_##addr.attr.attr \ + +static struct attribute *mdio_bus_statistics_attrs[] = { + &dev_attr_mdio_bus_transfers.attr.attr, + &dev_attr_mdio_bus_errors.attr.attr, + &dev_attr_mdio_bus_writes.attr.attr, + &dev_attr_mdio_bus_reads.attr.attr, + MDIO_BUS_STATS_ADDR_ATTR_GROUP(0), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(1), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(2), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(3), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(4), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(5), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(6), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(7), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(8), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(9), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(10), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(11), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(12), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(13), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(14), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(15), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(16), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(17), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(18), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(19), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(20), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(21), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(22), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(23), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(24), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(25), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(26), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(27), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(28), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(29), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(30), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(31), + NULL, +}; + +static const struct attribute_group mdio_bus_statistics_group = { + .name = "statistics", + .attrs = mdio_bus_statistics_attrs, +}; + +static const struct attribute_group *mdio_bus_groups[] = { + &mdio_bus_statistics_group, + NULL, +}; + static struct class mdio_bus_class = { .name = "mdio_bus", .dev_release = mdiobus_release, + .dev_groups = mdio_bus_groups, }; #if IS_ENABLED(CONFIG_OF_MDIO) @@ -530,6 +738,24 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr) } EXPORT_SYMBOL(mdiobus_scan); +static void mdiobus_stats_acct(struct mdio_bus_stats *stats, bool op, int ret) +{ + u64_stats_update_begin(&stats->syncp); + + u64_stats_inc(&stats->transfers); + if (ret < 0) { + u64_stats_inc(&stats->errors); + goto out; + } + + if (op) + u64_stats_inc(&stats->reads); + else + u64_stats_inc(&stats->writes); +out: + u64_stats_update_end(&stats->syncp); +} + /** * __mdiobus_read - Unlocked version of the mdiobus_read function * @bus: the mii_bus struct @@ -549,6 +775,7 @@ int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum) retval = bus->read(bus, addr, regnum); trace_mdio_access(bus, 1, addr, regnum, retval, retval); + mdiobus_stats_acct(&bus->stats[addr], true, retval); return retval; } @@ -574,6 +801,7 @@ int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val) err = bus->write(bus, addr, regnum, val); trace_mdio_access(bus, 0, addr, 
regnum, val, err); + mdiobus_stats_acct(&bus->stats[addr], false, err); return err; } @@ -719,8 +947,27 @@ static int mdio_uevent(struct device *dev, struct kobj_uevent_env *env) return 0; } +static struct attribute *mdio_bus_device_statistics_attrs[] = { + &dev_attr_mdio_bus_device_transfers.attr.attr, + &dev_attr_mdio_bus_device_errors.attr.attr, + &dev_attr_mdio_bus_device_writes.attr.attr, + &dev_attr_mdio_bus_device_reads.attr.attr, + NULL, +}; + +static const struct attribute_group mdio_bus_device_statistics_group = { + .name = "statistics", + .attrs = mdio_bus_device_statistics_attrs, +}; + +static const struct attribute_group *mdio_bus_dev_groups[] = { + &mdio_bus_device_statistics_group, + NULL, +}; + struct bus_type mdio_bus_type = { .name = "mdio_bus", + .dev_groups = mdio_bus_dev_groups, .match = mdio_bus_match, .uevent = mdio_uevent, }; diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c index 10d580c3dea3..6b7532f7c936 100644 --- a/drivers/net/xen-netback/hash.c +++ b/drivers/net/xen-netback/hash.c @@ -51,7 +51,8 @@ static void xenvif_add_hash(struct xenvif *vif, const u8 *tag, found = false; oldest = NULL; - list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) { + list_for_each_entry_rcu(entry, &vif->hash.cache.list, link, + lockdep_is_held(&vif->hash.cache.lock)) { /* Make sure we don't add duplicate entries */ if (entry->len == len && memcmp(entry->tag, tag, len) == 0) @@ -102,7 +103,8 @@ static void xenvif_flush_hash(struct xenvif *vif) spin_lock_irqsave(&vif->hash.cache.lock, flags); - list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) { + list_for_each_entry_rcu(entry, &vif->hash.cache.list, link, + lockdep_is_held(&vif->hash.cache.lock)) { list_del_rcu(&entry->link); vif->hash.cache.count--; kfree_rcu(entry, rcu); diff --git a/include/linux/phy.h b/include/linux/phy.h index 2929d0bc307f..99a87f02667f 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -22,6 +22,7 @@ #include <linux/timer.h> #include <linux/workqueue.h> #include <linux/mod_devicetable.h> +#include <linux/u64_stats_sync.h> #include <linux/atomic.h> @@ -212,6 +213,15 @@ struct sfp_bus; struct sfp_upstream_ops; struct sk_buff; +struct mdio_bus_stats { + u64_stats_t transfers; + u64_stats_t errors; + u64_stats_t writes; + u64_stats_t reads; + /* Must be last, add new statistics above */ + struct u64_stats_sync syncp; +}; + /* * The Bus class for PHYs. 
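With the statistics groups attached to both the bus class and the mdio_bus device type above, the counters documented in sysfs-bus-mdio are plain sysfs files. A minimal userspace sketch for reading one of them; the <bus-id> path component is a placeholder, real ids depend on the platform:

#include <stdio.h>

int main(void)
{
        const char *path =
                "/sys/bus/mdio_bus/devices/<bus-id>/statistics/transfers";
        unsigned long long transfers;
        FILE *f = fopen(path, "r");

        if (!f)
                return 1;
        if (fscanf(f, "%llu", &transfers) != 1) {
                fclose(f);
                return 1;
        }
        fclose(f);
        printf("transfers: %llu\n", transfers);
        return 0;
}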
Devices which provide access to * PHYs should register using this structure @@ -224,6 +234,7 @@ struct mii_bus { int (*read)(struct mii_bus *bus, int addr, int regnum); int (*write)(struct mii_bus *bus, int addr, int regnum, u16 val); int (*reset)(struct mii_bus *bus); + struct mdio_bus_stats stats[PHY_MAX_ADDR]; /* * A lock to ensure that only one thing can read/write diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index b8ceaf0cd997..854d39ef1ca3 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -347,9 +347,9 @@ static inline struct net *read_pnet(const possible_net_t *pnet) #endif int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp); -int peernet2id(struct net *net, struct net *peer); -bool peernet_has_id(struct net *net, struct net *peer); -struct net *get_net_ns_by_id(struct net *net, int id); +int peernet2id(const struct net *net, struct net *peer); +bool peernet_has_id(const struct net *net, struct net *peer); +struct net *get_net_ns_by_id(const struct net *net, int id); struct pernet_operations { struct list_head list; @@ -427,7 +427,7 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header) } #endif -static inline int rt_genid_ipv4(struct net *net) +static inline int rt_genid_ipv4(const struct net *net) { return atomic_read(&net->ipv4.rt_genid); } @@ -459,7 +459,7 @@ static inline void rt_genid_bump_all(struct net *net) rt_genid_bump_ipv6(net); } -static inline int fnhe_genid(struct net *net) +static inline int fnhe_genid(const struct net *net) { return atomic_read(&net->fnhe_genid); } diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index 415b8f49d150..e0f709d9d547 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h @@ -47,6 +47,11 @@ struct nf_flowtable { possible_net_t net; }; +static inline bool nf_flowtable_hw_offload(struct nf_flowtable *flowtable) +{ + return flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD; +} + enum flow_offload_tuple_dir { FLOW_OFFLOAD_DIR_ORIGINAL = IP_CT_DIR_ORIGINAL, FLOW_OFFLOAD_DIR_REPLY = IP_CT_DIR_REPLY, @@ -83,13 +88,15 @@ struct flow_offload_tuple_rhash { struct flow_offload_tuple tuple; }; -#define FLOW_OFFLOAD_SNAT 0x1 -#define FLOW_OFFLOAD_DNAT 0x2 -#define FLOW_OFFLOAD_DYING 0x4 -#define FLOW_OFFLOAD_TEARDOWN 0x8 -#define FLOW_OFFLOAD_HW 0x10 -#define FLOW_OFFLOAD_HW_DYING 0x20 -#define FLOW_OFFLOAD_HW_DEAD 0x40 +enum nf_flow_flags { + NF_FLOW_SNAT, + NF_FLOW_DNAT, + NF_FLOW_TEARDOWN, + NF_FLOW_HW, + NF_FLOW_HW_DYING, + NF_FLOW_HW_DEAD, + NF_FLOW_HW_REFRESH, +}; enum flow_offload_type { NF_FLOW_OFFLOAD_UNSPEC = 0, @@ -99,7 +106,7 @@ enum flow_offload_type { struct flow_offload { struct flow_offload_tuple_rhash tuplehash[FLOW_OFFLOAD_DIR_MAX]; struct nf_conn *ct; - u16 flags; + unsigned long flags; u16 type; u32 timeout; struct rcu_head rcu_head; @@ -134,10 +141,6 @@ int nf_flow_table_init(struct nf_flowtable *flow_table); void nf_flow_table_free(struct nf_flowtable *flow_table); void flow_offload_teardown(struct flow_offload *flow); -static inline void flow_offload_dead(struct flow_offload *flow) -{ - flow->flags |= FLOW_OFFLOAD_DYING; -} int nf_flow_snat_port(const struct flow_offload *flow, struct sk_buff *skb, unsigned int thoff, diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index e237ecbdcd8a..261864736b26 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -485,6 
+485,20 @@ enum nft_immediate_attributes { #define NFTA_IMMEDIATE_MAX (__NFTA_IMMEDIATE_MAX - 1) /** + * enum nft_bitwise_ops - nf_tables bitwise operations + * + * @NFT_BITWISE_BOOL: mask-and-xor operation used to implement NOT, AND, OR and + * XOR boolean operations + * @NFT_BITWISE_LSHIFT: left-shift operation + * @NFT_BITWISE_RSHIFT: right-shift operation + */ +enum nft_bitwise_ops { + NFT_BITWISE_BOOL, + NFT_BITWISE_LSHIFT, + NFT_BITWISE_RSHIFT, +}; + +/** * enum nft_bitwise_attributes - nf_tables bitwise expression netlink attributes * * @NFTA_BITWISE_SREG: source register (NLA_U32: nft_registers) @@ -492,16 +506,20 @@ enum nft_immediate_attributes { * @NFTA_BITWISE_LEN: length of operands (NLA_U32) * @NFTA_BITWISE_MASK: mask value (NLA_NESTED: nft_data_attributes) * @NFTA_BITWISE_XOR: xor value (NLA_NESTED: nft_data_attributes) + * @NFTA_BITWISE_OP: type of operation (NLA_U32: nft_bitwise_ops) + * @NFTA_BITWISE_DATA: argument for non-boolean operations + * (NLA_NESTED: nft_data_attributes) * - * The bitwise expression performs the following operation: + * The bitwise expression supports boolean and shift operations. It implements + * the boolean operations by performing the following operation: * * dreg = (sreg & mask) ^ xor * - * which allow to express all bitwise operations: + * with these mask and xor values: * * mask xor * NOT: 1 1 - * OR: 0 x + * OR: ~x x * XOR: 1 x * AND: x 0 */ @@ -512,6 +530,8 @@ enum nft_bitwise_attributes { NFTA_BITWISE_LEN, NFTA_BITWISE_MASK, NFTA_BITWISE_XOR, + NFTA_BITWISE_OP, + NFTA_BITWISE_DATA, __NFTA_BITWISE_MAX }; #define NFTA_BITWISE_MAX (__NFTA_BITWISE_MAX - 1) diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 6412c1fbfcb5..757cc1d084e7 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -268,7 +268,7 @@ int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp) EXPORT_SYMBOL_GPL(peernet2id_alloc); /* This function returns, if assigned, the id of a peer netns. */ -int peernet2id(struct net *net, struct net *peer) +int peernet2id(const struct net *net, struct net *peer) { int id; @@ -283,12 +283,12 @@ EXPORT_SYMBOL(peernet2id); /* This function returns true if the peer netns has an id assigned into the * current netns.
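The corrected mask/xor table above can be verified by substituting into dreg = (sreg & mask) ^ xor. For OR with a constant x (mask = ~x, xor = x): bits where x is 1 become (bit & 0) ^ 1 = 1, and bits where x is 0 become (bit & 1) ^ 0 = bit, which is exactly sreg | x. As a one-line check (the helper name is hypothetical):

u32 nft_bool_or(u32 sreg, u32 x)
{
        return (sreg & ~x) ^ x;  /* identical to sreg | x */
}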
*/ -bool peernet_has_id(struct net *net, struct net *peer) +bool peernet_has_id(const struct net *net, struct net *peer) { return peernet2id(net, peer) >= 0; } -struct net *get_net_ns_by_id(struct net *net, int id) +struct net *get_net_ns_by_id(const struct net *net, int id) { struct net *peer; diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c index e33a73cb1f42..7e91989a1b55 100644 --- a/net/netfilter/nf_flow_table_core.c +++ b/net/netfilter/nf_flow_table_core.c @@ -61,9 +61,9 @@ struct flow_offload *flow_offload_alloc(struct nf_conn *ct) flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY); if (ct->status & IPS_SRC_NAT) - flow->flags |= FLOW_OFFLOAD_SNAT; + __set_bit(NF_FLOW_SNAT, &flow->flags); if (ct->status & IPS_DST_NAT) - flow->flags |= FLOW_OFFLOAD_DNAT; + __set_bit(NF_FLOW_DNAT, &flow->flags); return flow; @@ -182,8 +182,6 @@ void flow_offload_free(struct flow_offload *flow) default: break; } - if (flow->flags & FLOW_OFFLOAD_DYING) - nf_ct_delete(flow->ct, 0, 0); nf_ct_put(flow->ct); kfree_rcu(flow, rcu_head); } @@ -245,8 +243,10 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow) return err; } - if (flow_table->flags & NF_FLOWTABLE_HW_OFFLOAD) + if (nf_flowtable_hw_offload(flow_table)) { + __set_bit(NF_FLOW_HW, &flow->flags); nf_flow_offload_add(flow_table, flow); + } return 0; } @@ -271,7 +271,7 @@ static void flow_offload_del(struct nf_flowtable *flow_table, if (nf_flow_has_expired(flow)) flow_offload_fixup_ct(flow->ct); - else if (flow->flags & FLOW_OFFLOAD_TEARDOWN) + else if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) flow_offload_fixup_ct_timeout(flow->ct); flow_offload_free(flow); @@ -279,7 +279,7 @@ static void flow_offload_del(struct nf_flowtable *flow_table, void flow_offload_teardown(struct flow_offload *flow) { - flow->flags |= FLOW_OFFLOAD_TEARDOWN; + set_bit(NF_FLOW_TEARDOWN, &flow->flags); flow_offload_fixup_ct_state(flow->ct); } @@ -300,7 +300,7 @@ flow_offload_lookup(struct nf_flowtable *flow_table, dir = tuplehash->tuple.dir; flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]); - if (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN)) + if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) return NULL; if (unlikely(nf_ct_is_dying(flow->ct))) @@ -348,19 +348,18 @@ static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data) { struct nf_flowtable *flow_table = data; - if (flow->flags & FLOW_OFFLOAD_HW) - nf_flow_offload_stats(flow_table, flow); - if (nf_flow_has_expired(flow) || nf_ct_is_dying(flow->ct) || - (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))) { - if (flow->flags & FLOW_OFFLOAD_HW) { - if (!(flow->flags & FLOW_OFFLOAD_HW_DYING)) + test_bit(NF_FLOW_TEARDOWN, &flow->flags)) { + if (test_bit(NF_FLOW_HW, &flow->flags)) { + if (!test_bit(NF_FLOW_HW_DYING, &flow->flags)) nf_flow_offload_del(flow_table, flow); - else if (flow->flags & FLOW_OFFLOAD_HW_DEAD) + else if (test_bit(NF_FLOW_HW_DEAD, &flow->flags)) flow_offload_del(flow_table, flow); } else { flow_offload_del(flow_table, flow); } + } else if (test_bit(NF_FLOW_HW, &flow->flags)) { + nf_flow_offload_stats(flow_table, flow); } } @@ -524,7 +523,7 @@ static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data) if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) && (flow->tuplehash[0].tuple.iifidx == dev->ifindex || flow->tuplehash[1].tuple.iifidx == dev->ifindex)) - flow_offload_dead(flow); + flow_offload_teardown(flow); } static void nf_flow_table_iterate_cleanup(struct nf_flowtable 
*flowtable, diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c index 7ea2ddc2aa93..9e563fd3da0f 100644 --- a/net/netfilter/nf_flow_table_ip.c +++ b/net/netfilter/nf_flow_table_ip.c @@ -144,11 +144,11 @@ static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb, { struct iphdr *iph = ip_hdr(skb); - if (flow->flags & FLOW_OFFLOAD_SNAT && + if (test_bit(NF_FLOW_SNAT, &flow->flags) && (nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 || nf_flow_snat_ip(flow, skb, iph, thoff, dir) < 0)) return -1; - if (flow->flags & FLOW_OFFLOAD_DNAT && + if (test_bit(NF_FLOW_DNAT, &flow->flags) && (nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir) < 0 || nf_flow_dnat_ip(flow, skb, iph, thoff, dir) < 0)) return -1; @@ -232,6 +232,13 @@ static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb, return NF_STOLEN; } +static bool nf_flow_offload_refresh(struct nf_flowtable *flow_table, + struct flow_offload *flow) +{ + return nf_flowtable_hw_offload(flow_table) && + test_and_clear_bit(NF_FLOW_HW_REFRESH, &flow->flags); +} + unsigned int nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) @@ -272,6 +279,9 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff)) return NF_ACCEPT; + if (unlikely(nf_flow_offload_refresh(flow_table, flow))) + nf_flow_offload_add(flow_table, flow); + if (nf_flow_offload_dst_check(&rt->dst)) { flow_offload_teardown(flow); return NF_ACCEPT; @@ -414,11 +424,11 @@ static int nf_flow_nat_ipv6(const struct flow_offload *flow, struct ipv6hdr *ip6h = ipv6_hdr(skb); unsigned int thoff = sizeof(*ip6h); - if (flow->flags & FLOW_OFFLOAD_SNAT && + if (test_bit(NF_FLOW_SNAT, &flow->flags) && (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 || nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0)) return -1; - if (flow->flags & FLOW_OFFLOAD_DNAT && + if (test_bit(NF_FLOW_DNAT, &flow->flags) && (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 || nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0)) return -1; @@ -498,6 +508,9 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb, sizeof(*ip6h))) return NF_ACCEPT; + if (unlikely(nf_flow_offload_refresh(flow_table, flow))) + nf_flow_offload_add(flow_table, flow); + if (nf_flow_offload_dst_check(&rt->dst)) { flow_offload_teardown(flow); return NF_ACCEPT; diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c index d06969af1085..c8b70ffeef0c 100644 --- a/net/netfilter/nf_flow_table_offload.c +++ b/net/netfilter/nf_flow_table_offload.c @@ -24,6 +24,7 @@ struct flow_offload_work { }; struct nf_flow_key { + struct flow_dissector_key_meta meta; struct flow_dissector_key_control control; struct flow_dissector_key_basic basic; union { @@ -55,6 +56,7 @@ static int nf_flow_rule_match(struct nf_flow_match *match, struct nf_flow_key *mask = &match->mask; struct nf_flow_key *key = &match->key; + NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_META, meta); NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_CONTROL, control); NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_BASIC, basic); NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4); @@ -62,6 +64,9 @@ static int nf_flow_rule_match(struct nf_flow_match *match, NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_TCP, tcp); NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_PORTS, tp); + key->meta.ingress_ifindex = tuple->iifidx; + mask->meta.ingress_ifindex = 0xffffffff; + switch (tuple->l3proto) { 
case AF_INET: key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; @@ -105,7 +110,8 @@ static int nf_flow_rule_match(struct nf_flow_match *match, key->tp.dst = tuple->dst_port; mask->tp.dst = 0xffff; - match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL) | + match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_META) | + BIT(FLOW_DISSECTOR_KEY_CONTROL) | BIT(FLOW_DISSECTOR_KEY_BASIC) | BIT(FLOW_DISSECTOR_KEY_PORTS); return 0; @@ -444,16 +450,16 @@ int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow, flow_offload_eth_dst(net, flow, dir, flow_rule) < 0) return -1; - if (flow->flags & FLOW_OFFLOAD_SNAT) { + if (test_bit(NF_FLOW_SNAT, &flow->flags)) { flow_offload_ipv4_snat(net, flow, dir, flow_rule); flow_offload_port_snat(net, flow, dir, flow_rule); } - if (flow->flags & FLOW_OFFLOAD_DNAT) { + if (test_bit(NF_FLOW_DNAT, &flow->flags)) { flow_offload_ipv4_dnat(net, flow, dir, flow_rule); flow_offload_port_dnat(net, flow, dir, flow_rule); } - if (flow->flags & FLOW_OFFLOAD_SNAT || - flow->flags & FLOW_OFFLOAD_DNAT) + if (test_bit(NF_FLOW_SNAT, &flow->flags) || + test_bit(NF_FLOW_DNAT, &flow->flags)) flow_offload_ipv4_checksum(net, flow, flow_rule); flow_offload_redirect(flow, dir, flow_rule); @@ -470,11 +476,11 @@ int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow, flow_offload_eth_dst(net, flow, dir, flow_rule) < 0) return -1; - if (flow->flags & FLOW_OFFLOAD_SNAT) { + if (test_bit(NF_FLOW_SNAT, &flow->flags)) { flow_offload_ipv6_snat(net, flow, dir, flow_rule); flow_offload_port_snat(net, flow, dir, flow_rule); } - if (flow->flags & FLOW_OFFLOAD_DNAT) { + if (test_bit(NF_FLOW_DNAT, &flow->flags)) { flow_offload_ipv6_dnat(net, flow, dir, flow_rule); flow_offload_port_dnat(net, flow, dir, flow_rule); } @@ -586,23 +592,25 @@ static void nf_flow_offload_init(struct flow_cls_offload *cls_flow, cls_flow->cookie = (unsigned long)tuple; } -static int flow_offload_tuple_add(struct flow_offload_work *offload, - struct nf_flow_rule *flow_rule, - enum flow_offload_tuple_dir dir) +static int nf_flow_offload_tuple(struct nf_flowtable *flowtable, + struct flow_offload *flow, + struct nf_flow_rule *flow_rule, + enum flow_offload_tuple_dir dir, + int priority, int cmd, + struct list_head *block_cb_list) { - struct nf_flowtable *flowtable = offload->flowtable; struct flow_cls_offload cls_flow = {}; struct flow_block_cb *block_cb; struct netlink_ext_ack extack; __be16 proto = ETH_P_ALL; int err, i = 0; - nf_flow_offload_init(&cls_flow, proto, offload->priority, - FLOW_CLS_REPLACE, - &offload->flow->tuplehash[dir].tuple, &extack); - cls_flow.rule = flow_rule->rule; + nf_flow_offload_init(&cls_flow, proto, priority, cmd, + &flow->tuplehash[dir].tuple, &extack); + if (cmd == FLOW_CLS_REPLACE) + cls_flow.rule = flow_rule->rule; - list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list) { + list_for_each_entry(block_cb, block_cb_list, list) { err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow, block_cb->cb_priv); if (err < 0) @@ -614,23 +622,22 @@ static int flow_offload_tuple_add(struct flow_offload_work *offload, return i; } +static int flow_offload_tuple_add(struct flow_offload_work *offload, + struct nf_flow_rule *flow_rule, + enum flow_offload_tuple_dir dir) +{ + return nf_flow_offload_tuple(offload->flowtable, offload->flow, + flow_rule, dir, offload->priority, + FLOW_CLS_REPLACE, + &offload->flowtable->flow_block.cb_list); +} + static void flow_offload_tuple_del(struct flow_offload_work *offload, enum flow_offload_tuple_dir dir) { - 
struct nf_flowtable *flowtable = offload->flowtable; - struct flow_cls_offload cls_flow = {}; - struct flow_block_cb *block_cb; - struct netlink_ext_ack extack; - __be16 proto = ETH_P_ALL; - - nf_flow_offload_init(&cls_flow, proto, offload->priority, - FLOW_CLS_DESTROY, - &offload->flow->tuplehash[dir].tuple, &extack); - - list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list) - block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow, block_cb->cb_priv); - - offload->flow->flags |= FLOW_OFFLOAD_HW_DEAD; + nf_flow_offload_tuple(offload->flowtable, offload->flow, NULL, dir, + offload->priority, FLOW_CLS_DESTROY, + &offload->flowtable->flow_block.cb_list); } static int flow_offload_rule_add(struct flow_offload_work *offload, @@ -648,20 +655,20 @@ static int flow_offload_rule_add(struct flow_offload_work *offload, return 0; } -static int flow_offload_work_add(struct flow_offload_work *offload) +static void flow_offload_work_add(struct flow_offload_work *offload) { struct nf_flow_rule *flow_rule[FLOW_OFFLOAD_DIR_MAX]; int err; err = nf_flow_offload_alloc(offload, flow_rule); if (err < 0) - return -ENOMEM; + return; err = flow_offload_rule_add(offload, flow_rule); + if (err < 0) + set_bit(NF_FLOW_HW_REFRESH, &offload->flow->flags); nf_flow_offload_destroy(flow_rule); - - return err; } static void flow_offload_work_del(struct flow_offload_work *offload) @@ -706,7 +713,6 @@ static void flow_offload_work_handler(struct work_struct *work) { struct flow_offload_work *offload, *next; LIST_HEAD(offload_pending_list); - int ret; spin_lock_bh(&flow_offload_pending_list_lock); list_replace_init(&flow_offload_pending_list, &offload_pending_list); @@ -715,9 +721,7 @@ static void flow_offload_work_handler(struct work_struct *work) list_for_each_entry_safe(offload, next, &offload_pending_list, list) { switch (offload->cmd) { case FLOW_CLS_REPLACE: - ret = flow_offload_work_add(offload); - if (ret < 0) - offload->flow->flags &= ~FLOW_OFFLOAD_HW; + flow_offload_work_add(offload); break; case FLOW_CLS_DESTROY: flow_offload_work_del(offload); @@ -742,20 +746,33 @@ static void flow_offload_queue_work(struct flow_offload_work *offload) schedule_work(&nf_flow_offload_work); } -void nf_flow_offload_add(struct nf_flowtable *flowtable, - struct flow_offload *flow) +static struct flow_offload_work * +nf_flow_offload_work_alloc(struct nf_flowtable *flowtable, + struct flow_offload *flow, unsigned int cmd) { struct flow_offload_work *offload; offload = kmalloc(sizeof(struct flow_offload_work), GFP_ATOMIC); if (!offload) - return; + return NULL; - offload->cmd = FLOW_CLS_REPLACE; + offload->cmd = cmd; offload->flow = flow; offload->priority = flowtable->priority; offload->flowtable = flowtable; - flow->flags |= FLOW_OFFLOAD_HW; + + return offload; +} + + +void nf_flow_offload_add(struct nf_flowtable *flowtable, + struct flow_offload *flow) +{ + struct flow_offload_work *offload; + + offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_REPLACE); + if (!offload) + return; flow_offload_queue_work(offload); } @@ -765,15 +782,11 @@ void nf_flow_offload_del(struct nf_flowtable *flowtable, { struct flow_offload_work *offload; - offload = kzalloc(sizeof(struct flow_offload_work), GFP_ATOMIC); + offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_DESTROY); if (!offload) return; - offload->cmd = FLOW_CLS_DESTROY; - offload->flow = flow; - offload->flow->flags |= FLOW_OFFLOAD_HW_DYING; - offload->flowtable = flowtable; - + set_bit(NF_FLOW_HW_DYING, &flow->flags); flow_offload_queue_work(offload); } @@ -784,24 
+797,19 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable, __s32 delta; delta = nf_flow_timeout_delta(flow->timeout); - if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10) || - flow->flags & FLOW_OFFLOAD_HW_DYING) + if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10)) return; - offload = kzalloc(sizeof(struct flow_offload_work), GFP_ATOMIC); + offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_STATS); if (!offload) return; - offload->cmd = FLOW_CLS_STATS; - offload->flow = flow; - offload->flowtable = flowtable; - flow_offload_queue_work(offload); } void nf_flow_table_offload_flush(struct nf_flowtable *flowtable) { - if (flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD) + if (nf_flowtable_hw_offload(flowtable)) flush_work(&nf_flow_offload_work); } @@ -830,28 +838,44 @@ static int nf_flow_table_block_setup(struct nf_flowtable *flowtable, return err; } -int nf_flow_table_offload_setup(struct nf_flowtable *flowtable, - struct net_device *dev, - enum flow_block_command cmd) +static int nf_flow_table_offload_cmd(struct flow_block_offload *bo, + struct nf_flowtable *flowtable, + struct net_device *dev, + enum flow_block_command cmd, + struct netlink_ext_ack *extack) { - struct netlink_ext_ack extack = {}; - struct flow_block_offload bo = {}; int err; - if (!(flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD)) + if (!nf_flowtable_hw_offload(flowtable)) return 0; if (!dev->netdev_ops->ndo_setup_tc) return -EOPNOTSUPP; - bo.net = dev_net(dev); - bo.block = &flowtable->flow_block; - bo.command = cmd; - bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS; - bo.extack = &extack; - INIT_LIST_HEAD(&bo.cb_list); + memset(bo, 0, sizeof(*bo)); + bo->net = dev_net(dev); + bo->block = &flowtable->flow_block; + bo->command = cmd; + bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS; + bo->extack = extack; + INIT_LIST_HEAD(&bo->cb_list); + + err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, bo); + if (err < 0) + return err; + + return 0; +} + +int nf_flow_table_offload_setup(struct nf_flowtable *flowtable, + struct net_device *dev, + enum flow_block_command cmd) +{ + struct netlink_ext_ack extack = {}; + struct flow_block_offload bo; + int err; - err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, &bo); + err = nf_flow_table_offload_cmd(&bo, flowtable, dev, cmd, &extack); if (err < 0) return err; diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c index 10e9d50e4e19..0ed2281f03be 100644 --- a/net/netfilter/nft_bitwise.c +++ b/net/netfilter/nft_bitwise.c @@ -18,21 +18,66 @@ struct nft_bitwise { enum nft_registers sreg:8; enum nft_registers dreg:8; + enum nft_bitwise_ops op:8; u8 len; struct nft_data mask; struct nft_data xor; + struct nft_data data; }; +static void nft_bitwise_eval_bool(u32 *dst, const u32 *src, + const struct nft_bitwise *priv) +{ + unsigned int i; + + for (i = 0; i < DIV_ROUND_UP(priv->len, 4); i++) + dst[i] = (src[i] & priv->mask.data[i]) ^ priv->xor.data[i]; +} + +static void nft_bitwise_eval_lshift(u32 *dst, const u32 *src, + const struct nft_bitwise *priv) +{ + u32 shift = priv->data.data[0]; + unsigned int i; + u32 carry = 0; + + for (i = DIV_ROUND_UP(priv->len, sizeof(u32)); i > 0; i--) { + dst[i - 1] = (src[i - 1] << shift) | carry; + carry = src[i - 1] >> (BITS_PER_TYPE(u32) - shift); + } +} + +static void nft_bitwise_eval_rshift(u32 *dst, const u32 *src, + const struct nft_bitwise *priv) +{ + u32 shift = priv->data.data[0]; + unsigned int i; + u32 carry = 0; + + for (i = 0; i < DIV_ROUND_UP(priv->len, sizeof(u32)); i++) { + dst[i] = carry | (src[i] >> shift); + 
carry = src[i] << (BITS_PER_TYPE(u32) - shift); + } +} + void nft_bitwise_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { const struct nft_bitwise *priv = nft_expr_priv(expr); const u32 *src = &regs->data[priv->sreg]; u32 *dst = &regs->data[priv->dreg]; - unsigned int i; - for (i = 0; i < DIV_ROUND_UP(priv->len, 4); i++) - dst[i] = (src[i] & priv->mask.data[i]) ^ priv->xor.data[i]; + switch (priv->op) { + case NFT_BITWISE_BOOL: + nft_bitwise_eval_bool(dst, src, priv); + break; + case NFT_BITWISE_LSHIFT: + nft_bitwise_eval_lshift(dst, src, priv); + break; + case NFT_BITWISE_RSHIFT: + nft_bitwise_eval_rshift(dst, src, priv); + break; + } } static const struct nla_policy nft_bitwise_policy[NFTA_BITWISE_MAX + 1] = { @@ -41,40 +86,22 @@ static const struct nla_policy nft_bitwise_policy[NFTA_BITWISE_MAX + 1] = { [NFTA_BITWISE_LEN] = { .type = NLA_U32 }, [NFTA_BITWISE_MASK] = { .type = NLA_NESTED }, [NFTA_BITWISE_XOR] = { .type = NLA_NESTED }, + [NFTA_BITWISE_OP] = { .type = NLA_U32 }, + [NFTA_BITWISE_DATA] = { .type = NLA_NESTED }, }; -static int nft_bitwise_init(const struct nft_ctx *ctx, - const struct nft_expr *expr, - const struct nlattr * const tb[]) +static int nft_bitwise_init_bool(struct nft_bitwise *priv, + const struct nlattr *const tb[]) { - struct nft_bitwise *priv = nft_expr_priv(expr); struct nft_data_desc d1, d2; - u32 len; int err; - if (tb[NFTA_BITWISE_SREG] == NULL || - tb[NFTA_BITWISE_DREG] == NULL || - tb[NFTA_BITWISE_LEN] == NULL || - tb[NFTA_BITWISE_MASK] == NULL || - tb[NFTA_BITWISE_XOR] == NULL) + if (tb[NFTA_BITWISE_DATA]) return -EINVAL; - err = nft_parse_u32_check(tb[NFTA_BITWISE_LEN], U8_MAX, &len); - if (err < 0) - return err; - - priv->len = len; - - priv->sreg = nft_parse_register(tb[NFTA_BITWISE_SREG]); - err = nft_validate_register_load(priv->sreg, priv->len); - if (err < 0) - return err; - - priv->dreg = nft_parse_register(tb[NFTA_BITWISE_DREG]); - err = nft_validate_register_store(ctx, priv->dreg, NULL, - NFT_DATA_VALUE, priv->len); - if (err < 0) - return err; + if (!tb[NFTA_BITWISE_MASK] || + !tb[NFTA_BITWISE_XOR]) + return -EINVAL; err = nft_data_init(NULL, &priv->mask, sizeof(priv->mask), &d1, tb[NFTA_BITWISE_MASK]); @@ -102,40 +129,151 @@ err1: return err; } -static int nft_bitwise_dump(struct sk_buff *skb, const struct nft_expr *expr) +static int nft_bitwise_init_shift(struct nft_bitwise *priv, + const struct nlattr *const tb[]) { - const struct nft_bitwise *priv = nft_expr_priv(expr); + struct nft_data_desc d; + int err; - if (nft_dump_register(skb, NFTA_BITWISE_SREG, priv->sreg)) - goto nla_put_failure; - if (nft_dump_register(skb, NFTA_BITWISE_DREG, priv->dreg)) - goto nla_put_failure; - if (nla_put_be32(skb, NFTA_BITWISE_LEN, htonl(priv->len))) - goto nla_put_failure; + if (tb[NFTA_BITWISE_MASK] || + tb[NFTA_BITWISE_XOR]) + return -EINVAL; + + if (!tb[NFTA_BITWISE_DATA]) + return -EINVAL; + err = nft_data_init(NULL, &priv->data, sizeof(priv->data), &d, + tb[NFTA_BITWISE_DATA]); + if (err < 0) + return err; + if (d.type != NFT_DATA_VALUE || d.len != sizeof(u32) || + priv->data.data[0] >= BITS_PER_TYPE(u32)) { + nft_data_release(&priv->data, d.type); + return -EINVAL; + } + + return 0; +} + +static int nft_bitwise_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]) +{ + struct nft_bitwise *priv = nft_expr_priv(expr); + u32 len; + int err; + + if (!tb[NFTA_BITWISE_SREG] || + !tb[NFTA_BITWISE_DREG] || + !tb[NFTA_BITWISE_LEN]) + return -EINVAL; + + err = 
nft_parse_u32_check(tb[NFTA_BITWISE_LEN], U8_MAX, &len); + if (err < 0) + return err; + + priv->len = len; + + priv->sreg = nft_parse_register(tb[NFTA_BITWISE_SREG]); + err = nft_validate_register_load(priv->sreg, priv->len); + if (err < 0) + return err; + + priv->dreg = nft_parse_register(tb[NFTA_BITWISE_DREG]); + err = nft_validate_register_store(ctx, priv->dreg, NULL, + NFT_DATA_VALUE, priv->len); + if (err < 0) + return err; + + if (tb[NFTA_BITWISE_OP]) { + priv->op = ntohl(nla_get_be32(tb[NFTA_BITWISE_OP])); + switch (priv->op) { + case NFT_BITWISE_BOOL: + case NFT_BITWISE_LSHIFT: + case NFT_BITWISE_RSHIFT: + break; + default: + return -EOPNOTSUPP; + } + } else { + priv->op = NFT_BITWISE_BOOL; + } + + switch(priv->op) { + case NFT_BITWISE_BOOL: + err = nft_bitwise_init_bool(priv, tb); + break; + case NFT_BITWISE_LSHIFT: + case NFT_BITWISE_RSHIFT: + err = nft_bitwise_init_shift(priv, tb); + break; + } + + return err; +} + +static int nft_bitwise_dump_bool(struct sk_buff *skb, + const struct nft_bitwise *priv) +{ if (nft_data_dump(skb, NFTA_BITWISE_MASK, &priv->mask, NFT_DATA_VALUE, priv->len) < 0) - goto nla_put_failure; + return -1; if (nft_data_dump(skb, NFTA_BITWISE_XOR, &priv->xor, NFT_DATA_VALUE, priv->len) < 0) - goto nla_put_failure; + return -1; return 0; +} + +static int nft_bitwise_dump_shift(struct sk_buff *skb, + const struct nft_bitwise *priv) +{ + if (nft_data_dump(skb, NFTA_BITWISE_DATA, &priv->data, + NFT_DATA_VALUE, sizeof(u32)) < 0) + return -1; + return 0; +} + +static int nft_bitwise_dump(struct sk_buff *skb, const struct nft_expr *expr) +{ + const struct nft_bitwise *priv = nft_expr_priv(expr); + int err = 0; + + if (nft_dump_register(skb, NFTA_BITWISE_SREG, priv->sreg)) + return -1; + if (nft_dump_register(skb, NFTA_BITWISE_DREG, priv->dreg)) + return -1; + if (nla_put_be32(skb, NFTA_BITWISE_LEN, htonl(priv->len))) + return -1; + if (nla_put_be32(skb, NFTA_BITWISE_OP, htonl(priv->op))) + return -1; + + switch (priv->op) { + case NFT_BITWISE_BOOL: + err = nft_bitwise_dump_bool(skb, priv); + break; + case NFT_BITWISE_LSHIFT: + case NFT_BITWISE_RSHIFT: + err = nft_bitwise_dump_shift(skb, priv); + break; + } -nla_put_failure: - return -1; + return err; } static struct nft_data zero; static int nft_bitwise_offload(struct nft_offload_ctx *ctx, - struct nft_flow_rule *flow, - const struct nft_expr *expr) + struct nft_flow_rule *flow, + const struct nft_expr *expr) { const struct nft_bitwise *priv = nft_expr_priv(expr); struct nft_offload_reg *reg = &ctx->regs[priv->dreg]; + if (priv->op != NFT_BITWISE_BOOL) + return -EOPNOTSUPP; + if (memcmp(&priv->xor, &zero, sizeof(priv->xor)) || priv->sreg != priv->dreg || priv->len != reg->len) return -EOPNOTSUPP; diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c index 087a056e34d1..87e8d9ba0c9b 100644 --- a/net/netfilter/nft_set_bitmap.c +++ b/net/netfilter/nft_set_bitmap.c @@ -259,8 +259,8 @@ static u64 nft_bitmap_privsize(const struct nlattr * const nla[], } static int nft_bitmap_init(const struct nft_set *set, - const struct nft_set_desc *desc, - const struct nlattr * const nla[]) + const struct nft_set_desc *desc, + const struct nlattr * const nla[]) { struct nft_bitmap *priv = nft_set_priv(set); diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index b331a3c9a3a8..d350a7cd3af0 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c @@ -645,7 +645,7 @@ static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features, } static bool 
nft_hash_fast_estimate(const struct nft_set_desc *desc, u32 features, - struct nft_set_estimate *est) + struct nft_set_estimate *est) { if (!desc->size) return false; diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index ced3fc8fad7c..bccd47cd7190 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -357,21 +357,7 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg, return 0; } -static bool select_all(const struct xt_hashlimit_htable *ht, - const struct dsthash_ent *he) -{ - return true; -} - -static bool select_gc(const struct xt_hashlimit_htable *ht, - const struct dsthash_ent *he) -{ - return time_after_eq(jiffies, he->expires); -} - -static void htable_selective_cleanup(struct xt_hashlimit_htable *ht, - bool (*select)(const struct xt_hashlimit_htable *ht, - const struct dsthash_ent *he)) +static void htable_selective_cleanup(struct xt_hashlimit_htable *ht, bool select_all) { unsigned int i; @@ -381,7 +367,7 @@ static void htable_selective_cleanup(struct xt_hashlimit_htable *ht, spin_lock_bh(&ht->lock); hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) { - if ((*select)(ht, dh)) + if (time_after_eq(jiffies, dh->expires) || select_all) dsthash_free(ht, dh); } spin_unlock_bh(&ht->lock); @@ -395,7 +381,7 @@ static void htable_gc(struct work_struct *work) ht = container_of(work, struct xt_hashlimit_htable, gc_work.work); - htable_selective_cleanup(ht, select_gc); + htable_selective_cleanup(ht, false); queue_delayed_work(system_power_efficient_wq, &ht->gc_work, msecs_to_jiffies(ht->cfg.gc_interval)); @@ -419,7 +405,7 @@ static void htable_destroy(struct xt_hashlimit_htable *hinfo) { cancel_delayed_work_sync(&hinfo->gc_work); htable_remove_proc_entry(hinfo); - htable_selective_cleanup(hinfo, select_all); + htable_selective_cleanup(hinfo, true); kfree(hinfo->name); vfree(hinfo); }
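
The nf_flow_table_offload.c hunk replaces the open-coded allocation in nf_flow_offload_stats() with a call to nf_flow_offload_work_alloc(), whose body is not part of this excerpt. A plausible sketch, reconstructed only from the removed lines (the helper name and FLOW_CLS_STATS usage come from the hunk itself; the exact upstream parameter types may differ):

static struct flow_offload_work *
nf_flow_offload_work_alloc(struct nf_flowtable *flowtable,
			   struct flow_offload *flow, unsigned int cmd)
{
	struct flow_offload_work *offload;

	/* GFP_ATOMIC, matching the allocation removed in the hunk above */
	offload = kzalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
	if (!offload)
		return NULL;

	offload->cmd = cmd;
	offload->flow = flow;
	offload->flowtable = flowtable;

	return offload;
}

Factoring the allocation out gives the add, del and stats paths one construction routine instead of three copies of the same field assignments.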
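The nft_bitwise_init_bool()/nft_bitwise_init_shift() split enforces a mutual-exclusion contract on the netlink attributes: boolean ops require NFTA_BITWISE_MASK and NFTA_BITWISE_XOR and reject NFTA_BITWISE_DATA, while the shift ops require NFTA_BITWISE_DATA and reject MASK/XOR. Condensed into a hypothetical predicate for illustration (nft_bitwise_attrs_valid() is not part of the patch):

static bool nft_bitwise_attrs_valid(enum nft_bitwise_ops op,
				    bool has_mask, bool has_xor, bool has_data)
{
	switch (op) {
	case NFT_BITWISE_BOOL:
		/* dst = (src & mask) ^ xor */
		return has_mask && has_xor && !has_data;
	case NFT_BITWISE_LSHIFT:
	case NFT_BITWISE_RSHIFT:
		/* dst = src << data[0] or dst = src >> data[0] */
		return has_data && !has_mask && !has_xor;
	}
	return false;
}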
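nft_bitwise_eval_lshift() and nft_bitwise_eval_rshift() treat the register as an array of 32-bit words, most significant word first, carrying the bits shifted out of each word into the adjacent one. A standalone userspace sketch of the left-shift loop (names are hypothetical; it assumes 0 < shift < 32 — the init path rejects shifts >= 32, and shift > 0 keeps the carry expression well-defined):

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_U32 32

/* words are stored most-significant first, as in nftables registers */
static void lshift_words(uint32_t *dst, const uint32_t *src,
			 unsigned int words, uint32_t shift)
{
	uint32_t carry = 0;
	unsigned int i;

	/* walk from the least significant (last) word upward; the overflow
	 * bits of each word land in the more significant word before it */
	for (i = words; i > 0; i--) {
		dst[i - 1] = (src[i - 1] << shift) | carry;
		carry = src[i - 1] >> (BITS_PER_U32 - shift);
	}
}

int main(void)
{
	uint32_t src[2] = { 0x00000001, 0x80000000 };	/* 0x00000001_80000000 */
	uint32_t dst[2];

	lshift_words(dst, src, 2, 1);
	printf("%08x %08x\n", dst[0], dst[1]);	/* prints: 00000003 00000000 */
	return 0;
}

The top bit of the low word crosses the word boundary and reappears as the low bit of the high word, which is exactly what the per-word carry exists to do.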
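With the selector callbacks gone, htable_selective_cleanup() decides expiry inline via time_after_eq(jiffies, dh->expires), which stays correct across jiffies wraparound because it compares through a signed subtraction. A minimal userspace illustration, simplified from the kernel macro (typecheck() omitted):

#include <stdio.h>

#define time_after_eq(a, b)	((long)((a) - (b)) >= 0)

int main(void)
{
	unsigned long jiffies = 5;	/* counter has just wrapped */
	unsigned long expires = -3UL;	/* deadline set shortly before the wrap */

	/* 5 - (ULONG_MAX - 2) wraps to 8; as a signed value that is small and
	 * positive, so the entry is correctly seen as expired even though
	 * expires > jiffies when compared as plain unsigned values. */
	printf("expired: %d\n", time_after_eq(jiffies, expires));
	return 0;
}

Passing select_all = true from htable_destroy() simply short-circuits the check so that every entry is freed on teardown, while the periodic GC passes false and frees only expired entries.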